Python data.insert_c1c2 Function Code Examples


This article collects typical usage examples of the Python function tools.data.insert_c1c2. If you are wondering what insert_c1c2 does, how to call it, or what it looks like in real code, the selected examples below should help.



The following presents 20 code examples of the insert_c1c2 function, ordered by popularity by default.
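All of these examples lean on a few helpers from the dtest tools.data module — create_c1c2_table, insert_c1c2, and query_c1c2 — whose definitions are not reproduced on this page. The sketch below is a hypothetical, simplified reconstruction inferred purely from how the examples call them; the table schema, the 'k&lt;n&gt;'/'value1'/'value2' naming, and the exact signatures are assumptions, not the actual cassandra-dtest code.

# Hypothetical sketch of the tools.data helpers, inferred from the calls in the
# examples below; the real cassandra-dtest implementation may differ.
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement


def create_c1c2_table(tester, session, **kwargs):
    # Assumed schema: a table 'cf' keyed by text, with two text columns c1 and c2.
    session.execute("CREATE TABLE cf (key text PRIMARY KEY, c1 text, c2 text)")


def insert_c1c2(session, keys=None, n=None, consistency=ConsistencyLevel.QUORUM):
    # Write one row per key, either for an explicit iterable of keys or for range(n).
    if keys is None:
        keys = range(n)
    for k in keys:
        stmt = SimpleStatement(
            "INSERT INTO cf (key, c1, c2) VALUES ('k%d', 'value1', 'value2')" % k,
            consistency_level=consistency)
        session.execute(stmt)


def query_c1c2(session, key, consistency=ConsistencyLevel.QUORUM,
               tolerate_missing=False, must_be_missing=False):
    # Read one key back and assert both columns were stored, or that the row is
    # absent when must_be_missing is set.
    stmt = SimpleStatement("SELECT c1, c2 FROM cf WHERE key='k%d'" % key,
                           consistency_level=consistency)
    rows = list(session.execute(stmt))
    if must_be_missing:
        assert len(rows) == 0, "Key k%d should not be present" % key
    elif rows or not tolerate_missing:
        assert len(rows) == 1
        assert rows[0].c1 == 'value1' and rows[0].c2 == 'value2'

Under those assumptions, every example below follows the same pattern: create a keyspace and the c1c2 table, write rows with insert_c1c2 at a chosen consistency level, perform some topology or failure operation (decommission, move, rebuild, stopping nodes), and then read each key back with query_c1c2 to verify the data is still reachable.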

Example 1: blacklisted_directory_test

    def blacklisted_directory_test(self):
        cluster = self.cluster
        cluster.set_datadir_count(3)
        cluster.populate(1)
        [node] = cluster.nodelist()
        remove_perf_disable_shared_mem(node)
        cluster.start(wait_for_binary_proto=True)

        session = self.patient_cql_connection(node)
        create_ks(session, 'ks', 1)
        create_c1c2_table(self, session)
        insert_c1c2(session, n=10000)
        node.flush()
        for k in xrange(0, 10000):
            query_c1c2(session, k)

        node.compact()
        mbean = make_mbean('db', type='BlacklistedDirectories')
        with JolokiaAgent(node) as jmx:
            jmx.execute_method(mbean, 'markUnwritable', [os.path.join(node.get_path(), 'data0')])

        for k in xrange(0, 10000):
            query_c1c2(session, k)

        node.nodetool('relocatesstables')

        for k in xrange(0, 10000):
            query_c1c2(session, k)
Developer: jeffjirsa, Project: cassandra-dtest, Lines: 28, Source: disk_balance_test.py


Example 2: _do_hinted_handoff

    def _do_hinted_handoff(self, node1, node2, enabled, keyspace='ks'):
        """
        Test that if we stop one node the other one
        will store hints only when hinted handoff is enabled
        """
        session = self.patient_exclusive_cql_connection(node1)
        create_ks(session, keyspace, 2)
        create_c1c2_table(self, session)

        node2.stop(wait_other_notice=True)

        insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)

        log_mark = node1.mark_log()
        node2.start(wait_other_notice=True)

        if enabled:
            node1.watch_log_for(["Finished hinted"], from_mark=log_mark, timeout=120)

        node1.stop(wait_other_notice=True)

        # Check node2 for all the keys: they should have been delivered via HH if it was enabled, and be missing otherwise
        session = self.patient_exclusive_cql_connection(node2, keyspace=keyspace)
        for n in xrange(0, 100):
            if enabled:
                query_c1c2(session, n, ConsistencyLevel.ONE)
            else:
                query_c1c2(session, n, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
Developer: iamaleksey, Project: cassandra-dtest, Lines: 28, Source: hintedhandoff_test.py


Example 3: test_decommission

    def test_decommission(self):
        cluster = self.cluster

        tokens = cluster.balanced_tokens(4)
        cluster.populate(4, tokens=tokens).start()
        node1, node2, node3, node4 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 2)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

        insert_c1c2(session, n=30000, consistency=ConsistencyLevel.QUORUM)

        cluster.flush()
        sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
        init_size = sizes[0]
        assert_almost_equal(*sizes)

        time.sleep(.5)
        node4.decommission()
        node4.stop()
        cluster.cleanup()
        time.sleep(.5)

        # Check we can get all the keys
        for n in range(0, 30000):
            query_c1c2(session, n, ConsistencyLevel.QUORUM)

        sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
        logger.debug(sizes)
        assert_almost_equal(sizes[0], sizes[1])
        assert_almost_equal((2.0 / 3.0) * sizes[0], sizes[2])
        assert_almost_equal(sizes[2], init_size)
Developer: vinaykumarchella, Project: cassandra-dtest, Lines: 33, Source: topology_test.py


Example 4: test_move_single_node

    def test_move_single_node(self):
        """ Test moving a node in a single-node cluster (#4200) """
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(1, tokens=[0]).start()
        node1 = cluster.nodelist()[0]
        time.sleep(0.2)

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)

        cluster.flush()

        node1.move(2**25)
        time.sleep(1)

        cluster.cleanup()

        # Check we can get all the keys
        for n in range(0, 10000):
            query_c1c2(session, n, ConsistencyLevel.ONE)
Developer: vinaykumarchella, Project: cassandra-dtest, Lines: 25, Source: topology_test.py


Example 5: readrepair_test

    def readrepair_test(self):
        cluster = self.cluster
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})

        if DISABLE_VNODES:
            cluster.populate(2).start()
        else:
            tokens = cluster.balanced_tokens(2)
            cluster.populate(2, tokens=tokens).start()
        node1, node2 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 2)
        create_c1c2_table(self, session, read_repair=1.0)

        node2.stop(wait_other_notice=True)

        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)

        node2.start(wait_for_binary_proto=True, wait_other_notice=True)

        # query everything to cause RR
        for n in xrange(0, 10000):
            query_c1c2(session, n, ConsistencyLevel.QUORUM)

        node1.stop(wait_other_notice=True)

        # Check node2 for all the keys that should have been repaired
        session = self.patient_cql_connection(node2, keyspace='ks')
        for n in xrange(0, 10000):
            query_c1c2(session, n, ConsistencyLevel.ONE)
Developer: ifesdjeen, Project: cassandra-dtest, Lines: 31, Source: consistency_test.py


Example 6: quorum_available_during_failure_test

    def quorum_available_during_failure_test(self):
        CL = ConsistencyLevel.QUORUM
        RF = 3

        debug("Creating a ring")
        cluster = self.cluster
        if DISABLE_VNODES:
            cluster.populate(3).start()
        else:
            tokens = cluster.balanced_tokens(3)
            cluster.populate(3, tokens=tokens).start()
        node1, node2, node3 = cluster.nodelist()

        debug("Set to talk to node 2")
        session = self.patient_cql_connection(node2)
        create_ks(session, 'ks', RF)
        create_c1c2_table(self, session)

        debug("Generating some data")
        insert_c1c2(session, n=100, consistency=CL)

        debug("Taking down node1")
        node1.stop(wait_other_notice=True)

        debug("Reading back data.")
        for n in xrange(100):
            query_c1c2(session, n, CL)
Developer: ifesdjeen, Project: cassandra-dtest, Lines: 27, Source: consistency_test.py


Example 7: test_resumable_decommission

    def test_resumable_decommission(self):
        """
        @jira_ticket CASSANDRA-12008

        Test decommission operation is resumable
        """
        self.fixture_dtest_setup.ignore_log_patterns = [r'Streaming error occurred',
                                                        r'Error while decommissioning node',
                                                        r'Remote peer 127.0.0.2 failed stream session',
                                                        r'Remote peer 127.0.0.2:7000 failed stream session']
        cluster = self.cluster
        cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
        cluster.populate(3, install_byteman=True).start(wait_other_notice=True)
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node2)
        # reduce system_distributed RF to 2 so we don't require forceful decommission
        session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")
        create_ks(session, 'ks', 2)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

        # Execute first rebuild, should fail
        with pytest.raises(ToolError):
            if cluster.version() >= '4.0':
                script = ['./byteman/4.0/decommission_failure_inject.btm']
            else:
                script = ['./byteman/pre4.0/decommission_failure_inject.btm']
            node2.byteman_submit(script)
            node2.nodetool('decommission')

        # Make sure previous ToolError is due to decommission
        node2.watch_log_for('Error while decommissioning node')

        # Decommission again
        mark = node2.mark_log()
        node2.nodetool('decommission')

        # Check decommission is done and the transferred ranges were skipped
        node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
        node2.grep_log("Skipping transferred range .* of keyspace ks, endpoint {}".format(node2.address_for_current_version_slashy()), filename='debug.log')

        # Check data is correctly forwarded to node1 and node3
        cluster.remove(node2)
        node3.stop(gently=False)
        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')
        for i in range(0, 10000):
            query_c1c2(session, i, ConsistencyLevel.ONE)
        node1.stop(gently=False)
        node3.start()
        session.shutdown()
        mark = node3.mark_log()
        node3.watch_log_for('Starting listening for CQL clients', from_mark=mark)
        session = self.patient_exclusive_cql_connection(node3)
        session.execute('USE ks')
        for i in range(0, 10000):
            query_c1c2(session, i, ConsistencyLevel.ONE)
Developer: vinaykumarchella, Project: cassandra-dtest, Lines: 58, Source: topology_test.py


Example 8: _deprecated_repair_jmx

    def _deprecated_repair_jmx(self, method, arguments):
        """
        * Launch a two node, two DC cluster
        * Create a keyspace and table
        * Insert some data
        * Call the deprecated repair JMX API based on the arguments passed into this method
        * Check the node log to see if the correct repair was performed based on the jmx args
        """
        cluster = self.cluster

        logger.debug("Starting cluster..")
        cluster.populate([1, 1])
        node1, node2 = cluster.nodelist()
        remove_perf_disable_shared_mem(node1)
        cluster.start()
        supports_pull_repair = cluster.version() >= LooseVersion('3.10')

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 2)
        create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})

        insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL)

        # Run repair
        mbean = make_mbean('db', 'StorageService')
        with JolokiaAgent(node1) as jmx:
            # assert repair runs and returns valid cmd number
            assert jmx.execute_method(mbean, method, arguments) == 1
        # wait for log to start
        node1.watch_log_for("Starting repair command")
        # get repair parameters from the log
        line = node1.grep_log(("Starting repair command #1" + (" \([^\)]+\)" if cluster.version() >= LooseVersion("3.10") else "") +
                               ", repairing keyspace ks with repair options \(parallelism: (?P<parallelism>\w+), primary range: (?P<pr>\w+), "
                               "incremental: (?P<incremental>\w+), job threads: (?P<jobs>\d+), ColumnFamilies: (?P<cfs>.+), dataCenters: (?P<dc>.+), "
                               "hosts: (?P<hosts>.+), # of ranges: (?P<ranges>\d+)(, pull repair: (?P<pullrepair>true|false))?\)"))

        assert_length_equal(line, 1)
        line, m = line[0]

        if supports_pull_repair:
            assert m.group("pullrepair"), "false" == "Pull repair cannot be enabled through the deprecated API so the pull repair option should always be false."

        return {"parallelism": m.group("parallelism"),
                "primary_range": m.group("pr"),
                "incremental": m.group("incremental"),
                "job_threads": m.group("jobs"),
                "column_families": m.group("cfs"),
                "data_centers": m.group("dc"),
                "hosts": m.group("hosts"),
                "ranges": m.group("ranges")}
Developer: beobal, Project: cassandra-dtest, Lines: 50, Source: deprecated_repair_test.py


Example 9: resumable_decommission_test

    def resumable_decommission_test(self):
        """
        @jira_ticket CASSANDRA-12008

        Test decommission operation is resumable
        """
        self.ignore_log_patterns = [r'Streaming error occurred', r'Error while decommissioning node', r'Remote peer 127.0.0.2 failed stream session']
        cluster = self.cluster
        cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
        cluster.populate(3, install_byteman=True).start(wait_other_notice=True)
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node2)
        create_ks(session, 'ks', 2)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

        # Execute first rebuild, should fail
        with self.assertRaises(ToolError):
            script = ['./byteman/decommission_failure_inject.btm']
            node2.byteman_submit(script)
            node2.nodetool('decommission')

        # Make sure previous ToolError is due to decommission
        node2.watch_log_for('Error while decommissioning node')

        # Decommission again
        mark = node2.mark_log()
        node2.nodetool('decommission')

        # Check decommission is done and the transferred ranges were skipped
        node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
        node2.grep_log("Skipping transferred range .* of keyspace ks, endpoint /127.0.0.3", filename='debug.log')

        # Check data is correctly forwarded to node1 and node3
        cluster.remove(node2)
        node3.stop(gently=False)
        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')
        for i in xrange(0, 10000):
            query_c1c2(session, i, ConsistencyLevel.ONE)
        node1.stop(gently=False)
        node3.start()
        session.shutdown()
        mark = node3.mark_log()
        node3.watch_log_for('Starting listening for CQL clients', from_mark=mark)
        session = self.patient_exclusive_cql_connection(node3)
        session.execute('USE ks')
        for i in xrange(0, 10000):
            query_c1c2(session, i, ConsistencyLevel.ONE)
Developer: jeffjirsa, Project: cassandra-dtest, Lines: 50, Source: topology_test.py


Example 10: hintedhandoff_decom_test

    def hintedhandoff_decom_test(self):
        self.cluster.populate(4).start(wait_for_binary_proto=True)
        [node1, node2, node3, node4] = self.cluster.nodelist()
        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 2)
        create_c1c2_table(self, session)
        node4.stop(wait_other_notice=True)
        insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)
        node1.decommission()
        node4.start(wait_for_binary_proto=True)
        node2.decommission()
        node3.decommission()
        time.sleep(5)
        for x in xrange(0, 100):
            query_c1c2(session, x, ConsistencyLevel.ONE)
Developer: jeffjirsa, Project: cassandra-dtest, Lines: 15, Source: hintedhandoff_test.py


Example 11: test_non_local_read

    def test_non_local_read(self):
        """ This test reads from a coordinator we know has no copy of the data """
        cluster = self.cluster

        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 2)
        create_c1c2_table(self, session)

        # insert and get at CL.QUORUM (since RF=2, node1 won't have all keys locally)
        insert_c1c2(session, n=1000, consistency=ConsistencyLevel.QUORUM)
        for n in range(0, 1000):
            query_c1c2(session, n, ConsistencyLevel.QUORUM)
Developer: vinaykumarchella, Project: cassandra-dtest, Lines: 15, Source: putget_test.py


Example 12: test_movement

    def test_movement(self):
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

        insert_c1c2(session, n=30000, consistency=ConsistencyLevel.ONE)

        cluster.flush()

        # Move nodes to balance the cluster
        def move_node(node, token):
            mark = node.mark_log()
            node.move(token)  # can't assume 0 is balanced with m3p
            node.watch_log_for('{} state jump to NORMAL'.format(node.address_for_current_version()), from_mark=mark, timeout=180)
            time.sleep(3)

        balancing_tokens = cluster.balanced_tokens(3)

        move_node(node1, balancing_tokens[0])
        move_node(node2, balancing_tokens[1])
        move_node(node3, balancing_tokens[2])

        time.sleep(1)
        cluster.cleanup()
        for node in cluster.nodelist():
            # after moving nodes we need to relocate any tokens in the wrong places, and after doing that
            # we might have overlapping tokens on the disks, so run a major compaction to even out the balance
            if cluster.version() >= '3.2':
                node.nodetool("relocatesstables")
            node.nodetool("compact")

        # Check we can get all the keys
        for n in range(0, 30000):
            query_c1c2(session, n, ConsistencyLevel.ONE)

        # Now the load should be basically even
        sizes = [node.data_size() for node in [node1, node2, node3]]

        assert_almost_equal(sizes[0], sizes[1], error=0.05)
        assert_almost_equal(sizes[0], sizes[2], error=0.05)
        assert_almost_equal(sizes[1], sizes[2], error=0.05)
Developer: vinaykumarchella, Project: cassandra-dtest, Lines: 47, Source: topology_test.py


Example 13: test_consistent_reads_after_bootstrap

    def test_consistent_reads_after_bootstrap(self):
        logger.debug("Creating a ring")
        cluster = self.cluster
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
                                                  'write_request_timeout_in_ms': 60000,
                                                  'read_request_timeout_in_ms': 60000,
                                                  'dynamic_snitch_badness_threshold': 0.0})
        cluster.set_batch_commitlog(enabled=True)

        cluster.populate(2)
        node1, node2 = cluster.nodelist()
        cluster.start(wait_for_binary_proto=True, wait_other_notice=True)

        logger.debug("Set to talk to node 2")
        n2session = self.patient_cql_connection(node2)
        create_ks(n2session, 'ks', 2)
        create_c1c2_table(self, n2session)

        logger.debug("Generating some data for all nodes")
        insert_c1c2(n2session, keys=list(range(10, 20)), consistency=ConsistencyLevel.ALL)

        node1.flush()
        logger.debug("Taking down node1")
        node1.stop(wait_other_notice=True)

        logger.debug("Writing data to only node2")
        insert_c1c2(n2session, keys=list(range(30, 1000)), consistency=ConsistencyLevel.ONE)
        node2.flush()

        logger.debug("Restart node1")
        node1.start(wait_other_notice=True)

        logger.debug("Bootstraping node3")
        node3 = new_node(cluster)
        node3.start(wait_for_binary_proto=True)

        n3session = self.patient_cql_connection(node3)
        n3session.execute("USE ks")
        logger.debug("Checking that no data was lost")
        for n in range(10, 20):
            query_c1c2(n3session, n, ConsistencyLevel.ALL)

        for n in range(30, 1000):
            query_c1c2(n3session, n, ConsistencyLevel.ALL)
Developer: beobal, Project: cassandra-dtest, Lines: 44, Source: consistent_bootstrap_test.py


Example 14: _test_streaming

    def _test_streaming(self, op_zerocopy, op_partial, num_partial, num_zerocopy,
                        compaction_strategy='LeveledCompactionStrategy', num_keys=1000, rf=3, num_nodes=3):
        keys = num_keys

        cluster = self.cluster
        tokens = cluster.balanced_tokens(num_nodes)
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        cluster.set_configuration_options(values={'num_tokens': 1})

        cluster.populate(num_nodes)
        nodes = cluster.nodelist()

        for i in range(0, len(nodes)):
            nodes[i].set_configuration_options(values={'initial_token': tokens[i]})

        cluster.start(wait_for_binary_proto=True)

        session = self.patient_cql_connection(nodes[0])

        create_ks(session, name='ks2', rf=rf)

        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'},
                  compaction_strategy=compaction_strategy)
        insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)

        session_n2 = self.patient_exclusive_cql_connection(nodes[1])
        session_n2.execute("TRUNCATE system.available_ranges;")

        mark = nodes[1].mark_log()
        nodes[1].nodetool('rebuild -ks ks2')

        nodes[1].watch_log_for('Completed submission of build tasks', filename='debug.log', timeout=120)
        zerocopy_streamed_sstable = len(
            nodes[1].grep_log('.*CassandraEntireSSTableStreamReader.*?Finished receiving Data.*', filename='debug.log',
                              from_mark=mark))
        partial_streamed_sstable = len(
            nodes[1].grep_log('.*CassandraStreamReader.*?Finished receiving file.*', filename='debug.log',
                              from_mark=mark))

        assert op_zerocopy(zerocopy_streamed_sstable, num_zerocopy), "%s %s %s" % (num_zerocopy, opmap.get(op_zerocopy),
                                                                                   zerocopy_streamed_sstable)
        assert op_partial(partial_streamed_sstable, num_partial), "%s %s %s" % (num_partial, op_partial,
                                                                                partial_streamed_sstable)
Developer: jasobrown, Project: cassandra-dtest, Lines: 43, Source: streaming_test.py


Example 15: test_movement

    def test_movement(self):
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

        insert_c1c2(session, n=30000, consistency=ConsistencyLevel.ONE)

        cluster.flush()

        # Move nodes to balance the cluster
        def move_node(node, token):
            mark = node.mark_log()
            node.move(token)  # can't assume 0 is balanced with m3p
            node.watch_log_for('{} state jump to NORMAL'.format(node.address_for_current_version()), from_mark=mark, timeout=180)
            time.sleep(3)

        balancing_tokens = cluster.balanced_tokens(3)

        move_node(node1, balancing_tokens[0])
        move_node(node2, balancing_tokens[1])
        move_node(node3, balancing_tokens[2])

        time.sleep(1)

        cluster.cleanup()

        # Check we can get all the keys
        for n in range(0, 30000):
            query_c1c2(session, n, ConsistencyLevel.ONE)

        # Now the load should be basically even
        sizes = [node.data_size() for node in [node1, node2, node3]]

        assert_almost_equal(sizes[0], sizes[1])
        assert_almost_equal(sizes[0], sizes[2])
        assert_almost_equal(sizes[1], sizes[2])
Developer: beobal, Project: cassandra-dtest, Lines: 42, Source: topology_test.py


Example 16: consistent_reads_after_move_test

    def consistent_reads_after_move_test(self):
        debug("Creating a ring")
        cluster = self.cluster
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'write_request_timeout_in_ms': 60000,
                                                  'read_request_timeout_in_ms': 60000, 'dynamic_snitch_badness_threshold': 0.0})
        cluster.set_batch_commitlog(enabled=True)

        cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
        node1, node2, node3 = cluster.nodelist()

        debug("Set to talk to node 2")
        n2session = self.patient_cql_connection(node2)
        create_ks(n2session, 'ks', 2)
        create_c1c2_table(self, n2session)

        debug("Generating some data for all nodes")
        insert_c1c2(n2session, keys=range(10, 20), consistency=ConsistencyLevel.ALL)

        node1.flush()
        debug("Taking down node1")
        node1.stop(wait_other_notice=True)

        debug("Writing data to node2")
        insert_c1c2(n2session, keys=range(30, 1000), consistency=ConsistencyLevel.ONE)
        node2.flush()

        debug("Restart node1")
        node1.start(wait_other_notice=True)

        debug("Move token on node3")
        node3.move(2)

        debug("Checking that no data was lost")
        for n in xrange(10, 20):
            query_c1c2(n2session, n, ConsistencyLevel.ALL)

        for n in xrange(30, 1000):
            query_c1c2(n2session, n, ConsistencyLevel.ALL)
Developer: iamaleksey, Project: cassandra-dtest, Lines: 38, Source: consistent_bootstrap_test.py


Example 17: test_concurrent_decommission_not_allowed

    def test_concurrent_decommission_not_allowed(self):
        """
        Test concurrent decommission is not allowed
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
        cluster.populate(2).start(wait_other_notice=True)
        node1, node2 = cluster.nodelist()

        session = self.patient_cql_connection(node2)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

        mark = node2.mark_log()

        def decommission():
            node2.nodetool('decommission')

        # Launch first decommission in a external thread
        t = Thread(target=decommission)
        t.start()

        # Make sure first decommission is initialized before second decommission
        node2.watch_log_for('DECOMMISSIONING', filename='debug.log')

        # Launch a second decommission, should fail
        with pytest.raises(ToolError):
            node2.nodetool('decommission')

        # Check data is correctly forwarded to node1 after node2 is decommissioned
        t.join()
        node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
        session = self.patient_cql_connection(node1)
        session.execute('USE ks')
        for n in range(0, 10000):
            query_c1c2(session, n, ConsistencyLevel.ONE)
Developer: vinaykumarchella, Project: cassandra-dtest, Lines: 37, Source: topology_test.py


Example 18: resumable_rebuild_test

    def resumable_rebuild_test(self):
        """
        @jira_ticket CASSANDRA-10810

        Test rebuild operation is resumable
        """
        self.ignore_log_patterns = list(self.ignore_log_patterns) + [r'Error while rebuilding node',
                                                                     r'Streaming error occurred on session with peer 127.0.0.3',
                                                                     r'Remote peer 127.0.0.3 failed stream session']
        cluster = self.cluster
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})

        # Create 2 nodes on dc1
        node1 = cluster.create_node('node1', False,
                                    ('127.0.0.1', 9160),
                                    ('127.0.0.1', 7000),
                                    '7100', '2000', None,
                                    binary_interface=('127.0.0.1', 9042))
        node2 = cluster.create_node('node2', False,
                                    ('127.0.0.2', 9160),
                                    ('127.0.0.2', 7000),
                                    '7200', '2001', None,
                                    binary_interface=('127.0.0.2', 9042))

        cluster.add(node1, True, data_center='dc1')
        cluster.add(node2, True, data_center='dc1')

        node1.start(wait_for_binary_proto=True)
        node2.start(wait_for_binary_proto=True)

        # Insert data into node1 and node2
        session = self.patient_exclusive_cql_connection(node1)
        create_ks(session, 'ks', {'dc1': 1})
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)
        key = list(range(10000, 20000))
        session = self.patient_exclusive_cql_connection(node2)
        session.execute('USE ks')
        insert_c1c2(session, keys=key, consistency=ConsistencyLevel.ALL)
        session.shutdown()

        # Create a new node3 on dc2
        node3 = cluster.create_node('node3', False,
                                    ('127.0.0.3', 9160),
                                    ('127.0.0.3', 7000),
                                    '7300', '2002', None,
                                    binary_interface=('127.0.0.3', 9042),
                                    byteman_port='8300')

        cluster.add(node3, False, data_center='dc2')

        node3.start(wait_other_notice=False, wait_for_binary_proto=True)

        # Wait for snitch to be refreshed
        time.sleep(5)

        # Alter necessary keyspace for rebuild operation
        session = self.patient_exclusive_cql_connection(node3)
        session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
        session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")

        # Path to byteman script which makes the streaming to node2 throw an exception, making rebuild fail
        script = ['./byteman/inject_failure_streaming_to_node2.btm']
        node3.byteman_submit(script)

        # First rebuild must fail and data must be incomplete
        with self.assertRaises(ToolError, msg='Unexpected: SUCCEED'):
            debug('Executing first rebuild -> '),
            node3.nodetool('rebuild dc1')
        debug('Expected: FAILED')

        session.execute('USE ks')
        with self.assertRaises(AssertionError, msg='Unexpected: COMPLETE'):
            debug('Checking data is complete -> '),
            for i in xrange(0, 20000):
                query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
        debug('Expected: INCOMPLETE')

        debug('Executing second rebuild -> '),
        node3.nodetool('rebuild dc1')
        debug('Expected: SUCCEED')

        # Check all streaming sessions completed, streamed ranges are skipped and verify streamed data
        node3.watch_log_for('All sessions completed')
        node3.watch_log_for('Skipping streaming those ranges.')
        debug('Checking data is complete -> '),
        for i in xrange(0, 20000):
            query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
        debug('Expected: COMPLETE')
Developer: pauloricardomg, Project: cassandra-dtest, Lines: 89, Source: rebuild_test.py


Example 19: rebuild_ranges_test

    def rebuild_ranges_test(self):
        """
        @jira_ticket CASSANDRA-10406
        """
        keys = 1000

        cluster = self.cluster
        tokens = cluster.balanced_tokens_across_dcs(['dc1', 'dc2'])
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        cluster.set_configuration_options(values={'num_tokens': 1})
        node1 = cluster.create_node('node1', False,
                                    ('127.0.0.1', 9160),
                                    ('127.0.0.1', 7000),
                                    '7100', '2000', tokens[0],
                                    binary_interface=('127.0.0.1', 9042))
        node1.set_configuration_options(values={'initial_token': tokens[0]})
        cluster.add(node1, True, data_center='dc1')
        node1 = cluster.nodelist()[0]

        # start node in dc1
        node1.start(wait_for_binary_proto=True)

        # populate data in dc1
        session = self.patient_exclusive_cql_connection(node1)
        # ks1 will be rebuilt in node2
        create_ks(session, 'ks1', {'dc1': 1})
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
        # ks2 will not be rebuilt in node2
        create_ks(session, 'ks2', {'dc1': 1})
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
        session.shutdown()

        # Bootstrapping a new node in dc2 with auto_bootstrap: false
        node2 = cluster.create_node('node2', False,
                                    ('127.0.0.2', 9160),
                                    ('127.0.0.2', 7000),
                                    '7200', '2001', tokens[1],
                                    binary_interface=('127.0.0.2', 9042))
        node2.set_configuration_options(values={'initial_token': tokens[1]})
        cluster.add(node2, False, data_center='dc2')
        node2.start(wait_other_notice=True, wait_for_binary_proto=True)

        # wait for snitch to reload
        time.sleep(60)
        # alter keyspace to replicate to dc2
        session = self.patient_exclusive_cql_connection(node2)
        session.execute("ALTER KEYSPACE ks1 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
        session.execute("ALTER KEYSPACE ks2 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
        session.execute('USE ks1')

        # rebuild only ks1 with range that is node1's replica
        node2.nodetool('rebuild -ks ks1 -ts (%s,%s] dc1' % (tokens[1], str(pow(2, 63) - 1)))

        # check data is sent by stopping node1
        node1.stop()
        for i in xrange(0, keys):
            query_c1c2(session, i, ConsistencyLevel.ONE)
        # ks2 should not be streamed
        session.execute('USE ks2')
        for i in xrange(0, keys):
            query_c1c2(session, i, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
Developer: pauloricardomg, Project: cassandra-dtest, Lines: 63, Source: rebuild_test.py


Example 20: multiple_repair_test

    def multiple_repair_test(self):
        """
        * Launch a three node cluster
        * Create a keyspace with RF 3 and a table
        * Insert 49 rows
        * Stop node3
        * Insert 50 more rows
        * Restart node3
        * Issue an incremental repair on node3
        * Stop node2
        * Insert a final 50 rows
        * Restart node2
        * Issue an incremental repair on node2
        * Replace node3 with a new node
        * Verify data integrity
        # TODO: Several more verifications of data need to be interspersed throughout the test. The final assertion is insufficient.
        @jira_ticket CASSANDRA-10644
        """
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 3)
        create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})

        debug("insert data")

        insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)
        node1.flush()

        debug("bringing down node 3")
        node3.flush()
        node3.stop(gently=False)

        debug("inserting additional data into node 1 and 2")
        insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)
        node1.flush()
        node2.flush()

        debug("restarting and repairing node 3")
        node3.start(wait_for_binary_proto=True)

        if cluster.version() >= "2.2":
            node3.repair()
        else:
            node3.nodetool("repair -par -inc")

        # wait for stream handlers to be closed on Windows
        # after the session is finished (see CASSANDRA-10644)
        if is_win:
            time.sleep(2)

        debug("stopping node 2")
        node2.stop(gently=False)

        debug("inserting data in nodes 1 and 3")
        insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)
        node1.flush()
        node3.flush()

        debug("start and repair node 2")
        node2.start(wait_for_binary_proto=True)

        if cluster.version() >= "2.2":
            node2.repair()
        else:
            node2.nodetool("repair -par -inc")

        debug("replace node and check data integrity")
        node3.stop(gently=False)
        node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))
        cluster.add(node5, False)
        node5.start(replace_address='127.0.0.3', wait_other_notice=True)

        assert_one(session, "SELECT COUNT(*) FROM ks.cf LIMIT 200", [149])
Developer: snazy, Project: cassandra-dtest, Lines: 76, Source: incremental_repair_test.py



