Document status - Public
Copyright 2022, Altinity Inc. All Rights Reserved. All information contained herein is, and remains, the property of Altinity Inc. Any dissemination of this information or reproduction of this material is strictly forbidden unless prior written permission is obtained from Altinity Inc.
(c) 2022 Altinity Inc. All Rights Reserved.
Status: Approved for release by QA [vzakaznikov@altinity.com Wed 05 Oct 2022 08:05:01 AM EDT]
Reviewed by: azvonov@altinity.com
Date: 05 October 2022
| Stage       | Status |
|-------------|--------|
| Integration | Fail   |
| Stateful    | Pass   |
| Stateless   | Fail   |
| TestFlows   | Fail   |
Results: https://altinity-test-reports.s3.amazonaws.com/index.html#builds/stable/v22.3.12.20/2022-10-04T14-57-47.486/
Pipeline: https://gitlab.com/altinity-qa/clickhouse/cicd/release/-/pipelines/659360983
Integration results:
https://altinity-test-reports.s3.amazonaws.com/builds/stable/v22.3.12.20/2022-10-04T14-57-47.486/integration/integration_results_1.html
https://altinity-test-reports.s3.amazonaws.com/builds/stable/v22.3.12.20/2022-10-04T14-57-47.486/integration/integration_results_2.html
Test: /integration/test_storage_postgresql_replica/test.py::test_rename_table
Reason:
[gw4] linux -- Python 3.8.10 /usr/bin/python3
started_cluster = <helpers.cluster.ClickHouseCluster object at 0x7fe5e71e48e0>
def test_rename_table(started_cluster):
conn = get_postgres_conn(
ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True,
)
cursor = conn.cursor()
create_postgres_table(cursor, "postgresql_replica")
instance.query("DROP TABLE IF EXISTS test.postgresql_replica")
create_materialized_table(
ip=started_cluster.postgres_ip, port=started_cluster.postgres_port
)
instance.query(
"INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(25)"
)
result = instance.query("SELECT count() FROM test.postgresql_replica;")
while int(result) != 25:
> time.sleep(0.5)
E Failed: Timeout >900.0s
test_storage_postgresql_replica/test.py:606: Failed
Comment:
Status: ERROR
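The failure above is a bare polling loop hitting the 900 s pytest timeout. As an illustration only (the helper below is not part of the test suite; `instance` is the fixture name from the traceback), a bounded wait with a descriptive error makes it easier to tell whether MaterializedPostgreSQL replication is merely lagging or completely stuck:
```python
# Illustrative only, not from test.py: poll with an explicit deadline and report
# the last observed value instead of relying on the global pytest timeout.
import time

def wait_for_count(instance, query, expected, timeout=120, interval=0.5):
    deadline = time.monotonic() + timeout
    last = None
    while time.monotonic() < deadline:
        last = instance.query(query).strip()
        if last and int(last) == expected:
            return
        time.sleep(interval)
    raise AssertionError(
        f"expected {expected} rows from {query!r}, last seen {last!r} after {timeout}s"
    )

# wait_for_count(instance, "SELECT count() FROM test.postgresql_replica", 25)
```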
Test: /integration/test_materialized_mysql_database/test.py::test_mysql_settings[clickhouse_node1]
Reason:
____________________ test_mysql_settings[clickhouse_node1] _____________________
[gw0] linux -- Python 3.8.10 /usr/bin/python3
started_cluster = <helpers.cluster.ClickHouseCluster object at 0x7f11c9362fd0>
started_mysql_8_0 = <test_materialized_mysql_database.test.MySQLConnection object at 0x7f11c9277b50>
started_mysql_5_7 = <test_materialized_mysql_database.test.MySQLConnection object at 0x7f11c92d2610>
clickhouse_node = <helpers.cluster.ClickHouseInstance object at 0x7f11c9374040>
@pytest.mark.parametrize(
("clickhouse_node"), [node_disable_bytes_settings, node_disable_rows_settings]
)
def test_mysql_settings(
started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node
):
> materialize_with_ddl.mysql_settings_test(
clickhouse_node, started_mysql_5_7, "mysql57"
)
test_materialized_mysql_database/test.py:448:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
clickhouse_node = <helpers.cluster.ClickHouseInstance object at 0x7f11c9374040>
mysql_node = <test_materialized_mysql_database.test.MySQLConnection object at 0x7f11c92d2610>
service_name = 'mysql57'
def mysql_settings_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
mysql_node.query("CREATE DATABASE test_database")
mysql_node.query(
"CREATE TABLE test_database.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))"
)
mysql_node.query("INSERT INTO test_database.a VALUES(1, 'foo')")
mysql_node.query("INSERT INTO test_database.a VALUES(2, 'bar')")
clickhouse_node.query(
"CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
service_name
)
)
check_query(
clickhouse_node, "SELECT COUNT() FROM test_database.a FORMAT TSV", "2\n"
)
> assert (
clickhouse_node.query(
"SELECT COUNT(DISTINCT blockNumber()) FROM test_database.a FORMAT TSV"
)
== "2\n"
)
E AssertionError
test_materialized_mysql_database/materialize_with_ddl.py:1795: AssertionError
Comment: Misconfiguration: only 1 CPU/thread is available to ClickHouse; this is the same environment issue that breaks the Kafka consumer tests below (DB::Exception: Number of consumers can not be bigger than 1).
Status: FAIL
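A quick way to confirm the single-CPU environment suggested by the comment above is to ask the server itself. This is a diagnostic sketch only (the `clickhouse_node` fixture name is taken from the traceback; it is not part of the test suite):
```python
# Diagnostic sketch, not from the test suite.
def report_parallelism(clickhouse_node):
    # getSetting('max_threads') should resolve the 'auto' default to the number of
    # cores the server detected; the comment above implies it is 1 on this runner.
    max_threads = clickhouse_node.query("SELECT getSetting('max_threads')").strip()
    print(f"ClickHouse node reports max_threads = {max_threads}")
    return max_threads
```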
Test: /integration/test_materialized_mysql_database/test.py::test_mysql_settings[clickhouse_node0]
Reason:
____________________ test_mysql_settings[clickhouse_node0] _____________________
[gw0] linux -- Python 3.8.10 /usr/bin/python3
started_cluster = <helpers.cluster.ClickHouseCluster object at 0x7f11c9362fd0>
started_mysql_8_0 = <test_materialized_mysql_database.test.MySQLConnection object at 0x7f11c9277b50>
started_mysql_5_7 = <test_materialized_mysql_database.test.MySQLConnection object at 0x7f11c92d2610>
clickhouse_node = <helpers.cluster.ClickHouseInstance object at 0x7f11c9362910>
@pytest.mark.parametrize(
("clickhouse_node"), [node_disable_bytes_settings, node_disable_rows_settings]
)
def test_mysql_settings(
started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node
):
> materialize_with_ddl.mysql_settings_test(
clickhouse_node, started_mysql_5_7, "mysql57"
)
test_materialized_mysql_database/test.py:448:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
clickhouse_node = <helpers.cluster.ClickHouseInstance object at 0x7f11c9362910>
mysql_node = <test_materialized_mysql_database.test.MySQLConnection object at 0x7f11c92d2610>
service_name = 'mysql57'
def mysql_settings_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
mysql_node.query("CREATE DATABASE test_database")
mysql_node.query(
"CREATE TABLE test_database.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))"
)
mysql_node.query("INSERT INTO test_database.a VALUES(1, 'foo')")
mysql_node.query("INSERT INTO test_database.a VALUES(2, 'bar')")
clickhouse_node.query(
"CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
service_name
)
)
check_query(
clickhouse_node, "SELECT COUNT() FROM test_database.a FORMAT TSV", "2\n"
)
> assert (
clickhouse_node.query(
"SELECT COUNT(DISTINCT blockNumber()) FROM test_database.a FORMAT TSV"
)
== "2\n"
)
E AssertionError
test_materialized_mysql_database/materialize_with_ddl.py:1795: AssertionError
Comment: Misconfiguration: only 1 CPU/thread is available to ClickHouse; this is the same environment issue that breaks the Kafka consumer tests below (DB::Exception: Number of consumers can not be bigger than 1).
Status: FAIL
Test: /integration/test_storage_kafka/test.py::test_kafka_csv_with_thread_per_consumer
Reason:
[gw1] linux -- Python 3.8.10 /usr/bin/python3
kafka_cluster = <helpers.cluster.ClickHouseCluster object at 0x7fcdc2a77670>
def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
> instance.query(
"""
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv_with_thread_per_consumer',
kafka_group_name = 'csv_with_thread_per_consumer',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
kafka_num_consumers = 4,
kafka_commit_on_select = 1,
kafka_thread_per_consumer = 1;
"""
)
test_storage_kafka/test.py:3304:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
helpers/cluster.py:2802: in query
return self.client.query(
helpers/client.py:31: in query
return self.get_query_request(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <helpers.client.CommandRequest object at 0x7fcd94fe8430>
def get_answer(self):
self.process.wait()
self.stdout_file.seek(0)
self.stderr_file.seek(0)
stdout = self.stdout_file.read().decode("utf-8", errors="replace")
stderr = self.stderr_file.read().decode("utf-8", errors="replace")
if (
self.timer is not None
and not self.process_finished_before_timeout
and not self.ignore_error
):
logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}")
raise QueryTimeoutExceedException("Client timed out!")
if (self.process.returncode != 0 or stderr) and not self.ignore_error:
> raise QueryRuntimeException(
"Client failed! Return code: {}, stderr: {}".format(
self.process.returncode, stderr
),
self.process.returncode,
stderr,
)
E helpers.client.QueryRuntimeException: Client failed! Return code: 36, stderr: Received exception from server (version 22.3.12):
E Code: 36. DB::Exception: Received from 172.16.5.8:9000. DB::Exception: Number of consumers can not be bigger than 1. Stack trace:
E
E 0. DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int, bool) @ 0xb3765ba in /usr/bin/clickhouse
E 1. DB::Exception::Exception<unsigned int&>(int, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, unsigned int&) @ 0x15302d71 in /usr/bin/clickhouse
E 2. ? @ 0x15302129 in /usr/bin/clickhouse
E 3. DB::StorageFactory::get(DB::ASTCreateQuery const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, std::__1::shared_ptr<DB::Context>, DB::ColumnsDescription const&, DB::ConstraintsDescription const&, bool) const @ 0x15f5ac3a in /usr/bin/clickhouse
E 4. DB::InterpreterCreateQuery::doCreateTable(DB::ASTCreateQuery&, DB::InterpreterCreateQuery::TableProperties const&) @ 0x159febd9 in /usr/bin/clickhouse
E 5. DB::InterpreterCreateQuery::createTable(DB::ASTCreateQuery&) @ 0x159f98ff in /usr/bin/clickhouse
E 6. DB::InterpreterCreateQuery::execute() @ 0x15a0212b in /usr/bin/clickhouse
E 7. ? @ 0x15d3940f in /usr/bin/clickhouse
E 8. DB::executeQuery(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, bool, DB::QueryProcessingStage::Enum) @ 0x15d36eb5 in /usr/bin/clickhouse
E 9. DB::TCPHandler::runImpl() @ 0x168bfa50 in /usr/bin/clickhouse
E 10. DB::TCPHandler::run() @ 0x168cf5f9 in /usr/bin/clickhouse
E 11. Poco::Net::TCPServerConnection::start() @ 0x19b90b6f in /usr/bin/clickhouse
E 12. ? @ 0x19b92fc1 in /usr/bin/clickhouse
E 13. Poco::PooledThread::run() @ 0x19d4ff89 in /usr/bin/clickhouse
E 14. Poco::ThreadImpl::runnableEntry(void*) @ 0x19d4d2e0 in /usr/bin/clickhouse
E 15. ? @ 0x7f0b605e1609 in ?
E 16. clone @ 0x7f0b60506133 in ?
E . (BAD_ARGUMENTS)
E (query: CREATE TABLE test.kafka (key UInt64, value UInt64)
E ENGINE = Kafka
E SETTINGS kafka_broker_list = 'kafka1:19092',
E kafka_topic_list = 'csv_with_thread_per_consumer',
E kafka_group_name = 'csv_with_thread_per_consumer',
E kafka_format = 'CSV',
E kafka_row_delimiter = '\n',
E kafka_num_consumers = 4,
E kafka_commit_on_select = 1,
E kafka_thread_per_consumer = 1;)
helpers/client.py:187: QueryRuntimeException
Comment: Misconfiguration: only 1 CPU/thread is available to ClickHouse, while the Kafka consumer is configured to use more (DB::Exception: Number of consumers can not be bigger than 1).
Status: FAIL
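All of the Kafka failures below share this root cause: the server sees a single CPU core, so any `CREATE TABLE ... ENGINE = Kafka` with `kafka_num_consumers > 1` is rejected at creation time. A hypothetical guard (names invented, not part of `test_storage_kafka/test.py`) could skip these scenarios instead of failing them on an under-provisioned runner:
```python
# Hypothetical pre-check, not part of the upstream tests.
import multiprocessing

import pytest

def require_cpus_for_consumers(num_consumers):
    # cpu_count() reflects the test-runner process and only approximates what the
    # ClickHouse container itself is allowed to use, so treat this as a rough guard.
    cpus = multiprocessing.cpu_count()
    if cpus < num_consumers:
        pytest.skip(
            f"kafka_num_consumers={num_consumers} needs at least {num_consumers} CPUs, found {cpus}"
        )
```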
Test: /integration/test_storage_kafka/test.py::test_kafka_consumer_hang
Reason:
[gw1] linux -- Python 3.8.10 /usr/bin/python3
kafka_cluster = <helpers.cluster.ClickHouseCluster object at 0x7fcdc2a77670>
def test_kafka_consumer_hang(kafka_cluster):
admin_client = KafkaAdminClient(
bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)
)
topic_name = "consumer_hang"
kafka_create_topic(admin_client, topic_name, num_partitions=8)
> instance.query(
f"""
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8;
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
"""
)
test_storage_kafka/test.py:1016:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
helpers/cluster.py:2802: in query
return self.client.query(
helpers/client.py:31: in query
return self.get_query_request(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <helpers.client.CommandRequest object at 0x7fcd95150d90>
def get_answer(self):
self.process.wait()
self.stdout_file.seek(0)
self.stderr_file.seek(0)
stdout = self.stdout_file.read().decode("utf-8", errors="replace")
stderr = self.stderr_file.read().decode("utf-8", errors="replace")
if (
self.timer is not None
and not self.process_finished_before_timeout
and not self.ignore_error
):
logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}")
raise QueryTimeoutExceedException("Client timed out!")
if (self.process.returncode != 0 or stderr) and not self.ignore_error:
> raise QueryRuntimeException(
"Client failed! Return code: {}, stderr: {}".format(
self.process.returncode, stderr
),
self.process.returncode,
stderr,
)
E helpers.client.QueryRuntimeException: Client failed! Return code: 36, stderr: Received exception from server (version 22.3.12):
E Code: 36. DB::Exception: Received from 172.16.5.8:9000. DB::Exception: Number of consumers can not be bigger than 1. Stack trace:
E
E 0. DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int, bool) @ 0xb3765ba in /usr/bin/clickhouse
E 1. DB::Exception::Exception<unsigned int&>(int, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, unsigned int&) @ 0x15302d71 in /usr/bin/clickhouse
E 2. ? @ 0x15302129 in /usr/bin/clickhouse
E 3. DB::StorageFactory::get(DB::ASTCreateQuery const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, std::__1::shared_ptr<DB::Context>, DB::ColumnsDescription const&, DB::ConstraintsDescription const&, bool) const @ 0x15f5ac3a in /usr/bin/clickhouse
E 4. DB::InterpreterCreateQuery::doCreateTable(DB::ASTCreateQuery&, DB::InterpreterCreateQuery::TableProperties const&) @ 0x159febd9 in /usr/bin/clickhouse
E 5. DB::InterpreterCreateQuery::createTable(DB::ASTCreateQuery&) @ 0x159f98ff in /usr/bin/clickhouse
E 6. DB::InterpreterCreateQuery::execute() @ 0x15a0212b in /usr/bin/clickhouse
E 7. ? @ 0x15d3940f in /usr/bin/clickhouse
E 8. DB::executeQuery(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, bool, DB::QueryProcessingStage::Enum) @ 0x15d36eb5 in /usr/bin/clickhouse
E 9. DB::TCPHandler::runImpl() @ 0x168bfa50 in /usr/bin/clickhouse
E 10. DB::TCPHandler::run() @ 0x168cf5f9 in /usr/bin/clickhouse
E 11. Poco::Net::TCPServerConnection::start() @ 0x19b90b6f in /usr/bin/clickhouse
E 12. ? @ 0x19b92fc1 in /usr/bin/clickhouse
E 13. Poco::PooledThread::run() @ 0x19d4ff89 in /usr/bin/clickhouse
E 14. Poco::ThreadImpl::runnableEntry(void*) @ 0x19d4d2e0 in /usr/bin/clickhouse
E 15. ? @ 0x7f0b605e1609 in ?
E 16. clone @ 0x7f0b60506133 in ?
E . (BAD_ARGUMENTS)
E (query: CREATE TABLE test.kafka (key UInt64, value UInt64)
E ENGINE = Kafka
E SETTINGS kafka_broker_list = 'kafka1:19092',
E kafka_topic_list = 'consumer_hang',
E kafka_group_name = 'consumer_hang',
E kafka_format = 'JSONEachRow',
E kafka_num_consumers = 8;)
helpers/client.py:187: QueryRuntimeException
Comment: Misconfiguration: only 1 CPU/thread is available to ClickHouse, while the Kafka consumer is configured to use more (DB::Exception: Number of consumers can not be bigger than 1).
Status: FAIL
Test: /integration/test_storage_kafka/test.py::test_kafka_virtual_columns2
Reason:
[gw1] linux -- Python 3.8.10 /usr/bin/python3
kafka_cluster = <helpers.cluster.ClickHouseCluster object at 0x7fcdc2a77670>
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(
bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)
)
topic_config = {
# default retention, since predefined timestamp_ms is used.
"retention.ms": "-1",
}
kafka_create_topic(admin_client, "virt2_0", num_partitions=2, config=topic_config)
kafka_create_topic(admin_client, "virt2_1", num_partitions=2, config=topic_config)
> instance.query(
"""
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
"""
)
test_storage_kafka/test.py:2150:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
helpers/cluster.py:2802: in query
return self.client.query(
helpers/client.py:31: in query
return self.get_query_request(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <helpers.client.CommandRequest object at 0x7fcdc09c10a0>
def get_answer(self):
self.process.wait()
self.stdout_file.seek(0)
self.stderr_file.seek(0)
stdout = self.stdout_file.read().decode("utf-8", errors="replace")
stderr = self.stderr_file.read().decode("utf-8", errors="replace")
if (
self.timer is not None
and not self.process_finished_before_timeout
and not self.ignore_error
):
logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}")
raise QueryTimeoutExceedException("Client timed out!")
if (self.process.returncode != 0 or stderr) and not self.ignore_error:
> raise QueryRuntimeException(
"Client failed! Return code: {}, stderr: {}".format(
self.process.returncode, stderr
),
self.process.returncode,
stderr,
)
E helpers.client.QueryRuntimeException: Client failed! Return code: 36, stderr: Received exception from server (version 22.3.12):
E Code: 36. DB::Exception: Received from 172.16.5.8:9000. DB::Exception: Number of consumers can not be bigger than 1. Stack trace:
E
E 0. DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int, bool) @ 0xb3765ba in /usr/bin/clickhouse
E 1. DB::Exception::Exception<unsigned int&>(int, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, unsigned int&) @ 0x15302d71 in /usr/bin/clickhouse
E 2. ? @ 0x15302129 in /usr/bin/clickhouse
E 3. DB::StorageFactory::get(DB::ASTCreateQuery const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, std::__1::shared_ptr<DB::Context>, DB::ColumnsDescription const&, DB::ConstraintsDescription const&, bool) const @ 0x15f5ac3a in /usr/bin/clickhouse
E 4. DB::InterpreterCreateQuery::doCreateTable(DB::ASTCreateQuery&, DB::InterpreterCreateQuery::TableProperties const&) @ 0x159febd9 in /usr/bin/clickhouse
E 5. DB::InterpreterCreateQuery::createTable(DB::ASTCreateQuery&) @ 0x159f98ff in /usr/bin/clickhouse
E 6. DB::InterpreterCreateQuery::execute() @ 0x15a0212b in /usr/bin/clickhouse
E 7. ? @ 0x15d3940f in /usr/bin/clickhouse
E 8. DB::executeQuery(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, bool, DB::QueryProcessingStage::Enum) @ 0x15d36eb5 in /usr/bin/clickhouse
E 9. DB::TCPHandler::runImpl() @ 0x168bfa50 in /usr/bin/clickhouse
E 10. DB::TCPHandler::run() @ 0x168cf5f9 in /usr/bin/clickhouse
E 11. Poco::Net::TCPServerConnection::start() @ 0x19b90b6f in /usr/bin/clickhouse
E 12. ? @ 0x19b92fc1 in /usr/bin/clickhouse
E 13. Poco::PooledThread::run() @ 0x19d4ff89 in /usr/bin/clickhouse
E 14. Poco::ThreadImpl::runnableEntry(void*) @ 0x19d4d2e0 in /usr/bin/clickhouse
E 15. ? @ 0x7f0b605e1609 in ?
E 16. clone @ 0x7f0b60506133 in ?
E . (BAD_ARGUMENTS)
E (query: CREATE TABLE test.kafka (value UInt64)
E ENGINE = Kafka
E SETTINGS kafka_broker_list = 'kafka1:19092',
E kafka_topic_list = 'virt2_0,virt2_1',
E kafka_group_name = 'virt2',
E kafka_num_consumers = 2,
E kafka_format = 'JSONEachRow';)
helpers/client.py:187: QueryRuntimeException
Comment: Misconfiguration: only 1 CPU/thread is available to ClickHouse, while the Kafka consumer is configured to use more (DB::Exception: Number of consumers can not be bigger than 1).
Status: FAIL
Test: /integration/test_storage_kafka/test.py::test_kafka_read_consumers_in_parallel
Reason:
[gw1] linux -- Python 3.8.10 /usr/bin/python3
kafka_cluster = <helpers.cluster.ClickHouseCluster object at 0x7fcdc2a77670>
def test_kafka_read_consumers_in_parallel(kafka_cluster):
admin_client = KafkaAdminClient(
bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)
)
topic_name = "read_consumers_in_parallel"
kafka_create_topic(admin_client, topic_name, num_partitions=8)
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
for _ in range(100):
messages.append(json.dumps({"key": 0, "value": 0}))
kafka_produce(kafka_cluster, "read_consumers_in_parallel", messages)
time.sleep(1)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
# when we have more than 1 consumer in a single table,
# and kafka_thread_per_consumer=0
# all the consumers should be read in parallel, not in sequence.
# then reading in parallel 8 consumers with 1 seconds kafka_poll_timeout_ms and less than 1 sec limit
# we should have exactly 1 poll per consumer (i.e. 8 polls) every 1 seconds (from different threads)
# in case parallel consuming is not working we will have only 1 poll every 1 seconds (from the same thread).
> instance.query(
f"""
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8,
kafka_thread_per_consumer = 0,
kafka_poll_timeout_ms = 1000,
kafka_flush_interval_ms = 999;
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
"""
)
test_storage_kafka/test.py:1179:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
helpers/cluster.py:2802: in query
return self.client.query(
helpers/client.py:31: in query
return self.get_query_request(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <helpers.client.CommandRequest object at 0x7fcdc0837b50>
def get_answer(self):
self.process.wait()
self.stdout_file.seek(0)
self.stderr_file.seek(0)
stdout = self.stdout_file.read().decode("utf-8", errors="replace")
stderr = self.stderr_file.read().decode("utf-8", errors="replace")
if (
self.timer is not None
and not self.process_finished_before_timeout
and not self.ignore_error
):
logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}")
raise QueryTimeoutExceedException("Client timed out!")
if (self.process.returncode != 0 or stderr) and not self.ignore_error:
> raise QueryRuntimeException(
"Client failed! Return code: {}, stderr: {}".format(
self.process.returncode, stderr
),
self.process.returncode,
stderr,
)
E helpers.client.QueryRuntimeException: Client failed! Return code: 36, stderr: Received exception from server (version 22.3.12):
E Code: 36. DB::Exception: Received from 172.16.5.8:9000. DB::Exception: Number of consumers can not be bigger than 1. Stack trace:
E
E 0. DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int, bool) @ 0xb3765ba in /usr/bin/clickhouse
E 1. DB::Exception::Exception<unsigned int&>(int, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, unsigned int&) @ 0x15302d71 in /usr/bin/clickhouse
E 2. ? @ 0x15302129 in /usr/bin/clickhouse
E 3. DB::StorageFactory::get(DB::ASTCreateQuery const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, std::__1::shared_ptr<DB::Context>, DB::ColumnsDescription const&, DB::ConstraintsDescription const&, bool) const @ 0x15f5ac3a in /usr/bin/clickhouse
E 4. DB::InterpreterCreateQuery::doCreateTable(DB::ASTCreateQuery&, DB::InterpreterCreateQuery::TableProperties const&) @ 0x159febd9 in /usr/bin/clickhouse
E 5. DB::InterpreterCreateQuery::createTable(DB::ASTCreateQuery&) @ 0x159f98ff in /usr/bin/clickhouse
E 6. DB::InterpreterCreateQuery::execute() @ 0x15a0212b in /usr/bin/clickhouse
E 7. ? @ 0x15d3940f in /usr/bin/clickhouse
E 8. DB::executeQuery(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, bool, DB::QueryProcessingStage::Enum) @ 0x15d36eb5 in /usr/bin/clickhouse
E 9. DB::TCPHandler::runImpl() @ 0x168bfa50 in /usr/bin/clickhouse
E 10. DB::TCPHandler::run() @ 0x168cf5f9 in /usr/bin/clickhouse
E 11. Poco::Net::TCPServerConnection::start() @ 0x19b90b6f in /usr/bin/clickhouse
E 12. ? @ 0x19b92fc1 in /usr/bin/clickhouse
E 13. Poco::PooledThread::run() @ 0x19d4ff89 in /usr/bin/clickhouse
E 14. Poco::ThreadImpl::runnableEntry(void*) @ 0x19d4d2e0 in /usr/bin/clickhouse
E 15. ? @ 0x7f0b605e1609 in ?
E 16. clone @ 0x7f0b60506133 in ?
E . (BAD_ARGUMENTS)
E (query: CREATE TABLE test.kafka (key UInt64, value UInt64)
E ENGINE = Kafka
E SETTINGS kafka_broker_list = 'kafka1:19092',
E kafka_topic_list = 'read_consumers_in_parallel',
E kafka_group_name = 'read_consumers_in_parallel',
E kafka_format = 'JSONEachRow',
E kafka_num_consumers = 8,
E kafka_thread_per_consumer = 0,
E kafka_poll_timeout_ms = 1000,
E kafka_flush_interval_ms = 999;)
helpers/client.py:187: QueryRuntimeException
Comment: Misconfiguration: only 1 CPU/thread is available to ClickHouse, while the Kafka consumer is configured to use more (DB::Exception: Number of consumers can not be bigger than 1).
Status: FAIL
Test: /integration/test_storage_kafka/test.py::test_kafka_recreate_kafka_table
Reason:
[gw1] linux -- Python 3.8.10 /usr/bin/python3
kafka_cluster = <helpers.cluster.ClickHouseCluster object at 0x7fcdc2a77670>
def test_kafka_recreate_kafka_table(kafka_cluster):
"""
Checks that materialized view work properly after dropping and recreating the Kafka table.
"""
# line for backporting:
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin_client = KafkaAdminClient(
bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)
)
topic_name = "recreate_kafka_table"
kafka_create_topic(admin_client, topic_name, num_partitions=6)
> instance.query(
"""
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'recreate_kafka_table',
kafka_group_name = 'recreate_kafka_table_group',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 6,
kafka_flush_interval_ms = 1000,
kafka_skip_broken_messages = 1048577;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
"""
)
test_storage_kafka/test.py:1556:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
helpers/cluster.py:2802: in query
return self.client.query(
helpers/client.py:31: in query
return self.get_query_request(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <helpers.client.CommandRequest object at 0x7fcdc0017a60>
def get_answer(self):
self.process.wait()
self.stdout_file.seek(0)
self.stderr_file.seek(0)
stdout = self.stdout_file.read().decode("utf-8", errors="replace")
stderr = self.stderr_file.read().decode("utf-8", errors="replace")
if (
self.timer is not None
and not self.process_finished_before_timeout
and not self.ignore_error
):
logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}")
raise QueryTimeoutExceedException("Client timed out!")
if (self.process.returncode != 0 or stderr) and not self.ignore_error:
> raise QueryRuntimeException(
"Client failed! Return code: {}, stderr: {}".format(
self.process.returncode, stderr
),
self.process.returncode,
stderr,
)
E helpers.client.QueryRuntimeException: Client failed! Return code: 36, stderr: Received exception from server (version 22.3.12):
E Code: 36. DB::Exception: Received from 172.16.5.8:9000. DB::Exception: Number of consumers can not be bigger than 1. Stack trace:
E
E 0. DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int, bool) @ 0xb3765ba in /usr/bin/clickhouse
E 1. DB::Exception::Exception<unsigned int&>(int, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, unsigned int&) @ 0x15302d71 in /usr/bin/clickhouse
E 2. ? @ 0x15302129 in /usr/bin/clickhouse
E 3. DB::StorageFactory::get(DB::ASTCreateQuery const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, std::__1::shared_ptr<DB::Context>, DB::ColumnsDescription const&, DB::ConstraintsDescription const&, bool) const @ 0x15f5ac3a in /usr/bin/clickhouse
E 4. DB::InterpreterCreateQuery::doCreateTable(DB::ASTCreateQuery&, DB::InterpreterCreateQuery::TableProperties const&) @ 0x159febd9 in /usr/bin/clickhouse
E 5. DB::InterpreterCreateQuery::createTable(DB::ASTCreateQuery&) @ 0x159f98ff in /usr/bin/clickhouse
E 6. DB::InterpreterCreateQuery::execute() @ 0x15a0212b in /usr/bin/clickhouse
E 7. ? @ 0x15d3940f in /usr/bin/clickhouse
E 8. DB::executeQuery(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, bool, DB::QueryProcessingStage::Enum) @ 0x15d36eb5 in /usr/bin/clickhouse
E 9. DB::TCPHandler::runImpl() @ 0x168bfa50 in /usr/bin/clickhouse
E 10. DB::TCPHandler::run() @ 0x168cf5f9 in /usr/bin/clickhouse
E 11. Poco::Net::TCPServerConnection::start() @ 0x19b90b6f in /usr/bin/clickhouse
E 12. ? @ 0x19b92fc1 in /usr/bin/clickhouse
E 13. Poco::PooledThread::run() @ 0x19d4ff89 in /usr/bin/clickhouse
E 14. Poco::ThreadImpl::runnableEntry(void*) @ 0x19d4d2e0 in /usr/bin/clickhouse
E 15. ? @ 0x7f0b605e1609 in ?
E 16. clone @ 0x7f0b60506133 in ?
E . (BAD_ARGUMENTS)
E (query: CREATE TABLE test.kafka (key UInt64, value UInt64)
E ENGINE = Kafka
E SETTINGS kafka_broker_list = 'kafka1:19092',
E kafka_topic_list = 'recreate_kafka_table',
E kafka_group_name = 'recreate_kafka_table_group',
E kafka_format = 'JSONEachRow',
E kafka_num_consumers = 6,
E kafka_flush_interval_ms = 1000,
E kafka_skip_broken_messages = 1048577;)
helpers/client.py:187: QueryRuntimeException
Comment: Misconfiguration: only 1 CPU/thread is available to ClickHouse, while the Kafka consumer is configured to use more (DB::Exception: Number of consumers can not be bigger than 1).
Status: FAIL
Test: /integration/test_storage_kafka/test.py::test_issue26643
Reason:
[gw1] linux -- Python 3.8.10 /usr/bin/python3
kafka_cluster = <helpers.cluster.ClickHouseCluster object at 0x7fcdc2a77670>
def test_issue26643(kafka_cluster):
# for backporting:
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin_client = KafkaAdminClient(
bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)
)
producer = KafkaProducer(
bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port),
value_serializer=producer_serializer,
)
topic_list = []
topic_list.append(
NewTopic(name="test_issue26643", num_partitions=4, replication_factor=1)
)
admin_client.create_topics(new_topics=topic_list, validate_only=False)
msg = message_with_repeated_pb2.Message(
tnow=1629000000,
server="server1",
clien="host1",
sPort=443,
cPort=50000,
r=[
message_with_repeated_pb2.dd(
name="1", type=444, ttl=123123, data=b"adsfasd"
),
message_with_repeated_pb2.dd(name="2"),
],
method="GET",
)
data = b""
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
msg = message_with_repeated_pb2.Message(tnow=1629000002)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer.send(topic="test_issue26643", value=data)
data = _VarintBytes(len(serialized_msg)) + serialized_msg
producer.send(topic="test_issue26643", value=data)
producer.flush()
> instance.query(
"""
CREATE TABLE IF NOT EXISTS test.test_queue
(
`tnow` UInt32,
`server` String,
`client` String,
`sPort` UInt16,
`cPort` UInt16,
`r.name` Array(String),
`r.class` Array(UInt16),
`r.type` Array(UInt16),
`r.ttl` Array(UInt32),
`r.data` Array(String),
`method` String
)
ENGINE = Kafka
SETTINGS
kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_issue26643',
kafka_group_name = 'test_issue26643_group',
kafka_format = 'Protobuf',
kafka_schema = 'message_with_repeated.proto:Message',
kafka_num_consumers = 4,
kafka_skip_broken_messages = 10000;
SET allow_suspicious_low_cardinality_types=1;
CREATE TABLE test.log
(
`tnow` DateTime('Asia/Istanbul') CODEC(DoubleDelta, LZ4),
`server` LowCardinality(String),
`client` LowCardinality(String),
`sPort` LowCardinality(UInt16),
`cPort` UInt16 CODEC(T64, LZ4),
`r.name` Array(String),
`r.class` Array(LowCardinality(UInt16)),
`r.type` Array(LowCardinality(UInt16)),
`r.ttl` Array(LowCardinality(UInt32)),
`r.data` Array(String),
`method` LowCardinality(String)
)
ENGINE = MergeTree
PARTITION BY toYYYYMMDD(tnow)
ORDER BY (tnow, server)
TTL toDate(tnow) + toIntervalMonth(1000)
SETTINGS index_granularity = 16384, merge_with_ttl_timeout = 7200;
CREATE MATERIALIZED VIEW test.test_consumer TO test.log AS
SELECT
toDateTime(a.tnow) AS tnow,
a.server AS server,
a.client AS client,
a.sPort AS sPort,
a.cPort AS cPort,
a.`r.name` AS `r.name`,
a.`r.class` AS `r.class`,
a.`r.type` AS `r.type`,
a.`r.ttl` AS `r.ttl`,
a.`r.data` AS `r.data`,
a.method AS method
FROM test.test_queue AS a;
"""
)
test_storage_kafka/test.py:4040:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
helpers/cluster.py:2802: in query
return self.client.query(
helpers/client.py:31: in query
return self.get_query_request(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <helpers.client.CommandRequest object at 0x7fcda3692f40>
def get_answer(self):
self.process.wait()
self.stdout_file.seek(0)
self.stderr_file.seek(0)
stdout = self.stdout_file.read().decode("utf-8", errors="replace")
stderr = self.stderr_file.read().decode("utf-8", errors="replace")
if (
self.timer is not None
and not self.process_finished_before_timeout
and not self.ignore_error
):
logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}")
raise QueryTimeoutExceedException("Client timed out!")
if (self.process.returncode != 0 or stderr) and not self.ignore_error:
> raise QueryRuntimeException(
"Client failed! Return code: {}, stderr: {}".format(
self.process.returncode, stderr
),
self.process.returncode,
stderr,
)
E helpers.client.QueryRuntimeException: Client failed! Return code: 36, stderr: Received exception from server (version 22.3.12):
E Code: 36. DB::Exception: Received from 172.16.5.8:9000. DB::Exception: Number of consumers can not be bigger than 1. Stack trace:
E
E 0. DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int, bool) @ 0xb3765ba in /usr/bin/clickhouse
E 1. DB::Exception::Exception<unsigned int&>(int, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, unsigned int&) @ 0x15302d71 in /usr/bin/clickhouse
E 2. ? @ 0x15302129 in /usr/bin/clickhouse
E 3. DB::StorageFactory::get(DB::ASTCreateQuery const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, std::__1::shared_ptr<DB::Context>, DB::ColumnsDescription const&, DB::ConstraintsDescription const&, bool) const @ 0x15f5ac3a in /usr/bin/clickhouse
E 4. DB::InterpreterCreateQuery::doCreateTable(DB::ASTCreateQuery&, DB::InterpreterCreateQuery::TableProperties const&) @ 0x159febd9 in /usr/bin/clickhouse
E 5. DB::InterpreterCreateQuery::createTable(DB::ASTCreateQuery&) @ 0x159f98ff in /usr/bin/clickhouse
E 6. DB::InterpreterCreateQuery::execute() @ 0x15a0212b in /usr/bin/clickhouse
E 7. ? @ 0x15d3940f in /usr/bin/clickhouse
E 8. DB::executeQuery(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::shared_ptr<DB::Context>, bool, DB::QueryProcessingStage::Enum) @ 0x15d36eb5 in /usr/bin/clickhouse
E 9. DB::TCPHandler::runImpl() @ 0x168bfa50 in /usr/bin/clickhouse
E 10. DB::TCPHandler::run() @ 0x168cf5f9 in /usr/bin/clickhouse
E 11. Poco::Net::TCPServerConnection::start() @ 0x19b90b6f in /usr/bin/clickhouse
E 12. ? @ 0x19b92fc1 in /usr/bin/clickhouse
E 13. Poco::PooledThread::run() @ 0x19d4ff89 in /usr/bin/clickhouse
E 14. Poco::ThreadImpl::runnableEntry(void*) @ 0x19d4d2e0 in /usr/bin/clickhouse
E 15. ? @ 0x7f0b605e1609 in ?
E 16. clone @ 0x7f0b60506133 in ?
E . (BAD_ARGUMENTS)
E (query: CREATE TABLE IF NOT EXISTS test.test_queue
E (
E `tnow` UInt32,
E `server` String,
E `client` String,
E `sPort` UInt16,
E `cPort` UInt16,
E `r.name` Array(String),
E `r.class` Array(UInt16),
E `r.type` Array(UInt16),
E `r.ttl` Array(UInt32),
E `r.data` Array(String),
E `method` String
E )
E ENGINE = Kafka
E SETTINGS
E kafka_broker_list = 'kafka1:19092',
E kafka_topic_list = 'test_issue26643',
E kafka_group_name = 'test_issue26643_group',
E kafka_format = 'Protobuf',
E kafka_schema = 'message_with_repeated.proto:Message',
E kafka_num_consumers = 4,
E kafka_skip_broken_messages = 10000;)
helpers/client.py:187: QueryRuntimeException
Comment: Misconfiguration: only 1 CPU/thread is available to ClickHouse, while the Kafka consumer is configured to use more (DB::Exception: Number of consumers can not be bigger than 1).
Status: FAIL
Test: /stateless/02149_read_in_order_fixed_prefix
Reason:
2022-09-28 09:45:38 --- /usr/share/clickhouse-test/queries/0_stateless/02149_read_in_order_fixed_prefix.reference 2022-09-28 09:42:37.288611167 -0600
2022-09-28 09:45:38 +++ /tmp/clickhouse-test/0_stateless/02149_read_in_order_fixed_prefix.stdout 2022-09-28 09:45:38.443136937 -0600
2022-09-28 09:45:38 @@ -29,10 +29,8 @@
2022-09-28 09:45:38 ExpressionTransform × 2
2022-09-28 09:45:38 (SettingQuotaAndLimits)
2022-09-28 09:45:38 (ReadFromMergeTree)
2022-09-28 09:45:38 - ReverseTransform
2022-09-28 09:45:38 - MergeTreeReverse 0 → 1
2022-09-28 09:45:38 - ReverseTransform
2022-09-28 09:45:38 - MergeTreeReverse 0 → 1
2022-09-28 09:45:38 + ReverseTransform × 2
2022-09-28 09:45:38 + MergeTreeReverse × 2 0 → 1
2022-09-28 09:45:38 2020-10-01 9
2022-09-28 09:45:38 2020-10-01 9
2022-09-28 09:45:38 2020-10-01 9
2022-09-28 09:45:38
2022-09-28 09:45:38
2022-09-28 09:45:38 Settings used in the test: --max_insert_threads=11 --group_by_two_level_threshold=100000 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=0 --fsync_metadata=1 --priority=1 --output_format_parallel_formatting=0 --input_format_parallel_parsing=0
2022-09-28 09:45:38
2022-09-28 09:45:38 Database: test_s0qu88
Comment: Not enough threads.
Status: FAIL
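The diff above is the query-plan flavor of the same problem: `EXPLAIN PIPELINE` output depends on how many threads the server can use, so reference files recorded on multi-core runners diverge on a 1-2 CPU host. A standalone sketch (table name and queries invented for illustration, not taken from 02149) shows how the printed pipeline changes with `max_threads`:
```python
# Standalone illustration: run the same EXPLAIN PIPELINE under different max_threads
# values and compare the printed transforms. Requires a local clickhouse-client.
import subprocess

def explain_pipeline(max_threads):
    sql = (
        "CREATE TABLE IF NOT EXISTS t_demo (d Date, v UInt64) ENGINE = MergeTree ORDER BY d;\n"
        f"EXPLAIN PIPELINE SELECT d, v FROM t_demo ORDER BY d DESC SETTINGS max_threads = {max_threads};"
    )
    out = subprocess.run(
        ["clickhouse-client", "--multiquery", "--query", sql],
        capture_output=True, text=True,
    )
    print(f"--- max_threads = {max_threads} ---")
    print(out.stdout)

explain_pipeline(1)
explain_pipeline(4)
```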
Test: /stateless/01701_parallel_parsing_infinite_segmentation
Reason:
2022-09-28 09:47:35 --- /usr/share/clickhouse-test/queries/0_stateless/01701_parallel_parsing_infinite_segmentation.reference 2022-09-28 09:42:37.264599167 -0600
2022-09-28 09:47:35 +++ /tmp/clickhouse-test/0_stateless/01701_parallel_parsing_infinite_segmentation.stdout 2022-09-28 09:47:35.857810929 -0600
2022-09-28 09:47:35 @@ -1 +1 @@
2022-09-28 09:47:35 -Ok.
2022-09-28 09:47:35 +FAIL
2022-09-28 09:47:35
2022-09-28 09:47:35
2022-09-28 09:47:35 Settings used in the test: --max_insert_threads=14 --group_by_two_level_threshold=100000 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=1 --fsync_metadata=1 --priority=2 --output_format_parallel_formatting=0 --input_format_parallel_parsing=0
2022-09-28 09:47:35
2022-09-28 09:47:35 Database: test_4dslpa
Comment: Not enough threads.
Status: FAIL (OK to fail)
Test: /stateless/01532_primary_key_without_order_by_zookeeper
Reason:
2022-09-28 09:48:13 --- /usr/share/clickhouse-test/queries/0_stateless/01532_primary_key_without_order_by_zookeeper.reference 2022-09-28 09:42:37.256595167 -0600
2022-09-28 09:48:13 +++ /tmp/clickhouse-test/0_stateless/01532_primary_key_without_order_by_zookeeper.stdout 2022-09-28 09:48:13.780761620 -0600
2022-09-28 09:48:13 @@ -9,8 +9,8 @@
2022-09-28 09:48:13 1 c
2022-09-28 09:48:13 2 b
2022-09-28 09:48:13 1 c 0
2022-09-28 09:48:13 -2 e 555
2022-09-28 09:48:13 2 b 0
2022-09-28 09:48:13 +2 e 555
2022-09-28 09:48:13 CREATE TABLE default.merge_tree_pk_sql\n(\n `key` UInt64,\n `value` String,\n `key2` UInt64\n)\nENGINE = ReplacingMergeTree\nPRIMARY KEY key\nORDER BY (key, key2)\nSETTINGS index_granularity = 8192
2022-09-28 09:48:13 CREATE TABLE default.replicated_merge_tree_pk_sql\n(\n `key` UInt64,\n `value` String\n)\nENGINE = ReplicatedReplacingMergeTree(\'/clickhouse/test/01532_primary_key_without\', \'r1\')\nPRIMARY KEY key\nORDER BY key\nSETTINGS index_granularity = 8192
2022-09-28 09:48:13 1 a
2022-09-28 09:48:13 @@ -18,6 +18,6 @@
2022-09-28 09:48:13 1 c
2022-09-28 09:48:13 2 b
2022-09-28 09:48:13 1 c 0
2022-09-28 09:48:13 -2 e 555
2022-09-28 09:48:13 2 b 0
2022-09-28 09:48:13 +2 e 555
2022-09-28 09:48:13 CREATE TABLE default.replicated_merge_tree_pk_sql\n(\n `key` UInt64,\n `value` String,\n `key2` UInt64\n)\nENGINE = ReplicatedReplacingMergeTree(\'/clickhouse/test/01532_primary_key_without\', \'r1\')\nPRIMARY KEY key\nORDER BY (key, key2)\nSETTINGS index_granularity = 8192
2022-09-28 09:48:13
2022-09-28 09:48:13
2022-09-28 09:48:13 Settings used in the test: --max_insert_threads=5 --group_by_two_level_threshold=100000 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=0 --fsync_metadata=0 --priority=1 --output_format_parallel_formatting=1 --input_format_parallel_parsing=1
2022-09-28 09:48:13
2022-09-28 09:48:13 Database: test_k9gddd
Comment: Minor: wrong order of elements (column not included in ORDER BY).
Status: FAIL (OK to fail)
Test: /stateless/01524_do_not_merge_across_partitions_select_final
Reason:
2022-09-28 09:48:16 +++ /tmp/clickhouse-test/0_stateless/01524_do_not_merge_across_partitions_select_final.stdout 2022-09-28 09:48:16.274007536 -0600
2022-09-28 09:48:16 @@ -6,4 +6,4 @@
2022-09-28 09:48:16 2020-01-01 00:00:00 2
2022-09-28 09:48:16 1
2022-09-28 09:48:16 499999
2022-09-28 09:48:16 -5
2022-09-28 09:48:16 +2
2022-09-28 09:48:16
2022-09-28 09:48:16
2022-09-28 09:48:16 Settings used in the test: --max_insert_threads=6 --group_by_two_level_threshold=100000 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=1 --fsync_metadata=0 --priority=0 --output_format_parallel_formatting=1 --input_format_parallel_parsing=0
2022-09-28 09:48:16
2022-09-28 09:48:16 Database: test_qs9xs5
Comment: Depends on the number of threads available to the system. Expects 5, but only 2 were used (same configuration issue as the others).
Status: FAIL
Test: /stateless/01275_parallel_mv
Reason:
2022-09-28 09:49:16 --- /usr/share/clickhouse-test/queries/0_stateless/01275_parallel_mv.reference 2022-09-28 09:42:37.240587168 -0600
2022-09-28 09:49:16 +++ /tmp/clickhouse-test/0_stateless/01275_parallel_mv.gen.stdout 2022-09-28 09:49:16.428067479 -0600
2022-09-28 09:49:16 @@ -113,7 +113,7 @@
2022-09-28 09:49:16 Settings['parallel_view_processing'] = '1' and
2022-09-28 09:49:16 Settings['optimize_trivial_insert_select'] = '0' and
2022-09-28 09:49:16 Settings['max_insert_threads'] = '0';
2022-09-28 09:49:16 -5
2022-09-28 09:49:16 +2
2022-09-28 09:49:16 select count() from testX;
2022-09-28 09:49:16 50
2022-09-28 09:49:16 select count() from testXA;
2022-09-28 09:49:16 @@ -137,7 +137,7 @@
2022-09-28 09:49:16 Settings['parallel_view_processing'] = '1' and
2022-09-28 09:49:16 Settings['optimize_trivial_insert_select'] = '0' and
2022-09-28 09:49:16 Settings['max_insert_threads'] = '16';
2022-09-28 09:49:16 -5
2022-09-28 09:49:16 +2
2022-09-28 09:49:16 select count() from testX;
2022-09-28 09:49:16 60
2022-09-28 09:49:16 select count() from testXA;
2022-09-28 09:49:16 @@ -161,7 +161,7 @@
2022-09-28 09:49:16 Settings['parallel_view_processing'] = '1' and
2022-09-28 09:49:16 Settings['optimize_trivial_insert_select'] = '1' and
2022-09-28 09:49:16 Settings['max_insert_threads'] = '0';
2022-09-28 09:49:16 -5
2022-09-28 09:49:16 +2
2022-09-28 09:49:16 select count() from testX;
2022-09-28 09:49:16 70
2022-09-28 09:49:16 select count() from testXA;
2022-09-28 09:49:16 @@ -185,7 +185,7 @@
2022-09-28 09:49:16 Settings['parallel_view_processing'] = '1' and
2022-09-28 09:49:16 Settings['optimize_trivial_insert_select'] = '1' and
2022-09-28 09:49:16 Settings['max_insert_threads'] = '16';
2022-09-28 09:49:16 -5
2022-09-28 09:49:16 +2
2022-09-28 09:49:16 select count() from testX;
2022-09-28 09:49:16 80
2022-09-28 09:49:16 select count() from testXA;
2022-09-28 09:49:16
2022-09-28 09:49:16
2022-09-28 09:49:16 Settings used in the test: --max_insert_threads=0 --group_by_two_level_threshold=100000 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=1 --fsync_metadata=1 --priority=0 --output_format_parallel_formatting=1 --input_format_parallel_parsing=1
2022-09-28 09:49:16
2022-09-28 09:49:16 Database: test_dtnmdo
Comment: Not enough available threads/CPUs for ClickHouse.
Status: FAIL (OK to fail)
Test: /stateless/01091_num_threads
Reason:
2022-09-28 09:49:37 --- /usr/share/clickhouse-test/queries/0_stateless/01091_num_threads.reference 2022-09-28 09:42:37.232583167 -0600
2022-09-28 09:49:37 +++ /tmp/clickhouse-test/0_stateless/01091_num_threads.stdout 2022-09-28 09:49:37.230462759 -0600
2022-09-28 09:49:37 @@ -3,4 +3,4 @@
2022-09-28 09:49:37 499999500000
2022-09-28 09:49:37 1
2022-09-28 09:49:37 499999500000
2022-09-28 09:49:37 -1
2022-09-28 09:49:37 +0
2022-09-28 09:49:37
2022-09-28 09:49:37
2022-09-28 09:49:37 Settings used in the test: --max_insert_threads=16 --group_by_two_level_threshold=1152921504606846976 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=0 --fsync_metadata=0 --priority=1 --output_format_parallel_formatting=1 --input_format_parallel_parsing=0
2022-09-28 09:49:37
2022-09-28 09:49:37 Database: test_soc2as
Comment: Not enough available threads/CPUs for ClickHouse.
Status: FAIL (OK to fail)
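Most of the thread-count-dependent failures above would go away if the test containers were allowed more CPUs. As a rough remediation sketch only (the container name is a placeholder, not taken from this report), Docker can raise the CPU quota of a running container without recreating it:
```python
# Rough remediation sketch: give the ClickHouse container more CPU quota so that
# thread-count-dependent tests see the parallelism their reference output assumes.
# "clickhouse1" is a placeholder container name; requires a reasonably recent Docker.
import subprocess

subprocess.run(["docker", "update", "--cpus", "4", "clickhouse1"], check=True)
```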
Test: /stateless/01193_metadata_loading
Reason:
2022-09-28 10:07:16 --- /usr/share/clickhouse-test/queries/0_stateless/01193_metadata_loading.reference 2022-09-28 09:42:37.236585168 -0600
2022-09-28 10:07:16 +++ /tmp/clickhouse-test/0_stateless/01193_metadata_loading.stdout 2022-09-28 10:07:16.427760010 -0600
2022-09-28 10:07:16 @@ -1,5 +1,5 @@
2022-09-28 10:07:16 1000 0 2020-06-25 hello [1,2] [3,4]
2022-09-28 10:07:16 1000 1 2020-06-26 word [10,20] [30,40]
2022-09-28 10:07:16 -ok
2022-09-28 10:07:16 +[4329,4092,4012,4301,4284]
2022-09-28 10:07:16 8000 0 2020-06-25 hello [1,2] [3,4]
2022-09-28 10:07:16 8000 1 2020-06-26 word [10,20] [30,40]
2022-09-28 10:07:16
2022-09-28 10:07:16
2022-09-28 10:07:16 Settings used in the test: --max_insert_threads=0 --group_by_two_level_threshold=1 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=0 --fsync_metadata=1 --priority=2 --output_format_parallel_formatting=1 --input_format_parallel_parsing=0
2022-09-28 10:07:16
2022-09-28 10:07:16 Database: test_1b3igh
Comment: Minor: known failure; depends on execution speed and hardware differences (our runners differ from the upstream CI/CD runners).
Status: FAIL (OK to fail)
Passed:
- AES Encryption
- ClickHouse Keeper
- DateTime64 Extended Range
- Disk Level Encryption
- Example
- Extended Precision Data Types
- Kafka
- Kerberos
- LDAP
- Lightweight Delete
- Map Type
- Part Moves Between Shards
- RBAC
- S3 AWS
- S3 GCS
- S3 Minio
- SSL Server
- Tiered Storage GCS
- Tiered Storage AWS
- Tiered Storage original
- Tiered Storage Minio
- Window Functions
Failed:
- S3 Minio
Test: /s3/minio zero copy replication/lost data during mutation
Details:
AssertionError
Traceback (most recent call last):
File "regression/s3/regression.py", line 451, in
regression()
File "regression/s3/regression.py", line 422, in regression
minio_regression(
File "regression/s3/regression.py", line 253, in minio_regression
Feature(test=load("s3.tests.zero_copy_replication", "minio"))(
File "/builds/altinity-qa/clickhouse/cicd/release/regression/s3/../s3/tests/zero_copy_replication.py", line 2176, in minio
outline()
File "/builds/altinity-qa/clickhouse/cicd/release/regression/s3/../s3/tests/zero_copy_replication.py", line 2130, in outline
scenario()
File "/builds/altinity-qa/clickhouse/cicd/release/regression/s3/../s3/tests/zero_copy_replication.py", line 2050, in lost_data_during_mutation
node.query(
File "/builds/altinity-qa/clickhouse/cicd/release/regression/s3/../helpers/cluster.py", line 620, in query
return self.query(
File "/builds/altinity-qa/clickhouse/cicd/release/regression/s3/../helpers/cluster.py", line 653, in query
assert False, error(r.output)
AssertionError: Oops! Assertion failed
The following assertion was not satisfied
assert False, error(r.output)
Description
Clickhouse1 9000 57 Code: 57. DB::Exception: Table default.table_be082398_434b_11ed_bdf5_0242ac110003 already exists. (TABLE_ALREADY_EXISTS) (version 22.3.12.20.altinitystable (altinity build)) 2 0
Received exception from server (version 22.3.12):
Code: 57. DB::Exception: Received from localhost:9000. DB::Exception: There was an error on [clickhouse1:9000]: Code: 57. DB::Exception: Table default.table_be082398_434b_11ed_bdf5_0242ac110003 already exists. (TABLE_ALREADY_EXISTS) (version 22.3.12.20.altinitystable (altinity build)). (TABLE_ALREADY_EXISTS)
(query: create table table_be082398_434b_11ed_bdf5_0242ac110003 on cluster 'sharded_cluster' (key UInt32, value1 String, value2 String, value3 String) engine=ReplicatedMergeTree('/table_be082398_434b_11ed_bdf5_0242ac110003', '{replica}')
order by key
partition by (key % 4)
settings storage_policy='external'
)
Assertion values
assert False, error(r.output)
^ is False
Where
File '/builds/altinity-qa/clickhouse/cicd/release/regression/s3/../helpers/cluster.py', line 653 in 'query'
645\| ) if steps else NullStep():
646\| assert message in r.output, error(r.output)
647\|
648\| if message is None or "Exception:" not in message:
649\| with Then("check if output has exception") if steps else NullStep():
650\| if "Exception:" in r.output:
651\| if raise_on_exception:
652\| raise QueryRuntimeException(r.output)
653\|> assert False, error(r.output)
654\|
655\| return r
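The scenario fails because the table it creates on the cluster already exists, presumably left over from an earlier step or retry of the same scenario (that root cause is an assumption, not stated in the report). A hedged sketch of an idempotent setup, reusing the `node.query` helper and cluster name shown in the traceback:
```python
# Hedged sketch: drop any leftover replica across the whole cluster before
# re-creating the table, so a retried scenario does not hit TABLE_ALREADY_EXISTS.
def recreate_replicated_table(node, table):
    # SYNC waits until the drop has completed on the replicas.
    node.query(f"DROP TABLE IF EXISTS {table} ON CLUSTER 'sharded_cluster' SYNC")
    node.query(
        f"CREATE TABLE {table} ON CLUSTER 'sharded_cluster' "
        "(key UInt32, value1 String, value2 String, value3 String) "
        f"ENGINE = ReplicatedMergeTree('/{table}', '{{replica}}') "
        "ORDER BY key PARTITION BY (key % 4) "
        "SETTINGS storage_policy = 'external'"
    )
```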