/alter | Fail 1h 51m | AssertionError
Traceback (most recent call last):
File "/usr/lib/python3.10/threading.py", line 973, in _bootstrap
self._bootstrap_inner()
File "/usr/lib/python3.10/threading.py", line 1016, in _bootstrap_inner
self.run()
File "/usr/lib/python3.10/threading.py", line 953, in run
self._target(*self._args, **self._kwargs)
File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/ubuntu/_work/ClickHouse/ClickHouse/alter/../alter/table/attach_partition/part_names/merge_increment.py", line 192, in merge_increment
Scenario(
File "/home/ubuntu/_work/ClickHouse/ClickHouse/alter/../alter/table/attach_partition/part_names/merge_increment.py", line 78, in check_merge_increment
assert part_name.output == expected_part_name, error(
AssertionError: Oops! Assertion failed
The following assertion was not satisfied
@TestScenario
def check_merge_increment(self, engine):
    """Check that when two or more parts are merged into one, the chunk level is
    incremented by one from highest level."""
    first_table = "first_" + getuid()
    node = self.context.node

    with Given("I create an empty source table"):
        create_MergeTree_table_with_data(
            table_name=first_table,
            engine=engine,
            order_by="id",
            number_of_rows=0,
        )

    with And(
        "I insert data to create multiple parts and optimize table so first part will have chunk level 2"
    ):
        node.query(
            f"INSERT INTO {first_table} (id, sign) SELECT number,1 FROM numbers(20)"
        )
        optimize_table(table_name=first_table)
        optimize_table(table_name=first_table)
        node.query(
            f"INSERT INTO {first_table} (id, sign) SELECT number,1 FROM numbers(2)"
        )
        node.query(
            f"INSERT INTO {first_table} (id, sign) SELECT number,1 FROM numbers(10)"
        )
        node.query(
            f"INSERT INTO {first_table} (id, sign) SELECT number,1 FROM numbers(5)"
        )

    with And("I create second table"):
        second_table = "second_" + getuid()
        create_MergeTree_table_with_data(
            table_name=second_table,
            engine=engine,
            order_by="id",
            number_of_rows=30,
        )

    with And("I optimize the second table 10 times to increase chunk level to 10"):
        for _ in range(10):
            optimize_table(table_name=second_table)

    with And("I attach partition from the second table to the first table"):
        attach_partition_from(
            source_table=second_table,
            destination_table=first_table,
            partition="tuple()",
        )

    with And("I check part names"):
        part_names = node.query(
            f"SELECT name FROM system.parts WHERE table = '{first_table}' AND active"
        ).output.split("\n")

    with And(
        "I merge all parts into one and expect chunk level to be incremented by one from highest level"
    ):
        optimize_table(table_name=first_table)
        expected_part_name = "all_1_5_11"

        part_name = node.query(
            f"SELECT name FROM system.parts WHERE table = '{first_table}' AND active"
        )
        for attempt in retries(timeout=30, delay=2):
            with attempt:
                assert part_name.output == expected_part_name, error(
Description
  Unexpected part name: all_1_5_12

Where
  File '/home/ubuntu/_work/ClickHouse/ClickHouse/alter/../alter/table/attach_partition/part_names/merge_increment.py', line 78 in 'check_merge_increment'

    70|        optimize_table(table_name=first_table)
    71|        expected_part_name = "all_1_5_11"
    72|
    73|        part_name = node.query(
    74|            f"SELECT name FROM system.parts WHERE table = '{first_table}' AND active"
    75|        )
    76|        for attempt in retries(timeout=30, delay=2):
    77|            with attempt:
    78|>               assert part_name.output == expected_part_name, error(
    79|                    f"Unexpected part name: {part_name.output}"
    80|                )
    81|
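A ClickHouse part name encodes <partition_id>_<min_block>_<max_block>_<level>, so the expected all_1_5_11 and the observed all_1_5_12 cover the same blocks (1..5 of partition all) and differ only in the merge level. As a minimal sketch only (not part of the scenario above), the level could be read directly from system.parts; `node` and `first_table` are assumed to be the objects used in check_merge_increment, and the clickhouse client's default TabSeparated output is assumed for parsing:

# Sketch: inspect merge levels directly instead of comparing full part names.
result = node.query(
    f"SELECT name, min_block_number, max_block_number, level "
    f"FROM system.parts WHERE table = '{first_table}' AND active"
)
levels = {}
for row in result.output.splitlines():
    name, min_block, max_block, level = row.split("\t")
    levels[name] = int(level)
# In the failing run this would give e.g. {"all_1_5_12": 12} after the final merge.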
/alter/attach partition | Fail 1h 48m | AssertionError
/alter/attach partition/part level | Fail 1m 33s | AssertionError
/alter/attach partition/part level/merge increment | Fail 1m 24s | AssertionError
Traceback and assertion details for these three entries are identical to the /alter failure above: the same check_merge_increment scenario expected part name all_1_5_11 and observed all_1_5_12.
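Note that in the quoted scenario, part_name is queried once before the retries() loop, so every retry attempt re-checks the same captured result. For illustration only, a hypothetical rearrangement that re-queries system.parts inside each attempt is sketched below; the names node, first_table, expected_part_name, retries, and error are taken from the scenario above, and this is not the repository's code:

for attempt in retries(timeout=30, delay=2):
    with attempt:
        # Re-query on every attempt so a merge that finishes late is still observed.
        part_name = node.query(
            f"SELECT name FROM system.parts WHERE table = '{first_table}' AND active"
        )
        assert part_name.output == expected_part_name, error(
            f"Unexpected part name: {part_name.output}"
        )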