diff --git a/.gitignore b/.gitignore index 2c19b1b3a2cd5..3dcc328829ab7 100644 --- a/.gitignore +++ b/.gitignore @@ -124,6 +124,7 @@ iotdb-core/tsfile/src/main/antlr4/org/apache/tsfile/parser/gen/ .mvn/.gradle-enterprise/ .mvn/.develocity/ .run/ +*.sevo # Relational Grammar ANTLR iotdb-core/relational-grammar/src/main/antlr4/org/apache/iotdb/db/relational/grammar/sql/.antlr/ diff --git a/dependencies.json b/dependencies.json index 1e88db84e77ed..ac83e0fed9250 100644 --- a/dependencies.json +++ b/dependencies.json @@ -113,7 +113,6 @@ "org.bouncycastle:bcprov-jdk18on", "org.bouncycastle:bcutil-jdk18on", "org.checkerframework:checker-qual", - "org.checkerframework:checker-qual", "org.eclipse.collections:eclipse-collections", "org.eclipse.collections:eclipse-collections-api", "org.eclipse.jetty:jetty-http", diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/RequestDelegate.java b/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/RequestDelegate.java index 47f31004e6f28..78e979d5c2565 100644 --- a/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/RequestDelegate.java +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/RequestDelegate.java @@ -126,6 +126,10 @@ protected void handleExceptions(Exception[] exceptions) throws SQLException { break; } } + + if (!exceptionInconsistent && exceptionMsg[0] != null) { + throw new SQLException(exceptionMsg[0]); + } for (int i = 0; i < businessExceptions.length; i++) { if (businessExceptions[i] != null) { // As each exception has its own stacktrace, in order to display them clearly, we can only @@ -134,9 +138,6 @@ protected void handleExceptions(Exception[] exceptions) throws SQLException { "Exception happens during request to {}", getEndpoints().get(i), businessExceptions[i]); } } - if (!exceptionInconsistent && exceptionMsg[0] != null) { - throw new SQLException(exceptionMsg[0]); - } if (exceptionInconsistent) { throw new 
InconsistentDataException(Arrays.asList(exceptionMsg), getEndpoints()); } diff --git a/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBLoadTsFileIT.java b/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBLoadTsFileIT.java index 5f88d50c9d90c..5a754c6afe808 100644 --- a/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBLoadTsFileIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBLoadTsFileIT.java @@ -19,6 +19,10 @@ package org.apache.iotdb.relational.it.db.it; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.ColumnRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.it.utils.TsFileTableGenerator; @@ -45,12 +49,18 @@ import java.nio.file.Files; import java.sql.Connection; import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; + @RunWith(IoTDBTestRunner.class) @Category({TableLocalStandaloneIT.class, TableClusterIT.class}) public class IoTDBLoadTsFileIT { @@ -255,7 +265,91 @@ public void testLoadWithTableMod() throws Exception { try (final ResultSet resultSet = statement.executeQuery("show tables")) { Assert.assertTrue(resultSet.next()); - Assert.assertFalse(resultSet.next()); + assertFalse(resultSet.next()); + } + } + } + + @Test + public void testLoadWithSevoFile() throws Exception { + final int 
lineCount = 10000; + + List> measurementSchemas = + generateMeasurementSchemas(); + List columnCategories = + generateTabletColumnCategory(0, measurementSchemas.size()); + + final File file = new File(tmpDir, "1-0-0-0.tsfile"); + + List schemaList1 = + measurementSchemas.stream().map(pair -> pair.left).collect(Collectors.toList()); + + try (final TsFileTableGenerator generator = new TsFileTableGenerator(file)) { + generator.registerTable(SchemaConfig.TABLE_0, new ArrayList<>(schemaList1), columnCategories); + generator.generateData(SchemaConfig.TABLE_0, lineCount, PARTITION_INTERVAL / 10_000); + } + + // rename table0 to table1 + File sevoFile = new File(tmpDir, "0.sevo"); + SchemaEvolutionFile schemaEvolutionFile = new SchemaEvolutionFile(sevoFile.getAbsolutePath()); + SchemaEvolution schemaEvolution = new TableRename(SchemaConfig.TABLE_0, SchemaConfig.TABLE_1); + schemaEvolutionFile.append(Collections.singletonList(schemaEvolution)); + // rename INT322INT32 to INT322INT32_NEW + schemaEvolution = new ColumnRename(SchemaConfig.TABLE_1, "INT322INT32", "INT322INT32_NEW"); + schemaEvolutionFile.append(Collections.singletonList(schemaEvolution)); + + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute(String.format("create database if not exists %s", SchemaConfig.DATABASE_0)); + statement.execute(String.format("use %s", SchemaConfig.DATABASE_0)); + statement.execute( + String.format( + "load '%s' with ('database'='%s', 'sevo-file-path'='%s')", + file.getAbsolutePath(), SchemaConfig.DATABASE_0, schemaEvolutionFile.getFilePath())); + + // cannot query using table0 + try (final ResultSet resultSet = + statement.executeQuery(String.format("select count(*) from %s", SchemaConfig.TABLE_0))) { + fail(); + } catch (SQLException e) { + assertEquals("550: Table 'root.test' does not exist.", e.getMessage()); + } + + // can query with table1 + try (final 
ResultSet resultSet = + statement.executeQuery(String.format("select count(*) from %s", SchemaConfig.TABLE_1))) { + if (resultSet.next()) { + Assert.assertEquals(lineCount, resultSet.getLong(1)); + } else { + Assert.fail("This ResultSet is empty."); + } + } + + // cannot query using INT322INT32 + try (final ResultSet resultSet = + statement.executeQuery( + String.format("select count(%s) from %s", "INT322INT32", SchemaConfig.TABLE_1))) { + fail(); + } catch (SQLException e) { + assertEquals("616: Column 'int322int32' cannot be resolved", e.getMessage()); + } + + // can query with INT322INT32_NEW + try (final ResultSet resultSet = + statement.executeQuery( + String.format("select count(%s) from %s", "INT322INT32_NEW", SchemaConfig.TABLE_1))) { + if (resultSet.next()) { + Assert.assertEquals(lineCount, resultSet.getLong(1)); + } else { + Assert.fail("This ResultSet is empty."); + } + } + + try (final ResultSet resultSet = statement.executeQuery("show tables")) { + Assert.assertTrue(resultSet.next()); + assertEquals(SchemaConfig.TABLE_1, resultSet.getString(1)); + assertFalse(resultSet.next()); } } } diff --git a/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTableIT.java b/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTableIT.java index 0923877d9771b..f08eb3350a3d9 100644 --- a/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTableIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTableIT.java @@ -31,12 +31,15 @@ import org.apache.tsfile.enums.ColumnCategory; import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.ColumnSchema; +import org.apache.tsfile.file.metadata.TableSchema; import org.apache.tsfile.write.record.Tablet; import org.apache.tsfile.write.schema.IMeasurementSchema; import org.apache.tsfile.write.schema.MeasurementSchema; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; 
+import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -52,6 +55,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.stream.Collectors; import static org.apache.iotdb.commons.schema.column.ColumnHeaderConstant.describeTableColumnHeaders; import static org.apache.iotdb.commons.schema.column.ColumnHeaderConstant.describeTableDetailsColumnHeaders; @@ -151,22 +155,6 @@ public void testManageTable() { assertEquals(tableNames.length, cnt); } - // Test unsupported, to be deleted - try { - statement.execute("alter table test1.table1 rename to tableN"); - } catch (final SQLException e) { - assertEquals("701: The renaming for base table is currently unsupported", e.getMessage()); - } - - // Test unsupported, to be deleted - try { - statement.execute( - "alter table if exists test_db.table1 rename column if exists model to modelType"); - } catch (final SQLException e) { - assertEquals( - "701: The renaming for base table column is currently unsupported", e.getMessage()); - } - // Alter table properties statement.execute("alter table test1.table1 set properties ttl=1000000"); ttls = new String[] {"1000000"}; @@ -636,7 +624,6 @@ public void testManageTable() { assertEquals("701: Columns in table shall not share the same name time.", e.getMessage()); } } catch (final SQLException e) { - e.printStackTrace(); fail(e.getMessage()); } } @@ -708,7 +695,7 @@ public void testConcurrentAutoCreateAndDropColumn() throws Exception { try (final ITableSession session = EnvFactory.getEnv().getTableSessionConnection(); final Connection adminCon = EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); final Statement adminStmt = adminCon.createStatement()) { - adminStmt.execute("create database db1"); + adminStmt.execute("create database if not exists db1"); session.executeNonQueryStatement("USE \"db1\""); final StringBuilder sb = new StringBuilder("CREATE TABLE 
table8 (tag1 string tag"); @@ -1113,4 +1100,919 @@ public void testTreeViewTable() throws Exception { } } } + + @Test + public void testAllowAlterTableName() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + try { + statement.execute( + "CREATE TABLE IF NOT EXISTS alter_table_name_disabled () WITH (allow_alter_name=1)"); + fail("allow_alter_name must be boolean"); + } catch (SQLException e) { + assertEquals( + "701: allow_alter_name value must be a BooleanLiteral, but now is LongLiteral, value: 1", + e.getMessage()); + } + + statement.execute( + "CREATE TABLE IF NOT EXISTS alter_table_name_disabled () WITH (allow_alter_name=false)"); + + try { + statement.execute( + "ALTER TABLE alter_table_name_disabled SET PROPERTIES allow_alter_name=true"); + fail("allow_alter_name cannot be altered"); + } catch (SQLException e) { + assertEquals("701: The property allow_alter_name cannot be altered.", e.getMessage()); + } + + try { + statement.execute("ALTER TABLE alter_table_name_disabled RENAME TO alter_table_named"); + fail("the table cannot be renamed"); + } catch (SQLException e) { + assertEquals( + "701: Table 'testdb.alter_table_name_disabled' is created in a old version and cannot be renamed, please migrate its data to a new table manually", + e.getMessage()); + } + } + } + + @Test + public void testAlterTableName() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + // alter once + statement.execute("CREATE 
TABLE IF NOT EXISTS alter_table_name (s1 int32)"); + statement.execute("INSERT INTO alter_table_name (time, s1) VALUES (1, 1)"); + statement.execute("ALTER TABLE alter_table_name RENAME TO alter_table_named"); + try { + statement.execute("INSERT INTO alter_table_name (time, s1) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals("550: Table 'testdb.alter_table_name' does not exist.", e.getMessage()); + } + statement.execute("INSERT INTO alter_table_named (time, s1) VALUES (2, 2)"); + + ResultSet resultSet = statement.executeQuery("SELECT * FROM alter_table_named"); + for (int i = 1; i <= 2; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getLong(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s1,time) FROM alter_table_named"); + assertTrue(resultSet.next()); + assertEquals(2, resultSet.getLong(1)); + assertEquals(2, resultSet.getLong(2)); + assertFalse(resultSet.next()); + + // alter twice + statement.execute("ALTER TABLE alter_table_named RENAME TO alter_table_named2"); + try { + statement.execute("INSERT INTO alter_table_named (time, s1) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals("550: Table 'testdb.alter_table_named' does not exist.", e.getMessage()); + } + statement.execute("INSERT INTO alter_table_named2 (time, s1) VALUES (3, 3)"); + + resultSet = statement.executeQuery("SELECT * FROM alter_table_named2"); + for (int i = 1; i <= 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getLong(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s1,time) FROM alter_table_named2"); + assertTrue(resultSet.next()); + assertEquals(3, resultSet.getLong(1)); + assertEquals(3, resultSet.getLong(2)); + assertFalse(resultSet.next()); + + // alter back + statement.execute("ALTER TABLE 
alter_table_named2 RENAME TO alter_table_name"); + try { + statement.execute("INSERT INTO alter_table_named2 (time, s1) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals("550: Table 'testdb.alter_table_named2' does not exist.", e.getMessage()); + } + statement.execute("INSERT INTO alter_table_name (time, s1) VALUES (4, 4)"); + + resultSet = statement.executeQuery("SELECT * FROM alter_table_name"); + for (int i = 1; i <= 4; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getLong(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s1,time) FROM alter_table_name"); + assertTrue(resultSet.next()); + assertEquals(4, resultSet.getLong(1)); + assertEquals(4, resultSet.getLong(2)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testAlterColumnName() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS alter_column_name (s1 int32)"); + statement.execute("INSERT INTO alter_column_name (time, s1) VALUES (1, 1)"); + // alter once + statement.execute("ALTER TABLE alter_column_name RENAME COLUMN s1 TO s2"); + try { + statement.execute("INSERT INTO alter_column_name (time, s1) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals( + "616: Unknown column category for s1. 
Cannot auto create column.", e.getMessage()); + } + statement.execute("INSERT INTO alter_column_name (time, s2) VALUES (2, 2)"); + + ResultSet resultSet = statement.executeQuery("SELECT * FROM alter_column_name"); + ResultSetMetaData metaData = resultSet.getMetaData(); + assertEquals(2, metaData.getColumnCount()); + assertEquals("s2", metaData.getColumnName(2)); + for (int i = 1; i <= 2; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getInt(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s2,time) FROM alter_column_name"); + assertTrue(resultSet.next()); + assertEquals(2, resultSet.getLong(1)); + assertEquals(2, resultSet.getLong(2)); + assertFalse(resultSet.next()); + // alter twice + statement.execute("ALTER TABLE alter_column_name RENAME COLUMN s2 TO s3"); + try { + statement.execute("INSERT INTO alter_column_name (time, s2) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals( + "616: Unknown column category for s2. 
Cannot auto create column.", e.getMessage()); + } + statement.execute("INSERT INTO alter_column_name (time, s3) VALUES (3, 3)"); + + resultSet = statement.executeQuery("SELECT * FROM alter_column_name"); + metaData = resultSet.getMetaData(); + assertEquals(2, metaData.getColumnCount()); + assertEquals("s3", metaData.getColumnName(2)); + for (int i = 1; i <= 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getInt(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s3,time) FROM alter_column_name"); + assertTrue(resultSet.next()); + assertEquals(3, resultSet.getLong(1)); + assertEquals(3, resultSet.getLong(2)); + assertFalse(resultSet.next()); + // alter back + statement.execute("ALTER TABLE alter_column_name RENAME COLUMN s3 TO s1"); + try { + statement.execute("INSERT INTO alter_column_name (time, s3) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals( + "616: Unknown column category for s3. 
Cannot auto create column.", e.getMessage()); + } + statement.execute("INSERT INTO alter_column_name (time, s1) VALUES (4, 4)"); + + resultSet = statement.executeQuery("SELECT * FROM alter_column_name"); + metaData = resultSet.getMetaData(); + assertEquals(2, metaData.getColumnCount()); + assertEquals("s1", metaData.getColumnName(2)); + for (int i = 1; i <= 4; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getInt(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s1,time) FROM alter_column_name"); + assertTrue(resultSet.next()); + assertEquals(4, resultSet.getLong(1)); + assertEquals(4, resultSet.getLong(2)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testTableRenameConflict() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS table_a ()"); + statement.execute("CREATE TABLE IF NOT EXISTS table_b ()"); + + try { + statement.execute("ALTER TABLE table_a RENAME TO table_b"); + fail(); + } catch (final SQLException e) { + // expect table already exists (use code 551) + assertTrue( + e.getMessage().startsWith("551") && e.getMessage().toLowerCase().contains("already")); + } + } + } + + @Test + public void testColumnRenameConflict() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS 
tconf (c1 int32, c2 int32)"); + + try { + statement.execute("ALTER TABLE tconf RENAME COLUMN c1 TO c2"); + fail(); + } catch (final SQLException e) { + // expect column already exist error (code 552) + assertTrue( + e.getMessage().startsWith("552") && e.getMessage().toLowerCase().contains("exist")); + } + } + } + + @Test + public void testAlterTableRenameToSameName() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS rename_same (s1 int32)"); + statement.execute("INSERT INTO rename_same (time, s1) VALUES (1, 1)"); + + // Renaming to the same name should be a no-op and not lose data + try { + statement.execute("ALTER TABLE rename_same RENAME TO rename_same"); + fail(); + } catch (SQLException e) { + assertEquals( + "701: The table's old name shall not be equal to the new one.", e.getMessage()); + } + } + } + + @Test + public void testAlterTableRenameToQuotedSpecialName() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS rename_special (s1 int32)"); + statement.execute("INSERT INTO rename_special (time, s1) VALUES (1, 1)"); + + // rename to a quoted name containing hyphen and unicode + statement.execute("ALTER TABLE rename_special RENAME TO \"rename-特殊\""); + + // old name should not exist + try { + statement.execute("INSERT INTO rename_special (time, s1) VALUES (2, 2)"); + fail(); + } catch (final SQLException e) 
{ + assertTrue( + e.getMessage().startsWith("550") + || e.getMessage().toLowerCase().contains("does not exist")); + } + + // insert into new quoted name and verify + statement.execute("INSERT INTO \"rename-特殊\" (time, s1) VALUES (2, 2)"); + ResultSet rs = statement.executeQuery("SELECT * FROM \"rename-特殊\""); + for (int i = 1; i <= 2; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getLong(1)); + assertEquals(i, rs.getInt(2)); + } + assertFalse(rs.next()); + } + } + + @Test + public void testAlterTableRenameWithDots() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS db1"); + statement.execute("DROP DATABASE IF EXISTS db2"); + statement.execute("CREATE DATABASE IF NOT EXISTS db1"); + statement.execute("CREATE DATABASE IF NOT EXISTS db2"); + statement.execute("USE db1"); + + statement.execute("CREATE TABLE IF NOT EXISTS t1 (s1 int32)"); + statement.execute("INSERT INTO t1 (time, s1) VALUES (1, 1)"); + + statement.execute("ALTER TABLE t1 RENAME TO \"db2.t1\""); + + ResultSet rs = statement.executeQuery("SELECT * FROM \"db2.t1\""); + assertTrue(rs.next()); + assertEquals(1, rs.getLong(1)); + assertEquals(1, rs.getInt(2)); + assertFalse(rs.next()); + } + } + + @Test + public void testAlterColumnRenameCaseSensitivity() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS tcase (c1 int32)"); + statement.execute("INSERT INTO tcase (time, c1) VALUES (1, 1)"); + + statement.execute("ALTER TABLE tcase RENAME COLUMN c1 TO C1"); + + ResultSet rs = 
statement.executeQuery("SELECT * FROM tcase"); + ResultSetMetaData md = rs.getMetaData(); + assertEquals(2, md.getColumnCount()); + // server may normalize column names; accept either exact case or normalized lower-case + String colName = md.getColumnName(2); + assertTrue(colName.equals("C1") || colName.equals("c1")); + + // ensure data still accessible via the new identifier (try using the new name in insert) + try { + statement.execute("INSERT INTO tcase (time, c1) VALUES (2, 2)"); + // if server treats identifiers case-insensitively this may succeed + } catch (final SQLException ignored) { + // ignore - the purpose is to assert existence/behavior, not enforce one model here + } + } + } + + @Test + public void testAlterColumnRenameToQuotedSpecialChars() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS tcolspecial (s1 int32)"); + statement.execute("INSERT INTO tcolspecial (time, s1) VALUES (1, 1)"); + + statement.execute("ALTER TABLE tcolspecial RENAME COLUMN s1 TO \"s-特\""); + + try { + statement.execute("INSERT INTO tcolspecial (time, s1) VALUES (2, 2)"); + fail(); + } catch (final SQLException e) { + assertTrue( + e.getMessage().startsWith("616") || e.getMessage().toLowerCase().contains("unknown")); + } + + statement.execute("INSERT INTO tcolspecial (time, \"s-特\") VALUES (2, 2)"); + ResultSet rs = statement.executeQuery("SELECT * FROM tcolspecial"); + ResultSetMetaData md = rs.getMetaData(); + assertEquals(2, md.getColumnCount()); + String colName = md.getColumnName(2); + // accept either exact quoted name or normalized variant + assertTrue(colName.equals("s-特") || colName.equals("s特") || colName.equals("s_特")); + } + } + + @Test + 
public void testAlterColumnMultipleRenamesAndBack() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS tmulti (a int32)"); + statement.execute("INSERT INTO tmulti (time, a) VALUES (1, 1)"); + + statement.execute("ALTER TABLE tmulti RENAME COLUMN a TO b"); + statement.execute("INSERT INTO tmulti (time, b) VALUES (2, 2)"); + + statement.execute("ALTER TABLE tmulti RENAME COLUMN b TO c"); + statement.execute("INSERT INTO tmulti (time, c) VALUES (3, 3)"); + + statement.execute("ALTER TABLE tmulti RENAME COLUMN c TO a"); + statement.execute("INSERT INTO tmulti (time, a) VALUES (4, 4)"); + + ResultSet rs = statement.executeQuery("SELECT * FROM tmulti"); + for (int i = 1; i <= 4; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getLong(1)); + assertEquals(i, rs.getInt(2)); + } + assertFalse(rs.next()); + } + } + + @Test + public void testRenameNonExistentColumn() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS tnonexist (x int32)"); + + try { + statement.execute("ALTER TABLE tnonexist RENAME COLUMN y TO z"); + fail(); + } catch (final SQLException e) { + // error should indicate column does not exist (use code 616 + contains) + assertTrue(e.getMessage().startsWith("616")); + assertTrue( + e.getMessage().toLowerCase().contains("does not exist") + || e.getMessage().toLowerCase().contains("cannot be resolved")); + } 
+ } + } + + @Test + public void testRenameTimeColumnForbidden() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + // create a table with explicit time column + statement.execute("CREATE TABLE IF NOT EXISTS ttime (time TIMESTAMP TIME, a INT32)"); + + try { + statement.execute("ALTER TABLE ttime RENAME COLUMN time TO newtime"); + fail(); + } catch (final SQLException e) { + assertEquals("615: The renaming for time column is not supported.", e.getMessage()); + } + } + } + + // Helper: recognize SQLExceptions that mean the target table/device cannot be found. + private static boolean isTableNotFound(final SQLException e) { + if (e == null) return false; + final String msg = e.getMessage(); + if (msg == null) return false; + final String lm = msg.toLowerCase(); + // code 550 is commonly used for 'does not exist' in this project; also match textual phrases + return msg.startsWith("550") || lm.contains("not exist"); + } + + @Test(timeout = 120000) + @SuppressWarnings("resource") + public void testConcurrentRenameVsQueries() throws Throwable { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement stmt = connection.createStatement()) { + final String db = "concrenamedb"; + final int tableCount = 6; + final int rows = 50; + stmt.execute("DROP DATABASE IF EXISTS " + db); + stmt.execute("CREATE DATABASE IF NOT EXISTS " + db); + stmt.execute("USE " + db); + + final String[] names = new String[tableCount]; + for (int i = 0; i < tableCount; i++) { + names[i] = "crtable" + i; + stmt.execute(String.format("CREATE TABLE IF NOT EXISTS %s (v int32)", names[i])); + for (int r = 1; r <= rows; r++) { + stmt.execute(String.format("INSERT INTO 
%s (time, v) VALUES (%d, %d)", names[i], r, r)); + } + } + + final java.util.concurrent.atomic.AtomicReference err = + new java.util.concurrent.atomic.AtomicReference<>(); + final java.util.concurrent.CountDownLatch startLatch = + new java.util.concurrent.CountDownLatch(1); + final java.util.concurrent.CountDownLatch doneLatch = + new java.util.concurrent.CountDownLatch(4); + + java.util.concurrent.ExecutorService exec = null; + try { + exec = java.util.concurrent.Executors.newFixedThreadPool(8); + + // Renamer task: rotate rename a subset of tables repeatedly + exec.submit( + () -> { + try (final Connection c = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement s = c.createStatement()) { + startLatch.await(); + // ensure this thread's connection uses the test database + try { + s.execute("USE " + db); + } catch (final SQLException ignore) { + } + for (int round = 0; round < 20 && err.get() == null; round++) { + for (int i = 0; i < tableCount / 2; i++) { + final String oldName = names[i]; + final String newName = oldName + "_r" + round; + try { + s.execute(String.format("ALTER TABLE %s RENAME TO %s", oldName, newName)); + // reflect change locally so queries target updated names + names[i] = newName; + } catch (final SQLException ex) { + // Only ignore if the failure is due to table not existing; otherwise record + // the error + if (isTableNotFound(ex)) { + // table not found: likely a transient race with concurrent rename — ignore + // and log + System.out.println( + "Ignored table-not-found during rename: " + ex.getMessage()); + } else { + err.compareAndSet(null, ex); + } + } + } + try { + Thread.sleep(50); + } catch (final InterruptedException ie) { + Thread.currentThread().interrupt(); + break; + } + } + } catch (final Throwable t) { + err.compareAndSet(null, t); + } finally { + doneLatch.countDown(); + } + }); + + // Queryer tasks: continuously query random tables + for (int q = 0; q < 2; q++) { + exec.submit( + () -> { + try 
(final Connection c = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement s = c.createStatement()) { + final java.util.Random rnd = new java.util.Random(); + startLatch.await(); + // ensure this thread's connection uses the test database + try { + s.execute("USE " + db); + } catch (final SQLException ignore) { + } + for (int iter = 0; iter < 200 && err.get() == null; iter++) { + final int idx = rnd.nextInt(tableCount); + final String tname = names[idx]; + try (final ResultSet rs = s.executeQuery("SELECT count(*) FROM " + tname)) { + if (rs.next()) { + rs.getLong(1); + } + } catch (final SQLException ex) { + // Only ignore table-not-found; otherwise surface the error to fail the test + if (!isTableNotFound(ex)) { + err.compareAndSet(null, ex); + break; + } + } + } + } catch (final Throwable t) { + err.compareAndSet(null, t); + } finally { + doneLatch.countDown(); + } + }); + } + + // Another queryer to trigger more parallel access + exec.submit( + () -> { + try (final Connection c = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement s = c.createStatement()) { + startLatch.await(); + // ensure this thread's connection uses the test database + try { + s.execute("USE " + db); + } catch (final SQLException ignore) { + } + for (int iter = 0; iter < 200 && err.get() == null; iter++) { + for (int i = 0; i < tableCount; i++) { + try (final ResultSet rs = + s.executeQuery("SELECT * FROM " + names[i] + " LIMIT 1")) { + // consume + while (rs.next()) { + rs.getLong(1); + } + } catch (final SQLException ex) { + if (!isTableNotFound(ex)) { + err.compareAndSet(null, ex); + break; + } + } + } + } + } catch (final Throwable t) { + err.compareAndSet(null, t); + } finally { + doneLatch.countDown(); + } + }); + + // start + startLatch.countDown(); + // wait for tasks + doneLatch.await(); + + if (err.get() != null) { + throw err.get(); + } + } finally { + if (exec != null) { + exec.shutdownNow(); + } + } + } + } + + @Test + 
public void testMultiTableCrossCheckAfterRenames() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement stmt = connection.createStatement()) { + final String db = "multicheckdb"; + stmt.execute("DROP DATABASE IF EXISTS " + db); + stmt.execute("CREATE DATABASE IF NOT EXISTS " + db); + stmt.execute("USE " + db); + + // create two related tables + stmt.execute("CREATE TABLE IF NOT EXISTS mta (k int32)"); + stmt.execute("CREATE TABLE IF NOT EXISTS mtb (k int32)"); + + for (int i = 1; i <= 10; i++) { + stmt.execute(String.format("INSERT INTO mta (time, k) VALUES (%d, %d)", i, i)); + stmt.execute(String.format("INSERT INTO mtb (time, k) VALUES (%d, %d)", i, i)); + } + + // baseline: read aggregates + long aCount = 0, bCount = 0; + try (final ResultSet ra = stmt.executeQuery("SELECT count(*) FROM mta")) { + if (ra.next()) { + aCount = ra.getLong(1); + } + } + try (final ResultSet rb = stmt.executeQuery("SELECT count(*) FROM mtb")) { + if (rb.next()) { + bCount = rb.getLong(1); + } + } + + // rename one table and verify cross results remain consistent when queried separately + stmt.execute("ALTER TABLE mtb RENAME TO mtb_renamed"); + + long bCountAfter = 0; + try (final ResultSet rb2 = stmt.executeQuery("SELECT count(*) FROM mtb_renamed")) { + if (rb2.next()) { + bCountAfter = rb2.getLong(1); + } + } + + // assert counts unchanged + assertEquals(bCount, bCountAfter); + assertEquals(10, aCount); + + // rename the other table and verify again + stmt.execute("ALTER TABLE mta RENAME TO mta_renamed"); + long aCountAfter = 0; + try (final ResultSet ra2 = stmt.executeQuery("SELECT count(*) FROM mta_renamed")) { + if (ra2.next()) { + aCountAfter = ra2.getLong(1); + } + } + assertEquals(aCount, aCountAfter); + } + } + + @Ignore("Performance test, not for regular CI") + @Test + public void testPerformanceWithQuotedSpecialNameRenames() throws Exception { + try (final Connection connection = + 
EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement stmt = connection.createStatement(); + final ITableSession session = EnvFactory.getEnv().getTableSessionConnection()) { + final String db = "perfquotedb"; + final int numColsPerTable = 100; + final int numTables = 800; + final int numRowsPerFile = 1000; + final int numFilesPerTable = 5; + final int runs = 10; + final float ratioAlteredTables = 0.5f; + stmt.execute("DROP DATABASE IF EXISTS " + db); + stmt.execute("CREATE DATABASE IF NOT EXISTS " + db); + stmt.execute("USE " + db); + stmt.execute("set configuration enable_seq_space_compaction='false'"); + session.executeNonQueryStatement("USE " + db); + + final String[] names = new String[numTables]; + StringBuilder createTableTemplate = new StringBuilder("CREATE TABLE IF NOT EXISTS %s ("); + for (int c = 0; c < numColsPerTable; c++) { + createTableTemplate.append(String.format("v%d int32,", c)); + } + createTableTemplate = + new StringBuilder( + createTableTemplate.substring(0, createTableTemplate.length() - 1) + ")"); + List columns = new ArrayList<>(); + for (int i = 0; i < numColsPerTable; i++) { + columns.add(new ColumnSchema("v" + i, TSDataType.INT32, ColumnCategory.FIELD)); + } + TableSchema tableSchema = + new TableSchema( + "", // place holder + columns); + + System.out.println("Start data preparation..."); + for (int i = 0; i < numTables; i++) { + names[i] = "qtable" + i; + stmt.execute(String.format(createTableTemplate.toString(), names[i])); + tableSchema.setTableName(names[i]); + Tablet tablet = + new Tablet( + tableSchema.getTableName(), + tableSchema.getColumnSchemas().stream() + .map(IMeasurementSchema::getMeasurementName) + .collect(Collectors.toList()), + tableSchema.getColumnSchemas().stream() + .map(IMeasurementSchema::getType) + .collect(Collectors.toList()), + tableSchema.getColumnTypes(), + numRowsPerFile); + for (int j = 0; j < numFilesPerTable; j++) { + tablet.reset(); + for (int r = 1; r <= numRowsPerFile; r++) { 
+ tablet.addTimestamp(r - 1, r + j * numRowsPerFile); + for (int c = 0; c < numColsPerTable; c++) { + tablet.addValue(r - 1, c, r + j * numRowsPerFile); + } + } + session.insert(tablet); + stmt.execute("FLUSH"); + } + } + System.out.println("Data preparation done."); + + // baseline measurement: simple average over a few runs + double totalMs = 0.0; + for (int run = 0; run < runs; run++) { + final long start = System.nanoTime(); + for (int i = 0; i < numTables; i++) { + try (final ResultSet rs = stmt.executeQuery("SELECT count(*) FROM " + names[i])) { + assertTrue(rs.next()); + assertEquals(numRowsPerFile * numFilesPerTable, rs.getLong(1)); + } + } + final long end = System.nanoTime(); + if (run > runs * 0.1) { + totalMs += (end - start) / 1_000_000.0; + } + } + final double baseline = totalMs / (runs * 0.9); + System.out.println("baseline_total_ms=" + String.format("%.3f", baseline)); + + // rename some of them to quoted special names and measure again + for (int i = 0; i < numTables * ratioAlteredTables; i++) { + final String oldName = names[i]; + final String newName = "\"" + oldName + "-特\""; // quoted name + stmt.execute(String.format("ALTER TABLE %s RENAME TO %s", oldName, newName)); + names[i] = newName; + } + + totalMs = 0.0; + for (int run = 0; run < runs; run++) { + final long start = System.nanoTime(); + for (int i = 0; i < numTables; i++) { + try (final ResultSet rs = stmt.executeQuery("SELECT count(*) FROM " + names[i])) { + assertTrue(rs.next()); + assertEquals(numRowsPerFile * numFilesPerTable, rs.getLong(1)); + } + } + final long end = System.nanoTime(); + if (run > runs * 0.1) { + totalMs += (end - start) / 1_000_000.0; + } + } + final double after = totalMs / (runs * 0.9); + System.out.println("after_quoted_total_ms=" + String.format("%.3f", after)); + + // basic sanity: ensure queries still return counts + for (int i = 0; i < numTables; i++) { + try (final ResultSet rs = stmt.executeQuery("SELECT count(*) FROM " + names[i])) { + 
assertTrue(rs.next()); + assertEquals(numRowsPerFile * numFilesPerTable, rs.getLong(1)); + } + } + } + } + + @Test + public void testAlterTableAndColumnTogether() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement stmt = connection.createStatement()) { + final String db = "dualalterdb"; + stmt.execute("DROP DATABASE IF EXISTS " + db); + stmt.execute("CREATE DATABASE IF NOT EXISTS " + db); + stmt.execute("USE " + db); + + stmt.execute("CREATE TABLE IF NOT EXISTS tab1 (c1 int32, c2 int32)"); + stmt.execute("INSERT INTO tab1 (time, c1, c2) VALUES (1, 1, 10)"); + + // rename column first and then rename table + stmt.execute("ALTER TABLE tab1 RENAME COLUMN c1 TO c1_new"); + stmt.execute("ALTER TABLE tab1 RENAME TO tab1_new"); + + // old table name should not exist + try { + stmt.execute("INSERT INTO tab1 (time, c1_new) VALUES (2, 2)"); + fail(); + } catch (final SQLException e) { + assertTrue( + e.getMessage().startsWith("550") + || e.getMessage().toLowerCase().contains("does not exist")); + } + + // inserting using new table and new column names should succeed + stmt.execute("INSERT INTO tab1_new (time, c1_new, c2) VALUES (2, 2, 20)"); + + // verify data + try (final ResultSet rs = stmt.executeQuery("SELECT * FROM tab1_new ORDER BY time")) { + assertTrue(rs.next()); + assertEquals(1, rs.getLong(1)); + assertEquals(1, rs.getInt("c1_new")); + assertEquals(10, rs.getInt("c2")); + + assertTrue(rs.next()); + assertEquals(2, rs.getLong(1)); + assertEquals(2, rs.getInt("c1_new")); + assertEquals(20, rs.getInt("c2")); + + assertFalse(rs.next()); + } + + // rename column again on the renamed table and verify + stmt.execute("ALTER TABLE tab1_new RENAME COLUMN c1_new TO c1_final"); + try { + // old column identifier should fail + stmt.execute("INSERT INTO tab1_new (time, c1_new) VALUES (3, 3)"); + fail(); + } catch (final SQLException e) { + assertTrue( + e.getMessage().startsWith("616") + || 
e.getMessage().toLowerCase().contains("unknown") + || e.getMessage().toLowerCase().contains("cannot be resolved")); + } + + // use final name + stmt.execute("INSERT INTO tab1_new (time, c1_final, c2) VALUES (3, 3, 30)"); + try (final ResultSet rs = stmt.executeQuery("SELECT count(*) FROM tab1_new")) { + if (rs.next()) { + assertEquals(3L, rs.getLong(1)); + } else { + fail(); + } + } + } + } } diff --git a/iotdb-client/client-go b/iotdb-client/client-go index dc64b1a7648d3..0ddc50a4d11ed 160000 --- a/iotdb-client/client-go +++ b/iotdb-client/client-go @@ -1 +1 @@ -Subproject commit dc64b1a7648d3c505c10eed5419f422bb49f1def +Subproject commit 0ddc50a4d11ed453252270ac3e20a13c759ce940 diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java index e5753bf1bd184..676d6b8b487fb 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java @@ -129,6 +129,8 @@ public enum CnToDnAsyncRequestType { DELETE_DATA_FOR_TABLE_DEVICE, DELETE_TABLE_DEVICE_IN_BLACK_LIST, DETECT_TREE_DEVICE_VIEW_FIELD_TYPE, + EVOLVE_DATA_REGION_SCHEMA, + EVOLVE_SCHEMA_REGION_SCHEMA, // audit log and event write-back INSERT_RECORD, diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java index cd69f8b2c846d..b7c44ab66e40a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java +++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java @@ -64,6 +64,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TCreatePipePluginInstanceReq; import org.apache.iotdb.mpp.rpc.thrift.TCreateSchemaRegionReq; import org.apache.iotdb.mpp.rpc.thrift.TCreateTriggerInstanceReq; +import org.apache.iotdb.mpp.rpc.thrift.TDataRegionEvolveSchemaReq; import org.apache.iotdb.mpp.rpc.thrift.TDeactivateTemplateReq; import org.apache.iotdb.mpp.rpc.thrift.TDeleteColumnDataReq; import org.apache.iotdb.mpp.rpc.thrift.TDeleteDataForDeleteSchemaReq; @@ -97,6 +98,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TRollbackSchemaBlackListReq; import org.apache.iotdb.mpp.rpc.thrift.TRollbackSchemaBlackListWithTemplateReq; import org.apache.iotdb.mpp.rpc.thrift.TRollbackViewSchemaBlackListReq; +import org.apache.iotdb.mpp.rpc.thrift.TSchemaRegionEvolveSchemaReq; import org.apache.iotdb.mpp.rpc.thrift.TTableDeviceDeletionWithPatternAndFilterReq; import org.apache.iotdb.mpp.rpc.thrift.TTableDeviceDeletionWithPatternOrModReq; import org.apache.iotdb.mpp.rpc.thrift.TTableDeviceInvalidateCacheReq; @@ -440,6 +442,16 @@ protected void initActionMapBuilder() { (req, client, handler) -> client.deleteColumnData( (TDeleteColumnDataReq) req, (DataNodeTSStatusRPCHandler) handler)); + actionMapBuilder.put( + CnToDnAsyncRequestType.EVOLVE_DATA_REGION_SCHEMA, + (req, client, handler) -> + client.evolveSchemaInDataRegion( + (TDataRegionEvolveSchemaReq) req, (DataNodeTSStatusRPCHandler) handler)); + actionMapBuilder.put( + CnToDnAsyncRequestType.EVOLVE_SCHEMA_REGION_SCHEMA, + (req, client, handler) -> + client.evolveSchemaInSchemaRegion( + (TSchemaRegionEvolveSchemaReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( CnToDnAsyncRequestType.CONSTRUCT_TABLE_DEVICE_BLACK_LIST, (req, client, handler) -> diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java index b2e2ec3232781..2efabd4cbb902 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java @@ -237,6 +237,8 @@ public static DataNodeAsyncRequestRPCHandler buildHandler( case DELETE_DEVICES_FOR_DROP_TABLE: case INVALIDATE_COLUMN_CACHE: case DELETE_COLUMN_DATA: + case EVOLVE_DATA_REGION_SCHEMA: + case EVOLVE_SCHEMA_REGION_SCHEMA: case CONSTRUCT_TABLE_DEVICE_BLACK_LIST: case ROLLBACK_TABLE_DEVICE_BLACK_LIST: case INVALIDATE_MATCHED_TABLE_DEVICE_CACHE: diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java index e51d1a43299b5..582ffd16220f3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java @@ -48,7 +48,11 @@ import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.conf.TrimProperties; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.exception.IoTDBRuntimeException; import org.apache.iotdb.commons.exception.MetadataException; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.NoTableNameDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import 
org.apache.iotdb.commons.path.MeasurementPath; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathDeserializeUtil; @@ -108,6 +112,7 @@ import org.apache.iotdb.confignode.consensus.response.template.TemplateSetInfoResp; import org.apache.iotdb.confignode.consensus.response.ttl.ShowTTLResp; import org.apache.iotdb.confignode.consensus.statemachine.ConfigRegionStateMachine; +import org.apache.iotdb.confignode.exception.DatabaseNotExistsException; import org.apache.iotdb.confignode.manager.consensus.ConsensusManager; import org.apache.iotdb.confignode.manager.cq.CQManager; import org.apache.iotdb.confignode.manager.externalservice.ExternalServiceInfo; @@ -259,6 +264,7 @@ import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.IDeviceID.Factory; import org.apache.tsfile.utils.Pair; import org.apache.tsfile.utils.ReadWriteIOUtils; import org.slf4j.Logger; @@ -278,6 +284,8 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -825,9 +833,43 @@ private List calculateRelatedSlot( return Collections.emptyList(); } } + IDeviceID deviceID = Factory.DEFAULT_FACTORY.create(devicePath); + + SeriesPartitionKey seriesPartitionKey = getSeriesPartitionKey(deviceID, database.getFullPath()); return Collections.singletonList( - getPartitionManager() - .getSeriesPartitionSlot(IDeviceID.Factory.DEFAULT_FACTORY.create(devicePath))); + getPartitionManager().getSeriesPartitionSlot(seriesPartitionKey)); + } + + @SuppressWarnings("OptionalGetWithoutIsPresent") + @Override + public SeriesPartitionKey getSeriesPartitionKey(IDeviceID deviceID, String databaseName) { + SeriesPartitionKey seriesPartitionKey; + boolean isTableModel = false; + try { + TDatabaseSchema databaseSchema = + 
getClusterSchemaManager().getDatabaseSchemaByName(databaseName); + isTableModel = databaseSchema.isTableModel; + } catch (DatabaseNotExistsException e) { + throw new IoTDBRuntimeException(e, TSStatusCode.TABLE_NOT_EXISTS.getStatusCode()); + } + + if (isTableModel) { + try { + Optional tableOptional = + getClusterSchemaManager().getTableIfExists(databaseName, deviceID.getTableName()); + TsTable tsTable = tableOptional.get(); + boolean canAlterTableName = tsTable.canAlterName(); + seriesPartitionKey = + canAlterTableName + ? new NoTableNameDeviceIdKey(deviceID) + : new FullDeviceIdKey(deviceID); + } catch (NoSuchElementException | MetadataException e) { + throw new IoTDBRuntimeException(e, TSStatusCode.TABLE_NOT_EXISTS.getStatusCode()); + } + } else { + seriesPartitionKey = new FullDeviceIdKey(deviceID); + } + return seriesPartitionKey; } @Override @@ -921,9 +963,10 @@ public TSchemaPartitionTableResp getOrCreateSchemaPartition(final PathPatternTre for (final IDeviceID deviceID : devicePaths) { for (final String database : databases) { if (PathUtils.isStartWith(deviceID, database)) { + SeriesPartitionKey seriesPartitionKey = getSeriesPartitionKey(deviceID, database); partitionSlotsMap .computeIfAbsent(database, key -> new HashSet<>()) - .add(getPartitionManager().getSeriesPartitionSlot(deviceID)); + .add(getPartitionManager().getSeriesPartitionSlot(seriesPartitionKey)); break; } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java index fe83b8bd0c189..61748b69f9719 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java @@ -33,6 +33,7 @@ import org.apache.iotdb.common.rpc.thrift.TShowConfigurationResp; import org.apache.iotdb.commons.auth.entity.PrivilegeUnion; import 
org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; import org.apache.iotdb.confignode.audit.CNAuditLogger; @@ -165,6 +166,8 @@ import org.apache.iotdb.consensus.common.DataSet; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.tsfile.file.metadata.IDeviceID; + import java.nio.ByteBuffer; import java.util.List; import java.util.Map; @@ -422,6 +425,9 @@ public interface IManager { */ TSStatus deleteDatabases(TDeleteDatabasesReq tDeleteReq); + @SuppressWarnings("OptionalGetWithoutIsPresent") + SeriesPartitionKey getSeriesPartitionKey(IDeviceID deviceID, String databaseName); + /** * Get SchemaPartition. * diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java index 576d805c78624..f76fb5421322f 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java @@ -35,6 +35,7 @@ import org.apache.iotdb.commons.partition.DataPartitionTable; import org.apache.iotdb.commons.partition.SchemaPartitionTable; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; @@ -1055,11 +1056,11 @@ public boolean isDatabasePreDeleted(final String database) { /** * Get TSeriesPartitionSlot. 
* - * @param deviceID IDeviceID + * @param key IDeviceID * @return SeriesPartitionSlot */ - public TSeriesPartitionSlot getSeriesPartitionSlot(final IDeviceID deviceID) { - return executor.getSeriesPartitionSlot(deviceID); + public TSeriesPartitionSlot getSeriesPartitionSlot(final SeriesPartitionKey key) { + return executor.getSeriesPartitionSlot(key); } public RegionInfoListResp getRegionInfoList(final GetRegionInfoListPlan req) { @@ -1152,8 +1153,11 @@ public GetRegionIdResp getRegionId(final TGetRegionIdReq req) { } else { final IDeviceID deviceID = Deserializer.DEFAULT_DESERIALIZER.deserializeFrom(ByteBuffer.wrap(req.getDevice())); - plan.setDatabase(getClusterSchemaManager().getDatabaseNameByDevice(deviceID)); - plan.setSeriesSlotId(executor.getSeriesPartitionSlot(deviceID)); + String databaseName = getClusterSchemaManager().getDatabaseNameByDevice(deviceID); + plan.setDatabase(databaseName); + SeriesPartitionKey seriesPartitionKey = + configManager.getSeriesPartitionKey(deviceID, databaseName); + plan.setSeriesSlotId(executor.getSeriesPartitionSlot(seriesPartitionKey)); } if (Objects.equals(plan.getDatabase(), "")) { // Return empty result if Database not specified @@ -1189,8 +1193,11 @@ public GetTimeSlotListResp getTimeSlotList(TGetTimeSlotListReq req) { } else if (req.isSetDevice()) { IDeviceID deviceID = Deserializer.DEFAULT_DESERIALIZER.deserializeFrom(ByteBuffer.wrap(req.getDevice())); - plan.setDatabase(getClusterSchemaManager().getDatabaseNameByDevice(deviceID)); - plan.setSeriesSlotId(executor.getSeriesPartitionSlot(deviceID)); + String databaseName = getClusterSchemaManager().getDatabaseNameByDevice(deviceID); + plan.setDatabase(databaseName); + SeriesPartitionKey seriesPartitionKey = + configManager.getSeriesPartitionKey(deviceID, databaseName); + plan.setSeriesSlotId(executor.getSeriesPartitionSlot(seriesPartitionKey)); if (Objects.equals(plan.getDatabase(), "")) { // Return empty result if Database not specified return new 
GetTimeSlotListResp(RpcUtils.SUCCESS_STATUS, new ArrayList<>()); @@ -1218,8 +1225,11 @@ public CountTimeSlotListResp countTimeSlotList(TCountTimeSlotListReq req) { } else if (req.isSetDevice()) { IDeviceID deviceID = Deserializer.DEFAULT_DESERIALIZER.deserializeFrom(ByteBuffer.wrap(req.getDevice())); - plan.setDatabase(getClusterSchemaManager().getDatabaseNameByDevice(deviceID)); - plan.setSeriesSlotId(executor.getSeriesPartitionSlot(deviceID)); + String databaseName = getClusterSchemaManager().getDatabaseNameByDevice(deviceID); + plan.setDatabase(databaseName); + SeriesPartitionKey seriesPartitionKey = + configManager.getSeriesPartitionKey(deviceID, databaseName); + plan.setSeriesSlotId(executor.getSeriesPartitionSlot(seriesPartitionKey)); if (Objects.equals(plan.getDatabase(), "")) { // Return empty result if Database not specified return new CountTimeSlotListResp(RpcUtils.SUCCESS_STATUS, 0); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java index 490370e38c31a..93910947a356e 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java @@ -1486,6 +1486,17 @@ public synchronized Pair tableCheckForRenaming( null); } + if (!originalTable.canAlterName()) { + return new Pair<>( + RpcUtils.getStatus( + TSStatusCode.SEMANTIC_ERROR, + String.format( + "Table '%s.%s' is created in a old version and cannot be renamed, " + + "please migrate its data to a new table manually", + database, tableName)), + null); + } + final Optional> result = checkTable4View(database, originalTable, isTableView); if (result.isPresent()) { diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java index d0fbdb605d12f..e882ffb59733d 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java @@ -129,6 +129,10 @@ protected void checkTableExistence(final ConfigNodeProcedureEnv env) { && schema.getTTL() != Long.MAX_VALUE) { table.addProp(TsTable.TTL_PROPERTY, String.valueOf(schema.getTTL())); } + if (!table.getPropValue(TsTable.ALLOW_ALTER_NAME_PROPERTY).isPresent()) { + table.addProp( + TsTable.ALLOW_ALTER_NAME_PROPERTY, String.valueOf(TsTable.ALLOW_ALTER_NAME_DEFAULT)); + } setNextState(CreateTableState.PRE_CREATE); } } catch (final MetadataException | DatabaseNotExistsException e) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java index da51dad267266..9bcb651dd424f 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java @@ -19,10 +19,13 @@ package org.apache.iotdb.confignode.procedure.impl.schema.table; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.MetadataException; import 
org.apache.iotdb.commons.schema.table.TsTable; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.consensus.request.write.table.RenameTableColumnPlan; import org.apache.iotdb.confignode.consensus.request.write.table.view.RenameViewColumnPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; @@ -30,9 +33,13 @@ import org.apache.iotdb.confignode.procedure.impl.schema.table.view.RenameViewColumnProcedure; import org.apache.iotdb.confignode.procedure.state.schema.RenameTableColumnState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.ColumnRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.mpp.rpc.thrift.TDataRegionEvolveSchemaReq; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.tsfile.utils.Pair; +import org.apache.tsfile.utils.PublicBAOS; import org.apache.tsfile.utils.ReadWriteIOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,6 +47,10 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; import java.util.Objects; public class RenameTableColumnProcedure @@ -84,6 +95,10 @@ protected Flow executeFromState( LOGGER.info("Rename column to table {}.{} on config node", database, tableName); renameColumn(env); break; + case EXECUTE_ON_REGION: + LOGGER.info("Rename column to table {}.{} on data regions", database, tableName); + executeOnRegions(env); + break; case COMMIT_RELEASE: LOGGER.info( "Commit release info of table {}.{} when renaming column", database, tableName); @@ -141,10 +156,37 @@ private void renameColumn(final ConfigNodeProcedureEnv env) { if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { setFailure(new ProcedureException(new 
IoTDBException(status))); } else { - setNextState(RenameTableColumnState.COMMIT_RELEASE); + setNextState(RenameTableColumnState.EXECUTE_ON_REGION); } } + private void executeOnRegions(final ConfigNodeProcedureEnv env) { + final Map relatedRegionGroup = + env.getConfigManager().getRelatedDataRegionGroup4TableModel(database); + + if (!relatedRegionGroup.isEmpty()) { + List schemaEvolutions = + Collections.singletonList(new ColumnRename(tableName, oldName, newName)); + PublicBAOS publicBAOS = new PublicBAOS(); + try { + SchemaEvolution.serializeList(schemaEvolutions, publicBAOS); + } catch (IOException ignored) { + } + ByteBuffer byteBuffer = ByteBuffer.wrap(publicBAOS.getBuf(), 0, publicBAOS.size()); + new TableRegionTaskExecutor<>( + "evolve data region schema", + env, + relatedRegionGroup, + CnToDnAsyncRequestType.EVOLVE_DATA_REGION_SCHEMA, + ((dataNodeLocation, consensusGroupIdList) -> + new TDataRegionEvolveSchemaReq( + new ArrayList<>(consensusGroupIdList), byteBuffer))) + .execute(); + } + + setNextState(RenameTableColumnState.COMMIT_RELEASE); + } + @Override protected void rollbackState(final ConfigNodeProcedureEnv env, final RenameTableColumnState state) throws IOException, InterruptedException, ProcedureException { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java index 93d1035a7615c..bf91e168fe363 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java @@ -19,10 +19,13 @@ package org.apache.iotdb.confignode.procedure.impl.schema.table; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import 
org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.schema.table.TsTable; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.consensus.request.write.table.RenameTablePlan; import org.apache.iotdb.confignode.consensus.request.write.table.view.RenameViewPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; @@ -30,9 +33,14 @@ import org.apache.iotdb.confignode.procedure.impl.schema.table.view.RenameViewProcedure; import org.apache.iotdb.confignode.procedure.state.schema.RenameTableState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; +import org.apache.iotdb.mpp.rpc.thrift.TDataRegionEvolveSchemaReq; +import org.apache.iotdb.mpp.rpc.thrift.TSchemaRegionEvolveSchemaReq; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.tsfile.utils.Pair; +import org.apache.tsfile.utils.PublicBAOS; import org.apache.tsfile.utils.ReadWriteIOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,6 +48,10 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; public class RenameTableProcedure extends AbstractAlterOrDropTableProcedure { private static final Logger LOGGER = LoggerFactory.getLogger(RenameTableProcedure.class); @@ -74,9 +86,13 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final RenameTa preRelease(env); break; case RENAME_TABLE: - LOGGER.info("Rename column to table {}.{} on config node", database, tableName); + LOGGER.info("Rename table {}.{} on config node", database, 
tableName); renameTable(env); break; + case EXECUTE_ON_REGIONS: + LOGGER.info("Rename table {}.{} on regions", database, tableName); + executeOnRegions(env); + break; case COMMIT_RELEASE: LOGGER.info( "Commit release info of table {}.{} when renaming table", database, tableName); @@ -134,8 +150,58 @@ private void renameTable(final ConfigNodeProcedureEnv env) { if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { setFailure(new ProcedureException(new IoTDBException(status))); } else { - setNextState(RenameTableState.COMMIT_RELEASE); + setNextState(RenameTableState.EXECUTE_ON_REGIONS); + } + } + + private void executeOnRegions(final ConfigNodeProcedureEnv env) { + final Map relatedDataRegionGroup = + env.getConfigManager().getRelatedDataRegionGroup4TableModel(database); + + if (!relatedDataRegionGroup.isEmpty()) { + List schemaEvolutions = + Collections.singletonList(new TableRename(tableName, newName)); + PublicBAOS publicBAOS = new PublicBAOS(); + try { + SchemaEvolution.serializeList(schemaEvolutions, publicBAOS); + } catch (IOException ignored) { + } + ByteBuffer byteBuffer = ByteBuffer.wrap(publicBAOS.getBuf(), 0, publicBAOS.size()); + new TableRegionTaskExecutor<>( + "evolve data region schema", + env, + relatedDataRegionGroup, + CnToDnAsyncRequestType.EVOLVE_DATA_REGION_SCHEMA, + ((dataNodeLocation, consensusGroupIdList) -> + new TDataRegionEvolveSchemaReq( + new ArrayList<>(consensusGroupIdList), byteBuffer))) + .execute(); } + + final Map relatedSchemaRegionGroup = + env.getConfigManager().getRelatedSchemaRegionGroup4TableModel(database); + + if (!relatedSchemaRegionGroup.isEmpty()) { + List schemaEvolutions = + Collections.singletonList(new TableRename(tableName, newName)); + PublicBAOS publicBAOS = new PublicBAOS(); + try { + SchemaEvolution.serializeList(schemaEvolutions, publicBAOS); + } catch (IOException ignored) { + } + ByteBuffer byteBuffer = ByteBuffer.wrap(publicBAOS.getBuf(), 0, publicBAOS.size()); + new 
TableRegionTaskExecutor<>( + "evolve schema region schema", + env, + relatedSchemaRegionGroup, + CnToDnAsyncRequestType.EVOLVE_SCHEMA_REGION_SCHEMA, + ((dataNodeLocation, consensusGroupIdList) -> + new TSchemaRegionEvolveSchemaReq( + new ArrayList<>(consensusGroupIdList), byteBuffer))) + .execute(); + } + + setNextState(RenameTableState.COMMIT_RELEASE); } @Override diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableColumnState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableColumnState.java index 398ef64222440..83428eadd6d3e 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableColumnState.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableColumnState.java @@ -23,5 +23,6 @@ public enum RenameTableColumnState { COLUMN_CHECK, PRE_RELEASE, RENAME_COLUMN, + EXECUTE_ON_REGION, COMMIT_RELEASE } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableState.java index 1c71cb50182ed..6f9e2c295588c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableState.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableState.java @@ -23,5 +23,6 @@ public enum RenameTableState { COLUMN_CHECK, PRE_RELEASE, RENAME_TABLE, + EXECUTE_ON_REGIONS, COMMIT_RELEASE } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java index 628cc02524c5d..3cd19850cb306 100644 --- 
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java @@ -18,6 +18,7 @@ */ package org.apache.iotdb.confignode.manager.hash; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.partition.PartitionManager; import org.apache.iotdb.confignode.persistence.partition.PartitionInfo; @@ -74,7 +75,7 @@ public void GeneralIndexTest() throws IOException { List devices = genBatchDevices(); totalTime -= System.currentTimeMillis(); for (IDeviceID device : devices) { - bucket[manager.getSeriesPartitionSlot(device).getSlotId()] += 1; + bucket[manager.getSeriesPartitionSlot(new FullDeviceIdKey(device)).getSlotId()] += 1; } totalTime += System.currentTimeMillis(); } diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java index bb0326d7473e7..9bfd48cea9b72 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java @@ -106,7 +106,7 @@ public void onError(Exception exception) { ++retryCount; Throwable rootCause = ExceptionUtils.getRootCause(exception); logger.warn( - "Can not send {} to peer for {} times {} because {}", + "v {} to peer for {} times {} because {}", batch, thread.getPeer(), retryCount, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceConfig.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceConfig.java index 64c0f65fe3034..12f70f9ce31da 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceConfig.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceConfig.java @@ -22,7 +22,7 @@ public class IoTDBRestServiceConfig { /** If the enableRestService is true, we will start REST Service. */ - private boolean enableRestService = false; + private boolean enableRestService = true; /** Set the REST Service port. */ private int restServicePort = 18080; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java index ca81365846794..e82f98f531c8a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java @@ -34,6 +34,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedDeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedInsertNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertMultiTabletsNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; @@ -287,6 +288,17 @@ public TSStatus visitDeleteData( } } + @Override + public TSStatus visitEvolveSchemaNode(EvolveSchemaNode node, DataRegion dataRegion) { + try { + dataRegion.applySchemaEvolution(node.getSchemaEvolutions()); + return StatusUtils.OK; + } catch (final IOException e) { + LOGGER.error("Error in executing plan node: {}", node, e); + return new 
TSStatus(TSStatusCode.WRITE_PROCESS_ERROR.getStatusCode()); + } + } + @Override public TSStatus visitPipeEnrichedDeleteDataNode( final PipeEnrichedDeleteDataNode node, final DataRegion context) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java index 261b4908a9061..63747b6c922ed 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java @@ -61,6 +61,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedNonWritePlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedWritePlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeOperateSchemaQueueNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.DeleteTableDeviceNode; @@ -854,6 +855,17 @@ public TSStatus visitPipeOperateSchemaQueueNode( return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } + @Override + public TSStatus visitEvolveSchemaNode(EvolveSchemaNode node, ISchemaRegion schemaRegion) { + try { + schemaRegion.applySchemaEvolution(node); + } catch (MetadataException e) { + logMetaDataException(e); + return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); + } + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + @Override public TSStatus visitPlan(final PlanNode node, 
final ISchemaRegion context) { return null; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java index 46d377be0a1bd..3f9cded33b5db 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java @@ -161,8 +161,10 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.DeleteLogicalViewNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.RollbackLogicalViewBlackListNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedDeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedEvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedNonWritePlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalDeleteDataNode; import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.TableDeviceSchemaFetcher; import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.TableDeviceSchemaCache; @@ -197,8 +199,9 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.settle.SettleRequestHandler; import org.apache.iotdb.db.storageengine.dataregion.flush.CompressionRatio; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import 
org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; import org.apache.iotdb.db.storageengine.rescon.quotas.DataNodeSpaceQuotaManager; import org.apache.iotdb.db.storageengine.rescon.quotas.DataNodeThrottleQuotaManager; import org.apache.iotdb.db.subscription.agent.SubscriptionAgent; @@ -238,6 +241,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TCreateTriggerInstanceReq; import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatReq; import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatResp; +import org.apache.iotdb.mpp.rpc.thrift.TDataRegionEvolveSchemaReq; import org.apache.iotdb.mpp.rpc.thrift.TDeactivateTemplateReq; import org.apache.iotdb.mpp.rpc.thrift.TDeleteColumnDataReq; import org.apache.iotdb.mpp.rpc.thrift.TDeleteDataForDeleteSchemaReq; @@ -294,6 +298,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TRollbackViewSchemaBlackListReq; import org.apache.iotdb.mpp.rpc.thrift.TSchemaFetchRequest; import org.apache.iotdb.mpp.rpc.thrift.TSchemaFetchResponse; +import org.apache.iotdb.mpp.rpc.thrift.TSchemaRegionEvolveSchemaReq; import org.apache.iotdb.mpp.rpc.thrift.TSendBatchPlanNodeReq; import org.apache.iotdb.mpp.rpc.thrift.TSendBatchPlanNodeResp; import org.apache.iotdb.mpp.rpc.thrift.TSendFragmentInstanceReq; @@ -793,6 +798,44 @@ public TSStatus deleteDataForDeleteSchema(final TDeleteDataForDeleteSchemaReq re .getStatus()); } + @Override + public TSStatus evolveSchemaInDataRegion(final TDataRegionEvolveSchemaReq req) { + final List schemaEvolutions = + SchemaEvolution.createListFrom(req.schemaEvolutions); + return executeInternalSchemaTask( + req.getDataRegionIdList(), + consensusGroupId -> + new RegionWriteExecutor() + .execute( + new DataRegionId(consensusGroupId.getId()), + // Now the deletion plan may be re-collected here by pipe, resulting multiple + // transfer to delete time series plan. Now just ignore. 
+ req.isSetIsGeneratedByPipe() && req.isIsGeneratedByPipe() + ? new PipeEnrichedEvolveSchemaNode( + new EvolveSchemaNode(new PlanNodeId(""), schemaEvolutions)) + : new EvolveSchemaNode(new PlanNodeId(""), schemaEvolutions)) + .getStatus()); + } + + @Override + public TSStatus evolveSchemaInSchemaRegion(final TSchemaRegionEvolveSchemaReq req) { + final List schemaEvolutions = + SchemaEvolution.createListFrom(req.schemaEvolutions); + return executeInternalSchemaTask( + req.getSchemaRegionIdList(), + consensusGroupId -> + new RegionWriteExecutor() + .execute( + new SchemaRegionId(consensusGroupId.getId()), + // Now the deletion plan may be re-collected here by pipe, resulting multiple + // transfer to delete time series plan. Now just ignore. + req.isSetIsGeneratedByPipe() && req.isIsGeneratedByPipe() + ? new PipeEnrichedEvolveSchemaNode( + new EvolveSchemaNode(new PlanNodeId(""), schemaEvolutions)) + : new EvolveSchemaNode(new PlanNodeId(""), schemaEvolutions)) + .getStatus()); + } + @Override public TSStatus deleteTimeSeries(final TDeleteTimeSeriesReq req) throws TException { final PathPatternTree patternTree = @@ -1998,7 +2041,7 @@ public TSStatus deleteColumnData(final TDeleteColumnDataReq req) { new TableDeletionEntry( new DeletionPredicate( req.getTableName(), - new IDPredicate.NOP(), + new TagPredicate.NOP(), Collections.singletonList(req.getColumnName())), new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)), // the request is only sent to associated region diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesScanUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesScanUtil.java index f7ddaee472ea9..f17c01d2820fd 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesScanUtil.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesScanUtil.java @@ -62,6 +62,15 @@ public AlignedSeriesScanUtil( this(seriesPath, scanOrder, scanOptions, context, false, null); } + public AlignedSeriesScanUtil( + AlignedFullPath seriesPath, + Ordering scanOrder, + SeriesScanOptions scanOptions, + FragmentInstanceContext context, + long maxTsFileSetEndVersion) { + this(seriesPath, scanOrder, scanOptions, context, false, null, maxTsFileSetEndVersion); + } + public AlignedSeriesScanUtil( AlignedFullPath seriesPath, Ordering scanOrder, @@ -69,7 +78,25 @@ public AlignedSeriesScanUtil( FragmentInstanceContext context, boolean queryAllSensors, List givenDataTypes) { - super(seriesPath, scanOrder, scanOptions, context); + this( + seriesPath, + scanOrder, + scanOptions, + context, + queryAllSensors, + givenDataTypes, + Long.MAX_VALUE); + } + + public AlignedSeriesScanUtil( + AlignedFullPath seriesPath, + Ordering scanOrder, + SeriesScanOptions scanOptions, + FragmentInstanceContext context, + boolean queryAllSensors, + List givenDataTypes, + long maxTsFileSetEndVersion) { + super(seriesPath, scanOrder, scanOptions, context, maxTsFileSetEndVersion); isAligned = true; this.dataTypes = givenDataTypes != null @@ -100,7 +127,8 @@ protected AbstractAlignedTimeSeriesMetadata loadTimeSeriesMetadata( context, scanOptions.getGlobalTimeFilter(), isSeq, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersion); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java index 70f3ce5e0e64e..dc79dc560cca0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java @@ -35,6 +35,7 @@ import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.MemAlignedChunkMetadataLoader; import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.MemChunkMetadataLoader; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ITimeIndex; import org.apache.iotdb.db.utils.ModificationUtils; import org.apache.iotdb.db.utils.SchemaUtils; @@ -88,7 +89,8 @@ public static TimeseriesMetadata loadTimeSeriesMetadata( FragmentInstanceContext context, Filter globalTimeFilter, Set allSensors, - boolean isSeq) + boolean isSeq, + long maxTsFileSetEndVersion) throws IOException { long t1 = System.nanoTime(); boolean loadFromMem = false; @@ -99,14 +101,20 @@ public static TimeseriesMetadata loadTimeSeriesMetadata( if (resource.isClosed()) { // when resource.getTimeIndexType() == 1, TsFileResource.timeIndexType is deviceTimeIndex // we should not ignore the non-exist of device in TsFileMetadata + EvolvedSchema evolvedSchema = resource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + IDeviceID deviceId = seriesPath.getDeviceId(); + String measurement = seriesPath.getMeasurement(); + if (evolvedSchema != null) { + measurement = evolvedSchema.getOriginalColumnName(deviceId.getTableName(), measurement); + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } + timeSeriesMetadata = TimeSeriesMetadataCache.getInstance() .get( resource.getTsFilePath(), new TimeSeriesMetadataCache.TimeSeriesMetadataCacheKey( - resource.getTsFileID(), - seriesPath.getDeviceId(), - seriesPath.getMeasurement()), + resource.getTsFileID(), deviceId, measurement), allSensors, context.ignoreNotExistsDevice() || resource.getTimeIndexType() == 
ITimeIndex.FILE_TIME_INDEX_TYPE, @@ -116,8 +124,7 @@ public static TimeseriesMetadata loadTimeSeriesMetadata( SchemaUtils.changeMetadataModified(timeSeriesMetadata, seriesPath.getSeriesType()); long t2 = System.nanoTime(); List pathModifications = - context.getPathModifications( - resource, seriesPath.getDeviceId(), seriesPath.getMeasurement()); + context.getPathModifications(resource, deviceId, measurement); timeSeriesMetadata.setModified( timeSeriesMetadata.isModified() || !pathModifications.isEmpty()); timeSeriesMetadata.setChunkMetadataLoader( @@ -194,7 +201,8 @@ public static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadata( FragmentInstanceContext context, Filter globalTimeFilter, boolean isSeq, - boolean ignoreAllNullRows) + boolean ignoreAllNullRows, + long maxTsFileSetEndVersion) throws IOException { final long t1 = System.nanoTime(); boolean loadFromMem = false; @@ -208,7 +216,12 @@ public static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadata( if (resource.isClosed()) { alignedTimeSeriesMetadata = loadAlignedTimeSeriesMetadataFromDisk( - resource, alignedPath, context, globalTimeFilter, ignoreAllNullRows); + resource, + alignedPath, + context, + globalTimeFilter, + ignoreAllNullRows, + maxTsFileSetEndVersion); SchemaUtils.changeAlignedMetadataModified(alignedTimeSeriesMetadata, targetDataTypeList); } else { // if the tsfile is unclosed, we just get it directly from TsFileResource loadFromMem = true; @@ -277,7 +290,8 @@ private static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFr AlignedFullPath alignedPath, FragmentInstanceContext context, Filter globalTimeFilter, - boolean ignoreAllNullRows) + boolean ignoreAllNullRows, + long maxTsFileSetEndVersion) throws IOException { AbstractAlignedTimeSeriesMetadata alignedTimeSeriesMetadata = null; // load all the TimeseriesMetadata of vector, the first one is for time column and the @@ -290,6 +304,20 @@ private static AbstractAlignedTimeSeriesMetadata 
loadAlignedTimeSeriesMetadataFr String filePath = resource.getTsFilePath(); IDeviceID deviceId = alignedPath.getDeviceId(); + EvolvedSchema evolvedSchema = resource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + IDeviceID finalDeviceId = deviceId; + valueMeasurementList = + valueMeasurementList.stream() + .map(m -> evolvedSchema.getOriginalColumnName(finalDeviceId.getTableName(), m)) + .collect(Collectors.toList()); + allSensors = + allSensors.stream() + .map(m -> evolvedSchema.getOriginalColumnName(finalDeviceId.getTableName(), m)) + .collect(Collectors.toSet()); + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } + // when resource.getTimeIndexType() == 1, TsFileResource.timeIndexType is deviceTimeIndex // we should not ignore the non-exist of device in TsFileMetadata TimeseriesMetadata timeColumn = @@ -311,7 +339,7 @@ private static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFr resource, timeColumn, Collections.emptyList(), - alignedPath, + deviceId, context, globalTimeFilter, false); @@ -346,7 +374,7 @@ private static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFr resource, timeColumn, valueTimeSeriesMetadataList, - alignedPath, + deviceId, context, globalTimeFilter, ignoreAllNullRows); @@ -360,7 +388,7 @@ private static AbstractAlignedTimeSeriesMetadata setModifications( TsFileResource resource, TimeseriesMetadata timeColumnMetadata, List valueColumnMetadataList, - AlignedFullPath alignedPath, + IDeviceID deviceID, QueryContext context, Filter globalTimeFilter, boolean ignoreAllNullRows) { @@ -368,8 +396,7 @@ private static AbstractAlignedTimeSeriesMetadata setModifications( // deal with time column List timeModifications = - context.getPathModifications( - resource, alignedPath.getDeviceId(), timeColumnMetadata.getMeasurementId()); + context.getPathModifications(resource, deviceID, timeColumnMetadata.getMeasurementId()); // all rows are deleted, just return null to skip device 
data in this file if (ModificationUtils.isAllDeletedByMods( timeModifications, @@ -392,7 +419,7 @@ private static AbstractAlignedTimeSeriesMetadata setModifications( if (valueColumnMetadata != null) { List modifications = context.getPathModifications( - resource, alignedPath.getDeviceId(), valueColumnMetadata.getMeasurementId()); + resource, deviceID, valueColumnMetadata.getMeasurementId()); valueColumnMetadata.setModified(!modifications.isEmpty()); valueColumnsModifications.add(modifications); modified = (modified || !modifications.isEmpty()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java index a44a43d0f17a7..68e2cf82ae6bc 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java @@ -146,6 +146,9 @@ public class SeriesScanUtil implements Accountable { protected final int MAX_NUMBER_OF_POINTS_IN_PAGE = TSFileDescriptor.getInstance().getConfig().getMaxNumberOfPointsInPage(); + // to restrict the scope of sevo files for compaction + protected final long maxTsFileSetEndVersion; + private static final long INSTANCE_SIZE = RamUsageEstimator.shallowSizeOfInstance(SeriesScanUtil.class) + RamUsageEstimator.shallowSizeOfInstance(IDeviceID.class) @@ -161,6 +164,15 @@ public SeriesScanUtil( Ordering scanOrder, SeriesScanOptions scanOptions, FragmentInstanceContext context) { + this(seriesPath, scanOrder, scanOptions, context, Long.MAX_VALUE); + } + + public SeriesScanUtil( + IFullPath seriesPath, + Ordering scanOrder, + SeriesScanOptions scanOptions, + FragmentInstanceContext context, + long maxTsFileSetEndVersion) { this.seriesPath = seriesPath; this.deviceID = seriesPath.getDeviceId(); this.dataType = 
seriesPath.getSeriesType(); @@ -198,6 +210,8 @@ public SeriesScanUtil( new PriorityQueue<>( orderUtils.comparingLong( versionPageReader -> orderUtils.getOrderTime(versionPageReader.getStatistics()))); + + this.maxTsFileSetEndVersion = maxTsFileSetEndVersion; } /** @@ -206,7 +220,7 @@ public SeriesScanUtil( * @param dataSource the query data source */ public void initQueryDataSource(QueryDataSource dataSource) { - dataSource.fillOrderIndexes(deviceID, orderUtils.getAscending()); + dataSource.fillOrderIndexes(deviceID, orderUtils.getAscending(), maxTsFileSetEndVersion); this.dataSource = dataSource; // updated filter concerning TTL @@ -1935,7 +1949,8 @@ protected ITimeSeriesMetadata loadTimeSeriesMetadata(TsFileResource resource, bo context, scanOptions.getGlobalTimeFilter(), scanOptions.getAllSensors(), - isSeq); + isSeq, + maxTsFileSetEndVersion); } public List getTsDataTypeList() { @@ -2352,26 +2367,38 @@ public Ordering getScanOrder() { @Override public boolean hasNextSeqResource() { - while (dataSource.hasNextSeqResource(curSeqFileIndex, false, deviceID)) { + while (dataSource.hasNextSeqResource( + curSeqFileIndex, false, deviceID, maxTsFileSetEndVersion)) { if (dataSource.isSeqSatisfied( - deviceID, curSeqFileIndex, scanOptions.getGlobalTimeFilter(), false)) { + deviceID, + curSeqFileIndex, + scanOptions.getGlobalTimeFilter(), + false, + maxTsFileSetEndVersion)) { break; } curSeqFileIndex--; } - return dataSource.hasNextSeqResource(curSeqFileIndex, false, deviceID); + return dataSource.hasNextSeqResource( + curSeqFileIndex, false, deviceID, maxTsFileSetEndVersion); } @Override public boolean hasNextUnseqResource() { - while (dataSource.hasNextUnseqResource(curUnseqFileIndex, false, deviceID)) { + while (dataSource.hasNextUnseqResource( + curUnseqFileIndex, false, deviceID, maxTsFileSetEndVersion)) { if (dataSource.isUnSeqSatisfied( - deviceID, curUnseqFileIndex, scanOptions.getGlobalTimeFilter(), false)) { + deviceID, + curUnseqFileIndex, + 
scanOptions.getGlobalTimeFilter(), + false, + maxTsFileSetEndVersion)) { break; } curUnseqFileIndex++; } - return dataSource.hasNextUnseqResource(curUnseqFileIndex, false, deviceID); + return dataSource.hasNextUnseqResource( + curUnseqFileIndex, false, deviceID, maxTsFileSetEndVersion); } @Override @@ -2481,26 +2508,37 @@ public Ordering getScanOrder() { @Override public boolean hasNextSeqResource() { - while (dataSource.hasNextSeqResource(curSeqFileIndex, true, deviceID)) { + while (dataSource.hasNextSeqResource( + curSeqFileIndex, true, deviceID, maxTsFileSetEndVersion)) { if (dataSource.isSeqSatisfied( - deviceID, curSeqFileIndex, scanOptions.getGlobalTimeFilter(), false)) { + deviceID, + curSeqFileIndex, + scanOptions.getGlobalTimeFilter(), + false, + maxTsFileSetEndVersion)) { break; } curSeqFileIndex++; } - return dataSource.hasNextSeqResource(curSeqFileIndex, true, deviceID); + return dataSource.hasNextSeqResource(curSeqFileIndex, true, deviceID, maxTsFileSetEndVersion); } @Override public boolean hasNextUnseqResource() { - while (dataSource.hasNextUnseqResource(curUnseqFileIndex, true, deviceID)) { + while (dataSource.hasNextUnseqResource( + curUnseqFileIndex, true, deviceID, maxTsFileSetEndVersion)) { if (dataSource.isUnSeqSatisfied( - deviceID, curUnseqFileIndex, scanOptions.getGlobalTimeFilter(), false)) { + deviceID, + curUnseqFileIndex, + scanOptions.getGlobalTimeFilter(), + false, + maxTsFileSetEndVersion)) { break; } curUnseqFileIndex++; } - return dataSource.hasNextUnseqResource(curUnseqFileIndex, true, deviceID); + return dataSource.hasNextUnseqResource( + curUnseqFileIndex, true, deviceID, maxTsFileSetEndVersion); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java index 2e0b0b52fdc09..41df1928112b8 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java @@ -53,10 +53,10 @@ import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; import org.apache.iotdb.db.schemaengine.table.DataNodeTreeViewSchemaUtils; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.And; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.SegmentExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.And; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.SegmentExactMatch; import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; @@ -436,23 +436,23 @@ private static TableDeletionEntry parsePredicate(Expression expression, TsTable Queue expressionQueue = new LinkedList<>(); expressionQueue.add(expression); DeletionPredicate predicate = new DeletionPredicate(table.getTableName()); - IDPredicate idPredicate = null; + TagPredicate tagPredicate = null; TimeRange timeRange = new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE, true); while (!expressionQueue.isEmpty()) { Expression currExp = expressionQueue.remove(); if (currExp instanceof LogicalExpression) { parseAndPredicate(((LogicalExpression) currExp), expressionQueue); } else if (currExp instanceof ComparisonExpression) { - idPredicate = - parseComparison(((ComparisonExpression) currExp), timeRange, idPredicate, table); + tagPredicate = + parseComparison(((ComparisonExpression) currExp), timeRange, tagPredicate, table); } else if 
(currExp instanceof IsNullPredicate) { - idPredicate = parseIsNull((IsNullPredicate) currExp, idPredicate, table); + tagPredicate = parseIsNull((IsNullPredicate) currExp, tagPredicate, table); } else { throw new SemanticException("Unsupported expression: " + currExp + " in " + expression); } } - if (idPredicate != null) { - predicate.setIdPredicate(idPredicate); + if (tagPredicate != null) { + predicate.setIdPredicate(tagPredicate); } if (timeRange.getStartTime() > timeRange.getEndTime()) { throw new SemanticException( @@ -472,8 +472,8 @@ private static void parseAndPredicate( expressionQueue.addAll(expression.getTerms()); } - private static IDPredicate parseIsNull( - IsNullPredicate isNullPredicate, IDPredicate oldPredicate, TsTable table) { + private static TagPredicate parseIsNull( + IsNullPredicate isNullPredicate, TagPredicate oldPredicate, TsTable table) { Expression leftHandExp = isNullPredicate.getValue(); if (!(leftHandExp instanceof Identifier)) { throw new SemanticException("Left hand expression is not an identifier: " + leftHandExp); @@ -486,25 +486,26 @@ private static IDPredicate parseIsNull( } // the first segment is the table name, so + 1 - IDPredicate newPredicate = new SegmentExactMatch(null, idColumnOrdinal + 1); + TagPredicate newPredicate = new SegmentExactMatch(null, idColumnOrdinal + 1); return combinePredicates(oldPredicate, newPredicate); } - private static IDPredicate combinePredicates(IDPredicate oldPredicate, IDPredicate newPredicate) { + private static TagPredicate combinePredicates( + TagPredicate oldPredicate, TagPredicate newPredicate) { if (oldPredicate == null) { return newPredicate; } - if (oldPredicate instanceof IDPredicate.And) { + if (oldPredicate instanceof TagPredicate.And) { ((And) oldPredicate).add(newPredicate); return oldPredicate; } - return new IDPredicate.And(oldPredicate, newPredicate); + return new TagPredicate.And(oldPredicate, newPredicate); } - private static IDPredicate parseComparison( + private static 
TagPredicate parseComparison( ComparisonExpression comparisonExpression, TimeRange timeRange, - IDPredicate oldPredicate, + TagPredicate oldPredicate, TsTable table) { Expression left = comparisonExpression.getLeft(); Expression right = comparisonExpression.getRight(); @@ -556,11 +557,11 @@ private static IDPredicate parseComparison( "The column '" + columnName + "' does not exist or is not a tag column"); } - IDPredicate newPredicate = getIdPredicate(comparisonExpression, right, idColumnOrdinal); + TagPredicate newPredicate = getIdPredicate(comparisonExpression, right, idColumnOrdinal); return combinePredicates(oldPredicate, newPredicate); } - private static IDPredicate getIdPredicate( + private static TagPredicate getIdPredicate( ComparisonExpression comparisonExpression, Expression right, int idColumnOrdinal) { if (comparisonExpression.getOperator() != ComparisonExpression.Operator.EQUAL) { throw new SemanticException("The operator of tag predicate must be '=' for " + right); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java index 2274762341b5d..67db363a8f6af 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java @@ -47,6 +47,7 @@ import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager; import org.apache.iotdb.db.protocol.client.ConfigNodeInfo; import org.apache.iotdb.db.queryengine.plan.analyze.cache.partition.PartitionCache; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.mpp.rpc.thrift.TRegionRouteReq; import org.apache.iotdb.rpc.TSStatusCode; @@ -347,6 +348,7 @@ private SchemaPartition getOrCreateSchemaPartition( final List partitionSlots = Objects.nonNull(deviceIDs) ? 
deviceIDs.stream() + .map(deviceID -> CommonUtils.getSeriesPartitionKey(deviceID, database, false)) .map(partitionExecutor::getSeriesPartitionSlot) .distinct() .collect(Collectors.toList()) @@ -458,6 +460,7 @@ private TDataPartitionReq constructDataPartitionReq( final Map> partitionSlotsMap = new HashMap<>(); for (final Map.Entry> entry : sgNameToQueryParamsMap.entrySet()) { + String databaseName = entry.getKey(); // for each sg final Map deviceToTimePartitionMap = new HashMap<>(); @@ -467,7 +470,9 @@ private TDataPartitionReq constructDataPartitionReq( for (final DataPartitionQueryParam queryParam : entry.getValue()) { seriesSlotTimePartitionMap .computeIfAbsent( - partitionExecutor.getSeriesPartitionSlot(queryParam.getDeviceID()), + partitionExecutor.getSeriesPartitionSlot( + CommonUtils.getSeriesPartitionKey( + queryParam.getDeviceID(), databaseName, false)), k -> new ComplexTimeSlotList( queryParam.isNeedLeftAll(), queryParam.isNeedRightAll())) @@ -479,7 +484,7 @@ private TDataPartitionReq constructDataPartitionReq( k, new TTimeSlotList( new ArrayList<>(v.timeSlotList), v.needLeftAll, v.needRightAll))); - partitionSlotsMap.put(entry.getKey(), deviceToTimePartitionMap); + partitionSlotsMap.put(databaseName, deviceToTimePartitionMap); } return new TDataPartitionReq(partitionSlotsMap); } @@ -491,6 +496,7 @@ private TDataPartitionReq constructDataPartitionReqForQuery( TTimeSlotList sharedTTimeSlotList = null; for (final Map.Entry> entry : sgNameToQueryParamsMap.entrySet()) { + String databaseName = entry.getKey(); // for each sg final Map deviceToTimePartitionMap = new HashMap<>(); @@ -503,10 +509,11 @@ private TDataPartitionReq constructDataPartitionReqForQuery( queryParam.isNeedRightAll()); } deviceToTimePartitionMap.putIfAbsent( - partitionExecutor.getSeriesPartitionSlot(queryParam.getDeviceID()), + partitionExecutor.getSeriesPartitionSlot( + CommonUtils.getSeriesPartitionKey(queryParam.getDeviceID(), databaseName, false)), sharedTTimeSlotList); } - 
partitionSlotsMap.put(entry.getKey(), deviceToTimePartitionMap); + partitionSlotsMap.put(databaseName, deviceToTimePartitionMap); } return new TDataPartitionReq(partitionSlotsMap); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java index 3c76bd08c1d81..79f0c7d63ea4f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java @@ -60,6 +60,7 @@ import org.apache.iotdb.db.protocol.session.SessionManager; import org.apache.iotdb.db.schemaengine.schemaregion.utils.MetaUtils; import org.apache.iotdb.db.service.metrics.CacheMetrics; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.rpc.TSStatusCode; import com.github.benmanes.caffeine.cache.Cache; @@ -707,7 +708,8 @@ public SchemaPartition getSchemaPartition( List consensusGroupIds = new ArrayList<>(entry.getValue().size()); for (final IDeviceID device : entry.getValue()) { final TSeriesPartitionSlot seriesPartitionSlot = - partitionExecutor.getSeriesPartitionSlot(device); + partitionExecutor.getSeriesPartitionSlot( + CommonUtils.getSeriesPartitionKey(device, databaseName, true)); if (!map.containsKey(seriesPartitionSlot)) { // if one device not find, then return cache miss. 
if (logger.isDebugEnabled()) { @@ -874,7 +876,9 @@ public DataPartition getDataPartition( for (DataPartitionQueryParam param : params) { TSeriesPartitionSlot seriesPartitionSlot; if (null != param.getDeviceID()) { - seriesPartitionSlot = partitionExecutor.getSeriesPartitionSlot(param.getDeviceID()); + seriesPartitionSlot = + partitionExecutor.getSeriesPartitionSlot( + CommonUtils.getSeriesPartitionKey(param.getDeviceID(), databaseName, true)); } else { return null; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java index 8202de5c49947..9b2bf168f6668 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java @@ -41,6 +41,8 @@ import org.apache.iotdb.db.queryengine.plan.statement.crud.LoadTsFileStatement; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; import org.apache.iotdb.db.storageengine.dataregion.utils.TsFileResourceUtils; import org.apache.iotdb.db.storageengine.load.active.ActiveLoadPathHelper; import org.apache.iotdb.db.storageengine.load.converter.LoadTsFileDataTypeConverter; @@ -73,6 +75,7 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.stream.Collectors; import static org.apache.iotdb.db.queryengine.plan.execution.config.TableConfigTaskVisitor.DATABASE_NOT_SPECIFIED; import static org.apache.iotdb.db.storageengine.load.metrics.LoadTsFileCostMetricsSet.ANALYSIS; @@ -106,6 +109,8 
@@ public class LoadTsFileAnalyzer implements AutoCloseable { private boolean isMiniTsFileConverted = false; private final List isTableModelTsFile; private int isTableModelTsFileReliableIndex = -1; + private final File schemaEvolutionFile; + private EvolvedSchema evolvedSchema; // User specified configs private final int databaseLevel; @@ -134,6 +139,7 @@ public LoadTsFileAnalyzer( this.tsFiles = loadTsFileStatement.getTsFiles(); this.isMiniTsFile = new ArrayList<>(Collections.nCopies(this.tsFiles.size(), false)); this.isTableModelTsFile = new ArrayList<>(Collections.nCopies(this.tsFiles.size(), false)); + this.schemaEvolutionFile = loadTsFileStatement.getSchemaEvolutionFile(); this.databaseLevel = loadTsFileStatement.getDatabaseLevel(); this.databaseForTableData = loadTsFileStatement.getDatabase(); @@ -158,6 +164,7 @@ public LoadTsFileAnalyzer( this.tsFiles = loadTsFileTableStatement.getTsFiles(); this.isMiniTsFile = new ArrayList<>(Collections.nCopies(this.tsFiles.size(), false)); this.isTableModelTsFile = new ArrayList<>(Collections.nCopies(this.tsFiles.size(), false)); + this.schemaEvolutionFile = loadTsFileTableStatement.getSchemaEvolutionFile(); this.databaseLevel = loadTsFileTableStatement.getDatabaseLevel(); this.databaseForTableData = loadTsFileTableStatement.getDatabase(); @@ -200,6 +207,12 @@ public IAnalysis analyzeFileByFile(IAnalysis analysis) { } try { + if (schemaEvolutionFile != null && schemaEvolutionFile.exists()) { + SchemaEvolutionFile sevoFile = + new SchemaEvolutionFile(schemaEvolutionFile.getAbsolutePath()); + evolvedSchema = sevoFile.readAsSchema(); + } + if (!doAnalyzeFileByFile(analysis)) { return analysis; } @@ -526,7 +539,7 @@ private void doAnalyzeSingleTableFile( final File tsFile, final TsFileSequenceReader reader, final TsFileSequenceReaderTimeseriesMetadataIterator timeseriesMetadataIterator, - final Map tableSchemaMap) + Map tableSchemaMap) throws IOException, LoadAnalyzeException { // construct tsfile resource final 
TsFileResource tsFileResource = constructTsFileResource(reader, tsFile); @@ -550,13 +563,28 @@ private void doAnalyzeSingleTableFile( } getOrCreateTableSchemaCache().setDatabase(databaseForTableData); + if (evolvedSchema != null) { + tableSchemaMap = evolvedSchema.rewriteToFinal(tableSchemaMap); + } getOrCreateTableSchemaCache().setTableSchemaMap(tableSchemaMap); getOrCreateTableSchemaCache().setCurrentModificationsAndTimeIndex(tsFileResource, reader); while (timeseriesMetadataIterator.hasNext()) { - final Map> device2TimeseriesMetadata = + Map> device2TimeseriesMetadata = timeseriesMetadataIterator.next(); + if (evolvedSchema != null) { + device2TimeseriesMetadata = + device2TimeseriesMetadata.entrySet().stream() + .collect( + Collectors.toMap( + e -> evolvedSchema.rewriteToFinal(e.getKey()), + e -> { + evolvedSchema.rewriteToFinal(e.getKey().getTableName(), e.getValue()); + return e.getValue(); + })); + } + // Update time index no matter if resource file exists or not, because resource file may be // untrusted TsFileResourceUtils.updateTsFileResource( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java index a712e916d9e50..18e0860c71b18 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java @@ -145,6 +145,7 @@ import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.AlterDB; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.AlterPipe; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.AstVisitor; +import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.BooleanLiteral; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.ClearCache; 
import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.ColumnDefinition; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.CreateDB; @@ -276,7 +277,7 @@ import static org.apache.iotdb.commons.conf.IoTDBConstant.TTL_INFINITE; import static org.apache.iotdb.commons.executable.ExecutableManager.getUnTrustedUriErrorMsg; import static org.apache.iotdb.commons.executable.ExecutableManager.isUriTrusted; -import static org.apache.iotdb.commons.schema.table.TsTable.TABLE_ALLOWED_PROPERTIES; +import static org.apache.iotdb.commons.schema.table.TsTable.ALLOW_ALTER_NAME_PROPERTY; import static org.apache.iotdb.commons.schema.table.TsTable.TIME_COLUMN_NAME; import static org.apache.iotdb.commons.schema.table.TsTable.TTL_PROPERTY; import static org.apache.iotdb.db.queryengine.plan.execution.config.metadata.relational.CreateDBTask.DATA_REGION_GROUP_NUM_KEY; @@ -575,7 +576,11 @@ private Pair parseTable4CreateTableOrView( final TsTable table = new TsTable(tableName); - table.setProps(convertPropertiesToMap(node.getProperties(), false)); + Map properties = convertPropertiesToMap(node.getProperties(), false); + // new tables' names can be altered by default + properties.putIfAbsent( + ALLOW_ALTER_NAME_PROPERTY, String.valueOf(TsTable.ALLOW_ALTER_NAME_DEFAULT)); + table.setProps(properties); if (Objects.nonNull(node.getComment())) { table.addProp(TsTable.COMMENT_KEY, node.getComment()); } @@ -791,10 +796,15 @@ protected IConfigTask visitSetProperties( accessControl.checkCanAlterTable( context.getSession().getUserName(), new QualifiedObjectName(database, tableName), context); + Map properties = convertPropertiesToMap(node.getProperties(), true); + if (properties.containsKey(ALLOW_ALTER_NAME_PROPERTY)) { + throw new SemanticException( + "The property " + ALLOW_ALTER_NAME_PROPERTY + " cannot be altered."); + } return new AlterTableSetPropertiesTask( database, tableName, - convertPropertiesToMap(node.getProperties(), true), + properties, context.getQueryId().getId(), 
node.ifExists(), node.getType() == SetProperties.Type.TREE_VIEW); @@ -871,7 +881,7 @@ private Map convertPropertiesToMap( final Map map = new HashMap<>(); for (final Property property : propertyList) { final String key = property.getName().getValue().toLowerCase(Locale.ENGLISH); - if (TABLE_ALLOWED_PROPERTIES.contains(key)) { + if (TTL_PROPERTY.equals(key)) { if (!property.isSetToDefault()) { final Expression value = property.getNonDefaultValue(); final Optional strValue = parseStringFromLiteralIfBinary(value); @@ -888,6 +898,27 @@ private Map convertPropertiesToMap( } else if (serializeDefault) { map.put(key, null); } + } else if (ALLOW_ALTER_NAME_PROPERTY.equals(key)) { + if (property.isSetToDefault()) { + // no such property, the table is from an older version and its table name + // cannot be altered + map.put(key, "false"); + } else { + Expression value = property.getNonDefaultValue(); + final Optional strValue = parseStringFromLiteralIfBinary(value); + if (strValue.isPresent()) { + try { + boolean ignored = Boolean.parseBoolean(strValue.get()); + } catch (Exception e) { + throw new SemanticException( + ALLOW_ALTER_NAME_PROPERTY + " value must be a boolean, but now is: " + value); + } + map.put(key, strValue.get()); + continue; + } + // TODO: support validation for other properties + map.put(key, String.valueOf(parseBooleanFromLiteral(value, ALLOW_ALTER_NAME_PROPERTY))); + } } else { throw new SemanticException("Table property '" + key + "' is currently not allowed."); } @@ -1085,6 +1116,18 @@ private long parseLongFromLiteral(final Object value, final String name) { return parsedValue; } + private boolean parseBooleanFromLiteral(final Object value, final String name) { + if (!(value instanceof BooleanLiteral)) { + throw new SemanticException( + name + + " value must be a BooleanLiteral, but now is " + + (Objects.nonNull(value) ? 
value.getClass().getSimpleName() : null) + + ", value: " + + value); + } + return ((BooleanLiteral) value).getParsedValue(); + } + private int parseIntFromLiteral(final Object value, final String name) { if (!(value instanceof LongLiteral)) { throw new SemanticException( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java index 2274869c35543..cd5273616714b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java @@ -548,7 +548,8 @@ public PlanNode visitLoadFile( context.getQueryId().genPlanNodeId(), loadTsFileStatement.getResources(), isTableModel, - loadTsFileStatement.getDatabase()); + loadTsFileStatement.getDatabase(), + loadTsFileStatement.getSchemaEvolutionFile()); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java index bc6475b23236f..0d3089cad3b7e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java @@ -24,6 +24,7 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.partition.DataPartition; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; import 
org.apache.iotdb.commons.schema.SchemaConstant; @@ -84,6 +85,7 @@ import org.apache.iotdb.db.queryengine.plan.statement.component.OrderByKey; import org.apache.iotdb.db.queryengine.plan.statement.component.Ordering; import org.apache.iotdb.db.queryengine.plan.statement.component.SortItem; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.db.utils.constant.SqlConstant; import com.google.common.collect.ImmutableList; @@ -1442,7 +1444,10 @@ private List getDeviceReplicaSets( Map> slot2ReplicasMap = cache.computeIfAbsent(db, k -> new HashMap<>()); - TSeriesPartitionSlot tSeriesPartitionSlot = dataPartition.calculateDeviceGroupId(deviceID); + + SeriesPartitionKey seriesPartitionKey = CommonUtils.getSeriesPartitionKey(deviceID, db, true); + TSeriesPartitionSlot tSeriesPartitionSlot = + dataPartition.calculateDeviceGroupId(seriesPartitionKey); Map>> finalSeriesPartitionMap = seriesPartitionMap; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNodeType.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNodeType.java index 55aaefe8be618..c5865ed1e6b84 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNodeType.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNodeType.java @@ -60,6 +60,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.DeleteLogicalViewNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.RollbackLogicalViewBlackListNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedDeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedEvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedInsertNode; import 
org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedNonWritePlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedWritePlanNode; @@ -107,6 +108,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.source.TimeseriesRegionScanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.ContinuousSameSearchIndexSeparatorNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertMultiTabletsNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; @@ -329,6 +331,8 @@ public enum PlanNodeType { RELATIONAL_INSERT_ROWS((short) 2002), RELATIONAL_DELETE_DATA((short) 2003), OBJECT_FILE_NODE((short) 2004), + EVOLVE_SCHEMA((short) 2005), + PIPE_ENRICHED_EVOLVE_SCHEMA((short) 2006), ; public static final int BYTES = Short.BYTES; @@ -374,6 +378,10 @@ public static PlanNode deserializeFromWAL(DataInputStream stream) throws IOExcep return RelationalDeleteDataNode.deserializeFromWAL(stream); case 2004: return ObjectNode.deserializeFromWAL(stream); + case 2005: + return EvolveSchemaNode.deserializeFromWAL(stream); + case 2006: + return PipeEnrichedEvolveSchemaNode.deserializeFromWAL(stream); default: throw new IllegalArgumentException("Invalid node type: " + nodeType); } @@ -402,6 +410,10 @@ public static PlanNode deserializeFromWAL(ByteBuffer buffer) { return RelationalDeleteDataNode.deserializeFromWAL(buffer); case 2004: return ObjectNode.deserialize(buffer); + case 2005: + return EvolveSchemaNode.deserializeFromWAL(buffer); + case 2006: + return PipeEnrichedEvolveSchemaNode.deserializeFromWAL(buffer); default: throw new IllegalArgumentException("Invalid node type: " + nodeType); } @@ -737,6 +749,10 
@@ public static PlanNode deserialize(ByteBuffer buffer, short nodeType) { return RelationalDeleteDataNode.deserialize(buffer); case 2004: return ObjectNode.deserialize(buffer); + case 2005: + return EvolveSchemaNode.deserialize(buffer); + case 2006: + return PipeEnrichedEvolveSchemaNode.deserialize(buffer); default: throw new IllegalArgumentException("Invalid node type: " + nodeType); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanVisitor.java index 44f1cd8bc1f67..bf54fd17c72df 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanVisitor.java @@ -57,6 +57,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.DeleteLogicalViewNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.RollbackLogicalViewBlackListNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedDeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedEvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedInsertNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedNonWritePlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedWritePlanNode; @@ -111,6 +112,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.source.SourceNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.source.TimeseriesRegionScanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import 
org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertMultiTabletsNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; @@ -645,6 +647,10 @@ public R visitDeleteData(RelationalDeleteDataNode node, C context) { return visitPlan(node, context); } + public R visitEvolveSchemaNode(EvolveSchemaNode node, C context) { + return visitPlan(node, context); + } + public R visitWriteObjectFile(ObjectNode node, C context) { return visitPlan(node, context); } @@ -661,6 +667,10 @@ public R visitPipeEnrichedDeleteDataNode(PipeEnrichedDeleteDataNode node, C cont return visitPlan(node, context); } + public R visitPipeEnrichedEvolveSchemaNode(PipeEnrichedEvolveSchemaNode node, C context) { + return visitPlan(node, context); + } + public R visitPipeEnrichedWritePlanNode(PipeEnrichedWritePlanNode node, C context) { return visitPlan(node, context); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadSingleTsFileNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadSingleTsFileNode.java index 604fda6e1e8d5..c0d5b582fbee8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadSingleTsFileNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadSingleTsFileNode.java @@ -62,6 +62,7 @@ public class LoadSingleTsFileNode extends WritePlanNode { private final boolean deleteAfterLoad; private final long writePointCount; private boolean needDecodeTsFile; + private final File schemaEvolutionFile; private TRegionReplicaSet localRegionReplicaSet; @@ -71,7 +72,8 @@ public LoadSingleTsFileNode( boolean isTableModel, String database, boolean deleteAfterLoad, - long writePointCount) { + long writePointCount, + File schemaEvolutionFile) { super(id); 
this.tsFile = resource.getTsFile(); this.resource = resource; @@ -79,6 +81,7 @@ public LoadSingleTsFileNode( this.database = database; this.deleteAfterLoad = deleteAfterLoad; this.writePointCount = writePointCount; + this.schemaEvolutionFile = schemaEvolutionFile; } public boolean isTsFileEmpty() { @@ -89,6 +92,12 @@ public boolean isTsFileEmpty() { public boolean needDecodeTsFile( Function>, List> partitionFetcher) { + if (schemaEvolutionFile != null) { + // with schema evolution, must split + needDecodeTsFile = true; + return needDecodeTsFile; + } + List> slotList = new ArrayList<>(); resource .getDevices() @@ -152,6 +161,10 @@ public long getWritePointCount() { return writePointCount; } + public File getSchemaEvolutionFile() { + return schemaEvolutionFile; + } + /** * only used for load locally. * diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadTsFileNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadTsFileNode.java index 3588b6ddbb052..41ae3b9b90b09 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadTsFileNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadTsFileNode.java @@ -34,6 +34,7 @@ import org.apache.tsfile.exception.NotImplementedException; import java.io.DataOutputStream; +import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -46,13 +47,19 @@ public class LoadTsFileNode extends WritePlanNode { private final List resources; private final List isTableModel; private final String database; + private final File schemaEvolutionFile; public LoadTsFileNode( - PlanNodeId id, List resources, List isTableModel, String database) { + PlanNodeId id, + List resources, + List isTableModel, + String database, + File schemaEvolutionFile) { super(id); this.resources = resources; 
this.isTableModel = isTableModel; this.database = database; + this.schemaEvolutionFile = schemaEvolutionFile; } @Override @@ -121,7 +128,8 @@ private List splitByPartitionForTreeModel(Analysis analysis) { isTableModel.get(i), database, statement.isDeleteAfterLoad(), - statement.getWritePointCount(i))); + statement.getWritePointCount(i), + schemaEvolutionFile)); } return res; } @@ -143,7 +151,8 @@ private List splitByPartitionForTableModel( isTableModel.get(i), database, statement.isDeleteAfterLoad(), - statement.getWritePointCount(i))); + statement.getWritePointCount(i), + schemaEvolutionFile)); } } return res; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/pipe/PipeEnrichedEvolveSchemaNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/pipe/PipeEnrichedEvolveSchemaNode.java new file mode 100644 index 0000000000000..794601eeb4e5e --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/pipe/PipeEnrichedEvolveSchemaNode.java @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe; + +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.commons.consensus.index.ProgressIndex; +import org.apache.iotdb.db.queryengine.plan.analyze.IAnalysis; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanVisitor; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.WritePlanNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.SearchNode; +import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.IWALByteBufferView; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.stream.Collectors; + +public class PipeEnrichedEvolveSchemaNode extends EvolveSchemaNode { + + private final EvolveSchemaNode evolveSchemaNode; + + public PipeEnrichedEvolveSchemaNode(final EvolveSchemaNode evolveSchemaNode) { + super(evolveSchemaNode.getPlanNodeId(), evolveSchemaNode.getSchemaEvolutions()); + this.evolveSchemaNode = evolveSchemaNode; + } + + public PlanNode EvolveSchemaNode() { + return evolveSchemaNode; + } + + @Override + public boolean isGeneratedByPipe() { + return evolveSchemaNode.isGeneratedByPipe(); + } + + @Override + public void markAsGeneratedByPipe() { + evolveSchemaNode.markAsGeneratedByPipe(); + } + + @Override + public PlanNodeId getPlanNodeId() { + return evolveSchemaNode.getPlanNodeId(); + } + + @Override + public void setPlanNodeId(final PlanNodeId id) { + evolveSchemaNode.setPlanNodeId(id); + } + + @Override + public ProgressIndex getProgressIndex() { + return evolveSchemaNode.getProgressIndex(); + } + + @Override 
+ public void setProgressIndex(ProgressIndex progressIndex) { + evolveSchemaNode.setProgressIndex(progressIndex); + } + + @Override + public List getChildren() { + return evolveSchemaNode.getChildren(); + } + + @Override + public void addChild(final PlanNode child) { + evolveSchemaNode.addChild(child); + } + + @Override + public PlanNodeType getType() { + return PlanNodeType.PIPE_ENRICHED_EVOLVE_SCHEMA; + } + + @Override + public PlanNode clone() { + return new PipeEnrichedEvolveSchemaNode((EvolveSchemaNode) evolveSchemaNode.clone()); + } + + @Override + public PlanNode createSubNode(final int subNodeId, final int startIndex, final int endIndex) { + return new PipeEnrichedEvolveSchemaNode( + (EvolveSchemaNode) evolveSchemaNode.createSubNode(subNodeId, startIndex, endIndex)); + } + + @Override + public PlanNode cloneWithChildren(final List children) { + return new PipeEnrichedEvolveSchemaNode( + (EvolveSchemaNode) evolveSchemaNode.cloneWithChildren(children)); + } + + @Override + public int allowedChildCount() { + return evolveSchemaNode.allowedChildCount(); + } + + @Override + public List getOutputColumnNames() { + return evolveSchemaNode.getOutputColumnNames(); + } + + @Override + public R accept(final PlanVisitor visitor, final C context) { + return visitor.visitPipeEnrichedEvolveSchemaNode(this, context); + } + + @Override + protected void serializeAttributes(final ByteBuffer byteBuffer) { + PlanNodeType.PIPE_ENRICHED_EVOLVE_SCHEMA.serialize(byteBuffer); + evolveSchemaNode.serialize(byteBuffer); + } + + @Override + protected void serializeAttributes(final DataOutputStream stream) throws IOException { + PlanNodeType.PIPE_ENRICHED_EVOLVE_SCHEMA.serialize(stream); + evolveSchemaNode.serialize(stream); + } + + public static PipeEnrichedEvolveSchemaNode deserialize(final ByteBuffer buffer) { + return new PipeEnrichedEvolveSchemaNode((EvolveSchemaNode) PlanNodeType.deserialize(buffer)); + } + + @Override + public boolean equals(final Object o) { + return o instanceof 
PipeEnrichedEvolveSchemaNode + && evolveSchemaNode.equals(((PipeEnrichedEvolveSchemaNode) o).evolveSchemaNode); + } + + @Override + public int hashCode() { + return evolveSchemaNode.hashCode(); + } + + @Override + public TRegionReplicaSet getRegionReplicaSet() { + return evolveSchemaNode.getRegionReplicaSet(); + } + + @Override + public List splitByPartition(final IAnalysis analysis) { + return evolveSchemaNode.splitByPartition(analysis).stream() + .map( + plan -> + plan instanceof PipeEnrichedEvolveSchemaNode + ? plan + : new PipeEnrichedEvolveSchemaNode((EvolveSchemaNode) plan)) + .collect(Collectors.toList()); + } + + @Override + public void serializeToWAL(final IWALByteBufferView buffer) { + evolveSchemaNode.serializeToWAL(buffer); + } + + @Override + public int serializedSize() { + return evolveSchemaNode.serializedSize(); + } + + @Override + public SearchNode merge(List searchNodes) { + List unrichedNodes = + searchNodes.stream() + .map( + searchNode -> + (SearchNode) ((PipeEnrichedEvolveSchemaNode) searchNode).EvolveSchemaNode()) + .collect(Collectors.toList()); + return new PipeEnrichedEvolveSchemaNode( + (EvolveSchemaNode) evolveSchemaNode.merge(unrichedNodes)); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/EvolveSchemaNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/EvolveSchemaNode.java new file mode 100644 index 0000000000000..834fd05887df3 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/EvolveSchemaNode.java @@ -0,0 +1,206 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.queryengine.plan.planner.plan.node.write; + +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.commons.consensus.index.ProgressIndex; +import org.apache.iotdb.db.queryengine.plan.analyze.IAnalysis; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanVisitor; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.WritePlanNode; +import org.apache.iotdb.db.schemaengine.schemaregion.ISchemaRegionPlan; +import org.apache.iotdb.db.schemaengine.schemaregion.SchemaRegionPlanType; +import org.apache.iotdb.db.schemaengine.schemaregion.SchemaRegionPlanVisitor; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.IWALByteBufferView; +import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntryValue; +import org.apache.iotdb.db.utils.io.IOUtils; + +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; +import org.apache.tsfile.utils.ReadWriteIOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataInputStream; +import 
java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class EvolveSchemaNode extends SearchNode implements WALEntryValue, ISchemaRegionPlan { + + private static final Logger LOGGER = LoggerFactory.getLogger(EvolveSchemaNode.class); + + protected TRegionReplicaSet regionReplicaSet; + protected ProgressIndex progressIndex; + private List schemaEvolutions; + + public EvolveSchemaNode() { + super(new PlanNodeId("")); + } + + public EvolveSchemaNode(PlanNodeId id, List schemaEvolutions) { + super(id); + this.schemaEvolutions = schemaEvolutions; + } + + public static PlanNode deserializeFromWAL(DataInputStream stream) throws IOException { + long searchIndex = stream.readLong(); + int size = ReadWriteForEncodingUtils.readVarInt(stream); + List evolutions = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + evolutions.add(SchemaEvolution.createFrom(stream)); + } + + EvolveSchemaNode evolveSchemaNode = new EvolveSchemaNode(new PlanNodeId(""), evolutions); + evolveSchemaNode.setSearchIndex(searchIndex); + + return evolveSchemaNode; + } + + public static PlanNode deserializeFromWAL(ByteBuffer buffer) { + long searchIndex = buffer.getLong(); + int size = ReadWriteForEncodingUtils.readVarInt(buffer); + List evolutions = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + evolutions.add(SchemaEvolution.createFrom(buffer)); + } + + EvolveSchemaNode evolveSchemaNode = new EvolveSchemaNode(new PlanNodeId(""), evolutions); + evolveSchemaNode.setSearchIndex(searchIndex); + + return evolveSchemaNode; + } + + public static PlanNode deserialize(ByteBuffer buffer) { + int size = ReadWriteForEncodingUtils.readVarInt(buffer); + List evolutions = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + evolutions.add(SchemaEvolution.createFrom(buffer)); + } + + PlanNodeId planNodeId = PlanNodeId.deserialize(buffer); + + // EvolveSchemaNode has no 
child + int ignoredChildrenSize = ReadWriteIOUtils.readInt(buffer); + return new EvolveSchemaNode(planNodeId, evolutions); + } + + @Override + public SearchNode merge(List searchNodes) { + return this; + } + + @Override + public ProgressIndex getProgressIndex() { + return progressIndex; + } + + @Override + public void setProgressIndex(ProgressIndex progressIndex) { + this.progressIndex = progressIndex; + } + + @Override + public List splitByPartition(IAnalysis analysis) { + return Collections.singletonList(this); + } + + @Override + public TRegionReplicaSet getRegionReplicaSet() { + return regionReplicaSet; + } + + @Override + public List getChildren() { + return Collections.emptyList(); + } + + @Override + public void addChild(PlanNode child) { + throw new UnsupportedOperationException(); + } + + @Override + public PlanNode clone() { + return new EvolveSchemaNode(id, schemaEvolutions); + } + + @Override + public int allowedChildCount() { + return 0; + } + + @Override + public List getOutputColumnNames() { + return Collections.emptyList(); + } + + @Override + protected void serializeAttributes(ByteBuffer byteBuffer) { + PlanNodeType.EVOLVE_SCHEMA.serialize(byteBuffer); + IOUtils.writeList(schemaEvolutions, byteBuffer); + } + + @Override + protected void serializeAttributes(DataOutputStream stream) throws IOException { + PlanNodeType.EVOLVE_SCHEMA.serialize(stream); + IOUtils.writeList(schemaEvolutions, stream); + } + + @Override + public void serializeToWAL(IWALByteBufferView buffer) { + buffer.putShort(PlanNodeType.EVOLVE_SCHEMA.getNodeType()); + buffer.putLong(searchIndex); + try { + IOUtils.writeList(schemaEvolutions, buffer); + } catch (IOException e) { + LOGGER.warn("Error writing schema evolutions to WAL", e); + } + } + + @Override + public int serializedSize() { + return 0; + } + + public List getSchemaEvolutions() { + return schemaEvolutions; + } + + @Override + public R accept(PlanVisitor visitor, C context) { + return visitor.visitEvolveSchemaNode(this, 
context); + } + + @Override + public SchemaRegionPlanType getPlanType() { + return SchemaRegionPlanType.EVOLVE_SCHEMA; + } + + @Override + public R accept(SchemaRegionPlanVisitor visitor, C context) { + return visitor.visitEvolveSchema(this, context); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowNode.java index a2bd6cb1a00fc..ffe5a3b99cd09 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowNode.java @@ -21,6 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.commons.utils.TimePartitionUtils; @@ -129,7 +130,7 @@ public List splitByPartition(IAnalysis analysis) { analysis .getDataPartitionInfo() .getDataRegionReplicaSetForWriting( - getDeviceID(), timePartitionSlot, analysis.getDatabaseName()); + new FullDeviceIdKey(getDeviceID()), timePartitionSlot, analysis.getDatabaseName()); // collect redirectInfo analysis.setRedirectNodeList( Collections.singletonList( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsNode.java index 7392b7612705e..7494f62bc87e5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsNode.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsNode.java @@ -23,6 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.consensus.index.ProgressIndex; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; import org.apache.iotdb.commons.utils.StatusUtils; import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.db.exception.DataTypeInconsistentException; @@ -276,7 +277,7 @@ public List splitByPartition(IAnalysis analysis) { analysis .getDataPartitionInfo() .getDataRegionReplicaSetForWriting( - insertRowNode.targetPath.getIDeviceIDAsFullDevice(), + new FullDeviceIdKey(insertRowNode.targetPath.getIDeviceIDAsFullDevice()), TimePartitionUtils.getTimePartitionSlot(insertRowNode.getTime()), null); // Collect redirectInfo diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsOfOneDeviceNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsOfOneDeviceNode.java index f1e28d32b104d..f54d62a7a2b52 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsOfOneDeviceNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsOfOneDeviceNode.java @@ -23,6 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.utils.StatusUtils; import org.apache.iotdb.commons.utils.TimePartitionUtils; @@ -173,7 +174,7 
@@ public List splitByPartition(IAnalysis analysis) { analysis .getDataPartitionInfo() .getDataRegionReplicaSetForWriting( - targetPath.getIDeviceIDAsFullDevice(), + new FullDeviceIdKey(targetPath.getIDeviceIDAsFullDevice()), timePartitionSlot, analysis.getDatabaseName()); Map> tmpMap = diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertTabletNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertTabletNode.java index 39683e5d9f94e..c69913164473a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertTabletNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertTabletNode.java @@ -23,6 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.utils.CommonDateTimeUtils; import org.apache.iotdb.commons.utils.TestOnly; @@ -288,7 +289,9 @@ protected Map> splitByReplicaSet( analysis .getDataPartitionInfo() .getDataRegionReplicaSetForWriting( - deviceID, splitInfo.timePartitionSlots, analysis.getDatabaseName()); + new FullDeviceIdKey(deviceID), + splitInfo.timePartitionSlots, + analysis.getDatabaseName()); splitInfo.replicaSets = replicaSets; // collect redirectInfo analysis.addEndPointToRedirectNodeList( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertRowsNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertRowsNode.java index 594ccf50471f9..20f3e10f40293 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertRowsNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertRowsNode.java @@ -21,6 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.db.exception.DataTypeInconsistentException; import org.apache.iotdb.db.queryengine.plan.analyze.IAnalysis; @@ -29,6 +30,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanVisitor; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.WritePlanNode; import org.apache.iotdb.db.storageengine.dataregion.memtable.AbstractMemTable; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.file.metadata.IDeviceID.Factory; @@ -168,11 +170,14 @@ public List splitByPartition(IAnalysis analysis) { InsertRowNode insertRowNode = getInsertRowNodeList().get(i); // Data region for insert row node // each row may belong to different database, pass null for auto-detection + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey( + insertRowNode.getDeviceID(), analysis.getDatabaseName(), true); TRegionReplicaSet dataRegionReplicaSet = analysis .getDataPartitionInfo() .getDataRegionReplicaSetForWriting( - insertRowNode.getDeviceID(), + seriesPartitionKey, TimePartitionUtils.getTimePartitionSlot(insertRowNode.getTime()), analysis.getDatabaseName()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java 
index 257f691e4a785..1a30d9323825f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java @@ -22,6 +22,7 @@ import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory; import org.apache.iotdb.commons.utils.TestOnly; @@ -36,6 +37,7 @@ import org.apache.iotdb.db.storageengine.dataregion.memtable.AbstractMemTable; import org.apache.iotdb.db.storageengine.dataregion.memtable.IWritableMemChunkGroup; import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.IWALByteBufferView; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.IDeviceID; @@ -207,11 +209,13 @@ protected Map> splitByReplicaSet( for (final Map.Entry entry : deviceIDSplitInfoMap.entrySet()) { final IDeviceID deviceID = entry.getKey(); final PartitionSplitInfo splitInfo = entry.getValue(); + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey(deviceID, analysis.getDatabaseName(), true); final List replicaSets = analysis .getDataPartitionInfo() .getDataRegionReplicaSetForWriting( - deviceID, splitInfo.timePartitionSlots, analysis.getDatabaseName()); + seriesPartitionKey, splitInfo.timePartitionSlots, analysis.getDatabaseName()); splitInfo.replicaSets = replicaSets; // collect redirectInfo endPointMap.put( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java index d442d51a21418..9cae1acb6ef3a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java @@ -1388,7 +1388,11 @@ protected RelationPlan visitLoadTsFile(final LoadTsFile node, final Void context } return new RelationPlan( new LoadTsFileNode( - idAllocator.genPlanNodeId(), node.getResources(), isTableModel, node.getDatabase()), + idAllocator.genPlanNodeId(), + node.getResources(), + isTableModel, + node.getDatabase(), + node.getSchemaEvolutionFile()), analysis.getRootScope(), Collections.emptyList(), outerContext); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java index 7072b5f519f73..29c55c01187a7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java @@ -26,6 +26,7 @@ import org.apache.iotdb.commons.exception.IoTDBRuntimeException; import org.apache.iotdb.commons.partition.DataPartition; import org.apache.iotdb.commons.partition.SchemaPartition; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.schema.table.TsTable; import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory; import org.apache.iotdb.commons.utils.TimePartitionUtils; @@ -104,6 +105,7 @@ import 
org.apache.iotdb.db.queryengine.plan.statement.component.Ordering; import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; import org.apache.iotdb.db.schemaengine.table.DataNodeTreeViewSchemaUtils; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.rpc.TSStatusCode; import com.google.common.collect.ImmutableList; @@ -714,7 +716,8 @@ private List constructDeviceTableScanByTags( seriesSlotMap, deviceEntry.getDeviceID(), node.getTimeFilter(), - cachedSeriesSlotWithRegions); + cachedSeriesSlotWithRegions, + dbName); regionReplicaSets.forEach( regionReplicaSet -> regionDeviceCount.put( @@ -805,7 +808,8 @@ private List constructDeviceTableScanByRegionReplicaSet( seriesSlotMap, deviceEntry.getDeviceID(), node.getTimeFilter(), - cachedSeriesSlotWithRegions); + cachedSeriesSlotWithRegions, + dbName); if (regionReplicaSets.size() > 1) { context.deviceCrossRegion = true; } @@ -895,7 +899,8 @@ public List visitTreeDeviceViewScan(TreeDeviceViewScanNode node, PlanC seriesSlotMap, deviceEntry.getDeviceID(), node.getTimeFilter(), - cachedSeriesSlotWithRegions); + cachedSeriesSlotWithRegions, + dbName); if (regionReplicaSets.size() > 1) { context.deviceCrossRegion = true; @@ -1215,7 +1220,8 @@ public List visitAggregationTableScan( seriesSlotMap, deviceEntry.getDeviceID(), node.getTimeFilter(), - cachedSeriesSlotWithRegions); + cachedSeriesSlotWithRegions, + dbName); if (regionReplicaSets.size() > 1) { needSplit = true; context.deviceCrossRegion = true; @@ -1298,10 +1304,14 @@ private List getDeviceReplicaSets( Map>> seriesSlotMap, IDeviceID deviceId, Filter timeFilter, - Map> cachedSeriesSlotWithRegions) { + Map> cachedSeriesSlotWithRegions, + String databaseName) { // given seriesPartitionSlot has already been calculated - final TSeriesPartitionSlot seriesPartitionSlot = dataPartition.calculateDeviceGroupId(deviceId); + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey(deviceId, databaseName, true); + final 
TSeriesPartitionSlot seriesPartitionSlot = + dataPartition.calculateDeviceGroupId(seriesPartitionKey); List regionReplicaSets = cachedSeriesSlotWithRegions.get(seriesPartitionSlot.getSlotId()); if (regionReplicaSets != null) { @@ -1780,8 +1790,10 @@ public List visitTableDeviceFetch( final List partitionKeyList = node.getPartitionKeyList(); final List deviceIDArray = node.getDeviceIdList(); for (int i = 0; i < node.getPartitionKeyList().size(); ++i) { + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey(partitionKeyList.get(i), database, true); final TRegionReplicaSet regionReplicaSet = - databaseMap.get(schemaPartition.calculateDeviceGroupId(partitionKeyList.get(i))); + databaseMap.get(schemaPartition.calculateDeviceGroupId(seriesPartitionKey)); if (Objects.nonNull(regionReplicaSet)) { tableDeviceFetchMap .computeIfAbsent( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java index be4b1337e7e4c..6b7b78aeb9974 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.db.queryengine.plan.analyze.IAnalysis; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; @@ -29,6 +30,7 @@ import 
org.apache.iotdb.db.schemaengine.schemaregion.ISchemaRegionPlan; import org.apache.iotdb.db.schemaengine.schemaregion.SchemaRegionPlanType; import org.apache.iotdb.db.schemaengine.schemaregion.SchemaRegionPlanVisitor; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.utils.ReadWriteIOUtils; @@ -260,10 +262,10 @@ public List splitByPartition(final IAnalysis analysis) { final List partitionKeyList = getPartitionKeyList(); for (int i = 0; i < partitionKeyList.size(); i++) { // Use the string literal of deviceId as the partition key + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey(partitionKeyList.get(i), database, true); final TRegionReplicaSet regionReplicaSet = - analysis - .getSchemaPartitionInfo() - .getSchemaRegionReplicaSet(database, partitionKeyList.get(i)); + analysis.getSchemaPartitionInfo().getSchemaRegionReplicaSet(database, seriesPartitionKey); splitMap.computeIfAbsent(regionReplicaSet, k -> new ArrayList<>()).add(i); } final List result = new ArrayList<>(splitMap.size()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/BooleanLiteral.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/BooleanLiteral.java index 42f62559a7286..c8a06a501be4d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/BooleanLiteral.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/BooleanLiteral.java @@ -119,4 +119,8 @@ public long ramBytesUsed() { return INSTANCE_SIZE + AstMemoryEstimationHelper.getEstimatedSizeOfNodeLocation(getLocationInternal()); } + + public boolean getParsedValue() { + return value; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/LoadTsFile.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/LoadTsFile.java index 9b7cd372ee940..1a20a90e9411f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/LoadTsFile.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/LoadTsFile.java @@ -64,6 +64,7 @@ public class LoadTsFile extends Statement { private List resources; private List writePointCountList; private List isTableModel; + private File schemaEvolutionFile; public LoadTsFile(NodeLocation location, String filePath, Map loadAttributes) { super(location); @@ -187,6 +188,10 @@ public long getWritePointCount(int resourceIndex) { return writePointCountList.get(resourceIndex); } + public File getSchemaEvolutionFile() { + return schemaEvolutionFile; + } + private void initAttributes() { this.databaseLevel = LoadTsFileConfigurator.parseOrGetDefaultDatabaseLevel(loadAttributes); this.database = LoadTsFileConfigurator.parseDatabaseName(loadAttributes); @@ -197,6 +202,7 @@ private void initAttributes() { LoadTsFileConfigurator.parseOrGetDefaultTabletConversionThresholdBytes(loadAttributes); this.verify = LoadTsFileConfigurator.parseOrGetDefaultVerify(loadAttributes); this.isAsyncLoad = LoadTsFileConfigurator.parseOrGetDefaultAsyncLoad(loadAttributes); + this.schemaEvolutionFile = LoadTsFileConfigurator.parseSevoFile(loadAttributes); } public boolean reconstructStatementIfMiniFileConverted(final List isMiniTsFile) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameColumn.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameColumn.java index 5e1277316a439..cbf2a5616453f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameColumn.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameColumn.java @@ -19,8 +19,6 @@ package org.apache.iotdb.db.queryengine.plan.relational.sql.ast; -import org.apache.iotdb.db.exception.sql.SemanticException; - import com.google.common.collect.ImmutableList; import org.apache.tsfile.utils.RamUsageEstimator; @@ -57,9 +55,6 @@ public RenameColumn( this.tableIfExists = tableIfExists; this.columnIfNotExists = columnIfNotExists; this.view = view; - if (!view) { - throw new SemanticException("The renaming for base table column is currently unsupported"); - } } public QualifiedName getTable() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameTable.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameTable.java index 77c3296fee00b..5919dec7d03ff 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameTable.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameTable.java @@ -19,8 +19,6 @@ package org.apache.iotdb.db.queryengine.plan.relational.sql.ast; -import org.apache.iotdb.db.exception.sql.SemanticException; - import com.google.common.collect.ImmutableList; import org.apache.tsfile.utils.RamUsageEstimator; @@ -51,9 +49,6 @@ public RenameTable( this.target = requireNonNull(target, "target name is null"); this.tableIfExists = tableIfExists; this.view = view; - if (!view) { - throw new SemanticException("The renaming for base table is currently unsupported"); - } } public QualifiedName getSource() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java index cf7ab8faeddaf..74c516fe3c365 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java @@ -34,6 +34,7 @@ import org.apache.iotdb.commons.partition.DataPartition; import org.apache.iotdb.commons.partition.DataPartitionQueryParam; import org.apache.iotdb.commons.partition.StorageExecutor; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; @@ -72,6 +73,7 @@ import org.apache.iotdb.db.storageengine.load.splitter.DeletionData; import org.apache.iotdb.db.storageengine.load.splitter.TsFileData; import org.apache.iotdb.db.storageengine.load.splitter.TsFileSplitter; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.metrics.utils.MetricLevel; import org.apache.iotdb.mpp.rpc.thrift.TLoadCommandReq; import org.apache.iotdb.rpc.TSStatusCode; @@ -315,7 +317,9 @@ private boolean firstPhase(LoadSingleTsFileNode node) { final TsFileDataManager tsFileDataManager = new TsFileDataManager(this, node, block); try { new TsFileSplitter( - node.getTsFileResource().getTsFile(), tsFileDataManager::addOrSendTsFileData) + node.getTsFileResource().getTsFile(), + tsFileDataManager::addOrSendTsFileData, + node.getSchemaEvolutionFile()) .splitTsFileByDataPartition(); if (!tsFileDataManager.sendAllTsFileData()) { return false; @@ -844,12 +848,15 @@ public List queryDataPartition( subSlotList.stream() .map( pair -> - // (database != null) means this file will be loaded into table-model - database != null - ? 
dataPartition.getDataRegionReplicaSetForWriting( - pair.left, pair.right, database) - : dataPartition.getDataRegionReplicaSetForWriting( - pair.left, pair.right)) + // (database != null) means this file will be loaded into table-model + { + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey(pair.left, database, false); + return database != null + ? dataPartition.getDataRegionReplicaSetForWriting( + seriesPartitionKey, pair.right, database) + : dataPartition.getDataRegionReplicaSetForWriting(pair.left, pair.right); + }) .collect(Collectors.toList())); } return replicaSets; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/LoadTsFileStatement.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/LoadTsFileStatement.java index a51dcaf09d2b3..1c901ee10ddd8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/LoadTsFileStatement.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/LoadTsFileStatement.java @@ -68,6 +68,7 @@ public class LoadTsFileStatement extends Statement { private List isTableModel; private List resources; private List writePointCountList; + private File schemaEvolutionFile; public LoadTsFileStatement(String filePath) throws FileNotFoundException { this.file = new File(filePath).getAbsoluteFile(); @@ -247,6 +248,10 @@ public void setLoadAttributes(final Map loadAttributes) { initAttributes(loadAttributes); } + public File getSchemaEvolutionFile() { + return schemaEvolutionFile; + } + public boolean isAsyncLoad() { return isAsyncLoad; } @@ -264,6 +269,7 @@ private void initAttributes(final Map loadAttributes) { if (LoadTsFileConfigurator.parseOrGetDefaultPipeGenerated(loadAttributes)) { markIsGeneratedByPipe(); } + this.schemaEvolutionFile = LoadTsFileConfigurator.parseSevoFile(loadAttributes); } public boolean 
reconstructStatementIfMiniFileConverted(final List isMiniTsFile) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegion.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegion.java index 4a11165353cd6..578b0a1e86273 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegion.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegion.java @@ -29,6 +29,7 @@ import org.apache.iotdb.db.exception.metadata.SchemaQuotaExceededException; import org.apache.iotdb.db.queryengine.common.schematree.ClusterSchemaTree; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.TableId; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; @@ -418,5 +419,7 @@ void commitUpdateAttribute(final TableDeviceAttributeCommitUpdateNode node) void addNodeLocation(final TableNodeLocationAddNode node) throws MetadataException; + void applySchemaEvolution(EvolveSchemaNode schemaEvolutions) throws MetadataException; + // endregion } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanType.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanType.java index b0d41c725eeb0..8b058742ea3c3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanType.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanType.java @@ -63,6 +63,7 @@ 
public enum SchemaRegionPlanType { ROLLBACK_TABLE_DEVICES_BLACK_LIST((byte) 106), DELETE_TABLE_DEVICES_IN_BLACK_LIST((byte) 107), DROP_TABLE_ATTRIBUTE((byte) 108), + EVOLVE_SCHEMA((byte) 109), // query plan doesn't need any ser/deSer, thus use one type to represent all READ_SCHEMA(Byte.MAX_VALUE); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanVisitor.java index 0cf087a40e117..e97b094291855 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanVisitor.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.schemaengine.schemaregion; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.DeleteTableDeviceNode; @@ -185,4 +186,8 @@ public R visitAlterEncodingCompressor( final AlterEncodingCompressorNode alterEncodingCompressorNode, final C context) { return visitSchemaRegionPlan(alterEncodingCompressorNode, context); } + + public R visitEvolveSchema(final EvolveSchemaNode evolveSchemaNode, final C context) { + return visitSchemaRegionPlan(evolveSchemaNode, context); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java index 
230ed8330ca16..4baa250d78660 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java @@ -49,10 +49,12 @@ import org.apache.iotdb.db.queryengine.execution.operator.schema.source.DeviceBlackListConstructor; import org.apache.iotdb.db.queryengine.execution.operator.schema.source.TableDeviceQuerySource; import org.apache.iotdb.db.queryengine.execution.relational.ColumnTransformerBuilder; +import org.apache.iotdb.db.queryengine.execution.relational.ColumnTransformerBuilder.Context; import org.apache.iotdb.db.queryengine.plan.analyze.TypeProvider; import org.apache.iotdb.db.queryengine.plan.planner.LocalExecutionPlanner; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.CreateTimeSeriesNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.InputLocation; import org.apache.iotdb.db.queryengine.plan.relational.metadata.Metadata; import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.TableDeviceSchemaCache; @@ -125,6 +127,8 @@ import org.apache.iotdb.db.schemaengine.schemaregion.write.req.view.IPreDeleteLogicalViewPlan; import org.apache.iotdb.db.schemaengine.schemaregion.write.req.view.IRollbackPreDeleteLogicalViewPlan; import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; import org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; import org.apache.iotdb.db.utils.SchemaUtils; @@ -1555,7 +1559,7 @@ private DeviceAttributeUpdater 
constructDevicePredicateUpdater( Objects.nonNull(predicate) ? visitor.process( predicate, - new ColumnTransformerBuilder.Context( + new Context( sessionInfo, filterLeafColumnTransformerList, inputLocations, @@ -1582,8 +1586,8 @@ private DeviceAttributeUpdater constructDevicePredicateUpdater( // records common ColumnTransformer between filter and project expressions final List commonTransformerList = new ArrayList<>(); - final ColumnTransformerBuilder.Context projectColumnTransformerContext = - new ColumnTransformerBuilder.Context( + final Context projectColumnTransformerContext = + new Context( sessionInfo, projectLeafColumnTransformerList, inputLocations, @@ -1782,6 +1786,23 @@ public void addNodeLocation(final TableNodeLocationAddNode node) throws Metadata } } + @Override + public void applySchemaEvolution(EvolveSchemaNode node) throws MetadataException { + for (SchemaEvolution schemaEvolution : node.getSchemaEvolutions()) { + if (schemaEvolution instanceof TableRename) { + TableRename tableRename = (TableRename) schemaEvolution; + applyTableRename(tableRename.getNameBefore(), tableRename.getNameAfter()); + } else { + logger.warn("Unsupported schemaEvolution {}, ignore it", schemaEvolution); + } + } + writeToMLog(node); + } + + public void applyTableRename(String oldName, String newName) { + mTree.renameTable(oldName, newName); + } + // endregion private static class RecoverOperationResult { @@ -2115,5 +2136,16 @@ public RecoverOperationResult visitAlterEncodingCompressor( return new RecoverOperationResult(e); } } + + @Override + public RecoverOperationResult visitEvolveSchema( + EvolveSchemaNode evolveSchemaNode, SchemaRegionMemoryImpl context) { + try { + applySchemaEvolution(evolveSchemaNode); + return RecoverOperationResult.SUCCESS; + } catch (final MetadataException e) { + return new RecoverOperationResult(e); + } + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java index 2f4bec896ddf4..5ebc9d52f9d4d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java @@ -41,6 +41,7 @@ import org.apache.iotdb.db.exception.metadata.SchemaQuotaExceededException; import org.apache.iotdb.db.queryengine.common.schematree.ClusterSchemaTree; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.TableId; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; @@ -1583,6 +1584,11 @@ public void addNodeLocation(final TableNodeLocationAddNode node) { throw new UnsupportedOperationException(); } + @Override + public void applySchemaEvolution(EvolveSchemaNode schemaEvolutions) { + throw new UnsupportedOperationException(); + } + // endregion private static class RecoverOperationResult { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanDeserializer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanDeserializer.java index 163ccb4e59de8..eb2b07225266c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanDeserializer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanDeserializer.java @@ -26,6 +26,7 @@ 
import org.apache.iotdb.commons.schema.view.viewExpression.ViewExpression; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.DeleteTableDeviceNode; @@ -482,5 +483,11 @@ public ISchemaRegionPlan visitAlterEncodingCompressor( final AlterEncodingCompressorNode alterEncodingCompressorNode, final ByteBuffer buffer) { return (AlterEncodingCompressorNode) PlanNodeType.deserialize(buffer); } + + @Override + public ISchemaRegionPlan visitEvolveSchema( + EvolveSchemaNode evolveSchemaNode, ByteBuffer buffer) { + return (EvolveSchemaNode) PlanNodeType.deserialize(buffer); + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanSerializer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanSerializer.java index b7b7d9758ca4b..d65b18f18af57 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanSerializer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanSerializer.java @@ -23,6 +23,7 @@ import org.apache.iotdb.commons.schema.view.viewExpression.ViewExpression; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; +import 
org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.DeleteTableDeviceNode; @@ -553,6 +554,12 @@ public SchemaRegionPlanSerializationResult visitAlterEncodingCompressor( return visitPlanNode(alterEncodingCompressorNode, outputStream); } + @Override + public SchemaRegionPlanSerializationResult visitEvolveSchema( + EvolveSchemaNode evolveSchemaNode, DataOutputStream outputStream) { + return visitPlanNode(evolveSchemaNode, outputStream); + } + private SchemaRegionPlanSerializationResult visitPlanNode( final PlanNode planNode, final DataOutputStream outputStream) { try { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java index 9aec147ace1fe..82dc65cd33873 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java @@ -1703,6 +1703,16 @@ public int getTableDeviceNotExistNum(final String tableName, final List lastTsFileSetMap = new ConcurrentHashMap<>(); + + private DataRegionTaskManager dataRegionTaskManager; + /** * Construct a database processor. 
* @@ -467,7 +481,7 @@ public DataRegion(String databaseName, String dataRegionIdString) { } private void initDiskSelector() { - final ILoadDiskSelector.DiskDirectorySelector selector = + final DiskDirectorySelector selector = (sourceDirectory, fileName, tierLevel) -> { try { return TierManager.getInstance() @@ -669,6 +683,10 @@ private void recover() throws DataRegionException { throw new RuntimeException(e); } } + // ensure that seq and unseq files in the same partition have the same TsFileSet + Map> recoveredPartitionTsFileSetMap = new HashMap<>(); + Map partitionMinimalVersion = new HashMap<>(); + for (Entry> partitionFiles : partitionTmpSeqTsFiles.entrySet()) { Callable asyncRecoverTask = recoverFilesInPartition( @@ -676,7 +694,9 @@ private void recover() throws DataRegionException { dataRegionRecoveryContext, partitionFiles.getValue(), fileTimeIndexMap, - true); + true, + recoveredPartitionTsFileSetMap, + partitionMinimalVersion); if (asyncRecoverTask != null) { asyncTsFileResourceRecoverTaskList.add(asyncRecoverTask); } @@ -689,7 +709,9 @@ private void recover() throws DataRegionException { dataRegionRecoveryContext, partitionFiles.getValue(), fileTimeIndexMap, - false); + false, + recoveredPartitionTsFileSetMap, + partitionMinimalVersion); if (asyncRecoverTask != null) { asyncTsFileResourceRecoverTaskList.add(asyncRecoverTask); } @@ -704,6 +726,19 @@ private void recover() throws DataRegionException { Long.MAX_VALUE, lastFlushTimeMap.getMemSize(latestPartitionId))); } + + // remove empty file sets + for (Entry> entry : recoveredPartitionTsFileSetMap.entrySet()) { + long partitionId = entry.getKey(); + // if no file in the partition, all filesets should be cleared + long minimumFileVersion = + partitionMinimalVersion.getOrDefault(partitionId, Long.MAX_VALUE); + for (TsFileSet tsFileSet : entry.getValue()) { + if (tsFileSet.getEndVersion() < minimumFileVersion) { + tsFileSet.remove(); + } + } + } } // wait until all unsealed TsFiles have been recovered for 
(WALRecoverListener recoverListener : recoverListeners) { @@ -741,6 +776,9 @@ private void recover() throws DataRegionException { throw new DataRegionException(e); } + dataRegionTaskManager = new DataRegionTaskManager(this); + dataRegionTaskManager.recover(); + if (asyncTsFileResourceRecoverTaskList.isEmpty()) { initCompactionSchedule(); } @@ -772,9 +810,13 @@ private void updatePartitionLastFlushTime(TsFileResource resource) { protected void updateDeviceLastFlushTime(TsFileResource resource) { long timePartitionId = resource.getTimePartition(); Map endTimeMap = new HashMap<>(); + EvolvedSchema mergedEvolvedSchema = resource.getMergedEvolvedSchema(); for (IDeviceID deviceId : resource.getDevices()) { @SuppressWarnings("OptionalGetWithoutIsPresent") // checked above long endTime = resource.getEndTime(deviceId).get(); + if (mergedEvolvedSchema != null) { + deviceId = mergedEvolvedSchema.rewriteToOriginal(deviceId); + } endTimeMap.put(deviceId, endTime); } if (config.isEnableSeparateData()) { @@ -789,10 +831,14 @@ protected void upgradeAndUpdateDeviceLastFlushTime( long timePartitionId, List resources) { Map endTimeMap = new HashMap<>(); for (TsFileResource resource : resources) { + EvolvedSchema mergedEvolvedSchema = resource.getMergedEvolvedSchema(); for (IDeviceID deviceId : resource.getDevices()) { // checked above //noinspection OptionalGetWithoutIsPresent long endTime = resource.getEndTime(deviceId).get(); + if (mergedEvolvedSchema != null) { + deviceId = mergedEvolvedSchema.rewriteToOriginal(deviceId); + } endTimeMap.put(deviceId, endTime); } } @@ -1010,16 +1056,76 @@ private void recoverSealedTsFiles( } } + private String getFileSetsDir(long partitionId) { + return dataRegionSysDir + + File.separator + + partitionId + + File.separator + + TsFileSet.FILE_SET_DIR_NAME; + } + + public File getDataRegionSysDir() { + return dataRegionSysDir; + } + + private List recoverTsFileSets( + long partitionId, Map> tsFileSetMap) { + List tsFileSets = + 
tsFileSetMap.computeIfAbsent( + partitionId, + pid -> { + File fileSetDir = new File(getFileSetsDir(partitionId)); + File[] fileSets = fileSetDir.listFiles(); + if (fileSets == null || fileSets.length == 0) { + return Collections.emptyList(); + } else { + List results = new ArrayList<>(); + for (File fileSet : fileSets) { + TsFileSet tsFileSet; + try { + tsFileSet = + new TsFileSet( + Long.parseLong(fileSet.getName()), fileSetDir.getAbsolutePath(), true); + tsFileManager.addTsFileSet(tsFileSet, partitionId); + } catch (NumberFormatException e) { + continue; + } + results.add(tsFileSet); + } + return results; + } + }); + if (!tsFileSets.isEmpty()) { + tsFileSets.sort(null); + lastTsFileSetMap.put(partitionId, tsFileSets.get(tsFileSets.size() - 1)); + } + return tsFileSets; + } + private Callable recoverFilesInPartition( long partitionId, DataRegionRecoveryContext context, List resourceList, Map fileTimeIndexMap, - boolean isSeq) { + boolean isSeq, + Map> partitionTsFileSetMap, + Map partitionMinimalVersion) { + List resourceListForAsyncRecover = new ArrayList<>(); List resourceListForSyncRecover = new ArrayList<>(); Callable asyncRecoverTask = null; + recoverTsFileSets(partitionId, partitionTsFileSetMap); for (TsFileResource tsFileResource : resourceList) { + long fileVersion = tsFileResource.getTsFileID().fileVersion; + partitionMinimalVersion.compute( + partitionId, + (pid, oldVersion) -> { + if (oldVersion == null) { + return fileVersion; + } + return Math.min(oldVersion, fileVersion); + }); + tsFileManager.add(tsFileResource, isSeq); if (fileTimeIndexMap.containsKey(tsFileResource.getTsFileID()) && tsFileResource.resourceFileExists()) { @@ -1140,6 +1246,86 @@ private int compareFileName(File o1, File o2) { } } + private TsFileSet createNewFileSet(long maxVersion, long partitionId) { + TsFileSet newSet = new TsFileSet(maxVersion, getFileSetsDir(partitionId), false); + tsFileManager.addTsFileSet(newSet, partitionId); + return newSet; + } + + public void 
applySchemaEvolution(List schemaEvolutions) throws IOException { + long startTime = System.nanoTime(); + writeLock("applySchemaEvolution"); + PERFORMANCE_OVERVIEW_METRICS.recordScheduleLockCost(System.nanoTime() - startTime); + try { + if (deleted) { + return; + } + + syncCloseAllWorkingTsFileProcessors(); + + // may update table names in deviceIds + schemaEvolutions.forEach(lastFlushTimeMap::accept); + + SchemaEvolutionTask evolutionTask = new SchemaEvolutionTask(schemaEvolutions, this); + dataRegionTaskManager.submitAndRun(evolutionTask); + } finally { + writeUnlock(); + } + } + + public void recordSchemaEvolution(List schemaEvolutions) { + for (Entry partitionVersionEntry : partitionMaxFileVersions.entrySet()) { + long partitionId = partitionVersionEntry.getKey(); + long maxVersion = partitionVersionEntry.getValue(); + lastTsFileSetMap.compute( + partitionId, + (pid, lastSet) -> { + if (lastSet == null) { + lastSet = createNewFileSet(maxVersion, partitionId); + } else if (lastSet.getEndVersion() < maxVersion) { + lastSet = createNewFileSet(maxVersion, partitionId); + } + try { + lastSet.appendSchemaEvolution(schemaEvolutions); + } catch (IOException e) { + logger.error( + "Cannot append schema evolutions to fileSets in partition {}-{}", + dataRegionId, + partitionId, + e); + } + return lastSet; + }); + } + } + + public void applySchemaEvolutionToObjects(List schemaEvolutions) { + for (SchemaEvolution schemaEvolution : schemaEvolutions) { + if (schemaEvolution instanceof TableRename) { + TableRename tableRename = (TableRename) schemaEvolution; + renameTableForObjects(tableRename.getNameBefore(), tableRename.getNameAfter()); + } else if (schemaEvolution instanceof ColumnRename) { + ColumnRename columnRename = (ColumnRename) schemaEvolution; + if (columnRename.getDataType() == TSDataType.OBJECT) { + renameMeasurementForObjects( + columnRename.getTableName(), + columnRename.getNameBefore(), + columnRename.getNameAfter()); + } + } + } + } + + private void 
renameTableForObjects(String nameBefore, String nameAfter) { + // TODO-SchemaEvolution + // throw new UnsupportedOperationException(); + } + + private void renameMeasurementForObjects(String tableName, String nameBefore, String nameAfter) { + // TODO-SchemaEvolution + // throw new UnsupportedOperationException(); + } + /** * insert one row of data. * @@ -1705,7 +1891,7 @@ private List insertToTsFileProcessors( } List executedInsertRowNodeList = new ArrayList<>(); - for (Map.Entry entry : tsFileProcessorMap.entrySet()) { + for (Entry entry : tsFileProcessorMap.entrySet()) { TsFileProcessor tsFileProcessor = entry.getKey(); InsertRowsNode subInsertRowsNode = entry.getValue(); try { @@ -2413,7 +2599,8 @@ private boolean tryGetFLushLock( for (TsFileResource tsFileResource : seqResources) { // only need to acquire flush lock for those unclosed and satisfied tsfile if (!tsFileResource.isClosed() - && tsFileResource.isSatisfied(singleDeviceId, globalTimeFilter, true, isDebug)) { + && tsFileResource.isFinalDeviceIdSatisfied( + singleDeviceId, globalTimeFilter, true, isDebug)) { TsFileProcessor tsFileProcessor = tsFileResource.getProcessor(); try { if (tsFileProcessor == null) { @@ -2460,7 +2647,8 @@ private boolean tryGetFLushLock( // deal with unSeq resources for (TsFileResource tsFileResource : unSeqResources) { if (!tsFileResource.isClosed() - && tsFileResource.isSatisfied(singleDeviceId, globalTimeFilter, false, isDebug)) { + && tsFileResource.isFinalDeviceIdSatisfied( + singleDeviceId, globalTimeFilter, false, isDebug)) { TsFileProcessor tsFileProcessor = tsFileResource.getProcessor(); try { if (tsFileProcessor == null) { @@ -2575,7 +2763,8 @@ private List getFileHandleListForQuery( List fileScanHandles = new ArrayList<>(); for (TsFileResource tsFileResource : tsFileResources) { - if (!tsFileResource.isSatisfied(null, globalTimeFilter, isSeq, context.isDebug())) { + if (!tsFileResource.isFinalDeviceIdSatisfied( + null, globalTimeFilter, isSeq, context.isDebug())) { 
continue; } if (tsFileResource.isClosed()) { @@ -2653,7 +2842,8 @@ private List getFileHandleListForQuery( List fileScanHandles = new ArrayList<>(); for (TsFileResource tsFileResource : tsFileResources) { - if (!tsFileResource.isSatisfied(null, globalTimeFilter, isSeq, context.isDebug())) { + if (!tsFileResource.isFinalDeviceIdSatisfied( + null, globalTimeFilter, isSeq, context.isDebug())) { continue; } if (tsFileResource.isClosed()) { @@ -2734,8 +2924,9 @@ public void writeUnlock() { * @param tsFileResources includes sealed and unsealed tsfile resources * @return fill unsealed tsfile resources with memory data and ChunkMetadataList of data in disk */ + @SuppressWarnings("SuspiciousSystemArraycopy") private List getFileResourceListForQuery( - Collection tsFileResources, + List tsFileResources, List pathList, IDeviceID singleDeviceId, QueryContext context, @@ -2745,8 +2936,56 @@ private List getFileResourceListForQuery( List tsfileResourcesForQuery = new ArrayList<>(); + List tsFileSets = Collections.emptyList(); + int tsFileSetsIndex = 0; + Long currentTimePartitionId = null; + EvolvedSchema currentEvolvedSchema; + IDeviceID originalDeviceId = singleDeviceId; + for (TsFileResource tsFileResource : tsFileResources) { - if (!tsFileResource.isSatisfied(singleDeviceId, globalTimeFilter, isSeq, context.isDebug())) { + long fileTimePartition = tsFileResource.getTimePartition(); + // update TsFileSets if time partition changes + boolean tsFileSetsChanged = false; + if (currentTimePartitionId == null || currentTimePartitionId != fileTimePartition) { + currentTimePartitionId = fileTimePartition; + tsFileSets = tsFileManager.getTsFileSet(fileTimePartition); + tsFileSetsIndex = 0; + tsFileSetsChanged = true; + originalDeviceId = singleDeviceId; + } + // find TsFileSets this file belongs to + while (tsFileSetsIndex < tsFileSets.size()) { + TsFileSet tsFileSet = tsFileSets.get(tsFileSetsIndex); + if (tsFileSet.contains(tsFileResource)) { + break; + } else { + tsFileSetsChanged 
= true; + tsFileSetsIndex++; + } + } + // if TsFileSets change + if (tsFileSetsChanged) { + // and there are remaining TsFileSets, update EvolvedSchema + if (tsFileSetsIndex < tsFileSets.size()) { + currentEvolvedSchema = + TsFileSet.getMergedEvolvedSchema( + tsFileSets.subList(tsFileSetsIndex, tsFileSets.size())); + // use EvolvedSchema to rewrite deviceId to original deviceId + if (currentEvolvedSchema != null) { + originalDeviceId = currentEvolvedSchema.rewriteToOriginal(singleDeviceId); + } else { + // no schema evolution, use the singleDeviceId as originalDeviceId + originalDeviceId = singleDeviceId; + } + } else { + // no remaining TsFileSets, no schema evolution + originalDeviceId = singleDeviceId; + } + } + + // reuse the deviceId to avoid rewriting again or reading EvolvedSchema unnecessarily + if (!tsFileResource.isOriginalDeviceIdSatisfied( + originalDeviceId, globalTimeFilter, isSeq, context.isDebug())) { continue; } try { @@ -3007,12 +3246,12 @@ private List logDeletionInWAL(RelationalDeleteDataNode deleteD for (TableDeletionEntry modEntry : deleteDataNode.getModEntries()) { long startTime = modEntry.getStartTime(); long endTime = modEntry.getEndTime(); - for (Map.Entry entry : workSequenceTsFileProcessors.entrySet()) { + for (Entry entry : workSequenceTsFileProcessors.entrySet()) { if (TimePartitionUtils.satisfyPartitionId(startTime, endTime, entry.getKey())) { involvedProcessors.add(entry.getValue()); } } - for (Map.Entry entry : workUnsequenceTsFileProcessors.entrySet()) { + for (Entry entry : workUnsequenceTsFileProcessors.entrySet()) { if (TimePartitionUtils.satisfyPartitionId(startTime, endTime, entry.getKey())) { involvedProcessors.add(entry.getValue()); } @@ -3048,13 +3287,13 @@ private List logDeletionInWAL( DeleteDataNode deleteDataNode = new DeleteDataNode(new PlanNodeId(""), Collections.singletonList(path), startTime, endTime); deleteDataNode.setSearchIndex(searchIndex); - for (Map.Entry entry : workSequenceTsFileProcessors.entrySet()) { + 
for (Entry entry : workSequenceTsFileProcessors.entrySet()) { if (TimePartitionUtils.satisfyPartitionId(startTime, endTime, entry.getKey())) { WALFlushListener walFlushListener = entry.getValue().logDeleteDataNodeInWAL(deleteDataNode); walFlushListeners.add(walFlushListener); } } - for (Map.Entry entry : workUnsequenceTsFileProcessors.entrySet()) { + for (Entry entry : workUnsequenceTsFileProcessors.entrySet()) { if (TimePartitionUtils.satisfyPartitionId(startTime, endTime, entry.getKey())) { WALFlushListener walFlushListener = entry.getValue().logDeleteDataNodeInWAL(deleteDataNode); walFlushListeners.add(walFlushListener); @@ -3171,6 +3410,11 @@ private boolean canSkipDelete(TsFileResource tsFileResource, ModEntry deletion) return false; } + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(); + if (evolvedSchema != null) { + deletion = evolvedSchema.rewriteToOriginal(deletion); + } + for (IDeviceID device : tsFileResource.getDevices()) { // we are iterating the time index so the times are definitely present long startTime = tsFileResource.getTimeIndex().getStartTime(device).get(); @@ -3222,69 +3466,127 @@ private void deleteDataInUnsealedFiles( } } + private boolean canBeFullyDeleted( + ArrayDeviceTimeIndex deviceTimeIndex, TableDeletionEntry tableDeletionEntry) { + Set devicesInFile = deviceTimeIndex.getDevices(); + String tableName = tableDeletionEntry.getTableName(); + long matchSize = + devicesInFile.stream() + .filter( + device -> { + if (logger.isDebugEnabled()) { + logger.debug( + "device is {}, deviceTable is {}, tableDeletionEntry.getPredicate().matches(device) is {}", + device, + device.getTableName(), + tableDeletionEntry.getPredicate().matches(device)); + } + return tableName.equals(device.getTableName()) + && tableDeletionEntry.getPredicate().matches(device); + }) + .count(); + boolean onlyOneTable = matchSize == devicesInFile.size(); + if (logger.isDebugEnabled()) { + logger.debug( + "tableName is {}, matchSize is {}, onlyOneTable 
is {}", + tableName, + matchSize, + onlyOneTable); + } + + if (onlyOneTable) { + matchSize = 0; + for (IDeviceID device : devicesInFile) { + Optional optStart = deviceTimeIndex.getStartTime(device); + Optional optEnd = deviceTimeIndex.getEndTime(device); + if (!optStart.isPresent() || !optEnd.isPresent()) { + continue; + } + + long fileStartTime = optStart.get(); + long fileEndTime = optEnd.get(); + + if (logger.isDebugEnabled()) { + logger.debug( + "tableName is {}, device is {}, deletionStartTime is {}, deletionEndTime is {}, fileStartTime is {}, fileEndTime is {}", + device.getTableName(), + device, + tableDeletionEntry.getStartTime(), + tableDeletionEntry.getEndTime(), + fileStartTime, + fileEndTime); + } + if (isFileFullyMatchedByTime(tableDeletionEntry, fileStartTime, fileEndTime)) { + ++matchSize; + } else { + return false; + } + } + return matchSize == devicesInFile.size(); + } else { + return false; + } + } + private void deleteDataInSealedFiles(Collection sealedTsFiles, ModEntry deletion) throws IOException { - Set involvedModificationFiles = new HashSet<>(); - List deletedByMods = new ArrayList<>(); + Set> involvedModificationFiles = new HashSet<>(); List deletedByFiles = new ArrayList<>(); - boolean isDropMeasurementExist = false; - IDPredicate.IDPredicateType idPredicateType = null; - - if (deletion instanceof TableDeletionEntry) { - TableDeletionEntry tableDeletionEntry = (TableDeletionEntry) deletion; - isDropMeasurementExist = !tableDeletionEntry.getPredicate().getMeasurementNames().isEmpty(); - idPredicateType = tableDeletionEntry.getPredicate().getIdPredicateType(); - } for (TsFileResource sealedTsFile : sealedTsFiles) { if (canSkipDelete(sealedTsFile, deletion)) { continue; } - // the tsfile may not be closed here, it should not be added in deletedByFiles - if (!sealedTsFile.isClosed()) { - deletedByMods.add(sealedTsFile); - continue; - } - ITimeIndex timeIndex = sealedTsFile.getTimeIndex(); + EvolvedSchema evolvedSchema = 
sealedTsFile.getMergedEvolvedSchema(); + // the tsfile may not be closed here, it should not be added in deletedByFiles if ((timeIndex instanceof ArrayDeviceTimeIndex) - && (deletion.getType() == ModEntry.ModType.TABLE_DELETION)) { + && (deletion.getType() == ModType.TABLE_DELETION) + && sealedTsFile.isClosed()) { ArrayDeviceTimeIndex deviceTimeIndex = (ArrayDeviceTimeIndex) timeIndex; + Set devicesInFile = deviceTimeIndex.getDevices(); boolean onlyOneTable = false; - if (deletion instanceof TableDeletionEntry) { - TableDeletionEntry tableDeletionEntry = (TableDeletionEntry) deletion; - String tableName = tableDeletionEntry.getTableName(); - long matchSize = - devicesInFile.stream() - .filter( - device -> { - if (logger.isDebugEnabled()) { - logger.debug( - "device is {}, deviceTable is {}, tableDeletionEntry.getPredicate().matches(device) is {}", - device, - device.getTableName(), - tableDeletionEntry.getPredicate().matches(device)); - } - return tableName.equals(device.getTableName()) - && tableDeletionEntry.getPredicate().matches(device); - }) - .count(); - onlyOneTable = matchSize == devicesInFile.size(); - if (logger.isDebugEnabled()) { - logger.debug( - "tableName is {}, matchSize is {}, onlyOneTable is {}", - tableName, - matchSize, - onlyOneTable); - } + TableDeletionEntry tableDeletionEntry = (TableDeletionEntry) deletion; + tableDeletionEntry = + evolvedSchema != null + ? 
evolvedSchema.rewriteToOriginal(tableDeletionEntry) + : tableDeletionEntry; + boolean isDropMeasurementExist = + !tableDeletionEntry.getPredicate().getMeasurementNames().isEmpty(); + TagPredicateType tagPredicateType = tableDeletionEntry.getPredicate().getTagPredicateType(); + + String tableName = tableDeletionEntry.getTableName(); + TableDeletionEntry finalTableDeletionEntry = tableDeletionEntry; + long matchSize = + devicesInFile.stream() + .filter( + device -> { + if (logger.isDebugEnabled()) { + logger.debug( + "device is {}, deviceTable is {}, tableDeletionEntry.getPredicate().matches(device) is {}", + device, + device.getTableName(), + finalTableDeletionEntry.getPredicate().matches(device)); + } + return tableName.equals(device.getTableName()) + && finalTableDeletionEntry.getPredicate().matches(device); + }) + .count(); + onlyOneTable = matchSize == devicesInFile.size(); + if (logger.isDebugEnabled()) { + logger.debug( + "tableName is {}, matchSize is {}, onlyOneTable is {}", + tableName, + matchSize, + onlyOneTable); } if (onlyOneTable) { - int matchSize = 0; + matchSize = 0; for (IDeviceID device : devicesInFile) { Optional optStart = deviceTimeIndex.getStartTime(device); Optional optEnd = deviceTimeIndex.getEndTime(device); @@ -3306,11 +3608,12 @@ private void deleteDataInSealedFiles(Collection sealedTsFiles, M fileEndTime); } if (isFileFullyMatchedByTime(deletion, fileStartTime, fileEndTime) - && idPredicateType.equals(IDPredicate.IDPredicateType.NOP) + && tagPredicateType.equals(TagPredicateType.NOP) && !isDropMeasurementExist) { ++matchSize; } else { - deletedByMods.add(sealedTsFile); + involvedModificationFiles.add( + new Pair<>(sealedTsFile.getModFileForWrite(), tableDeletionEntry)); break; } } @@ -3326,20 +3629,17 @@ private void deleteDataInSealedFiles(Collection sealedTsFiles, M } } } else { - involvedModificationFiles.add(sealedTsFile.getModFileForWrite()); + involvedModificationFiles.add( + new Pair<>(sealedTsFile.getModFileForWrite(), 
tableDeletionEntry)); } } else { - involvedModificationFiles.add(sealedTsFile.getModFileForWrite()); + involvedModificationFiles.add( + new Pair<>( + sealedTsFile.getModFileForWrite(), + evolvedSchema != null ? evolvedSchema.rewriteToOriginal(deletion) : deletion)); } } - for (TsFileResource tsFileResource : deletedByMods) { - if (tsFileResource.isClosed() - || !tsFileResource.getProcessor().deleteDataInMemory(deletion)) { - involvedModificationFiles.add(tsFileResource.getModFileForWrite()); - } // else do nothing - } - if (!deletedByFiles.isEmpty()) { deleteTsFileCompletely(deletedByFiles); if (logger.isDebugEnabled()) { @@ -3356,10 +3656,10 @@ private void deleteDataInSealedFiles(Collection sealedTsFiles, M List exceptions = involvedModificationFiles.parallelStream() .map( - modFile -> { + modFileEntryPair -> { try { - modFile.write(deletion); - modFile.close(); + modFileEntryPair.getLeft().write(modFileEntryPair.getRight()); + modFileEntryPair.getLeft().close(); } catch (Exception e) { return e; } @@ -4445,7 +4745,7 @@ public void insert(InsertRowsOfOneDeviceNode insertRowsOfOneDeviceNode) // infoForMetrics[2]: ScheduleWalTimeCost // infoForMetrics[3]: ScheduleMemTableTimeCost // infoForMetrics[4]: InsertedPointsNumber - for (Map.Entry entry : tsFileProcessorMap.entrySet()) { + for (Entry entry : tsFileProcessorMap.entrySet()) { TsFileProcessor tsFileProcessor = entry.getKey(); InsertRowsNode subInsertRowsNode = entry.getValue(); try { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DeviceLastFlushTime.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DeviceLastFlushTime.java index f02044b041472..3e72acaa34dbc 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DeviceLastFlushTime.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DeviceLastFlushTime.java @@ -19,6 +19,9 @@ package 
org.apache.iotdb.db.storageengine.dataregion; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; + import org.apache.tsfile.file.metadata.IDeviceID; import java.util.HashMap; @@ -53,4 +56,13 @@ public ILastFlushTime degradeLastFlushTime() { Map getDeviceLastFlushTimeMap() { return deviceLastFlushTimeMap; } + + @Override + public void accept(SchemaEvolution schemaEvolution) { + if (!(schemaEvolution instanceof TableRename)) { + return; + } + TableRename tableRename = (TableRename) schemaEvolution; + tableRename.rewriteMap(deviceLastFlushTimeMap); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/HashLastFlushTimeMap.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/HashLastFlushTimeMap.java index 3f0abfd3e2481..d02835015eb58 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/HashLastFlushTimeMap.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/HashLastFlushTimeMap.java @@ -20,12 +20,15 @@ package org.apache.iotdb.db.storageengine.dataregion; import org.apache.iotdb.db.storageengine.StorageEngine; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; import org.apache.tsfile.file.metadata.IDeviceID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Map; +import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; public class HashLastFlushTimeMap implements ILastFlushTimeMap { @@ -70,7 +73,7 @@ public void updateMultiDeviceFlushedTime( timePartitionId, id -> new DeviceLastFlushTime()); long memIncr = 0L; - for (Map.Entry entry : flushedTimeMap.entrySet()) { + for (Entry entry : flushedTimeMap.entrySet()) { if 
(flushTimeMapForPartition.getLastFlushTime(entry.getKey()) == Long.MIN_VALUE) { memIncr += HASHMAP_NODE_BASIC_SIZE + entry.getKey().ramBytesUsed(); } @@ -93,7 +96,7 @@ public void upgradeAndUpdateMultiDeviceFlushedTime( long maxFlushTime = flushTimeMapForPartition.getLastFlushTime(null); ILastFlushTime newDeviceLastFlushTime = new DeviceLastFlushTime(); long memIncr = 0; - for (Map.Entry entry : flushedTimeMap.entrySet()) { + for (Entry entry : flushedTimeMap.entrySet()) { memIncr += HASHMAP_NODE_BASIC_SIZE + entry.getKey().ramBytesUsed(); newDeviceLastFlushTime.updateLastFlushTime(entry.getKey(), entry.getValue()); maxFlushTime = Math.max(maxFlushTime, entry.getValue()); @@ -104,7 +107,7 @@ public void upgradeAndUpdateMultiDeviceFlushedTime( } else { // go here when DeviceLastFlushTime was recovered by wal recovery long memIncr = 0; - for (Map.Entry entry : flushedTimeMap.entrySet()) { + for (Entry entry : flushedTimeMap.entrySet()) { if (flushTimeMapForPartition.getLastFlushTime(entry.getKey()) == Long.MIN_VALUE) { memIncr += HASHMAP_NODE_BASIC_SIZE + entry.getKey().ramBytesUsed(); } @@ -131,7 +134,7 @@ public void updatePartitionFlushedTime(long timePartitionId, long maxFlushedTime // go here when DeviceLastFlushTime was recovered by wal recovery DeviceLastFlushTime deviceLastFlushTime = (DeviceLastFlushTime) flushTimeMapForPartition; Map flushedTimeMap = deviceLastFlushTime.getDeviceLastFlushTimeMap(); - for (Map.Entry entry : flushedTimeMap.entrySet()) { + for (Entry entry : flushedTimeMap.entrySet()) { flushTimeMapForPartition.updateLastFlushTime(entry.getKey(), entry.getValue()); } } @@ -139,7 +142,7 @@ public void updatePartitionFlushedTime(long timePartitionId, long maxFlushedTime @Override public void updateMultiDeviceGlobalFlushedTime(Map globalFlushedTimeMap) { - for (Map.Entry entry : globalFlushedTimeMap.entrySet()) { + for (Entry entry : globalFlushedTimeMap.entrySet()) { globalLatestFlushedTimeForEachDevice.merge(entry.getKey(), entry.getValue(), 
Math::max); } } @@ -161,7 +164,7 @@ public boolean checkAndCreateFlushedTimePartition( // For insert @Override public void updateLatestFlushTime(long partitionId, Map updateMap) { - for (Map.Entry entry : updateMap.entrySet()) { + for (Entry entry : updateMap.entrySet()) { partitionLatestFlushedTime .computeIfAbsent(partitionId, id -> new DeviceLastFlushTime()) .updateLastFlushTime(entry.getKey(), entry.getValue()); @@ -212,4 +215,15 @@ public long getMemSize(long partitionId) { } return 0; } + + @Override + public void accept(SchemaEvolution schemaEvolution) { + if (!(schemaEvolution instanceof TableRename)) { + return; + } + + TableRename tableRename = (TableRename) schemaEvolution; + tableRename.rewriteMap(globalLatestFlushedTimeForEachDevice); + partitionLatestFlushedTime.values().forEach(t -> t.accept(schemaEvolution)); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTime.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTime.java index be68369a42b87..9b685407326f0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTime.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTime.java @@ -19,6 +19,8 @@ package org.apache.iotdb.db.storageengine.dataregion; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; + import org.apache.tsfile.file.metadata.IDeviceID; public interface ILastFlushTime { @@ -28,4 +30,6 @@ public interface ILastFlushTime { void updateLastFlushTime(IDeviceID device, long time); ILastFlushTime degradeLastFlushTime(); + + void accept(SchemaEvolution schemaEvolution); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTimeMap.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTimeMap.java index 
7bdd141bf6b5e..ca3a5e37ced69 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTimeMap.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTimeMap.java @@ -19,6 +19,8 @@ package org.apache.iotdb.db.storageengine.dataregion; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; + import org.apache.tsfile.file.metadata.IDeviceID; import java.util.Map; @@ -63,4 +65,6 @@ void upgradeAndUpdateMultiDeviceFlushedTime( void degradeLastFlushTime(long partitionId); long getMemSize(long partitionId); + + void accept(SchemaEvolution schemaEvolution); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/PartitionLastFlushTime.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/PartitionLastFlushTime.java index a5976861441e7..e37ce43b930a0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/PartitionLastFlushTime.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/PartitionLastFlushTime.java @@ -19,6 +19,8 @@ package org.apache.iotdb.db.storageengine.dataregion; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; + import org.apache.tsfile.file.metadata.IDeviceID; public class PartitionLastFlushTime implements ILastFlushTime { @@ -43,4 +45,9 @@ public void updateLastFlushTime(IDeviceID device, long time) { public ILastFlushTime degradeLastFlushTime() { return this; } + + @Override + public void accept(SchemaEvolution schemaEvolution) { + // no-op + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/FastCompactionPerformer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/FastCompactionPerformer.java index 
54b21ddd382fe..1390fa2bdcfb3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/FastCompactionPerformer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/FastCompactionPerformer.java @@ -48,6 +48,7 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.estimator.FastCrossSpaceCompactionEstimator; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; import org.apache.tsfile.common.conf.TSFileDescriptor; @@ -75,6 +76,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import java.util.stream.Collectors; +import java.util.stream.Stream; public class FastCompactionPerformer implements ICrossCompactionPerformer, ISeqCompactionPerformer, IUnseqCompactionPerformer { @@ -103,6 +106,7 @@ public class FastCompactionPerformer private final boolean isCrossCompaction; private EncryptParameter encryptParameter; + private final Pair maxTsFileSetEndVersionAndMinResource; @TestOnly public FastCompactionPerformer( @@ -122,6 +126,7 @@ public FastCompactionPerformer( new EncryptParameter( TSFileDescriptor.getInstance().getConfig().getEncryptType(), TSFileDescriptor.getInstance().getConfig().getEncryptKey()); + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } public FastCompactionPerformer( @@ -139,6 +144,9 @@ public FastCompactionPerformer( isCrossCompaction = true; } this.encryptParameter = encryptParameter; + this.maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource( + Stream.concat(seqFiles.stream(), 
unseqFiles.stream()).collect(Collectors.toList())); } @TestOnly @@ -148,27 +156,45 @@ public FastCompactionPerformer(boolean isCrossCompaction) { new EncryptParameter( TSFileDescriptor.getInstance().getConfig().getEncryptType(), TSFileDescriptor.getInstance().getConfig().getEncryptKey()); + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } public FastCompactionPerformer(boolean isCrossCompaction, EncryptParameter encryptParameter) { this.isCrossCompaction = isCrossCompaction; this.encryptParameter = encryptParameter; + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } @Override public void perform() throws Exception { this.subTaskSummary.setTemporalFileNum(targetFiles.size()); + List allSourceFiles = + Stream.concat(seqFiles.stream(), unseqFiles.stream()) + .sorted(TsFileResource::compareFileName) + .collect(Collectors.toList()); + Pair maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(allSourceFiles); + try (MultiTsFileDeviceIterator deviceIterator = new MultiTsFileDeviceIterator(seqFiles, unseqFiles, readerCacheMap); AbstractCompactionWriter compactionWriter = isCrossCompaction ? 
new FastCrossCompactionWriter( - targetFiles, seqFiles, readerCacheMap, encryptParameter) + targetFiles, + seqFiles, + readerCacheMap, + encryptParameter, + maxTsFileSetEndVersionAndMinResource.left) : new FastInnerCompactionWriter(targetFiles, encryptParameter)) { List schemas = CompactionTableSchemaCollector.collectSchema( - seqFiles, unseqFiles, readerCacheMap, deviceIterator.getDeprecatedTableSchemaMap()); - compactionWriter.setSchemaForAllTargetFile(schemas); + seqFiles, + unseqFiles, + readerCacheMap, + deviceIterator.getDeprecatedTableSchemaMap(), + maxTsFileSetEndVersionAndMinResource); + + compactionWriter.setSchemaForAllTargetFile(schemas, maxTsFileSetEndVersionAndMinResource); readModification(seqFiles); readModification(unseqFiles); while (deviceIterator.hasNextDevice()) { @@ -184,10 +210,23 @@ public void perform() throws Exception { sortedSourceFiles.addAll(unseqFiles); boolean isTreeModel = !isAligned || device.getTableName().startsWith("root."); long ttl = deviceIterator.getTTLForCurrentDevice(); - sortedSourceFiles.removeIf(x -> x.definitelyNotContains(device)); + sortedSourceFiles.removeIf( + x -> { + EvolvedSchema evolvedSchema = + x.getMergedEvolvedSchema(maxTsFileSetEndVersionAndMinResource.left); + IDeviceID originalDevice = device; + if (evolvedSchema != null) { + originalDevice = evolvedSchema.rewriteToOriginal(device); + } + return x.definitelyNotContains(originalDevice); + }); // checked above - //noinspection OptionalGetWithoutIsPresent - sortedSourceFiles.sort(Comparator.comparingLong(x -> x.getStartTime(device).get())); + sortedSourceFiles.sort( + Comparator.comparingLong( + x -> { + //noinspection OptionalGetWithoutIsPresent + return x.getStartTime(device, maxTsFileSetEndVersionAndMinResource.left).get(); + })); ModEntry ttlDeletion = null; if (ttl != Long.MAX_VALUE) { ttlDeletion = @@ -273,7 +312,8 @@ private void compactAlignedSeries( measurementSchemas, deviceId, taskSummary, - ignoreAllNullRows) + ignoreAllNullRows, + 
maxTsFileSetEndVersionAndMinResource) .call(); subTaskSummary.increase(taskSummary); } @@ -333,7 +373,8 @@ private void compactNonAlignedSeries( measurementsForEachSubTask[i], deviceID, taskSummary, - i))); + i, + maxTsFileSetEndVersionAndMinResource))); taskSummaryList.add(taskSummary); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java index d406286e37f64..1bc7ad548a0ec 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java @@ -27,6 +27,7 @@ import org.apache.iotdb.db.exception.StorageEngineException; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.ISeqCompactionPerformer; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.CompactionTaskSummary; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionTableSchema; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionTableSchemaCollector; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.MultiTsFileDeviceIterator; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.executor.batch.BatchedReadChunkAlignedSeriesCompactionExecutor; @@ -36,6 +37,7 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.estimator.AbstractInnerSpaceEstimator; import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.estimator.ReadChunkInnerCompactionEstimator; import 
org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; import org.apache.iotdb.db.utils.EncryptDBUtils; @@ -71,7 +73,8 @@ public class ReadChunkCompactionPerformer implements ISeqCompactionPerformer { * IoTDBDescriptor.getInstance().getConfig().getChunkMetadataSizeProportion()); private Schema schema = null; - private EncryptParameter firstEncryptParameter; + private final EncryptParameter firstEncryptParameter; + protected Pair maxTsFileSetEndVersionAndMinResource; @TestOnly public ReadChunkCompactionPerformer(List sourceFiles, TsFileResource targetFile) { @@ -91,6 +94,7 @@ public ReadChunkCompactionPerformer( setSourceFiles(sourceFiles); setTargetFiles(targetFiles); this.firstEncryptParameter = EncryptDBUtils.getDefaultFirstEncryptParam(); + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } public ReadChunkCompactionPerformer( @@ -100,18 +104,23 @@ public ReadChunkCompactionPerformer( setSourceFiles(sourceFiles); setTargetFiles(targetFiles); this.firstEncryptParameter = encryptParameter; + this.maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(sourceFiles); } @TestOnly public ReadChunkCompactionPerformer(List sourceFiles) { setSourceFiles(sourceFiles); this.firstEncryptParameter = EncryptDBUtils.getDefaultFirstEncryptParam(); + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } public ReadChunkCompactionPerformer( List sourceFiles, EncryptParameter encryptParameter) { setSourceFiles(sourceFiles); this.firstEncryptParameter = encryptParameter; + this.maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(sourceFiles); } @TestOnly @@ -120,6 +129,7 @@ public ReadChunkCompactionPerformer() { new EncryptParameter( 
TSFileDescriptor.getInstance().getConfig().getEncryptType(), TSFileDescriptor.getInstance().getConfig().getEncryptKey()); + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } public ReadChunkCompactionPerformer(EncryptParameter encryptParameter) { @@ -138,7 +148,8 @@ public void perform() CompactionTableSchemaCollector.collectSchema( seqFiles, deviceIterator.getReaderMap(), - deviceIterator.getDeprecatedTableSchemaMap()); + deviceIterator.getDeprecatedTableSchemaMap(), + maxTsFileSetEndVersionAndMinResource); while (deviceIterator.hasNextDevice()) { currentWriter = getAvailableCompactionWriter(); Pair deviceInfo = deviceIterator.nextDevice(); @@ -204,13 +215,26 @@ private void rollCompactionFileWriter() throws IOException { } private void useNewWriter() throws IOException { + TsFileResource tsFileResource = targetResources.get(currentTargetFileIndex); currentWriter = new CompactionTsFileWriter( - targetResources.get(currentTargetFileIndex).getTsFile(), + tsFileResource, memoryBudgetForFileWriter, CompactionType.INNER_SEQ_COMPACTION, - firstEncryptParameter); - currentWriter.setSchema(CompactionTableSchemaCollector.copySchema(schema)); + firstEncryptParameter, + maxTsFileSetEndVersionAndMinResource.getLeft()); + + Schema schema = CompactionTableSchemaCollector.copySchema(this.schema); + TsFileResource minVersionResource = maxTsFileSetEndVersionAndMinResource.getRight(); + // only null during test + tsFileResource.setTsFileManager( + minVersionResource != null ? minVersionResource.getTsFileManager() : null); + EvolvedSchema evolvedSchema = + tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersionAndMinResource.getLeft()); + currentWriter.setSchema( + evolvedSchema != null + ? 
evolvedSchema.rewriteToOriginal(schema, CompactionTableSchema::new) + : schema); } @Override @@ -352,6 +376,8 @@ private void compactNotAlignedSeries( @Override public void setSourceFiles(List seqFiles) { this.seqFiles = seqFiles; + this.maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(seqFiles); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadPointCompactionPerformer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadPointCompactionPerformer.java index c58870357d915..00cbfddfd7926 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadPointCompactionPerformer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadPointCompactionPerformer.java @@ -19,7 +19,6 @@ package org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl; import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.path.AlignedFullPath; import org.apache.iotdb.commons.path.IFullPath; import org.apache.iotdb.commons.path.NonAlignedFullPath; @@ -71,6 +70,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.stream.Collectors; +import java.util.stream.Stream; public class ReadPointCompactionPerformer implements ICrossCompactionPerformer, IUnseqCompactionPerformer { @@ -153,26 +153,45 @@ public void perform() throws Exception { // Do not close device iterator, because tsfile reader is managed by FileReaderManager. 
MultiTsFileDeviceIterator deviceIterator = new MultiTsFileDeviceIterator(seqFiles, unseqFiles); + List allSourceFiles = + Stream.concat(seqFiles.stream(), unseqFiles.stream()) + .sorted(TsFileResource::compareFileName) + .collect(Collectors.toList()); + Pair maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(allSourceFiles); + List schemas = CompactionTableSchemaCollector.collectSchema( seqFiles, unseqFiles, deviceIterator.getReaderMap(), - deviceIterator.getDeprecatedTableSchemaMap()); - compactionWriter.setSchemaForAllTargetFile(schemas); + deviceIterator.getDeprecatedTableSchemaMap(), + maxTsFileSetEndVersionAndMinResource); + + compactionWriter.setSchemaForAllTargetFile(schemas, maxTsFileSetEndVersionAndMinResource); while (deviceIterator.hasNextDevice()) { checkThreadInterrupted(); Pair deviceInfo = deviceIterator.nextDevice(); IDeviceID device = deviceInfo.left; boolean isAligned = deviceInfo.right; - queryDataSource.fillOrderIndexes(device, true); + queryDataSource.fillOrderIndexes(device, true, maxTsFileSetEndVersionAndMinResource.left); if (isAligned) { compactAlignedSeries( - device, deviceIterator, compactionWriter, fragmentInstanceContext, queryDataSource); + device, + deviceIterator, + compactionWriter, + fragmentInstanceContext, + queryDataSource, + maxTsFileSetEndVersionAndMinResource); } else { compactNonAlignedSeries( - device, deviceIterator, compactionWriter, fragmentInstanceContext, queryDataSource); + device, + deviceIterator, + compactionWriter, + fragmentInstanceContext, + queryDataSource, + maxTsFileSetEndVersionAndMinResource); } summary.setTemporaryFileSize(compactionWriter.getWriterSize()); } @@ -208,9 +227,11 @@ private void compactAlignedSeries( MultiTsFileDeviceIterator deviceIterator, AbstractCompactionWriter compactionWriter, FragmentInstanceContext fragmentInstanceContext, - QueryDataSource queryDataSource) - throws IOException, MetadataException { - Map schemaMap = 
deviceIterator.getAllSchemasOfCurrentDevice(); + QueryDataSource queryDataSource, + Pair maxTsFileSetEndVersionAndMinResource) + throws IOException { + Map schemaMap = + deviceIterator.getAllSchemasOfCurrentDevice(maxTsFileSetEndVersionAndMinResource); IMeasurementSchema timeSchema = schemaMap.remove(TsFileConstant.TIME_COLUMN_ID); List measurementSchemas = new ArrayList<>(schemaMap.values()); if (measurementSchemas.isEmpty()) { @@ -230,16 +251,15 @@ private void compactAlignedSeries( new ArrayList<>(schemaMap.keySet()), fragmentInstanceContext, queryDataSource, - true); + true, + maxTsFileSetEndVersionAndMinResource.left); if (dataBlockReader.hasNextBatch()) { - // chunkgroup is serialized only when at least one timeseries under this device has data compactionWriter.startChunkGroup(device, true); - measurementSchemas.add(0, timeSchema); compactionWriter.startMeasurement( TsFileConstant.TIME_COLUMN_ID, new AlignedChunkWriterImpl( - measurementSchemas.remove(0), + timeSchema, measurementSchemas, EncryptUtils.getEncryptParameter(getEncryptParameter())), 0); @@ -256,9 +276,11 @@ private void compactNonAlignedSeries( MultiTsFileDeviceIterator deviceIterator, AbstractCompactionWriter compactionWriter, FragmentInstanceContext fragmentInstanceContext, - QueryDataSource queryDataSource) + QueryDataSource queryDataSource, + Pair maxTsFileSetEndVersionAndMinResource) throws IOException, InterruptedException, ExecutionException { - Map schemaMap = deviceIterator.getAllSchemasOfCurrentDevice(); + Map schemaMap = + deviceIterator.getAllSchemasOfCurrentDevice(maxTsFileSetEndVersionAndMinResource); List allMeasurements = new ArrayList<>(schemaMap.keySet()); allMeasurements.sort((String::compareTo)); int subTaskNums = Math.min(allMeasurements.size(), SUB_TASK_NUM); @@ -287,7 +309,8 @@ private void compactNonAlignedSeries( new QueryDataSource(queryDataSource), compactionWriter, schemaMap, - i))); + i, + maxTsFileSetEndVersionAndMinResource.left))); } for (Future future : futures) { 
future.get(); @@ -311,7 +334,8 @@ public static IDataBlockReader constructReader( List allSensors, FragmentInstanceContext fragmentInstanceContext, QueryDataSource queryDataSource, - boolean isAlign) { + boolean isAlign, + long maxTsFileSetEndVersion) { IFullPath seriesPath; if (isAlign) { seriesPath = new AlignedFullPath(deviceId, measurementIds, measurementSchemas); @@ -320,7 +344,12 @@ public static IDataBlockReader constructReader( } return new SeriesDataBlockReader( - seriesPath, new HashSet<>(allSensors), fragmentInstanceContext, queryDataSource, true); + seriesPath, + new HashSet<>(allSensors), + fragmentInstanceContext, + queryDataSource, + true, + maxTsFileSetEndVersion); } @SuppressWarnings("squid:S1172") @@ -351,8 +380,16 @@ protected AbstractCompactionWriter getCompactionWriter( throws IOException { if (!seqFileResources.isEmpty() && !unseqFileResources.isEmpty()) { // cross space + List allSourceFiles = + Stream.concat(seqFileResources.stream(), unseqFileResources.stream()) + .collect(Collectors.toList()); + Pair maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(allSourceFiles); return new ReadPointCrossCompactionWriter( - targetFileResources, seqFileResources, encryptParameter); + targetFileResources, + seqFileResources, + encryptParameter, + maxTsFileSetEndVersionAndMinResource.left); } else { // inner space return new ReadPointInnerCompactionWriter(targetFileResources, encryptParameter); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InnerSpaceCompactionTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InnerSpaceCompactionTask.java index c61d2275ac1af..7236e78abd63a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InnerSpaceCompactionTask.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InnerSpaceCompactionTask.java @@ -378,6 +378,7 @@ private void calculateRenamedTargetFiles(boolean needAdjustSourceFilePosition) new File(skippedSourceFile.getParentFile().getPath() + File.separator + newFileName), TsFileResourceStatus.COMPACTING); filesView.renamedTargetFiles.add(renamedTargetFile); + renamedTargetFile.setTsFileManager(tsFileManager); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/RepairUnsortedFileCompactionTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/RepairUnsortedFileCompactionTask.java index fe1957975cf63..d027957c6e656 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/RepairUnsortedFileCompactionTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/RepairUnsortedFileCompactionTask.java @@ -121,9 +121,10 @@ protected void prepare() throws IOException { @Override protected void calculateSourceFilesAndTargetFiles() throws IOException { filesView.sourceFilesInLog = filesView.sourceFilesInCompactionPerformer; - filesView.targetFilesInLog = - Collections.singletonList( - new TsFileResource(generateTargetFile(), TsFileResourceStatus.COMPACTING)); + TsFileResource targetResource = + new TsFileResource(generateTargetFile(), TsFileResourceStatus.COMPACTING); + targetResource.setTsFileManager(tsFileManager); + filesView.targetFilesInLog = Collections.singletonList(targetResource); filesView.targetFilesInPerformer = filesView.targetFilesInLog; } @@ -137,7 +138,7 @@ private File generateTargetFile() throws IOException { sourceFile.isSeq() ? lastAllocatedFileTimestamp.incrementAndGet() : sourceFileName.getTime(), - sourceFile.isSeq() ? 
0 : sourceFileName.getVersion(), + sourceFileName.getVersion(), sourceFileName.getInnerCompactionCnt() + 1, sourceFileName.getCrossCompactionCnt()); // if source file is sequence, the sequence data targetFileDir should be replaced to unsequence diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/FastCompactionPerformerSubTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/FastCompactionPerformerSubTask.java index 873993d97df4c..c8bd778c0ffc7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/FastCompactionPerformerSubTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/FastCompactionPerformerSubTask.java @@ -31,6 +31,7 @@ import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; +import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory.ModsSerializer; import org.apache.tsfile.exception.write.PageException; import org.apache.tsfile.file.metadata.IDeviceID; @@ -76,6 +77,8 @@ public class FastCompactionPerformerSubTask implements Callable { private List measurementSchemas; + private final Pair maxTsFileSetEndVersionAndMinResource; + private Map compactionSeriesContextMap; /** Used for nonAligned timeseries. 
*/ @@ -90,7 +93,8 @@ public FastCompactionPerformerSubTask( List measurements, IDeviceID deviceId, FastCompactionTaskSummary summary, - int subTaskId) { + int subTaskId, + Pair maxTsFileSetEndVersionAndMinResource) { this.compactionWriter = compactionWriter; this.subTaskId = subTaskId; this.timeseriesMetadataOffsetMap = timeseriesMetadataOffsetMap; @@ -102,6 +106,7 @@ public FastCompactionPerformerSubTask( this.measurements = measurements; this.summary = summary; this.ignoreAllNullRows = true; + this.maxTsFileSetEndVersionAndMinResource = maxTsFileSetEndVersionAndMinResource; } public FastCompactionPerformerSubTask( @@ -114,7 +119,8 @@ public FastCompactionPerformerSubTask( List measurements, IDeviceID deviceId, FastCompactionTaskSummary summary, - int subTaskId) { + int subTaskId, + Pair maxTsFileSetEndVersionAndMinResource) { this.compactionWriter = compactionWriter; this.subTaskId = subTaskId; this.compactionSeriesContextMap = compactionSeriesContextMap; @@ -127,6 +133,7 @@ public FastCompactionPerformerSubTask( this.measurements = measurements; this.summary = summary; this.ignoreAllNullRows = true; + this.maxTsFileSetEndVersionAndMinResource = maxTsFileSetEndVersionAndMinResource; } /** Used for aligned timeseries. 
*/ @@ -134,13 +141,13 @@ public FastCompactionPerformerSubTask( AbstractCompactionWriter compactionWriter, Map>> timeseriesMetadataOffsetMap, Map readerCacheMap, - Map> - modificationCacheMap, + Map> modificationCacheMap, List sortedSourceFiles, List measurementSchemas, IDeviceID deviceId, FastCompactionTaskSummary summary, - boolean ignoreAllNullRows) { + boolean ignoreAllNullRows, + Pair maxTsFileSetEndVersionAndMinResource) { this.compactionWriter = compactionWriter; this.subTaskId = 0; this.timeseriesMetadataOffsetMap = timeseriesMetadataOffsetMap; @@ -152,6 +159,7 @@ public FastCompactionPerformerSubTask( this.measurementSchemas = measurementSchemas; this.summary = summary; this.ignoreAllNullRows = ignoreAllNullRows; + this.maxTsFileSetEndVersionAndMinResource = maxTsFileSetEndVersionAndMinResource; } @Override @@ -166,7 +174,8 @@ public Void call() sortedSourceFiles, deviceId, subTaskId, - summary); + summary, + maxTsFileSetEndVersionAndMinResource); for (String measurement : measurements) { seriesCompactionExecutor.setNewMeasurement( compactionSeriesContextMap.get(measurement).getFileTimeseriesMetdataOffsetMap()); @@ -191,7 +200,8 @@ public Void call() subTaskId, measurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); } else { seriesCompactionExecutor = new FastAlignedSeriesCompactionExecutor( @@ -204,7 +214,8 @@ public Void call() subTaskId, measurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); } seriesCompactionExecutor.execute(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/ReadPointPerformerSubTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/ReadPointPerformerSubTask.java index 74f7259074508..741a6f314cd92 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/ReadPointPerformerSubTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/ReadPointPerformerSubTask.java @@ -57,6 +57,7 @@ public class ReadPointPerformerSubTask implements Callable { private final AbstractCompactionWriter compactionWriter; private final Map schemaMap; private final int taskId; + private final long maxTsFileSetEndVersion; public ReadPointPerformerSubTask( IDeviceID device, @@ -65,7 +66,8 @@ public ReadPointPerformerSubTask( QueryDataSource queryDataSource, AbstractCompactionWriter compactionWriter, Map schemaMap, - int taskId) { + int taskId, + long maxTsFileSetEndVersion) { this.device = device; this.measurementList = measurementList; this.fragmentInstanceContext = fragmentInstanceContext; @@ -73,6 +75,7 @@ public ReadPointPerformerSubTask( this.compactionWriter = compactionWriter; this.schemaMap = schemaMap; this.taskId = taskId; + this.maxTsFileSetEndVersion = maxTsFileSetEndVersion; } @Override @@ -88,7 +91,8 @@ public Void call() throws Exception { new ArrayList<>(schemaMap.keySet()), fragmentInstanceContext, queryDataSource, - false); + false, + maxTsFileSetEndVersion); if (dataBlockReader.hasNextBatch()) { compactionWriter.startMeasurement( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchema.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchema.java index 3f6e83cbe96c0..8d43305d94e19 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchema.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchema.java @@ -33,6 +33,18 @@ public 
CompactionTableSchema(String tableName) { super(tableName); } + public CompactionTableSchema(TableSchema tableSchema) { + this(tableSchema.getTableName(), tableSchema.getColumnSchemas(), tableSchema.getColumnTypes()); + this.updatable = tableSchema.isUpdatable(); + } + + public CompactionTableSchema( + String tableName, + List columnSchemas, + List columnCategories) { + super(tableName, columnSchemas, columnCategories); + } + public boolean merge(TableSchema tableSchema) { if (tableSchema == null) { return true; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchemaCollector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchemaCollector.java index 55640c3fcfa4d..2c2f34e0fca1f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchemaCollector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchemaCollector.java @@ -20,9 +20,11 @@ package org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.file.metadata.TableSchema; import org.apache.tsfile.read.TsFileSequenceReader; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.write.schema.Schema; import java.io.IOException; @@ -42,7 +44,8 @@ public static List collectSchema( List seqFiles, List unseqFiles, Map readerMap, - Map> deprecatedTableSchemaMap) + Map> deprecatedTableSchemaMap, + Pair maxTsFileSetEndVersionAndAssociatedResource) throws IOException { List targetSchemas = new ArrayList<>(seqFiles.size()); Schema schema = @@ -51,7 +54,8 @@ public static List collectSchema( 
.sorted(TsFileResource::compareFileName) .collect(Collectors.toList()), readerMap, - deprecatedTableSchemaMap); + deprecatedTableSchemaMap, + maxTsFileSetEndVersionAndAssociatedResource); targetSchemas.add(schema); for (int i = 1; i < seqFiles.size(); i++) { @@ -72,10 +76,12 @@ public static Schema copySchema(Schema source) { public static Schema collectSchema( List sourceFiles, Map readerMap, - Map> deprecatedTableSchemaMap) + Map> deprecatedTableSchemaMap, + Pair maxTsFileSetEndVersionAndAssociatedResource) throws IOException { Schema targetSchema = new Schema(); Map targetTableSchemaMap = new HashMap<>(); + for (int i = 0; i < sourceFiles.size(); i++) { TsFileResource resource = sourceFiles.get(i); TsFileSequenceReader reader = readerMap.get(resource); @@ -84,12 +90,21 @@ public static Schema collectSchema( // v3 tsfile continue; } + + EvolvedSchema evolvedSchema = + resource.getMergedEvolvedSchema(maxTsFileSetEndVersionAndAssociatedResource.getLeft()); + for (Map.Entry entry : tableSchemaMap.entrySet()) { String tableName = entry.getKey(); TableSchema currentTableSchema = entry.getValue(); if (isTreeModel(currentTableSchema)) { continue; } + if (evolvedSchema != null) { + currentTableSchema = evolvedSchema.rewriteToFinal(currentTableSchema); + tableName = currentTableSchema.getTableName(); + } + // merge all id columns, measurement schema will be generated automatically when end chunk // group CompactionTableSchema collectedTableSchema = diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java index 61943047e541f..8dd27525d2879 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java @@ -36,11 +36,11 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.constant.CompactionTaskType; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.CompactionTaskManager; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.modification.ModFileManagement; import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.TreeDeletionEntry; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ArrayDeviceTimeIndex; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/MultiTsFileDeviceIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/MultiTsFileDeviceIterator.java index 9099586e87e5c..ab50d6740fa91 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/MultiTsFileDeviceIterator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/MultiTsFileDeviceIterator.java @@ -33,6 +33,8 @@ import org.apache.iotdb.db.storageengine.dataregion.modification.TreeDeletionEntry; import org.apache.iotdb.db.storageengine.dataregion.read.control.FileReaderManager; import 
org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; import org.apache.iotdb.db.utils.EncryptDBUtils; import org.apache.iotdb.db.utils.ModificationUtils; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; @@ -81,6 +83,7 @@ public class MultiTsFileDeviceIterator implements AutoCloseable { private long ttlForCurrentDevice; private long timeLowerBoundForCurrentDevice; private final String databaseName; + private final long maxTsFileSetEndVersion; /** * Used for compaction with read chunk performer. @@ -96,6 +99,18 @@ public MultiTsFileDeviceIterator(List tsFileResources) throws IO // sort the files from the newest to the oldest Collections.sort( this.tsFileResourcesSortedByDesc, TsFileResource::compareFileCreationOrderByDesc); + maxTsFileSetEndVersion = + this.tsFileResourcesSortedByDesc.stream() + .mapToLong( + // max endVersion of all filesets of a TsFile + resource -> + resource.getTsFileSets().stream() + .mapToLong(TsFileSet::getEndVersion) + .max() + .orElse(Long.MAX_VALUE)) + // overall max endVersion + .max() + .orElse(Long.MAX_VALUE); try { for (TsFileResource tsFileResource : this.tsFileResourcesSortedByDesc) { CompactionTsFileReader reader = @@ -104,7 +119,15 @@ public MultiTsFileDeviceIterator(List tsFileResources) throws IO CompactionType.INNER_SEQ_COMPACTION, EncryptDBUtils.getFirstEncryptParamFromTSFilePath(tsFileResource.getTsFilePath())); readerMap.put(tsFileResource, reader); - deviceIteratorMap.put(tsFileResource, reader.getAllDevicesIteratorWithIsAligned()); + TsFileDeviceIterator tsFileDeviceIterator; + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + tsFileDeviceIterator = + new ReorderedTsFileDeviceIterator(reader, evolvedSchema::rewriteToFinal); + } else { + 
tsFileDeviceIterator = reader.getAllDevicesIteratorWithIsAligned(); + } + deviceIteratorMap.put(tsFileResource, tsFileDeviceIterator); } } catch (Exception e) { // if there is any exception occurs @@ -129,12 +152,35 @@ public MultiTsFileDeviceIterator( // sort the files from the newest to the oldest Collections.sort( this.tsFileResourcesSortedByDesc, TsFileResource::compareFileCreationOrderByDesc); + + maxTsFileSetEndVersion = + this.tsFileResourcesSortedByDesc.stream() + .mapToLong( + // max endVersion of all filesets of a TsFile + resource -> + resource.getTsFileSets().stream() + .mapToLong(TsFileSet::getEndVersion) + .max() + .orElse(Long.MAX_VALUE)) + // overall max endVersion + .max() + .orElse(Long.MAX_VALUE); + for (TsFileResource tsFileResource : tsFileResourcesSortedByDesc) { TsFileSequenceReader reader = FileReaderManager.getInstance() .get(tsFileResource.getTsFilePath(), tsFileResource.getTsFileID(), true); readerMap.put(tsFileResource, reader); - deviceIteratorMap.put(tsFileResource, reader.getAllDevicesIteratorWithIsAligned()); + + TsFileDeviceIterator tsFileDeviceIterator; + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + tsFileDeviceIterator = + new ReorderedTsFileDeviceIterator(reader, evolvedSchema::rewriteToFinal); + } else { + tsFileDeviceIterator = reader.getAllDevicesIteratorWithIsAligned(); + } + deviceIteratorMap.put(tsFileResource, tsFileDeviceIterator); } } @@ -156,6 +202,19 @@ public MultiTsFileDeviceIterator( this.tsFileResourcesSortedByDesc, TsFileResource::compareFileCreationOrderByDesc); this.readerMap = readerMap; + maxTsFileSetEndVersion = + this.tsFileResourcesSortedByDesc.stream() + .mapToLong( + // max endVersion of all filesets of a TsFile + resource -> + resource.getTsFileSets().stream() + .mapToLong(TsFileSet::getEndVersion) + .max() + .orElse(Long.MAX_VALUE)) + // overall max endVersion + .max() + .orElse(Long.MAX_VALUE); + CompactionType type = 
null; if (!seqResources.isEmpty() && !unseqResources.isEmpty()) { type = CompactionType.CROSS_COMPACTION; @@ -172,7 +231,16 @@ public MultiTsFileDeviceIterator( type, EncryptDBUtils.getFirstEncryptParamFromTSFilePath(tsFileResource.getTsFilePath())); readerMap.put(tsFileResource, reader); - deviceIteratorMap.put(tsFileResource, reader.getAllDevicesIteratorWithIsAligned()); + + TsFileDeviceIterator tsFileDeviceIterator; + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + tsFileDeviceIterator = + new ReorderedTsFileDeviceIterator(reader, evolvedSchema::rewriteToFinal); + } else { + tsFileDeviceIterator = reader.getAllDevicesIteratorWithIsAligned(); + } + deviceIteratorMap.put(tsFileResource, tsFileDeviceIterator); } } @@ -260,7 +328,8 @@ public long getTimeLowerBoundForCurrentDevice() { * * @throws IOException if io errors occurred */ - public Map getAllSchemasOfCurrentDevice() throws IOException { + public Map getAllSchemasOfCurrentDevice( + Pair maxTsFileSetEndVersionAndMinResource) throws IOException { Map schemaMap = new ConcurrentHashMap<>(); // get schemas from the newest file to the oldest file for (TsFileResource resource : tsFileResourcesSortedByDesc) { @@ -278,12 +347,23 @@ public Map getAllSchemasOfCurrentDevice() throws IOEx schemaMap.keySet(), true, null); + EvolvedSchema evolvedSchema = + resource.getMergedEvolvedSchema(maxTsFileSetEndVersionAndMinResource.left); + if (evolvedSchema != null) { + // the device has been rewritten, should get the original name for rewriting + evolvedSchema.rewriteToFinal( + evolvedSchema.getOriginalTableName(currentDevice.left.getTableName()), + timeseriesMetadataList); + } + for (TimeseriesMetadata timeseriesMetadata : timeseriesMetadataList) { if (!schemaMap.containsKey(timeseriesMetadata.getMeasurementId()) && !timeseriesMetadata.getChunkMetadataList().isEmpty()) { - schemaMap.put( - timeseriesMetadata.getMeasurementId(), - 
reader.getMeasurementSchema(timeseriesMetadata.getChunkMetadataList())); + MeasurementSchema measurementSchema = + reader.getMeasurementSchema(timeseriesMetadata.getChunkMetadataList()); + // the column may be renamed + measurementSchema.setMeasurementName(timeseriesMetadata.getMeasurementId()); + schemaMap.put(timeseriesMetadata.getMeasurementId(), measurementSchema); } } } @@ -437,6 +517,12 @@ public Map getCompactionSeriesContextOfCurrentD true) .entrySet()) { String measurementId = entrySet.getKey(); + EvolvedSchema evolvedSchema = resource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + String originalTableName = + evolvedSchema.getOriginalTableName(currentDevice.left.getTableName()); + measurementId = evolvedSchema.getFinalColumnName(originalTableName, measurementId); + } if (!timeseriesMetadataOffsetMap.containsKey(measurementId)) { MeasurementSchema schema = reader.getMeasurementSchema(entrySet.getValue().left); timeseriesMetadataOffsetMap.put(measurementId, new Pair<>(schema, new HashMap<>())); @@ -497,10 +583,31 @@ public Map getCompactionSeriesContextOfCurrentD MetadataIndexNode firstMeasurementNodeOfCurrentDevice = iterator.getFirstMeasurementNodeOfCurrentDevice(); TsFileSequenceReader reader = readerMap.get(tsFileResource); + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + IDeviceID originalDeviceId = currentDevice.left; + if (evolvedSchema != null) { + // rewrite the deviceId to the original one so that we can use it to query the file + originalDeviceId = evolvedSchema.rewriteToOriginal(originalDeviceId); + } List alignedChunkMetadataList = reader.getAlignedChunkMetadataByMetadataIndexNode( - currentDevice.left, firstMeasurementNodeOfCurrentDevice, ignoreAllNullRows); + originalDeviceId, firstMeasurementNodeOfCurrentDevice, ignoreAllNullRows); applyModificationForAlignedChunkMetadataList(tsFileResource, alignedChunkMetadataList); + + if (evolvedSchema != null) { + // 
rewrite the measurementId to the final ones so that they can be aligned with other files + for (AbstractAlignedChunkMetadata abstractAlignedChunkMetadata : alignedChunkMetadataList) { + for (IChunkMetadata chunkMetadata : + abstractAlignedChunkMetadata.getValueChunkMetadataList()) { + if (chunkMetadata != null) { + chunkMetadata.setMeasurementUid( + evolvedSchema.getFinalColumnName( + originalDeviceId.getTableName(), chunkMetadata.getMeasurementUid())); + } + } + } + } + readerAndChunkMetadataList.add(new Pair<>(reader, alignedChunkMetadataList)); } @@ -522,7 +629,7 @@ private void applyModificationForAlignedChunkMetadataList( } IDeviceID device = currentDevice.getLeft(); ModEntry ttlDeletion = null; - Optional startTime = tsFileResource.getStartTime(device); + Optional startTime = tsFileResource.getStartTime(device, maxTsFileSetEndVersion); if (startTime.isPresent() && startTime.get() < timeLowerBoundForCurrentDevice) { ttlDeletion = CompactionUtils.convertTtlToDeletion(device, timeLowerBoundForCurrentDevice); } @@ -748,7 +855,7 @@ public String nextSeries() throws IllegalPathException { Map> chunkMetadataListMap = chunkMetadataCacheMap.get(reader); ModEntry ttlDeletion = null; - Optional startTime = resource.getStartTime(device); + Optional startTime = resource.getStartTime(device, maxTsFileSetEndVersion); if (startTime.isPresent() && startTime.get() < timeLowerBoundForCurrentDevice) { ttlDeletion = new TreeDeletionEntry( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/ReorderedTsFileDeviceIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/ReorderedTsFileDeviceIterator.java new file mode 100644 index 0000000000000..91ac97feb21c8 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/ReorderedTsFileDeviceIterator.java @@ -0,0 +1,86 @@ +/* + * Licensed to the 
Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils; + +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.MetadataIndexNode; +import org.apache.tsfile.read.TsFileSequenceReader; +import org.apache.tsfile.utils.Pair; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.function.Function; + +public class ReorderedTsFileDeviceIterator extends TransformedTsFileDeviceIterator { + + private final List, MetadataIndexNode>> + deviceIDAndFirstMeasurementNodeList = new ArrayList<>(); + private Iterator, MetadataIndexNode>> deviceIDListIterator; + private Pair, MetadataIndexNode> current; + + public ReorderedTsFileDeviceIterator( + TsFileSequenceReader reader, Function transformer) throws IOException { + super(reader, transformer); + collectAndSort(); + } + + public ReorderedTsFileDeviceIterator( + TsFileSequenceReader reader, String tableName, Function transformer) + throws IOException { + super(reader, tableName, transformer); + collectAndSort(); + } + + private void collectAndSort() throws IOException { + while 
(super.hasNext()) { + Pair next = super.next(); + deviceIDAndFirstMeasurementNodeList.add( + new Pair<>(next, super.getFirstMeasurementNodeOfCurrentDevice())); + } + deviceIDAndFirstMeasurementNodeList.sort(Comparator.comparing(p -> p.getLeft().getLeft())); + deviceIDListIterator = deviceIDAndFirstMeasurementNodeList.iterator(); + } + + @Override + public boolean hasNext() { + return deviceIDListIterator.hasNext(); + } + + @Override + public Pair next() { + Pair, MetadataIndexNode> next = deviceIDListIterator.next(); + current = next; + return next.left; + } + + @Override + public Pair current() { + return current == null ? null : current.left; + } + + @Override + public MetadataIndexNode getFirstMeasurementNodeOfCurrentDevice() { + // the devices have been reordered, cannot use the measurementNode + return current == null ? null : current.right; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/TransformedTsFileDeviceIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/TransformedTsFileDeviceIterator.java new file mode 100644 index 0000000000000..a361adb18e611 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/TransformedTsFileDeviceIterator.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils; + +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.read.TsFileDeviceIterator; +import org.apache.tsfile.read.TsFileSequenceReader; +import org.apache.tsfile.utils.Pair; + +import java.io.IOException; +import java.util.function.Function; + +public class TransformedTsFileDeviceIterator extends TsFileDeviceIterator { + + protected Function transformer; + + public TransformedTsFileDeviceIterator( + TsFileSequenceReader reader, Function transformer) throws IOException { + super(reader); + this.transformer = transformer; + } + + public TransformedTsFileDeviceIterator( + TsFileSequenceReader reader, String tableName, Function transformer) + throws IOException { + super(reader, tableName, null); + this.transformer = transformer; + } + + @Override + public Pair next() { + Pair next = super.next(); + next.left = transformer.apply(next.left); + return next; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/BatchedFastAlignedSeriesCompactionExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/BatchedFastAlignedSeriesCompactionExecutor.java index 0c1f12e9886a7..787452be2dc4c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/BatchedFastAlignedSeriesCompactionExecutor.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/BatchedFastAlignedSeriesCompactionExecutor.java @@ -89,7 +89,8 @@ public BatchedFastAlignedSeriesCompactionExecutor( int subTaskId, List measurementSchemas, FastCompactionTaskSummary summary, - boolean ignoreAllNullRows) { + boolean ignoreAllNullRows, + Pair maxTsFileSetEndVersionAndMinResource) { super( compactionWriter, timeseriesMetadataOffsetMap, @@ -100,7 +101,8 @@ public BatchedFastAlignedSeriesCompactionExecutor( subTaskId, measurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); timeSchema = measurementSchemas.remove(0); valueMeasurementSchemas = measurementSchemas; this.batchColumnSelection = @@ -171,7 +173,8 @@ private void compactFirstBatch() subTaskId, selectedMeasurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); executor.execute(); LOGGER.debug( "[Batch Compaction] current device is {}, first batch compacted time chunk is {}", @@ -199,7 +202,8 @@ private void compactLeftBatches() subTaskId, currentBatchMeasurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); executor.execute(); } } @@ -230,7 +234,8 @@ public FirstBatchFastAlignedSeriesCompactionExecutor( int subTaskId, List measurementSchemas, FastCompactionTaskSummary summary, - boolean ignoreAllNullRows) { + boolean ignoreAllNullRows, + Pair maxTsFileSetEndVersionAndMinResource) { super( compactionWriter, timeseriesMetadataOffsetMap, @@ -241,7 +246,8 @@ public FirstBatchFastAlignedSeriesCompactionExecutor( subTaskId, measurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); isBatchedCompaction = true; } @@ -340,7 +346,8 @@ public FollowingBatchFastAlignedSeriesCompactionExecutor( int subTaskId, List measurementSchemas, FastCompactionTaskSummary summary, - 
boolean ignoreAllNullRows) { + boolean ignoreAllNullRows, + Pair maxTsFileSetEndVersionAndMinResource) { super( compactionWriter, timeseriesMetadataOffsetMap, @@ -351,7 +358,8 @@ public FollowingBatchFastAlignedSeriesCompactionExecutor( subTaskId, measurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); isBatchedCompaction = true; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FirstBatchCompactionAlignedChunkWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FirstBatchCompactionAlignedChunkWriter.java index 0109fcda16ce9..1d38a04d91df2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FirstBatchCompactionAlignedChunkWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FirstBatchCompactionAlignedChunkWriter.java @@ -44,6 +44,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import java.util.function.Function; public class FirstBatchCompactionAlignedChunkWriter extends AlignedChunkWriterImpl { @@ -158,6 +159,18 @@ public void writeToFileWriter(TsFileIOWriter tsfileWriter) throws IOException { super.writeToFileWriter(tsfileWriter); } + @Override + public void writeToFileWriter( + TsFileIOWriter tsfileWriter, Function measurementNameRemapper) + throws IOException { + if (!isEmpty() && beforeChunkWriterFlushCallback != null) { + // make sure all pages are recorded before this call + sealCurrentPage(); + beforeChunkWriterFlushCallback.call(this); + } + super.writeToFileWriter(tsfileWriter, measurementNameRemapper); + } + public void registerBeforeFlushChunkWriterCallback( ChunkWriterFlushCallback flushChunkWriterCallback) { 
this.beforeChunkWriterFlushCallback = flushChunkWriterCallback; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FollowingBatchCompactionAlignedChunkWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FollowingBatchCompactionAlignedChunkWriter.java index 85813d2cd4d37..37729391fbde3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FollowingBatchCompactionAlignedChunkWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FollowingBatchCompactionAlignedChunkWriter.java @@ -40,6 +40,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import java.util.function.Function; public class FollowingBatchCompactionAlignedChunkWriter extends AlignedChunkWriterImpl { private int currentPage = 0; @@ -131,6 +132,21 @@ public void writeToFileWriter(TsFileIOWriter tsfileWriter) throws IOException { } } + @Override + public void writeToFileWriter( + TsFileIOWriter tsfileWriter, Function measurementNameRemapper) + throws IOException { + if (isEmpty()) { + return; + } + for (ValueChunkWriter valueChunkWriter : valueChunkWriterList) { + valueChunkWriter.writeToFileWriter(tsfileWriter, measurementNameRemapper); + } + if (afterChunkWriterFlushCallback != null) { + afterChunkWriterFlushCallback.call(this); + } + } + @Override public boolean checkIsChunkSizeOverThreshold( long size, long pointNum, boolean returnTrueIfChunkEmpty) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastAlignedSeriesCompactionExecutor.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastAlignedSeriesCompactionExecutor.java index 121d4ca1d3a9d..3d03b7baf165e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastAlignedSeriesCompactionExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastAlignedSeriesCompactionExecutor.java @@ -37,6 +37,7 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.io.CompactionTsFileReader; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.utils.ModificationUtils; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; @@ -89,9 +90,17 @@ public FastAlignedSeriesCompactionExecutor( int subTaskId, List measurementSchemas, FastCompactionTaskSummary summary, - boolean ignoreAllNullRows) { + boolean ignoreAllNullRows, + Pair maxTsFileSetEndVersionAndMinResource) { super( - compactionWriter, readerCacheMap, modificationCacheMap, deviceId, true, subTaskId, summary); + compactionWriter, + readerCacheMap, + modificationCacheMap, + deviceId, + true, + subTaskId, + summary, + maxTsFileSetEndVersionAndMinResource); this.timeseriesMetadataOffsetMap = timeseriesMetadataOffsetMap; this.measurementSchemas = measurementSchemas; this.timeColumnMeasurementSchema = measurementSchemas.get(0); @@ -188,6 +197,9 @@ protected List getAlignedChunkMetadataList(TsFileR // read time chunk metadatas and value chunk metadatas in the current file List timeChunkMetadatas = null; List> valueChunkMetadatas = new ArrayList<>(); + EvolvedSchema evolvedSchema = + 
resource.getMergedEvolvedSchema(maxTsFileSetEndVersionAndMinResource.getLeft()); + for (Map.Entry>> entry : timeseriesMetadataOffsetMap.entrySet()) { String measurementID = entry.getKey(); @@ -216,7 +228,7 @@ protected List getAlignedChunkMetadataList(TsFileR .get(resource) .getChunkMetadataListByTimeseriesMetadataOffset( timeseriesOffsetInCurrentFile.left, timeseriesOffsetInCurrentFile.right); - if (isValueChunkDataTypeMatchSchema(valueColumnChunkMetadataList)) { + if (isValueChunkDataTypeMatchSchema(valueColumnChunkMetadataList, evolvedSchema)) { valueChunkMetadatas.add(valueColumnChunkMetadataList); } else { valueChunkMetadatas.add(null); @@ -270,18 +282,29 @@ protected List getAlignedChunkMetadataList(TsFileR // modify aligned chunk metadatas ModificationUtils.modifyAlignedChunkMetaData( alignedChunkMetadataList, timeModifications, valueModifications, ignoreAllNullRows); + + if (evolvedSchema != null) { + String originalTableName = evolvedSchema.getOriginalTableName(deviceId.getTableName()); + for (AbstractAlignedChunkMetadata abstractAlignedChunkMetadata : alignedChunkMetadataList) { + evolvedSchema.rewriteToFinal(abstractAlignedChunkMetadata, originalTableName); + } + } } return alignedChunkMetadataList; } private boolean isValueChunkDataTypeMatchSchema( - List chunkMetadataListOfOneValueColumn) { + List chunkMetadataListOfOneValueColumn, EvolvedSchema evolvedSchema) { boolean isMatch = false; for (IChunkMetadata chunkMetadata : chunkMetadataListOfOneValueColumn) { if (chunkMetadata == null) { continue; } String measurement = chunkMetadata.getMeasurementUid(); + if (evolvedSchema != null) { + String originalTableName = evolvedSchema.getOriginalTableName(deviceId.getTableName()); + measurement = evolvedSchema.getFinalColumnName(originalTableName, measurement); + } IMeasurementSchema schema = measurementSchemaMap.get(measurement); if (MetadataUtils.canAlter(chunkMetadata.getDataType(), schema.getType())) { if (schema.getType() != chunkMetadata.getDataType()) { 
@@ -371,11 +394,15 @@ void readChunk(ChunkMetadataElement chunkMetadataElement) throws IOException { valueChunks.add(null); continue; } + + Chunk chunk = readChunk(reader, (ChunkMetadata) valueChunkMetadata); + // the column may be renamed, enqueue with the final column name + chunk.getHeader().setMeasurementID(valueChunkMetadata.getMeasurementUid()); + if (valueChunkMetadata.getNewType() != null) { - Chunk chunk = - readChunk(reader, (ChunkMetadata) valueChunkMetadata) - .rewrite( - ((ChunkMetadata) valueChunkMetadata).getNewType(), chunkMetadataElement.chunk); + chunk = + chunk.rewrite( + ((ChunkMetadata) valueChunkMetadata).getNewType(), chunkMetadataElement.chunk); valueChunks.add(chunk); ChunkMetadata chunkMetadata = (ChunkMetadata) valueChunkMetadata; @@ -384,7 +411,7 @@ void readChunk(ChunkMetadataElement chunkMetadataElement) throws IOException { statistics.mergeStatistics(chunk.getChunkStatistic()); chunkMetadata.setStatistics(statistics); } else { - valueChunks.add(readChunk(reader, (ChunkMetadata) valueChunkMetadata)); + valueChunks.add(chunk); } } chunkMetadataElement.valueChunks = valueChunks; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastNonAlignedSeriesCompactionExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastNonAlignedSeriesCompactionExecutor.java index 363554fb60672..9b828cd9a6356 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastNonAlignedSeriesCompactionExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastNonAlignedSeriesCompactionExecutor.java @@ -85,7 +85,8 @@ public FastNonAlignedSeriesCompactionExecutor( List sortedSourceFiles, IDeviceID deviceId, int subTaskId, - FastCompactionTaskSummary 
summary) { + FastCompactionTaskSummary summary, + Pair maxTsFileSetEndVersionAndMinResource) { super( compactionWriter, readerCacheMap, @@ -93,7 +94,8 @@ public FastNonAlignedSeriesCompactionExecutor( deviceId, false, subTaskId, - summary); + summary, + maxTsFileSetEndVersionAndMinResource); this.sortResources = sortedSourceFiles; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/SeriesCompactionExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/SeriesCompactionExecutor.java index b3073bd3d258d..ff75a5a867b19 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/SeriesCompactionExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/SeriesCompactionExecutor.java @@ -41,6 +41,7 @@ import org.apache.tsfile.read.TimeValuePair; import org.apache.tsfile.read.TsFileSequenceReader; import org.apache.tsfile.read.common.TimeRange; +import org.apache.tsfile.utils.Pair; import java.io.IOException; import java.util.ArrayList; @@ -97,6 +98,8 @@ void call(PageElement pageElement) protected boolean isAligned; + protected final Pair maxTsFileSetEndVersionAndMinResource; + protected SeriesCompactionExecutor( AbstractCompactionWriter compactionWriter, Map readerCacheMap, @@ -105,7 +108,8 @@ protected SeriesCompactionExecutor( IDeviceID deviceId, boolean isAligned, int subTaskId, - FastCompactionTaskSummary summary) { + FastCompactionTaskSummary summary, + Pair maxTsFileSetEndVersionAndMinResource) { this.compactionWriter = compactionWriter; this.subTaskId = subTaskId; this.deviceId = deviceId; @@ -128,6 +132,7 @@ protected SeriesCompactionExecutor( int timeCompare = Long.compare(o1.getStartTime(), o2.getStartTime()); return timeCompare != 0 ? 
timeCompare : o2.getPriority().compareTo(o1.getPriority()); }); + this.maxTsFileSetEndVersionAndMinResource = maxTsFileSetEndVersionAndMinResource; } public abstract void execute() @@ -350,12 +355,14 @@ private void checkAndCompactOverlapPage(PageElement nextPageElement, TimeValuePa */ protected List findOverlapFiles(FileElement fileToCheck) { List overlappedFiles = new ArrayList<>(); - Optional endTimeInCheckingFile = fileToCheck.resource.getEndTime(deviceId); + Optional endTimeInCheckingFile = + fileToCheck.resource.getEndTime(deviceId, maxTsFileSetEndVersionAndMinResource.left); for (FileElement otherFile : fileList) { if (!endTimeInCheckingFile.isPresent()) { continue; } - Optional startTimeInOtherFile = otherFile.resource.getStartTime(deviceId); + Optional startTimeInOtherFile = + otherFile.resource.getStartTime(deviceId, maxTsFileSetEndVersionAndMinResource.left); if (startTimeInOtherFile.isPresent() && startTimeInOtherFile.get() <= endTimeInCheckingFile.get()) { if (!otherFile.isSelected) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/readchunk/ReadChunkAlignedSeriesCompactionExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/readchunk/ReadChunkAlignedSeriesCompactionExecutor.java index 654f8f770e74f..66b8062f47173 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/readchunk/ReadChunkAlignedSeriesCompactionExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/readchunk/ReadChunkAlignedSeriesCompactionExecutor.java @@ -165,7 +165,7 @@ private void collectValueColumnSchemaList() throws IOException { ChunkHeader chunkHeader = reader.readChunkHeader(chunkMetadata.getOffsetOfChunkHeader()); IMeasurementSchema schema = new MeasurementSchema( - 
chunkHeader.getMeasurementID(), + chunkMetadata.getMeasurementUid(), chunkHeader.getDataType(), chunkHeader.getEncodingType(), chunkHeader.getCompressionType()); @@ -262,6 +262,8 @@ protected ChunkLoader getChunkLoader(TsFileSequenceReader reader, ChunkMetadata return new InstantChunkLoader(); } Chunk chunk = reader.readMemChunk(chunkMetadata); + // the chunk may be renamed and chunkMetadata contains the final name + chunk.getHeader().setMeasurementID(chunkMetadata.getMeasurementUid()); return new InstantChunkLoader(reader.getFileName(), chunkMetadata, chunk); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/reader/SeriesDataBlockReader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/reader/SeriesDataBlockReader.java index a49b97c8f5222..9c78d866852bb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/reader/SeriesDataBlockReader.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/reader/SeriesDataBlockReader.java @@ -53,7 +53,8 @@ public SeriesDataBlockReader( Set allSensors, FragmentInstanceContext context, QueryDataSource dataSource, - boolean ascending) { + boolean ascending, + long maxTsFileSetEndVersion) { SeriesScanOptions.Builder scanOptionsBuilder = new SeriesScanOptions.Builder(); scanOptionsBuilder.withAllSensors(allSensors); @@ -63,14 +64,16 @@ public SeriesDataBlockReader( (AlignedFullPath) seriesPath, ascending ? Ordering.ASC : Ordering.DESC, scanOptionsBuilder.build(), - context); + context, + maxTsFileSetEndVersion); } else if (seriesPath instanceof NonAlignedFullPath) { this.seriesScanUtil = new SeriesScanUtil( seriesPath, ascending ? 
Ordering.ASC : Ordering.DESC, scanOptionsBuilder.build(), - context); + context, + maxTsFileSetEndVersion); } else { throw new IllegalArgumentException("Should call exact sub class!"); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCompactionWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCompactionWriter.java index f3cd5185b58af..61e47d318d859 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCompactionWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCompactionWriter.java @@ -27,6 +27,7 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.writer.flushcontroller.AbstractCompactionFlushController; import org.apache.iotdb.db.storageengine.dataregion.compaction.io.CompactionTsFileWriter; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.tsfile.encrypt.EncryptParameter; import org.apache.tsfile.exception.write.PageException; @@ -38,6 +39,7 @@ import org.apache.tsfile.read.TimeValuePair; import org.apache.tsfile.read.common.Chunk; import org.apache.tsfile.read.common.block.TsBlock; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.utils.TsPrimitiveType; import org.apache.tsfile.write.chunk.AlignedChunkWriterImpl; import org.apache.tsfile.write.chunk.ChunkWriterImpl; @@ -339,5 +341,6 @@ protected void checkPreviousTimestamp(long currentWritingTimestamp, int subTaskI } } - public abstract void setSchemaForAllTargetFile(List schemas); + public abstract void setSchemaForAllTargetFile( + List schemas, Pair maxTsFileSetEndVersionAndAssociatedResource); } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCrossCompactionWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCrossCompactionWriter.java index f970ad65e56c3..3c0fe38ae1e3e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCrossCompactionWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCrossCompactionWriter.java @@ -21,10 +21,12 @@ import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.conf.IoTDBDescriptor; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionTableSchema; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionUtils; import org.apache.iotdb.db.storageengine.dataregion.compaction.io.CompactionTsFileWriter; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.constant.CompactionType; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ITimeIndex; import org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; import org.apache.iotdb.db.utils.EncryptDBUtils; @@ -35,6 +37,7 @@ import org.apache.tsfile.read.TimeValuePair; import org.apache.tsfile.read.TsFileSequenceReader; import org.apache.tsfile.read.common.block.TsBlock; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.utils.TsPrimitiveType; import org.apache.tsfile.write.schema.Schema; @@ -73,17 +76,24 @@ public abstract class AbstractCrossCompactionWriter extends AbstractCompactionWr private final EncryptParameter encryptParameter; + private final long maxTsFileSetEndVersion; + 
@TestOnly protected AbstractCrossCompactionWriter( List targetResources, List seqFileResources) throws IOException { - this(targetResources, seqFileResources, EncryptDBUtils.getDefaultFirstEncryptParam()); + this( + targetResources, + seqFileResources, + EncryptDBUtils.getDefaultFirstEncryptParam(), + Long.MIN_VALUE); } protected AbstractCrossCompactionWriter( List targetResources, List seqFileResources, - EncryptParameter encryptParameter) + EncryptParameter encryptParameter, + long maxTsFileSetEndVersion) throws IOException { currentDeviceEndTime = new long[seqFileResources.size()]; isCurrentDeviceExistedInSourceSeqFiles = new boolean[seqFileResources.size()]; @@ -99,14 +109,16 @@ protected AbstractCrossCompactionWriter( for (int i = 0; i < targetResources.size(); i++) { this.targetFileWriters.add( new CompactionTsFileWriter( - targetResources.get(i).getTsFile(), + targetResources.get(i), memorySizeForEachWriter, CompactionType.CROSS_COMPACTION, - this.encryptParameter)); + this.encryptParameter, + maxTsFileSetEndVersion)); isEmptyFile[i] = true; } this.seqTsFileResources = seqFileResources; this.targetResources = targetResources; + this.maxTsFileSetEndVersion = maxTsFileSetEndVersion; } @Override @@ -227,10 +239,17 @@ protected void checkTimeAndMayFlushChunkToCurrentFile(long timestamp, int subTas private void checkIsDeviceExistAndGetDeviceEndTime() throws IOException { int fileIndex = 0; while (fileIndex < seqTsFileResources.size()) { - ITimeIndex timeIndex = seqTsFileResources.get(fileIndex).getTimeIndex(); + TsFileResource tsFileResource = seqTsFileResources.get(fileIndex); + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + IDeviceID originalDeviceId = deviceId; + if (evolvedSchema != null) { + originalDeviceId = evolvedSchema.rewriteToOriginal(deviceId); + } + + ITimeIndex timeIndex = tsFileResource.getTimeIndex(); if (timeIndex.getTimeIndexType() != ITimeIndex.FILE_TIME_INDEX_TYPE) { // the timeIndexType of 
resource is deviceTimeIndex - Optional endTime = timeIndex.getEndTime(deviceId); + Optional endTime = timeIndex.getEndTime(originalDeviceId); currentDeviceEndTime[fileIndex] = endTime.orElse(Long.MIN_VALUE); isCurrentDeviceExistedInSourceSeqFiles[fileIndex] = endTime.isPresent(); } else { @@ -239,7 +258,7 @@ private void checkIsDeviceExistAndGetDeviceEndTime() throws IOException { // Fast compaction get reader from cache map, while read point compaction get reader from // FileReaderManager Map deviceMetadataMap = - getFileReader(seqTsFileResources.get(fileIndex)).readDeviceMetadata(deviceId); + getFileReader(tsFileResource).readDeviceMetadata(originalDeviceId); for (Map.Entry entry : deviceMetadataMap.entrySet()) { long tmpStartTime = entry.getValue().getStatistics().getStartTime(); long tmpEndTime = entry.getValue().getStatistics().getEndTime(); @@ -266,9 +285,25 @@ public long getWriterSize() throws IOException { } @Override - public void setSchemaForAllTargetFile(List schemas) { + public void setSchemaForAllTargetFile( + List schemas, Pair maxTsFileSetEndVersionAndMinResource) { for (int i = 0; i < targetFileWriters.size(); i++) { - targetFileWriters.get(i).setSchema(schemas.get(i)); + CompactionTsFileWriter compactionTsFileWriter = targetFileWriters.get(i); + Schema schema = schemas.get(i); + TsFileResource targetResource = compactionTsFileWriter.getTsFileResource(); + if (maxTsFileSetEndVersionAndMinResource.right != null) { + long maxTsFileSetEndVersion = maxTsFileSetEndVersionAndMinResource.left; + TsFileResource minVersionResource = maxTsFileSetEndVersionAndMinResource.getRight(); + targetResource.setTsFileManager(minVersionResource.getTsFileManager()); + EvolvedSchema evolvedSchema = targetResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + + if (evolvedSchema != null) { + schema = evolvedSchema.rewriteToOriginal(schema, CompactionTableSchema::new); + } + compactionTsFileWriter.setSchema(schema); + } else { + 
compactionTsFileWriter.setSchema(schema); + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractInnerCompactionWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractInnerCompactionWriter.java index 6573bb7e96e86..0689e37971914 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractInnerCompactionWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractInnerCompactionWriter.java @@ -21,11 +21,13 @@ import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.conf.IoTDBDescriptor; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionTableSchema; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionTableSchemaCollector; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionUtils; import org.apache.iotdb.db.storageengine.dataregion.compaction.io.CompactionTsFileWriter; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.constant.CompactionType; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; import org.apache.iotdb.db.utils.EncryptDBUtils; @@ -33,6 +35,7 @@ import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.read.TimeValuePair; import org.apache.tsfile.read.common.block.TsBlock; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.write.schema.Schema; import java.io.IOException; @@ -46,6 +49,7 @@ public abstract class AbstractInnerCompactionWriter extends AbstractCompactionWr protected long endedFileSize = 0; 
protected List schemas; protected EncryptParameter encryptParameter; + protected Pair maxTsFileSetEndVersionAndMinResource; protected final long memoryBudgetForFileWriter = (long) @@ -113,15 +117,29 @@ private void rollCompactionFileWriter() throws IOException { } private void useNewWriter() throws IOException { + long maxTsFileSetEndVersion = maxTsFileSetEndVersionAndMinResource.left; fileWriter = new CompactionTsFileWriter( - targetResources.get(currentFileIndex).getTsFile(), + targetResources.get(currentFileIndex), memoryBudgetForFileWriter, targetResources.get(currentFileIndex).isSeq() ? CompactionType.INNER_SEQ_COMPACTION : CompactionType.INNER_UNSEQ_COMPACTION, - encryptParameter); - fileWriter.setSchema(CompactionTableSchemaCollector.copySchema(schemas.get(0))); + encryptParameter, + maxTsFileSetEndVersion); + Schema schema = CompactionTableSchemaCollector.copySchema(schemas.get(0)); + TsFileResource minVersionResource = maxTsFileSetEndVersionAndMinResource.getRight(); + // only null during test + fileWriter + .getTsFileResource() + .setTsFileManager( + minVersionResource != null ? minVersionResource.getTsFileManager() : null); + EvolvedSchema evolvedSchema = + fileWriter.getTsFileResource().getMergedEvolvedSchema(maxTsFileSetEndVersion); + fileWriter.setSchema( + evolvedSchema != null + ? 
evolvedSchema.rewriteToOriginal(schema, CompactionTableSchema::new) + : schema); } @Override @@ -174,8 +192,10 @@ public void checkAndMayFlushChunkMetadata() throws IOException { } @Override - public void setSchemaForAllTargetFile(List schemas) { + public void setSchemaForAllTargetFile( + List schemas, Pair maxTsFileSetEndVersionAndMinResource) { this.schemas = schemas; + this.maxTsFileSetEndVersionAndMinResource = maxTsFileSetEndVersionAndMinResource; } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/FastCrossCompactionWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/FastCrossCompactionWriter.java index 59a87b4211c46..f379d02704add 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/FastCrossCompactionWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/FastCrossCompactionWriter.java @@ -53,7 +53,11 @@ public FastCrossCompactionWriter( List seqSourceResources, Map readerMap) throws IOException { - super(targetResources, seqSourceResources, EncryptDBUtils.getDefaultFirstEncryptParam()); + super( + targetResources, + seqSourceResources, + EncryptDBUtils.getDefaultFirstEncryptParam(), + Long.MIN_VALUE); this.readerMap = readerMap; } @@ -61,9 +65,10 @@ public FastCrossCompactionWriter( List targetResources, List seqSourceResources, Map readerMap, - EncryptParameter encryptParameter) + EncryptParameter encryptParameter, + long maxTsFileSetEndVersion) throws IOException { - super(targetResources, seqSourceResources, encryptParameter); + super(targetResources, seqSourceResources, encryptParameter, maxTsFileSetEndVersion); this.readerMap = readerMap; } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/ReadPointCrossCompactionWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/ReadPointCrossCompactionWriter.java index 6810df4d1a3b6..b2799c0dfe5e2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/ReadPointCrossCompactionWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/ReadPointCrossCompactionWriter.java @@ -47,15 +47,20 @@ public class ReadPointCrossCompactionWriter extends AbstractCrossCompactionWrite public ReadPointCrossCompactionWriter( List targetResources, List seqFileResources) throws IOException { - super(targetResources, seqFileResources, EncryptDBUtils.getDefaultFirstEncryptParam()); + super( + targetResources, + seqFileResources, + EncryptDBUtils.getDefaultFirstEncryptParam(), + Long.MIN_VALUE); } public ReadPointCrossCompactionWriter( List targetResources, List seqFileResources, - EncryptParameter encryptParameter) + EncryptParameter encryptParameter, + long maxTsFileSetEndVersion) throws IOException { - super(targetResources, seqFileResources, encryptParameter); + super(targetResources, seqFileResources, encryptParameter, maxTsFileSetEndVersion); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/io/CompactionTsFileWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/io/CompactionTsFileWriter.java index 1f822e3b9345d..e03faa82026e5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/io/CompactionTsFileWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/io/CompactionTsFileWriter.java @@ -24,6 +24,8 @@ 
import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.CompactionTaskManager; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.constant.CompactionIoDataType; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.constant.CompactionType; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.utils.EncryptDBUtils; import org.apache.tsfile.encrypt.EncryptParameter; @@ -53,25 +55,39 @@ public class CompactionTsFileWriter extends TsFileIOWriter { private volatile boolean isWritingAligned = false; private boolean isEmptyTargetFile = true; - private IDeviceID currentDeviceId; + private IDeviceID currentOriginalDeviceId; - private EncryptParameter firstEncryptParameter; + private final TsFileResource tsFileResource; + private final EvolvedSchema evolvedSchema; + + private final EncryptParameter firstEncryptParameter; @TestOnly public CompactionTsFileWriter(File file, long maxMetadataSize, CompactionType type) throws IOException { - this(file, maxMetadataSize, type, EncryptDBUtils.getDefaultFirstEncryptParam()); + this( + new TsFileResource(file), + maxMetadataSize, + type, + EncryptDBUtils.getDefaultFirstEncryptParam(), + Long.MIN_VALUE); } public CompactionTsFileWriter( - File file, long maxMetadataSize, CompactionType type, EncryptParameter encryptParameter) + TsFileResource tsFile, + long maxMetadataSize, + CompactionType type, + EncryptParameter encryptParameter, + long maxTsFileSetEndVersion) throws IOException { - super(file, maxMetadataSize, encryptParameter); + super(tsFile.getTsFile(), maxMetadataSize, encryptParameter); + this.tsFileResource = tsFile; this.firstEncryptParameter = encryptParameter; this.type = type; super.out = new CompactionTsFileOutput( super.out, CompactionTaskManager.getInstance().getMergeWriteRateLimiter()); + evolvedSchema = 
tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); } public EncryptParameter getEncryptParameter() { @@ -92,7 +108,14 @@ public void writeChunk(IChunkWriter chunkWriter) throws IOException { if (!chunkWriter.isEmpty()) { isEmptyTargetFile = false; } - chunkWriter.writeToFileWriter(this); + chunkWriter.writeToFileWriter( + this, + evolvedSchema == null + ? null + : measurementName -> + evolvedSchema.getOriginalColumnName( + evolvedSchema.getFinalTableName(currentOriginalDeviceId.getTableName()), + measurementName)); long writtenDataSize = this.getPos() - beforeOffset; CompactionMetrics.getInstance() .recordWriteInfo( @@ -107,6 +130,15 @@ public void writeChunk(Chunk chunk, ChunkMetadata chunkMetadata) throws IOExcept if (chunkMetadata.getNumOfPoints() != 0) { isEmptyTargetFile = false; } + if (evolvedSchema != null) { + String finalTableName = + evolvedSchema.getFinalTableName(currentOriginalDeviceId.getTableName()); + chunk + .getHeader() + .setMeasurementID( + evolvedSchema.getOriginalColumnName( + finalTableName, chunk.getHeader().getMeasurementID())); + } super.writeChunk(chunk, chunkMetadata); long writtenDataSize = this.getPos() - beforeOffset; CompactionMetrics.getInstance() @@ -124,6 +156,11 @@ public void writeEmptyValueChunk( TSEncoding encodingType, Statistics statistics) throws IOException { + if (evolvedSchema != null) { + measurementId = + evolvedSchema.getOriginalColumnName( + currentOriginalDeviceId.getTableName(), measurementId); + } long beforeOffset = this.getPos(); super.writeEmptyValueChunk( measurementId, compressionType, tsDataType, encodingType, statistics); @@ -141,21 +178,24 @@ public int checkMetadataSizeAndMayFlush() throws IOException { @Override public int startChunkGroup(IDeviceID deviceId) throws IOException { - currentDeviceId = deviceId; + if (evolvedSchema != null) { + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } + currentOriginalDeviceId = deviceId; return super.startChunkGroup(deviceId); } @Override 
public void endChunkGroup() throws IOException { - if (currentDeviceId == null || chunkMetadataList.isEmpty()) { + if (currentOriginalDeviceId == null || chunkMetadataList.isEmpty()) { return; } - String tableName = currentDeviceId.getTableName(); + String tableName = currentOriginalDeviceId.getTableName(); TableSchema tableSchema = getSchema().getTableSchemaMap().get(tableName); boolean generateTableSchemaForCurrentChunkGroup = tableSchema != null; setGenerateTableSchema(generateTableSchemaForCurrentChunkGroup); super.endChunkGroup(); - currentDeviceId = null; + currentOriginalDeviceId = null; } @Override @@ -192,4 +232,8 @@ private void removeUnusedTableSchema() { iterator.remove(); } } + + public TsFileResource getTsFileResource() { + return tsFileResource; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/DeletionPredicate.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/DeletionPredicate.java index 877c94f7081ca..83eabc0fc5846 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/DeletionPredicate.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/DeletionPredicate.java @@ -18,7 +18,8 @@ */ package org.apache.iotdb.db.storageengine.dataregion.modification; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.TagPredicateType; import org.apache.iotdb.db.utils.io.BufferSerializable; import org.apache.iotdb.db.utils.io.StreamSerializable; @@ -42,7 +43,7 @@ public class DeletionPredicate implements StreamSerializable, BufferSerializable public static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(DeletionPredicate.class); private String 
tableName; - private IDPredicate idPredicate = new NOP(); + private TagPredicate tagPredicate = new NOP(); // an empty list means affecting all columns private List measurementNames = Collections.emptyList(); @@ -52,32 +53,28 @@ public DeletionPredicate(String tableName) { this.tableName = tableName; } - public DeletionPredicate(String tableName, IDPredicate idPredicate) { + public DeletionPredicate(String tableName, TagPredicate tagPredicate) { this.tableName = tableName; - this.idPredicate = idPredicate; + this.tagPredicate = tagPredicate; } public DeletionPredicate( - String tableName, IDPredicate idPredicate, List measurementNames) { + String tableName, TagPredicate tagPredicate, List measurementNames) { this.tableName = tableName; - this.idPredicate = idPredicate; + this.tagPredicate = tagPredicate; this.measurementNames = measurementNames; } public boolean matches(IDeviceID deviceID) { - return tableName.equals(deviceID.getTableName()) && idPredicate.matches(deviceID); + return tableName.equals(deviceID.getTableName()) && tagPredicate.matches(deviceID); } - public void setIdPredicate(IDPredicate idPredicate) { - this.idPredicate = idPredicate; + public void setIdPredicate(TagPredicate tagPredicate) { + this.tagPredicate = tagPredicate; } - public IDPredicate getIdPredicate() { - return idPredicate; - } - - public IDPredicate.IDPredicateType getIdPredicateType() { - return this.idPredicate.type; + public TagPredicateType getTagPredicateType() { + return this.tagPredicate.type; } public String getTableName() { @@ -88,6 +85,10 @@ public List getMeasurementNames() { return measurementNames; } + public TagPredicate getTagPredicate() { + return tagPredicate; + } + public boolean affects(String measurementName) { return measurementNames.isEmpty() || measurementNames.contains(measurementName); } @@ -95,7 +96,7 @@ public boolean affects(String measurementName) { @Override public long serialize(OutputStream stream) throws IOException { long size = 
ReadWriteIOUtils.writeVar(tableName, stream); - size += idPredicate.serialize(stream); + size += tagPredicate.serialize(stream); size += ReadWriteForEncodingUtils.writeVarInt(measurementNames.size(), stream); for (String measurementName : measurementNames) { size += ReadWriteIOUtils.writeVar(measurementName, stream); @@ -106,7 +107,7 @@ public long serialize(OutputStream stream) throws IOException { @Override public long serialize(ByteBuffer buffer) { long size = ReadWriteIOUtils.writeVar(tableName, buffer); - size += idPredicate.serialize(buffer); + size += tagPredicate.serialize(buffer); size += ReadWriteForEncodingUtils.writeVarInt(measurementNames.size(), buffer); for (String measurementName : measurementNames) { size += ReadWriteIOUtils.writeVar(measurementName, buffer); @@ -117,7 +118,7 @@ public long serialize(ByteBuffer buffer) { @Override public void deserialize(InputStream stream) throws IOException { tableName = ReadWriteIOUtils.readVarIntString(stream); - idPredicate = IDPredicate.createFrom(stream); + tagPredicate = TagPredicate.createFrom(stream); int measurementLength = ReadWriteForEncodingUtils.readVarInt(stream); if (measurementLength > 0) { @@ -133,7 +134,7 @@ public void deserialize(InputStream stream) throws IOException { @Override public void deserialize(ByteBuffer buffer) { tableName = ReadWriteIOUtils.readVarIntString(buffer); - idPredicate = IDPredicate.createFrom(buffer); + tagPredicate = TagPredicate.createFrom(buffer); int measurementLength = ReadWriteForEncodingUtils.readVarInt(buffer); if (measurementLength > 0) { @@ -151,7 +152,7 @@ public int serializedSize() { int size = ReadWriteForEncodingUtils.varIntSize(tableName.length()) + tableName.length() * Character.BYTES - + idPredicate.serializedSize() + + tagPredicate.serializedSize() + ReadWriteForEncodingUtils.varIntSize(measurementNames.size()); for (String measurementName : measurementNames) { size += @@ -171,13 +172,13 @@ public boolean equals(Object o) { } DeletionPredicate that = 
(DeletionPredicate) o; return Objects.equals(tableName, that.tableName) - && Objects.equals(idPredicate, that.idPredicate) + && Objects.equals(tagPredicate, that.tagPredicate) && Objects.equals(measurementNames, that.measurementNames); } @Override public int hashCode() { - return Objects.hash(tableName, idPredicate, measurementNames); + return Objects.hash(tableName, tagPredicate, measurementNames); } @Override @@ -187,7 +188,7 @@ public String toString() { + tableName + '\'' + ", idPredicate=" - + idPredicate + + tagPredicate + ", measurementNames=" + measurementNames + '}'; @@ -197,7 +198,7 @@ public String toString() { public long ramBytesUsed() { return SHALLOW_SIZE + RamUsageEstimator.sizeOf(tableName) - + RamUsageEstimator.sizeOfObject(idPredicate) + + RamUsageEstimator.sizeOfObject(tagPredicate) + RamUsageEstimator.sizeOfArrayList(measurementNames); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntry.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntry.java index 61ec2e0d67854..60aab450bf342 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntry.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntry.java @@ -21,7 +21,7 @@ import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.db.queryengine.execution.MemoryEstimationHelper; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.IDPredicateType; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.TagPredicateType; import org.apache.iotdb.db.utils.ModificationUtils; import org.apache.tsfile.file.metadata.IDeviceID; @@ -138,8 +138,8 @@ public String getTableName() { } public boolean isDroppingTable() { - IDPredicate 
idPredicate = predicate.getIdPredicate(); - return idPredicate.type == IDPredicateType.NOP + TagPredicate tagPredicate = predicate.getTagPredicate(); + return tagPredicate.type == TagPredicateType.NOP && predicate.getMeasurementNames().isEmpty() && timeRange.getMin() == Long.MIN_VALUE && timeRange.getMax() == Long.MAX_VALUE; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/IDPredicate.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TagPredicate.java similarity index 78% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/IDPredicate.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TagPredicate.java index 44741f9e67940..9b0022aa9551f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/IDPredicate.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TagPredicate.java @@ -18,6 +18,7 @@ */ package org.apache.iotdb.db.storageengine.dataregion.modification; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.utils.io.BufferSerializable; import org.apache.iotdb.db.utils.io.StreamSerializable; @@ -38,7 +39,7 @@ import java.util.List; import java.util.Objects; -public abstract class IDPredicate implements StreamSerializable, BufferSerializable, Accountable { +public abstract class TagPredicate implements StreamSerializable, BufferSerializable, Accountable { public int serializedSize() { // type @@ -46,7 +47,7 @@ public int serializedSize() { } @SuppressWarnings("java:S6548") - public enum IDPredicateType { + public enum TagPredicateType { NOP, FULL_EXACT_MATCH, SEGMENT_EXACT_MATCH, @@ -62,23 +63,31 @@ public long serialize(ByteBuffer buffer) { return 1; } - public static IDPredicateType 
deserialize(InputStream stream) throws IOException { + public static TagPredicateType deserialize(InputStream stream) throws IOException { return values()[stream.read()]; } - public static IDPredicateType deserialize(ByteBuffer buffer) { + public static TagPredicateType deserialize(ByteBuffer buffer) { return values()[buffer.get()]; } } - protected final IDPredicateType type; + protected final TagPredicateType type; - protected IDPredicate(IDPredicateType type) { + protected TagPredicate(TagPredicateType type) { this.type = type; } public abstract boolean matches(IDeviceID deviceID); + public TagPredicate rewriteToOriginal(EvolvedSchema evolvedSchema) { + return this; + } + + public TagPredicate rewriteToFinal(EvolvedSchema evolvedSchema) { + return this; + } + @Override public long serialize(OutputStream stream) throws IOException { return type.serialize(stream); @@ -89,16 +98,16 @@ public long serialize(ByteBuffer buffer) { return type.serialize(buffer); } - public static IDPredicate createFrom(ByteBuffer buffer) { - IDPredicateType type = IDPredicateType.deserialize(buffer); - IDPredicate predicate; - if (Objects.requireNonNull(type) == IDPredicateType.NOP) { + public static TagPredicate createFrom(ByteBuffer buffer) { + TagPredicateType type = TagPredicateType.deserialize(buffer); + TagPredicate predicate; + if (Objects.requireNonNull(type) == TagPredicateType.NOP) { predicate = new NOP(); - } else if (Objects.requireNonNull(type) == IDPredicateType.FULL_EXACT_MATCH) { + } else if (Objects.requireNonNull(type) == TagPredicateType.FULL_EXACT_MATCH) { predicate = new FullExactMatch(); - } else if (Objects.requireNonNull(type) == IDPredicateType.SEGMENT_EXACT_MATCH) { + } else if (Objects.requireNonNull(type) == TagPredicateType.SEGMENT_EXACT_MATCH) { predicate = new SegmentExactMatch(); - } else if (Objects.requireNonNull(type) == IDPredicateType.AND) { + } else if (Objects.requireNonNull(type) == TagPredicateType.AND) { predicate = new And(); } else { throw new 
IllegalArgumentException("Unrecognized predicate type: " + type); @@ -107,16 +116,16 @@ public static IDPredicate createFrom(ByteBuffer buffer) { return predicate; } - public static IDPredicate createFrom(InputStream stream) throws IOException { - IDPredicateType type = IDPredicateType.deserialize(stream); - IDPredicate predicate; - if (Objects.requireNonNull(type) == IDPredicateType.NOP) { + public static TagPredicate createFrom(InputStream stream) throws IOException { + TagPredicateType type = TagPredicateType.deserialize(stream); + TagPredicate predicate; + if (Objects.requireNonNull(type) == TagPredicateType.NOP) { predicate = new NOP(); - } else if (Objects.requireNonNull(type) == IDPredicateType.FULL_EXACT_MATCH) { + } else if (Objects.requireNonNull(type) == TagPredicateType.FULL_EXACT_MATCH) { predicate = new FullExactMatch(); - } else if (Objects.requireNonNull(type) == IDPredicateType.SEGMENT_EXACT_MATCH) { + } else if (Objects.requireNonNull(type) == TagPredicateType.SEGMENT_EXACT_MATCH) { predicate = new SegmentExactMatch(); - } else if (Objects.requireNonNull(type) == IDPredicateType.AND) { + } else if (Objects.requireNonNull(type) == TagPredicateType.AND) { predicate = new And(); } else { throw new IllegalArgumentException("Unrecognized predicate type: " + type); @@ -125,11 +134,11 @@ public static IDPredicate createFrom(InputStream stream) throws IOException { return predicate; } - public static class NOP extends IDPredicate { + public static class NOP extends TagPredicate { public static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(NOP.class); public NOP() { - super(IDPredicateType.NOP); + super(TagPredicateType.NOP); } @Override @@ -168,19 +177,19 @@ public long ramBytesUsed() { } } - public static class FullExactMatch extends IDPredicate { + public static class FullExactMatch extends TagPredicate { public static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(FullExactMatch.class); private IDeviceID deviceID; 
public FullExactMatch(IDeviceID deviceID) { - super(IDPredicateType.FULL_EXACT_MATCH); + super(TagPredicateType.FULL_EXACT_MATCH); this.deviceID = deviceID; } public FullExactMatch() { - super(IDPredicateType.FULL_EXACT_MATCH); + super(TagPredicateType.FULL_EXACT_MATCH); } @Override @@ -243,9 +252,19 @@ public String toString() { public long ramBytesUsed() { return SHALLOW_SIZE + RamUsageEstimator.sizeOfObject(deviceID); } + + @Override + public TagPredicate rewriteToOriginal(EvolvedSchema evolvedSchema) { + return new FullExactMatch(evolvedSchema.rewriteToOriginal(deviceID)); + } + + @Override + public TagPredicate rewriteToFinal(EvolvedSchema evolvedSchema) { + return new FullExactMatch(evolvedSchema.rewriteToFinal(deviceID)); + } } - public static class SegmentExactMatch extends IDPredicate { + public static class SegmentExactMatch extends TagPredicate { public static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(SegmentExactMatch.class); @@ -253,13 +272,13 @@ public static class SegmentExactMatch extends IDPredicate { private int segmentIndex; public SegmentExactMatch(String pattern, int segmentIndex) { - super(IDPredicateType.SEGMENT_EXACT_MATCH); + super(TagPredicateType.SEGMENT_EXACT_MATCH); this.pattern = pattern; this.segmentIndex = segmentIndex; } public SegmentExactMatch() { - super(IDPredicateType.SEGMENT_EXACT_MATCH); + super(TagPredicateType.SEGMENT_EXACT_MATCH); } @Override @@ -342,17 +361,17 @@ public long ramBytesUsed() { } } - public static class And extends IDPredicate { + public static class And extends TagPredicate { public static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(And.class); - private final List predicates = new ArrayList<>(); + private final List predicates = new ArrayList<>(); - public And(IDPredicate... predicates) { - super(IDPredicateType.AND); + public And(TagPredicate... 
predicates) { + super(TagPredicateType.AND); Collections.addAll(this.predicates, predicates); } - public void add(IDPredicate predicate) { + public void add(TagPredicate predicate) { predicates.add(predicate); } @@ -360,7 +379,7 @@ public void add(IDPredicate predicate) { public int serializedSize() { int serializedSize = super.serializedSize(); serializedSize += ReadWriteForEncodingUtils.varIntSize(predicates.size()); - for (IDPredicate predicate : predicates) { + for (TagPredicate predicate : predicates) { serializedSize += predicate.serializedSize(); } return serializedSize; @@ -370,7 +389,7 @@ public int serializedSize() { public long serialize(OutputStream stream) throws IOException { long size = super.serialize(stream); size += ReadWriteForEncodingUtils.writeVarInt(predicates.size(), stream); - for (IDPredicate predicate : predicates) { + for (TagPredicate predicate : predicates) { size += predicate.serialize(stream); } return size; @@ -380,7 +399,7 @@ public long serialize(OutputStream stream) throws IOException { public long serialize(ByteBuffer buffer) { long size = super.serialize(buffer); size += ReadWriteForEncodingUtils.writeVarInt(predicates.size(), buffer); - for (IDPredicate predicate : predicates) { + for (TagPredicate predicate : predicates) { size += predicate.serialize(buffer); } return size; @@ -390,7 +409,7 @@ public long serialize(ByteBuffer buffer) { public void deserialize(InputStream stream) throws IOException { int size = ReadWriteForEncodingUtils.readVarInt(stream); for (int i = 0; i < size; i++) { - predicates.add(IDPredicate.createFrom(stream)); + predicates.add(TagPredicate.createFrom(stream)); } } @@ -398,7 +417,7 @@ public void deserialize(InputStream stream) throws IOException { public void deserialize(ByteBuffer buffer) { int size = ReadWriteForEncodingUtils.readVarInt(buffer); for (int i = 0; i < size; i++) { - predicates.add(IDPredicate.createFrom(buffer)); + predicates.add(TagPredicate.createFrom(buffer)); } } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/QueryDataSource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/QueryDataSource.java index 2816493401671..8c95847788ee8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/QueryDataSource.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/QueryDataSource.java @@ -113,18 +113,26 @@ public IQueryDataSource clone() { return queryDataSource; } - public boolean hasNextSeqResource(int curIndex, boolean ascending, IDeviceID deviceID) { + public boolean hasNextSeqResource( + int curIndex, boolean ascending, IDeviceID deviceID, long maxTsFileSetEndVersion) { boolean res = ascending ? curIndex < seqResources.size() : curIndex >= 0; if (res && curIndex != this.curSeqIndex) { this.curSeqIndex = curIndex; - this.curSeqOrderTime = seqResources.get(curIndex).getOrderTimeForSeq(deviceID, ascending); + this.curSeqOrderTime = + seqResources + .get(curIndex) + .getOrderTimeForSeq(deviceID, ascending, maxTsFileSetEndVersion); this.curSeqSatisfied = null; } return res; } public boolean isSeqSatisfied( - IDeviceID deviceID, int curIndex, Filter timeFilter, boolean debug) { + IDeviceID deviceID, + int curIndex, + Filter timeFilter, + boolean debug, + long maxTsFileSetEndVersion) { if (curIndex != this.curSeqIndex) { throw new IllegalArgumentException( String.format("curIndex %d is not equal to curSeqIndex %d", curIndex, this.curSeqIndex)); @@ -133,7 +141,9 @@ public boolean isSeqSatisfied( TsFileResource tsFileResource = seqResources.get(curSeqIndex); curSeqSatisfied = tsFileResource != null - && (isSingleDevice || tsFileResource.isSatisfied(deviceID, timeFilter, true, debug)); + && (isSingleDevice + || tsFileResource.isFinalDeviceIdSatisfied( + deviceID, timeFilter, true, debug, maxTsFileSetEndVersion)); } return curSeqSatisfied; @@ -154,21 +164,26 @@ public TsFileResource 
getSeqResourceByIndex(int curIndex) { return null; } - public boolean hasNextUnseqResource(int curIndex, boolean ascending, IDeviceID deviceID) { + public boolean hasNextUnseqResource( + int curIndex, boolean ascending, IDeviceID deviceID, long maxTsFileSetEndVersion) { boolean res = curIndex < unseqResources.size(); if (res && curIndex != this.curUnSeqIndex) { this.curUnSeqIndex = curIndex; this.curUnSeqOrderTime = unseqResources .get(unSeqFileOrderIndex[curIndex]) - .getOrderTimeForUnseq(deviceID, ascending); + .getOrderTimeForUnseq(deviceID, ascending, maxTsFileSetEndVersion); this.curUnSeqSatisfied = null; } return res; } public boolean isUnSeqSatisfied( - IDeviceID deviceID, int curIndex, Filter timeFilter, boolean debug) { + IDeviceID deviceID, + int curIndex, + Filter timeFilter, + boolean debug, + long maxTsFileSetEndVersion) { if (curIndex != this.curUnSeqIndex) { throw new IllegalArgumentException( String.format( @@ -178,7 +193,9 @@ public boolean isUnSeqSatisfied( TsFileResource tsFileResource = unseqResources.get(unSeqFileOrderIndex[curIndex]); curUnSeqSatisfied = tsFileResource != null - && (isSingleDevice || tsFileResource.isSatisfied(deviceID, timeFilter, false, debug)); + && (isSingleDevice + || tsFileResource.isFinalDeviceIdSatisfied( + deviceID, timeFilter, false, debug, maxTsFileSetEndVersion)); } return curUnSeqSatisfied; @@ -209,7 +226,7 @@ public int getUnseqResourcesSize() { return unseqResources.size(); } - public void fillOrderIndexes(IDeviceID deviceId, boolean ascending) { + public void fillOrderIndexes(IDeviceID deviceId, boolean ascending, long maxTsFileSetEndVersion) { if (unseqResources == null || unseqResources.isEmpty()) { return; } @@ -219,7 +236,8 @@ public void fillOrderIndexes(IDeviceID deviceId, boolean ascending) { for (TsFileResource resource : unseqResources) { orderTimeToIndexMap .computeIfAbsent( - resource.getOrderTimeForUnseq(deviceId, ascending), key -> new ArrayList<>()) + resource.getOrderTimeForUnseq(deviceId, 
ascending, maxTsFileSetEndVersion), + key -> new ArrayList<>()) .add(index++); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/QueryResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/QueryResourceManager.java index 930d68b4e891d..391775c8a8178 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/QueryResourceManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/QueryResourceManager.java @@ -56,7 +56,12 @@ public long assignQueryId() { * queryId = xx + Long.MIN_VALUE */ public long assignCompactionQueryId() { - long threadNum = Long.parseLong((Thread.currentThread().getName().split("-"))[5]); + long threadNum = 0; + try { + threadNum = Long.parseLong((Thread.currentThread().getName().split("-"))[5]); + } catch (ArrayIndexOutOfBoundsException | NumberFormatException e) { + // test environment, ignore it + } long queryId = Long.MIN_VALUE + threadNum; filePathsManager.addQueryId(queryId); return queryId; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/DataRegionTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/DataRegionTask.java new file mode 100644 index 0000000000000..297518538dd08 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/DataRegionTask.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.iotdb.db.storageengine.dataregion.task;

import org.apache.iotdb.db.storageengine.dataregion.DataRegion;
import org.apache.iotdb.db.utils.io.StreamSerializable;

import org.apache.tsfile.utils.ReadWriteForEncodingUtils;

import java.io.IOException;
import java.io.InputStream;

/**
 * A runnable unit of work that is persisted under a {@link DataRegion}'s sys dir so it can be
 * replayed after a restart. Implementations serialize themselves (type tag first) via
 * {@link StreamSerializable} and are reconstructed by {@link #createFrom}.
 */
public interface DataRegionTask extends Runnable, StreamSerializable {

  /** @return the id assigned by the task manager; also the task file's base name */
  long getTaskId();

  /** Assigns the manager-generated id to this task. */
  void setTaskId(long taskId);

  /** @return the concrete type tag written as the first field of the serialized form */
  TaskType getTaskType();

  /** Closed set of task kinds; the ordinal is the on-disk type tag. */
  enum TaskType {
    SchemaEvolutionTask
  }

  /**
   * Reconstructs a task from its serialized form.
   *
   * @param stream serialized task, positioned at the leading type-tag var-int
   * @param taskId id recovered from the task file name, re-attached to the task
   * @param dataRegion region the task operates on
   * @throws IOException if the type tag is unknown or the payload cannot be read
   */
  @SuppressWarnings("SwitchStatementWithTooFewBranches")
  static DataRegionTask createFrom(InputStream stream, long taskId, DataRegion dataRegion)
      throws IOException {
    final int typeOrdinal = ReadWriteForEncodingUtils.readVarInt(stream);
    final TaskType[] allTypes = TaskType.values();
    if (typeOrdinal < 0 || typeOrdinal >= allTypes.length) {
      throw new IOException("Invalid task type: " + typeOrdinal);
    }

    final TaskType taskType = allTypes[typeOrdinal];
    final DataRegionTask task;
    switch (taskType) {
      case SchemaEvolutionTask:
        task = new SchemaEvolutionTask(dataRegion);
        break;
      default:
        throw new IOException("Invalid task type: " + taskType);
    }
    task.deserialize(stream);
    task.setTaskId(taskId);
    return task;
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.iotdb.db.storageengine.dataregion.task;

import org.apache.iotdb.db.storageengine.dataregion.DataRegion;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Persists {@link DataRegionTask}s as files named {@code <taskId>.tsk} under the data region's
 * sys dir, runs them, and replays any that survive a restart (crash before {@link #removeTask}).
 *
 * <p>Task ids are monotonically increasing within one process; {@link #recover()} seeds the
 * counter with the highest id found on disk.
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
public class DataRegionTaskManager {

  private static final Logger LOGGER = LoggerFactory.getLogger(DataRegionTaskManager.class);
  private static final String TASKS_DIR_NAME = "tasks";
  private static final String TASK_FILE_SUFFIX = ".tsk";

  private final DataRegion dataRegion;
  private final AtomicLong latestTaskId = new AtomicLong(0);
  private final File tasksDir;

  public DataRegionTaskManager(DataRegion dataRegion) {
    this.dataRegion = dataRegion;
    this.tasksDir = new File(dataRegion.getDataRegionSysDir() + File.separator + TASKS_DIR_NAME);
  }

  /**
   * Replays persisted task files in ascending task-id order, deleting each file afterwards
   * (even on failure), and advances the id counter past every id seen on disk.
   */
  public void recover() {
    tasksDir.mkdirs();
    File[] files = tasksDir.listFiles((File dir, String name) -> name.endsWith(TASK_FILE_SUFFIX));
    if (files == null) {
      return;
    }

    // Parse ids up front so a malformed file name is skipped instead of aborting the whole
    // recovery with a NumberFormatException mid-sort.
    List<File> recoverable = new ArrayList<>(files.length);
    for (File file : files) {
      if (parseTaskId(file) >= 0) {
        recoverable.add(file);
      } else {
        LOGGER.warn("Skipping task file with unparseable id: {}", file.getAbsolutePath());
      }
    }
    // Bug fix vs. the original comparator: it parsed fileName1 with an index taken from
    // fileName2, yielding wrong ordering (or an exception) for differing name lengths.
    recoverable.sort(Comparator.comparingLong(this::parseTaskId));

    for (File file : recoverable) {
      long taskId = parseTaskId(file);
      // NOTE(review): this seeds the counter to the max recovered id, so the next submit reuses
      // that id; harmless for file naming because recovered files are deleted below — confirm
      // no other consumer requires globally unique ids.
      latestTaskId.getAndUpdate(l -> Math.max(l, taskId));

      try (FileInputStream fis = new FileInputStream(file);
          BufferedInputStream bufferedInputStream = new BufferedInputStream(fis)) {
        DataRegionTask task = DataRegionTask.createFrom(bufferedInputStream, taskId, dataRegion);
        task.run();
      } catch (IOException e) {
        LOGGER.warn("Cannot recover task from file {}", file.getAbsolutePath(), e);
      } finally {
        file.delete();
      }
    }
  }

  /** Extracts the numeric id from a "&lt;id&gt;.tsk" file name; returns -1 if malformed. */
  private long parseTaskId(File file) {
    String fileName = file.getName();
    int suffixIndex = fileName.indexOf('.');
    if (suffixIndex <= 0) {
      return -1;
    }
    try {
      return Long.parseLong(fileName.substring(0, suffixIndex));
    } catch (NumberFormatException e) {
      return -1;
    }
  }

  /** @return the on-disk file backing the given task, named by its id */
  private File taskFile(DataRegionTask task) {
    return new File(tasksDir, task.getTaskId() + TASK_FILE_SUFFIX);
  }

  /** Writes the task's serialized form to its task file. */
  private void persistTask(DataRegionTask task) throws IOException {
    try (FileOutputStream fos = new FileOutputStream(taskFile(task));
        BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(fos)) {
      task.serialize(bufferedOutputStream);
    }
  }

  /** Deletes the task's file after a successful run. */
  private void removeTask(DataRegionTask task) throws IOException {
    taskFile(task).delete();
  }

  /**
   * Assigns the task an id, persists it for crash recovery, runs it synchronously, and removes
   * the persisted copy once it completes.
   *
   * @throws IOException if the task cannot be persisted
   */
  public void submitAndRun(DataRegionTask dataRegionTask) throws IOException {
    dataRegionTask.setTaskId(latestTaskId.getAndIncrement());
    persistTask(dataRegionTask);
    dataRegionTask.run();
    removeTask(dataRegionTask);
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.iotdb.db.storageengine.dataregion.task;

import org.apache.iotdb.db.storageengine.dataregion.DataRegion;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution;

import org.apache.tsfile.utils.ReadWriteForEncodingUtils;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;

/**
 * A {@link DataRegionTask} that applies a batch of schema evolutions to a data region: the
 * evolutions are first recorded, then applied to the region's objects.
 *
 * <p>Serialized form: type-tag var-int, evolution count var-int, then each evolution in order.
 */
public class SchemaEvolutionTask implements DataRegionTask {

  private final DataRegion dataRegion;
  private List<SchemaEvolution> schemaEvolutions;
  private long taskId;

  /** Deserialization constructor; {@link #deserialize} fills in the evolutions. */
  public SchemaEvolutionTask(DataRegion dataRegion) {
    this.dataRegion = dataRegion;
  }

  /** Constructs a ready-to-run task over the given evolutions. */
  public SchemaEvolutionTask(List<SchemaEvolution> schemaEvolutions, DataRegion dataRegion) {
    this.schemaEvolutions = schemaEvolutions;
    this.dataRegion = dataRegion;
  }

  @Override
  public void run() {
    // Record first so the evolutions survive a crash mid-application.
    dataRegion.recordSchemaEvolution(schemaEvolutions);
    dataRegion.applySchemaEvolutionToObjects(schemaEvolutions);
  }

  @Override
  public long serialize(OutputStream stream) throws IOException {
    long bytesWritten = ReadWriteForEncodingUtils.writeVarInt(getTaskType().ordinal(), stream);
    bytesWritten += ReadWriteForEncodingUtils.writeVarInt(schemaEvolutions.size(), stream);
    for (SchemaEvolution evolution : schemaEvolutions) {
      bytesWritten += evolution.serialize(stream);
    }
    return bytesWritten;
  }

  @Override
  public void deserialize(InputStream stream) throws IOException {
    // The type tag has already been consumed by DataRegionTask.createFrom.
    final int count = ReadWriteForEncodingUtils.readVarInt(stream);
    final List<SchemaEvolution> loaded = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
      loaded.add(SchemaEvolution.createFrom(stream));
    }
    schemaEvolutions = loaded;
  }

  @Override
  public long getTaskId() {
    return taskId;
  }

  @Override
  public void setTaskId(long taskId) {
    this.taskId = taskId;
  }

  @Override
  public TaskType getTaskType() {
    return TaskType.SchemaEvolutionTask;
  }
}
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; @@ -55,6 +59,7 @@ public class TsFileManager { private final TreeMap sequenceFiles = new TreeMap<>(); private final TreeMap unsequenceFiles = new TreeMap<>(); private final TreeMap modFileManagementMap = new TreeMap<>(); + private final Map> tsfileSets = new ConcurrentSkipListMap<>(); private volatile boolean allowCompaction = true; private final AtomicLong currentCompactionTaskSerialId = new AtomicLong(0); @@ -236,6 +241,7 @@ public void insertToPartitionFileList( modFileManagementMap.computeIfAbsent( timePartition, t -> new PartitionLevelModFileManager())); } + tsFileResource.setTsFileManager(this); } finally { writeUnlock(); } @@ -254,6 +260,7 @@ public void add(TsFileResource tsFileResource, boolean sequence) { modFileManagementMap.computeIfAbsent( tsFileResource.getTimePartition(), t -> new PartitionLevelModFileManager())); } + tsFileResource.setTsFileManager(this); } finally { writeUnlock(); } @@ -272,6 +279,7 @@ public void keepOrderInsert(TsFileResource tsFileResource, boolean sequence) thr modFileManagementMap.computeIfAbsent( tsFileResource.getTimePartition(), t -> new PartitionLevelModFileManager())); } + tsFileResource.setTsFileManager(this); } finally { writeUnlock(); } @@ -332,6 +340,7 @@ public void replace( modFileManagementMap.computeIfAbsent( resource.getTimePartition(), t -> new PartitionLevelModFileManager())); } + resource.setTsFileManager(this); } } } finally { @@ -507,4 +516,31 @@ public long getMaxFileTimestampOfUnSequenceFile() { } return maxFileTimestamp; } + + public void addTsFileSet(TsFileSet newSet, long partitionId) { + List tsFileSetList = + tsfileSets.computeIfAbsent(partitionId, p -> new CopyOnWriteArrayList<>()); + tsFileSetList.add(newSet); + } + + public List getTsFileSet(long partitionId) { + return getTsFileSet(partitionId, Long.MIN_VALUE, Long.MAX_VALUE); + } + + public List getTsFileSet( + long 
partitionId, long minFileVersionIncluded, long maxFileVersionExcluded) { + List tsFileSetList = tsfileSets.getOrDefault(partitionId, Collections.emptyList()); + int start = 0, end = tsFileSetList.size(); + for (int i = 0, tsFileSetListSize = tsFileSetList.size(); i < tsFileSetListSize; i++) { + TsFileSet tsFileSet = tsFileSetList.get(i); + if (tsFileSet.getEndVersion() < minFileVersionIncluded) { + start = i + 1; + } + if (tsFileSet.getEndVersion() >= maxFileVersionExcluded) { + end = i; + break; + } + } + return start < end ? tsFileSetList.subList(start, end) : Collections.emptyList(); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java index b84cce9e8d21b..d6d2b42c7dd29 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java @@ -42,6 +42,8 @@ import org.apache.iotdb.db.storageengine.dataregion.modification.v1.Deletion; import org.apache.iotdb.db.storageengine.dataregion.modification.v1.Modification; import org.apache.iotdb.db.storageengine.dataregion.modification.v1.ModificationFileV1; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; import org.apache.iotdb.db.storageengine.dataregion.tsfile.generator.TsFileNameGenerator; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ArrayDeviceTimeIndex; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.FileTimeIndex; @@ -209,6 +211,8 @@ public class TsFileResource implements PersistentResource, Cloneable { private Map>> lastValues; + private TsFileManager tsFileManager = null; + @TestOnly public TsFileResource() { 
this.tsFileID = new TsFileID(); @@ -257,6 +261,7 @@ public TsFileResource( this.tsFileID = originTsFileResource.tsFileID; this.isSeq = originTsFileResource.isSeq; this.tierLevel = originTsFileResource.tierLevel; + this.tsFileManager = originTsFileResource.tsFileManager; } public synchronized void serialize(String targetFilePath) throws IOException { @@ -610,8 +615,21 @@ public long getTsFileSize() { } } - public Optional getStartTime(IDeviceID deviceId) { + public IDeviceID toOriginalDeviceID(IDeviceID deviceID) { + return toOriginalDeviceID(Long.MAX_VALUE, deviceID); + } + + public IDeviceID toOriginalDeviceID(long maxTsFileSetEndVersion, IDeviceID deviceID) { + EvolvedSchema evolvedSchema = getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + return evolvedSchema.rewriteToOriginal(deviceID); + } + return deviceID; + } + + public Optional getStartTime(IDeviceID deviceId, long maxTsFileSetEndVersion) { try { + deviceId = toOriginalDeviceID(maxTsFileSetEndVersion, deviceId); return deviceId == null ? Optional.of(getFileStartTime()) : timeIndex.getStartTime(deviceId); } catch (Exception e) { LOGGER.error( @@ -623,9 +641,14 @@ public Optional getStartTime(IDeviceID deviceId) { } } + public Optional getStartTime(IDeviceID deviceId) { + return getStartTime(deviceId, Long.MAX_VALUE); + } + /** open file's end time is Long.MIN_VALUE */ - public Optional getEndTime(IDeviceID deviceId) { + public Optional getEndTime(IDeviceID deviceId, long maxTsFileSetEndVersion) { try { + deviceId = toOriginalDeviceID(maxTsFileSetEndVersion, deviceId); return deviceId == null ? 
Optional.of(getFileEndTime()) : timeIndex.getEndTime(deviceId); } catch (Exception e) { LOGGER.error( @@ -637,9 +660,19 @@ public Optional getEndTime(IDeviceID deviceId) { } } + /** open file's end time is Long.MIN_VALUE */ + public Optional getEndTime(IDeviceID deviceId) { + return getEndTime(deviceId, Long.MAX_VALUE); + } + // cannot use FileTimeIndex - public long getOrderTimeForSeq(IDeviceID deviceId, boolean ascending) { + public long getOrderTimeForSeq( + IDeviceID deviceId, boolean ascending, long maxTsFileSetEndVersion) { if (timeIndex instanceof ArrayDeviceTimeIndex) { + EvolvedSchema evolvedSchema = getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } return ascending ? timeIndex.getStartTime(deviceId).orElse(Long.MIN_VALUE) : timeIndex.getEndTime(deviceId).orElse(Long.MAX_VALUE); @@ -649,8 +682,13 @@ public long getOrderTimeForSeq(IDeviceID deviceId, boolean ascending) { } // can use FileTimeIndex - public long getOrderTimeForUnseq(IDeviceID deviceId, boolean ascending) { + public long getOrderTimeForUnseq( + IDeviceID deviceId, boolean ascending, long maxTsFileSetEndVersion) { if (timeIndex instanceof ArrayDeviceTimeIndex) { + EvolvedSchema evolvedSchema = getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } if (ascending) { return timeIndex.getStartTime(deviceId).orElse(Long.MIN_VALUE); } else { @@ -718,6 +756,8 @@ public ITimeIndex getTimeIndex() { * Whether this TsFile definitely not contains this device, if ture, it must not contain this * device, if false, it may or may not contain this device Notice: using method be CAREFULLY and * you really understand the meaning!!!!! 
+ * + * @param device the IDeviceID before schema evolution */ public boolean definitelyNotContains(IDeviceID device) { return timeIndex.definitelyNotContains(device); @@ -1003,14 +1043,45 @@ public boolean stillLives(long timeLowerBound) { } public boolean isDeviceIdExist(IDeviceID deviceId) { + EvolvedSchema evolvedSchema = getMergedEvolvedSchema(); + if (evolvedSchema != null) { + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } return timeIndex.checkDeviceIdExist(deviceId); } /** + * @param deviceId IDeviceId after schema evolution + */ + public boolean isFinalDeviceIdSatisfied( + IDeviceID deviceId, Filter timeFilter, boolean isSeq, boolean debug) { + return isFinalDeviceIdSatisfied(deviceId, timeFilter, isSeq, debug, Long.MAX_VALUE); + } + + /** + * @param deviceId the IDeviceID after schema evolution + * @return true if the device is contained in the TsFile + */ + public boolean isFinalDeviceIdSatisfied( + IDeviceID deviceId, + Filter timeFilter, + boolean isSeq, + boolean debug, + long maxTsFileSetEndVersion) { + EvolvedSchema evolvedSchema = getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } + return isOriginalDeviceIdSatisfied(deviceId, timeFilter, isSeq, debug); + } + + /** + * @param deviceId the IDeviceID before schema evolution * @return true if the device is contained in the TsFile */ @SuppressWarnings("OptionalGetWithoutIsPresent") - public boolean isSatisfied(IDeviceID deviceId, Filter timeFilter, boolean isSeq, boolean debug) { + public boolean isOriginalDeviceIdSatisfied( + IDeviceID deviceId, Filter timeFilter, boolean isSeq, boolean debug) { if (deviceId != null && definitelyNotContains(deviceId)) { if (debug) { DEBUG_LOGGER.info( @@ -1057,6 +1128,8 @@ private boolean isAlive(long time, long dataTTL) { /** * Check whether the given device may still alive or not. Return false if the device does not * exist or out of dated. 
+ * + * @param device IDeviceID before schema evolution */ public boolean isDeviceAlive(IDeviceID device, long ttl) { if (definitelyNotContains(device)) { @@ -1635,4 +1708,69 @@ public TsFileResource shallowClone() { public TsFileResource shallowCloneForNative() throws CloneNotSupportedException { return (TsFileResource) clone(); } + + public List getTsFileSets() { + if (tsFileManager == null) { + // loading TsFile, no TsFileSets + return Collections.emptyList(); + } + return tsFileManager.getTsFileSet( + tsFileID.timePartitionId, tsFileID.fileVersion, Long.MAX_VALUE); + } + + public EvolvedSchema getMergedEvolvedSchema() { + return getMergedEvolvedSchema(Long.MAX_VALUE); + } + + public EvolvedSchema getMergedEvolvedSchema(long excludedMaxFileVersion) { + List list = new ArrayList<>(); + List tsFileSets = getTsFileSets(); + for (TsFileSet fileSet : tsFileSets) { + if (fileSet.getEndVersion() >= excludedMaxFileVersion) { + break; + } + + try { + EvolvedSchema readEvolvedSchema = fileSet.readEvolvedSchema(); + list.add(readEvolvedSchema); + } catch (IOException e) { + LOGGER.warn("Cannot read evolved schema from {}, skipping it", fileSet); + } + } + + return EvolvedSchema.merge(list.toArray(new EvolvedSchema[0])); + } + + public static Pair getMaxTsFileSetEndVersionAndMinResource( + List tsFileResources) { + long maxTsFileSetEndVersion = Long.MIN_VALUE; + long minResourceVersion = Long.MAX_VALUE; + TsFileResource minTsFileResource = null; + for (TsFileResource tsFileResource : tsFileResources) { + List tsFileSets = tsFileResource.getTsFileSets(); + if (tsFileSets.isEmpty()) { + // include the newest files that does not belong to any file sets, + // should apply all schema evolution + maxTsFileSetEndVersion = Long.MAX_VALUE; + break; + } + TsFileSet lastTsFileSet = tsFileSets.get(tsFileSets.size() - 1); + if (lastTsFileSet.getEndVersion() > maxTsFileSetEndVersion) { + maxTsFileSetEndVersion = lastTsFileSet.getEndVersion(); + } + if 
(tsFileResource.getTsFileID().fileVersion < minResourceVersion) { + minTsFileResource = tsFileResource; + minResourceVersion = tsFileResource.getTsFileID().fileVersion; + } + } + return new Pair<>(maxTsFileSetEndVersion, minTsFileResource); + } + + public void setTsFileManager(TsFileManager tsFileManager) { + this.tsFileManager = tsFileManager; + } + + public TsFileManager getTsFileManager() { + return tsFileManager; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/ColumnRename.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/ColumnRename.java new file mode 100644 index 0000000000000..adb6e7e935911 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/ColumnRename.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; + +/** A schema evolution operation that renames a column in a table schema. */ +public class ColumnRename implements SchemaEvolution { + + private String tableName; + private String nameBefore; + private String nameAfter; + // to judge if the Object directories should be renamed + private TSDataType dataType; + + // for deserialization + public ColumnRename() {} + + public ColumnRename(String tableName, String nameBefore, String nameAfter) { + this.tableName = tableName.toLowerCase(); + this.nameBefore = nameBefore.toLowerCase(); + this.nameAfter = nameAfter.toLowerCase(); + } + + public ColumnRename(String tableName, String nameBefore, String nameAfter, TSDataType dataType) { + this(tableName, nameBefore, nameAfter); + this.dataType = dataType; + } + + @Override + public SchemaEvolutionType getEvolutionType() { + return SchemaEvolutionType.COLUMN_RENAME; + } + + @Override + public void applyTo(EvolvedSchema evolvedSchema) { + evolvedSchema.renameColumn(tableName, nameBefore, nameAfter); + } + + @Override + public long serialize(OutputStream stream) throws IOException { + int size = ReadWriteForEncodingUtils.writeVarInt(getEvolutionType().ordinal(), stream); + size += ReadWriteIOUtils.writeVar(tableName, stream); + size += ReadWriteIOUtils.writeVar(nameBefore, stream); + size += 
ReadWriteIOUtils.writeVar(nameAfter, stream); + size += ReadWriteIOUtils.write(dataType != null ? (byte) dataType.ordinal() : -1, stream); + return size; + } + + @Override + public void deserialize(InputStream stream) throws IOException { + tableName = ReadWriteIOUtils.readVarIntString(stream); + nameBefore = ReadWriteIOUtils.readVarIntString(stream); + nameAfter = ReadWriteIOUtils.readVarIntString(stream); + byte category = ReadWriteIOUtils.readByte(stream); + if (category != -1) { + dataType = TSDataType.values()[category]; + } + } + + @Override + public long serialize(ByteBuffer buffer) { + int size = ReadWriteForEncodingUtils.writeVarInt(getEvolutionType().ordinal(), buffer); + size += ReadWriteIOUtils.writeVar(tableName, buffer); + size += ReadWriteIOUtils.writeVar(nameBefore, buffer); + size += ReadWriteIOUtils.writeVar(nameAfter, buffer); + size += ReadWriteIOUtils.write(dataType != null ? (byte) dataType.ordinal() : -1, buffer); + return size; + } + + @Override + public void deserialize(ByteBuffer buffer) { + tableName = ReadWriteIOUtils.readVarIntString(buffer); + nameBefore = ReadWriteIOUtils.readVarIntString(buffer); + nameAfter = ReadWriteIOUtils.readVarIntString(buffer); + byte category = ReadWriteIOUtils.readByte(buffer); + if (category != -1) { + dataType = TSDataType.values()[category]; + } + } + + public TSDataType getDataType() { + return dataType; + } + + public void setDataType(TSDataType dataType) { + this.dataType = dataType; + } + + public String getTableName() { + return tableName; + } + + public String getNameBefore() { + return nameBefore; + } + + public String getNameAfter() { + return nameAfter; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchema.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchema.java new file mode 100644 index 0000000000000..e32d3649c9428 --- /dev/null +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchema.java @@ -0,0 +1,416 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; +import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry.ModType; +import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate; + +import org.apache.tsfile.enums.ColumnCategory; +import org.apache.tsfile.file.metadata.AbstractAlignedChunkMetadata; +import org.apache.tsfile.file.metadata.IChunkMetadata; +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.IDeviceID.Factory; +import org.apache.tsfile.file.metadata.TableSchema; +import org.apache.tsfile.file.metadata.TimeseriesMetadata; +import org.apache.tsfile.utils.Accountable; +import org.apache.tsfile.utils.RamUsageEstimator; +import org.apache.tsfile.write.schema.IMeasurementSchema; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.apache.tsfile.write.schema.Schema; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +public class EvolvedSchema implements Accountable { + + // the evolved table names after applying all schema evolution operations + private Map finalToOriginalTableNames = new LinkedHashMap<>(); + + /** + * the first 
key is the evolved table name, the second key is the evolved column name, and the + * value is the original column name before any schema evolution. + */ + private Map> finalToOriginalColumnNames = new LinkedHashMap<>(); + + // the reversed version of finalToOriginalTableNames + private Map originalToFinalTableNames = new LinkedHashMap<>(); + + // the reversed version of finalToOriginalColumnNames + private Map> originalToFinalColumnNames = new LinkedHashMap<>(); + + public void renameTable(String oldTableName, String newTableName) { + if (!finalToOriginalTableNames.containsKey(oldTableName) + || finalToOriginalTableNames.get(oldTableName).isEmpty()) { + finalToOriginalTableNames.put(newTableName, oldTableName); + finalToOriginalTableNames.put(oldTableName, ""); + originalToFinalTableNames.put(oldTableName, newTableName); + } else { + // mark the old table name as non-exists (empty) + String originalName = finalToOriginalTableNames.put(oldTableName, ""); + finalToOriginalTableNames.put(newTableName, originalName); + originalToFinalTableNames.put(originalName, newTableName); + } + + if (finalToOriginalColumnNames.containsKey(oldTableName)) { + Map columnMap = finalToOriginalColumnNames.remove(oldTableName); + finalToOriginalColumnNames.put(newTableName, columnMap); + } + } + + public void renameColumn(String newTableName, String oldColumnName, String newColumnName) { + Map columnNameMap = + finalToOriginalColumnNames.computeIfAbsent(newTableName, t -> new LinkedHashMap<>()); + String originalTableName = getOriginalTableName(newTableName); + if (!columnNameMap.containsKey(oldColumnName) || columnNameMap.get(oldColumnName).isEmpty()) { + columnNameMap.put(newColumnName, oldColumnName); + columnNameMap.put(oldColumnName, ""); + originalToFinalColumnNames + .computeIfAbsent(originalTableName, t -> new LinkedHashMap<>()) + .put(oldColumnName, newColumnName); + } else { + // mark the old column name as non-exists + String originalName = columnNameMap.put(oldColumnName, 
""); + columnNameMap.put(newColumnName, originalName); + originalToFinalColumnNames + .computeIfAbsent(originalTableName, t -> new LinkedHashMap<>()) + .put(originalName, newColumnName); + } + } + + public String getOriginalTableName(String finalTableName) { + return finalToOriginalTableNames.getOrDefault(finalTableName, finalTableName); + } + + public String getFinalTableName(String originalTableName) { + return originalToFinalTableNames.getOrDefault(originalTableName, originalTableName); + } + + public String getOriginalColumnName(String tableName, String evolvedColumnName) { + Map columnNameMap = finalToOriginalColumnNames.get(tableName); + if (columnNameMap == null) { + return evolvedColumnName; + } + return columnNameMap.getOrDefault(evolvedColumnName, evolvedColumnName); + } + + public String getFinalColumnName(String originalTableName, String originalColumnName) { + return originalToFinalColumnNames + .getOrDefault(originalTableName, Collections.emptyMap()) + .getOrDefault(originalColumnName, originalColumnName); + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + EvolvedSchema that = (EvolvedSchema) o; + return Objects.equals(finalToOriginalTableNames, that.finalToOriginalTableNames) + && Objects.equals(finalToOriginalColumnNames, that.finalToOriginalColumnNames); + } + + @Override + public int hashCode() { + return Objects.hash(finalToOriginalTableNames, finalToOriginalColumnNames); + } + + @Override + public String toString() { + return "EvolvedSchema{" + + "originalTableNames=" + + finalToOriginalTableNames + + ", originalColumnNames=" + + finalToOriginalColumnNames + + '}'; + } + + public List toSchemaEvolutions() { + List schemaEvolutions = new ArrayList<>(); + finalToOriginalTableNames.forEach( + (finalTableName, originalTableName) -> { + if (!originalTableName.isEmpty()) { + schemaEvolutions.add(new TableRename(originalTableName, finalTableName)); + } + }); + 
finalToOriginalColumnNames.forEach( + (finalTableName, originalColumnNameMap) -> { + originalColumnNameMap.forEach( + (finalColumnName, originalColumnName) -> { + if (!originalColumnName.isEmpty()) { + schemaEvolutions.add( + new ColumnRename(finalTableName, originalColumnName, finalColumnName, null)); + } + }); + }); + return schemaEvolutions; + } + + public ModEntry rewriteToOriginal(ModEntry entry) { + if (entry.getType() == ModType.TABLE_DELETION) { + return rewriteToOriginal(((TableDeletionEntry) entry)); + } + return entry; + } + + public ModEntry rewriteToFinal(ModEntry entry) { + if (entry.getType() == ModType.TABLE_DELETION) { + return rewriteToFinal(((TableDeletionEntry) entry)); + } + return entry; + } + + public TableDeletionEntry rewriteToOriginal(TableDeletionEntry entry) { + DeletionPredicate deletionPredicate = rewriteToOriginal(entry.getPredicate()); + return new TableDeletionEntry(deletionPredicate, entry.getTimeRange()); + } + + public TableDeletionEntry rewriteToFinal(TableDeletionEntry entry) { + DeletionPredicate deletionPredicate = rewriteToFinal(entry.getPredicate()); + return new TableDeletionEntry(deletionPredicate, entry.getTimeRange()); + } + + private DeletionPredicate rewriteToFinal(DeletionPredicate predicate) { + String finalTableName = getFinalTableName(predicate.getTableName()); + TagPredicate tagPredicate = predicate.getTagPredicate(); + tagPredicate = tagPredicate.rewriteToOriginal(this); + List newMeasurements = + predicate.getMeasurementNames().stream() + .map(m -> getFinalColumnName(predicate.getTableName(), m)) + .collect(Collectors.toList()); + return new DeletionPredicate(finalTableName, tagPredicate, newMeasurements); + } + + private DeletionPredicate rewriteToOriginal(DeletionPredicate predicate) { + String originalTableName = getOriginalTableName(predicate.getTableName()); + TagPredicate tagPredicate = predicate.getTagPredicate(); + tagPredicate = tagPredicate.rewriteToOriginal(this); + List newMeasurements = + 
predicate.getMeasurementNames().stream() + .map(m -> getOriginalColumnName(predicate.getTableName(), m)) + .collect(Collectors.toList()); + return new DeletionPredicate(originalTableName, tagPredicate, newMeasurements); + } + + public IDeviceID rewriteToOriginal(IDeviceID deviceID) { + String tableName = deviceID.getTableName(); + String originalTableName = getOriginalTableName(tableName); + return rewriteTableName(deviceID, originalTableName); + } + + public IDeviceID rewriteToFinal(IDeviceID deviceID) { + String tableName = deviceID.getTableName(); + String finalTableName = getFinalTableName(tableName); + return rewriteTableName(deviceID, finalTableName); + } + + public void rewriteToFinal( + String originalTableName, List timeseriesMetadataList) { + timeseriesMetadataList.forEach( + timeseriesMetadata -> { + String finalColumnName = + getFinalColumnName(originalTableName, timeseriesMetadata.getMeasurementId()); + timeseriesMetadata.setMeasurementId(finalColumnName); + }); + } + + public Map rewriteToFinal(Map tableSchemas) { + Map finalTableSchemas = new HashMap<>(tableSchemas.size()); + for (Map.Entry entry : tableSchemas.entrySet()) { + TableSchema tableSchema = entry.getValue(); + tableSchema = rewriteToFinal(tableSchema); + finalTableSchemas.put(tableSchema.getTableName(), tableSchema); + } + return finalTableSchemas; + } + + private TableSchema rewriteToOriginal(TableSchema tableSchema) { + String originalTableName = getOriginalTableName(tableSchema.getTableName()); + + List measurementSchemas = + new ArrayList<>(tableSchema.getColumnSchemas().size()); + List columnCategories = new ArrayList<>(tableSchema.getColumnTypes().size()); + List columnSchemas = tableSchema.getColumnSchemas(); + for (int i = 0, columnSchemasSize = columnSchemas.size(); i < columnSchemasSize; i++) { + IMeasurementSchema measurementSchema = columnSchemas.get(i); + measurementSchemas.add( + new MeasurementSchema( + getOriginalColumnName( + tableSchema.getTableName(), 
measurementSchema.getMeasurementName()), + measurementSchema.getType(), + measurementSchema.getEncodingType(), + measurementSchema.getCompressor())); + columnCategories.add(tableSchema.getColumnTypes().get(i)); + } + + TableSchema schema = new TableSchema(originalTableName, measurementSchemas, columnCategories); + schema.setUpdatable(tableSchema.isUpdatable()); + return schema; + } + + public TableSchema rewriteToFinal(TableSchema tableSchema) { + String finalTableName = getFinalTableName(tableSchema.getTableName()); + + List measurementSchemas = + new ArrayList<>(tableSchema.getColumnSchemas().size()); + List columnCategories = new ArrayList<>(tableSchema.getColumnTypes().size()); + List columnSchemas = tableSchema.getColumnSchemas(); + for (int i = 0, columnSchemasSize = columnSchemas.size(); i < columnSchemasSize; i++) { + IMeasurementSchema measurementSchema = columnSchemas.get(i); + measurementSchemas.add( + new MeasurementSchema( + getFinalColumnName( + tableSchema.getTableName(), measurementSchema.getMeasurementName()), + measurementSchema.getType(), + measurementSchema.getEncodingType(), + measurementSchema.getCompressor())); + columnCategories.add(tableSchema.getColumnTypes().get(i)); + } + + TableSchema schema = new TableSchema(finalTableName, measurementSchemas, columnCategories); + schema.setUpdatable(tableSchema.isUpdatable()); + return schema; + } + + @SuppressWarnings("SuspiciousSystemArraycopy") + public static IDeviceID rewriteTableName(IDeviceID deviceID, String newTableName) { + String tableName = deviceID.getTableName(); + if (!tableName.equals(newTableName)) { + Object[] segments = deviceID.getSegments(); + String[] newSegments = new String[segments.length]; + newSegments[0] = newTableName; + System.arraycopy(segments, 1, newSegments, 1, segments.length - 1); + return Factory.DEFAULT_FACTORY.create(newSegments); + } + return deviceID; + } + + public static EvolvedSchema deepCopy(EvolvedSchema evolvedSchema) { + EvolvedSchema newEvolvedSchema = 
new EvolvedSchema(); + newEvolvedSchema.finalToOriginalTableNames = + new LinkedHashMap<>(evolvedSchema.finalToOriginalTableNames); + newEvolvedSchema.finalToOriginalColumnNames = new LinkedHashMap<>(); + for (Entry> entry : + evolvedSchema.finalToOriginalColumnNames.entrySet()) { + newEvolvedSchema.finalToOriginalColumnNames.put( + entry.getKey(), new LinkedHashMap<>(entry.getValue())); + } + newEvolvedSchema.originalToFinalTableNames = + new LinkedHashMap<>(evolvedSchema.originalToFinalTableNames); + newEvolvedSchema.originalToFinalColumnNames = new LinkedHashMap<>(); + for (Entry> entry : + evolvedSchema.originalToFinalColumnNames.entrySet()) { + newEvolvedSchema.originalToFinalColumnNames.put( + entry.getKey(), new LinkedHashMap<>(entry.getValue())); + } + return newEvolvedSchema; + } + + public static EvolvedSchema merge(EvolvedSchema... schemas) { + EvolvedSchema firstNotNullSchema = null; + int i = 0; + for (; i < schemas.length; i++) { + if (schemas[i] != null) { + firstNotNullSchema = schemas[i]; + i++; + break; + } + } + if (i == schemas.length) { + return firstNotNullSchema; + } + + if (firstNotNullSchema == null) { + return null; + } + EvolvedSchema mergedSchema = deepCopy(firstNotNullSchema); + + for (; i < schemas.length; i++) { + if (schemas[i] != null) { + EvolvedSchema newSchema = schemas[i]; + for (Entry finalOriginalTableName : + newSchema.finalToOriginalTableNames.entrySet()) { + if (!finalOriginalTableName.getValue().isEmpty()) { + mergedSchema.renameTable( + finalOriginalTableName.getValue(), finalOriginalTableName.getKey()); + } + } + for (Entry> finalTableNameColumnNameMapEntry : + newSchema.finalToOriginalColumnNames.entrySet()) { + for (Entry finalColNameOriginalColNameEntry : + finalTableNameColumnNameMapEntry.getValue().entrySet()) { + if (!finalColNameOriginalColNameEntry.getValue().isEmpty()) { + String finalTableName = finalTableNameColumnNameMapEntry.getKey(); + String finalColName = finalColNameOriginalColNameEntry.getKey(); + 
String originalColName = finalColNameOriginalColNameEntry.getValue(); + mergedSchema.renameColumn(finalTableName, originalColName, finalColName); + } + } + } + } + } + return mergedSchema; + } + + public Schema rewriteToOriginal(Schema schema) { + return rewriteToOriginal(schema, null); + } + + public Schema rewriteToOriginal( + Schema schema, Function tableSchemaTransformer) { + Schema copySchema = new Schema(); + for (TableSchema tableSchema : schema.getTableSchemaMap().values()) { + TableSchema originalSchema = rewriteToOriginal(tableSchema); + if (tableSchemaTransformer != null) { + originalSchema = tableSchemaTransformer.apply(originalSchema); + } + copySchema.registerTableSchema(originalSchema); + } + return copySchema; + } + + public void rewriteToFinal( + AbstractAlignedChunkMetadata abstractAlignedChunkMetadata, String originalTableName) { + for (IChunkMetadata iChunkMetadata : abstractAlignedChunkMetadata.getValueChunkMetadataList()) { + if (iChunkMetadata != null) { + iChunkMetadata.setMeasurementUid( + getFinalColumnName(originalTableName, iChunkMetadata.getMeasurementUid())); + } + } + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.sizeOfMap(this.finalToOriginalTableNames) + + RamUsageEstimator.sizeOfMap(this.finalToOriginalColumnNames) + + RamUsageEstimator.sizeOfMap(this.originalToFinalTableNames) + + RamUsageEstimator.sizeOfMap(this.originalToFinalColumnNames); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaCache.java new file mode 100644 index 0000000000000..d036528f6de65 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaCache.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; + +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.Weigher; + +import java.util.function.Supplier; + +public class EvolvedSchemaCache { + + private final Cache cache; + + private EvolvedSchemaCache() { + cache = + Caffeine.newBuilder() + .weigher( + (Weigher) + (k, v) -> { + // TsFileSet is always in memory, do not count it + return (int) v.ramBytesUsed(); + }) + .maximumWeight( + // TODO-Sevo configurable + 128 * 1024 * 1024L) + .build(); + } + + public EvolvedSchema computeIfAbsent( + TsFileSet tsFileSet, Supplier schemaSupplier) { + return cache.get(tsFileSet, k -> schemaSupplier.get()); + } + + public void invalidate(TsFileSet tsFileSet) { + cache.invalidate(tsFileSet); + } + + public static EvolvedSchemaCache getInstance() { + return InstanceHolder.INSTANCE; + } + + private static class InstanceHolder { + private static final EvolvedSchemaCache INSTANCE = new EvolvedSchemaCache(); + } +} diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolution.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolution.java new file mode 100644 index 0000000000000..109998dec27ee --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolution.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.db.utils.io.BufferSerializable; +import org.apache.iotdb.db.utils.io.StreamSerializable; + +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +/** A schema evolution operation that can be applied to a TableSchemaMap. */ +public interface SchemaEvolution extends StreamSerializable, BufferSerializable { + + /** + * Apply this schema evolution operation to the given metadata. 
+ * + * @param schema the schema to apply the operation to + */ + void applyTo(EvolvedSchema schema); + + SchemaEvolutionType getEvolutionType(); + + enum SchemaEvolutionType { + TABLE_RENAME, + COLUMN_RENAME + } + + static SchemaEvolution createFrom(int type) { + // Bounds check must reject type == values().length as well; otherwise the + // array access below throws ArrayIndexOutOfBoundsException instead of the + // intended IllegalArgumentException for an unknown/corrupt evolution type. + if (type < 0 || type >= SchemaEvolutionType.values().length) { + throw new IllegalArgumentException("Invalid evolution type: " + type); + } + SchemaEvolution evolution; + SchemaEvolutionType evolutionType = SchemaEvolutionType.values()[type]; + switch (evolutionType) { + case TABLE_RENAME: + evolution = new TableRename(); + break; + case COLUMN_RENAME: + evolution = new ColumnRename(); + break; + default: + throw new IllegalArgumentException("Invalid evolution type: " + evolutionType); + } + return evolution; + } + + static SchemaEvolution createFrom(InputStream stream) throws IOException { + int type = ReadWriteForEncodingUtils.readVarInt(stream); + SchemaEvolution evolution = createFrom(type); + evolution.deserialize(stream); + return evolution; + } + + static SchemaEvolution createFrom(ByteBuffer buffer) { + int type = ReadWriteForEncodingUtils.readVarInt(buffer); + SchemaEvolution evolution = createFrom(type); + evolution.deserialize(buffer); + return evolution; + } + + static List createListFrom(ByteBuffer buffer) { + int size = ReadWriteForEncodingUtils.readVarInt(buffer); + List list = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + list.add(createFrom(buffer)); + } + return list; + } + + static void serializeList(List list, OutputStream stream) throws IOException { + ReadWriteForEncodingUtils.writeVarInt(list.size(), stream); + for (SchemaEvolution evolution : list) { + evolution.serialize(stream); + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFile.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFile.java new file mode 100644
index 0000000000000..e7dd3326913bd --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFile.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.commons.utils.FileUtils; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.util.Collection; + +/** SchemaEvolutionFile manages schema evolutions related to a TsFileSet. */ +public class SchemaEvolutionFile { + public static final String FILE_SUFFIX = ".sevo"; + + private String filePath; + + public SchemaEvolutionFile(String filePath) { + this.filePath = filePath; + } + + /** + * Recover the SchemaEvolutionFile if it is broken. 
+ * + * @return true if the file exists false otherwise + * @throws IOException if the file cannot be recovered + */ + private boolean recoverFile() throws IOException { + File file = new File(filePath); + if (!file.exists() || file.length() == 0) { + return false; + } + + long length = file.length(); + String fileName = file.getName(); + long validLength = parseValidLength(fileName); + if (length > validLength) { + try (FileOutputStream fis = new FileOutputStream(file, true); + FileChannel fileChannel = fis.getChannel()) { + fileChannel.truncate(validLength); + } + } + return true; + } + + public static long parseValidLength(String fileName) { + return Long.parseLong(fileName.substring(0, fileName.lastIndexOf('.'))); + } + + public void append(Collection schemaEvolutions) throws IOException { + recoverFile(); + + try (FileOutputStream fos = new FileOutputStream(filePath, true); + BufferedOutputStream bos = new BufferedOutputStream(fos)) { + for (SchemaEvolution schemaEvolution : schemaEvolutions) { + schemaEvolution.serialize(bos); + } + } + + File originFile = new File(filePath); + long newLength = originFile.length(); + File newFile = new File(originFile.getParentFile(), newLength + FILE_SUFFIX); + FileUtils.moveFileSafe(originFile, newFile); + filePath = newFile.getAbsolutePath(); + } + + public EvolvedSchema readAsSchema() throws IOException { + boolean exists = recoverFile(); + if (!exists) { + return null; + } + + EvolvedSchema evolvedSchema = new EvolvedSchema(); + try (FileInputStream fis = new FileInputStream(filePath); + BufferedInputStream bis = new BufferedInputStream(fis)) { + while (bis.available() > 0) { + SchemaEvolution evolution = SchemaEvolution.createFrom(bis); + evolution.applyTo(evolvedSchema); + } + } + return evolvedSchema; + } + + public String getFilePath() { + return filePath; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/TableRename.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/TableRename.java new file mode 100644 index 0000000000000..a37557f45aa88 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/TableRename.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.IDeviceID.Factory; +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** A schema evolution operation that renames a table in a schema map. */ +public class TableRename implements SchemaEvolution { + + private String nameBefore; + private String nameAfter; + + // for deserialization + public TableRename() {} + + public TableRename(String nameBefore, String nameAfter) { + this.nameBefore = nameBefore.toLowerCase(); + this.nameAfter = nameAfter.toLowerCase(); + } + + @Override + public void applyTo(EvolvedSchema evolvedSchema) { + evolvedSchema.renameTable(nameBefore, nameAfter); + } + + @Override + public SchemaEvolutionType getEvolutionType() { + return SchemaEvolutionType.TABLE_RENAME; + } + + @Override + public long serialize(OutputStream stream) throws IOException { + long size = ReadWriteForEncodingUtils.writeVarInt(getEvolutionType().ordinal(), stream); + size += ReadWriteIOUtils.writeVar(nameBefore, stream); + size += ReadWriteIOUtils.writeVar(nameAfter, stream); + return size; + } + + @Override + public void deserialize(InputStream stream) throws IOException { + nameBefore = ReadWriteIOUtils.readVarIntString(stream); + nameAfter = ReadWriteIOUtils.readVarIntString(stream); + } + + @Override + public long 
serialize(ByteBuffer buffer) { + long size = ReadWriteForEncodingUtils.writeVarInt(getEvolutionType().ordinal(), buffer); + size += ReadWriteIOUtils.writeVar(nameBefore, buffer); + size += ReadWriteIOUtils.writeVar(nameAfter, buffer); + return size; + } + + @Override + public void deserialize(ByteBuffer buffer) { + nameBefore = ReadWriteIOUtils.readVarIntString(buffer); + nameAfter = ReadWriteIOUtils.readVarIntString(buffer); + } + + public String getNameBefore() { + return nameBefore; + } + + public String getNameAfter() { + return nameAfter; + } + + @SuppressWarnings("SuspiciousSystemArraycopy") + public IDeviceID rewriteDeviceId(IDeviceID deviceId) { + if (!deviceId.getTableName().equals(nameBefore)) { + return deviceId; + } + + Object[] segments = deviceId.getSegments(); + String[] newSegments = new String[segments.length]; + newSegments[0] = nameAfter; + System.arraycopy(segments, 1, newSegments, 1, segments.length - 1); + return Factory.DEFAULT_FACTORY.create(newSegments); + } + + public void rewriteMap(Map map) { + List affectedDeviceId = + map.keySet().stream() + .filter(k -> k.getTableName().equals(getNameBefore())) + .collect(Collectors.toList()); + for (IDeviceID deviceID : affectedDeviceId) { + IDeviceID newDeviceId = rewriteDeviceId(deviceID); + T removed = map.remove(deviceID); + map.put(newDeviceId, removed); + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/fileset/TsFileSet.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/fileset/TsFileSet.java new file mode 100644 index 0000000000000..c3230c6297cad --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/fileset/TsFileSet.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset; + +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchemaCache; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; + +import org.apache.tsfile.external.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/** TsFileSet represents a set of TsFiles in a time partition whose version <= endVersion. 
*/ +public class TsFileSet implements Comparable { + + private static final Logger LOGGER = LoggerFactory.getLogger(TsFileSet.class); + public static final String FILE_SET_DIR_NAME = "filesets"; + + private final long endVersion; + private final File fileSetDir; + private final ReentrantReadWriteLock lock; + private SchemaEvolutionFile schemaEvolutionFile; + + public TsFileSet(long endVersion, String fileSetsDir, boolean recover) { + this.endVersion = endVersion; + this.fileSetDir = new File(fileSetsDir + File.separator + endVersion); + this.lock = new ReentrantReadWriteLock(); + + if (recover) { + recover(); + } else { + //noinspection ResultOfMethodCallIgnored + fileSetDir.mkdirs(); + } + + if (schemaEvolutionFile == null) { + schemaEvolutionFile = + new SchemaEvolutionFile( + fileSetDir + File.separator + 0 + SchemaEvolutionFile.FILE_SUFFIX); + } + } + + private void recover() { + File[] files = fileSetDir.listFiles(); + if (files != null) { + for (File file : files) { + if (file.getName().endsWith(SchemaEvolutionFile.FILE_SUFFIX)) { + schemaEvolutionFile = new SchemaEvolutionFile(file.getAbsolutePath()); + } + } + } + } + + public void appendSchemaEvolution(Collection schemaEvolutions) + throws IOException { + writeLock(); + try { + schemaEvolutionFile.append(schemaEvolutions); + EvolvedSchemaCache.getInstance().invalidate(this); + } finally { + writeUnlock(); + } + } + + public EvolvedSchema readEvolvedSchema() throws IOException { + readLock(); + try { + return EvolvedSchemaCache.getInstance() + .computeIfAbsent( + this, + () -> { + try { + return schemaEvolutionFile.readAsSchema(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } finally { + readUnlock(); + } + } + + @Override + public int compareTo(TsFileSet o) { + return Long.compare(endVersion, o.endVersion); + } + + public void writeLock() { + lock.writeLock().lock(); + } + + public void readLock() { + lock.readLock().lock(); + } + + public void writeUnlock() { + 
lock.writeLock().unlock(); + } + + public void readUnlock() { + lock.readLock().unlock(); + } + + public long getEndVersion() { + return endVersion; + } + + @Override + public String toString() { + return "TsFileSet{" + "endVersion=" + endVersion + ", fileSetDir=" + fileSetDir + '}'; + } + + public void remove() { + FileUtils.deleteQuietly(fileSetDir); + } + + public boolean contains(TsFileResource tsFileResource) { + return tsFileResource.getVersion() <= endVersion; + } + + public static EvolvedSchema getMergedEvolvedSchema(List tsFileSetList) { + List list = new ArrayList<>(); + for (TsFileSet fileSet : tsFileSetList) { + try { + EvolvedSchema readEvolvedSchema = fileSet.readEvolvedSchema(); + list.add(readEvolvedSchema); + } catch (IOException e) { + LOGGER.warn("Cannot read evolved schema from {}, skipping it", fileSet); + } + } + + return EvolvedSchema.merge(list.toArray(new EvolvedSchema[0])); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/generator/TsFileNameGenerator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/generator/TsFileNameGenerator.java index 16be82188e9ca..698b4b95be681 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/generator/TsFileNameGenerator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/generator/TsFileNameGenerator.java @@ -363,6 +363,7 @@ public static List getNewInnerCompactionTargetFileResources( TsFileResourceStatus.COMPACTING); targetResource.setSeq(sequence); targetResources.add(targetResource); + targetResource.setTsFileManager(resource.getTsFileManager()); } return targetResources; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/recover/file/UnsealedTsFileRecoverPerformer.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/recover/file/UnsealedTsFileRecoverPerformer.java index f4da410733458..0c222a7dbda75 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/recover/file/UnsealedTsFileRecoverPerformer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/recover/file/UnsealedTsFileRecoverPerformer.java @@ -34,9 +34,9 @@ import org.apache.iotdb.db.storageengine.dataregion.memtable.IWritableMemChunk; import org.apache.iotdb.db.storageengine.dataregion.memtable.IWritableMemChunkGroup; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.TreeDeletionEntry; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.FileTimeIndexCacheRecorder; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/config/LoadTsFileConfigurator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/config/LoadTsFileConfigurator.java index 8478486781be5..9bffbfffce46a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/config/LoadTsFileConfigurator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/config/LoadTsFileConfigurator.java @@ -26,6 +26,7 @@ import javax.annotation.Nullable; +import java.io.File; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -60,11 +61,21 @@ 
public static void validateParameters(final String key, final String value) { case ASYNC_LOAD_KEY: validateAsyncLoadParam(value); break; + case SEVO_FILE_PATH_KEY: + validateSevoFilePathParam(value); + break; default: throw new SemanticException("Invalid parameter '" + key + "' for LOAD TSFILE command."); } } + private static void validateSevoFilePathParam(String value) { + File file = new File(value); + if (!file.exists()) { + throw new SemanticException("The sevo file " + value + " does not exist."); + } + } + public static void validateSynonymParameters(final Map parameters) { if (parameters.containsKey(DATABASE_KEY) && parameters.containsKey(DATABASE_NAME_KEY)) { throw new SemanticException( @@ -115,6 +126,13 @@ public static int parseOrGetDefaultDatabaseLevel(final Map loadA return Objects.nonNull(databaseName) ? databaseName.toLowerCase(Locale.ENGLISH) : null; } + public static final String SEVO_FILE_PATH_KEY = "sevo-file-path"; + + public static @Nullable File parseSevoFile(final Map loadAttributes) { + String sevoFilePath = loadAttributes.get(SEVO_FILE_PATH_KEY); + return sevoFilePath != null ? 
new File(sevoFilePath) : null; + } + public static final String ON_SUCCESS_KEY = "on-success"; public static final String ON_SUCCESS_DELETE_VALUE = "delete"; public static final String ON_SUCCESS_NONE_VALUE = "none"; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/AlignedChunkData.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/AlignedChunkData.java index 72268168258ee..216a22eaa6f57 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/AlignedChunkData.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/AlignedChunkData.java @@ -22,6 +22,7 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.load.LoadTsFilePieceNode; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.exception.write.PageException; @@ -66,7 +67,7 @@ public class AlignedChunkData implements ChunkData { protected static final Binary DEFAULT_BINARY = null; protected final TTimePartitionSlot timePartitionSlot; - protected final IDeviceID device; + protected IDeviceID device; protected List chunkHeaderList; protected PublicBAOS byteStream; @@ -508,4 +509,14 @@ public String toString() { + needDecodeChunk + '}'; } + + @Override + public void rewriteToFinal(EvolvedSchema evolvedSchema) { + IDeviceID newDevice = evolvedSchema.rewriteToFinal(device); + chunkHeaderList.forEach( + h -> + h.setMeasurementID( + evolvedSchema.getFinalColumnName(device.getTableName(), h.getMeasurementID()))); + device = newDevice; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/DeletionData.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/DeletionData.java index 0695c7a84def9..c140b79bc1226 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/DeletionData.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/DeletionData.java @@ -22,6 +22,7 @@ import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.utils.ReadWriteIOUtils; @@ -31,7 +32,7 @@ import java.io.InputStream; public class DeletionData implements TsFileData { - private final ModEntry deletion; + private ModEntry deletion; public DeletionData(ModEntry deletion) { this.deletion = deletion; @@ -51,6 +52,11 @@ public TsFileDataType getType() { return TsFileDataType.DELETION; } + @Override + public void rewriteToFinal(EvolvedSchema evolvedSchema) { + deletion = evolvedSchema.rewriteToFinal(deletion); + } + @Override public void serialize(DataOutputStream stream) throws IOException { ReadWriteIOUtils.write(getType().ordinal(), stream); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/NonAlignedChunkData.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/NonAlignedChunkData.java index 2310b9cb95c3e..5ad970c38de65 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/NonAlignedChunkData.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/NonAlignedChunkData.java @@ -21,6 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.utils.TimePartitionUtils; +import 
org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.exception.write.PageException; import org.apache.tsfile.file.header.ChunkHeader; @@ -52,7 +53,7 @@ public class NonAlignedChunkData implements ChunkData { private final TTimePartitionSlot timePartitionSlot; - private final IDeviceID device; + private IDeviceID device; private final ChunkHeader chunkHeader; private final PublicBAOS byteStream; @@ -316,6 +317,14 @@ private void close() throws IOException { stream.close(); } + @Override + public void rewriteToFinal(EvolvedSchema evolvedSchema) { + IDeviceID newDevice = evolvedSchema.rewriteToFinal(device); + chunkHeader.setMeasurementID( + evolvedSchema.getFinalColumnName(device.getTableName(), chunkHeader.getMeasurementID())); + device = newDevice; + } + @Override public String toString() { return "NonAlignedChunkData{" diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileData.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileData.java index f24eb45c01bc6..d3c5d150b37e7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileData.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileData.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.storageengine.load.splitter; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.exception.write.PageException; import org.apache.tsfile.utils.ReadWriteIOUtils; @@ -35,6 +36,8 @@ public interface TsFileData { void serialize(DataOutputStream stream) throws IOException; + void rewriteToFinal(EvolvedSchema evolvedSchema); + static TsFileData deserialize(InputStream stream) throws IOException, PageException, IllegalPathException { final TsFileDataType type = 
TsFileDataType.values()[ReadWriteIOUtils.readInt(stream)]; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileSplitter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileSplitter.java index 5a75f4fb8e085..f67c0d96ed00a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileSplitter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileSplitter.java @@ -26,6 +26,8 @@ import org.apache.iotdb.db.exception.load.LoadFileException; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; import org.apache.tsfile.common.conf.TSFileConfig; import org.apache.tsfile.common.conf.TSFileDescriptor; @@ -67,7 +69,7 @@ public class TsFileSplitter { private static final IoTDBConfig CONFIG = IoTDBDescriptor.getInstance().getConfig(); private final File tsFile; - private final TsFileDataConsumer consumer; + private TsFileDataConsumer consumer; private Map offset2ChunkMetadata = new HashMap<>(); private List deletions = new ArrayList<>(); private Map> pageIndex2ChunkData = new HashMap<>(); @@ -77,6 +79,7 @@ public class TsFileSplitter { private boolean isAligned; private int timeChunkIndexOfCurrentValueColumn = 0; private Set timePartitionSlots = new HashSet<>(); + private EvolvedSchema evolvedSchema; // Maintain the number of times the chunk of each measurement appears. 
private Map valueColumn2TimeChunkIndex = new HashMap<>(); @@ -87,9 +90,18 @@ public class TsFileSplitter { private List> pageIndex2TimesList = null; private List isTimeChunkNeedDecodeList = new ArrayList<>(); - public TsFileSplitter(File tsFile, TsFileDataConsumer consumer) { + public TsFileSplitter(File tsFile, TsFileDataConsumer consumer, File schemaEvolutionFile) { this.tsFile = tsFile; this.consumer = consumer; + if (schemaEvolutionFile != null && schemaEvolutionFile.exists()) { + SchemaEvolutionFile sevoFile = new SchemaEvolutionFile(schemaEvolutionFile.getAbsolutePath()); + try { + this.evolvedSchema = sevoFile.readAsSchema(); + this.consumer = new SchemaEvolutionTsFileDataConsumer(this.consumer, evolvedSchema); + } catch (IOException e) { + logger.error("Cannot read schema evolution file, ignoring it.", e); + } + } } @SuppressWarnings({"squid:S3776", "squid:S6541"}) @@ -588,4 +600,38 @@ private TsPrimitiveType[] decodeValuePage( public interface TsFileDataConsumer { boolean apply(TsFileData tsFileData) throws LoadFileException; } + + public abstract class WrappedTsFileDataConsumer implements TsFileDataConsumer { + + private TsFileDataConsumer delegate; + + public WrappedTsFileDataConsumer(TsFileDataConsumer delegate) { + this.delegate = delegate; + } + + protected abstract TsFileData rewrite(TsFileData tsFileData); + + @Override + public boolean apply(TsFileData tsFileData) throws LoadFileException { + tsFileData = rewrite(tsFileData); + return delegate.apply(tsFileData); + } + } + + private class SchemaEvolutionTsFileDataConsumer extends WrappedTsFileDataConsumer { + + private EvolvedSchema evolvedSchema; + + public SchemaEvolutionTsFileDataConsumer( + TsFileDataConsumer delegate, EvolvedSchema evolvedSchema) { + super(delegate); + this.evolvedSchema = evolvedSchema; + } + + @Override + protected TsFileData rewrite(TsFileData tsFileData) { + tsFileData.rewriteToFinal(evolvedSchema); + return tsFileData; + } + } } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/CommonUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/CommonUtils.java index 784312f9f9cd2..a8dc5ae94cbfb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/CommonUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/CommonUtils.java @@ -19,17 +19,23 @@ package org.apache.iotdb.db.utils; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.NoTableNameDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; +import org.apache.iotdb.commons.schema.table.TsTable; import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; import org.apache.iotdb.commons.utils.CommonDateTimeUtils; +import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.db.exception.query.QueryProcessException; import org.apache.iotdb.db.exception.sql.SemanticException; import org.apache.iotdb.db.protocol.thrift.OperationType; import org.apache.iotdb.db.queryengine.plan.execution.IQueryExecution; import org.apache.iotdb.db.queryengine.plan.statement.StatementType; import org.apache.iotdb.db.queryengine.plan.statement.literal.BinaryLiteral; +import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; import org.apache.iotdb.db.utils.constant.SqlConstant; import org.apache.iotdb.metrics.utils.MetricLevel; import org.apache.iotdb.service.rpc.thrift.TSAggregationQueryReq; @@ -456,4 +462,26 @@ public static String toString(TsBlock tsBlock) { } return tsBlockBuilder.toString(); } + + public static SeriesPartitionKey getSeriesPartitionKey( + IDeviceID deviceID, String databaseName, boolean 
tableMustExist) { + if (databaseName != null && PathUtils.isTableModelDatabase(databaseName)) { + TsTable table = + DataNodeTableCache.getInstance() + .getTable(databaseName, deviceID.getTableName(), tableMustExist); + if (table == null) { + // if table does not exist, then we are creating a new table + // use the default setting + return TsTable.ALLOW_ALTER_NAME_DEFAULT + ? new NoTableNameDeviceIdKey(deviceID) + : new FullDeviceIdKey(deviceID); + } + if (table.canAlterName()) { + return new NoTableNameDeviceIdKey(deviceID); + } else { + return new FullDeviceIdKey(deviceID); + } + } + return new FullDeviceIdKey(deviceID); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/io/IOUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/io/IOUtils.java new file mode 100644 index 0000000000000..3a1841190cb2b --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/io/IOUtils.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.utils.io; + +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.List; + +// TODO: move to TsFile +public class IOUtils { + + private IOUtils() { + // util class + } + + public static long writeList(List list, ByteBuffer byteBuffer) { + long size = ReadWriteForEncodingUtils.writeVarInt(list.size(), byteBuffer); + for (BufferSerializable item : list) { + size += item.serialize(byteBuffer); + } + return size; + } + + public static long writeList(List list, OutputStream stream) + throws IOException { + long size = ReadWriteForEncodingUtils.writeVarInt(list.size(), stream); + for (StreamSerializable item : list) { + size += item.serialize(stream); + } + return size; + } +} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/path/PatternTreeMapTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/path/PatternTreeMapTest.java index 7a7d71bef5127..6f14ac8638467 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/path/PatternTreeMapTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/path/PatternTreeMapTest.java @@ -24,9 +24,9 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PatternTreeMap; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; import org.apache.iotdb.db.storageengine.dataregion.modification.TreeDeletionEntry; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; import 
org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory.ModsSerializer; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionRecoverTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionRecoverTest.java index 06f823c0e23fb..fcb2ffdb79294 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionRecoverTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionRecoverTest.java @@ -30,8 +30,8 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalDeleteDataNode; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; import org.apache.tsfile.read.common.TimeRange; import org.junit.After; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionResourceTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionResourceTest.java index f94d909f94bd1..05c1f9361c66b 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionResourceTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionResourceTest.java @@ -43,8 +43,8 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalDeleteDataNode; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; import 
org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; import org.apache.tsfile.read.common.TimeRange; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/source/PipePlanTablePatternParseVisitorTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/source/PipePlanTablePatternParseVisitorTest.java index d01351ce60adc..82aee3b1fba33 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/source/PipePlanTablePatternParseVisitorTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/source/PipePlanTablePatternParseVisitorTest.java @@ -29,8 +29,8 @@ import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.TableDeviceAttributeUpdateNode; import org.apache.iotdb.db.storageengine.dataregion.memtable.DeviceIDFactory; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate; import org.apache.tsfile.read.common.TimeRange; import org.junit.Assert; @@ -115,12 +115,12 @@ public void testDeleteData() { new TableDeletionEntry( new DeletionPredicate( "ac", - new IDPredicate.And( - new IDPredicate.FullExactMatch( + new TagPredicate.And( + new TagPredicate.FullExactMatch( DeviceIDFactory.getInstance() .getDeviceID( new PartialPath(new String[] {"ac", "device1"}))), - new IDPredicate.SegmentExactMatch("device2", 1))), + new TagPredicate.SegmentExactMatch("device2", 1))), new TimeRange(0, 1))), "db1"), tablePattern) diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/node/load/LoadTsFileNodeTest.java 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/node/load/LoadTsFileNodeTest.java index e425c709815c3..6b4eec7f63f41 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/node/load/LoadTsFileNodeTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/node/load/LoadTsFileNodeTest.java @@ -41,7 +41,7 @@ public void testLoadSingleTsFileNode() { TsFileResource resource = new TsFileResource(new File("1")); String database = "root.db"; LoadSingleTsFileNode node = - new LoadSingleTsFileNode(new PlanNodeId(""), resource, false, database, true, 0L); + new LoadSingleTsFileNode(new PlanNodeId(""), resource, false, database, true, 0L, null); Assert.assertTrue(node.isDeleteAfterLoad()); Assert.assertEquals(resource, node.getTsFileResource()); Assert.assertEquals(database, node.getDatabase()); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalDeleteDataNodeTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalDeleteDataNodeTest.java index e51a8b99db91d..a58b4ae7c00da 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalDeleteDataNodeTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalDeleteDataNodeTest.java @@ -24,11 +24,11 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.And; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; 
-import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.SegmentExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.And; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.SegmentExactMatch; import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALByteBufferForTest; import org.apache.tsfile.file.metadata.IDeviceID.Factory; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/statement/StatementTestUtils.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/statement/StatementTestUtils.java index a80095458a942..2b064a77721b1 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/statement/StatementTestUtils.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/statement/StatementTestUtils.java @@ -55,7 +55,11 @@ private StatementTestUtils() { } public static String tableName() { - return "table1"; + return tableName(1); + } + + public static String tableName(int i) { + return "table" + i; } public static String[] genColumnNames() { @@ -232,7 +236,11 @@ public static InsertRowStatement genInsertRowStatement(boolean writeToTable) { } public static TsTable genTsTable() { - final TsTable tsTable = new TsTable(tableName()); + return genTsTable(1); + } + + public static TsTable genTsTable(int tableId) { + final TsTable tsTable = new TsTable(tableName(tableId)); String[] measurements = genColumnNames(); TSDataType[] dataTypes = genDataTypes(); TsTableColumnCategory[] columnCategories = genColumnCategories(); diff --git 
a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/DataRegionTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/DataRegionTest.java index c3895a058c63c..894a445e84746 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/DataRegionTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/DataRegionTest.java @@ -30,6 +30,7 @@ import org.apache.iotdb.commons.path.MeasurementPath; import org.apache.iotdb.commons.path.NonAlignedFullPath; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.DataRegionException; @@ -44,6 +45,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertTabletNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalDeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalInsertRowNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalInsertTabletNode; import org.apache.iotdb.db.queryengine.plan.statement.StatementTestUtils; @@ -58,11 +60,18 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.constant.InnerUnsequenceCompactionSelector; import org.apache.iotdb.db.storageengine.dataregion.compaction.utils.CompactionConfigRestorer; import org.apache.iotdb.db.storageengine.dataregion.flush.FlushManager; -import org.apache.iotdb.db.storageengine.dataregion.flush.TsFileFlushPolicy; +import org.apache.iotdb.db.storageengine.dataregion.flush.TsFileFlushPolicy.DirectFlushPolicy; import 
org.apache.iotdb.db.storageengine.dataregion.memtable.ReadOnlyMemChunk; import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor; +import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; +import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; import org.apache.iotdb.db.storageengine.dataregion.read.QueryDataSource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource.ModIterator; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.ColumnRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; import org.apache.iotdb.db.storageengine.dataregion.tsfile.generator.TsFileNameGenerator; import org.apache.iotdb.db.storageengine.rescon.memory.MemTableManager; import org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; @@ -71,10 +80,13 @@ import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.IDeviceID.Factory; import org.apache.tsfile.file.metadata.enums.CompressionType; import org.apache.tsfile.file.metadata.enums.TSEncoding; import org.apache.tsfile.read.TimeValuePair; +import org.apache.tsfile.read.common.TimeRange; import org.apache.tsfile.read.reader.IPointReader; +import org.apache.tsfile.utils.Binary; import org.apache.tsfile.utils.BitMap; import org.apache.tsfile.write.record.TSRecord; import org.apache.tsfile.write.record.datapoint.DataPoint; @@ -89,6 +101,7 @@ import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -96,6 +109,9 @@ import static 
org.apache.iotdb.db.queryengine.plan.statement.StatementTestUtils.genInsertRowNode; import static org.apache.iotdb.db.queryengine.plan.statement.StatementTestUtils.genInsertTabletNode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; public class DataRegionTest { private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); @@ -106,7 +122,7 @@ public class DataRegionTest { private String systemDir = TestConstant.OUTPUT_DATA_DIR.concat("info"); private String deviceId = "root.vehicle.d0"; - private IDeviceID device = IDeviceID.Factory.DEFAULT_FACTORY.create(deviceId); + private IDeviceID device = Factory.DEFAULT_FACTORY.create(deviceId); private String measurementId = "s0"; private NonAlignedFullPath nonAlignedFullPath = @@ -134,9 +150,13 @@ public void setUp() throws Exception { config.setInnerUnsequenceCompactionSelector( InnerUnsequenceCompactionSelector.SIZE_TIERED_SINGLE_TARGET); DataNodeTableCache.getInstance() - .preUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.genTsTable(), null); + .preUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.genTsTable(1), null); DataNodeTableCache.getInstance() - .commitUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.tableName(), null); + .commitUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.tableName(1), null); + DataNodeTableCache.getInstance() + .preUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.genTsTable(2), null); + DataNodeTableCache.getInstance() + .commitUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.tableName(2), null); } @After @@ -231,7 +251,7 @@ record = new TSRecord(deviceId, j); null); } - Assert.assertEquals(1, tsfileResourcesForQuery.size()); + assertEquals(1, tsfileResourcesForQuery.size()); List memChunks = tsfileResourcesForQuery.get(0).getReadOnlyMemChunk(IFullPath.convertToIFullPath(fullPath)); long time = 16; @@ -239,7 
+259,7 @@ record = new TSRecord(deviceId, j); IPointReader iterator = memChunk.getPointReader(); while (iterator.hasNextTimeValuePair()) { TimeValuePair timeValuePair = iterator.nextTimeValuePair(); - Assert.assertEquals(time++, timeValuePair.getTimestamp()); + assertEquals(time++, timeValuePair.getTimestamp()); } } } @@ -254,7 +274,7 @@ public void testSequenceSyncClose() dataRegion.syncCloseAllWorkingTsFileProcessors(); } - IDeviceID device = IDeviceID.Factory.DEFAULT_FACTORY.create(deviceId); + IDeviceID device = Factory.DEFAULT_FACTORY.create(deviceId); QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList( @@ -264,9 +284,9 @@ device, new MeasurementSchema(measurementId, TSDataType.INT32))), context, null, null); - Assert.assertEquals(10, queryDataSource.getSeqResources().size()); + assertEquals(10, queryDataSource.getSeqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -297,8 +317,8 @@ public void testRelationalTabletWriteAndSyncClose() context, null, null); - Assert.assertEquals(1, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(1, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); queryDataSource = dataRegion.query( @@ -311,10 +331,10 @@ public void testRelationalTabletWriteAndSyncClose() context, null, null); - Assert.assertEquals(1, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(1, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -345,8 +365,8 @@ public void testRelationRowWriteAndSyncClose() 
context, null, null); - Assert.assertEquals(1, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(1, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); queryDataSource = dataRegion.query( @@ -359,10 +379,10 @@ public void testRelationRowWriteAndSyncClose() context, null, null); - Assert.assertEquals(1, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(1, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -418,8 +438,8 @@ public void testIoTDBTabletWriteAndSyncClose() int hashCode2 = Arrays.hashCode((long[]) columns[1]); dataRegion.insertTablet(insertTabletNode1); // the hashCode should not be changed when insert - Assert.assertEquals(hashCode1, Arrays.hashCode((int[]) columns[0])); - Assert.assertEquals(hashCode2, Arrays.hashCode((long[]) columns[1])); + assertEquals(hashCode1, Arrays.hashCode((int[]) columns[0])); + assertEquals(hashCode2, Arrays.hashCode((long[]) columns[1])); dataRegion.syncCloseAllWorkingTsFileProcessors(); for (int r = 50; r < 149; r++) { @@ -448,10 +468,10 @@ public void testIoTDBTabletWriteAndSyncClose() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(2, queryDataSource.getSeqResources().size()); - Assert.assertEquals(1, queryDataSource.getUnseqResources().size()); + assertEquals(2, queryDataSource.getSeqResources().size()); + assertEquals(1, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -518,16 +538,16 @@ public 
void testIoTDBTabletWriteAndDeleteDataRegion() times.length); dataRegion.insertTablet(insertTabletNode2); - Assert.assertTrue(SystemInfo.getInstance().getTotalMemTableSize() > 0); + assertTrue(SystemInfo.getInstance().getTotalMemTableSize() > 0); dataRegion.syncDeleteDataFiles(); - Assert.assertEquals(0, SystemInfo.getInstance().getTotalMemTableSize()); + assertEquals(0, SystemInfo.getInstance().getTotalMemTableSize()); QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); } @Test @@ -600,10 +620,10 @@ public void testEmptyTabletWriteAndSyncClose() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -676,10 +696,10 @@ public void testAllMeasurementsFailedTabletWriteAndSyncClose() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -703,13 
+723,13 @@ public void testSeqAndUnSeqSyncClose() QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(10, queryDataSource.getSeqResources().size()); - Assert.assertEquals(10, queryDataSource.getUnseqResources().size()); + assertEquals(10, queryDataSource.getSeqResources().size()); + assertEquals(10, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } for (TsFileResource resource : queryDataSource.getUnseqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -737,13 +757,13 @@ public void testAllMeasurementsFailedRecordSeqAndUnSeqSyncClose() QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } for (TsFileResource resource : queryDataSource.getUnseqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -770,13 +790,13 @@ public void testDisableSeparateDataForInsertRowPlan() QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(20, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(20, queryDataSource.getUnseqResources().size()); for 
(TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } for (TsFileResource resource : queryDataSource.getUnseqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } config.setEnableSeparateData(defaultValue); @@ -852,10 +872,10 @@ public void testDisableSeparateDataForInsertTablet1() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(2, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(2, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } config.setEnableSeparateData(defaultEnableDiscard); @@ -932,10 +952,10 @@ public void testDisableSeparateDataForInsertTablet2() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(2, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(2, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } config.setEnableSeparateData(defaultEnableDiscard); @@ -1012,10 +1032,10 @@ public void testDisableSeparateDataForInsertTablet3() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(2, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(2, 
queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } config.setEnableSeparateData(defaultEnableDiscard); @@ -1042,7 +1062,7 @@ public void testInsertUnSequenceRows() InsertRowsNode insertRowsNode = new InsertRowsNode(new PlanNodeId(""), indexList, nodes); dataRegion1.insert(insertRowsNode); dataRegion1.syncCloseAllWorkingTsFileProcessors(); - IDeviceID tmpDeviceId = IDeviceID.Factory.DEFAULT_FACTORY.create("root.Rows"); + IDeviceID tmpDeviceId = Factory.DEFAULT_FACTORY.create("root.Rows"); QueryDataSource queryDataSource = dataRegion1.query( Collections.singletonList( @@ -1052,10 +1072,10 @@ tmpDeviceId, new MeasurementSchema(measurementId, TSDataType.INT32))), context, null, null); - Assert.assertEquals(1, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(1, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } dataRegion1.syncDeleteDataFiles(); } @@ -1079,7 +1099,7 @@ public void testSmallReportProportionInsertRow() dataRegion1.syncCloseAllWorkingTsFileProcessors(); } dataRegion1.syncCloseAllWorkingTsFileProcessors(); - IDeviceID tmpDeviceId = IDeviceID.Factory.DEFAULT_FACTORY.create("root.ln22"); + IDeviceID tmpDeviceId = Factory.DEFAULT_FACTORY.create("root.ln22"); QueryDataSource queryDataSource = dataRegion1.query( Collections.singletonList( @@ -1089,13 +1109,13 @@ tmpDeviceId, new MeasurementSchema(measurementId, TSDataType.INT32))), context, null, null); - Assert.assertEquals(10, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(10, 
queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } for (TsFileResource resource : queryDataSource.getUnseqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } dataRegion1.syncDeleteDataFiles(); @@ -1158,12 +1178,12 @@ public void testMerge() QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(2, queryDataSource.getSeqResources().size()); + assertEquals(2, queryDataSource.getSeqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } for (TsFileResource resource : queryDataSource.getUnseqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } IoTDBDescriptor.getInstance() .getConfig() @@ -1232,7 +1252,7 @@ public void testDeleteStorageGroupWhenCompacting() throws Exception { + CompactionLogger.INNER_COMPACTION_LOG_NAME_SUFFIX); Assert.assertFalse(logFile.exists()); Assert.assertFalse(CommonDescriptor.getInstance().getConfig().isReadOnly()); - Assert.assertTrue(dataRegion.getTsFileManager().isAllowCompaction()); + assertTrue(dataRegion.getTsFileManager().isAllowCompaction()); } finally { new CompactionConfigRestorer().restoreCompactionConfig(); } @@ -1245,7 +1265,7 @@ public void testTimedFlushSeqMemTable() TSRecord record = new TSRecord(deviceId, 10000); record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(1000))); dataRegion.insert(buildInsertRowNodeByTSRecord(record)); - Assert.assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); // change config & 
reboot timed service boolean prevEnableTimedFlushSeqMemtable = config.isEnableTimedFlushSeqMemtable(); @@ -1256,7 +1276,7 @@ public void testTimedFlushSeqMemTable() Thread.sleep(500); - Assert.assertEquals(1, dataRegion.getWorkSequenceTsFileProcessors().size()); + assertEquals(1, dataRegion.getWorkSequenceTsFileProcessors().size()); TsFileProcessor tsFileProcessor = dataRegion.getWorkSequenceTsFileProcessors().iterator().next(); FlushManager flushManager = FlushManager.getInstance(); @@ -1281,7 +1301,7 @@ public void testTimedFlushSeqMemTable() } } - Assert.assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); config.setEnableTimedFlushSeqMemtable(prevEnableTimedFlushSeqMemtable); config.setSeqMemtableFlushInterval(preFLushInterval); @@ -1294,15 +1314,15 @@ public void testTimedFlushUnseqMemTable() TSRecord record = new TSRecord(deviceId, 10000); record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(1000))); dataRegion.insert(buildInsertRowNodeByTSRecord(record)); - Assert.assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); dataRegion.syncCloseAllWorkingTsFileProcessors(); - Assert.assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); // create one unsequence memtable record = new TSRecord(deviceId, 1); record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(1000))); dataRegion.insert(buildInsertRowNodeByTSRecord(record)); - Assert.assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); // change config & reboot timed service boolean prevEnableTimedFlushUnseqMemtable = config.isEnableTimedFlushUnseqMemtable(); @@ 
-1313,7 +1333,7 @@ record = new TSRecord(deviceId, 1); Thread.sleep(500); - Assert.assertEquals(1, dataRegion.getWorkUnsequenceTsFileProcessors().size()); + assertEquals(1, dataRegion.getWorkUnsequenceTsFileProcessors().size()); TsFileProcessor tsFileProcessor = dataRegion.getWorkUnsequenceTsFileProcessors().iterator().next(); FlushManager flushManager = FlushManager.getInstance(); @@ -1338,7 +1358,7 @@ record = new TSRecord(deviceId, 1); } } - Assert.assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); config.setEnableTimedFlushUnseqMemtable(prevEnableTimedFlushUnseqMemtable); config.setUnseqMemtableFlushInterval(preFLushInterval); @@ -1392,11 +1412,11 @@ public void testDeleteDataNotInFile() for (int i = 0; i < dataRegion.getSequenceFileList().size(); i++) { TsFileResource resource = dataRegion.getSequenceFileList().get(i); if (i == 1) { - Assert.assertTrue(resource.anyModFileExists()); - Assert.assertEquals(2, resource.getAllModEntries().size()); + assertTrue(resource.anyModFileExists()); + assertEquals(2, resource.getAllModEntries().size()); } else if (i == 3) { - Assert.assertTrue(resource.anyModFileExists()); - Assert.assertEquals(1, resource.getAllModEntries().size()); + assertTrue(resource.anyModFileExists()); + assertEquals(1, resource.getAllModEntries().size()); } else { Assert.assertFalse(resource.anyModFileExists()); } @@ -1489,8 +1509,8 @@ public void testDeleteDataInSeqFlushingMemtable() dataRegion.deleteByDevice(new MeasurementPath("root.vehicle.d0.s0"), deleteDataNode4); dataRegion.syncCloseAllWorkingTsFileProcessors(); - Assert.assertTrue(tsFileResource.anyModFileExists()); - Assert.assertEquals(3, tsFileResource.getAllModEntries().size()); + assertTrue(tsFileResource.anyModFileExists()); + assertEquals(3, tsFileResource.getAllModEntries().size()); } @Test @@ -1584,8 +1604,8 @@ public void testDeleteDataInUnSeqFlushingMemtable() 
dataRegion.deleteByDevice(new MeasurementPath("root.vehicle.d0.s0"), deleteDataNode12); dataRegion.syncCloseAllWorkingTsFileProcessors(); - Assert.assertTrue(tsFileResource.anyModFileExists()); - Assert.assertEquals(3, tsFileResource.getAllModEntries().size()); + assertTrue(tsFileResource.anyModFileExists()); + assertEquals(3, tsFileResource.getAllModEntries().size()); } @Test @@ -1625,9 +1645,7 @@ public void testDeleteDataInSeqWorkingMemtable() dataRegion.syncCloseAllWorkingTsFileProcessors(); Assert.assertFalse(tsFileResource.anyModFileExists()); Assert.assertFalse( - tsFileResource - .getDevices() - .contains(IDeviceID.Factory.DEFAULT_FACTORY.create("root.vehicle.d199"))); + tsFileResource.getDevices().contains(Factory.DEFAULT_FACTORY.create("root.vehicle.d199"))); } @Test @@ -1659,7 +1677,7 @@ public static class DummyDataRegion extends DataRegion { public DummyDataRegion(String systemInfoDir, String storageGroupName) throws DataRegionException { - super(systemInfoDir, "0", new TsFileFlushPolicy.DirectFlushPolicy(), storageGroupName); + super(systemInfoDir, "0", new DirectFlushPolicy(), storageGroupName); } } @@ -1686,7 +1704,7 @@ public void testDeleteDataDirectlySeqWriteModsOrDeleteFiles() new DeleteDataNode(new PlanNodeId("1"), Collections.singletonList(path), 50, 100); deleteDataNode1.setSearchIndex(0); dataRegion.deleteDataDirectly(new MeasurementPath("root.vehicle.d0.**"), deleteDataNode1); - Assert.assertTrue(tsFileResource.getTsFile().exists()); + assertTrue(tsFileResource.getTsFile().exists()); Assert.assertFalse(tsFileResource.anyModFileExists()); dataRegion.syncCloseAllWorkingTsFileProcessors(); @@ -1696,8 +1714,8 @@ public void testDeleteDataDirectlySeqWriteModsOrDeleteFiles() new DeleteDataNode(new PlanNodeId("2"), Collections.singletonList(path), 100, 120); deleteDataNode2.setSearchIndex(0); dataRegion.deleteDataDirectly(new MeasurementPath("root.vehicle.d0.**"), deleteDataNode2); - Assert.assertTrue(tsFileResource.getTsFile().exists()); - 
Assert.assertTrue(tsFileResource.anyModFileExists()); + assertTrue(tsFileResource.getTsFile().exists()); + assertTrue(tsFileResource.anyModFileExists()); // delete data in closed file, and time all match DeleteDataNode deleteDataNode3 = @@ -1727,8 +1745,8 @@ public void testDeleteDataDirectlyUnseqWriteModsOrDeleteFiles() dataRegion.syncCloseWorkingTsFileProcessors(true); TsFileResource tsFileResourceUnSeq = dataRegion.getTsFileManager().getTsFileList(false).get(0); - Assert.assertTrue(tsFileResourceSeq.getTsFile().exists()); - Assert.assertTrue(tsFileResourceUnSeq.getTsFile().exists()); + assertTrue(tsFileResourceSeq.getTsFile().exists()); + assertTrue(tsFileResourceUnSeq.getTsFile().exists()); // already closed, will have a mods file. MeasurementPath path = new MeasurementPath("root.vehicle.d0.**"); @@ -1743,9 +1761,9 @@ public void testDeleteDataDirectlyUnseqWriteModsOrDeleteFiles() dataRegion.deleteDataDirectly(new MeasurementPath("root.vehicle.d0.**"), deleteDataNode2); // delete data in mem table, there is no mods - Assert.assertTrue(tsFileResourceSeq.getTsFile().exists()); - Assert.assertTrue(tsFileResourceUnSeq.getTsFile().exists()); - Assert.assertTrue(tsFileResourceSeq.anyModFileExists()); + assertTrue(tsFileResourceSeq.getTsFile().exists()); + assertTrue(tsFileResourceUnSeq.getTsFile().exists()); + assertTrue(tsFileResourceSeq.anyModFileExists()); Assert.assertFalse(tsFileResourceUnSeq.anyModFileExists()); dataRegion.syncCloseAllWorkingTsFileProcessors(); @@ -1753,8 +1771,8 @@ public void testDeleteDataDirectlyUnseqWriteModsOrDeleteFiles() new DeleteDataNode(new PlanNodeId("3"), Collections.singletonList(path), 40, 80); deleteDataNode3.setSearchIndex(0); dataRegion.deleteDataDirectly(new MeasurementPath("root.vehicle.d0.**"), deleteDataNode3); - Assert.assertTrue(tsFileResourceUnSeq.getTsFile().exists()); - Assert.assertTrue(tsFileResourceUnSeq.anyModFileExists()); + assertTrue(tsFileResourceUnSeq.getTsFile().exists()); + 
assertTrue(tsFileResourceUnSeq.anyModFileExists()); // seq file and unseq file have data file and mod file now, // this deletion will remove data file and mod file. @@ -1772,4 +1790,259 @@ public void testDeleteDataDirectlyUnseqWriteModsOrDeleteFiles() Assert.assertFalse(tsFileResourceSeq.anyModFileExists()); Assert.assertFalse(tsFileResourceUnSeq.anyModFileExists()); } + + @Test + public void testSchemaEvolution() + throws WriteProcessException, QueryProcessException, IOException { + String[] measurements = {"tag1", "s1", "s2"}; + MeasurementSchema[] measurementSchemas = { + new MeasurementSchema("tag1", TSDataType.STRING), + new MeasurementSchema("s1", TSDataType.INT64), + new MeasurementSchema("s2", TSDataType.DOUBLE) + }; + RelationalInsertRowNode insertRowNode = + new RelationalInsertRowNode( + new PlanNodeId(""), + new PartialPath(new String[] {"table1"}), + true, + measurements, + new TSDataType[] {TSDataType.STRING, TSDataType.INT64, TSDataType.DOUBLE}, + measurementSchemas, + 10, + new Object[] {new Binary("tag1".getBytes(StandardCharsets.UTF_8)), 1L, 1.0}, + false, + new TsTableColumnCategory[] { + TsTableColumnCategory.TAG, TsTableColumnCategory.FIELD, TsTableColumnCategory.FIELD + }); + dataRegion.insert(insertRowNode); + + // table1 -> table2 + dataRegion.applySchemaEvolution(Collections.singletonList(new TableRename("table1", "table2"))); + + // cannot query with the old name + IDeviceID deviceID1 = Factory.DEFAULT_FACTORY.create(new String[] {"table1", "tag1"}); + List fullPaths = + Arrays.asList( + new AlignedFullPath( + deviceID1, Arrays.asList(measurements), Arrays.asList(measurementSchemas))); + QueryDataSource dataSource = + dataRegion.query( + fullPaths, + deviceID1, + new QueryContext(), + null, + Collections.singletonList(0L), + Long.MAX_VALUE); + assertTrue(dataSource.getSeqResources().isEmpty()); + + // can query with the new name + IDeviceID deviceID2 = Factory.DEFAULT_FACTORY.create(new String[] {"table2", "tag1"}); + fullPaths = + 
Arrays.asList( + new AlignedFullPath( + deviceID2, Arrays.asList(measurements), Arrays.asList(measurementSchemas))); + dataSource = + dataRegion.query( + fullPaths, + deviceID2, + new QueryContext(), + null, + Collections.singletonList(0L), + Long.MAX_VALUE); + assertEquals(1, dataSource.getSeqResources().size()); + + DataNodeTableCache.getInstance() + .preUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.genTsTable(1), null); + DataNodeTableCache.getInstance() + .commitUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.tableName(1), null); + + // write again with table1 + insertRowNode = + new RelationalInsertRowNode( + new PlanNodeId(""), + new PartialPath(new String[] {"table1"}), + true, + measurements, + new TSDataType[] {TSDataType.STRING, TSDataType.INT64, TSDataType.DOUBLE}, + measurementSchemas, + 10, + new Object[] {new Binary("tag1".getBytes(StandardCharsets.UTF_8)), 1L, 1.0}, + false, + new TsTableColumnCategory[] { + TsTableColumnCategory.TAG, TsTableColumnCategory.FIELD, TsTableColumnCategory.FIELD + }); + dataRegion.insert(insertRowNode); + + // can query with table1 + fullPaths = + Arrays.asList( + new AlignedFullPath( + deviceID1, Arrays.asList(measurements), Arrays.asList(measurementSchemas))); + dataSource = + dataRegion.query( + fullPaths, + deviceID1, + new QueryContext(), + null, + Collections.singletonList(0L), + Long.MAX_VALUE); + assertEquals(1, dataSource.getSeqResources().size()); + + // can query with table2 + fullPaths = + Arrays.asList( + new AlignedFullPath( + deviceID2, Arrays.asList(measurements), Arrays.asList(measurementSchemas))); + dataSource = + dataRegion.query( + fullPaths, + deviceID2, + new QueryContext(), + null, + Collections.singletonList(0L), + Long.MAX_VALUE); + assertEquals(1, dataSource.getSeqResources().size()); + } + + @Test + public void testSchemaEvolutionWithPartialDeletion() throws WriteProcessException, IOException { + String[] measurements = {"tag1", "s1", "s2"}; + MeasurementSchema[] 
measurementSchemas = { + new MeasurementSchema("tag1", TSDataType.STRING), + new MeasurementSchema("s1", TSDataType.INT64), + new MeasurementSchema("s2", TSDataType.DOUBLE) + }; + RelationalInsertRowNode insertRowNode = + new RelationalInsertRowNode( + new PlanNodeId(""), + new PartialPath(new String[] {"table1"}), + true, + measurements, + new TSDataType[] {TSDataType.STRING, TSDataType.INT64, TSDataType.DOUBLE}, + measurementSchemas, + 10, + new Object[] {new Binary("tag1".getBytes(StandardCharsets.UTF_8)), 1L, 1.0}, + false, + new TsTableColumnCategory[] { + TsTableColumnCategory.TAG, TsTableColumnCategory.FIELD, TsTableColumnCategory.FIELD + }); + dataRegion.insert(insertRowNode); + insertRowNode.setTime(20); + dataRegion.insert(insertRowNode); + + // table1 -> table2 + dataRegion.applySchemaEvolution(Collections.singletonList(new TableRename("table1", "table2"))); + // s1 -> s3 + dataRegion.applySchemaEvolution( + Collections.singletonList(new ColumnRename("table2", "s1", "s3", null))); + + // delete with table2 + TableDeletionEntry tableDeletionEntry = + new TableDeletionEntry(new DeletionPredicate("table2"), new TimeRange(0, 15)); + RelationalDeleteDataNode relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + // delete with s3 + tableDeletionEntry = + new TableDeletionEntry( + new DeletionPredicate("table2", new NOP(), Collections.singletonList("s3")), + new TimeRange(0, 15)); + relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + // delete with table1 + tableDeletionEntry = + new TableDeletionEntry(new DeletionPredicate("table1"), new TimeRange(0, 15)); + relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + 
dataRegion.deleteByTable(relationalDeleteDataNode); + // delete with s1 + tableDeletionEntry = + new TableDeletionEntry( + new DeletionPredicate("table2", new NOP(), Collections.singletonList("s1")), + new TimeRange(0, 15)); + relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + + List sequenceFileList = dataRegion.getSequenceFileList(); + assertEquals(1, sequenceFileList.size()); + ModIterator modEntryIterator = sequenceFileList.get(0).getModEntryIterator(); + ModEntry next = modEntryIterator.next(); + // the table2 modification should be rewritten to table1 + assertEquals("table1", ((TableDeletionEntry) next).getTableName()); + next = modEntryIterator.next(); + // the s3 modification should be rewritten to s1 + assertEquals( + Collections.singletonList("s1"), + ((TableDeletionEntry) next).getPredicate().getMeasurementNames()); + next = modEntryIterator.next(); + // the table1 modification should be skipped + // the s1 modification should be rewritten to empty + assertEquals( + Collections.singletonList(""), + ((TableDeletionEntry) next).getPredicate().getMeasurementNames()); + assertFalse(modEntryIterator.hasNext()); + } + + @Test + public void testSchemaEvolutionWithFullDeletion() throws WriteProcessException, IOException { + String[] measurements = {"tag1", "s1", "s2"}; + MeasurementSchema[] measurementSchemas = { + new MeasurementSchema("tag1", TSDataType.STRING), + new MeasurementSchema("s1", TSDataType.INT64), + new MeasurementSchema("s2", TSDataType.DOUBLE) + }; + RelationalInsertRowNode insertRowNode = + new RelationalInsertRowNode( + new PlanNodeId(""), + new PartialPath(new String[] {"table1"}), + true, + measurements, + new TSDataType[] {TSDataType.STRING, TSDataType.INT64, TSDataType.DOUBLE}, + measurementSchemas, + 10, + new Object[] {new Binary("tag1".getBytes(StandardCharsets.UTF_8)), 1L, 1.0}, + false, + new 
TsTableColumnCategory[] { + TsTableColumnCategory.TAG, TsTableColumnCategory.FIELD, TsTableColumnCategory.FIELD + }); + dataRegion.insert(insertRowNode); + insertRowNode.setTime(20); + dataRegion.insert(insertRowNode); + + // table1 -> table2 + dataRegion.applySchemaEvolution(Collections.singletonList(new TableRename("table1", "table2"))); + // s1 -> s3 + dataRegion.applySchemaEvolution( + Collections.singletonList(new ColumnRename("table2", "s1", "s3", null))); + + // delete with table1 + TableDeletionEntry tableDeletionEntry = + new TableDeletionEntry(new DeletionPredicate("table1"), new TimeRange(0, 30)); + RelationalDeleteDataNode relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + // nothing should be deleted + List sequenceFileList = dataRegion.getSequenceFileList(); + assertEquals(1, sequenceFileList.size()); + ModIterator modEntryIterator = sequenceFileList.get(0).getModEntryIterator(); + assertFalse(modEntryIterator.hasNext()); + + // delete with table2 + tableDeletionEntry = + new TableDeletionEntry(new DeletionPredicate("table2"), new TimeRange(0, 30)); + relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + // the file should be deleted + sequenceFileList = dataRegion.getSequenceFileList(); + assertEquals(0, sequenceFileList.size()); + } } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/AbstractCompactionTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/AbstractCompactionTest.java index a5503bb9e645c..7a90f6b16d61a 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/AbstractCompactionTest.java +++ 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/AbstractCompactionTest.java @@ -850,7 +850,8 @@ protected List getPaths(List resources) Pair iDeviceIDBooleanPair = deviceIterator.nextDevice(); IDeviceID deviceID = iDeviceIDBooleanPair.getLeft(); boolean isAlign = iDeviceIDBooleanPair.getRight(); - Map schemaMap = deviceIterator.getAllSchemasOfCurrentDevice(); + Map schemaMap = + deviceIterator.getAllSchemasOfCurrentDevice(new Pair<>(Long.MIN_VALUE, null)); IMeasurementSchema timeSchema = schemaMap.remove(TsFileConstant.TIME_COLUMN_ID); List measurementSchemas = new ArrayList<>(schemaMap.values()); if (measurementSchemas.isEmpty()) { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/BatchedCompactionWithTsFileSplitterTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/BatchedCompactionWithTsFileSplitterTest.java index 272d9e6ae5ca7..b64e67a572546 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/BatchedCompactionWithTsFileSplitterTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/BatchedCompactionWithTsFileSplitterTest.java @@ -301,7 +301,8 @@ private void consumeChunkDataAndValidate(TsFileResource resource) throw new RuntimeException(e); } return true; - }); + }, + null); splitter.splitTsFileByDataPartition(); List splitResources = new ArrayList<>(); for (Map.Entry entry : writerMap.entrySet()) { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionWithSevoTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionWithSevoTest.java new file mode 100644 index 0000000000000..f68e1afb77498 --- /dev/null +++ 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionWithSevoTest.java @@ -0,0 +1,759 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.compaction; + +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.ICompactionPerformer; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.FastCompactionPerformer; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.ReadChunkCompactionPerformer; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.ReadPointCompactionPerformer; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.CompactionTaskSummary; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.subtask.FastCompactionTaskSummary; +import org.apache.iotdb.db.storageengine.dataregion.compaction.utils.CompactionFileGeneratorUtils; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.ColumnRename; +import 
org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; +import org.apache.iotdb.db.utils.EncryptDBUtils; +import org.apache.iotdb.db.utils.constant.TestConstant; + +import org.apache.tsfile.enums.ColumnCategory; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.exception.write.NoMeasurementException; +import org.apache.tsfile.exception.write.NoTableException; +import org.apache.tsfile.file.metadata.ColumnSchemaBuilder; +import org.apache.tsfile.file.metadata.IDeviceID.Factory; +import org.apache.tsfile.file.metadata.TableSchema; +import org.apache.tsfile.read.query.dataset.ResultSet; +import org.apache.tsfile.read.v4.ITsFileReader; +import org.apache.tsfile.read.v4.TsFileReaderBuilder; +import org.apache.tsfile.write.TsFileWriter; +import org.apache.tsfile.write.record.Tablet; +import org.junit.Test; + +import java.io.File; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class CompactionWithSevoTest extends AbstractCompactionTest { + + @Test + public void testReadChunkCompactionPerformer() throws Exception { + testInner( + targets -> + new ReadChunkCompactionPerformer( + seqResources, targets, EncryptDBUtils.getDefaultFirstEncryptParam()), + CompactionTaskSummary::new); + } + + @Test + public void testReadPointCompactionPerformerSeq() throws Exception { + testInner( + targets -> new ReadPointCompactionPerformer(seqResources, Collections.emptyList(), targets), + CompactionTaskSummary::new); + } + + @Test + public void testReadPointCompactionPerformerUnseq() throws Exception { + testInner( + targets -> new ReadPointCompactionPerformer(Collections.emptyList(), seqResources, targets), + 
CompactionTaskSummary::new); + } + + @Test + public void testReadPointCompactionPerformerCross() throws Exception { + testCross( + targets -> new ReadPointCompactionPerformer(seqResources, unseqResources, targets), + CompactionTaskSummary::new); + } + + @Test + public void testFastCompactionPerformerSeq() throws Exception { + testInner( + targets -> + new FastCompactionPerformer( + seqResources, + Collections.emptyList(), + targets, + EncryptDBUtils.getDefaultFirstEncryptParam()), + FastCompactionTaskSummary::new); + } + + @Test + public void testFastCompactionPerformerUnseq() throws Exception { + testInner( + targets -> + new FastCompactionPerformer( + Collections.emptyList(), + seqResources, + targets, + EncryptDBUtils.getDefaultFirstEncryptParam()), + FastCompactionTaskSummary::new); + } + + @Test + public void testFastCompactionPerformerCross() throws Exception { + testCross( + targets -> + new FastCompactionPerformer( + seqResources, + unseqResources, + targets, + EncryptDBUtils.getDefaultFirstEncryptParam()), + FastCompactionTaskSummary::new); + } + + private void genSourceFiles() throws Exception { + String fileSetDir = + TestConstant.BASE_OUTPUT_PATH + File.separator + TsFileSet.FILE_SET_DIR_NAME; + // seq-file1: + // table1[s1, s2, s3] + // table2[s1, s2, s3] + File seqf1 = new File(SEQ_DIRS, "0-1-0-0.tsfile"); + TableSchema tableSchema1_1 = + new TableSchema( + "table1", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema1_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new 
ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(seqf1)) { + tsFileWriter.registerTableSchema(tableSchema1_1); + tsFileWriter.registerTableSchema(tableSchema1_2); + + Tablet tablet1 = new Tablet(tableSchema1_1.getTableName(), tableSchema1_1.getColumnSchemas()); + tablet1.addTimestamp(0, 0); + tablet1.addValue(0, 0, 1); + tablet1.addValue(0, 1, 2); + tablet1.addValue(0, 2, 3); + + Tablet tablet2 = new Tablet(tableSchema1_2.getTableName(), tableSchema1_2.getColumnSchemas()); + tablet2.addTimestamp(0, 0); + tablet2.addValue(0, 0, 101); + tablet2.addValue(0, 1, 102); + tablet2.addValue(0, 2, 103); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource1 = new TsFileResource(seqf1); + resource1.setTsFileManager(tsFileManager); + resource1.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 0); + resource1.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 0); + resource1.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 0); + resource1.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 0); + resource1.close(); + + // rename table1 -> table0 + TsFileSet tsFileSet1 = new TsFileSet(1, fileSetDir, false); + tsFileSet1.appendSchemaEvolution( + Collections.singletonList(new TableRename("table1", "table0"))); + tsFileManager.addTsFileSet(tsFileSet1, 0); + + // seq-file2: + // table0[s1, s2, s3] + // table2[s1, s2, s3] + File seqf2 = new File(SEQ_DIRS, "0-2-0-0.tsfile"); + TableSchema tableSchema2_1 = + new TableSchema( + "table0", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + 
.dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema2_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(seqf2)) { + tsFileWriter.registerTableSchema(tableSchema2_1); + tsFileWriter.registerTableSchema(tableSchema2_2); + + Tablet tablet1 = new Tablet(tableSchema2_1.getTableName(), tableSchema2_1.getColumnSchemas()); + tablet1.addTimestamp(0, 1); + tablet1.addValue(0, 0, 11); + tablet1.addValue(0, 1, 12); + tablet1.addValue(0, 2, 13); + + Tablet tablet2 = new Tablet(tableSchema2_2.getTableName(), tableSchema2_2.getColumnSchemas()); + tablet2.addTimestamp(0, 1); + tablet2.addValue(0, 0, 111); + tablet2.addValue(0, 1, 112); + tablet2.addValue(0, 2, 113); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource2 = new TsFileResource(seqf2); + resource2.setTsFileManager(tsFileManager); + resource2.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource2.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource2.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 1); + resource2.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 1); + resource2.close(); + + // rename table0.s1 -> table0.s0 + TsFileSet tsFileSet2 = new TsFileSet(2, fileSetDir, false); + tsFileSet2.appendSchemaEvolution( + Collections.singletonList(new ColumnRename("table0", "s1", "s0"))); + 
tsFileManager.addTsFileSet(tsFileSet2, 0); + + // seq-file3: + // table0[s0, s2, s3] + // table2[s1, s2, s3] + File seqf3 = new File(SEQ_DIRS, "0-3-0-0.tsfile"); + TableSchema tableSchema3_1 = + new TableSchema( + "table0", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s0") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema3_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(seqf3)) { + tsFileWriter.registerTableSchema(tableSchema3_1); + tsFileWriter.registerTableSchema(tableSchema3_2); + + Tablet tablet1 = new Tablet(tableSchema3_1.getTableName(), tableSchema3_1.getColumnSchemas()); + tablet1.addTimestamp(0, 2); + tablet1.addValue(0, 0, 21); + tablet1.addValue(0, 1, 22); + tablet1.addValue(0, 2, 23); + + Tablet tablet2 = new Tablet(tableSchema3_2.getTableName(), tableSchema3_2.getColumnSchemas()); + tablet2.addTimestamp(0, 2); + tablet2.addValue(0, 0, 121); + tablet2.addValue(0, 1, 122); + tablet2.addValue(0, 2, 123); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource3 = new TsFileResource(seqf3); + resource3.setTsFileManager(tsFileManager); + resource3.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 2); + resource3.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 2); + 
resource3.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 2); + resource3.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 2); + resource3.close(); + + // rename table2 -> table1 + TsFileSet tsFileSet3 = new TsFileSet(3, fileSetDir, false); + tsFileSet3.appendSchemaEvolution( + Collections.singletonList(new TableRename("table2", "table1"))); + tsFileManager.addTsFileSet(tsFileSet3, 0); + + seqResources.add(resource1); + seqResources.add(resource2); + seqResources.add(resource3); + + // unseq-file4: + // table0[s0, s2, s3] + // table1[s1, s2, s3] + File unseqf4 = new File(UNSEQ_DIRS, "0-4-0-0.tsfile"); + TableSchema tableSchema4_1 = + new TableSchema( + "table0", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s0") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema4_2 = + new TableSchema( + "table1", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(unseqf4)) { + tsFileWriter.registerTableSchema(tableSchema4_1); + tsFileWriter.registerTableSchema(tableSchema4_2); + + Tablet tablet1 = new Tablet(tableSchema4_1.getTableName(), tableSchema4_1.getColumnSchemas()); + tablet1.addTimestamp(0, 1); + tablet1.addValue(0, 0, 1011); + tablet1.addValue(0, 1, 1012); + tablet1.addValue(0, 2, 1013); + + Tablet tablet2 = new Tablet(tableSchema4_2.getTableName(), 
tableSchema4_2.getColumnSchemas()); + tablet2.addTimestamp(0, 1); + tablet2.addValue(0, 0, 1111); + tablet2.addValue(0, 1, 1112); + tablet2.addValue(0, 2, 1113); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource4 = new TsFileResource(unseqf4); + resource4.setTsFileManager(tsFileManager); + resource4.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource4.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource4.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 1); + resource4.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 1); + resource4.close(); + unseqResources.add(resource4); + } + + private void testCross( + Function, ICompactionPerformer> compactionPerformerFunction, + Supplier summarySupplier) + throws Exception { + genSourceFiles(); + List targetResources; + ICompactionPerformer performer; + + targetResources = + CompactionFileGeneratorUtils.getCrossCompactionTargetTsFileResources(seqResources); + targetResources.forEach(s -> s.setTsFileManager(tsFileManager)); + + performer = compactionPerformerFunction.apply(targetResources); + performer.setSummary(summarySupplier.get()); + performer.perform(); + + // target(version=1): + // table1[s1, s2, s3] + // table2[s1, s2, s3] + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(0).getTsFile()).build()) { + // table0 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table0 not found", e.getMessage()); + } + + // table1.s0 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s0"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s0", 
e.getMessage()); + } + + // check data of table1 + ResultSet resultSet = + tsFileReader.query( + "table1", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(0, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(j + 1, resultSet.getLong(j + 2)); + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(0, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + j + 1, resultSet.getLong(j + 2)); + } + } + + // target(version=2): + // table0[s1, s2, s3] + // table2[s1, s2, s3] + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(1).getTsFile()).build()) { + // table1 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table1 not found", e.getMessage()); + } + + // table0.s0 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s0"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s0", e.getMessage()); + } + + // check data of table0 + ResultSet resultSet = + tsFileReader.query( + "table0", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(1010 + j + 1, resultSet.getLong(j + 2)); + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(1110 + j + 1, resultSet.getLong(j + 2)); + 
} + } + + // target(version=2): + // table0[s0, s2, s3] + // table2[s1, s2, s3] + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(2).getTsFile()).build()) { + // table1 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table1 not found", e.getMessage()); + } + + // table0.s1 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s1"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s1", e.getMessage()); + } + + // check data of table0 + ResultSet resultSet = + tsFileReader.query( + "table0", Arrays.asList("s0", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(2, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(20 + j + 1, resultSet.getLong(j + 2)); + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(2, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + 20 + j + 1, resultSet.getLong(j + 2)); + } + } + } + + private void testInner( + Function, ICompactionPerformer> compactionPerformerFunction, + Supplier summarySupplier) + throws Exception { + genSourceFiles(); + List targetResources; + ICompactionPerformer performer; + + // target(version=1): + // table1[s1, s2, s3] + // table2[s1, s2, s3] + targetResources = + CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources(seqResources, true); + targetResources.forEach(s -> s.setTsFileManager(tsFileManager)); + + performer = compactionPerformerFunction.apply(targetResources); + performer.setSummary(summarySupplier.get()); + performer.perform(); + + try 
(ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(0).getTsFile()).build()) { + // table0 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table0 not found", e.getMessage()); + } + + // table1.s0 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s0"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s0", e.getMessage()); + } + + // check data of table1 + ResultSet resultSet = + tsFileReader.query( + "table1", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + } + + // target(version=2): + // table0[s1, s2, s3] + // table2[s1, s2, s3] + targetResources = + CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources( + seqResources.subList(1, seqResources.size()), true); + targetResources.forEach(s -> s.setTsFileManager(tsFileManager)); + + performer = compactionPerformerFunction.apply(targetResources); + performer.setSummary(summarySupplier.get()); + performer.perform(); + + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(0).getTsFile()).build()) { + // table1 should not exist + try { + tsFileReader.query( + "table1", 
Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table1 not found", e.getMessage()); + } + + // table0.s0 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s0"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s0", e.getMessage()); + } + + // check data of table0 + ResultSet resultSet = + tsFileReader.query( + "table0", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + } + + // target(version=2): + // table0[s0, s2, s3] + // table2[s1, s2, s3] + targetResources = + CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources( + seqResources.subList(2, seqResources.size()), true); + targetResources.forEach(s -> s.setTsFileManager(tsFileManager)); + + performer = compactionPerformerFunction.apply(targetResources); + performer.setSummary(summarySupplier.get()); + performer.perform(); + + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(0).getTsFile()).build()) { + // table1 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table1 not found", e.getMessage()); + } + 
+ // table0.s1 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s1"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s1", e.getMessage()); + } + + // check data of table0 + ResultSet resultSet = + tsFileReader.query( + "table0", Arrays.asList("s0", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + } + } +} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/ReadPointCompactionPerformerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/ReadPointCompactionPerformerTest.java index f21571ce4f87d..b74bcec8f964c 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/ReadPointCompactionPerformerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/ReadPointCompactionPerformerTest.java @@ -37,18 +37,32 @@ import org.apache.iotdb.db.storageengine.dataregion.read.control.FileReaderManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.ColumnRename; +import 
org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; import org.apache.iotdb.db.utils.EnvironmentUtils; +import org.apache.iotdb.db.utils.constant.TestConstant; import org.apache.tsfile.common.conf.TSFileDescriptor; +import org.apache.tsfile.enums.ColumnCategory; import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.exception.write.NoMeasurementException; +import org.apache.tsfile.exception.write.NoTableException; import org.apache.tsfile.exception.write.WriteProcessException; +import org.apache.tsfile.file.metadata.ColumnSchemaBuilder; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.file.metadata.IDeviceID.Factory; +import org.apache.tsfile.file.metadata.TableSchema; import org.apache.tsfile.read.common.IBatchDataIterator; import org.apache.tsfile.read.common.block.TsBlock; +import org.apache.tsfile.read.query.dataset.ResultSet; +import org.apache.tsfile.read.v4.ITsFileReader; +import org.apache.tsfile.read.v4.TsFileReaderBuilder; import org.apache.tsfile.utils.Pair; import org.apache.tsfile.utils.TsFileGeneratorUtils; import org.apache.tsfile.utils.TsPrimitiveType; +import org.apache.tsfile.write.TsFileWriter; +import org.apache.tsfile.write.record.Tablet; import org.apache.tsfile.write.schema.IMeasurementSchema; import org.apache.tsfile.write.schema.MeasurementSchema; import org.junit.After; @@ -56,8 +70,10 @@ import org.junit.Before; import org.junit.Test; +import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -66,6 +82,8 @@ import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; @SuppressWarnings("OptionalGetWithoutIsPresent") public class 
ReadPointCompactionPerformerTest extends AbstractCompactionTest { @@ -6993,4 +7011,296 @@ public void testCrossSpaceCompactionWithDeviceMaxTimeLaterInUnseqFile() Assert.fail(); } } + + @Test + public void testWithSevoFile() throws Exception { + String fileSetDir = + TestConstant.BASE_OUTPUT_PATH + File.separator + TsFileSet.FILE_SET_DIR_NAME; + // file1: + // table1[s1, s2, s3] + // table2[s1, s2, s3] + File f1 = new File(SEQ_DIRS, "0-1-0-0.tsfile"); + TableSchema tableSchema1_1 = + new TableSchema( + "table1", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema1_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(f1)) { + tsFileWriter.registerTableSchema(tableSchema1_1); + tsFileWriter.registerTableSchema(tableSchema1_2); + + Tablet tablet1 = new Tablet(tableSchema1_1.getTableName(), tableSchema1_1.getColumnSchemas()); + tablet1.addTimestamp(0, 0); + tablet1.addValue(0, 0, 1); + tablet1.addValue(0, 1, 2); + tablet1.addValue(0, 2, 3); + + Tablet tablet2 = new Tablet(tableSchema1_2.getTableName(), tableSchema1_2.getColumnSchemas()); + tablet2.addTimestamp(0, 0); + tablet2.addValue(0, 0, 101); + tablet2.addValue(0, 1, 102); + tablet2.addValue(0, 2, 103); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); 
+ } + TsFileResource resource1 = new TsFileResource(f1); + resource1.setTsFileManager(tsFileManager); + resource1.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 0); + resource1.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 0); + resource1.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 0); + resource1.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 0); + resource1.close(); + + // rename table1 -> table0 + TsFileSet tsFileSet1 = new TsFileSet(1, fileSetDir, false); + tsFileSet1.appendSchemaEvolution( + Collections.singletonList(new TableRename("table1", "table0"))); + tsFileManager.addTsFileSet(tsFileSet1, 0); + + // file2: + // table0[s1, s2, s3] + // table2[s1, s2, s3] + File f2 = new File(SEQ_DIRS, "0-2-0-0.tsfile"); + TableSchema tableSchema2_1 = + new TableSchema( + "table0", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema2_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(f2)) { + tsFileWriter.registerTableSchema(tableSchema2_1); + tsFileWriter.registerTableSchema(tableSchema2_2); + + Tablet tablet1 = new Tablet(tableSchema2_1.getTableName(), tableSchema2_1.getColumnSchemas()); + tablet1.addTimestamp(0, 1); + 
tablet1.addValue(0, 0, 11); + tablet1.addValue(0, 1, 12); + tablet1.addValue(0, 2, 13); + + Tablet tablet2 = new Tablet(tableSchema2_2.getTableName(), tableSchema2_2.getColumnSchemas()); + tablet2.addTimestamp(0, 1); + tablet2.addValue(0, 0, 111); + tablet2.addValue(0, 1, 112); + tablet2.addValue(0, 2, 113); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource2 = new TsFileResource(f2); + resource2.setTsFileManager(tsFileManager); + resource2.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource2.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource2.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 1); + resource2.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 1); + resource2.close(); + + // rename table0.s1 -> table0.s0 + TsFileSet tsFileSet2 = new TsFileSet(2, fileSetDir, false); + tsFileSet2.appendSchemaEvolution( + Collections.singletonList(new ColumnRename("table0", "s1", "s0"))); + tsFileManager.addTsFileSet(tsFileSet2, 0); + + // file3: + // table0[s0, s2, s3] + // table2[s1, s2, s3] + File f3 = new File(SEQ_DIRS, "0-3-0-0.tsfile"); + TableSchema tableSchema3_1 = + new TableSchema( + "table0", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s0") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema3_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() 
+ .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(f3)) { + tsFileWriter.registerTableSchema(tableSchema3_1); + tsFileWriter.registerTableSchema(tableSchema3_2); + + Tablet tablet1 = new Tablet(tableSchema3_1.getTableName(), tableSchema3_1.getColumnSchemas()); + tablet1.addTimestamp(0, 2); + tablet1.addValue(0, 0, 21); + tablet1.addValue(0, 1, 22); + tablet1.addValue(0, 2, 23); + + Tablet tablet2 = new Tablet(tableSchema3_2.getTableName(), tableSchema3_2.getColumnSchemas()); + tablet2.addTimestamp(0, 2); + tablet2.addValue(0, 0, 121); + tablet2.addValue(0, 1, 122); + tablet2.addValue(0, 2, 123); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource3 = new TsFileResource(f3); + resource3.setTsFileManager(tsFileManager); + resource3.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 2); + resource3.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 2); + resource3.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 2); + resource3.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 2); + resource3.close(); + + // rename table2 -> table1 + TsFileSet tsFileSet3 = new TsFileSet(3, fileSetDir, false); + tsFileSet3.appendSchemaEvolution( + Collections.singletonList(new TableRename("table2", "table1"))); + tsFileManager.addTsFileSet(tsFileSet3, 0); + + // perform compaction + seqResources.add(resource1); + seqResources.add(resource2); + seqResources.add(resource3); + + List targetResources = + CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources(seqResources, true); + targetResources.forEach(s -> s.setTsFileManager(tsFileManager)); + + ICompactionPerformer performer = + new ReadPointCompactionPerformer(seqResources, unseqResources, targetResources); + performer.setSummary(new CompactionTaskSummary()); + performer.perform(); + + // 
target(version=1): + // table1[s1, s2, s3] + // table2[s1, s2, s3] + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(0).getTsFile()).build()) { + // table1 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table0 not found", e.getMessage()); + } + + // table1.s0 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s0"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s0", e.getMessage()); + } + + // check data of table1 + ResultSet resultSet = + tsFileReader.query( + "table1", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + } + } } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/tablemodel/CompactionWithAllNullRowsTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/tablemodel/CompactionWithAllNullRowsTest.java index 9ff8a401150b8..69e83e769a3ce 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/tablemodel/CompactionWithAllNullRowsTest.java +++ 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/tablemodel/CompactionWithAllNullRowsTest.java @@ -35,9 +35,9 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.ReadPointCompactionPerformer; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.InnerSpaceCompactionTask; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.tsfile.exception.write.WriteProcessException; @@ -333,7 +333,7 @@ public void testCompactionWithAllValueColumnDeletion() throws IOException, Illeg new TableDeletionEntry( new DeletionPredicate( "t1", - new IDPredicate.FullExactMatch(deviceID), + new TagPredicate.FullExactMatch(deviceID), Collections.singletonList("s0")), new TimeRange(Long.MIN_VALUE, 11))); resource1 @@ -342,7 +342,7 @@ public void testCompactionWithAllValueColumnDeletion() throws IOException, Illeg new TableDeletionEntry( new DeletionPredicate( "t1", - new IDPredicate.FullExactMatch(deviceID), + new TagPredicate.FullExactMatch(deviceID), Collections.singletonList("s1")), new TimeRange(Long.MIN_VALUE, 11))); resource1 @@ -351,7 +351,7 @@ public void testCompactionWithAllValueColumnDeletion() throws IOException, Illeg new TableDeletionEntry( new DeletionPredicate( "t1", - new IDPredicate.FullExactMatch(deviceID), + new TagPredicate.FullExactMatch(deviceID), Collections.singletonList("s2")), new TimeRange(Long.MIN_VALUE, 11))); 
resource1 @@ -360,7 +360,7 @@ public void testCompactionWithAllValueColumnDeletion() throws IOException, Illeg new TableDeletionEntry( new DeletionPredicate( "t1", - new IDPredicate.FullExactMatch(deviceID), + new TagPredicate.FullExactMatch(deviceID), Collections.singletonList("s3")), new TimeRange(Long.MIN_VALUE, 11))); resource1.getModFileForWrite().close(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/utils/CompactionCheckerUtils.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/utils/CompactionCheckerUtils.java index 6c3e692212a85..8aca64683447f 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/utils/CompactionCheckerUtils.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/utils/CompactionCheckerUtils.java @@ -538,7 +538,8 @@ public static List getAllPathsOfResources(List resour Pair iDeviceIDBooleanPair = deviceIterator.nextDevice(); IDeviceID deviceID = iDeviceIDBooleanPair.getLeft(); boolean isAlign = iDeviceIDBooleanPair.getRight(); - Map schemaMap = deviceIterator.getAllSchemasOfCurrentDevice(); + Map schemaMap = + deviceIterator.getAllSchemasOfCurrentDevice(new Pair<>(Long.MIN_VALUE, null)); IMeasurementSchema timeSchema = schemaMap.remove(TsFileConstant.TIME_COLUMN_ID); List measurementSchemas = new ArrayList<>(schemaMap.values()); if (measurementSchemas.isEmpty()) { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/ModificationFileTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/ModificationFileTest.java index a0a9885ecf08e..29609cc9c2740 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/ModificationFileTest.java +++ 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/ModificationFileTest.java @@ -22,9 +22,9 @@ import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.MeasurementPath; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.recover.CompactionRecoverManager; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.SegmentExactMatch; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.SegmentExactMatch; import org.apache.iotdb.db.utils.constant.TestConstant; import org.apache.tsfile.file.metadata.IDeviceID.Factory; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntryTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntryTest.java index 5c2979a90755c..ee9a4dfa405e4 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntryTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntryTest.java @@ -18,10 +18,10 @@ */ package org.apache.iotdb.db.storageengine.dataregion.modification; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.And; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; -import 
org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.SegmentExactMatch; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.And; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.SegmentExactMatch; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.file.metadata.IDeviceID.Factory; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaTest.java new file mode 100644 index 0000000000000..dbbdc9c046c5b --- /dev/null +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaTest.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.tsfile.enums.TSDataType; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class EvolvedSchemaTest { + + @Test + public void testMerge() { + // t1 -> t2, t2.s1 -> t2.s2, t3 -> t1 + List schemaEvolutionList = + Arrays.asList( + new TableRename("t1", "t2"), + new ColumnRename("t2", "s1", "s2", TSDataType.INT32), + new TableRename("t3", "t1")); + EvolvedSchema oldSchema = new EvolvedSchema(); + EvolvedSchema allSchema = new EvolvedSchema(); + schemaEvolutionList.forEach(schemaEvolution -> schemaEvolution.applyTo(oldSchema)); + schemaEvolutionList.forEach(schemaEvolution -> schemaEvolution.applyTo(allSchema)); + + // t1 -> t2 -> t3, t2.s1 -> t2.s2 -> t3.s1, t3 -> t1 -> t2 + schemaEvolutionList = + Arrays.asList( + new TableRename("t2", "t3"), + new ColumnRename("t3", "s2", "s1", TSDataType.INT32), + new TableRename("t1", "t2")); + EvolvedSchema newSchema = new EvolvedSchema(); + schemaEvolutionList.forEach(schemaEvolution -> schemaEvolution.applyTo(newSchema)); + schemaEvolutionList.forEach(schemaEvolution -> schemaEvolution.applyTo(allSchema)); + + EvolvedSchema mergedSchema = EvolvedSchema.merge(oldSchema, newSchema); + + assertEquals(allSchema, mergedSchema); + } + + @Test + public void testConvert() { + // t1 -> t2, t2.s1 -> t2.s2, t3 -> t1 + List schemaEvolutionList = + Arrays.asList( + new TableRename("t1", "t2"), + new ColumnRename("t2", "s1", "s2", TSDataType.INT32), + new TableRename("t3", "t1")); + EvolvedSchema oldSchema = new EvolvedSchema(); + 
schemaEvolutionList.forEach(schemaEvolution -> schemaEvolution.applyTo(oldSchema)); + + List convertedSchemaEvolutions = oldSchema.toSchemaEvolutions(); + EvolvedSchema newSchema = new EvolvedSchema(); + convertedSchemaEvolutions.forEach(schemaEvolution -> schemaEvolution.applyTo(newSchema)); + + assertEquals(oldSchema, newSchema); + } + + @Test + public void testTableRename() { + EvolvedSchema schema = new EvolvedSchema(); + // t1 -> t2 + SchemaEvolution schemaEvolution = new TableRename("t1", "t2"); + schemaEvolution.applyTo(schema); + assertEquals("t1", schema.getOriginalTableName("t2")); + assertEquals("", schema.getOriginalTableName("t1")); + assertEquals("t2", schema.getFinalTableName("t1")); + assertEquals("t2", schema.getFinalTableName("t2")); + // t1 -> t2 -> t3 + schemaEvolution = new TableRename("t2", "t3"); + schemaEvolution.applyTo(schema); + assertEquals("t1", schema.getOriginalTableName("t3")); + assertEquals("", schema.getOriginalTableName("t2")); + assertEquals("t3", schema.getFinalTableName("t1")); + assertEquals("t2", schema.getFinalTableName("t2")); + // t1 -> t2 -> t3 -> t1 + schemaEvolution = new TableRename("t3", "t1"); + schemaEvolution.applyTo(schema); + assertEquals("t1", schema.getOriginalTableName("t1")); + assertEquals("", schema.getOriginalTableName("t3")); + assertEquals("t1", schema.getFinalTableName("t1")); + assertEquals("t3", schema.getFinalTableName("t3")); + } + + @Test + public void testColumnRename() { + EvolvedSchema schema = new EvolvedSchema(); + // s1 -> s2 + SchemaEvolution schemaEvolution = new ColumnRename("t1", "s1", "s2"); + schemaEvolution.applyTo(schema); + assertEquals("s1", schema.getOriginalColumnName("t1", "s2")); + assertEquals("", schema.getOriginalColumnName("t1", "s1")); + assertEquals("s2", schema.getFinalColumnName("t1", "s1")); + assertEquals("s2", schema.getFinalColumnName("t1", "s2")); + // s1 -> s2 -> s3 + schemaEvolution = new ColumnRename("t1", "s2", "s3"); + schemaEvolution.applyTo(schema); + 
assertEquals("s1", schema.getOriginalColumnName("t1", "s3")); + assertEquals("", schema.getOriginalColumnName("t1", "s2")); + assertEquals("s3", schema.getFinalColumnName("t1", "s1")); + assertEquals("s2", schema.getFinalColumnName("t1", "s2")); + // s1 -> s2 -> s3 -> s1 + schemaEvolution = new ColumnRename("t1", "s3", "s1"); + schemaEvolution.applyTo(schema); + assertEquals("s1", schema.getOriginalColumnName("t1", "s1")); + assertEquals("", schema.getOriginalColumnName("t1", "s3")); + assertEquals("s1", schema.getFinalColumnName("t1", "s1")); + assertEquals("s3", schema.getFinalColumnName("t1", "s3")); + } +} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFileTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFileTest.java new file mode 100644 index 0000000000000..10348a92c17db --- /dev/null +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFileTest.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.db.utils.constant.TestConstant; + +import org.apache.tsfile.enums.TSDataType; +import org.junit.After; +import org.junit.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +@SuppressWarnings("ResultOfMethodCallIgnored") +public class SchemaEvolutionFileTest { + + @After + public void tearDown() throws Exception { + clearSchemaEvolutionFile(); + } + + @Test + public void testSchemaEvolutionFile() throws IOException { + String filePath = TestConstant.BASE_OUTPUT_PATH + File.separator + "0.sevo"; + + SchemaEvolutionFile schemaEvolutionFile = new SchemaEvolutionFile(filePath); + + // t1 -> t2, t2.s1 -> t2.s2, t3 -> t1 + List schemaEvolutionList = + Arrays.asList( + new TableRename("t1", "t2"), + new ColumnRename("t2", "s1", "s2", TSDataType.INT32), + new TableRename("t3", "t1")); + schemaEvolutionFile.append(schemaEvolutionList); + + EvolvedSchema evolvedSchema = schemaEvolutionFile.readAsSchema(); + assertEquals("t1", evolvedSchema.getOriginalTableName("t2")); + assertEquals("s1", evolvedSchema.getOriginalColumnName("t2", "s2")); + assertEquals("t3", evolvedSchema.getOriginalTableName("t1")); + // not evolved, should remain the same + assertEquals("t4", evolvedSchema.getOriginalTableName("t4")); + assertEquals("s3", evolvedSchema.getOriginalColumnName("t2", "s3")); + + // t1 -> t2 -> t3, t2.s1 -> t2.s2 -> t3.s1, t3 -> t1 -> t2 + schemaEvolutionList = + Arrays.asList( + new TableRename("t2", "t3"), + new ColumnRename("t3", "s2", "s1", TSDataType.INT32), + new TableRename("t1", "t2")); + schemaEvolutionFile.append(schemaEvolutionList); + evolvedSchema = schemaEvolutionFile.readAsSchema(); + assertEquals("t1", evolvedSchema.getOriginalTableName("t3")); + assertEquals("s1", 
evolvedSchema.getOriginalColumnName("t3", "s1")); + assertEquals("t3", evolvedSchema.getOriginalTableName("t2")); + // not evolved, should remain the same + assertEquals("t4", evolvedSchema.getOriginalTableName("t4")); + assertEquals("s3", evolvedSchema.getOriginalColumnName("t2", "s3")); + } + + private void clearSchemaEvolutionFile() { + File dir = new File(TestConstant.BASE_OUTPUT_PATH); + File[] files = dir.listFiles(f -> f.getName().endsWith(SchemaEvolutionFile.FILE_SUFFIX)); + if (files != null) { + for (File file : files) { + file.delete(); + } + } + } + + @Test + public void testRecover() throws IOException { + String filePath = TestConstant.BASE_OUTPUT_PATH + File.separator + "0.sevo"; + + SchemaEvolutionFile schemaEvolutionFile = new SchemaEvolutionFile(filePath); + List schemaEvolutionList = + Arrays.asList( + new TableRename("t1", "t2"), + new ColumnRename("t2", "s1", "s2", TSDataType.INT32), + new TableRename("t3", "t1")); + schemaEvolutionFile.append(schemaEvolutionList); + + File dir = new File(TestConstant.BASE_OUTPUT_PATH); + File[] files = dir.listFiles(f -> f.getName().endsWith(SchemaEvolutionFile.FILE_SUFFIX)); + assertNotNull(files); + assertEquals(1, files.length); + assertEquals(24, SchemaEvolutionFile.parseValidLength(files[0].getName())); + + try (FileOutputStream fileOutputStream = new FileOutputStream(files[0], true)) { + fileOutputStream.write(new byte[100]); + } + + schemaEvolutionFile = new SchemaEvolutionFile(files[0].getAbsolutePath()); + EvolvedSchema evolvedSchema = schemaEvolutionFile.readAsSchema(); + assertEquals("t1", evolvedSchema.getOriginalTableName("t2")); + assertEquals("s1", evolvedSchema.getOriginalColumnName("t2", "s2")); + assertEquals("t3", evolvedSchema.getOriginalTableName("t1")); + // not evolved, should remain the same + assertEquals("t4", evolvedSchema.getOriginalTableName("t4")); + assertEquals("s3", evolvedSchema.getOriginalColumnName("t2", "s3")); + } +} diff --git 
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java index 100c40eddcc23..99507d0ec0dfa 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java @@ -22,9 +22,12 @@ import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.commons.utils.TimePartitionUtils; +import org.apache.tsfile.annotations.TreeModel; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.read.filter.basic.Filter; import org.slf4j.Logger; @@ -95,18 +98,21 @@ public void setDataPartitionMap( this.dataPartitionMap = dataPartitionMap; } + @TreeModel public List> getTimePartitionRange( IDeviceID deviceID, Filter timeFilter) { - String storageGroup = getDatabaseNameByDevice(deviceID); - TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); - if (!dataPartitionMap.containsKey(storageGroup) - || !dataPartitionMap.get(storageGroup).containsKey(seriesPartitionSlot)) { + String databaseName = getDatabaseNameByDevice(deviceID); + // since this method retrieves database from deviceId, it must only be used by the tree model + TSeriesPartitionSlot seriesPartitionSlot = + calculateDeviceGroupId(new FullDeviceIdKey(deviceID)); + if (!dataPartitionMap.containsKey(databaseName) + || !dataPartitionMap.get(databaseName).containsKey(seriesPartitionSlot)) { return Collections.emptyList(); } List> res = new 
ArrayList<>(); Map> map = - dataPartitionMap.get(storageGroup).get(seriesPartitionSlot); + dataPartitionMap.get(databaseName).get(seriesPartitionSlot); List timePartitionSlotList = map.keySet().stream() .filter(key -> TimePartitionUtils.satisfyPartitionStartTime(timeFilter, key.startTime)) @@ -138,10 +144,13 @@ public List> getTimePartitionRange( return res; } + @TreeModel public List getDataRegionReplicaSetWithTimeFilter( final IDeviceID deviceId, final Filter timeFilter) { + // since this method retrieves database from deviceId, it must only be used by the tree model final String storageGroup = getDatabaseNameByDevice(deviceId); - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceId); + final TSeriesPartitionSlot seriesPartitionSlot = + calculateDeviceGroupId(new FullDeviceIdKey(deviceId)); Map> regionReplicaSetMap = dataPartitionMap .getOrDefault(storageGroup, Collections.emptyMap()) @@ -166,8 +175,8 @@ public List getDataRegionReplicaSetWithTimeFilter( *

The device id shall be [table, seg1, ....] */ public List getDataRegionReplicaSetWithTimeFilter( - final String database, final IDeviceID deviceId, final Filter timeFilter) { - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceId); + final String database, final SeriesPartitionKey seriesPartitionKey, final Filter timeFilter) { + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(seriesPartitionKey); if (!dataPartitionMap.containsKey(database) || !dataPartitionMap.get(database).containsKey(seriesPartitionSlot)) { return Collections.singletonList(NOT_ASSIGNED); @@ -181,15 +190,18 @@ public List getDataRegionReplicaSetWithTimeFilter( .collect(toList()); } + @TreeModel public List getDataRegionReplicaSet( final IDeviceID deviceID, final TTimePartitionSlot tTimePartitionSlot) { + // since this method retrieves database from deviceId, it must only be used by the tree model final String storageGroup = getDatabaseNameByDevice(deviceID); final Map>> dbMap = dataPartitionMap.get(storageGroup); if (dbMap == null) { return Collections.singletonList(NOT_ASSIGNED); } - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final TSeriesPartitionSlot seriesPartitionSlot = + calculateDeviceGroupId(new FullDeviceIdKey(deviceID)); final Map> seriesSlotMap = dbMap.get(seriesPartitionSlot); if (seriesSlotMap == null) { @@ -206,16 +218,17 @@ public List getDataRegionReplicaSet( } public List getDataRegionReplicaSetForWriting( - final IDeviceID deviceID, + final SeriesPartitionKey key, final List timePartitionSlotList, String databaseName) { if (databaseName == null) { - databaseName = getDatabaseNameByDevice(deviceID); + // must be the tree model here + databaseName = getDatabaseNameByDevice(((FullDeviceIdKey) key).getDeviceID()); } // A list of data region replica sets will store data in a same time partition. // We will insert data to the last set in the list. 
// TODO return the latest dataRegionReplicaSet for each time partition - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(key); // IMPORTANT TODO: (xingtanzjr) need to handle the situation for write operation that there are // more than 1 Regions for one timeSlot final List dataRegionReplicaSets = new ArrayList<>(); @@ -228,8 +241,7 @@ public List getDataRegionReplicaSetForWriting( if (targetRegionList == null || targetRegionList.isEmpty()) { throw new RuntimeException( String.format( - "targetRegionList is empty. device: %s, timeSlot: %s", - deviceID, timePartitionSlot)); + "targetRegionList is empty. device: %s, timeSlot: %s", key, timePartitionSlot)); } else { dataRegionReplicaSets.add(targetRegionList.get(targetRegionList.size() - 1)); } @@ -238,13 +250,16 @@ public List getDataRegionReplicaSetForWriting( } public TRegionReplicaSet getDataRegionReplicaSetForWriting( - final IDeviceID deviceID, final TTimePartitionSlot timePartitionSlot, String databaseName) { + final SeriesPartitionKey seriesPartitionKey, + final TTimePartitionSlot timePartitionSlot, + String databaseName) { // A list of data region replica sets will store data in a same time partition. // We will insert data to the last set in the list. 
// TODO return the latest dataRegionReplicaSet for each time partition - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(seriesPartitionKey); if (databaseName == null) { - databaseName = getDatabaseNameByDevice(deviceID); + // must be the tree model here + databaseName = getDatabaseNameByDevice(((FullDeviceIdKey) seriesPartitionKey).getDeviceID()); } final Map>> databasePartitionMap = dataPartitionMap.get(databaseName); @@ -261,10 +276,11 @@ public TRegionReplicaSet getDataRegionReplicaSetForWriting( return regions.get(0); } + @TreeModel public TRegionReplicaSet getDataRegionReplicaSetForWriting( IDeviceID deviceID, TTimePartitionSlot timePartitionSlot) { return getDataRegionReplicaSetForWriting( - deviceID, timePartitionSlot, getDatabaseNameByDevice(deviceID)); + new FullDeviceIdKey(deviceID), timePartitionSlot, getDatabaseNameByDevice(deviceID)); } public String getDatabaseNameByDevice(IDeviceID deviceID) { @@ -302,15 +318,14 @@ public List getDistributionInfo() { public void upsertDataPartition(DataPartition targetDataPartition) { requireNonNull(this.dataPartitionMap, "dataPartitionMap is null"); - for (Map.Entry< - String, Map>>> + for (Entry>>> targetDbEntry : targetDataPartition.getDataPartitionMap().entrySet()) { String database = targetDbEntry.getKey(); if (dataPartitionMap.containsKey(database)) { Map>> sourceSeriesPartitionMap = dataPartitionMap.get(database); - for (Map.Entry>> + for (Entry>> targetSeriesSlotEntry : targetDbEntry.getValue().entrySet()) { TSeriesPartitionSlot targetSeriesSlot = targetSeriesSlotEntry.getKey(); @@ -319,7 +334,7 @@ public void upsertDataPartition(DataPartition targetDataPartition) { sourceSeriesPartitionMap.get(targetSeriesSlot); Map> targetTimePartionMap = targetSeriesSlotEntry.getValue(); - for (Map.Entry> targetEntry : + for (Entry> targetEntry : targetTimePartionMap.entrySet()) { if 
(!sourceTimePartitionMap.containsKey(targetEntry.getKey())) { sourceTimePartitionMap.put(targetEntry.getKey(), targetEntry.getValue()); diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/Partition.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/Partition.java index b94cf3d005d72..5d32e33db610e 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/Partition.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/Partition.java @@ -21,8 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; - -import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import java.util.List; @@ -42,8 +41,8 @@ protected Partition(String seriesSlotExecutorName, int seriesPartitionSlotNum) { seriesSlotExecutorName, seriesPartitionSlotNum); } - public TSeriesPartitionSlot calculateDeviceGroupId(IDeviceID deviceID) { - return executor.getSeriesPartitionSlot(deviceID); + public TSeriesPartitionSlot calculateDeviceGroupId(SeriesPartitionKey key) { + return executor.getSeriesPartitionSlot(key); } public abstract List getDistributionInfo(); diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java index 96abc7498653d..ff56dee46fed5 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java @@ -22,10 +22,13 @@ import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.exception.IoTDBException; +import 
org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.tsfile.annotations.TreeModel; import org.apache.tsfile.file.metadata.IDeviceID; import java.util.ArrayList; @@ -75,18 +78,20 @@ public void setSchemaPartitionMap( * *

The device id shall be [table, seg1, ....] */ - public TRegionReplicaSet getSchemaRegionReplicaSet(String database, IDeviceID deviceID) { - TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + public TRegionReplicaSet getSchemaRegionReplicaSet(String database, SeriesPartitionKey key) { + TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(key); return schemaPartitionMap.get(database).get(seriesPartitionSlot); } + @TreeModel // [root, db, ....] public TRegionReplicaSet getSchemaRegionReplicaSet(final IDeviceID deviceID) { // A list of data region replica sets will store data in a same time partition. // We will insert data to the last set in the list. // TODO return the latest dataRegionReplicaSet for each time partition final String storageGroup = getStorageGroupByDevice(deviceID); - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final TSeriesPartitionSlot seriesPartitionSlot = + calculateDeviceGroupId(new FullDeviceIdKey(deviceID)); if (schemaPartitionMap.get(storageGroup) == null) { throw new RuntimeException( new IoTDBException("Path does not exist. 
", TSStatusCode.PATH_NOT_EXIST.getStatusCode())); diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/SeriesPartitionExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/SeriesPartitionExecutor.java index d2666446e8e14..8140cb259b194 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/SeriesPartitionExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/SeriesPartitionExecutor.java @@ -45,7 +45,12 @@ public SeriesPartitionExecutor(int seriesPartitionSlotNum) { @TestOnly public abstract TSeriesPartitionSlot getSeriesPartitionSlot(String device); - public abstract TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID); + @TestOnly + public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + return getSeriesPartitionSlot(new FullDeviceIdKey(deviceID)); + } + + public abstract TSeriesPartitionSlot getSeriesPartitionSlot(SeriesPartitionKey deviceID); public static SeriesPartitionExecutor getSeriesPartitionExecutor( String executorName, int seriesPartitionSlotNum) { @@ -73,4 +78,50 @@ private static synchronized void initStaticSeriesPartitionExecutor( } } } + + public interface SeriesPartitionKey { + int segmentNum(); + + Object segment(int index); + } + + public static class FullDeviceIdKey implements SeriesPartitionKey { + private final IDeviceID deviceID; + + public FullDeviceIdKey(IDeviceID deviceID) { + this.deviceID = deviceID; + } + + @Override + public int segmentNum() { + return deviceID.segmentNum(); + } + + @Override + public Object segment(int index) { + return deviceID.segment(index); + } + + public IDeviceID getDeviceID() { + return deviceID; + } + } + + public static class NoTableNameDeviceIdKey implements SeriesPartitionKey { + private final IDeviceID deviceID; + + public NoTableNameDeviceIdKey(IDeviceID deviceID) { + this.deviceID = deviceID; + } + + 
@Override + public int segmentNum() { + return deviceID.segmentNum() - 1; + } + + @Override + public Object segment(int index) { + return deviceID.segment(index + 1); + } + } } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/APHashExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/APHashExecutor.java index 9390111d24720..6b6c7976a3531 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/APHashExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/APHashExecutor.java @@ -22,8 +22,6 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.tsfile.file.metadata.IDeviceID; - import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; public class APHashExecutor extends SeriesPartitionExecutor { @@ -49,13 +47,13 @@ public TSeriesPartitionSlot getSeriesPartitionSlot(String device) { } @Override - public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + public TSeriesPartitionSlot getSeriesPartitionSlot(SeriesPartitionKey key) { int hash = 0; - int segmentNum = deviceID.segmentNum(); + int segmentNum = key.segmentNum(); int index = 0; for (int segmentID = 0; segmentID < segmentNum; segmentID++) { - Object segment = deviceID.segment(segmentID); + Object segment = key.segment(segmentID); if (segment instanceof String) { String segmentStr = (String) segment; for (int i = 0; i < segmentStr.length(); i++) { diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/BKDRHashExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/BKDRHashExecutor.java index c039e8ddd8335..9502f8007df6f 100644 --- 
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/BKDRHashExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/BKDRHashExecutor.java @@ -22,8 +22,6 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.tsfile.file.metadata.IDeviceID; - import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; public class BKDRHashExecutor extends SeriesPartitionExecutor { @@ -47,12 +45,12 @@ public TSeriesPartitionSlot getSeriesPartitionSlot(String device) { } @Override - public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + public TSeriesPartitionSlot getSeriesPartitionSlot(SeriesPartitionKey key) { int hash = 0; - int segmentNum = deviceID.segmentNum(); + int segmentNum = key.segmentNum(); for (int segmentID = 0; segmentID < segmentNum; segmentID++) { - Object segment = deviceID.segment(segmentID); + Object segment = key.segment(segmentID); if (segment instanceof String) { String segmentStr = (String) segment; for (int i = 0; i < segmentStr.length(); i++) { diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/JSHashExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/JSHashExecutor.java index 1e8c203158378..734da1ee39735 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/JSHashExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/JSHashExecutor.java @@ -21,8 +21,6 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.tsfile.file.metadata.IDeviceID; - import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; public class 
JSHashExecutor extends SeriesPartitionExecutor { @@ -46,12 +44,12 @@ public TSeriesPartitionSlot getSeriesPartitionSlot(String device) { } @Override - public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + public TSeriesPartitionSlot getSeriesPartitionSlot(SeriesPartitionKey key) { int hash = BASE; - int segmentNum = deviceID.segmentNum(); + int segmentNum = key.segmentNum(); for (int segmentID = 0; segmentID < segmentNum; segmentID++) { - Object segment = deviceID.segment(segmentID); + Object segment = key.segment(segmentID); if (segment instanceof String) { String segmentStr = (String) segment; for (int i = 0; i < segmentStr.length(); i++) { diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/SDBMHashExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/SDBMHashExecutor.java index e2143c00c0cec..ec68812cac609 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/SDBMHashExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/SDBMHashExecutor.java @@ -21,8 +21,6 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.tsfile.file.metadata.IDeviceID; - import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; public class SDBMHashExecutor extends SeriesPartitionExecutor { @@ -44,12 +42,12 @@ public TSeriesPartitionSlot getSeriesPartitionSlot(String device) { } @Override - public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + public TSeriesPartitionSlot getSeriesPartitionSlot(SeriesPartitionKey key) { int hash = 0; - int segmentNum = deviceID.segmentNum(); + int segmentNum = key.segmentNum(); for (int segmentID = 0; segmentID < segmentNum; segmentID++) { - Object segment = deviceID.segment(segmentID); + Object 
segment = key.segment(segmentID); if (segment instanceof String) { String segmentStr = (String) segment; for (int i = 0; i < segmentStr.length(); i++) { diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/table/TsTable.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/table/TsTable.java index 1b5a9d4ac34aa..87e23672c8e7d 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/table/TsTable.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/table/TsTable.java @@ -43,8 +43,9 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Collections; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -68,7 +69,10 @@ public class TsTable { new TimeColumnSchema(TIME_COLUMN_NAME, TSDataType.TIMESTAMP); public static final String TTL_PROPERTY = "ttl"; - public static final Set TABLE_ALLOWED_PROPERTIES = Collections.singleton(TTL_PROPERTY); + public static final String ALLOW_ALTER_NAME_PROPERTY = "allow_alter_name"; + public static final boolean ALLOW_ALTER_NAME_DEFAULT = true; + public static final Set TABLE_ALLOWED_PROPERTIES = + new HashSet<>(Arrays.asList(TTL_PROPERTY, ALLOW_ALTER_NAME_PROPERTY)); private static final String OBJECT_STRING_ERROR = "When there are object fields, the %s %s shall not be '.', '..' 
or contain './', '.\\'."; protected String tableName; @@ -439,4 +443,12 @@ public String toString() { + props + '}'; } + + public boolean canAlterName() { + if (getProps() == null) { + return false; + } + return Boolean.parseBoolean( + getProps().getOrDefault(TsTable.ALLOW_ALTER_NAME_PROPERTY, "false")); + } } diff --git a/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift b/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift index cca7110f28d40..6f8b65dd5a4ef 100644 --- a/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift +++ b/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift @@ -459,6 +459,18 @@ struct TDeleteDataForDeleteSchemaReq { 3: optional bool isGeneratedByPipe } +struct TDataRegionEvolveSchemaReq { + 1: required list dataRegionIdList + 2: required binary schemaEvolutions + 3: optional bool isGeneratedByPipe +} + +struct TSchemaRegionEvolveSchemaReq { + 1: required list schemaRegionIdList + 2: required binary schemaEvolutions + 3: optional bool isGeneratedByPipe +} + struct TDeleteTimeSeriesReq { 1: required list schemaRegionIdList 2: required binary pathPatternTree @@ -1091,6 +1103,10 @@ service IDataNodeRPCService { */ common.TSStatus deleteDataForDeleteSchema(TDeleteDataForDeleteSchemaReq req) + common.TSStatus evolveSchemaInDataRegion(TDataRegionEvolveSchemaReq req) + + common.TSStatus evolveSchemaInSchemaRegion(TSchemaRegionEvolveSchemaReq req) + /** * Delete matched timeseries and remove according schema black list in target schemRegion */ diff --git a/pom.xml b/pom.xml index c3b80202a01ea..3ae94c8cbff86 100644 --- a/pom.xml +++ b/pom.xml @@ -173,7 +173,7 @@ 0.14.1 1.9 1.5.6-3 - 2.2.1-260115-SNAPSHOT + 2.2.1-sevo2-SNAPSHOT