我有以下用C#编写的代码,但按目前的速度,我需要4-5天的时间才能将数据从Oracle数据库迁移到Elasticsearch。我以每批100条记录的方式插入数据。对于400万条记录,是否还有其他方法可以让迁移更快(如果可能的话,最好不到一天)?
// Migrates rows from Oracle to Elasticsearch in windowed batches.
// NOTE(review): BuildQuery reads a field `change` declared elsewhere in the class;
// its value must match the j-loop step (100) or rows will be skipped/duplicated — confirm.
public static void Selection()
{
    for (int i = 1; i < 4000000; i += 1000)
    {
        for (int j = i; j < (i + 1000); j += 100)
        {
            // Dispose command and reader deterministically — the original leaked
            // both objects on every one of the ~40,000 iterations.
            using (OracleCommand cmd = new OracleCommand(BuildQuery(j), oracle_connection))
            using (OracleDataReader reader = cmd.ExecuteReader())
            {
                List<Record> list = CreateRecordList(reader);
                insert(list);
            }
        }
    }
}

// Materializes every row the reader yields into a Record (7 string columns).
// Returns the rows read so far even if an exception interrupts the loop.
private static List<Record> CreateRecordList(OracleDataReader reader)
{
    List<Record> l = new List<Record>();
    string[] str = new string[7];
    try
    {
        while (reader.Read())
        {
            for (int i = 0; i < 7; i++)
            {
                str[i] = reader[i].ToString();
            }
            l.Add(new Record(str[0], str[1], str[2], str[3], str[4], str[5], str[6]));
        }
    }
    catch (Exception er)
    {
        // The original swallowed the exception silently; at least surface it
        // so a partial batch is diagnosable.
        Console.Error.WriteLine("CreateRecordList failed: " + er.Message);
    }
    return l;
}

// Builds a ROW_NUMBER()-windowed query returning rows [from, from + change - 1].
// NOTE(review): re-sorting the whole table per window is the main Oracle-side cost.
private static string BuildQuery(int from)
{
    int to = from + change - 1;
    StringBuilder builder = new StringBuilder();
    builder.AppendLine(@"select * from");
    builder.AppendLine("(");
    builder.AppendLine("select FIELD_1, FIELD_2, FIELD_3, FIELD_4, FIELD_5, FIELD_6, FIELD_7, ");
    builder.Append(" row_number() over(order by FIELD_1) rn");
    builder.AppendLine(" from tablename");
    builder.AppendLine(")");
    builder.AppendLine(string.Format("where rn between {0} and {1}", from, to));
    builder.AppendLine("order by rn");
    return builder.ToString();
}

// Indexes one batch into Elasticsearch.
public static void insert(List<Record> l)
{
    try
    {
        // Single bulk round trip instead of one HTTP request per document
        // (the per-document loop was the dominant cost of the migration).
        client.IndexMany<Record>(l, "index", "type");
    }
    catch (Exception er)
    {
        // Surface the failure instead of swallowing it silently.
        Console.Error.WriteLine("insert failed: " + er.Message);
    }
}
ROW_NUMBER() 函数会对性能产生负面影响,而你正在运行它数千次。你已经在使用 OracleDataReader 了——它不会一次性把全部四百万行拉到你的机器上,它基本上是一次流式传输一行或几行数据。
ROW_NUMBER()
OracleDataReader
这必须在几分钟或几小时内完成,而不是几天-我们有几个进程以类似的方式在Sybase和SQL Server之间移动数百万条记录,并且花费不到五分钟。
也许试一下:
// Stream the whole table once (no ROW_NUMBER windows) and index in fixed-size batches.
int batchSize = 500;
// Dispose the command as well as the reader — the original only disposed the reader.
using (OracleCommand cmd = new OracleCommand("SELECT ... FROM TableName", oracle_connection))
using (OracleDataReader reader = cmd.ExecuteReader())
{
    List<Record> l = new List<Record>(batchSize);
    string[] str = new string[7];
    int currentRow = 0;
    while (reader.Read())
    {
        for (int i = 0; i < 7; i++)
        {
            str[i] = reader[i].ToString();
        }
        l.Add(new Record(str[0], str[1], str[2], str[3], str[4], str[5], str[6]));
        // Commit every time batchSize records have been read.
        if (++currentRow == batchSize)
        {
            Commit(l);
            l.Clear();
            currentRow = 0;
        }
    }
    // Commit the remaining partial batch — but only if there is one
    // (the original issued an empty commit when rows divided evenly).
    if (l.Count > 0)
    {
        Commit(l);
    }
}
以下是Commit可能的样子:
Commit
/// <summary>
/// Pushes one batch of records into Elasticsearch in a single round trip.
/// </summary>
public void Commit(IEnumerable<Record> records)
{
    // Bulk-index the whole batch rather than one request per document.
    // TODO: verify the exact bulk syntax — something like
    // client.Bulk(b => b.IndexMany(records)) may also work.
    client.IndexMany<Record>(records, "index", "type");
}