package org.apache.hadoop.hbase.catalog;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.migration.NamespaceUpgrade;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
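
/**
 * Tests the migration of hbase:meta rows from Writable-serialized HRegionInfos to
 * protobuf-serialized ones. The fixture restores a root directory laid down by a pre-PB
 * version of HBase from a bundled tarball, upgrades it, and then exercises the migration
 * code paths directly.
 */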
@Category(MediumTests.class)
@Deprecated
public class TestMetaMigrationConvertingToPB {
  static final Log LOG = LogFactory.getLog(TestMetaMigrationConvertingToPB.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private final static String TESTTABLE = "TestTable";

  private final static int ROW_COUNT = 100;
  private final static int REGION_COUNT = 9;

  // Meta version value this test writes to simulate a pre-PB (0.92-era) hbase:meta.
  private static final int META_VERSION_092 = 0;
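
  /**
   * Start mini ZK and DFS clusters, copy the bundled pre-PB hbase root directory into
   * the mini HDFS, run the namespace upgrade over it, start HBase on top of it, and
   * sanity-check that the pre-existing test table is readable.
   */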
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Start the mini dfs and zk clusters before we lay down the pre-migration root dir.
    TEST_UTIL.startMiniZKCluster();
    TEST_UTIL.startMiniDFSCluster(1);
    Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationConvertToPB");
    // Untar the bundled pre-PB hbase root directory into the local test dir.
    File untar = untar(new File(testdir.toString()));
    // Copy the untarred directory up into the mini hdfs so hbase finds it when it starts.
    Configuration conf = TEST_UTIL.getConfiguration();
    FsShell shell = new FsShell(conf);
    FileSystem fs = FileSystem.get(conf);
    // Find where hbase will root itself and make sure the parent directory exists.
    Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
    if (!fs.isDirectory(hbaseRootDir.getParent())) {
      fs.mkdirs(hbaseRootDir.getParent());
    }
    doFsCommand(shell,
      new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});
    // The tarball stores the meta directory as '-META-'; rename it back to '.META.'
    // so hbase finds it when it starts.
    doFsCommand(shell, new String [] {"-mv", new Path(hbaseRootDir, "-META-").toString(),
      new Path(hbaseRootDir, ".META.").toString()});
    // Log what ended up in the mini hdfs.
    doFsCommand(shell, new String [] {"-lsr", "/"});

    // Run the namespace upgrade over the copied-in layout before starting the cluster.
    Configuration toolConf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
    ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});

    TEST_UTIL.startMiniHBaseCluster(1, 1);

    // The cluster should come up serving the pre-existing test table; count its rows.
    HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE);
    ResultScanner scanner = t.getScanner(new Scan());
    int count = 0;
    while (scanner.next() != null) {
      count++;
    }
    assertEquals(ROW_COUNT, count);
    scanner.close();
    t.close();
  }
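
  /**
   * Untars the bundled pre-migration hbase root directory
   * (data/TestMetaMigrationConvertToPB.tgz under the test source directory) into the
   * given test directory and returns the untarred directory.
   */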
  private static File untar(final File testdir) throws IOException {
    // Note: the data file keeps the shorter 'ConvertToPB' name; it must match the tgz on disk.
    final String datafile = "TestMetaMigrationConvertToPB";
    String srcTarFile =
      System.getProperty("project.build.testSourceDirectory", "src/test") +
      File.separator + "data" + File.separator + datafile + ".tgz";
    File homedir = new File(testdir.toString());
    File tgtUntarDir = new File(homedir, datafile);
    if (tgtUntarDir.exists()) {
      if (!FileUtil.fullyDelete(tgtUntarDir)) {
        throw new IOException("Failed delete of " + tgtUntarDir.toString());
      }
    }
    LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
    FileUtil.unTar(new File(srcTarFile), homedir);
    assertTrue(tgtUntarDir.exists());
    return tgtUntarDir;
  }

  private static void doFsCommand(final FsShell shell, final String [] args)
      throws Exception {
    // Run the fs shell command and fail loudly if it returns a non-zero code.
    int errcode = shell.run(args);
    if (errcode != 0) {
      throw new IOException("Failed fs command " + Arrays.toString(args) + "; errcode=" + errcode);
    }
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testMetaUpdatedFlagInROOT() throws Exception {
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    boolean metaUpdated = MetaMigrationConvertingToPB.
      isMetaTableUpdated(master.getCatalogTracker());
    assertTrue(metaUpdated);
    verifyMetaRowsAreUpdated(master.getCatalogTracker());
  }

  @Test
  public void testMetaMigration() throws Exception {
    LOG.info("Starting testMetaMigration");
    final byte [] FAMILY = Bytes.toBytes("family");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testMetaMigration"));
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    htd.addFamily(hcd);
    Configuration conf = TEST_UTIL.getConfiguration();
    byte[][] regionNames = new byte[][]{
        HConstants.EMPTY_START_ROW,
        Bytes.toBytes("region_a"),
        Bytes.toBytes("region_b")};
    // Insert meta rows for these regions using the old Writable serialization.
    createMultiRegionsWithWritableSerialization(conf,
        htd.getTableName().getName(),
        regionNames);
    CatalogTracker ct =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
    // Attempt to roll the recorded meta version back to the pre-PB value.
    undoVersionInRoot(ct);
    MetaReader.fullScanMetaAndPrint(ct);
    LOG.info("Meta Print completed.testMetaMigration");

    long numMigratedRows = MetaMigrationConvertingToPB.updateMeta(
        TEST_UTIL.getHBaseCluster().getMaster());
    MetaReader.fullScanMetaAndPrint(ct);

    // Every Writable-serialized row we inserted should have been migrated.
    assertEquals(regionNames.length, numMigratedRows);

    // The migration flag and the rows themselves should now be in PB form.
    boolean metaUpdated =
        MetaMigrationConvertingToPB.isMetaTableUpdated(
            TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
    assertTrue(metaUpdated);
    verifyMetaRowsAreUpdated(ct);
  }
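
  /**
   * Simulates a master crash part-way through meta migration: half of the inserted
   * region rows are already PB-serialized and half still carry Writable-serialized
   * HRegionInfos. updateMetaIfNecessary() should migrate only the remaining Writable rows.
   */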
  @Test
  public void testMasterCrashDuringMetaMigration() throws Exception {
    final byte[] FAMILY = Bytes.toBytes("family");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf
        ("testMasterCrashDuringMetaMigration"));
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    htd.addFamily(hcd);
    Configuration conf = TEST_UTIL.getConfiguration();
    // 10 region rows already in PB form (as if the crashed master had migrated them)...
    createMultiRegionsWithPBSerialization(conf, htd.getTableName().getName(), 10);
    // ...and 10 region rows still in the old Writable form.
    createMultiRegionsWithWritableSerialization(conf,
        htd.getTableName().getName(), 10);
    CatalogTracker ct =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
    // Attempt to roll the recorded meta version back to the pre-PB value.
    undoVersionInRoot(ct);

    MetaReader.fullScanMetaAndPrint(ct);
    LOG.info("Meta Print completed.testMasterCrashDuringMetaMigration");

    long numMigratedRows =
        MetaMigrationConvertingToPB.updateMetaIfNecessary(
            TEST_UTIL.getHBaseCluster().getMaster());
    // Only the 10 Writable-serialized rows should have needed migrating.
    assertEquals(10, numMigratedRows);

    boolean metaUpdated = MetaMigrationConvertingToPB.
        isMetaTableUpdated(TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
    assertTrue(metaUpdated);

    verifyMetaRowsAreUpdated(ct);

    LOG.info("END testMasterCrashDuringMetaMigration");
  }
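
  /**
   * Full-scans hbase:meta and asserts that every HRegionInfo cell, including any
   * splitA/splitB references, is in the migrated (PB-serialized) form.
   */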
  void verifyMetaRowsAreUpdated(CatalogTracker catalogTracker)
      throws IOException {
    List<Result> results = MetaReader.fullScan(catalogTracker);
    assertTrue(results.size() >= REGION_COUNT);

    for (Result result : results) {
      byte[] hriBytes = result.getValue(HConstants.CATALOG_FAMILY,
          HConstants.REGIONINFO_QUALIFIER);
      assertTrue(hriBytes != null && hriBytes.length > 0);
      assertTrue(MetaMigrationConvertingToPB.isMigrated(hriBytes));

      byte[] splitA = result.getValue(HConstants.CATALOG_FAMILY,
          HConstants.SPLITA_QUALIFIER);
      if (splitA != null && splitA.length > 0) {
        assertTrue(MetaMigrationConvertingToPB.isMigrated(splitA));
      }

      byte[] splitB = result.getValue(HConstants.CATALOG_FAMILY,
          HConstants.SPLITB_QUALIFIER);
      if (splitB != null && splitB.length > 0) {
        assertTrue(MetaMigrationConvertingToPB.isMigrated(splitB));
      }
    }
  }
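
  /**
   * Builds a Put that would set the meta version cell back to the 0.92 value.
   * Note: the Put is constructed but never applied here; the method only logs the
   * intended downgrade.
   */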
  private void undoVersionInRoot(CatalogTracker ct) throws IOException {
    Put p = new Put(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());

    p.add(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER,
        Bytes.toBytes(META_VERSION_092));

    LOG.info("Downgraded -ROOT- meta version=" + META_VERSION_092);
  }
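
  /**
   * Inserts numRegions region rows for the given table into hbase:meta using the old
   * Writable serialization. Region boundaries are derived by splitting the key range
   * 'aaaaa'..'zzzzz'; at least 3 regions are required.
   */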
  public int createMultiRegionsWithWritableSerialization(final Configuration c,
      final byte[] tableName, int numRegions) throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte [] startKey = Bytes.toBytes("aaaaa");
    byte [] endKey = Bytes.toBytes("zzzzz");
    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] regionStartKeys = new byte[splitKeys.length+1][];
    for (int i=0;i<splitKeys.length;i++) {
      regionStartKeys[i+1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegionsWithWritableSerialization(c, tableName, regionStartKeys);
  }

  public int createMultiRegionsWithWritableSerialization(final Configuration c,
      final byte[] tableName, byte [][] startKeys)
      throws IOException {
    return createMultiRegionsWithWritableSerialization(c,
        TableName.valueOf(tableName), startKeys);
  }
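
  /**
   * Writes one hbase:meta row per start key with the HRegionInfo (and splitA/splitB cells)
   * serialized as a Writable, i.e. the legacy format this test migrates away from.
   */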
  public int createMultiRegionsWithWritableSerialization(final Configuration c,
      final TableName tableName, byte [][] startKeys)
      throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);

    List<HRegionInfo> newRegions
        = new ArrayList<HRegionInfo>(startKeys.length);
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.setDurability(Durability.SKIP_WAL);
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          getBytes(hri));
      // Also write Writable-serialized HRIs into the split daughter cells so the
      // migration has splitA/splitB values to convert.
      put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
          getBytes(hri));
      put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
          getBytes(hri));

      meta.put(put);
      LOG.info("createMultiRegionsWithWritableSerialization: PUT inserted " + hri.toString());

      newRegions.add(hri);
      count++;
    }
    meta.close();
    return count;
  }
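
  /**
   * Serializes an HRegionInfo with the legacy Writable mechanism, mirroring how pre-PB
   * clients wrote it into hbase:meta.
   */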
  @Deprecated
  private byte[] getBytes(HRegionInfo hri) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    try {
      hri.write(out);
      // getData() returns the whole backing buffer; trailing bytes past getLength() are
      // ignored when the Writable is deserialized.
      return out.getData();
    } finally {
      if (out != null) {
        out.close();
      }
    }
  }
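
  /**
   * Like createMultiRegionsWithWritableSerialization, but writes the region rows with
   * PB-serialized HRegionInfos (via MetaEditor), i.e. the already-migrated format.
   */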
  int createMultiRegionsWithPBSerialization(final Configuration c,
      final byte[] tableName, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte [] startKey = Bytes.toBytes("aaaaa");
    byte [] endKey = Bytes.toBytes("zzzzz");
    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] regionStartKeys = new byte[splitKeys.length+1][];
    for (int i=0;i<splitKeys.length;i++) {
      regionStartKeys[i+1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegionsWithPBSerialization(c, tableName, regionStartKeys);
  }

  int createMultiRegionsWithPBSerialization(final Configuration c, final byte[] tableName,
      byte [][] startKeys) throws IOException {
    return createMultiRegionsWithPBSerialization(c,
        TableName.valueOf(tableName), startKeys);
  }

  int createMultiRegionsWithPBSerialization(final Configuration c,
      final TableName tableName,
      byte [][] startKeys) throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);

    List<HRegionInfo> newRegions
        = new ArrayList<HRegionInfo>(startKeys.length);
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
      Put put = MetaEditor.makePutFromRegionInfo(hri);
      put.setDurability(Durability.SKIP_WAL);
      meta.put(put);
      LOG.info("createMultiRegionsWithPBSerialization: PUT inserted " + hri.toString());

      newRegions.add(hri);
      count++;
    }
    meta.close();
    return count;
  }

}