View Javadoc

1   /**
2    * Copyright The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.migration;
21  
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertTrue;
24  import static org.junit.Assert.assertFalse;
25  
26  import java.io.File;
27  import java.io.IOException;
28  import java.util.ArrayList;
29  import java.util.List;
30  
31  import org.apache.commons.logging.Log;
32  import org.apache.commons.logging.LogFactory;
33  import org.apache.hadoop.conf.Configuration;
34  import org.apache.hadoop.fs.FileSystem;
35  import org.apache.hadoop.fs.FileUtil;
36  import org.apache.hadoop.fs.FsShell;
37  import org.apache.hadoop.fs.Path;
38  import org.apache.hadoop.hbase.HBaseTestingUtility;
39  import org.apache.hadoop.hbase.HColumnDescriptor;
40  import org.apache.hadoop.hbase.HConstants;
41  import org.apache.hadoop.hbase.HRegionInfo;
42  import org.apache.hadoop.hbase.HTableDescriptor;
43  import org.apache.hadoop.hbase.testclassification.MediumTests;
44  import org.apache.hadoop.hbase.NamespaceDescriptor;
45  import org.apache.hadoop.hbase.TableName;
46  import org.apache.hadoop.hbase.Waiter;
47  import org.apache.hadoop.hbase.client.Get;
48  import org.apache.hadoop.hbase.client.HTable;
49  import org.apache.hadoop.hbase.client.Put;
50  import org.apache.hadoop.hbase.client.Result;
51  import org.apache.hadoop.hbase.client.ResultScanner;
52  import org.apache.hadoop.hbase.client.Scan;
53  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
54  import org.apache.hadoop.hbase.regionserver.HRegion;
55  import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
56  import org.apache.hadoop.hbase.security.access.AccessControlLists;
57  import org.apache.hadoop.hbase.util.Bytes;
58  import org.apache.hadoop.hbase.util.FSTableDescriptors;
59  import org.apache.hadoop.hbase.util.FSUtils;
60  import org.apache.hadoop.util.ToolRunner;
61  import org.junit.AfterClass;
62  import org.junit.Assert;
63  import org.junit.BeforeClass;
64  import org.junit.Test;
65  import org.junit.experimental.categories.Category;
66  
67  /**
68   * Test upgrade from no namespace in 0.94 to namespace directory structure.
69   * Mainly tests that tables are migrated and consistent. Also verifies
70   * that snapshots have been migrated correctly.
71   *
 72   * <p>Uses a tarball which is an image of a 0.94 hbase.rootdir.
73   *
74   * <p>Contains tables with currentKeys as the stored keys:
75   * foo, ns1.foo, ns2.foo
76   *
77   * <p>Contains snapshots with snapshot{num}Keys as the contents:
78   * snapshot1Keys, snapshot2Keys
79   *
80   * Image also contains _acl_ table with one region and two storefiles.
81   * This is needed to test the acl table migration.
82   *
83   */
84  @Category(MediumTests.class)
85  public class TestNamespaceUpgrade {
86    static final Log LOG = LogFactory.getLog(TestNamespaceUpgrade.class);
87    private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
88    private final static String snapshot1Keys[] =
89        {"1","10","2","3","4","5","6","7","8","9"};
90    private final static String snapshot2Keys[] =
91        {"1","2","3","4","5","6","7","8","9"};
92    private final static String currentKeys[] =
93        {"1","2","3","4","5","6","7","8","9","A"};
94    private final static String tables[] = {"data", "foo", "ns1.foo","ns.two.foo"};
95  
96    @BeforeClass
97    public static void setUpBeforeClass() throws Exception {
98      // Start up our mini cluster on top of an 0.94 root.dir that has data from
99      // a 0.94 hbase run and see if we can migrate to 0.96
100     TEST_UTIL.startMiniZKCluster();
101     TEST_UTIL.startMiniDFSCluster(1);
102     Path testdir = TEST_UTIL.getDataTestDir("TestNamespaceUpgrade");
103     // Untar our test dir.
104     File untar = untar(new File(testdir.toString()));
105     // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
106     Configuration conf = TEST_UTIL.getConfiguration();
107     FsShell shell = new FsShell(conf);
108     FileSystem fs = FileSystem.get(conf);
109     // find where hbase will root itself, so we can copy filesystem there
110     Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
111     if (!fs.isDirectory(hbaseRootDir.getParent())) {
112       // mkdir at first
113       fs.mkdirs(hbaseRootDir.getParent());
114     }
115     if(org.apache.hadoop.util.VersionInfo.getVersion().startsWith("2.")) {
116       LOG.info("Hadoop version is 2.x, pre-migrating snapshot dir");
117       FileSystem localFS = FileSystem.getLocal(conf);
118       if(!localFS.rename(new Path(untar.toString(), HConstants.OLD_SNAPSHOT_DIR_NAME),
119           new Path(untar.toString(), HConstants.SNAPSHOT_DIR_NAME))) {
120         throw new IllegalStateException("Failed to move snapshot dir to 2.x expectation");
121       }
122     }
123     doFsCommand(shell,
124       new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});
125     doFsCommand(shell, new String [] {"-lsr", "/"});
126     // See whats in minihdfs.
127     Configuration toolConf = TEST_UTIL.getConfiguration();
128     conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
129     ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});
130     assertTrue(FSUtils.getVersion(fs, hbaseRootDir).equals(HConstants.FILE_SYSTEM_VERSION));
131     doFsCommand(shell, new String [] {"-lsr", "/"});
132     TEST_UTIL.startMiniHBaseCluster(1, 1);
133 
134     for(String table: tables) {
135       int count = 0;
136       for(Result res: new HTable(TEST_UTIL.getConfiguration(), table).getScanner(new Scan())) {
137         assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
138       }
139       Assert.assertEquals(currentKeys.length, count);
140     }
141     assertEquals(2, TEST_UTIL.getHBaseAdmin().listNamespaceDescriptors().length);
142 
143     //verify ACL table is migrated
144     HTable secureTable = new HTable(conf, AccessControlLists.ACL_TABLE_NAME);
145     ResultScanner scanner = secureTable.getScanner(new Scan());
146     int count = 0;
147     for(Result r : scanner) {
148       count++;
149     }
150     assertEquals(3, count);
151     assertFalse(TEST_UTIL.getHBaseAdmin().tableExists("_acl_"));
152 
153     //verify ACL table was compacted
154     List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(secureTable.getName());
155     for(HRegion region : regions) {
156       assertEquals(1, region.getStores().size());
157     }
158   }
159 
160    static File untar(final File testdir) throws IOException {
161     // Find the src data under src/test/data
162     final String datafile = "TestNamespaceUpgrade";
163     File srcTarFile = new File(
164       System.getProperty("project.build.testSourceDirectory", "src/test") +
165       File.separator + "data" + File.separator + datafile + ".tgz");
166     File homedir = new File(testdir.toString());
167     File tgtUntarDir = new File(homedir, "hbase");
168     if (tgtUntarDir.exists()) {
169       if (!FileUtil.fullyDelete(tgtUntarDir)) {
170         throw new IOException("Failed delete of " + tgtUntarDir.toString());
171       }
172     }
173     if (!srcTarFile.exists()) {
174       throw new IOException(srcTarFile+" does not exist");
175     }
176     LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
177     FileUtil.unTar(srcTarFile, homedir);
178     Assert.assertTrue(tgtUntarDir.exists());
179     return tgtUntarDir;
180   }
181 
182   private static void doFsCommand(final FsShell shell, final String [] args)
183   throws Exception {
184     // Run the 'put' command.
185     int errcode = shell.run(args);
186     if (errcode != 0) throw new IOException("Failed put; errcode=" + errcode);
187   }
188 
189   @AfterClass
190   public static void tearDownAfterClass() throws Exception {
191     TEST_UTIL.shutdownMiniCluster();
192   }
193 
194   @Test (timeout=300000)
195   public void testSnapshots() throws IOException, InterruptedException {
196     String snapshots[][] = {snapshot1Keys, snapshot2Keys};
197     for(int i = 1; i <= snapshots.length; i++) {
198       for(String table: tables) {
199         TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot"+i, table+"_clone"+i);
200         FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()),
201             FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
202             LOG);
203         int count = 0;
204         for(Result res: new HTable(TEST_UTIL.getConfiguration(), table+"_clone"+i).getScanner(new
205             Scan())) {
206           assertEquals(snapshots[i-1][count++], Bytes.toString(res.getRow()));
207         }
208         Assert.assertEquals(table+"_snapshot"+i, snapshots[i-1].length, count);
209       }
210     }
211   }
212 
213   @Test (timeout=300000)
214   public void testRenameUsingSnapshots() throws Exception {
215     String newNS = "newNS";
216     TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(newNS).build());
217     for(String table: tables) {
218       int count = 0;
219       for(Result res: new HTable(TEST_UTIL.getConfiguration(), table).getScanner(new
220           Scan())) {
221         assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
222       }
223       TEST_UTIL.getHBaseAdmin().snapshot(table + "_snapshot3", table);
224       final String newTableName = newNS + TableName.NAMESPACE_DELIM + table + "_clone3";
225       TEST_UTIL.getHBaseAdmin().cloneSnapshot(table + "_snapshot3", newTableName);
226       Thread.sleep(1000);
227       count = 0;
228       for(Result res: new HTable(TEST_UTIL.getConfiguration(), newTableName).getScanner(new
229           Scan())) {
230         assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
231       }
232       FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath()
233           , LOG);
234       Assert.assertEquals(newTableName, currentKeys.length, count);
235       TEST_UTIL.getHBaseAdmin().flush(newTableName);
236       TEST_UTIL.getHBaseAdmin().majorCompact(newTableName);
237       TEST_UTIL.waitFor(30000, new Waiter.Predicate<IOException>() {
238         @Override
239         public boolean evaluate() throws IOException {
240           try {
241             return TEST_UTIL.getHBaseAdmin().getCompactionState(newTableName) ==
242                 AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
243           } catch (InterruptedException e) {
244             throw new IOException(e);
245           }
246         }
247       });
248     }
249 
250     String nextNS = "nextNS";
251     TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(nextNS).build());
252     for(String table: tables) {
253       String srcTable = newNS + TableName.NAMESPACE_DELIM + table + "_clone3";
254       TEST_UTIL.getHBaseAdmin().snapshot(table + "_snapshot4", srcTable);
255       String newTableName = nextNS + TableName.NAMESPACE_DELIM + table + "_clone4";
256       TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot4", newTableName);
257       FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(),
258         LOG);
259       int count = 0;
260       for(Result res: new HTable(TEST_UTIL.getConfiguration(), newTableName).getScanner(new
261           Scan())) {
262         assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
263       }
264       Assert.assertEquals(newTableName, currentKeys.length, count);
265     }
266   }
267 
268   @Test (timeout=300000)
269   public void testOldDirsAreGonePostMigration() throws IOException {
270     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
271     Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
272     List <String> dirs = new ArrayList<String>(NamespaceUpgrade.NON_USER_TABLE_DIRS);
273     // Remove those that are not renamed
274     dirs.remove(HConstants.HBCK_SIDELINEDIR_NAME);
275     dirs.remove(HConstants.SNAPSHOT_DIR_NAME);
276     dirs.remove(HConstants.HBASE_TEMP_DIRECTORY);
277     for (String dir: dirs) {
278       assertFalse(fs.exists(new Path(hbaseRootDir, dir)));
279     }
280   }
281 
282   @Test (timeout=300000)
283   public void testNewDirsArePresentPostMigration() throws IOException {
284     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
285     // Below list does not include 'corrupt' because there is no 'corrupt' in the tgz
286     String [] newdirs = new String [] {HConstants.BASE_NAMESPACE_DIR,
287       HConstants.HREGION_LOGDIR_NAME};
288     Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
289     for (String dir: newdirs) {
290       assertTrue(dir, fs.exists(new Path(hbaseRootDir, dir)));
291     }
292   }
293 
294   @Test (timeout = 300000)
295   public void testACLTableMigration() throws IOException {
296     Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testACLTable");
297     FileSystem fs = TEST_UTIL.getTestFileSystem();
298     Configuration conf = TEST_UTIL.getConfiguration();
299     byte[] FAMILY = Bytes.toBytes("l");
300     byte[] QUALIFIER = Bytes.toBytes("testUser");
301     byte[] VALUE = Bytes.toBytes("RWCA");
302 
303     // Create a Region
304     HTableDescriptor aclTable = new HTableDescriptor(TableName.valueOf("testACLTable"));
305     aclTable.addFamily(new HColumnDescriptor(FAMILY));
306     FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootDir);
307     fstd.createTableDescriptor(aclTable);
308     HRegionInfo hriAcl = new HRegionInfo(aclTable.getTableName(), null, null);
309     HRegion region = HRegion.createHRegion(hriAcl, rootDir, conf, aclTable);
310     try {
311       // Create rows
312       Put p = new Put(Bytes.toBytes("-ROOT-"));
313       p.addImmutable(FAMILY, QUALIFIER, VALUE);
314       region.put(p);
315       p = new Put(Bytes.toBytes(".META."));
316       p.addImmutable(FAMILY, QUALIFIER, VALUE);
317       region.put(p);
318       p = new Put(Bytes.toBytes("_acl_"));
319       p.addImmutable(FAMILY, QUALIFIER, VALUE);
320       region.put(p);
321 
322       NamespaceUpgrade upgrade = new NamespaceUpgrade();
323       upgrade.updateAcls(region);
324 
325       // verify rows -ROOT- is removed
326       Get g = new Get(Bytes.toBytes("-ROOT-"));
327       Result r = region.get(g);
328       assertTrue(r == null || r.size() == 0);
329 
330       // verify rows _acl_ is renamed to hbase:acl
331       g = new Get(AccessControlLists.ACL_TABLE_NAME.toBytes());
332       r = region.get(g);
333       assertTrue(r != null && r.size() == 1);
334       assertTrue(Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIER)) == 0);
335 
336       // verify rows .META. is renamed to hbase:meta
337       g = new Get(TableName.META_TABLE_NAME.toBytes());
338       r = region.get(g);
339       assertTrue(r != null && r.size() == 1);
340       assertTrue(Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIER)) == 0);
341     } finally {
342       region.close();
343       // Delete the region
344       HRegionFileSystem.deleteRegionFromFileSystem(conf, fs,
345         FSUtils.getTableDir(rootDir, hriAcl.getTable()), hriAcl);
346     }
347   }
348 }