
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.migration;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.io.FileLink;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileV1Detector;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.util.ToolRunner;
import org.apache.zookeeper.KeeperException;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import com.google.protobuf.InvalidProtocolBufferException;

/**
 * Upgrading to 0.96 involves detecting HFileV1 files in the existing cluster, updating the
 * namespace layout and updating znodes. This class tests HFileV1 detection and znode upgrading.
 * Upgrading the namespace is tested in {@link TestNamespaceUpgrade}.
 */
@Category(MediumTests.class)
public class TestUpgradeTo96 {

  static final Log LOG = LogFactory.getLog(TestUpgradeTo96.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  /**
   * underlying file system instance
   */
  private static FileSystem fs;
  /**
   * hbase root dir
   */
  private static Path hbaseRootDir;
  private static ZooKeeperWatcher zkw;
  /**
   * replication peer znode (/hbase/replication/peers)
   */
  private static String replicationPeerZnode;
  /**
   * znode of a table
   */
  private static String tableAZnode;
  private static ReplicationPeer peer1;
  /**
   * znode for replication peer1 (/hbase/replication/peers/1)
   */
  private static String peer1Znode;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Start the mini cluster on top of a 0.94 root dir that has data from a 0.94
    // HBase run, and see if we can migrate it to 0.96.
    TEST_UTIL.startMiniZKCluster();
    TEST_UTIL.startMiniDFSCluster(1);

    hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
    fs = FileSystem.get(TEST_UTIL.getConfiguration());
    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), hbaseRootDir);
    zkw = TEST_UTIL.getZooKeeperWatcher();

    Path testdir = TEST_UTIL.getDataTestDir("TestUpgradeTo96");
    // lay out the untarred 0.94 file structure and the 0.94-style znodes
    set94FSLayout(testdir);
    setUp94Znodes();
  }

  /**
   * Lays out the 0.94 file system layout using {@link TestNamespaceUpgrade} APIs.
   * @param testdir local directory under which the 0.94 tarball is extracted
   * @throws Exception if untarring or copying the layout fails
   */
  private static void set94FSLayout(Path testdir) throws Exception {
    File untar = TestNamespaceUpgrade.untar(new File(testdir.toString()));
    if (!fs.exists(hbaseRootDir.getParent())) {
      // create the parent of the root dir first
      fs.mkdirs(hbaseRootDir.getParent());
    }
    FsShell shell = new FsShell(TEST_UTIL.getConfiguration());
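    // copy the untarred 0.94 layout into the mini DFS under the hbase root dir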
    shell.run(new String[] { "-put", untar.toURI().toString(), hbaseRootDir.toString() });
    // See what's in the mini HDFS.
    shell.run(new String[] { "-lsr", "/" });
  }

  /**
   * Sets up znodes as they were used in 0.94. Only the table and replication znodes are
   * upgraded to protobuf; the others are deleted.
   * @throws IOException
   * @throws KeeperException
   */
  private static void setUp94Znodes() throws IOException, KeeperException {
    // add some old znodes, which will be deleted after the upgrade
    String rootRegionServerZnode = ZKUtil.joinZNode(zkw.baseZNode, "root-region-server");
    ZKUtil.createWithParents(zkw, rootRegionServerZnode);
    ZKUtil.createWithParents(zkw, zkw.backupMasterAddressesZNode);
    // add a table znode whose data will be converted to protobuf
    tableAZnode = ZKUtil.joinZNode(zkw.tableZNode, "a");
    ZKUtil.createWithParents(zkw, tableAZnode,
      Bytes.toBytes(ZooKeeperProtos.Table.State.ENABLED.toString()));
    // add replication znodes; the peer data will be converted to protobuf
    String replicationZnode = ZKUtil.joinZNode(zkw.baseZNode, "replication");
    replicationPeerZnode = ZKUtil.joinZNode(replicationZnode, "peers");
    peer1Znode = ZKUtil.joinZNode(replicationPeerZnode, "1");
    peer1 = ReplicationPeer.newBuilder().setClusterkey("abc:123:/hbase").build();
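    // 0.94 stored the peer's cluster key as plain text, so only that string is written below;
    // the upgrade is expected to rewrite it as a protobuf ReplicationPeer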
    ZKUtil.createWithParents(zkw, peer1Znode, Bytes.toBytes(peer1.getClusterkey()));
  }

  /**
   * Tests a 0.94 filesystem for any HFileV1.
   * @throws Exception
   */
  @Test
  public void testHFileV1Detector() throws Exception {
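    // a clean 0.94 layout has no HFileV1 or corrupt files, so the detector tool returns 0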
    assertEquals(0, ToolRunner.run(TEST_UTIL.getConfiguration(), new HFileV1Detector(), null));
  }

  /**
   * Creates a corrupt file and runs the HFileV1 detector tool.
   * @throws Exception
   */
  @Test
  public void testHFileV1DetectorWithCorruptFiles() throws Exception {
    // add a corrupt file.
    Path tablePath = new Path(hbaseRootDir, "foo");
    FileStatus[] regionsDir = fs.listStatus(tablePath);
    if (regionsDir == null) throw new IOException("No regions found for table foo");
    Path columnFamilyDir = null;
    Path targetRegion = null;
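    // pick the first directory that contains a region info file; that is a region dir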
    for (FileStatus s : regionsDir) {
      if (fs.exists(new Path(s.getPath(), HRegionFileSystem.REGION_INFO_FILE))) {
        targetRegion = s.getPath();
        break;
      }
    }
    if (targetRegion == null) throw new IOException("No region dir found for table foo");
    // pick the first column family dir inside the region
    FileStatus[] cfs = fs.listStatus(targetRegion);
    for (FileStatus f : cfs) {
      if (f.isDir()) {
        columnFamilyDir = f.getPath();
        break;
      }
    }
    if (columnFamilyDir == null) throw new IOException("No column family dir found in "
        + targetRegion);
    LOG.debug("target columnFamilyDir: " + columnFamilyDir);
    // now insert a corrupt (empty) file in the column family dir
    Path corruptFile = new Path(columnFamilyDir, "corrupt_file");
    if (!fs.createNewFile(corruptFile)) throw new IOException("Couldn't create corrupt file: "
        + corruptFile);
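    // the empty file cannot be parsed as an HFile, so the detector flags it and returns 1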
    assertEquals(1, ToolRunner.run(TEST_UTIL.getConfiguration(), new HFileV1Detector(), null));
    // remove the corrupt file
    FileSystem.get(TEST_UTIL.getConfiguration()).delete(corruptFile, false);
  }

  @Test
  public void testHFileLink() throws Exception {
    // pass a link, and verify that correct paths are returned.
    Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
    Path aFileLink = new Path(rootDir, "table/2086db948c48/cf/table=21212abcdc33-0906db948c48");
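    // the link name <table>=<region>-<hfile> points at table/21212abcdc33/cf/0906db948c48,
    // which may live under the table dir, the archive dir or the temp dir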
    Path preNamespaceTablePath = new Path(rootDir, "table/21212abcdc33/cf/0906db948c48");
    Path preNamespaceArchivePath =
      new Path(rootDir, ".archive/table/21212abcdc33/cf/0906db948c48");
    Path preNamespaceTempPath = new Path(rootDir, ".tmp/table/21212abcdc33/cf/0906db948c48");
    boolean preNSTablePathExists = false;
    boolean preNSArchivePathExists = false;
    boolean preNSTempPathExists = false;
    assertTrue(HFileLink.isHFileLink(aFileLink));
    HFileLink hFileLink = new HFileLink(TEST_UTIL.getConfiguration(), aFileLink);
    assertTrue(hFileLink.getArchivePath().toString().startsWith(rootDir.toString()));

    HFileV1Detector t = new HFileV1Detector();
    t.setConf(TEST_UTIL.getConfiguration());
    FileLink fileLink = t.getFileLinkWithPreNSPath(aFileLink);
    // assert it has 6 paths to look at (2 NS, 2 pre-NS, and 2 .tmp)
    assertEquals(6, fileLink.getLocations().length);
    for (Path p : fileLink.getLocations()) {
      if (p.equals(preNamespaceArchivePath)) preNSArchivePathExists = true;
      if (p.equals(preNamespaceTablePath)) preNSTablePathExists = true;
      if (p.equals(preNamespaceTempPath)) preNSTempPathExists = true;
    }
    assertTrue(preNSArchivePathExists && preNSTablePathExists && preNSTempPathExists);
  }

  @Test
  public void testADirForHFileV1() throws Exception {
    Path tablePath = new Path(hbaseRootDir, "foo");
    System.out.println("testADirForHFileV1: " + tablePath.makeQualified(fs));
    System.out.println("Passed: " + hbaseRootDir + "/foo");
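    // "-p" + "foo" concatenates to short option -p with value foo, restricting the scan to the
    // 'foo' table dir; a clean dir should yield exit code 0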
    assertEquals(0,
      ToolRunner.run(TEST_UTIL.getConfiguration(), new HFileV1Detector(), new String[] { "-p"
          + "foo" }));
  }

  @Test
  public void testZnodeMigration() throws Exception {
    String rootRSZnode = ZKUtil.joinZNode(zkw.baseZNode, "root-region-server");
    assertTrue(ZKUtil.checkExists(zkw, rootRSZnode) > -1);
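    // run the upgrade tool in execute mode: deprecated znodes (such as root-region-server)
    // are deleted, and the table and replication znodes are rewritten as protobuf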
    ToolRunner.run(TEST_UTIL.getConfiguration(), new UpgradeTo96(), new String[] { "-execute" });
    assertEquals(-1, ZKUtil.checkExists(zkw, rootRSZnode));
    byte[] data = ZKUtil.getData(zkw, tableAZnode);
    assertTrue(ProtobufUtil.isPBMagicPrefix(data));
    checkTableState(data, ZooKeeperProtos.Table.State.ENABLED);
    // ensure replication znodes are there, and protobuffed.
    data = ZKUtil.getData(zkw, peer1Znode);
    assertTrue(ProtobufUtil.isPBMagicPrefix(data));
    checkReplicationPeerData(data, peer1);
  }

  private void checkTableState(byte[] data, State expectedState)
      throws InvalidProtocolBufferException {
    ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
    int magicLen = ProtobufUtil.lengthOfPBMagic();
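    // parse only the payload that follows the PB magic prefix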
    ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build();
    assertEquals(expectedState, t.getState());
  }

  private void checkReplicationPeerData(byte[] data, ReplicationPeer peer)
      throws InvalidProtocolBufferException {
    int magicLen = ProtobufUtil.lengthOfPBMagic();
    ZooKeeperProtos.ReplicationPeer.Builder builder = ZooKeeperProtos.ReplicationPeer.newBuilder();
    assertEquals(peer.getClusterkey(),
      builder.mergeFrom(data, magicLen, data.length - magicLen).build().getClusterkey());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniHBaseCluster();
    TEST_UTIL.shutdownMiniDFSCluster();
    TEST_UTIL.shutdownMiniZKCluster();
  }

}