View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.util;
21  
22  import java.io.IOException;
23  import java.util.ArrayList;
24  import java.util.List;
25  
26  import org.apache.commons.logging.Log;
27  import org.apache.commons.logging.LogFactory;
28  import org.apache.hadoop.fs.Path;
29  import org.apache.hadoop.hbase.Cell;
30  import org.apache.hadoop.hbase.CellUtil;
31  import org.apache.hadoop.hbase.HBaseTestCase;
32  import org.apache.hadoop.hbase.HBaseTestingUtility;
33  import org.apache.hadoop.hbase.HColumnDescriptor;
34  import org.apache.hadoop.hbase.HConstants;
35  import org.apache.hadoop.hbase.HRegionInfo;
36  import org.apache.hadoop.hbase.HTableDescriptor;
37  import org.apache.hadoop.hbase.testclassification.LargeTests;
38  import org.apache.hadoop.hbase.client.Get;
39  import org.apache.hadoop.hbase.client.Put;
40  import org.apache.hadoop.hbase.client.Result;
41  import org.apache.hadoop.hbase.client.Scan;
42  import org.apache.hadoop.hbase.regionserver.HRegion;
43  import org.apache.hadoop.hbase.regionserver.InternalScanner;
44  import org.apache.hadoop.hbase.regionserver.wal.HLog;
45  import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
46  import org.apache.hadoop.hdfs.MiniDFSCluster;
47  import org.apache.hadoop.util.ToolRunner;
48  import org.junit.experimental.categories.Category;
49  
/** Test stand alone merge tool that can merge arbitrary regions */
@Category(LargeTests.class)
public class TestMergeTool extends HBaseTestCase {
  static final Log LOG = LogFactory.getLog(TestMergeTool.class);
  // Mini-cluster helper; created in setUp() and shut down in tearDown().
  HBaseTestingUtility TEST_UTIL;
//  static final byte [] COLUMN_NAME = Bytes.toBytes("contents:");
  // Every test row is written under this single family/qualifier pair.
  static final byte [] FAMILY = Bytes.toBytes("contents");
  static final byte [] QUALIFIER = Bytes.toBytes("dc");

  // Five source regions built in setUp(): overlapping, adjacent, disjoint,
  // and one spanning the whole key space.  See setUp() for the key ranges.
  private final HRegionInfo[] sourceRegions = new HRegionInfo[5];
  private final HRegion[] regions = new HRegion[5];
  private HTableDescriptor desc;
  // rows[i] holds the row keys inserted into regions[i] during setUp().
  private byte [][][] rows;
  private MiniDFSCluster dfsCluster = null;
64  
  @Override
  public void setUp() throws Exception {
    // Set the timeout down else this test will take a while to complete.
    this.conf.setLong("hbase.zookeeper.recoverable.waittime", 10);
    // Make it so we try and connect to a zk that is not there (else we might
    // find a zk ensemble put up by another concurrent test and this will
    // mess up this test.  Choose unlikely port. Default test port is 21818.
    // Default zk port is 2181.
    this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 10001);

    // Low compaction threshold so the merge tool is exercised against
    // compacted stores.
    this.conf.set("hbase.hstore.compactionThreshold", "2");

    // Create table description
    this.desc = new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf("TestMergeTool"));
    this.desc.addFamily(new HColumnDescriptor(FAMILY));

    /*
     * Create the HRegionInfos for the regions.
     */
    // Region 0 will contain the key range [row_0200,row_0300)
    sourceRegions[0] = new HRegionInfo(this.desc.getTableName(),
        Bytes.toBytes("row_0200"),
      Bytes.toBytes("row_0300"));

    // Region 1 will contain the key range [row_0250,row_0400) and overlaps
    // with Region 0
    sourceRegions[1] =
      new HRegionInfo(this.desc.getTableName(),
          Bytes.toBytes("row_0250"),
          Bytes.toBytes("row_0400"));

    // Region 2 will contain the key range [row_0100,row_0200) and is adjacent
    // to Region 0 or the region resulting from the merge of Regions 0 and 1
    sourceRegions[2] =
      new HRegionInfo(this.desc.getTableName(),
          Bytes.toBytes("row_0100"),
          Bytes.toBytes("row_0200"));

    // Region 3 will contain the key range [row_0500,row_0600) and is not
    // adjacent to any of Regions 0, 1, 2 or the merged result of any or all
    // of those regions
    sourceRegions[3] =
      new HRegionInfo(this.desc.getTableName(),
          Bytes.toBytes("row_0500"),
          Bytes.toBytes("row_0600"));

    // Region 4 will have empty start and end keys and overlaps all regions.
    sourceRegions[4] =
      new HRegionInfo(this.desc.getTableName(),
          HConstants.EMPTY_BYTE_ARRAY,
          HConstants.EMPTY_BYTE_ARRAY);

    /*
     * Now create some row keys.  rows[i] are the keys written to regions[i];
     * duplicates here simply overwrite the same row.
     */
    this.rows = new byte [5][][];
    this.rows[0] = Bytes.toByteArrays(new String[] { "row_0210", "row_0280" });
    // Note: "row_035" (three digits) still sorts inside [row_0250,row_0400).
    this.rows[1] = Bytes.toByteArrays(new String[] { "row_0260", "row_0350",
        "row_035" });
    this.rows[2] = Bytes.toByteArrays(new String[] { "row_0110", "row_0175",
        "row_0175", "row_0175"});
    this.rows[3] = Bytes.toByteArrays(new String[] { "row_0525", "row_0560",
        "row_0560", "row_0560", "row_0560"});
    this.rows[4] = Bytes.toByteArrays(new String[] { "row_0050", "row_1000",
        "row_1000", "row_1000", "row_1000", "row_1000" });

    // Start up dfs and point the hbase root/default filesystem at it.
    TEST_UTIL = new HBaseTestingUtility(conf);
    this.dfsCluster = TEST_UTIL.startMiniDFSCluster(2);
    this.fs = this.dfsCluster.getFileSystem();
    System.out.println("fs=" + this.fs);
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
    Path parentdir = fs.getHomeDirectory();
    FSUtils.setRootDir(conf, parentdir);
    fs.mkdirs(parentdir);
    FSUtils.setVersion(fs, parentdir);

    // Note: we must call super.setUp after starting the mini cluster or
    // we will end up with a local file system

    super.setUp();
    try {
      // Create meta region
      createMetaRegion();
      new FSTableDescriptors(conf, this.fs, this.testDir).createTableDescriptor(this.desc);
      /*
       * Create the regions we will merge
       */
      for (int i = 0; i < sourceRegions.length; i++) {
        regions[i] =
          HRegion.createHRegion(this.sourceRegions[i], this.testDir, this.conf,
              this.desc);
        /*
         * Insert data
         */
        for (int j = 0; j < rows[i].length; j++) {
          byte [] row = rows[i][j];
          Put put = new Put(row);
          put.add(FAMILY, QUALIFIER, row);
          regions[i].put(put);
        }
        // Register each region in META so the merge tool can find it.
        HRegion.addRegionToMETA(meta, regions[i]);
      }
      // Close root and meta regions
      closeRootAndMeta();

    } catch (Exception e) {
      // Do not leak the mini cluster if region/meta setup fails.
      TEST_UTIL.shutdownMiniCluster();
      throw e;
    }
  }
176 
177   @Override
178   public void tearDown() throws Exception {
179     super.tearDown();
180     for (int i = 0; i < sourceRegions.length; i++) {
181       HRegion r = regions[i];
182       if (r != null) {
183         HRegion.closeHRegion(r);
184       }
185     }
186     TEST_UTIL.shutdownMiniCluster();
187   }
188 
189   /*
190    * @param msg Message that describes this merge
191    * @param regionName1
192    * @param regionName2
193    * @param log Log to use merging.
194    * @param upperbound Verifying, how high up in this.rows to go.
195    * @return Merged region.
196    * @throws Exception
197    */
198   private HRegion mergeAndVerify(final String msg, final String regionName1,
199     final String regionName2, final HLog log, final int upperbound)
200   throws Exception {
201     Merge merger = new Merge(this.conf);
202     LOG.info(msg);
203     LOG.info("fs2=" + this.conf.get("fs.defaultFS"));
204     int errCode = ToolRunner.run(this.conf, merger,
205       new String[] {this.desc.getTableName().getNameAsString(), regionName1, regionName2}
206     );
207     assertTrue("'" + msg + "' failed with errCode " + errCode, errCode == 0);
208     HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
209 
210     // Now verify that we can read all the rows from regions 0, 1
211     // in the new merged region.
212     HRegion merged = HRegion.openHRegion(mergedInfo, this.desc, log, this.conf);
213     verifyMerge(merged, upperbound);
214     merged.close();
215     LOG.info("Verified " + msg);
216     return merged;
217   }
218 
219   private void verifyMerge(final HRegion merged, final int upperbound)
220   throws IOException {
221     //Test
222     Scan scan = new Scan();
223     scan.addFamily(FAMILY);
224     InternalScanner scanner = merged.getScanner(scan);
225     try {
226     List<Cell> testRes = null;
227       while (true) {
228         testRes = new ArrayList<Cell>();
229         boolean hasNext = scanner.next(testRes);
230         if (!hasNext) {
231           break;
232         }
233       }
234     } finally {
235       scanner.close();
236     }
237 
238     //!Test
239 
240     for (int i = 0; i < upperbound; i++) {
241       for (int j = 0; j < rows[i].length; j++) {
242         Get get = new Get(rows[i][j]);
243         get.addFamily(FAMILY);
244         Result result = merged.get(get);
245         assertEquals(1, result.size());
246         byte [] bytes = CellUtil.cloneValue(result.rawCells()[0]);
247         assertNotNull(Bytes.toStringBinary(rows[i][j]), bytes);
248         assertTrue(Bytes.equals(bytes, rows[i][j]));
249       }
250     }
251   }
252 
253   /**
254    * Test merge tool.
255    * @throws Exception
256    */
257   public void testMergeTool() throws Exception {
258     // First verify we can read the rows from the source regions and that they
259     // contain the right data.
260     for (int i = 0; i < regions.length; i++) {
261       for (int j = 0; j < rows[i].length; j++) {
262         Get get = new Get(rows[i][j]);
263         get.addFamily(FAMILY);
264         Result result = regions[i].get(get);
265         byte [] bytes =  CellUtil.cloneValue(result.rawCells()[0]);
266         assertNotNull(bytes);
267         assertTrue(Bytes.equals(bytes, rows[i][j]));
268       }
269       // Close the region and delete the log
270       HRegion.closeHRegion(regions[i]);
271     }
272 
273     // Create a log that we can reuse when we need to open regions
274     Path logPath = new Path("/tmp");
275     String logName = HConstants.HREGION_LOGDIR_NAME + "_"
276       + System.currentTimeMillis();
277     LOG.info("Creating log " + logPath.toString() + "/" + logName);
278 
279     HLog log = HLogFactory.createHLog(this.fs, logPath,
280         logName, this.conf);
281 
282     try {
283        // Merge Region 0 and Region 1
284       HRegion merged = mergeAndVerify("merging regions 0 and 1 ",
285         this.sourceRegions[0].getRegionNameAsString(),
286         this.sourceRegions[1].getRegionNameAsString(), log, 2);
287 
288       // Merge the result of merging regions 0 and 1 with region 2
289       merged = mergeAndVerify("merging regions 0+1 and 2",
290         merged.getRegionInfo().getRegionNameAsString(),
291         this.sourceRegions[2].getRegionNameAsString(), log, 3);
292 
293       // Merge the result of merging regions 0, 1 and 2 with region 3
294       merged = mergeAndVerify("merging regions 0+1+2 and 3",
295         merged.getRegionInfo().getRegionNameAsString(),
296         this.sourceRegions[3].getRegionNameAsString(), log, 4);
297 
298       // Merge the result of merging regions 0, 1, 2 and 3 with region 4
299       merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
300         merged.getRegionInfo().getRegionNameAsString(),
301         this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
302     } finally {
303       log.closeAndDelete();
304     }
305   }
306 
307 }
308