/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests merging a normal table's regions.
 */
@Category(MediumTests.class)
public class TestMergeTable {
  private static final Log LOG = LogFactory.getLog(TestMergeTable.class);
  private final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static final byte [] COLUMN_NAME = Bytes.toBytes("contents");
  private static final byte [] VALUE;
  static {
    // We will use the same value for all rows; the content is not really important here.
    String partialValue = String.valueOf(System.currentTimeMillis());
    StringBuilder val = new StringBuilder();
    while (val.length() < 1024) {
      val.append(partialValue);
    }
    VALUE = Bytes.toBytes(val.toString());
  }

  /**
   * Test merge.
   * Hand-makes regions of a mergeable size and adds the hand-made regions to
   * hand-made meta.  The hand-made regions are created offline.  We then start
   * up the mini cluster, disable the hand-made table, and start in on merging.
   * @throws Exception
   */
  @Test (timeout=300000) public void testMergeTable() throws Exception {
    // Table we are manually creating offline.
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("test")));
    desc.addFamily(new HColumnDescriptor(COLUMN_NAME));

    // Set the maximum region size down to 64MB.
    UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 64L * 1024L * 1024L);
    // Make it so we don't split.
    UTIL.getConfiguration().setInt("hbase.regionserver.regionSplitLimit", 0);
    // Start up HDFS.  It's in here that we'll be putting our manually made regions.
    UTIL.startMiniDFSCluster(1);
    // Create the hdfs hbase rootdir.
    Path rootdir = UTIL.createRootDir();
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    if (fs.exists(rootdir)) {
      if (fs.delete(rootdir, true)) {
        LOG.info("Cleaned up existing " + rootdir);
      }
    }

    // Now create three data regions: the first is too large to merge since it
    // will be > 64MB in size.  The other two will be smaller and will be
    // selected for merging.

    // To ensure that the first region is larger than 64MB we need to write at
    // least 65536 of our ~1KB rows.  We make certain by writing 70000.
    byte [] row_70001 = Bytes.toBytes("row_70001");
    byte [] row_80001 = Bytes.toBytes("row_80001");

    // Create the regions and populate them at the same time.  Create the
    // tabledir for them first.
    new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir).createTableDescriptor(desc);
    HRegion [] regions = {
      createRegion(desc, null, row_70001, 1, 70000, rootdir),
      createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
      createRegion(desc, row_80001, null, 80001, 11000, rootdir)
    };

    // Now create the hbase:meta region and insert the data regions created
    // above into it.
    setupMeta(rootdir, regions);
    try {
      LOG.info("Starting mini zk cluster");
      UTIL.startMiniZKCluster();
      LOG.info("Starting mini hbase cluster");
      UTIL.startMiniHBaseCluster(1, 1);
      Configuration c = new Configuration(UTIL.getConfiguration());
      CatalogTracker ct = new CatalogTracker(c);
      ct.start();
      List<HRegionInfo> originalTableRegions =
        MetaReader.getTableRegions(ct, desc.getTableName());
      LOG.info("originalTableRegions size=" + originalTableRegions.size() +
        "; " + originalTableRegions);
      HBaseAdmin admin = new HBaseAdmin(c);
      admin.disableTable(desc.getTableName());
      HMerge.merge(c, FileSystem.get(c), desc.getTableName());
      List<HRegionInfo> postMergeTableRegions =
        MetaReader.getTableRegions(ct, desc.getTableName());
      LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() +
        "; " + postMergeTableRegions);
      assertTrue("originalTableRegions=" + originalTableRegions.size() +
        ", postMergeTableRegions=" + postMergeTableRegions.size(),
        postMergeTableRegions.size() < originalTableRegions.size());
      LOG.info("Done with merge");
    } finally {
      UTIL.shutdownMiniCluster();
      LOG.info("After cluster shutdown");
    }
  }

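  /**
   * Hand-makes an offline region with the given key range under rootdir and
   * loads it with nrows rows (each carrying a ~1KB value) starting at firstRow,
   * flushing periodically along the way.  The region is closed before it is
   * returned.
   */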
  private HRegion createRegion(final HTableDescriptor desc,
      byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir)
  throws IOException {
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
    HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration(), desc);
    LOG.info("Created region " + region.getRegionNameAsString());
    for (int i = firstRow; i < firstRow + nrows; i++) {
      Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
      put.setDurability(Durability.SKIP_WAL);
      put.add(COLUMN_NAME, null, VALUE);
      region.put(put);
      if (i % 10000 == 0) {
        LOG.info("Flushing write #" + i);
        region.flushcache();
      }
    }
    HRegion.closeHRegion(region);
    return region;
  }

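  /**
   * Hand-makes the hbase:meta region offline under rootdir, adds an entry for
   * each of the passed regions, then closes it.
   */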
  protected void setupMeta(Path rootdir, final HRegion [] regions)
  throws IOException {
    HRegion meta =
      HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, rootdir,
      UTIL.getConfiguration(), UTIL.getMetaTableDescriptor());
    for (HRegion r: regions) {
      HRegion.addRegionToMETA(meta, r);
    }
    HRegion.closeHRegion(meta);
  }

}