
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.fail;

import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.util.Progressable;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests the ability to specify favored nodes for a region. Favored nodes are
 * assigned to every online region of a test table, data is written and
 * flushed, and the block locations of the resulting store files are then
 * checked against each region's favored datanodes.
 */
@Category(MediumTests.class)
public class TestRegionFavoredNodes {

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static HTable table;
  private static final TableName TABLE_NAME = TableName.valueOf("table");
  private static final byte[] COLUMN_FAMILY = Bytes.toBytes("family");
  private static final int FAVORED_NODES_NUM = 3;
  private static final int REGION_SERVERS = 6;
  private static final int FLUSHES = 3;
  private static Method createWithFavoredNode = null;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
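    // Probe for the DistributedFileSystem#create overload that accepts favored
    // nodes. If this Hadoop version does not have it, leave the field null so
    // the mini cluster is never started and the test below is skipped.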
    try {
      createWithFavoredNode = DistributedFileSystem.class.getDeclaredMethod("create", Path.class,
        FsPermission.class, boolean.class, int.class, short.class, long.class,
        Progressable.class, InetSocketAddress[].class);
    } catch (NoSuchMethodException nm) {
      return;
    }
    TEST_UTIL.startMiniCluster(REGION_SERVERS);
    table = TEST_UTIL.createTable(TABLE_NAME, COLUMN_FAMILY);
    TEST_UTIL.createMultiRegions(table, COLUMN_FAMILY);
    TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    if (createWithFavoredNode == null) {
      return;
    }
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testFavoredNodes() throws Exception {
    Assume.assumeTrue(createWithFavoredNode != null);
    // Get the addresses of the datanodes in the cluster.
    InetSocketAddress[] nodes = new InetSocketAddress[REGION_SERVERS];
    List<DataNode> datanodes = TEST_UTIL.getDFSCluster().getDataNodes();
    Method selfAddress;
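    // Older Hadoop releases expose the datanode's transfer address as
    // getSelfAddr(); newer ones renamed it to getXferAddress(). Probe for
    // whichever method this version provides.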
    try {
      selfAddress = DataNode.class.getMethod("getSelfAddr");
    } catch (NoSuchMethodException ne) {
      selfAddress = DataNode.class.getMethod("getXferAddress");
    }
    for (int i = 0; i < REGION_SERVERS; i++) {
      nodes[i] = (InetSocketAddress) selfAddress.invoke(datanodes.get(i));
    }

    String[] nodeNames = new String[REGION_SERVERS];
    for (int i = 0; i < REGION_SERVERS; i++) {
      nodeNames[i] = nodes[i].getAddress().getHostAddress() + ":" + nodes[i].getPort();
    }

    // For each region, choose some datanodes as the favored nodes, then assign
    // them to the region through the region server.
    for (int i = 0; i < REGION_SERVERS; i++) {
      HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      List<HRegion> regions = server.getOnlineRegions(TABLE_NAME);
      for (HRegion region : regions) {
        List<HBaseProtos.ServerName> favoredNodes =
            new ArrayList<HBaseProtos.ServerName>(FAVORED_NODES_NUM);
        String encodedRegionName = region.getRegionInfo().getEncodedName();
        for (int j = 0; j < FAVORED_NODES_NUM; j++) {
          HBaseProtos.ServerName.Builder b = HBaseProtos.ServerName.newBuilder();
          b.setHostName(nodes[(i + j) % REGION_SERVERS].getAddress().getHostAddress());
          b.setPort(nodes[(i + j) % REGION_SERVERS].getPort());
          // The start code is not meaningful for a favored node; -1 is a placeholder.
          b.setStartCode(-1);
          favoredNodes.add(b.build());
        }
        server.updateRegionFavoredNodesMapping(encodedRegionName, favoredNodes);
      }
    }

    // Write some data to each region and flush. Repeat some number of times to
    // get multiple files for each region.
    for (int i = 0; i < FLUSHES; i++) {
      TEST_UTIL.loadTable(table, COLUMN_FAMILY, false);
      TEST_UTIL.flush();
    }

    // For each region, check the block locations of each file and ensure that
    // they are consistent with the favored nodes for that region.
    for (int i = 0; i < REGION_SERVERS; i++) {
      HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      List<HRegion> regions = server.getOnlineRegions(TABLE_NAME);
      for (HRegion region : regions) {
        List<String> files = region.getStoreFileList(new byte[][] { COLUMN_FAMILY });
        for (String file : files) {
          FileStatus status = TEST_UTIL.getDFSCluster().getFileSystem()
              .getFileStatus(new Path(new URI(file).getPath()));
          BlockLocation[] lbks =
              ((DistributedFileSystem) TEST_UTIL.getDFSCluster().getFileSystem())
                  .getFileBlockLocations(status, 0, Long.MAX_VALUE);
          for (BlockLocation lbk : lbks) {
            locations:
            for (String info : lbk.getNames()) {
              for (int j = 0; j < FAVORED_NODES_NUM; j++) {
                if (info.equals(nodeNames[(i + j) % REGION_SERVERS])) {
                  continue locations;
                }
              }
              // This block was at a location that was not a favored location.
              fail("Block location " + info + " not a favored node");
            }
          }
        }
      }
    }
  }
}