/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.NavigableSet;
import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestRegionObserverScannerOpenHook {
  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
  static final Path DIR = UTIL.getDataTestDir();

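  /**
   * A filter that skips every cell and reports that nothing remains to scan, so any scanner it is
   * attached to returns no data.
   */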
  public static class NoDataFilter extends FilterBase {

    @Override
    public ReturnCode filterKeyValue(Cell ignored) throws IOException {
      return ReturnCode.SKIP;
    }

    @Override
    public boolean filterAllRemaining() throws IOException {
      return true;
    }

    @Override
    public boolean filterRow() throws IOException {
      return true;
    }
  }

  /**
   * A no-op observer with the same behavior as {@link BaseRegionObserver}. Needed since
   * {@link BaseRegionObserver} is an abstract class and cannot be loaded directly.
   */
  public static class EmptyRegionObserver extends BaseRegionObserver {
  }

  /**
   * Don't return any data from a scan by creating a custom {@link StoreScanner}.
   */
  public static class NoDataFromScan extends BaseRegionObserver {
    @Override
    public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
        throws IOException {
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan, targetCols,
        ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
    }
  }
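
  // Note: HRegion.get() is implemented internally as a single-row Scan, so the plain Get in
  // testRegionObserverScanTimeStacking below is enough to exercise this hook.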

  /**
   * Don't allow any data in a flush by creating a custom {@link StoreScanner}.
   */
  public static class NoDataFromFlush extends BaseRegionObserver {
    @Override
    public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan,
          Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
          store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
    }
  }
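
  // With this observer loaded, a flush drains the memstore through the filtered scanner and
  // writes no cells to disk, so reads after the flush find nothing.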

  /**
   * Don't allow any data to be written out in the compaction by creating a custom
   * {@link StoreScanner}.
   */
  public static class NoDataFromCompaction extends BaseRegionObserver {
    @Override
    public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
        long earliestPutTs, InternalScanner s) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan, scanners,
          ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
          HConstants.OLDEST_TIMESTAMP);
    }
  }
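
  // A compaction rewrites the selected store files through the scanner returned here, so
  // producing no cells effectively removes all previously flushed data; the compaction test
  // below relies on this to prove the hook replaced the default scanner.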

  HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
      byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    HRegion r = HRegion.createHRegion(info, path, conf, htd);
    // The following is a hack: a RegionCoprocessorHost is normally loaded by OpenRegionHandler,
    // but we don't start a real region server here, so create the host manually and attach it
    // to the region.
    RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
    r.setCoprocessorHost(host);
    return r;
  }

  @Test
  public void testRegionObserverScanTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    // stack a "no data" observer at the highest priority with a no-op user-priority observer;
    // the scanner returned by the high-priority hook must survive the stacking
    h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObserver.class, Coprocessor.PRIORITY_USER, conf);

    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);

    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
          + r, r.listCells());
  }

  @Test
  public void testRegionObserverFlushTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObserver.class, Coprocessor.PRIORITY_USER, conf);

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);
    region.flushcache();
    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
          + r, r.listCells());
  }

  /**
   * Custom HRegion which uses a CountDownLatch to signal the completion of compaction.
   */
  public static class CompactionCompletionNotifyingRegion extends HRegion {
    private static volatile CountDownLatch compactionStateChangeLatch = null;

    @SuppressWarnings("deprecation")
    public CompactionCompletionNotifyingRegion(Path tableDir, HLog log,
        FileSystem fs, Configuration confParam, HRegionInfo info,
        HTableDescriptor htd, RegionServerServices rsServices) {
      super(tableDir, log, fs, confParam, info, htd, rsServices);
    }

    public CountDownLatch getCompactionStateChangeLatch() {
      if (compactionStateChangeLatch == null) compactionStateChangeLatch = new CountDownLatch(1);
      return compactionStateChangeLatch;
    }

    @Override
    public boolean compact(CompactionContext compaction, Store store,
        CompactionThroughputController throughputController) throws IOException {
      boolean ret = super.compact(compaction, store, throughputController);
      if (ret) compactionStateChangeLatch.countDown();
      return ret;
    }
  }
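
  // The region server's compaction threads invoke compact() on the region, so waiting on this
  // latch lets the test block until the real, system-selected compaction has finished instead
  // of polling for its side effects.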

  /**
   * Unfortunately, the easiest way to test this is to spin up a mini-cluster, since we want to
   * exercise the usual compaction mechanism on the region rather than going through the backdoor
   * to the region.
   */
  @Test
  public void testRegionObserverCompactionTimeStacking() throws Exception {
    // setup a mini cluster so we can do a real compaction on a region
    Configuration conf = UTIL.getConfiguration();
    conf.setClass(HConstants.REGION_IMPL, CompactionCompletionNotifyingRegion.class, HRegion.class);
    conf.setInt("hbase.hstore.compaction.min", 2);
    UTIL.startMiniCluster();
    String tableName = "testRegionObserverCompactionTimeStacking";
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] A = Bytes.toBytes("A");
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor(A));
    desc.addCoprocessor(EmptyRegionObserver.class.getName(), null, Coprocessor.PRIORITY_USER, null);
    desc.addCoprocessor(NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST,
      null);

    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(desc);

    HTable table = new HTable(conf, desc.getTableName());

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();

    HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
    List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
    assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
    HRegion region = regions.get(0);
    admin.flush(region.getRegionName());
    CountDownLatch latch = ((CompactionCompletionNotifyingRegion)region)
        .getCompactionStateChangeLatch();

    // put another row and flush that too
    put = new Put(Bytes.toBytes("anotherrow"));
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();
    admin.flush(region.getRegionName());

    // the two flushes created two store files, which meets hbase.hstore.compaction.min and
    // triggers a system compaction that should get rid of the data; wait for that compaction
    // to complete
    latch.await();
    // check both rows to ensure that they aren't there
    Get get = new Get(ROW);
    Result r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
          + r, r.listCells());

    get = new Get(Bytes.toBytes("anotherrow"));
    r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
          + r, r.listCells());

    table.close();
    UTIL.shutdownMiniCluster();
  }
}