/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadPoolExecutor;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.exceptions.OperationConflictException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestMultiParallel {
  private static final Log LOG = LogFactory.getLog(TestMultiParallel.class);

  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static final byte[] VALUE = Bytes.toBytes("value");
  private static final byte[] QUALIFIER = Bytes.toBytes("qual");
  private static final String FAMILY = "family";
  private static final String TEST_TABLE = "multi_test_table";
  private static final byte[] BYTES_FAMILY = Bytes.toBytes(FAMILY);
  private static final byte[] ONE_ROW = Bytes.toBytes("xxx");
  private static final byte[][] KEYS = makeKeys();

  private static final int slaves = 5; // also used for testing HTable pool size

  @BeforeClass public static void beforeClass() throws Exception {
    // Uncomment the following lines if more verbosity is needed for
    // debugging (see HBASE-12285 for details).
    //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
    UTIL.startMiniCluster(slaves);
    HTable t = UTIL.createTable(Bytes.toBytes(TEST_TABLE), Bytes.toBytes(FAMILY));
    UTIL.createMultiRegions(t, Bytes.toBytes(FAMILY));
    UTIL.waitTableEnabled(Bytes.toBytes(TEST_TABLE));
    t.close();
  }

  @AfterClass public static void afterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Before public void before() throws Exception {
    LOG.info("before");
    if (UTIL.ensureSomeRegionServersAvailable(slaves)) {
      // Distribute regions
      UTIL.getMiniHBaseCluster().getMaster().balance();

      // Wait until balancing completes and no regions are left in transition
      UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition());
    }
    HConnection conn = HConnectionManager.getConnection(UTIL.getConfiguration());
    conn.clearRegionCache();
    conn.close();
    LOG.info("before done");
  }

  private static byte[][] makeKeys() {
    byte[][] starterKeys = HBaseTestingUtility.KEYS;
    // Create a "non-uniform" test set with the following characteristics:
    // a) Unequal number of keys per region

    // Don't use an integer multiple, so that the number of keys is not a
    // multiple of the number of regions
    int numKeys = (int) ((float) starterKeys.length * 10.33F);

    List<byte[]> keys = new ArrayList<byte[]>();
    for (int i = 0; i < numKeys; i++) {
      int kIdx = i % starterKeys.length;
      byte[] k = starterKeys[kIdx];
      byte[] cp = new byte[k.length + 1];
      System.arraycopy(k, 0, cp, 0, k.length);
      cp[k.length] = (byte) (i % 256);
      keys.add(cp);
    }

    // b) Some duplicate keys (exercising multiple Gets/Puts to the same row,
    // which should work)
    // c) Keys that are not in sorted order (within a region), to ensure that
    // the sorting code and index mapping don't break the functionality
    for (int i = 0; i < 100; i++) {
      int kIdx = i % starterKeys.length;
      byte[] k = starterKeys[kIdx];
      byte[] cp = new byte[k.length + 1];
      System.arraycopy(k, 0, cp, 0, k.length);
      cp[k.length] = (byte) (i % 256);
      keys.add(cp);
    }
    return keys.toArray(new byte[][] { new byte[] {} });
  }


  /**
   * Tests the number of active threads used while doing a batch operation.
   * It inserts one row per region via the batch operation, and then checks
   * the number of active threads.
   * For HBASE-3553
   * @throws IOException
   * @throws InterruptedException
   * @throws NoSuchFieldException
   * @throws SecurityException
   */
  @Test(timeout=300000)
  public void testActiveThreadsCount() throws Exception {
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
    List<Row> puts = constructPutRequests(); // creates a Put for every region
    table.batch(puts);
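    // The HTable thread pool is private, so inspect it via reflection. The
    // batch above touches every region server, so the pool's largest size is
    // expected to have grown to one thread per region server (== slaves).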
    Field poolField = table.getClass().getDeclaredField("pool");
    poolField.setAccessible(true);
    ThreadPoolExecutor tExecutor = (ThreadPoolExecutor) poolField.get(table);
    assertEquals(slaves, tExecutor.getLargestPoolSize());
    table.close();
  }

  @Test(timeout=300000)
  public void testBatchWithGet() throws Exception {
    LOG.info("test=testBatchWithGet");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // load test data
    List<Row> puts = constructPutRequests();
    table.batch(puts);

    // create a list of gets and run it
    List<Row> gets = new ArrayList<Row>();
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      gets.add(get);
    }
    Result[] multiRes = new Result[gets.size()];
    table.batch(gets, multiRes);

    // Same gets using individual call API
    List<Result> singleRes = new ArrayList<Result>();
    for (Row get : gets) {
      singleRes.add(table.get((Get) get));
    }
    // Compare results
    Assert.assertEquals(singleRes.size(), multiRes.length);
    for (int i = 0; i < singleRes.size(); i++) {
      Assert.assertTrue(singleRes.get(i).containsColumn(BYTES_FAMILY, QUALIFIER));
      Cell[] singleKvs = singleRes.get(i).rawCells();
      Cell[] multiKvs = multiRes[i].rawCells();
      for (int j = 0; j < singleKvs.length; j++) {
        Assert.assertEquals(singleKvs[j], multiKvs[j]);
        Assert.assertEquals(0, Bytes.compareTo(CellUtil.cloneValue(singleKvs[j]),
            CellUtil.cloneValue(multiKvs[j])));
      }
    }
    table.close();
  }

  @Test
  public void testBadFam() throws Exception {
    LOG.info("test=testBadFam");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    List<Row> actions = new ArrayList<Row>();
    Put p = new Put(Bytes.toBytes("row1"));
    p.add(Bytes.toBytes("bad_family"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
    actions.add(p);
    p = new Put(Bytes.toBytes("row2"));
    p.add(BYTES_FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value"));
    actions.add(p);

    // row1 and row2 should be in the same region.

    Object[] r = new Object[actions.size()];
    try {
      table.batch(actions, r);
      fail();
    } catch (RetriesExhaustedWithDetailsException ex) {
      LOG.debug(ex);
      // good!
      assertFalse(ex.mayHaveClusterIssues());
    }
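    // The batch partially succeeds: the put against the nonexistent family
    // comes back as a Throwable, while the put against the valid family
    // still yields a Result.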
    assertEquals(2, r.length);
    assertTrue(r[0] instanceof Throwable);
    assertTrue(r[1] instanceof Result);
    table.close();
  }

  @Test (timeout=300000)
  public void testFlushCommitsNoAbort() throws Exception {
    LOG.info("test=testFlushCommitsNoAbort");
    doTestFlushCommits(false);
  }

  /**
   * Only run one Multi test with a forced RegionServer abort. Otherwise, the
   * unit tests will take an unnecessarily long time to run.
   *
   * @throws Exception
   */
  @Test (timeout=300000)
  public void testFlushCommitsWithAbort() throws Exception {
    LOG.info("test=testFlushCommitsWithAbort");
    doTestFlushCommits(true);
  }

  /**
   * Set table auto flush to false and test flushing commits
   * @param doAbort true to abort one region server during the test
   * @throws Exception
   */
  private void doTestFlushCommits(boolean doAbort) throws Exception {
    // Load the data
    LOG.info("get new table");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
    table.setAutoFlush(false, true);
    table.setWriteBufferSize(10 * 1024 * 1024);
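    // With auto flush disabled, each put below only lands in the client-side
    // write buffer (10 MB here); nothing is sent to the servers until
    // flushCommits() is called or the buffer fills up.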

    LOG.info("constructPutRequests");
    List<Row> puts = constructPutRequests();
    for (Row put : puts) {
      table.put((Put) put);
    }
    LOG.info("puts");
    table.flushCommits();
    final int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()
        .size();
    assert liveRScount > 0;
    JVMClusterUtil.RegionServerThread liveRS = UTIL.getMiniHBaseCluster()
        .getLiveRegionServerThreads().get(0);
    if (doAbort) {
      liveRS.getRegionServer().abort("Aborting for tests",
          new Exception("doTestFlushCommits"));
      // Waiting until the aborted server has no regions online ensures that
      // the master has detected the dead server and re-assigned its regions,
      // and that the earlier writes completed successfully before the abort.
      while (liveRS.getRegionServer().getNumberOfOnlineRegions() != 0) {
        Thread.sleep(10);
      }
      // Try putting more keys after the abort (same keys/qualifiers), just
      // validating that no exceptions are thrown.
      puts = constructPutRequests();
      for (Row put : puts) {
        table.put((Put) put);
      }

      table.flushCommits();
    }

    LOG.info("validating loaded data");
    validateLoadedData(table);

    // Validate server and region count
    List<JVMClusterUtil.RegionServerThread> liveRSs = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
    int count = 0;
    for (JVMClusterUtil.RegionServerThread t: liveRSs) {
      count++;
      LOG.info("Count=" + count + ", Alive=" + t.getRegionServer());
    }
    LOG.info("Count=" + count);
    Assert.assertEquals("Server count=" + count + ", abort=" + doAbort,
        (doAbort ? (liveRScount - 1) : liveRScount), count);
    for (JVMClusterUtil.RegionServerThread t: liveRSs) {
      int regions = ProtobufUtil.getOnlineRegions(t.getRegionServer()).size();
      // Assert.assertTrue("Count of regions=" + regions, regions > 10);
    }
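    // After an abort, wait until the master's cluster status reflects the
    // dead server and no regions are left in transition, so later tests
    // start against a stable cluster.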
    if (doAbort) {
      UTIL.getMiniHBaseCluster().waitOnRegionServer(0);
      UTIL.waitFor(15 * 1000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return UTIL.getMiniHBaseCluster().getMaster()
              .getClusterStatus().getServersSize() == (liveRScount - 1);
        }
      });
      UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition());
    }

    table.close();
    LOG.info("done");
  }

  @Test (timeout=300000)
  public void testBatchWithPut() throws Exception {
    LOG.info("test=testBatchWithPut");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // put multiple rows using a batch
    List<Row> puts = constructPutRequests();

    Object[] results = table.batch(puts);
    validateSizeAndEmpty(results, KEYS.length);

    if (true) {
      int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size();
      assert liveRScount > 0;
      JVMClusterUtil.RegionServerThread liveRS =
        UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().get(0);
      liveRS.getRegionServer().abort("Aborting for tests", new Exception("testBatchWithPut"));
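      // A second batch issued after the abort is still expected to succeed:
      // the client should retry against the regions once the master has
      // re-assigned them to the surviving servers.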
      puts = constructPutRequests();
      try {
        results = table.batch(puts);
      } catch (RetriesExhaustedWithDetailsException ree) {
        LOG.info(ree.getExhaustiveDescription());
        // Close only on the failure path; the table is still used below.
        table.close();
        throw ree;
      }
      validateSizeAndEmpty(results, KEYS.length);
    }

    validateLoadedData(table);
    table.close();
  }

  @Test(timeout=300000)
  public void testBatchWithDelete() throws Exception {
    LOG.info("test=testBatchWithDelete");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // Load some data
    List<Row> puts = constructPutRequests();
    Object[] results = table.batch(puts);
    validateSizeAndEmpty(results, KEYS.length);

    // Deletes
    List<Row> deletes = new ArrayList<Row>();
    for (int i = 0; i < KEYS.length; i++) {
      Delete delete = new Delete(KEYS[i]);
      delete.deleteFamily(BYTES_FAMILY);
      deletes.add(delete);
    }
    results = table.batch(deletes);
    validateSizeAndEmpty(results, KEYS.length);

    // Get each row to make sure it was deleted
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Assert.assertFalse(table.exists(get));
    }
    table.close();
  }

  @Test(timeout=300000)
  public void testHTableDeleteWithList() throws Exception {
    LOG.info("test=testHTableDeleteWithList");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // Load some data
    List<Row> puts = constructPutRequests();
    Object[] results = table.batch(puts);
    validateSizeAndEmpty(results, KEYS.length);

    // Deletes
    ArrayList<Delete> deletes = new ArrayList<Delete>();
    for (int i = 0; i < KEYS.length; i++) {
      Delete delete = new Delete(KEYS[i]);
      delete.deleteFamily(BYTES_FAMILY);
      deletes.add(delete);
    }
    table.delete(deletes);
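    // HTable.delete(List) removes the Deletes that were applied successfully
    // from the passed-in list, so an empty list means every delete succeeded.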
    Assert.assertTrue(deletes.isEmpty());

    // Get each row to make sure it was deleted
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Assert.assertFalse(table.exists(get));
    }
    table.close();
  }

  @Test(timeout=300000)
  public void testBatchWithManyColsInOneRowGetAndPut() throws Exception {
    LOG.info("test=testBatchWithManyColsInOneRowGetAndPut");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    List<Row> puts = new ArrayList<Row>();
    for (int i = 0; i < 100; i++) {
      Put put = new Put(ONE_ROW);
      byte[] qual = Bytes.toBytes("column" + i);
      put.add(BYTES_FAMILY, qual, VALUE);
      puts.add(put);
    }
    Object[] results = table.batch(puts);

    // validate
    validateSizeAndEmpty(results, 100);

    // get the data back and validate that it is correct
    List<Row> gets = new ArrayList<Row>();
    for (int i = 0; i < 100; i++) {
      Get get = new Get(ONE_ROW);
      byte[] qual = Bytes.toBytes("column" + i);
      get.addColumn(BYTES_FAMILY, qual);
      gets.add(get);
    }

    Object[] multiRes = table.batch(gets);

    int idx = 0;
    for (Object r : multiRes) {
      byte[] qual = Bytes.toBytes("column" + idx);
      validateResult(r, qual, VALUE);
      idx++;
    }
    table.close();
  }

  @Test(timeout=300000)
  public void testBatchWithIncrementAndAppend() throws Exception {
    LOG.info("test=testBatchWithIncrementAndAppend");
    final byte[] QUAL1 = Bytes.toBytes("qual1");
    final byte[] QUAL2 = Bytes.toBytes("qual2");
    final byte[] QUAL3 = Bytes.toBytes("qual3");
    final byte[] QUAL4 = Bytes.toBytes("qual4");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
    Delete d = new Delete(ONE_ROW);
    table.delete(d);
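    // The row was cleared above so the increment and append results below
    // are deterministic; now seed it with known starting values.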
    Put put = new Put(ONE_ROW);
    put.add(BYTES_FAMILY, QUAL1, Bytes.toBytes("abc"));
    put.add(BYTES_FAMILY, QUAL2, Bytes.toBytes(1L));
    table.put(put);

    Increment inc = new Increment(ONE_ROW);
    inc.addColumn(BYTES_FAMILY, QUAL2, 1);
    inc.addColumn(BYTES_FAMILY, QUAL3, 1);

    Append a = new Append(ONE_ROW);
    a.add(BYTES_FAMILY, QUAL1, Bytes.toBytes("def"));
    a.add(BYTES_FAMILY, QUAL4, Bytes.toBytes("xyz"));
    List<Row> actions = new ArrayList<Row>();
    actions.add(inc);
    actions.add(a);

    Object[] multiRes = table.batch(actions);
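    // Results come back in the same order as the actions list: index 0 holds
    // the Increment result and index 1 the Append result.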
    validateResult(multiRes[1], QUAL1, Bytes.toBytes("abcdef"));
    validateResult(multiRes[1], QUAL4, Bytes.toBytes("xyz"));
    validateResult(multiRes[0], QUAL2, Bytes.toBytes(2L));
    validateResult(multiRes[0], QUAL3, Bytes.toBytes(1L));
    table.close();
  }

  @Test(timeout=300000)
  public void testNonceCollision() throws Exception {
    LOG.info("test=testNonceCollision");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
    Put put = new Put(ONE_ROW);
    put.add(BYTES_FAMILY, QUALIFIER, Bytes.toBytes(0L));

    // Replace the nonce generator with one that returns each nonce twice.
    NonceGenerator cnm = new PerClientRandomNonceGenerator() {
      long lastNonce = -1;
      @Override
      public synchronized long newNonce() {
        long nonce = 0;
        if (lastNonce == -1) {
          lastNonce = nonce = super.newNonce();
        } else {
          nonce = lastNonce;
          lastNonce = -1L;
        }
        return nonce;
      }
    };
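    // Because each nonce is handed out twice, the second operation carrying a
    // given nonce looks like a replay of the first to the server and is
    // rejected with OperationConflictException.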
    NonceGenerator oldCnm =
        HConnectionManager.injectNonceGeneratorForTesting(table.getConnection(), cnm);

    // First test sequential requests.
    try {
      Increment inc = new Increment(ONE_ROW);
      inc.addColumn(BYTES_FAMILY, QUALIFIER, 1L);
      table.increment(inc);
      inc = new Increment(ONE_ROW);
      inc.addColumn(BYTES_FAMILY, QUALIFIER, 1L);
      try {
        table.increment(inc);
        fail("Should have thrown an exception");
      } catch (OperationConflictException ex) {
      }
      Get get = new Get(ONE_ROW);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Result result = table.get(get);
      validateResult(result, QUALIFIER, Bytes.toBytes(1L));

      // Now run a bunch of requests in parallel, exactly half should succeed.
      int numRequests = 40;
      final CountDownLatch startedLatch = new CountDownLatch(numRequests);
      final CountDownLatch startLatch = new CountDownLatch(1);
      final CountDownLatch doneLatch = new CountDownLatch(numRequests);
      for (int i = 0; i < numRequests; ++i) {
        Runnable r = new Runnable() {
          @Override
          public void run() {
            HTable table = null;
            try {
              table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
            } catch (IOException e) {
              fail("Not expected");
            }
            Increment inc = new Increment(ONE_ROW);
            inc.addColumn(BYTES_FAMILY, QUALIFIER, 1L);
            startedLatch.countDown();
            try {
              startLatch.await();
            } catch (InterruptedException e) {
              fail("Not expected");
            }
            try {
              table.increment(inc);
            } catch (OperationConflictException ex) { // Some threads are expected to fail.
            } catch (IOException ioEx) {
              fail("Not expected");
            }
            doneLatch.countDown();
          }
        };
        Threads.setDaemonThreadRunning(new Thread(r));
      }
      startedLatch.await(); // Wait until all threads are ready...
      startLatch.countDown(); // ...and unleash the herd!
      doneLatch.await();
      // Now verify
      get = new Get(ONE_ROW);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      result = table.get(get);
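      // Expected total: 1 from the sequential increment above plus
      // numRequests / 2, since only one of each pair of colliding nonces
      // gets applied.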
      validateResult(result, QUALIFIER, Bytes.toBytes((numRequests / 2) + 1L));
      table.close();
    } finally {
      HConnectionManager.injectNonceGeneratorForTesting(table.getConnection(), oldCnm);
    }
  }

  @Test(timeout=300000)
  public void testBatchWithMixedActions() throws Exception {
    LOG.info("test=testBatchWithMixedActions");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // Load some data to start
    Object[] results = table.batch(constructPutRequests());
    validateSizeAndEmpty(results, KEYS.length);

    // Batch: get, get, put(new col), delete, get, get of put, get of deleted,
    // put
    List<Row> actions = new ArrayList<Row>();

    byte[] qual2 = Bytes.toBytes("qual2");
    byte[] val2 = Bytes.toBytes("putvalue2");

    // 0 get
    Get get = new Get(KEYS[10]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // 1 get
    get = new Get(KEYS[11]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // 2 put of new column
    Put put = new Put(KEYS[10]);
    put.add(BYTES_FAMILY, qual2, val2);
    actions.add(put);

    // 3 delete
    Delete delete = new Delete(KEYS[20]);
    delete.deleteFamily(BYTES_FAMILY);
    actions.add(delete);

    // 4 get
    get = new Get(KEYS[30]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // There used to be a 'get' of a previous put here, but removed
    // since this API really cannot guarantee order in terms of mixed
    // get/puts.

    // 5 put of new column
    put = new Put(KEYS[40]);
    put.add(BYTES_FAMILY, qual2, val2);
    actions.add(put);

    results = table.batch(actions);

    // Validation

    validateResult(results[0]);
    validateResult(results[1]);
    validateEmpty(results[2]);
    validateEmpty(results[3]);
    validateResult(results[4]);
    validateEmpty(results[5]);

    // validate last put, externally from the batch
    get = new Get(KEYS[40]);
    get.addColumn(BYTES_FAMILY, qual2);
    Result r = table.get(get);
    validateResult(r, qual2, val2);

    table.close();
  }

  //// Helper methods ////

  private void validateResult(Object r) {
    validateResult(r, QUALIFIER, VALUE);
  }

  private void validateResult(Object r1, byte[] qual, byte[] val) {
    Result r = (Result) r1;
    Assert.assertTrue(r.containsColumn(BYTES_FAMILY, qual));
    byte[] value = r.getValue(BYTES_FAMILY, qual);
    if (0 != Bytes.compareTo(val, value)) {
      fail("Expected [" + Bytes.toStringBinary(val)
          + "] but got [" + Bytes.toStringBinary(value) + "]");
    }
  }

  private List<Row> constructPutRequests() {
    List<Row> puts = new ArrayList<Row>();
    for (byte[] k : KEYS) {
      Put put = new Put(k);
      put.add(BYTES_FAMILY, QUALIFIER, VALUE);
      puts.add(put);
    }
    return puts;
  }

  private void validateLoadedData(HTable table) throws IOException {
    // get the data back and validate that it is correct
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Result r = table.get(get);
      Assert.assertTrue(r.containsColumn(BYTES_FAMILY, QUALIFIER));
      Assert.assertEquals(0, Bytes.compareTo(VALUE, r
          .getValue(BYTES_FAMILY, QUALIFIER)));
    }
  }

  private void validateEmpty(Object r1) {
    Result result = (Result) r1;
    Assert.assertNotNull(result);
    Assert.assertNull(result.getRow());
    Assert.assertEquals(0, result.rawCells().length);
  }

  private void validateSizeAndEmpty(Object[] results, int expectedSize) {
    // Validate that we got back the same number of Result objects, all empty
    Assert.assertEquals(expectedSize, results.length);
    for (Object result : results) {
      validateEmpty(result);
    }
  }
}
694 }