View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException;
50  
51  /**
52   * An HColumnDescriptor contains information about a column family such as the
53   * number of versions, compression settings, etc.
54   *
55   * It is used as input when creating a table or adding a column.
56   */
57  @InterfaceAudience.Public
58  @InterfaceStability.Evolving
59  public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
  // Writable-serialization version history, kept for backward compatibility:
  // Version  3 was when column names become byte arrays and when we picked up
  // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
  // Version  5 was when bloom filter descriptors were removed.
  // Version  6 adds metadata as a map where keys and values are byte[].
  // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
  // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
  // Version  9 -- add data block encoding
  // Version 10 -- change metadata to standard type.
  // Version 11 -- add column family level configuration.
  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;

  // These constants are used as FileInfo keys
  public static final String COMPRESSION = "COMPRESSION";
  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
  public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
      "ENCODE_ON_DISK";
  public static final String DATA_BLOCK_ENCODING =
      "DATA_BLOCK_ENCODING";
  public static final String BLOCKCACHE = "BLOCKCACHE";
  public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
  public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
  public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
  public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
  /**
   * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
   * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
   * family will be loaded into the cache as soon as the file is opened. These
   * loads will not count as cache misses.
   */
  public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";

  /**
   * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
   * Use smaller block sizes for faster random-access at expense of larger
   * indices (more memory consumption).
   */
  public static final String BLOCKSIZE = "BLOCKSIZE";

  public static final String LENGTH = "LENGTH";
  public static final String TTL = "TTL";
  public static final String BLOOMFILTER = "BLOOMFILTER";
  public static final String FOREVER = "FOREVER";
  public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
  public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
  public static final String MIN_VERSIONS = "MIN_VERSIONS";
  public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
  public static final String COMPRESS_TAGS = "COMPRESS_TAGS";

  @InterfaceStability.Unstable
  public static final String ENCRYPTION = "ENCRYPTION";
  @InterfaceStability.Unstable
  public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";

  /**
   * Default compression type: none.
   */
  public static final String DEFAULT_COMPRESSION =
    Compression.Algorithm.NONE.getName();

  /**
   * Default value of the flag that enables data block encoding on disk, as
   * opposed to encoding in cache only. We encode blocks everywhere by default,
   * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
   */
  public static final boolean DEFAULT_ENCODE_ON_DISK = true;

  /** Default data block encoding algorithm. */
  public static final String DEFAULT_DATA_BLOCK_ENCODING =
      DataBlockEncoding.NONE.toString();

  /**
   * Default number of versions of a record to keep.
   * Read once from configuration key "hbase.column.max.version" (default 1).
   */
  public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
    "hbase.column.max.version", 1);

  /**
   * Default is not to keep a minimum of versions.
   */
  public static final int DEFAULT_MIN_VERSIONS = 0;

  /*
   * Cache here the blocksize attribute value (parsed lazily).
   * Question: its OK to cache since when we're reenable, we create a new HCD?
   */
  private volatile Integer blocksize = null;

  /**
   * Default setting for whether to serve from memory or not.
   */
  public static final boolean DEFAULT_IN_MEMORY = false;

  /**
   * Default setting for preventing deleted from being collected immediately.
   */
  public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE;

  /**
   * Default setting for whether to use a block cache or not.
   */
  public static final boolean DEFAULT_BLOCKCACHE = true;

  /**
   * Default setting for whether to cache data blocks on write if block caching
   * is enabled.
   */
  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;

  /**
   * Default setting for whether to cache index blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;

  /**
   * Default size of blocks in files stored to the filesytem (hfiles).
   */
  public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;

  /**
   * Default setting for whether or not to use bloomfilters.
   */
  public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();

  /**
   * Default setting for whether to cache bloom filter blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;

  /**
   * Default time to live of cell contents.
   */
  public static final int DEFAULT_TTL = HConstants.FOREVER;

  /**
   * Default replication scope.
   */
  public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;

  /**
   * Default setting for whether to evict cached blocks from the blockcache on
   * close.
   */
  public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;

  /**
   * Default compress tags along with any type of DataBlockEncoding.
   */
  public static final boolean DEFAULT_COMPRESS_TAGS = true;

  /*
   * Default setting for whether to prefetch blocks into the blockcache on open.
   */
  public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;

  // Attribute-name -> default-value map, used e.g. to know which attributes
  // carry non-default values.  Populated once in the static initializer below.
  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  // Attribute names reserved by HBase itself; user attributes may not use them.
  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
    = new HashSet<ImmutableBytesWritable>();
  static {
      DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
      DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
      DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
      DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
      DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
      DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
      DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
      DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
      DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
      DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
      DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
      DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
      DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
      DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
      DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
      DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
      // Every attribute with a default is reserved, plus the encryption keys
      // which have no defaults.
      for (String s : DEFAULT_VALUES.keySet()) {
        RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
      }
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION)));
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION_KEY)));
  }
245 
  // Sentinel meaning cachedMaxVersions has not yet been parsed from 'values'.
  private static final int UNINITIALIZED = -1;

  // Column family name
  private byte [] name;

  // Column metadata: attribute key/value pairs (COMPRESSION, TTL, ...).
  private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
    new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();

  /**
   * A map which holds the configuration specific to the column family.
   * The keys of the map have the same names as config keys and override the defaults with
   * cf-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  /*
   * Cache the max versions rather than calculate it every time.
   */
  private int cachedMaxVersions = UNINITIALIZED;
266 
  /**
   * Default constructor. Must be present for Writable.
   * @deprecated Used by Writables and Writables are going away.
   */
  @Deprecated
  // Make this private rather than remove after deprecation period elapses.  Its needed by pb
  // deserializations.
  public HColumnDescriptor() {
    this.name = null;
  }
277 
  /**
   * Construct a column descriptor specifying only the family name.
   * The other attributes are defaulted.
   *
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   */
  public HColumnDescriptor(final String familyName) {
    this(Bytes.toBytes(familyName));
  }
288 
289   /**
290    * Construct a column descriptor specifying only the family name
291    * The other attributes are defaulted.
292    *
293    * @param familyName Column family name. Must be 'printable' -- digit or
294    * letter -- and may not contain a <code>:<code>
295    */
296   public HColumnDescriptor(final byte [] familyName) {
297     this (familyName == null || familyName.length <= 0?
298       HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
299       DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
300       DEFAULT_TTL, DEFAULT_BLOOMFILTER);
301   }
302 
303   /**
304    * Constructor.
305    * Makes a deep copy of the supplied descriptor.
306    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
307    * @param desc The descriptor.
308    */
309   public HColumnDescriptor(HColumnDescriptor desc) {
310     super();
311     this.name = desc.name.clone();
312     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
313         desc.values.entrySet()) {
314       this.values.put(e.getKey(), e.getValue());
315     }
316     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
317       this.configuration.put(e.getKey(), e.getValue());
318     }
319     setMaxVersions(desc.getMaxVersions());
320   }
321 
  /**
   * Constructor.  Delegates with a default blocksize and replication scope.
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param maxVersions Maximum number of versions to keep
   * @param compression Compression type
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
      final String compression, final boolean inMemory,
      final boolean blockCacheEnabled,
      final int timeToLive, final String bloomFilter) {
    this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
      DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
  }
349 
  /**
   * Constructor.  Delegates with default min-versions, keep-deleted-cells,
   * encode-on-disk and data-block-encoding settings.
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param maxVersions Maximum number of versions to keep
   * @param compression Compression type
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param blocksize Block size to use when writing out storefiles.  Use
   * smaller block sizes for faster random-access at expense of larger indices
   * (more memory consumption).  Default is usually 64k.
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   * @param scope The scope tag for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
      final String compression, final boolean inMemory,
      final boolean blockCacheEnabled, final int blocksize,
      final int timeToLive, final String bloomFilter, final int scope) {
    this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
        compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
        inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
        scope);
  }
383 
  /**
   * Constructor.  Validates the family name and version bounds, then applies
   * every supplied attribute through the corresponding setter.
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param minVersions Minimum number of versions to keep
   * @param maxVersions Maximum number of versions to keep
   * @param keepDeletedCells Whether to retain deleted cells until they expire
   *        up to maxVersions versions.
   * @param compression Compression type
   * @param encodeOnDisk whether to use the specified data block encoding
   *        on disk. If false, the encoding will be used in cache only.
   * @param dataBlockEncoding data block encoding
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param blocksize Block size to use when writing out storefiles.  Use
   * smaller blocksizes for faster random-access at expense of larger indices
   * (more memory consumption).  Default is usually 64k.
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   * @param scope The scope tag for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte[] familyName, final int minVersions,
      final int maxVersions, final KeepDeletedCells keepDeletedCells,
      final String compression, final boolean encodeOnDisk,
      final String dataBlockEncoding, final boolean inMemory,
      final boolean blockCacheEnabled, final int blocksize,
      final int timeToLive, final String bloomFilter, final int scope) {
    isLegalFamilyName(familyName);
    this.name = familyName;

    if (maxVersions <= 0) {
      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
      // Until there is support, consider 0 or < 0 -- a configuration error.
      throw new IllegalArgumentException("Maximum versions must be positive");
    }

    if (minVersions > 0) {
      // A minimum only makes sense when old versions expire via TTL, and it
      // must leave room below the maximum.
      if (timeToLive == HConstants.FOREVER) {
        throw new IllegalArgumentException("Minimum versions requires TTL.");
      }
      if (minVersions >= maxVersions) {
        throw new IllegalArgumentException("Minimum versions must be < "
            + "maximum versions.");
      }
    }

    setMaxVersions(maxVersions);
    setMinVersions(minVersions);
    setKeepDeletedCells(keepDeletedCells);
    setInMemory(inMemory);
    setBlockCacheEnabled(blockCacheEnabled);
    setTimeToLive(timeToLive);
    // NOTE(review): toUpperCase() without an explicit Locale is sensitive to
    // the default locale (e.g. Turkish dotless-i) -- confirm inputs are ASCII.
    setCompressionType(Compression.Algorithm.
      valueOf(compression.toUpperCase()));
    setDataBlockEncoding(DataBlockEncoding.
        valueOf(dataBlockEncoding.toUpperCase()));
    setBloomFilterType(BloomType.
      valueOf(bloomFilter.toUpperCase()));
    setBlocksize(blocksize);
    setScope(scope);
  }
454 
455   /**
456    * @param b Family name.
457    * @return <code>b</code>
458    * @throws IllegalArgumentException If not null and not a legitimate family
459    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
460    * <code>b</code> can be null when deserializing).  Cannot start with a '.'
461    * either. Also Family can not be an empty value or equal "recovered.edits".
462    */
463   public static byte [] isLegalFamilyName(final byte [] b) {
464     if (b == null) {
465       return b;
466     }
467     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
468     if (b[0] == '.') {
469       throw new IllegalArgumentException("Family names cannot start with a " +
470         "period: " + Bytes.toString(b));
471     }
472     for (int i = 0; i < b.length; i++) {
473       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
474         throw new IllegalArgumentException("Illegal character <" + b[i] +
475           ">. Family names cannot contain control characters or colons: " +
476           Bytes.toString(b));
477       }
478     }
479     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
480     if (Bytes.equals(recoveredEdit, b)) {
481       throw new IllegalArgumentException("Family name cannot be: " +
482           HConstants.RECOVERED_EDITS_DIR);
483     }
484     return b;
485   }
486 
487   /**
488    * @return Name of this column family
489    */
490   public byte [] getName() {
491     return name;
492   }
493 
494   /**
495    * @return Name of this column family
496    */
497   public String getNameAsString() {
498     return Bytes.toString(this.name);
499   }
500 
501   /**
502    * @param key The key.
503    * @return The value.
504    */
505   public byte[] getValue(byte[] key) {
506     ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
507     if (ibw == null)
508       return null;
509     return ibw.get();
510   }
511 
512   /**
513    * @param key The key.
514    * @return The value as a string.
515    */
516   public String getValue(String key) {
517     byte[] value = getValue(Bytes.toBytes(key));
518     if (value == null)
519       return null;
520     return Bytes.toString(value);
521   }
522 
523   /**
524    * @return All values.
525    */
526   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
527     // shallow pointer copy
528     return Collections.unmodifiableMap(values);
529   }
530 
531   /**
532    * @param key The key.
533    * @param value The value.
534    * @return this (for chained invocation)
535    */
536   public HColumnDescriptor setValue(byte[] key, byte[] value) {
537     values.put(new ImmutableBytesWritable(key),
538       new ImmutableBytesWritable(value));
539     return this;
540   }
541 
542   /**
543    * @param key Key whose key and value we're to remove from HCD parameters.
544    */
545   public void remove(final byte [] key) {
546     values.remove(new ImmutableBytesWritable(key));
547   }
548 
549   /**
550    * @param key The key.
551    * @param value The value.
552    * @return this (for chained invocation)
553    */
554   public HColumnDescriptor setValue(String key, String value) {
555     if (value == null) {
556       remove(Bytes.toBytes(key));
557     } else {
558       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
559     }
560     return this;
561   }
562 
563   /** @return compression type being used for the column family */
564   public Compression.Algorithm getCompression() {
565     String n = getValue(COMPRESSION);
566     if (n == null) {
567       return Compression.Algorithm.NONE;
568     }
569     return Compression.Algorithm.valueOf(n.toUpperCase());
570   }
571 
572   /** @return compression type being used for the column family for major
573       compression */
574   public Compression.Algorithm getCompactionCompression() {
575     String n = getValue(COMPRESSION_COMPACT);
576     if (n == null) {
577       return getCompression();
578     }
579     return Compression.Algorithm.valueOf(n.toUpperCase());
580   }
581 
582   /** @return maximum number of versions */
583   public int getMaxVersions() {
584     if (this.cachedMaxVersions == UNINITIALIZED) {
585       String v = getValue(HConstants.VERSIONS);
586       this.cachedMaxVersions = Integer.parseInt(v);
587     }
588     return this.cachedMaxVersions;
589   }
590 
591   /**
592    * @param maxVersions maximum number of versions
593    * @return this (for chained invocation)
594    */
595   public HColumnDescriptor setMaxVersions(int maxVersions) {
596     if (maxVersions <= 0) {
597       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
598       // Until there is support, consider 0 or < 0 -- a configuration error.
599       throw new IllegalArgumentException("Maximum versions must be positive");
600     }    
601     if (maxVersions < this.getMinVersions()) {      
602         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
603             + " while minVersion is " + this.getMinVersions()
604             + ". Maximum versions must be >= minimum versions ");      
605     }
606     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
607     cachedMaxVersions = maxVersions;
608     return this;
609   }
610 
611   /**
612    * @return The storefile/hfile blocksize for this column family.
613    */
614   public synchronized int getBlocksize() {
615     if (this.blocksize == null) {
616       String value = getValue(BLOCKSIZE);
617       this.blocksize = (value != null)?
618         Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
619     }
620     return this.blocksize.intValue();
621   }
622 
623   /**
624    * @param s Blocksize to use when writing out storefiles/hfiles on this
625    * column family.
626    * @return this (for chained invocation)
627    */
628   public HColumnDescriptor setBlocksize(int s) {
629     setValue(BLOCKSIZE, Integer.toString(s));
630     this.blocksize = null;
631     return this;
632   }
633 
  /**
   * @return Compression type setting; alias for {@link #getCompression()}.
   */
  public Compression.Algorithm getCompressionType() {
    return getCompression();
  }
640 
641   /**
642    * Compression types supported in hbase.
643    * LZO is not bundled as part of the hbase distribution.
644    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
645    * for how to enable it.
646    * @param type Compression type setting.
647    * @return this (for chained invocation)
648    */
649   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
650     return setValue(COMPRESSION, type.getName().toUpperCase());
651   }
652 
  /** @return data block encoding algorithm used on disk; same as
   * {@link #getDataBlockEncoding()} since on-disk and in-cache encoding
   * are no longer distinct. */
  @Deprecated
  public DataBlockEncoding getDataBlockEncodingOnDisk() {
    return getDataBlockEncoding();
  }
658 
  /**
   * This method does nothing now. Flag ENCODE_ON_DISK is not used
   * any more. Data blocks have the same encoding in cache as on disk.
   * Kept only for source compatibility with older callers.
   * @return this (for chained invocation)
   */
  @Deprecated
  public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
    return this;
  }
668 
669   /**
670    * @return the data block encoding algorithm used in block cache and
671    *         optionally on disk
672    */
673   public DataBlockEncoding getDataBlockEncoding() {
674     String type = getValue(DATA_BLOCK_ENCODING);
675     if (type == null) {
676       type = DEFAULT_DATA_BLOCK_ENCODING;
677     }
678     return DataBlockEncoding.valueOf(type);
679   }
680 
681   /**
682    * Set data block encoding algorithm used in block cache.
683    * @param type What kind of data block encoding will be used.
684    * @return this (for chained invocation)
685    */
686   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
687     String name;
688     if (type != null) {
689       name = type.toString();
690     } else {
691       name = DataBlockEncoding.NONE.toString();
692     }
693     return setValue(DATA_BLOCK_ENCODING, name);
694   }
695 
696   /**
697    * Set whether the tags should be compressed along with DataBlockEncoding. When no
698    * DataBlockEncoding is been used, this is having no effect.
699    * 
700    * @param compressTags
701    * @return this (for chained invocation)
702    */
703   public HColumnDescriptor setCompressTags(boolean compressTags) {
704     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
705   }
706 
707   /**
708    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
709    *         DataBlockEncoding is been used, this is having no effect.
710    */
711   public boolean shouldCompressTags() {
712     String compressTagsStr = getValue(COMPRESS_TAGS);
713     boolean compressTags = DEFAULT_COMPRESS_TAGS;
714     if (compressTagsStr != null) {
715       compressTags = Boolean.valueOf(compressTagsStr);
716     }
717     return compressTags;
718   }
719 
  /**
   * @return Compression type setting for compactions; alias for
   * {@link #getCompactionCompression()}.
   */
  public Compression.Algorithm getCompactionCompressionType() {
    return getCompactionCompression();
  }
726 
727   /**
728    * Compression types supported in hbase.
729    * LZO is not bundled as part of the hbase distribution.
730    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
731    * for how to enable it.
732    * @param type Compression type setting.
733    * @return this (for chained invocation)
734    */
735   public HColumnDescriptor setCompactionCompressionType(
736       Compression.Algorithm type) {
737     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
738   }
739 
740   /**
741    * @return True if we are to keep all in use HRegionServer cache.
742    */
743   public boolean isInMemory() {
744     String value = getValue(HConstants.IN_MEMORY);
745     if (value != null)
746       return Boolean.valueOf(value).booleanValue();
747     return DEFAULT_IN_MEMORY;
748   }
749 
  /**
   * @param inMemory True if we are to keep all values in the HRegionServer
   * cache
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setInMemory(boolean inMemory) {
    return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
  }
758 
759   public KeepDeletedCells getKeepDeletedCells() {
760     String value = getValue(KEEP_DELETED_CELLS);
761     if (value != null) {
762       // toUpperCase for backwards compatibility
763       return KeepDeletedCells.valueOf(value.toUpperCase());
764     }
765     return DEFAULT_KEEP_DELETED;
766   }
767 
768   /**
769    * @param keepDeletedCells True if deleted rows should not be collected
770    * immediately.
771    * @return this (for chained invocation)
772    * @deprecated use {@link #setKeepDeletedCells(KeepDeletedCells)}
773    */
774   @Deprecated
775   public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
776     return setValue(KEEP_DELETED_CELLS, (keepDeletedCells ? KeepDeletedCells.TRUE
777         : KeepDeletedCells.FALSE).toString());
778   }
779 
  /**
   * @param keepDeletedCells True if deleted rows should not be collected
   * immediately.
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) {
    return setValue(KEEP_DELETED_CELLS, keepDeletedCells.toString());
  }
788 
789   /**
790    * @return Time-to-live of cell contents, in seconds.
791    */
792   public int getTimeToLive() {
793     String value = getValue(TTL);
794     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
795   }
796 
797   /**
798    * @param timeToLive Time-to-live of cell contents, in seconds.
799    * @return this (for chained invocation)
800    */
801   public HColumnDescriptor setTimeToLive(int timeToLive) {
802     return setValue(TTL, Integer.toString(timeToLive));
803   }
804 
805   /**
806    * @return The minimum number of versions to keep.
807    */
808   public int getMinVersions() {
809     String value = getValue(MIN_VERSIONS);
810     return (value != null)? Integer.valueOf(value).intValue(): 0;
811   }
812 
813   /**
814    * @param minVersions The minimum number of versions to keep.
815    * (used when timeToLive is set)
816    * @return this (for chained invocation)
817    */
818   public HColumnDescriptor setMinVersions(int minVersions) {
819     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
820   }
821 
822   /**
823    * @return True if MapFile blocks should be cached.
824    */
825   public boolean isBlockCacheEnabled() {
826     String value = getValue(BLOCKCACHE);
827     if (value != null)
828       return Boolean.valueOf(value).booleanValue();
829     return DEFAULT_BLOCKCACHE;
830   }
831 
832   /**
833    * @param blockCacheEnabled True if MapFile blocks should be cached.
834    * @return this (for chained invocation)
835    */
836   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
837     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
838   }
839 
840   /**
841    * @return bloom filter type used for new StoreFiles in ColumnFamily
842    */
843   public BloomType getBloomFilterType() {
844     String n = getValue(BLOOMFILTER);
845     if (n == null) {
846       n = DEFAULT_BLOOMFILTER;
847     }
848     return BloomType.valueOf(n.toUpperCase());
849   }
850 
851   /**
852    * @param bt bloom filter type
853    * @return this (for chained invocation)
854    */
855   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
856     return setValue(BLOOMFILTER, bt.toString());
857   }
858 
859    /**
860     * @return the scope tag
861     */
862   public int getScope() {
863     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
864     if (value != null) {
865       return Integer.valueOf(Bytes.toString(value));
866     }
867     return DEFAULT_REPLICATION_SCOPE;
868   }
869 
870  /**
871   * @param scope the scope tag
872   * @return this (for chained invocation)
873   */
874   public HColumnDescriptor setScope(int scope) {
875     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
876   }
877 
878   /**
879    * @return true if we should cache data blocks on write
880    */
881   public boolean shouldCacheDataOnWrite() {
882     String value = getValue(CACHE_DATA_ON_WRITE);
883     if (value != null) {
884       return Boolean.valueOf(value).booleanValue();
885     }
886     return DEFAULT_CACHE_DATA_ON_WRITE;
887   }
888 
889   /**
890    * @param value true if we should cache data blocks on write
891    * @return this (for chained invocation)
892    */
893   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
894     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
895   }
896 
897   /**
898    * @return true if we should cache index blocks on write
899    */
900   public boolean shouldCacheIndexesOnWrite() {
901     String value = getValue(CACHE_INDEX_ON_WRITE);
902     if (value != null) {
903       return Boolean.valueOf(value).booleanValue();
904     }
905     return DEFAULT_CACHE_INDEX_ON_WRITE;
906   }
907 
908   /**
909    * @param value true if we should cache index blocks on write
910    * @return this (for chained invocation)
911    */
912   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
913     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
914   }
915 
916   /**
917    * @return true if we should cache bloomfilter blocks on write
918    */
919   public boolean shouldCacheBloomsOnWrite() {
920     String value = getValue(CACHE_BLOOMS_ON_WRITE);
921     if (value != null) {
922       return Boolean.valueOf(value).booleanValue();
923     }
924     return DEFAULT_CACHE_BLOOMS_ON_WRITE;
925   }
926 
927   /**
928    * @param value true if we should cache bloomfilter blocks on write
929    * @return this (for chained invocation)
930    */
931   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
932     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
933   }
934 
935   /**
936    * @return true if we should evict cached blocks from the blockcache on
937    * close
938    */
939   public boolean shouldEvictBlocksOnClose() {
940     String value = getValue(EVICT_BLOCKS_ON_CLOSE);
941     if (value != null) {
942       return Boolean.valueOf(value).booleanValue();
943     }
944     return DEFAULT_EVICT_BLOCKS_ON_CLOSE;
945   }
946 
947   /**
948    * @param value true if we should evict cached blocks from the blockcache on
949    * close
950    * @return this (for chained invocation)
951    */
952   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
953     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
954   }
955 
956   /**
957    * @return true if we should prefetch blocks into the blockcache on open
958    */
959   public boolean shouldPrefetchBlocksOnOpen() {
960     String value = getValue(PREFETCH_BLOCKS_ON_OPEN);
961    if (value != null) {
962       return Boolean.valueOf(value).booleanValue();
963     }
964     return DEFAULT_PREFETCH_BLOCKS_ON_OPEN;
965   }
966 
967   /**
968    * @param value true if we should prefetch blocks into the blockcache on open
969    * @return this (for chained invocation)
970    */
971   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
972     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
973   }
974 
975   /**
976    * @see java.lang.Object#toString()
977    */
978   @Override
979   public String toString() {
980     StringBuilder s = new StringBuilder();
981 
982     s.append('{');
983     s.append(HConstants.NAME);
984     s.append(" => '");
985     s.append(Bytes.toString(name));
986     s.append("'");
987     s.append(getValues(true));
988     s.append('}');
989     return s.toString();
990   }
991 
992   /**
993    * @return Column family descriptor with only the customized attributes.
994    */
995   public String toStringCustomizedValues() {
996     StringBuilder s = new StringBuilder();
997     s.append('{');
998     s.append(HConstants.NAME);
999     s.append(" => '");
1000     s.append(Bytes.toString(name));
1001     s.append("'");
1002     s.append(getValues(false));
1003     s.append('}');
1004     return s.toString();
1005   }
1006 
  /**
   * Builds the <code>, KEY => 'value'</code> suffix used by
   * {@link #toString()} and {@link #toStringCustomizedValues()}.
   * Reserved attribute keys are printed first; any non-reserved keys are
   * grouped under a METADATA sub-map, and the per-family configuration
   * (if any) under a CONFIGURATION sub-map.
   *
   * @param printDefaults if false, reserved attributes whose value equals
   *   the built-in default are omitted
   * @return the accumulated string fragment (never null, possibly empty)
   */
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    boolean hasConfigKeys = false;

    // print all reserved keys first; non-reserved keys are only noted here
    // and rendered in the METADATA pass below
    for (ImmutableBytesWritable k : values.keySet()) {
      if (!RESERVED_KEYWORDS.contains(k)) {
        hasConfigKeys = true;
        continue;
      }
      String key = Bytes.toString(k.get());
      String value = Bytes.toStringBinary(values.get(k).get());
      // skip values still at their default unless defaults were requested
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        s.append(", ");
        s.append(key);
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }
    }

    // print all non-reserved, advanced config keys as a separate subset
    if (hasConfigKeys) {
      s.append(", ");
      s.append(HConstants.METADATA).append(" => ");
      s.append('{');
      boolean printComma = false;
      for (ImmutableBytesWritable k : values.keySet()) {
        if (RESERVED_KEYWORDS.contains(k)) {
          continue;
        }
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printComma) {
          s.append(", ");
        }
        printComma = true;
        s.append('\'').append(key).append('\'');
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }
      s.append('}');
    }

    // finally, the per-family configuration overrides, if any were set
    if (!configuration.isEmpty()) {
      s.append(", ");
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfiguration = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfiguration) s.append(", ");
        printCommaForConfiguration = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
      }
      s.append("}");
    }
    return s;
  }
1069 
1070   public static Unit getUnit(String key) {
1071     Unit unit;
1072       /* TTL for now, we can add more as we neeed */
1073     if (key.equals(HColumnDescriptor.TTL)) {
1074       unit = Unit.TIME_INTERVAL;
1075     } else {
1076       unit = Unit.NONE;
1077     }
1078     return unit;
1079   }
1080 
  /**
   * @return an unmodifiable view of the built-in default attribute values,
   *   keyed by attribute name
   */
  public static Map<String, String> getDefaultValues() {
    return Collections.unmodifiableMap(DEFAULT_VALUES);
  }
1084 
1085   /**
1086    * @see java.lang.Object#equals(java.lang.Object)
1087    */
1088   @Override
1089   public boolean equals(Object obj) {
1090     if (this == obj) {
1091       return true;
1092     }
1093     if (obj == null) {
1094       return false;
1095     }
1096     if (!(obj instanceof HColumnDescriptor)) {
1097       return false;
1098     }
1099     return compareTo((HColumnDescriptor)obj) == 0;
1100   }
1101 
1102   /**
1103    * @see java.lang.Object#hashCode()
1104    */
1105   @Override
1106   public int hashCode() {
1107     int result = Bytes.hashCode(this.name);
1108     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1109     result ^= values.hashCode();
1110     result ^= configuration.hashCode();
1111     return result;
1112   }
1113 
1114   /**
1115    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
1116    */
1117   @Deprecated
1118   public void readFields(DataInput in) throws IOException {
1119     int version = in.readByte();
1120     if (version < 6) {
1121       if (version <= 2) {
1122         Text t = new Text();
1123         t.readFields(in);
1124         this.name = t.getBytes();
1125 //        if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length)
1126 //            > 0) {
1127 //          this.name = stripColon(this.name);
1128 //        }
1129       } else {
1130         this.name = Bytes.readByteArray(in);
1131       }
1132       this.values.clear();
1133       setMaxVersions(in.readInt());
1134       int ordinal = in.readInt();
1135       setCompressionType(Compression.Algorithm.values()[ordinal]);
1136       setInMemory(in.readBoolean());
1137       setBloomFilterType(in.readBoolean() ? BloomType.ROW : BloomType.NONE);
1138       if (getBloomFilterType() != BloomType.NONE && version < 5) {
1139         // If a bloomFilter is enabled and the column descriptor is less than
1140         // version 5, we need to skip over it to read the rest of the column
1141         // descriptor. There are no BloomFilterDescriptors written to disk for
1142         // column descriptors with a version number >= 5
1143         throw new UnsupportedClassVersionError(this.getClass().getName() +
1144             " does not support backward compatibility with versions older " +
1145             "than version 5");
1146       }
1147       if (version > 1) {
1148         setBlockCacheEnabled(in.readBoolean());
1149       }
1150       if (version > 2) {
1151        setTimeToLive(in.readInt());
1152       }
1153     } else {
1154       // version 6+
1155       this.name = Bytes.readByteArray(in);
1156       this.values.clear();
1157       int numValues = in.readInt();
1158       for (int i = 0; i < numValues; i++) {
1159         ImmutableBytesWritable key = new ImmutableBytesWritable();
1160         ImmutableBytesWritable value = new ImmutableBytesWritable();
1161         key.readFields(in);
1162         value.readFields(in);
1163 
1164         // in version 8, the BloomFilter setting changed from bool to enum
1165         if (version < 8 && Bytes.toString(key.get()).equals(BLOOMFILTER)) {
1166           value.set(Bytes.toBytes(
1167               Boolean.getBoolean(Bytes.toString(value.get()))
1168                 ? BloomType.ROW.toString()
1169                 : BloomType.NONE.toString()));
1170         }
1171 
1172         values.put(key, value);
1173       }
1174       if (version == 6) {
1175         // Convert old values.
1176         setValue(COMPRESSION, Compression.Algorithm.NONE.getName());
1177       }
1178       String value = getValue(HConstants.VERSIONS);
1179       this.cachedMaxVersions = (value != null)?
1180           Integer.valueOf(value).intValue(): DEFAULT_VERSIONS;
1181       if (version > 10) {
1182         configuration.clear();
1183         int numConfigs = in.readInt();
1184         for (int i = 0; i < numConfigs; i++) {
1185           ImmutableBytesWritable key = new ImmutableBytesWritable();
1186           ImmutableBytesWritable val = new ImmutableBytesWritable();
1187           key.readFields(in);
1188           val.readFields(in);
1189           configuration.put(
1190             Bytes.toString(key.get(), key.getOffset(), key.getLength()),
1191             Bytes.toString(val.get(), val.getOffset(), val.getLength()));
1192         }
1193       }
1194     }
1195   }
1196 
1197   /**
1198    * @deprecated Writables are going away.  Use {@link #toByteArray()} instead.
1199    */
1200   @Deprecated
1201   public void write(DataOutput out) throws IOException {
1202     out.writeByte(COLUMN_DESCRIPTOR_VERSION);
1203     Bytes.writeByteArray(out, this.name);
1204     out.writeInt(values.size());
1205     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1206         values.entrySet()) {
1207       e.getKey().write(out);
1208       e.getValue().write(out);
1209     }
1210     out.writeInt(configuration.size());
1211     for (Map.Entry<String, String> e : configuration.entrySet()) {
1212       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1213       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1214     }
1215   }
1216 
1217   // Comparable
1218 
1219   public int compareTo(HColumnDescriptor o) {
1220     int result = Bytes.compareTo(this.name, o.getName());
1221     if (result == 0) {
1222       // punt on comparison for ordering, just calculate difference
1223       result = this.values.hashCode() - o.values.hashCode();
1224       if (result < 0)
1225         result = -1;
1226       else if (result > 0)
1227         result = 1;
1228     }
1229     if (result == 0) {
1230       result = this.configuration.hashCode() - o.configuration.hashCode();
1231       if (result < 0)
1232         result = -1;
1233       else if (result > 0)
1234         result = 1;
1235     }
1236     return result;
1237   }
1238 
1239   /**
1240    * @return This instance serialized with pb with pb magic prefix
1241    * @see #parseFrom(byte[])
1242    */
1243   public byte [] toByteArray() {
1244     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1245   }
1246 
1247   /**
1248    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1249    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1250    * @throws DeserializationException
1251    * @see #toByteArray()
1252    */
1253   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1254     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1255     int pblen = ProtobufUtil.lengthOfPBMagic();
1256     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1257     ColumnFamilySchema cfs = null;
1258     try {
1259       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1260     } catch (InvalidProtocolBufferException e) {
1261       throw new DeserializationException(e);
1262     }
1263     return convert(cfs);
1264   }
1265 
1266   /**
1267    * @param cfs
1268    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1269    */
1270   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1271     // Use the empty constructor so we preserve the initial values set on construction for things
1272     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1273     // unrelated-looking test failures that are hard to trace back to here.
1274     HColumnDescriptor hcd = new HColumnDescriptor();
1275     hcd.name = cfs.getName().toByteArray();
1276     for (BytesBytesPair a: cfs.getAttributesList()) {
1277       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1278     }
1279     for (NameStringPair a: cfs.getConfigurationList()) {
1280       hcd.setConfiguration(a.getName(), a.getValue());
1281     }
1282     return hcd;
1283   }
1284 
1285   /**
1286    * @return Convert this instance to a the pb column family type
1287    */
1288   public ColumnFamilySchema convert() {
1289     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1290     builder.setName(ByteStringer.wrap(getName()));
1291     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1292       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1293       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1294       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1295       builder.addAttributes(aBuilder.build());
1296     }
1297     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1298       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1299       aBuilder.setName(e.getKey());
1300       aBuilder.setValue(e.getValue());
1301       builder.addConfiguration(aBuilder.build());
1302     }
1303     return builder.build();
1304   }
1305 
1306   /**
1307    * Getter for accessing the configuration value by key.
1308    */
1309   public String getConfigurationValue(String key) {
1310     return configuration.get(key);
1311   }
1312 
1313   /**
1314    * Getter for fetching an unmodifiable {@link #configuration} map.
1315    */
1316   public Map<String, String> getConfiguration() {
1317     // shallow pointer copy
1318     return Collections.unmodifiableMap(configuration);
1319   }
1320 
1321   /**
1322    * Setter for storing a configuration setting in {@link #configuration} map.
1323    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1324    * @param value String value. If null, removes the configuration.
1325    */
1326   public void setConfiguration(String key, String value) {
1327     if (value == null) {
1328       removeConfiguration(key);
1329     } else {
1330       configuration.put(key, value);
1331     }
1332   }
1333 
1334   /**
1335    * Remove a configuration setting represented by the key from the {@link #configuration} map.
1336    */
1337   public void removeConfiguration(final String key) {
1338     configuration.remove(key);
1339   }
1340 
1341   /**
1342    * Return the encryption algorithm in use by this family
1343    */
1344   @InterfaceStability.Unstable
1345   public String getEncryptionType() {
1346     return getValue(ENCRYPTION);
1347   }
1348 
1349   /**
1350    * Set the encryption algorithm for use with this family
1351    * @param algorithm
1352    */
1353   @InterfaceStability.Unstable
1354   public HColumnDescriptor setEncryptionType(String algorithm) {
1355     setValue(ENCRYPTION, algorithm);
1356     return this;
1357   }
1358 
1359   /** Return the raw crypto key attribute for the family, or null if not set  */
1360   @InterfaceStability.Unstable
1361   public byte[] getEncryptionKey() {
1362     return getValue(Bytes.toBytes(ENCRYPTION_KEY));
1363   }
1364 
1365   /** Set the raw crypto key attribute for the family */
1366   @InterfaceStability.Unstable
1367   public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
1368     setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
1369     return this;
1370   }
1371 }