/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.zookeeper.KeeperException;

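/**
 * Handles the master-side delete of a table (the C_M_DELETE_TABLE event):
 * waits out regions in transition, removes the table's regions from
 * hbase:meta, archives the region data on the filesystem, and finally updates
 * the table descriptor cache, region states and the ZK table state.
 */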
@InterfaceAudience.Private
public class DeleteTableHandler extends TableEventHandler {
  private static final Log LOG = LogFactory.getLog(DeleteTableHandler.class);

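  /**
   * @param tableName table to delete
   * @param server server hosting this handler
   * @param masterServices services of the active master
   */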
  public DeleteTableHandler(TableName tableName, Server server,
      final MasterServices masterServices) {
    super(EventType.C_M_DELETE_TABLE, tableName, server, masterServices);
  }

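  /**
   * Fails fast if the table does not exist; getTableDescriptor() throws when
   * there is no such table.
   */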
  @Override
  protected void prepareWithTableLock() throws IOException {
    // The next call fails if no such table.
    getTableDescriptor();
  }

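  /**
   * Runs the delete under the table lock. The numbered steps in the body mirror
   * the actual sequence: wait on regions in transition, delete from hbase:meta,
   * archive the data under the table's temp directory, then clean up the
   * descriptor cache, region states, ZK table state, and any leftover meta rows.
   * Coprocessor pre/post hooks wrap the whole operation.
   */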
  @Override
  protected void handleTableOperation(List<HRegionInfo> regions)
  throws IOException, KeeperException {
    MasterCoprocessorHost cpHost = ((HMaster) this.server)
        .getCoprocessorHost();
    if (cpHost != null) {
      cpHost.preDeleteTableHandler(this.tableName);
    }

    // 1. Wait for regions in transition to clear
    AssignmentManager am = this.masterServices.getAssignmentManager();
    RegionStates states = am.getRegionStates();
    long waitTime = server.getConfiguration().
      getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
    for (HRegionInfo region : regions) {
      long done = System.currentTimeMillis() + waitTime;
      while (System.currentTimeMillis() < done) {
        if (states.isRegionInState(region, State.FAILED_OPEN)) {
          am.regionOffline(region);
        }
        if (!states.isRegionInTransition(region)) break;
        Threads.sleep(waitingTimeForEvents);
        LOG.debug("Waiting on region in transition: "
          + am.getRegionStates().getRegionTransitionState(region));
      }
      if (states.isRegionInTransition(region)) {
        throw new IOException("Waited hbase.master.wait.on.region (" +
          waitTime + "ms) for region " + region.getRegionNameAsString() +
          " to leave transition");
      }
    }

    // 2. Remove regions from META
    LOG.debug("Deleting regions from META");
    MetaEditor.deleteRegions(this.server.getCatalogTracker(), regions);

    // 3. Move the table to the /hbase/.tmp directory
    MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
    Path tempTableDir = mfs.moveTableToTemp(tableName);

    try {
      // 4. Delete regions from FS (temp directory)
      FileSystem fs = mfs.getFileSystem();
      for (HRegionInfo hri: regions) {
        LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
        HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
            tempTableDir, new Path(tempTableDir, hri.getEncodedName()));
      }

      // 5. Delete table from FS (temp directory)
      if (!fs.delete(tempTableDir, true)) {
        LOG.error("Couldn't delete " + tempTableDir);
      }

      LOG.debug("Table '" + tableName + "' archived!");
    } finally {
      // 6. Update table descriptor cache
      LOG.debug("Removing '" + tableName + "' descriptor.");
      this.masterServices.getTableDescriptors().remove(tableName);

      // 7. Clean up regions of the table in RegionStates.
      LOG.debug("Removing '" + tableName + "' from region states.");
      states.tableDeleted(tableName);

      // 8. If there is an entry for this table in ZK and in the AssignmentManager, remove it.
      LOG.debug("Marking '" + tableName + "' as deleted.");
      am.getZKTable().setDeletedTable(tableName);

      // 9. Clean up any remaining rows for this table
      cleanAnyRemainingRows();
    }

    if (cpHost != null) {
      cpHost.postDeleteTableHandler(this.tableName);
    }
  }

  /**
   * There may be rows for this table left in hbase:meta if the
   * info:regioninfo column was empty because of some write error. Remove ALL
   * rows from hbase:meta that have to do with this table. See HBASE-12980.
   * @throws IOException
   */
  private void cleanAnyRemainingRows() throws IOException {
    Scan tableScan = MetaReader.getScanForTableName(tableName);
    HTable metaTable = new HTable(TableName.META_TABLE_NAME,
      this.masterServices.getCatalogTracker().getConnection());
    try {
      List<Delete> deletes = new ArrayList<Delete>();
      ResultScanner resScanner = metaTable.getScanner(tableScan);
      try {
        for (Result result : resScanner) {
          deletes.add(new Delete(result.getRow()));
        }
      } finally {
        resScanner.close();
      }
      if (!deletes.isEmpty()) {
        LOG.warn("Deleting " + deletes.size() + " vestigial rows of " + this.tableName +
          " from " + TableName.META_TABLE_NAME);
        metaTable.delete(deletes);
      }
    } finally {
      metaTable.close();
    }
  }

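  /**
   * Releases the table lock and tells the TableLockManager the table is gone
   * so its lock data can be cleaned up; failures here are logged and ignored.
   */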
  @Override
  protected void releaseTableLock() {
    super.releaseTableLock();
    try {
      masterServices.getTableLockManager().tableDeleted(tableName);
    } catch (IOException ex) {
      LOG.warn("Received exception from TableLockManager.tableDeleted:", ex); //not critical
    }
  }

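  /**
   * @return a short identifier of the form SimpleClassName-serverName-seqId-tableName
   */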
  @Override
  public String toString() {
    String name = "UnknownServerName";
    if (server != null && server.getServerName() != null) {
      name = server.getServerName().toString();
    }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableName;
  }
}