Skip to content

Commit 5529cb4

Browse files
committed
Refactor reopen-throttling configuration lookup into ReopenTableRegionsProcedure
Add ReopenTableRegionsProcedure.throttled to create a procedure with the throttling configuration.
1 parent 2e9b0df commit 5529cb4

File tree

3 files changed

+68
-25
lines changed

3 files changed

+68
-25
lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java

Lines changed: 2 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -17,22 +17,15 @@
1717
*/
1818
package org.apache.hadoop.hbase.master.procedure;
1919

20-
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT;
21-
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY;
22-
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_DISABLED;
23-
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY;
24-
2520
import java.io.IOException;
2621
import java.util.Arrays;
2722
import java.util.Collections;
2823
import java.util.HashSet;
2924
import java.util.List;
30-
import java.util.Optional;
3125
import java.util.Set;
3226
import java.util.function.Supplier;
3327
import java.util.stream.Collectors;
3428
import java.util.stream.IntStream;
35-
import org.apache.hadoop.conf.Configuration;
3629
import org.apache.hadoop.hbase.ConcurrentTableModificationException;
3730
import org.apache.hadoop.hbase.DoNotRetryIOException;
3831
import org.apache.hadoop.hbase.HBaseIOException;
@@ -231,22 +224,8 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS
231224
break;
232225
case MODIFY_TABLE_REOPEN_ALL_REGIONS:
233226
if (isTableEnabled(env)) {
234-
Configuration conf = env.getMasterConfiguration();
235-
// Use table configuration if defined
236-
final TableDescriptor descriptor =
237-
env.getMasterServices().getTableDescriptors().get(getTableName());
238-
long backoffMillis =
239-
Optional.ofNullable(descriptor.getValue(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY))
240-
.map(Long::parseLong)
241-
.orElseGet(() -> conf.getLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY,
242-
PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT));
243-
int batchSizeMax =
244-
Optional.ofNullable(descriptor.getValue(PROGRESSIVE_BATCH_SIZE_MAX_KEY))
245-
.map(Integer::parseInt).orElseGet(() -> conf.getInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY,
246-
PROGRESSIVE_BATCH_SIZE_MAX_DISABLED));
247-
248-
addChildProcedure(
249-
new ReopenTableRegionsProcedure(getTableName(), backoffMillis, batchSizeMax));
227+
addChildProcedure(ReopenTableRegionsProcedure.throttled(env.getMasterConfiguration(),
228+
env.getMasterServices().getTableDescriptors().get(getTableName())));
250229
}
251230
setNextState(ModifyTableState.MODIFY_TABLE_ASSIGN_NEW_REPLICAS);
252231
break;

hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java

Lines changed: 30 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,9 +22,12 @@
2222
import java.util.ArrayList;
2323
import java.util.Collections;
2424
import java.util.List;
25+
import java.util.Optional;
2526
import java.util.stream.Collectors;
27+
import org.apache.hadoop.conf.Configuration;
2628
import org.apache.hadoop.hbase.HRegionLocation;
2729
import org.apache.hadoop.hbase.TableName;
30+
import org.apache.hadoop.hbase.client.TableDescriptor;
2831
import org.apache.hadoop.hbase.conf.ConfigKey;
2932
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
3033
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
@@ -83,6 +86,23 @@ public class ReopenTableRegionsProcedure
8386
private long regionsReopened = 0;
8487
private long batchesProcessed = 0;
8588

89+
/**
90+
* Create a new ReopenTableRegionsProcedure respecting the throttling configuration for the table.
91+
* First check the table descriptor, then fall back to the global configuration. Only used in
92+
* ModifyTableProcedure.
93+
*/
94+
public static ReopenTableRegionsProcedure throttled(final Configuration conf,
95+
final TableDescriptor desc) {
96+
long backoffMillis = Optional.ofNullable(desc.getValue(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY))
97+
.map(Long::parseLong).orElseGet(() -> conf.getLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY,
98+
PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT));
99+
int batchSizeMax = Optional.ofNullable(desc.getValue(PROGRESSIVE_BATCH_SIZE_MAX_KEY))
100+
.map(Integer::parseInt).orElseGet(
101+
() -> conf.getInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY, PROGRESSIVE_BATCH_SIZE_MAX_DISABLED));
102+
103+
return new ReopenTableRegionsProcedure(desc.getTableName(), backoffMillis, batchSizeMax);
104+
}
105+
86106
public ReopenTableRegionsProcedure() {
87107
this(null);
88108
}
@@ -96,12 +116,14 @@ public ReopenTableRegionsProcedure(final TableName tableName, final List<byte[]>
96116
PROGRESSIVE_BATCH_SIZE_MAX_DISABLED);
97117
}
98118

99-
public ReopenTableRegionsProcedure(final TableName tableName, long reopenBatchBackoffMillis,
119+
@RestrictedApi(explanation = "Should only be called in tests", link = "",
120+
allowedOnPath = ".*/src/test/.*")
121+
ReopenTableRegionsProcedure(final TableName tableName, long reopenBatchBackoffMillis,
100122
int reopenBatchSizeMax) {
101123
this(tableName, Collections.emptyList(), reopenBatchBackoffMillis, reopenBatchSizeMax);
102124
}
103125

104-
public ReopenTableRegionsProcedure(final TableName tableName, final List<byte[]> regionNames,
126+
private ReopenTableRegionsProcedure(final TableName tableName, final List<byte[]> regionNames,
105127
long reopenBatchBackoffMillis, int reopenBatchSizeMax) {
106128
this.tableName = tableName;
107129
this.regionNames = regionNames;
@@ -137,6 +159,12 @@ public long getBatchesProcessed() {
137159
return batchesProcessed;
138160
}
139161

162+
@RestrictedApi(explanation = "Should only be called in tests", link = "",
163+
allowedOnPath = ".*/src/test/.*")
164+
long getReopenBatchBackoffMillis() {
165+
return reopenBatchBackoffMillis;
166+
}
167+
140168
@RestrictedApi(explanation = "Should only be called internally or in tests", link = "",
141169
allowedOnPath = ".*(/src/test/.*|ReopenTableRegionsProcedure).java")
142170
protected int progressBatchSize() {

hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatching.java

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,9 @@
1717
*/
1818
package org.apache.hadoop.hbase.master.procedure;
1919

20+
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT;
21+
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY;
22+
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY;
2023
import static org.junit.Assert.assertEquals;
2124
import static org.junit.Assert.assertTrue;
2225

@@ -26,9 +29,11 @@
2629
import java.util.stream.Collectors;
2730
import org.apache.hadoop.conf.Configuration;
2831
import org.apache.hadoop.hbase.HBaseClassTestRule;
32+
import org.apache.hadoop.hbase.HBaseConfiguration;
2933
import org.apache.hadoop.hbase.HBaseTestingUtil;
3034
import org.apache.hadoop.hbase.TableName;
3135
import org.apache.hadoop.hbase.client.RegionInfo;
36+
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
3237
import org.apache.hadoop.hbase.master.RegionState.State;
3338
import org.apache.hadoop.hbase.master.ServerManager;
3439
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -160,6 +165,37 @@ public void testBatchSizeDoesNotOverflow() {
160165
}
161166
}
162167

168+
@Test
169+
public void testBackoffConfigurationFromTableDescriptor() {
170+
Configuration conf = HBaseConfiguration.create();
171+
TableDescriptorBuilder tbd = TableDescriptorBuilder.newBuilder(TABLE_NAME);
172+
173+
// Default (no batching, no backoff)
174+
ReopenTableRegionsProcedure proc = ReopenTableRegionsProcedure.throttled(conf, tbd.build());
175+
assertEquals(PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT, proc.getReopenBatchBackoffMillis());
176+
assertEquals(Integer.MAX_VALUE, proc.progressBatchSize());
177+
178+
// From Configuration (backoff: 100ms, max: 6)
179+
conf.setLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, 100);
180+
conf.setInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY, 6);
181+
proc = ReopenTableRegionsProcedure.throttled(conf, tbd.build());
182+
assertEquals(100, proc.getReopenBatchBackoffMillis());
183+
assertEquals(2, proc.progressBatchSize());
184+
assertEquals(4, proc.progressBatchSize());
185+
assertEquals(6, proc.progressBatchSize());
186+
assertEquals(6, proc.progressBatchSize());
187+
188+
// From TableDescriptor (backoff: 200ms, max: 7)
189+
tbd.setValue(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, "200");
190+
tbd.setValue(PROGRESSIVE_BATCH_SIZE_MAX_KEY, "7");
191+
proc = ReopenTableRegionsProcedure.throttled(conf, tbd.build());
192+
assertEquals(200, proc.getReopenBatchBackoffMillis());
193+
assertEquals(2, proc.progressBatchSize());
194+
assertEquals(4, proc.progressBatchSize());
195+
assertEquals(7, proc.progressBatchSize());
196+
assertEquals(7, proc.progressBatchSize());
197+
}
198+
163199
private void confirmBatchSize(int expectedBatchSize, Set<StuckRegion> stuckRegions,
164200
ReopenTableRegionsProcedure proc) {
165201
while (true) {

0 commit comments

Comments (0)