Commit eed5924

HDDS-12770. Use ContainerID instead of Long in CONTAINER_IDS_TABLE. (apache#8247)
1 parent 7dfd8c1 commit eed5924
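
In short, this change switches the key type of the datanode's witnessed-container table (containerIds) from a raw Long to the ContainerID value type, both in the RocksDB column-family definition and at every call site. A minimal sketch of the call-site change, assuming an already-opened table; the helper below is illustrative, not code from the commit:

    // Illustrative helper showing the key-type change; Table, ContainerID and
    // the IOException contract are as used in the diffs below.
    static void persistState(Table<ContainerID, String> table, long containerId, String state)
        throws IOException {
      // Raw long keys are now wrapped in the ContainerID value type.
      table.put(ContainerID.valueOf(containerId), state);
    }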

File tree: 8 files changed, +191 −31 lines

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java

Lines changed: 11 additions & 10 deletions

@@ -37,9 +37,11 @@
 import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.ToLongFunction;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -60,7 +62,7 @@ public static ContainerSet newReadOnlyContainerSet(long recoveringTimeout) {
     return new ContainerSet(null, recoveringTimeout);
   }
 
-  public static ContainerSet newRwContainerSet(Table<Long, String> containerIdsTable, long recoveringTimeout) {
+  public static ContainerSet newRwContainerSet(Table<ContainerID, String> containerIdsTable, long recoveringTimeout) {
     Objects.requireNonNull(containerIdsTable, "containerIdsTable == null");
     return new ContainerSet(containerIdsTable, recoveringTimeout);
   }
@@ -73,13 +75,13 @@ public static ContainerSet newRwContainerSet(Table<Long, String> containerIdsTab
       new ConcurrentSkipListMap<>();
   private final Clock clock;
   private long recoveringTimeout;
-  private final Table<Long, String> containerIdsTable;
+  private final Table<ContainerID, String> containerIdsTable;
 
-  private ContainerSet(Table<Long, String> continerIdsTable, long recoveringTimeout) {
+  private ContainerSet(Table<ContainerID, String> continerIdsTable, long recoveringTimeout) {
     this(continerIdsTable, recoveringTimeout, null);
   }
 
-  ContainerSet(Table<Long, String> continerIdsTable, long recoveringTimeout, Clock clock) {
+  ContainerSet(Table<ContainerID, String> continerIdsTable, long recoveringTimeout, Clock clock) {
     this.clock = clock != null ? clock : Clock.systemUTC();
     this.containerIdsTable = continerIdsTable;
     this.recoveringTimeout = recoveringTimeout;
@@ -146,7 +148,7 @@ private boolean addContainer(Container<?> container, boolean overwrite) throws
     }
     try {
       if (containerIdsTable != null) {
-        containerIdsTable.put(containerId, containerState.toString());
+        containerIdsTable.put(ContainerID.valueOf(containerId), containerState.toString());
       }
     } catch (IOException e) {
       throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION);
@@ -230,7 +232,7 @@ private boolean removeContainer(long containerId, boolean markMissing, boolean r
     if (removeFromDB) {
       try {
         if (containerIdsTable != null) {
-          containerIdsTable.delete(containerId);
+          containerIdsTable.delete(ContainerID.valueOf(containerId));
         }
       } catch (IOException e) {
         throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION);
@@ -461,7 +463,7 @@ public Set<Long> getMissingContainerSet() {
     return missingContainerSet;
   }
 
-  public Table<Long, String> getContainerIdsTable() {
+  public Table<ContainerID, String> getContainerIdsTable() {
     return containerIdsTable;
   }
 
@@ -475,10 +477,9 @@ public Table<Long, String> getContainerIdsTable() {
    * @param container2BCSIDMap Map of containerId to BCSID persisted in the
    *                           Ratis snapshot
    */
-  public void buildMissingContainerSetAndValidate(
-      Map<Long, Long> container2BCSIDMap) {
+  public <T> void buildMissingContainerSetAndValidate(Map<T, Long> container2BCSIDMap, ToLongFunction<T> getId) {
     container2BCSIDMap.entrySet().parallelStream().forEach((mapEntry) -> {
-      long id = mapEntry.getKey();
+      final long id = getId.applyAsLong(mapEntry.getKey());
       if (!containerMap.containsKey(id)) {
         LOG.warn("Adding container {} to missing container set.", id);
         missingContainerSet.add(id);
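
The last hunk is the pivot of the patch: buildMissingContainerSetAndValidate becomes generic over the snapshot map's key type, taking a ToLongFunction to bridge back to the long-keyed in-memory containerMap. A hedged usage sketch, assuming an initialized ContainerSet; the two extractors mirror the call sites changed below, and the maps are shown empty for brevity:

    // Long-keyed snapshot map, as in HddsDispatcher: identity extractor.
    final Map<Long, Long> snapshot = new HashMap<>();
    containerSet.buildMissingContainerSetAndValidate(snapshot, n -> n);

    // ContainerID-keyed map, as in OzoneContainer: unwrap with getId.
    final Map<ContainerID, Long> persisted = new HashMap<>();
    containerSet.buildMissingContainerSetAndValidate(persisted, ContainerID::getId);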

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java

Lines changed: 1 addition & 2 deletions

@@ -178,8 +178,7 @@ private boolean canIgnoreException(Result result) {
   @Override
   public void buildMissingContainerSetAndValidate(
       Map<Long, Long> container2BCSIDMap) {
-    containerSet
-        .buildMissingContainerSetAndValidate(container2BCSIDMap);
+    containerSet.buildMissingContainerSetAndValidate(container2BCSIDMap, n -> n);
   }
 
   @Override

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java

Lines changed: 4 additions & 4 deletions

@@ -19,9 +19,9 @@
 
 import java.util.Map;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
-import org.apache.hadoop.hdds.utils.db.LongCodec;
 import org.apache.hadoop.hdds.utils.db.StringCodec;
 import org.apache.hadoop.ozone.OzoneConsts;
 
@@ -32,10 +32,10 @@ public final class WitnessedContainerDBDefinition extends DBDefinition.WithMap {
 
   private static final String CONTAINER_IDS_TABLE_NAME = "containerIds";
 
-  public static final DBColumnFamilyDefinition<Long, String>
+  public static final DBColumnFamilyDefinition<ContainerID, String>
       CONTAINER_IDS_TABLE = new DBColumnFamilyDefinition<>(
       CONTAINER_IDS_TABLE_NAME,
-      LongCodec.get(),
+      ContainerID.getCodec(),
       StringCodec.get());
 
   private static final Map<String, DBColumnFamilyDefinition<?, ?>>
@@ -62,7 +62,7 @@ public String getLocationConfigKey() {
     return ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR;
   }
 
-  public DBColumnFamilyDefinition<Long, String> getContainerIdsTable() {
+  DBColumnFamilyDefinition<ContainerID, String> getContainerIdsTable() {
     return CONTAINER_IDS_TABLE;
   }
 }
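
Swapping LongCodec.get() for ContainerID.getCodec() on an existing column family is only safe if both codecs emit identical bytes for the same numeric id; otherwise previously written containerIds entries would become unreadable. The new TestTypedTable below verifies exactly this by writing with one key type and reading back with the other. The same property as a standalone sketch, assuming Codec#toPersistedFormat is the serialization entry point (an assumption, not shown in this diff):

    // Assumed byte-level compatibility between the old and new key codecs.
    final long raw = 42L;
    final byte[] viaLong = LongCodec.get().toPersistedFormat(raw);
    final byte[] viaId = ContainerID.getCodec().toPersistedFormat(ContainerID.valueOf(raw));
    // Must hold for existing tables to stay readable after the codec swap:
    assert Arrays.equals(viaLong, viaId);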

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java

Lines changed: 2 additions & 1 deletion

@@ -17,6 +17,7 @@
 
 package org.apache.hadoop.ozone.container.metadata;
 
+import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.utils.db.Table;
 
 /**
@@ -28,5 +29,5 @@ public interface WitnessedContainerMetadataStore extends DBStoreManager {
    *
    * @return Table
    */
-  Table<Long, String> getContainerIdsTable();
+  Table<ContainerID, String> getContainerIdsTable();
 }

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java

Lines changed: 4 additions & 3 deletions

@@ -22,6 +22,7 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -33,7 +34,7 @@
 public final class WitnessedContainerMetadataStoreImpl extends AbstractRDBStore<WitnessedContainerDBDefinition>
     implements WitnessedContainerMetadataStore {
 
-  private Table<Long, String> containerIdsTable;
+  private Table<ContainerID, String> containerIdsTable;
   private static final ConcurrentMap<String, WitnessedContainerMetadataStore> INSTANCES =
       new ConcurrentHashMap<>();
 
@@ -63,13 +64,13 @@ private WitnessedContainerMetadataStoreImpl(ConfigurationSource config, boolean
   @Override
   protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, ConfigurationSource config)
       throws IOException {
-    DBStore dbStore = dbStoreBuilder.build();
+    final DBStore dbStore = dbStoreBuilder.build();
     this.containerIdsTable = this.getDbDef().getContainerIdsTable().getTable(dbStore);
     return dbStore;
   }
 
   @Override
-  public Table<Long, String> getContainerIdsTable() {
+  public Table<ContainerID, String> getContainerIdsTable() {
     return containerIdsTable;
   }
 }
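
Consumers of the store interface see the same change. A minimal sketch of a lookup adapted to the new key type (lookupState is a hypothetical helper, not part of the commit; the store is assumed to be already initialized):

    // Hypothetical helper: resolve a container's persisted state by numeric id.
    static String lookupState(WitnessedContainerMetadataStore store, long id) throws IOException {
      final Table<ContainerID, String> table = store.getContainerIdsTable();
      return table.get(ContainerID.valueOf(id));
    }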

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java

Lines changed: 4 additions & 3 deletions

@@ -55,6 +55,7 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient;
@@ -343,13 +344,13 @@ public void buildContainerSet() throws IOException {
       for (Thread volumeThread : volumeThreads) {
         volumeThread.join();
       }
-      try (TableIterator<Long, ? extends Table.KeyValue<Long, String>> itr =
+      try (TableIterator<ContainerID, ? extends Table.KeyValue<ContainerID, String>> itr =
           containerSet.getContainerIdsTable().iterator()) {
-        Map<Long, Long> containerIds = new HashMap<>();
+        final Map<ContainerID, Long> containerIds = new HashMap<>();
         while (itr.hasNext()) {
           containerIds.put(itr.next().getKey(), 0L);
         }
-        containerSet.buildMissingContainerSetAndValidate(containerIds);
+        containerSet.buildMissingContainerSetAndValidate(containerIds, ContainerID::getId);
       }
     } catch (InterruptedException ex) {
       LOG.error("Volume Threads Interrupted exception", ex);

hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java

Lines changed: 18 additions & 8 deletions

@@ -56,6 +56,22 @@
  * RDBStore Tests.
  */
 public class TestRDBStore {
+  static ManagedDBOptions newManagedDBOptions() {
+    final ManagedDBOptions options = new ManagedDBOptions();
+    options.setCreateIfMissing(true);
+    options.setCreateMissingColumnFamilies(true);
+
+    Statistics statistics = new Statistics();
+    statistics.setStatsLevel(StatsLevel.ALL);
+    options.setStatistics(statistics);
+    return options;
+  }
+
+  static RDBStore newRDBStore(File dbFile, ManagedDBOptions options, Set<TableConfig> families)
+      throws IOException {
+    return newRDBStore(dbFile, options, families, MAX_DB_UPDATES_SIZE_THRESHOLD);
+  }
+
   public static RDBStore newRDBStore(File dbFile, ManagedDBOptions options,
       Set<TableConfig> families,
       long maxDbUpdatesSizeThreshold)
@@ -72,20 +88,14 @@ public static RDBStore newRDBStore(File dbFile, ManagedDBOptions options,
       "Fourth", "Fifth",
       "Sixth");
   private RDBStore rdbStore = null;
-  private ManagedDBOptions options = null;
+  private ManagedDBOptions options;
   private Set<TableConfig> configSet;
 
   @BeforeEach
   public void setUp(@TempDir File tempDir) throws Exception {
     CodecBuffer.enableLeakDetection();
 
-    options = new ManagedDBOptions();
-    options.setCreateIfMissing(true);
-    options.setCreateMissingColumnFamilies(true);
-
-    Statistics statistics = new Statistics();
-    statistics.setStatsLevel(StatsLevel.ALL);
-    options.setStatistics(statistics);
+    options = newManagedDBOptions();
     configSet = new HashSet<>();
     for (String name : families) {
       TableConfig newConfig = new TableConfig(name,
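
This refactor is mechanical: the options setup moves from setUp into reusable package-private helpers, so the new TestTypedTable below can share the same RocksDB configuration.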

hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedTable.java

Lines changed: 147 additions & 0 deletions

@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.LongFunction;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.StringUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.utils.db.cache.TableCache;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.ratis.util.UncheckedAutoCloseable;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import org.rocksdb.RocksDB;
+
+/**
+ * Tests for RocksDBTable Store.
+ */
+public class TestTypedTable {
+  private final List<String> families = Arrays.asList(StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
+      "First", "Second");
+
+  private RDBStore rdb;
+  private final List<UncheckedAutoCloseable> closeables = new ArrayList<>();
+
+  static TableConfig newTableConfig(String name, List<UncheckedAutoCloseable> closeables) {
+    final ManagedColumnFamilyOptions option = new ManagedColumnFamilyOptions();
+    closeables.add(option::close);
+    return new TableConfig(name, option);
+  }
+
+  @BeforeEach
+  public void setUp(@TempDir File tempDir) throws Exception {
+    CodecBuffer.enableLeakDetection();
+
+    final Set<TableConfig> configSet = families.stream()
+        .map(name -> newTableConfig(name, closeables))
+        .collect(Collectors.toSet());
+    final ManagedDBOptions options = TestRDBStore.newManagedDBOptions();
+    closeables.add(options::close);
+    rdb = TestRDBStore.newRDBStore(tempDir, options, configSet);
+  }
+
+  @AfterEach
+  public void tearDown() throws Exception {
+    rdb.close();
+    closeables.forEach(UncheckedAutoCloseable::close);
+    closeables.clear();
+    CodecBuffer.assertNoLeaks();
+  }
+
+  <K, V> TypedTable<K, V> newTypedTable(int index, Codec<K> keyCodec, Codec<V> valueCodec) throws IOException {
+    final RDBTable rawTable = rdb.getTable(families.get(index));
+    return new TypedTable<>(rawTable, keyCodec, valueCodec, TableCache.CacheType.PARTIAL_CACHE);
+  }
+
+  static <V> V put(Map<Long, V> map, long key, LongFunction<V> constructor) {
+    return map.put(key, constructor.apply(key));
+  }
+
+  static <V> Map<Long, V> newMap(LongFunction<V> constructor) {
+    final Map<Long, V> map = new HashMap<>();
+    for (long n = 1; n > 0; n <<= 1) {
+      put(map, n, constructor);
+      put(map, n - 1, constructor);
+      put(map, n + 1, constructor);
+    }
+    put(map, Long.MAX_VALUE, constructor);
+    for (int i = 0; i < 1000; i++) {
+      final long key = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE) + 1;
+      put(map, key, constructor);
+    }
+    return map;
+  }
+
+  @Test
+  public void testContainerIDvsLong() throws Exception {
+    final Map<Long, ContainerID> keys = newMap(ContainerID::valueOf);
+
+    // Table 1: ContainerID -> String
+    // Table 2: Long -> String
+    try (TypedTable<ContainerID, String> idTable = newTypedTable(
+        1, ContainerID.getCodec(), StringCodec.get());
+        TypedTable<Long, String> longTable = newTypedTable(
+            2, LongCodec.get(), StringCodec.get())) {
+
+      for (Map.Entry<Long, ContainerID> e : keys.entrySet()) {
+        final long n = e.getKey();
+        final ContainerID id = e.getValue();
+        final String value = id.toString();
+        // put the same value to both tables
+        idTable.put(id, value);
+        longTable.put(n, value);
+      }
+    }
+
+    // Reopen tables with different key types
+
+    // Table 1: Long -> String
+    // Table 2: ContainerID -> String
+    try (TypedTable<ContainerID, String> idTable = newTypedTable(
+        2, ContainerID.getCodec(), StringCodec.get());
+        TypedTable<Long, String> longTable = newTypedTable(
+            1, LongCodec.get(), StringCodec.get())) {
+
+      for (Map.Entry<Long, ContainerID> e : keys.entrySet()) {
+        final long n = e.getKey();
+        final ContainerID id = e.getValue();
+        final String expected = id.toString();
+        // Read the value using a different key type
+        final String idValue = idTable.get(id);
+        assertEquals(expected, idValue);
+        final String longValue = longTable.get(n);
+        assertEquals(expected, longValue);
+      }
+    }
+  }
+}
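
The round trip (write under ContainerID keys, reopen and read under Long keys, and vice versa) passes only if ContainerID.getCodec() and LongCodec.get() share an on-disk encoding, so the key-type change in this commit should require no migration of existing containerIds tables.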
