27 | 27 | import org.apache.hadoop.hdfs.DFSConfigKeys;
28 | 28 | import org.apache.hadoop.hdfs.DFSTestUtil;
29 | 29 | import org.apache.hadoop.hdfs.DFSUtil;
| 30 | +import org.apache.hadoop.hdfs.DFSUtilClient; |
30 | 31 | import org.apache.hadoop.hdfs.DistributedFileSystem;
31 | 32 | import org.apache.hadoop.hdfs.HdfsConfiguration;
32 | 33 | import org.apache.hadoop.hdfs.MiniDFSCluster;

46 | 47 | import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
47 | 48 | import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
48 | 49 | import org.apache.hadoop.io.IOUtils;
| 50 | +import org.apache.hadoop.metrics2.MetricsSystem; |
| 51 | +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; |
49 | 52 | import org.apache.hadoop.test.GenericTestUtils;
50 | 53 | import org.junit.After;
51 | 54 | import org.junit.Test;

@@ -764,6 +767,63 @@ public void testBalancerWithLimitOverUtilizedNum() throws Exception {
764 | 767 | }
765 | 768 | }
766 | 769 |
| 770 | + @Test(timeout = 60000) |
| 771 | + public void testBalancerMetricsDuplicate() throws Exception { |
| 772 | + final Configuration conf = new HdfsConfiguration(); |
| 773 | + // Init the config (block size to 100) |
| 774 | + initConf(conf); |
| 775 | + final long totalCapacity = 1000L; |
| 776 | + final int numOfOverUtilizedDn = 1; |
| 777 | + final int numOfUnderUtilizedDn = 2; |
| 778 | + final int totalNumOfDn = numOfOverUtilizedDn + numOfUnderUtilizedDn; |
| 779 | + final long[] capacityArray = new long[totalNumOfDn]; |
| 780 | + Arrays.fill(capacityArray, totalCapacity); |
| 781 | + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) |
| 782 | + .numDataNodes(totalNumOfDn) |
| 783 | + .simulatedCapacities(capacityArray) |
| 784 | + .build()) { |
| 785 | + cluster.setDataNodesDead(); |
| 786 | + List<DataNode> dataNodes = cluster.getDataNodes(); |
| 787 | + |
| 788 | + DataNodeTestUtils.triggerHeartbeat(dataNodes.get(0)); |
| 789 | + DataNodeTestUtils.triggerBlockReport(dataNodes.get(0)); |
| 790 | + // Fill the first DataNode to 100% utilization |
| 791 | + TestBalancer.createFile(cluster, new Path("test_big" + 0), 1000, (short) 1, 0); |
| 792 | + cluster.setDataNodesDead(); |
| 793 | + |
| 794 | + // With two under-utilized DataNodes, the balancer executes runOneIteration() at least twice |
| 795 | + for (int i = 1; i <= numOfUnderUtilizedDn; i++) { |
| 796 | + DataNodeTestUtils.triggerHeartbeat(dataNodes.get(i)); |
| 797 | + DataNodeTestUtils.triggerBlockReport(dataNodes.get(i)); |
| 798 | + // Leave these DataNodes at 0% utilization |
| 799 | + TestBalancer.createFile(cluster, new Path("test_small" + i), 0, (short) 1, 0); |
| 800 | + cluster.setDataNodesDead(); |
| 801 | + } |
| 802 | + |
| 803 | + cluster.triggerDeletionReports(); |
| 804 | + cluster.triggerBlockReports(); |
| 805 | + cluster.triggerHeartbeats(); |
| 806 | + |
| 807 | + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); |
| 808 | + Collection<String> nsIds = DFSUtilClient.getNameServiceIds(conf); |
| 809 | + assertEquals(1, namenodes.size()); |
| 810 | + |
| 811 | + // Outside mini-cluster mode, the metrics system throws if BalancerMetrics is registered twice |
| 812 | + DefaultMetricsSystem.setMiniClusterMode(false); |
| 813 | + MetricsSystem instance = DefaultMetricsSystem.instance(); |
| 814 | + // Unregister the cluster's JvmMetrics source so it cannot trigger an unrelated duplicate-source error |
| 815 | + instance.unregisterSource("JvmMetrics"); |
| 816 | + |
| 817 | + final BalancerParameters balancerParameters = Balancer.Cli.parse(new String[] { |
| 818 | + "-policy", BalancingPolicy.Node.INSTANCE.getName(), |
| 819 | + "-threshold", "10", |
| 820 | + }); |
| 821 | + int r = Balancer.run(namenodes, nsIds, balancerParameters, conf); |
| 822 | + assertEquals(ExitStatus.SUCCESS.getExitCode(), r); |
| 823 | + DefaultMetricsSystem.setMiniClusterMode(true); |
| 824 | + } |
| 825 | + } |
| 826 | + |
767 | 827 | @Test(timeout = 100000)
768 | 828 | public void testMaxIterationTime() throws Exception {
769 | 829 | final Configuration conf = new HdfsConfiguration();
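For readers following the new test: it relies on the fact that, outside mini-cluster mode, Hadoop's DefaultMetricsSystem rejects a second metrics source registered under a name that already exists, so a duplicated BalancerMetrics registration would surface as a failure from Balancer.run. Below is a minimal standalone sketch of that duplicate-detection behavior; the ExampleSource class and the "BalancerDemo" prefix are hypothetical names used only for illustration, not part of the patch.

import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

public class DuplicateMetricsSourceSketch {

  // Hypothetical metrics source, used only to demonstrate duplicate registration.
  @Metrics(name = "ExampleSource", about = "demo source", context = "dfs")
  static class ExampleSource {
    @Metric("number of operations") MutableCounterLong ops;
  }

  public static void main(String[] args) {
    // With mini-cluster mode off, registering two sources under the same
    // name is treated as an error rather than being silently tolerated.
    DefaultMetricsSystem.setMiniClusterMode(false);
    MetricsSystem ms = DefaultMetricsSystem.initialize("BalancerDemo");
    ms.register("ExampleSource", "first registration", new ExampleSource());
    try {
      ms.register("ExampleSource", "second registration", new ExampleSource());
    } catch (MetricsException expected) {
      // The metrics system reports that the source name already exists.
      System.out.println("Duplicate source rejected: " + expected.getMessage());
    } finally {
      DefaultMetricsSystem.shutdown();
    }
  }
}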