Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1101,7 +1101,12 @@ public PipelineAck.ECN getECN() {
}
double load = ManagementFactory.getOperatingSystemMXBean()
.getSystemLoadAverage();
return load > NUM_CORES * congestionRatio ? PipelineAck.ECN.CONGESTED :
double threshold = NUM_CORES * congestionRatio;

if (load > threshold || DataNodeFaultInjector.get().mockCongestedForTest()) {
metrics.incrCongestedCount();
}
return load > threshold ? PipelineAck.ECN.CONGESTED :
PipelineAck.ECN.SUPPORTED;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -162,4 +162,8 @@ public void markSlow(String dnAddr, int[] replies) {}
* Just delay delete replica a while.
*/
public void delayDeleteReplica() {}

/**
 * Fault-injection hook used to simulate pipeline congestion on a DataNode.
 * The production implementation always returns {@code false} (not
 * congested); tests override this to return {@code true} so that
 * congestion-handling paths can be exercised deterministically.
 *
 * @return {@code true} if congestion should be simulated, {@code false}
 *         otherwise (the default).
 */
public boolean mockCongestedForTest() {
return false;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,8 @@ public class DataNodeMetrics {
private MutableCounterLong replaceBlockOpOnSameMount;
@Metric("Number of replaceBlock ops to another node")
private MutableCounterLong replaceBlockOpToOtherHost;
@Metric("Number of congested count")
private MutableCounterLong congestedCount;

final MetricsRegistry registry = new MetricsRegistry("datanode");
@Metric("Milliseconds spent on calling NN rpc")
Expand Down Expand Up @@ -807,4 +809,7 @@ public void incrReplaceBlockOpToOtherHost() {
replaceBlockOpToOtherHost.incr();
}

/**
 * Increments the {@code CongestedCount} metric, which tracks how many
 * times this DataNode has observed (or been injected with) pipeline
 * congestion.
 */
public void incrCongestedCount() {
congestedCount.incr();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,11 @@
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.util.Lists;
import org.junit.Assert;
import org.junit.Assume;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand Down Expand Up @@ -816,4 +818,31 @@ public Boolean get() {
}, 100, 10000);
}
}

@Test
public void testCongestedCount() throws Exception {
  // Verifies that DataNode#getECN() bumps the CongestedCount metric when
  // the (fault-injected) congestion condition holds.
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED, true);
  MiniDFSCluster cluster = null;
  // Capture the current injector up front so the finally block can always
  // restore it, even if cluster construction fails part-way through.
  final DataNodeFaultInjector old = DataNodeFaultInjector.get();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    DataNodeFaultInjector.set(new DataNodeFaultInjector() {
      @Override
      public boolean mockCongestedForTest() {
        return true;
      }
    });
    // getECN() should record exactly one congestion event in the metrics.
    cluster.getDataNodes().get(0).getECN();
    MetricsRecordBuilder dnMetrics = getMetrics(cluster.getDataNodes().get(0)
        .getMetrics().name());
    Assert.assertEquals(1L, getLongCounter("CongestedCount", dnMetrics));
  } finally {
    // Restore the injector unconditionally — leaking the mock into other
    // tests would make them see spurious congestion.
    DataNodeFaultInjector.set(old);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
}