HDDS-1758. Add replication and key deletion tests to MiniOzoneChaosCluster. Contributed by Mukul Kumar Singh. (apache#1049)
mukul1987 authored and bshashikant committed Jul 10, 2019
1 parent 6e5b5a9 commit bf94adf
Showing 4 changed files with 58 additions and 9 deletions.
@@ -22,6 +22,7 @@
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -55,7 +56,8 @@ public class MiniOzoneChaosCluster extends MiniOzoneClusterImpl {
private ScheduledFuture scheduledFuture;

private enum FailureMode {
NODES
NODES_RESTART,
NODES_SHUTDOWN
}

public MiniOzoneChaosCluster(OzoneConfiguration conf,
@@ -81,21 +83,55 @@ private boolean isFastRestart() {
return RandomUtils.nextBoolean();
}

// Should the selected node be stopped or started.
private boolean shouldStop() {
return RandomUtils.nextBoolean();
}

// Get the datanode index of the datanode to fail.
private int getNodeToFail() {
return RandomUtils.nextInt() % numDatanodes;
}

private void failNodes() {
private void restartNodes() {
final int numNodesToFail = getNumberOfNodesToFail();
LOG.info("Will restart {} nodes to simulate failure", numNodesToFail);
for (int i = 0; i < numNodesToFail; i++) {
boolean failureMode = isFastRestart();
int failedNodeIndex = getNodeToFail();
String failString = failureMode ? "Fast" : "Slow";
DatanodeDetails dn =
getHddsDatanodes().get(failedNodeIndex).getDatanodeDetails();
try {
LOG.info("Restarting DataNodeIndex {}", failedNodeIndex);
LOG.info("{} Restarting DataNode: {}", failString, dn.getUuid());
restartHddsDatanode(failedNodeIndex, failureMode);
LOG.info("Completed restarting DataNodeIndex {}", failedNodeIndex);
LOG.info("{} Completed restarting Datanode: {}", failString,
dn.getUuid());
} catch (Exception e) {
// Ignored: a failed restart is itself an acceptable outcome of the chaos run.
}
}
}

private void shutdownNodes() {
final int numNodesToFail = getNumberOfNodesToFail();
LOG.info("Will shutdown {} nodes to simulate failure", numNodesToFail);
for (int i = 0; i < numNodesToFail; i++) {
boolean shouldStop = shouldStop();
int failedNodeIndex = getNodeToFail();
String stopString = shouldStop ? "Stopping" : "Starting";
DatanodeDetails dn =
getHddsDatanodes().get(failedNodeIndex).getDatanodeDetails();
try {
LOG.info("{} DataNode {}", stopString, dn.getUuid());

if (shouldStop) {
shutdownHddsDatanode(failedNodeIndex);
} else {
restartHddsDatanode(failedNodeIndex, true);
}
LOG.info("Completed {} DataNode {}", stopString, dn.getUuid());

} catch (Exception e) {
// Ignored: a failed stop/start is itself an acceptable outcome of the chaos run.
}
Expand All @@ -111,8 +147,11 @@ private FailureMode getFailureMode() {
private void fail() {
FailureMode mode = getFailureMode();
switch (mode) {
case NODES:
failNodes();
case NODES_RESTART:
restartNodes();
break;
case NODES_SHUTDOWN:
shutdownNodes();
break;

default:
@@ -190,7 +229,9 @@ void initializeConfiguration() throws IOException {
1, StorageUnit.MB);
conf.setTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT, 1000,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 5,
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 10,
TimeUnit.SECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 20,
TimeUnit.SECONDS);
conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1,
TimeUnit.SECONDS);
@@ -204,6 +245,8 @@ void initializeConfiguration() throws IOException {
conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1,
TimeUnit.SECONDS);
conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 8);
conf.setInt("hdds.scm.replication.thread.interval", 10 * 1000);
conf.setInt("hdds.scm.replication.event.timeout", 20 * 1000);
}

@Override
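The MiniOzoneChaosCluster changes above split the single NODES failure mode into NODES_RESTART and NODES_SHUTDOWN and dispatch between them in fail(), which the cluster triggers from a scheduled task (the scheduledFuture field). The following is a minimal, self-contained sketch of that scheduling pattern, not the Ozone code itself: ChaosScheduler and injectFailure are hypothetical names, and the real cluster calls restartHddsDatanode/shutdownHddsDatanode instead of printing.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

// Hypothetical stand-in for the chaos loop: every failureInterval seconds,
// pick a random failure mode and apply it to a random datanode index.
public class ChaosScheduler {

  enum FailureMode { NODES_RESTART, NODES_SHUTDOWN }

  private final int numDatanodes;
  private final ScheduledExecutorService executor =
      Executors.newSingleThreadScheduledExecutor();
  private ScheduledFuture<?> scheduledFuture;

  ChaosScheduler(int numDatanodes) {
    this.numDatanodes = numDatanodes;
  }

  void start(long failureIntervalSeconds) {
    scheduledFuture = executor.scheduleAtFixedRate(this::injectFailure,
        failureIntervalSeconds, failureIntervalSeconds, TimeUnit.SECONDS);
  }

  private void injectFailure() {
    FailureMode[] modes = FailureMode.values();
    FailureMode mode = modes[ThreadLocalRandom.current().nextInt(modes.length)];
    int nodeIndex = ThreadLocalRandom.current().nextInt(numDatanodes);
    // In MiniOzoneChaosCluster this is where restartHddsDatanode(...) or
    // shutdownHddsDatanode(...) would be invoked on the chosen index.
    System.out.println(mode + " on datanode index " + nodeIndex);
  }

  void stop() {
    if (scheduledFuture != null) {
      scheduledFuture.cancel(false);
    }
    executor.shutdown();
  }
}
```

The shortened stale-node (10 s) and dead-node (20 s) intervals plus the replication thread interval and event timeout set in initializeConfiguration() appear intended to let SCM notice a shut-down datanode and re-replicate its containers within the duration of the test run.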
@@ -130,6 +130,11 @@ private void load(long runTimeMillis) {
break;
}

try {
bucket.deleteKey(keyName);
} catch (Exception e) {
LOG.error("LOADGEN: Unable to delete key:{}", keyName, e);
}
}
// This will terminate other threads too.
isWriteThreadRunning.set(false);
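The load-generator hunk above adds a deleteKey call after each key is written and verified, so the chaos run now also exercises Ozone's key-deletion path. A minimal sketch of that write/read/delete round trip against the Ozone client API, assuming the usual client packages, an already existing volume and bucket, and placeholder names (vol1, bucket1, key-0):

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

public class KeyRoundTrip {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      // Assumes volume "vol1" and bucket "bucket1" already exist.
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1").getBucket("bucket1");

      byte[] data = "chaos-payload".getBytes(StandardCharsets.UTF_8);
      String keyName = "key-0";

      // Write the key.
      try (OzoneOutputStream out = bucket.createKey(keyName, data.length)) {
        out.write(data);
      }

      // Read it back to check the write survived any injected failures.
      byte[] readBack = new byte[data.length];
      try (OzoneInputStream in = bucket.readKey(keyName)) {
        in.read(readBack);
      }

      // Finally exercise the deletion path, as the load generator now does.
      bucket.deleteKey(keyName);
    }
  }
}
```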
@@ -62,7 +62,7 @@ public class TestMiniChaosOzoneCluster implements Runnable {

@Option(names = {"-i", "--failureInterval"},
description = "time between failure events in seconds")
private static int failureInterval = 5; // 5 second period between failures.
private static int failureInterval = 300; // 300 second period between failures.

private static MiniOzoneChaosCluster cluster;
private static MiniOzoneLoadGenerator loadGenerator;
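The TestMiniChaosOzoneCluster change above only raises the default --failureInterval from 5 to 300 seconds; the option itself is a standard picocli @Option. A minimal, hypothetical runner (ChaosRunner is not part of the Ozone code) showing how such an option is parsed and could feed a failure scheduler; exact picocli entry points vary slightly between versions:

```java
import picocli.CommandLine;
import picocli.CommandLine.Option;

// Hypothetical picocli runner mirroring the failureInterval option above.
public class ChaosRunner implements Runnable {

  @Option(names = {"-i", "--failureInterval"},
      description = "time between failure events in seconds")
  private int failureInterval = 300;

  @Override
  public void run() {
    // A real runner would hand this value to the chaos scheduler.
    System.out.println("Injecting a failure every " + failureInterval + "s");
  }

  public static void main(String[] args) {
    CommandLine.run(new ChaosRunner(), args); // e.g. -i 60
  }
}
```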
@@ -711,7 +711,8 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
k.setPipeline(cp.getPipeline());
}
} catch (IOException e) {
LOG.debug("Unable to update pipeline for container");
LOG.error("Unable to update pipeline for container:{}",
k.getContainerID());
}
}
});