@oza
Created January 21, 2014 09:03
TestNamenodeRetryCacheMetrics
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/**
 * Tests ensuring that the NameNode retry cache metrics work correctly for
 * non-idempotent requests.
 *
 * The retry cache tracks previously received requests by the ClientId and
 * CallId carried in each RPC request, and stores the corresponding response.
 * When the same request is received again on a retry, the stored response is
 * replayed instead of re-executing the operation.
 */
public class TestNamenodeRetryCacheMetrics {
  private MiniDFSCluster cluster;
  private FSNamesystem namesystem;
  private DistributedFileSystem filesystem;
  private int namenodeId = 0;
  private Configuration conf;
  private RetryCacheMetrics metrics;

  /** Start a cluster */
  @Before
  public void setup() throws Exception {
    conf = new HdfsConfiguration();
    conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
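    // Test-only knob: the client silently drops this many NameNode responses
    // per call, forcing RPC retries that exercise the retry cache.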
    conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
        1000);
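    // HA topology with no DataNodes: only NameNode-side behavior is needed.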
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();
    cluster.waitActive();
    cluster.transitionToActive(namenodeId);
    namesystem = cluster.getNamesystem(namenodeId);
    filesystem = cluster.getFileSystem(namenodeId);
    metrics = namesystem.getRetryCache().getMetrics();
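    // saveNamespace() is only permitted while the NameNode is in safe mode.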
    filesystem.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  }

  /**
   * Clean up after each test.
   * @throws IOException if leaving safe mode or shutting down the cluster fails
   */
  @After
  public void cleanup() throws IOException {
    filesystem.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    cluster.shutdown();
  }

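  /**
   * Issue several saveNamespace calls and verify the retry cache counters
   * exposed through RetryCacheMetrics.
   */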
  @Test
  public void testRetryCacheMetrics() throws IOException {
    trySaveNamespace();
    trySaveNamespace();
    trySaveNamespace();
    trySaveNamespace();
    final long cacheHit = 0;
    final long cacheCleared = 0;
    final long cacheUpdated = 0;
    checkMetrics(cacheHit, cacheCleared, cacheUpdated);
    printMetrics();
  }

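  /** Dump the current retry cache counters to stdout for debugging. */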
  private void printMetrics() {
    System.out.println("Retry cache metrics:");
    System.out.println(metrics.getCacheHit());
    System.out.println(metrics.getCacheCleared());
    System.out.println(metrics.getCacheUpdated());
  }

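  /**
   * Assert the counter values published under the
   * "RetryCache/NameNodeRetryCache" metrics record.
   */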
  private void checkMetrics(long hit, long cleared, long updated) {
    MetricsRecordBuilder rb = getMetrics("RetryCache/NameNodeRetryCache");
    assertCounter("CacheHit", hit, rb);
    assertCounter("CacheCleared", cleared, rb);
    assertCounter("CacheUpdated", updated, rb);
  }

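  /** Invoke saveNamespace, ignoring failures; only the metrics matter here. */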
  private void trySaveNamespace() {
    try {
      filesystem.saveNamespace();
    } catch (IOException ioe) {
      // Ignored: the retry cache counters are verified separately.
    }
  }
}