Created
March 5, 2014 01:23
-
-
Save jayunit100/9359408 to your computer and use it in GitHub Desktop.
better?
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* | |
* Licensed to the Apache Software Foundation (ASF) under one | |
* or more contributor license agreements. See the NOTICE file | |
* distributed with this work for additional information | |
* regarding copyright ownership. The ASF licenses this file | |
* to you under the Apache License, Version 2.0 (the | |
* "License"); you may not use this file except in compliance | |
* with the License. You may obtain a copy of the License at | |
* | |
* http://www.apache.org/licenses/LICENSE-2.0 | |
* | |
* Unless required by applicable law or agreed to in writing, software | |
* distributed under the License is distributed on an "AS IS" BASIS, | |
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
* See the License for the specific language governing permissions and | |
* limitations under the License. | |
*/ | |
package org.apache.bigtop.itest.hadoop.hcfs | |
import org.apache.hadoop.conf.Configuration | |
import org.junit.AfterClass | |
import org.junit.BeforeClass | |
import org.junit.Test | |
import static org.junit.Assert.assertEquals | |
import static org.junit.Assert.assertTrue | |
import static org.apache.bigtop.itest.LogErrorsUtils.logError | |
import org.apache.bigtop.itest.shell.Shell | |
import org.apache.commons.logging.Log | |
import org.apache.commons.logging.LogFactory | |
/** | |
* This is a "superset" of the hdfs/TestFuseDFS class. | |
* In time, we can refactor or deprecate the TestFuseDFS class since | |
* there might not be any particular need to test HDFS over FUSE specifically. | |
* After all, FUSE is an interface, and it should be tested at that level, with | |
 * understanding that all distributed file system implementations should
 * require the same testing.
* | |
* These tests are complex (use lambdas and complex shell commands) | |
* and thus somewhat overcommented for the first iteration, | |
* we can clean comments up | |
* over time.*/ | |
public class TestFuseDFS {

  private static String username = System.properties["user.name"];
  private static Configuration conf;
  // Two shells: a plain one for the per-test FUSE file operations, and a
  // root shell for mount/umount, which require elevated privileges.
  private static Shell sh = new Shell("/bin/bash -s");
  private static Shell shRoot = new Shell("/bin/bash -s", "root");
  private static String mount_point = System.
      getProperty("fuse.dfs.mountpoint", "/tmp/hcfs-test");
  static private Log LOG = LogFactory.getLog(Shell.class)

  /**
   * If HCFS=GLUSTER,S3,... then we don't do a mount operation.
   * That allows this test to work on any file system, because it's not
   * coupled to hadoop-fuse-dfs.
   */
  private static boolean isHDFS = "HDFS".
      equals(System.getProperty("HCFS_IMPLEMENTATION", "HDFS"));
  private static String userdir = "${mount_point}/user/${username}";
  private static String testdir = "${userdir}/TestFuseDFS-testDir";

  /**
   * Mounts the file system under {@code mount_point} (HDFS only) before any
   * test runs. Used to have a "test file" but now we leave that to individual
   * tests. Also see the testWrapper function to see how the base testDir is
   * created.
   */
  @BeforeClass
  public static void setUp() {
    conf = new Configuration();
    String fs_default_name = conf.get("fs.defaultFS");
    // hadoop-fuse-dfs expects a "dfs://host:port" URI: dropping the leading
    // character turns "hdfs://..." into "dfs://...".
    // NOTE(review): assumes fs.defaultFS is set and starts with "hdfs" —
    // confirm on deployments that still use the legacy fs.default.name key.
    String uri = fs_default_name.substring(1);
    // Unmount any stale mount from a previous run, then (re)create the
    // mount point directory.
    shRoot.exec("umount ${mount_point}");
    shRoot.exec("mkdir -p ${mount_point}");
    if (isHDFS) {
      LOG.info("mounting ${uri} on ${mount_point}");
      shRoot.exec("hadoop-fuse-dfs ${uri} ${mount_point}");
    }
    logError(shRoot);
    assertEquals("hadoop-fuse-dfs failed", 0, shRoot.getRet());
  }

  /**
   * Simple test wrapper: send a command, and a closure to validate it.
   * See the main impl. for details.
   *
   * @param testCommand shell command exercising the FUSE mount
   * @param validatorFn closure run after the command; receives the shell
   */
  public void testWrapper(String testCommand, Closure validatorFn) {
    testWrapper(null, testCommand, validatorFn);
  }

  /**
   * Test wrapper takes care of several aspects of testing the FUSE mount.
   * 1) Does basic setup of a test dir from scratch.
   * 2) Runs shell "setupCommand" and asserts that it passes.
   * 3) Runs shell "testCommand".
   * 4) Sends results of (3) to validator, runs the validator.
   * 5) Removes the test dir so the next test is pure, and no inter-test
   *    dependencies are possible.
   *
   * Use this function to make it easy to write declarative FUSE tests which
   * mostly focus on the logic of the test.
   *
   * @param setupCommand optional shell command to prepare fixtures (may be null)
   * @param testCommand  the command under test
   * @param validatorFn  closure asserting on the shared {@code sh} shell state
   */
  public void testWrapper(String setupCommand, String testCommand,
                          Closure validatorFn) {
    /*
     * Note that for setup we use FUSE ops, but in the future it is formally
     * better to use "hadoop fs". The FUSE operations are faster due to no
     * JVM setup, so for the first iteration we go with them. But that makes
     * this test somewhat dependent on a working FUSE mount to begin with.
     */
    sh.exec("mkdir -p ${testdir}");
    /*
     * Some tests will require a file system command to set up the test,
     * for example making sub directories.
     */
    if (setupCommand != null) {
      sh.exec(setupCommand);
      LOG.info(setupCommand + " out : " + sh.getOut());
      logError(sh);
      assertEquals("setup passed ", 0, sh.getRet());
    }
    /* The main test is here. */
    sh.exec(testCommand);
    /*
     * Validator lambda is called here. It runs against the shell object to
     * confirm that the right out/err/return code occurred.
     */
    validatorFn(sh);
    /*
     * Completely clean up the testing sub directory; this guarantees that
     * each unit test is self contained.
     */
    sh.exec("rm -rf ${testdir}");
  }

  /** Unmounts the FUSE mount so the host is left clean after the suite. */
  @AfterClass
  public static void tearDown() {
    shRoot.exec("umount ${mount_point}");
    logError(shRoot);
    assertEquals("FUSE-DFS mount not cleaned up", 0, shRoot.getRet());
  }

  @Test
  public void testCd() {
    testWrapper(
        //The test: change into the test directory and print it.
        "cd ${testdir} && pwd ",
        //The lambda: validates the reported working directory.
        {
          LOG.info("After cd, pwd=" + sh.getOut()[0]);
          // The mount point path contains "hcfs" (/tmp/hcfs-test by
          // default), so pwd output should contain it after the cd.
          assertTrue("pwd contains 'hcfs' after change dir",
              sh.getOut()[0].contains("hcfs"));
          assertEquals("pwd exit code is 0", 0, sh.getRet());
        });
  }

  @Test
  public void testLs() {
    testWrapper("touch ${testdir}/non-trivial-fn",
        //Setup command: touch a file which we will "ls" later.
        "ls -altrh ${testdir}", //Test command: ls the dir.
        {
          LOG.info(sh.getOut());
          //assert that FUSE mount calculates total line (ls -altrh)
          assertTrue("Confirm that total is shown in ls",
              sh.getOut()[0].contains("total"));
          //now, we expect the touched file to be listed in the test
          //directory, since the setup command created it there.
          assertTrue("Confirm that the a file is shown in ls",
              sh.getOut()[1].contains("non-trivial-fn"));
          assertEquals("ls failed", 0, sh.getRet());
        });
  }

  @Test
  public void testMkDir() {
    testWrapper("mkdir ${testdir}/dir1 && cd ${testdir}/dir1 && pwd", {
          LOG.info(sh.getOut());
          //pwd should report the newly created directory.
          assertTrue("Confirm that dir1 is the new working dir. ",
              sh.getOut().toString().contains("${testdir}/dir1"));
          assertEquals("mkdir under ${testdir} succeeded ", 0,
              sh.getRet());
        }
    );
  }

  @Test
  public void testTouch() {
    testWrapper("touch ${testdir}/file1 && ls ${testdir}",
        {
          LOG.info(sh.getOut());
          //the ls that follows the touch should list the new file.
          assertTrue("Confirm that file1 is created/listed ",
              sh.getOut()[0].contains("file1"));
          assertEquals("touch ${testdir}/file1 + subsequent ls ", 0,
              sh.getRet());
        });
  }

  /**
   * TODO: make multiple files and cat them all. Since files will be
   * distributed to nodes, this is a better distributed test of the FUSE cat
   * operation.
   */
  @Test
  public void testZCat() {
    //copy this file in and cat it.
    File f = new File("/tmp/FUSETEST_bigtop");
    f.write("hi_bigtop\nhi_bigtop\n");
    testWrapper("/bin/cp -rf /tmp/FUSETEST_bigtop ${testdir}/cf2",
        /*
         * Required sleep: is HDFS FUSE strictly consistent?
         * Reveals HDFS-6072.
         */
        "sleep 2 && cat ${testdir}/cf2",
        {
          //contents of output stream should contain the written text.
          LOG.info("cat output = " + sh.getOut() + " " + sh.getErr() +
              " " + sh.getRet());
          assertTrue("Verifying " + sh.getOut().toString() +
              sh.getErr() + sh.getRet(),
              sh.getOut().toString().contains("hi_bigtop"));
          assertEquals("cat text succeeded", 0, sh.getRet());
        });
  }

  @Test
  public void testCp() {
    testWrapper(
        //setup: we make a target dir to test cp'ing with some files
        //to copy in.
        "mkdir ${testdir}/targetdir && touch ${testdir}/cp1 && touch " +
            "${testdir}/cp2",
        "/bin/cp -rf ${testdir}/cp* ${testdir}/targetdir/ ",
        /*
         * Main thing to validate here is that the files were copied.
         * TODO: We should also validate the file contents, but we don't
         * want to create a double test scenario for HDFS-6072 (see above).
         */
        {
          assertEquals("cp exit code = 0", 0, sh.getRet());
          sh.exec("ls -altrh ${testdir}/targetdir/");
          assertEquals("ls of target dir succeeded", 0, sh.getRet());
          assertTrue("copy of cp1",
              sh.getOut().toString().contains("cp1"));
          assertTrue("copy of cp2",
              sh.getOut().toString().contains("cp2"));
        });
  }

  @Test
  public void testMv() {
    //test that move recursively moves stuff
    testWrapper(
        "mkdir -p ${testdir}/subdir1 && touch " +
            "${testdir}/subdir1/innerfile",
        "mv ${testdir}/subdir1 ${testdir}/subdir2",
        {
          assertEquals("mv exit code = 0", 0, sh.getRet());
          sh.exec("ls -altrh ${testdir}/subdir2/");
          //assert that the inner file exists under the new name
          assertTrue(sh.getOut().toString().contains("innerfile"));
          //assert that original directory is gone
          sh.exec("ls -altrh ${testdir}");
          assertTrue(!sh.getOut().toString().contains("subdir1"));
        });
  }

  //TODO Test recursive removals
  @Test
  public void testRm() {
    testWrapper("touch ${testdir}/file-removed",
        "rm ${testdir}/file-removed",
        {
          assertEquals("Remove succeeded", 0, sh.getRet());
          sh.exec("ls ${testdir}");
          assertTrue(
              !sh.getOut().toString().contains("file-removed"));
        });
  }
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment