Created June 14, 2019 03:50
This code reads a document `doc1.txt` and prints the frequency of each word in the document.
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.nio.charset.Charset;
import java.util.List;
import java.util.StringTokenizer;
import java.io.IOException;

class Distance {
    public static void main(String[] args) throws IOException {
        // Maps each word in doc1.txt to its number of occurrences
        HashMap<String, Integer> doc1 = new HashMap<String, Integer>();
        List<String> lines = Files.readAllLines(Paths.get("doc1.txt"), Charset.defaultCharset());
        for (String line : lines) {
            // Split the line into words on spaces
            StringTokenizer words = new StringTokenizer(line, " ");
            while (words.hasMoreTokens()) {
                String token = words.nextToken();
                if (doc1.containsKey(token)) {
                    // Word seen before: increment its count
                    int count = doc1.get(token);
                    count++;
                    doc1.put(token, count);
                } else {
                    // First occurrence of the word
                    doc1.put(token, 1);
                }
            }
        }
        System.out.println(doc1.toString());
    }
}
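For reference, the same per-token counting can be written more compactly with `HashMap.merge` (Java 8+). This is a minimal sketch, not part of the original gist; the class name `WordCount` and the sample file name `doc1.txt` are assumptions.

import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.StringTokenizer;
import java.io.IOException;

class WordCount {
    public static void main(String[] args) throws IOException {
        HashMap<String, Integer> counts = new HashMap<>();
        // Assumes doc1.txt sits in the working directory, as in the gist above
        List<String> lines = Files.readAllLines(Paths.get("doc1.txt"), Charset.defaultCharset());
        for (String line : lines) {
            StringTokenizer words = new StringTokenizer(line, " ");
            while (words.hasMoreTokens()) {
                // merge inserts 1 for a new key, otherwise adds 1 to the existing count
                counts.merge(words.nextToken(), 1, Integer::sum);
            }
        }
        System.out.println(counts);
    }
}

Using `merge` avoids the separate `containsKey`/`get`/`put` branches and makes it harder to initialize a new word's count incorrectly. Note that `HashMap` does not guarantee any particular ordering of the printed entries.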