Naive Shortest Sub-Segment
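A brute-force solution to the shortest sub-segment problem: the program reads a paragraph on the first line of stdin, a count n on the next line, and then n query words; it prints the shortest contiguous run of paragraph words that contains every query word (case-insensitively, with periods stripped), or NO SUBSEGMENT FOUND if no such run exists. The listing below is the original gist with the window scan tidied up and commented.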
#include <algorithm>
#include <cctype>
#include <deque>
#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>

using namespace std;
// Lower-cases every character of the input string.
string to_lower(string input)
{
    transform(input.begin(), input.end(), input.begin(), ::tolower);
    return input;
}

// Strips period characters; other punctuation is left untouched.
string remove_special_chars(string input)
{
    input.erase(remove_if(input.begin(), input.end(), [](char c) { return c == '.'; }), input.end());
    return input;
}
int main()
{
    // Maximum of 14 characters per word (problem constraint).
    string paragraph;
    getline(cin, paragraph);
    paragraph = remove_special_chars(paragraph);
    istringstream paragraph_stream(paragraph);

    int word_count;
    cin >> word_count;

    // Split the cleaned paragraph into whitespace-separated tokens.
    deque<string> tokens{ istream_iterator<string>{paragraph_stream}, istream_iterator<string>{} };
    auto tokens_bkp = tokens;
    int tokens_count = static_cast<int>(tokens.size());

    // Read the query words, lower-cased so the comparison below is case-insensitive.
    vector<string> words;
    for (int i = 0; i < word_count; ++i)
    {
        string word;
        cin >> word;
        words.push_back(to_lower(word));
    }

    bool found = false;
    deque<string> subsegment_tokens;
    // Naive search: word_count is reused as the current window size. Starting at the
    // number of query words, try each window size in turn; for a given size, slide the
    // window across the tokens from the left and report the first window that contains
    // every query word.
    while (word_count <= tokens_count)
    {
        while (static_cast<int>(tokens.size()) >= word_count)
        {
            auto it = tokens.begin();
            auto it_bkp = it;
            if (it == tokens.end())
            {
                break;
            }

            if (subsegment_tokens.empty())
            {
                // First window of this size: copy the leading tokens, lower-cased.
                for (int i = 0; i < word_count; ++i)
                {
                    if (it == tokens.end())
                    {
                        break;
                    }
                    subsegment_tokens.push_back(to_lower(*it));
                    ++it;
                }
            }
            else
            {
                // Slide the window one token to the right: drop the token that was
                // removed from the front of 'tokens' and append the new rightmost one.
                subsegment_tokens.pop_front();
                auto next_token_it = it + word_count - 1;
                auto next_token = to_lower(remove_special_chars(*next_token_it));
                subsegment_tokens.push_back(next_token);
            }

            // Does the current window contain every query word?
            bool all_words_in_subsegment = true;
            for (const auto& word : words)
            {
                all_words_in_subsegment &= find(subsegment_tokens.begin(), subsegment_tokens.end(), word) != subsegment_tokens.end();
            }

            if (all_words_in_subsegment)
            {
                // Print the window with its original casing.
                for (int i = 0; i < word_count; ++i)
                {
                    cout << remove_special_chars(*it_bkp) << " ";
                    ++it_bkp;
                }
                cout << endl;
                found = true;
                break;
            }
            else
            {
                tokens.pop_front();
            }
        }

        if (found)
        {
            break;
        }

        // No window of this size works: restore the tokens and try a larger window.
        tokens = tokens_bkp;
        subsegment_tokens.clear();
        word_count++;
    }
    if (!found)
    {
        cout << "NO SUBSEGMENT FOUND" << endl;
    }

    return 0;
}
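A sample session, for illustration only (the file name shortest_subsegment.cpp and the compiler invocation are assumptions, not part of the gist). The four lines after the run command are typed on stdin; the last line is the program's output (it also emits a trailing space after the final word):

$ g++ -std=c++11 -o shortest_subsegment shortest_subsegment.cpp
$ ./shortest_subsegment
the cat sat on the mat. the dog barked at the cat.
2
cat
dog
dog barked at the cat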