@zechtz
Created March 2, 2017 12:11
simple page scraping utility example
require_relative 'boot'

require 'rails/all'

# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(*Rails.groups)

module AntiPlag
  class Application < Rails::Application
    # Settings in config/environments/* take precedence over those specified here.
    # Application configuration should go into files in config/initializers
    # -- all .rb files in that directory are automatically loaded.
    config.autoload_paths << "#{Rails.root}/lib"
  end
end
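With "#{Rails.root}/lib" added to the autoload paths above, the constants below resolve automatically as long as the file layout follows Rails' naming conventions. A minimal sketch of that layout (an assumption; the gist does not show its file names):

# app/models/plag.rb -> class Plag
# lib/anemon.rb      -> module Anemon (with Anemon::Scrapper defined inside)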
# Thin wrapper exposing the scraper through a single class method.
class Plag
  include Anemon

  # class-level instance variable: one shared Scrapper for every call
  @anemon = Anemon::Scrapper.new

  def self.scrap(url)
    @anemon.scrap(url)
  end
end
require 'anemone'
require 'nokogiri'
require 'open-uri'

module Anemon
  class Scrapper
    def scrap(url)
      filename = url.gsub("http://", "").split(".").join("_").strip + ".txt"
      # only scrape pages you haven't scraped yet
      if File.file?(filename)
        puts "This page has already been scraped"
      else
        puts "scraping the web for data..."
        data = []
        crawl(url).each do |urli|
          html_data = URI.open(urli).read # plain Kernel#open on pre-2.5 Rubies
          nokogiri_object = Nokogiri::HTML(html_data)
          elements = nokogiri_object.xpath("//p")
          elements.each do |element|
            # collect every paragraph; the original reassigned `data` here,
            # so only the last paragraph survived
            data << element.text
          end
        end
        write_to_file(url, data)
        store_visited_link(url)
      end
    end
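
    # Example of the derived filename (illustrative, not from the gist):
    #   scrap("http://google.com") writes google_com.txt and appends the
    #   URL to visited_links.txt; a second call with the same URL prints
    #   the "already been scraped" notice and returns early.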
    # Reads the uploaded document and memoizes its lines for comparative.
    # (The original returned the lines without storing them, leaving
    # @upload_docs nil when comparative ran.)
    def uploaded_docs(file_name)
      @upload_docs = File.readlines(file_name)
    end
    # Compares the uploaded document (loaded via uploaded_docs) against
    # the scraped file line by line and records matches in report.txt.
    def comparative(file_name)
      @scrap_lines = File.readlines(file_name)
      matches = @upload_docs.select { |upload_doc_line| @scrap_lines.include?(upload_doc_line) }
      if matches.empty?
        puts "No instances of plagiarism detected in the application"
      else
        # "a" appends and creates report.txt when missing; the original
        # reopened it with "w" for every line, keeping only the last match
        File.open("report.txt", "a") { |file| matches.each { |line| file.puts(line) } }
      end
    end
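
    # Assumed end-to-end usage (file names are illustrative, not from the gist):
    #   scrapper = Anemon::Scrapper.new
    #   scrapper.scrap("http://example.com")          # writes example_com.txt
    #   scrapper.uploaded_docs("student_essay.txt")   # document under suspicion
    #   scrapper.comparative("example_com.txt")       # matching lines -> report.txt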
    private
    # Crawls the site starting at url and collects the URL of every page visited.
    def crawl(url)
      puts "crawling the web..."
      urls = []
      Anemone.crawl(url) do |anemone|
        anemone.on_every_page do |page|
          urls.push(page.url)
        end
      end
      # returns the urls as an array
      urls
    end
    # Writes the scraped paragraphs to a file named after the URL,
    # e.g. "http://google.com" -> "google_com.txt".
    def write_to_file(url, data)
      filename = url.gsub("http://", "").split(".").join("_").strip + ".txt"
      # File.file? is true only for regular files; File.exist? is also
      # true for directories. Either way File.open(name, "w") creates the
      # file when missing, so no separate File.new branch is needed.
      File.open(filename, "w") do |file|
        # one paragraph per line so File.readlines can compare line by line
        data.each { |paragraph| file.puts(paragraph) }
      end
      @scrap_file = File.readlines(filename)
    end
    # Appends the URL to visited_links.txt; "a" creates the file when it
    # does not exist, so the original File.new branch was redundant.
    # (Note: the gist only ever writes this file; scrap keys off the
    # per-URL text file instead.)
    def store_visited_link(url)
      File.open("visited_links.txt", "a") { |file| file.puts(url) }
    end
  end
end
zechtz commented Mar 2, 2017

# in the Rails console
Plag.scrap('http://google.com')
# this will create a file google_com.txt containing the scraped data
# it will also append http://google.com to visited_links.txt, creating it if it doesn't exist
