### Web Browsing:
- Flash*
- Java* (known security issues; install at your own risk)
- Shockwave*
```python
#!/usr/bin/env python
# Copyright (c) 2013 Nikola Kovacevic <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
```
```python
#!/usr/bin/python
#
# Script Name: getRedditJSONSubmissionData.py
# Usage:       ./getRedditJSONSubmissionData.py > redditData.json
# ----------------------------------------------------------------------------
# This script averages one request every two seconds. If the servers return
# data faster, you might need to increase the sleep time to avoid going over
# the API limits. Also, make sure you change the settings in your Reddit
# account so that you get 100 objects at a time, or pass the URL parameter
# "limit=100".
```
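The header above describes the request pattern, but the excerpt stops before the loop itself. A minimal sketch of that pattern, in the same Python 2 / `urllib2` style as the other excerpts (the feed URL and User-Agent string here are placeholders, not the script's actual values):

```python
import json
import time
import urllib2

# Placeholder feed and identifier; "limit=100" asks Reddit for 100 items per request.
url = "http://www.reddit.com/r/all/new/.json?limit=100"
headers = {'User-Agent': 'example JSON fetching script'}

while True:
    req = urllib2.Request(url, None, headers)
    listing = json.loads(urllib2.urlopen(req).read())
    for child in listing["data"]["children"]:
        print json.dumps(child["data"])  # one submission object per line on stdout
    time.sleep(2)  # roughly one request every two seconds, to stay under the API limits
```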
```python
import random

number_to_guess = 0
number_of_guesses = 0

def new_game():
    # Ask the player whether to start another round.
    global number_to_guess, number_of_guesses
    user_response = raw_input("Start a new game? Y/N > ")
    if user_response.lower() in ["no", "n"]:
        print "Bye!"
```
```python
# -*- coding: utf-8 -*-
import praw

# Map each old flair CSS class to its replacement class and flair text.
flair_dict = {"male_old_css_class": {"class": "male_new_css_class",
                                     "text": u"♂"},
              "female_old_css_class": {"class": "female_new_css_class",
                                       "text": u"♀"}, }
flair_mapping = []
```
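The excerpt stops before the mapping is used. A rough sketch of how `flair_mapping` could be filled and pushed back, assuming the PRAW 2.x API that was current in 2013 (the subreddit name and credentials are placeholders):

```python
r = praw.Reddit(user_agent="flair migration example")
r.login("username", "password")                 # PRAW 2.x-style login
subreddit = r.get_subreddit("example_subreddit")

# Rewrite every user whose current CSS class appears in flair_dict.
for flair in subreddit.get_flair_list(limit=None):
    old_class = flair.get("flair_css_class")
    if old_class in flair_dict:
        flair_mapping.append({"user": flair["user"],
                              "flair_text": flair_dict[old_class]["text"],
                              "flair_css_class": flair_dict[old_class]["class"]})

if flair_mapping:
    subreddit.set_flair_csv(flair_mapping)      # bulk flair update
```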
```python
import hashlib

# Hash algorithms the user may pick from.
valid_types = ['md5', 'sha1', 'ripemd160', 'sha224', 'sha256', 'sha384', 'sha512']
hash_type = None
while hash_type != "exit":
    hash_type = raw_input("Enter hash type -- valid are: {}\n>>>".format(
        ', '.join(valid_types))).lower()
    if hash_type in valid_types:
        hashed = hashlib.new(hash_type)  # construct a hash object by name
```
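The excerpt stops right after the hash object is constructed; the non-interactive core of what such a tool does is just `new()`, `update()`, and `hexdigest()`:

```python
import hashlib

hashed = hashlib.new("sha256")   # same construction-by-name as in the script
hashed.update("hello world")     # Python 2 str; on Python 3 this would need bytes
print hashed.hexdigest()         # 64-character hex digest of the input
```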
```python
import json
import urllib2
from random import randint

# Fetch the comment listing of one submission as JSON, with a custom User-Agent.
req = urllib2.Request("http://www.reddit.com/comments/1oa44z.json", None,
                      {'User-Agent': "wub_wub's script"})
json_response = json.loads(urllib2.urlopen(req).read())

# Element 0 of the response is the submission itself; element 1 holds the comments.
all_comments = json_response[1]["data"]["children"]
valid_comments = {}
winners = 0
given_games = []
for comment in all_comments:
```
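The loop body is not part of the excerpt. Each entry in that listing is a wrapper whose useful fields live under `["data"]`, so a sketch of how the comments could be inspected (the filtering rule here is illustrative, not the original script's):

```python
for comment in all_comments:
    data = comment["data"]
    author = data.get("author")
    body = data.get("body", "")
    # Keep one comment per author, skipping deleted comments.
    if author and author != "[deleted]" and author not in valid_comments:
        valid_comments[author] = body
print "Collected {} candidate comments".format(len(valid_comments))
```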
```python
import sqlite3
import time
import json
import urllib2

def get_submissions():
    url = "http://www.reddit.com/r/all/new/.json"                # page that we want to fetch
    headers = {'User-Agent': 'fetching new submissions script'}  # our script "identifier"
    req = urllib2.Request(url, None, headers)  # build the request with the headers set above
    data = urllib2.urlopen(req).read()         # open the URL and read the raw response body
```
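The excerpt stops once the raw response has been read. A minimal sketch of how the submissions could then be parsed and stored with `sqlite3` (the table layout and the chosen fields are assumptions, not the original script's schema):

```python
def store_submissions(raw_json, db_path="submissions.db"):
    # Keep a few fields per submission and ignore duplicates on re-runs.
    submissions = json.loads(raw_json)["data"]["children"]
    conn = sqlite3.connect(db_path)
    conn.execute("CREATE TABLE IF NOT EXISTS submissions "
                 "(id TEXT PRIMARY KEY, title TEXT, created_utc REAL)")
    for child in submissions:
        item = child["data"]
        conn.execute("INSERT OR IGNORE INTO submissions VALUES (?, ?, ?)",
                     (item["id"], item["title"], item["created_utc"]))
    conn.commit()
    conn.close()
```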
```python
from plugin import CommandPlugin

class test(CommandPlugin):
    def __init__(self, *args, **kwargs):
        CommandPlugin.__init__(self, *args, **kwargs)
        # Register the command; pass the bound method itself, not the result of calling it.
        self.add_commands({"test": self.test})

    def test(self, user, channel, parameters):
        print "executing test command"
        self.bot.msg(channel, "Test")
```
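Registering `self.test` rather than `self.test()` matters here: the plugin presumably stores the callable and only invokes it later, with the `user`, `channel`, and `parameters` of the message that triggered the command.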
```python
import requests
from time import time

removed_counter = 0
# Work through the IDs in batches of 100 per request.
for x in range(0, len(to_update), 100):
    items = to_update[x:x + 100]
    items_url = api_by_id.format(",".join(items))  # batch URL for this chunk of IDs
    request_time = time()
    print "[FILTERING] Starting request...", request_time
    # print "Request url", items_url
    try:
        r = requests.get(items_url, headers=headers, timeout=20)
    except requests.exceptions.RequestException:
```
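The excerpt cuts off inside the `try`/`except`. One way the loop body could continue (purely illustrative; `to_update`, `api_by_id`, and `headers` are defined earlier in the original script):

```python
        # Network error or timeout: note it, skip this batch, and move on.
        print "[FILTERING] Request failed, skipping this batch"
        continue
    if r.status_code == 200:
        print "[FILTERING] Request finished in", time() - request_time, "seconds"
        # ...process r.json() here and update removed_counter...
    else:
        print "[FILTERING] Unexpected status code:", r.status_code
```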