Each of these commands will run an ad hoc HTTP static server in your current (or specified) directory, available at http://localhost:8000. Use this power wisely.
$ python -m SimpleHTTPServer 8000
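On Python 3, SimpleHTTPServer was folded into the http.server module, so the equivalent one-liner is:
$ python3 -m http.server 8000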
#!/bin/bash
# Usage: ./mediacru.sh $(cat urls.txt)
echo -e "\e[91mMediaCrush Rehoster\e[0m"
for f in "$@"; do
    echo -ne "\e[34mUploading:\e[0m $f"
    url="https://mediacru.sh/api/upload/url"
    out=$(curl -s -F "url=$f" "$url")
    hash=$(echo "$out" | sed -nre 's#.*"hash": "([^"]+)".*#\1#p')
    # mediacru.sh serves uploads at /<hash> (URL scheme assumed from the API's JSON reply)
    echo -e " -> https://mediacru.sh/$hash"
done
#!/bin/bash
# Download all images from a vidble.com album.
# Usage: ./vidble.sh <album-id>
id="$1"
host="http://vidble.com/album/"
mkdir "$id"
# Scrape the album page for image URLs, drop the _med thumbnail suffix,
# and prefix the host to get direct links.
wget -nv -O - "$host$id" | grep -e "<img src='" | grep -Eo '/[^" ]+(jpg|jpeg|JPG|GIF|gif|PNG|png)' | sed -e 's/_med//' -e 's@^@http://vidble.com@' > "$id/links.txt"
sed -i '/logo/d' "$id/links.txt"   # drop the site logo from the list
wget -i "$id/links.txt" -P "$id"   # fetch every link into the album directory
#!/bin/bash
# Rename the output html file from redditPostArchiver with the reddit thread title.
# https://github.com/sJohnsonStoever/redditPostArchiver
for f in *.html; do
    # Slurp the file and pull out whatever sits between <title> and </title>.
    title=$( awk 'BEGIN{IGNORECASE=1;FS="<title>|</title>";RS=EOF} {print $2}' "$f" )
    # Strip characters that are unsafe in filenames, then rename.
    mv -i "$f" "${title//[^a-zA-Z0-9\._\- ]}_$f"
done
import random
import subprocess
import urllib.request
import os
import sys
import requests
import threading
import timeit
from multiprocessing.pool import ThreadPool
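Only the imports of this script survive above; the body is elided. As a hedged sketch of the pattern those imports suggest (requests driven in parallel by multiprocessing.pool.ThreadPool), with urls and fetch as illustrative names that are not from the original:

from multiprocessing.pool import ThreadPool

import requests

# Sketch only: 'urls' and 'fetch' are assumed names, not part of the original script.
urls = ["https://example.com/a", "https://example.com/b"]

def fetch(url):
    # Download one URL and report its size; failures come back as None.
    try:
        r = requests.get(url, timeout=10)
        r.raise_for_status()
        return url, len(r.content)
    except requests.RequestException:
        return url, None

pool = ThreadPool(4)  # four worker threads
for url, size in pool.imap_unordered(fetch, urls):
    print(url, size)
pool.close()
pool.join()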
#!/bin/sh
# Quickly delete all traces of a user from the database and filesystem
# usage: ./deluser.sh username
USER=$1
USERID=$(sqlite3 ../database.db "select id from users where username = '${USER}'")
for table in albums comments images posts; do
    sqlite3 ../database.db "delete from ${table} where userid = ${USERID}"
done
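The snippet covers only the database half of the cleanup. A hedged sketch of the filesystem half, assuming uploads live in a per-user directory (the ../uploads path is hypothetical, not from the original script):

# Hypothetical: adjust ../uploads to wherever this app stores per-user files.
if [ -n "$USERID" ] && [ -d "../uploads/$USERID" ]; then
    rm -r "../uploads/$USERID"
fi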
#!/bin/bash
# USAGE: ./bam.sh URL
url=$1
# Derive the output filename by stripping the first 20 characters of the URL.
out=$(echo "$url" | sed 's/^.\{,20\}//')
# Scrape the page for its media URL: slice off the leading 241 characters,
# drop everything from the ","auto quality field onward, and unescape the slashes.
get=$(curl -s "$url" | grep ',"url":"' | sed -e 's/^.\{,241\}//' -e 's/","auto.*//' -e 's/\\//g')
wget -c "$get" -O "$out.mp4"
#!/usr/bin/python
# ------------------------------
# Script to parse reddit json
# Usage: cat *.json | ./redditJSON.py
# ------------------------------
import sys
reload(sys)
sys.setdefaultencoding('utf8')  # Python 2: force utf-8 as the default string encoding
import json
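The parsing body of the script is elided above. A minimal sketch of how a reddit listing read from stdin might be walked (the loop is assumed, not from the original; data/children/title/url follow reddit's listing JSON layout):

# Sketch only: the original script's parsing logic is elided.
payload = json.load(sys.stdin)
for child in payload['data']['children']:
    post = child['data']
    print post['title'], post['url']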
#!/usr/bin/env python
#
# Copyright (c) 2010 Giorgos Keramidas.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
import sqlite3
import time
import json
import urllib2

def get_submissions():
    url = "http://www.reddit.com/r/all/new/.json"  # the listing page we want to fetch
    headers = {'User-Agent': 'fetching new submissions script'}  # our script's identifier
    req = urllib2.Request(url, None, headers)  # build a request carrying the headers above
    data = urllib2.urlopen(req).read()  # open the url and read the raw response body
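get_submissions is cut off above; a hedged sketch of its likely continuation, decoding the listing and returning each submission's data dict (assumed, not from the original):

    # Assumed continuation: parse the JSON listing and return the
    # per-submission dicts that reddit nests under data.children.
    listing = json.loads(data)
    return [child['data'] for child in listing['data']['children']]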