Skip to content

Instantly share code, notes, and snippets.

#!/usr/bin/env bash
# Installer fragment: resolve the install directory and the shell profile
# file to modify, based on the user's login shell.
# NOTE(review): consider `set -euo pipefail` for stricter error handling.
set -e
# Base and Shark directories
# BASE_DIR honors XDG_CONFIG_HOME when set, otherwise falls back to $HOME;
# SHARK_DIR can be overridden by the caller via the SHARK_DIR env var.
BASE_DIR=${XDG_CONFIG_HOME:-$HOME}
SHARK_DIR=${SHARK_DIR:-"$BASE_DIR/.shark"}
# Detect shell and choose profile
# NOTE(review): the case statement is truncated in this fragment — only the
# zsh arm is visible and no closing `esac` appears; other arms (bash, sh)
# presumably follow in the full script.
case $SHELL in
*/zsh) PROFILE=${ZDOTDIR:-"$HOME"}/.zshenv ;;
@evmcheb
evmcheb / decrypt.sol
Created April 1, 2022 05:53
Crabada decompiled contracts
# Palkeoramix decompiler.
# Decompiled EVM storage layout as emitted by the decompiler; this is
# pseudocode output, not compilable source — do not edit by hand.
# NOTE(review): the "unknownXXXXXXXX" identifiers are decompiler-generated
# placeholders (presumably unresolved 4-byte selectors) — verify against
# the contract ABI before relying on any of these names.
def storage:
owner is addr at storage 1
unknown56e2039a is mapping of uint256 at storage 2
unknown284dd0e2 is mapping of uint128 at storage 3
unknown09f13758 is uint256 at storage 4
unknowncda84ae8 is uint256 at storage 5
# Slot 6 is packed: a uint8 at offset 0 and an address at offset 8.
unknownd99a2d0a is uint8 at storage 6
unknown9e19916dAddress is addr at storage 6 offset 8
# Colab/Jupyter cell: fetch AlexeyAB's darknet fork and configure its
# Makefile for an accelerated build (OpenCV, CUDA, cuDNN incl. FP16, and
# shared-library output), then install the OpenCV dev dependencies.
# NOTE(review): the clone URL "github.com" looks like a scrape-rewritten
# mirror of github.com — confirm the intended origin before running.
!git clone https://github.com/AlexeyAB/darknet/
# NOTE(review): IPython magics normally take no space after '%' (i.e. `%cd`);
# the space here is likely a paste artifact.
% cd darknet
# Flip each build flag from 0 to 1 in place.
!sed -i 's/OPENCV=0/OPENCV=1/g' Makefile
!sed -i 's/GPU=0/GPU=1/g' Makefile
!sed -i 's/CUDNN=0/CUDNN=1/g' Makefile
!sed -i 's/CUDNN_HALF=0/CUDNN_HALF=1/g' Makefile
!sed -i 's/LIBSO=0/LIBSO=1/g' Makefile
!apt update
# NOTE(review): no `-y` flag — apt-get may prompt in non-interactive runs;
# also `python-opencv` is the legacy package name (python3-opencv on newer
# Ubuntu) — confirm against the Colab base image.
!apt-get install libopencv-dev python-opencv
!make clean
import os
import numpy as np
from tqdm import tqdm
from pymongo import MongoClient
# Bookmaker id (string key, as used by the odds feed) -> display name.
bookies = dict([
    ("44", "Betfair"),
    ("16", "bet365"),
    ("18", "Pinnacle"),
    ("5", "Unibet"),
])
from keras import backend as K
from keras.models import Model
from keras.models import Sequential
from keras.layers import Input, Dense, Dropout, Conv2D, Flatten, Activation, concatenate
from keras.optimizers import Adam
c = 0.6
def decorrelation_loss(neuron):
def loss(y_actual, y_predicted):
return K.mean(
# A snippet of how a comment is converted into a video
from moviepy.editor import *
# Build one video clip for a single comment part: the part's screenshot is
# shown for exactly the duration of its narration audio, slightly enlarged
# and centered on a dark background at 5 fps.
# NOTE(review): tid, comment, num, part, DIM and dark_grey are defined
# elsewhere in the original script — this is a fragment; the fx-chain order
# (resize before on_color) matters for the final frame size.
audio = AudioFileClip(f"{tid}/{comment.name}/{num}.mp3")
image = (ImageClip(f"{tid}/{comment.name}/{part.name}")
.set_duration(audio.duration) # using the fx library to effortlessly transform the video clip
.resize(1.2).on_color(size=DIM, color=dark_grey)
.set_fps(5)
.set_pos((0.5, 0.5), relative=True)
.set_audio(audio))
# Reads out a script using google text to speech api
from google.cloud import texttospeech
import sys
def work(tid):
    """Load the text inputs for one thread before text-to-speech rendering.

    Reads the thread's script (paragraphs separated by blank lines) from
    ``{tid}/script.txt`` and the profanity-replacement rules from
    ``resources/rules.txt``.

    NOTE(review): the pasted snippet had its indentation stripped; the
    structure below is reconstructed from the statement order.  The function
    also appears truncated — `lines` and `bad_words` are built but not yet
    used in this fragment.
    """
    with open(f"{tid}/script.txt", encoding='utf-8') as f:
        lines = f.read().strip().split("\n\n")
    with open("resources/rules.txt") as f:
        # each line is "badword, replacement"
        # NOTE(review): whitespace split() would leave the trailing comma
        # attached to the bad word if the file really is comma-separated —
        # confirm the actual rules.txt format before changing this.
        bad_words = [(x.split()[0], x.split()[1]) for x in f.readlines()]
# Grabs the best comments and splits into parts
import praw, re

# Grab the submission and split each top comment into sentence-ish parts.
# NOTE(review): URL is defined elsewhere in the original script; the pasted
# snippet had its indentation stripped — loop structure reconstructed below.
r = praw.Reddit()
thread = r.submission(url=URL)
thread.comment_sort = 'best' # get the best comments from the thread
# Compile once, outside the loop: the pattern is loop-invariant (the
# original recompiled it on every iteration).  It splits after sentence
# punctuation followed by one or more spaces.
punctuation_reg = re.compile('(?<=[.!,?:;-]) +')
for comment in thread.comments:
    split_parts = punctuation_reg.split(comment.body)
    # NOTE(review): loop body is truncated in this fragment — split_parts
    # is presumably consumed further on (e.g. rendered part by part).
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Conv2D, MaxPooling2D, GlobalAveragePooling2D
# Small sequential CNN for 100x100 RGB inputs: two 3x3 conv layers with
# ReLU, a 2x2 max-pool and 20% dropout between them.
# NOTE(review): the model definition appears truncated in this fragment —
# no pooling after the second conv, no flatten/dense head, and no
# compile() are visible here.
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(100, 100, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
import os

# Load every image under images_folder (resized to 100x100) and derive a
# binary label from the filename: 0 when "normal" appears in the name,
# 1 otherwise.
# NOTE(review): img_to_array / load_img are not imported in this snippet —
# they presumably come from keras.preprocessing.image in the original
# notebook.  The pasted loop had its indentation stripped; structure
# reconstructed below (and the redundant `((0))` parens removed).
samples = []
labels = []
images_folder = "images/"
for image in os.listdir(images_folder):
    samples.append(img_to_array(load_img(images_folder + image, target_size=(100, 100))))
    labels.append(0 if "normal" in image else 1)