import diff_match_patch
import re
from pathlib import Path
from typing import List, Tuple
# --- Configuration Variables (Text Colors) ---
TEXT1_DELETION_COLOR = "#ff0000" # Red
TEXT2_INSERTION_COLOR = "#008000" # Green
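The preview above cuts off after the imports and the two color settings. Those names suggest the gist renders a colored HTML diff of two texts; a minimal sketch of that idea, continuing directly from the imports and variables above and using the diff_match_patch API (diff_main, diff_cleanupSemantic), follows. The render_diff_html helper and the sample strings are illustrative placeholders, not the gist's actual code.

# Sketch only: colored HTML diff built from the imports and color settings above.
def render_diff_html(text1: str, text2: str) -> str:
    dmp = diff_match_patch.diff_match_patch()
    diffs = dmp.diff_main(text1, text2)
    dmp.diff_cleanupSemantic(diffs)  # merge tiny edits into readable chunks
    parts: List[str] = []
    for op, segment in diffs:
        if op == dmp.DIFF_DELETE:
            parts.append(f'<del style="color:{TEXT1_DELETION_COLOR}">{segment}</del>')
        elif op == dmp.DIFF_INSERT:
            parts.append(f'<ins style="color:{TEXT2_INSERTION_COLOR}">{segment}</ins>')
        else:
            parts.append(segment)
    return "".join(parts)

print(render_diff_html("the quick brown fox", "the quick red fox"))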
@freelze
freelze / chrome.css
Last active January 17, 2020 11:51
Firefox 72.0
/* Firefox 72.0 */
/* You need to open about:config and set toolkit.legacyUserProfileCustomizations.stylesheets to true. */
/*@namespace url("http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"); set default namespace to XUL */
/* https://www.reddit.com/r/FirefoxCSS/comments/elczdc/correctly_hide_url_bar_in_firefox_720_using/fdiwmv7/?utm_source=share&utm_medium=web2x */
#navigator-toolbox {z-index: 1;}
.tab-content[selected="true"] {
background: rgba(65, 85, 145, 0.4) !important;
}
/* Firefox 70.0.1 */
@namespace url("http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"); /* set default namespace to XUL */
/*
#TabsToolbar {
visibility: collapse !important;
}
#titlebar-buttonbox{height: var(--tab-min-height) !important;}
#titlebar{ margin-bottom: calc(-2px - var(--tab-min-height) ) !important;}
*/
@freelze
freelze / plurk_crawler_MP.py
Last active September 5, 2018 03:29
Plurk media crawler (without response media), using multiprocessing to speed up the crawling.
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
Compare MultiProcessing and Normal Way
Case: crawl 84 images in 81 Plurk posts
(sec)
MultiProcessing :
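The timing figures in the docstring are cut off in this preview. As a rough sketch of the multiprocessing approach the description refers to, parallel image downloads with multiprocessing.Pool might look like the following; the URLs and the download_image helper are placeholders, not the gist's actual crawler.

# Sketch only (not the gist itself): download images in parallel with a Pool.
import multiprocessing
import requests

def download_image(url):
    resp = requests.get(url, timeout=10)
    filename = url.rsplit("/", 1)[-1]          # save under the file's own name
    with open(filename, "wb") as f:
        f.write(resp.content)
    return filename

if __name__ == "__main__":
    urls = ["https://example.com/a.jpg", "https://example.com/b.png"]  # placeholder list
    with multiprocessing.Pool(processes=4) as pool:                    # 4 workers is an assumption
        saved = pool.map(download_image, urls)
    print(saved)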
@freelze
freelze / plurk_crawler.py
Last active September 4, 2018 11:56
Slow Plurk media (jpg, png, gif) crawler
#!/usr/bin/python
# -*- coding:utf-8 -*-
# API: https://github.com/clsung/plurk-oauth
# You can retrieve your app keys via the test tool at http://www.plurk.com/PlurkApp/
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_TOKEN_SECRET = ''
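A hedged sketch of how those four (currently empty) credential variables are typically used with the linked clsung/plurk-oauth library follows; the class and method names (PlurkAPI, authorize, callAPI) are my reading of that library's README, so verify them against the repo before relying on this.

# Hedged sketch, not the gist itself: authenticate and fetch recent plurks.
from plurk_oauth import PlurkAPI   # assumed import path from clsung/plurk-oauth

plurk = PlurkAPI(CONSUMER_KEY, CONSUMER_SECRET)
plurk.authorize(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)

# /APP/Timeline/getPlurks returns recent posts; media links (jpg/png/gif)
# can then be pulled out of each plurk's content.
timeline = plurk.callAPI('/APP/Timeline/getPlurks')
print(timeline)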
// If sheetID is a global variable, create() gets called three times.
var sheetID = create() // line 84
/* What I have so far: create() adds a new Google spreadsheet and returns its sheetID.
 * Three of my functions (crawl the traffic, reset the sent-message count, delete a specific trigger) need that sheetID,
 * but after running the main program Start(),
 * multiple sheets get created.
 * Is that because sheetID is a global variable, so create() is called 3 times(?)
 * Is there any way to store sheetID and make it available to the other functions?
 */
@freelze
freelze / YzudormDataflowMonitor.gs
Last active June 20, 2018 06:53
Yuan Ze University dorm network traffic reminder. (Sends a LINE notification when your traffic exceeds limitedDataflow.)
// reference: https://stackoverflow.com/questions/21621019/google-apps-script-login-to-website-with-http-request, https://gist.github.com/erajanraja24/02279e405e28311f220f557156363d7b
// Need to insert a library(Resources -> Library):M1lugvAXKKtUxn_vdAG9JZleS6DrsjUUV
var student_id = 'your account';
var password = 'your password';
var limitedDataflow = 0; // send a LINE notification when traffic reaches this amount
var LineNotifyToken = "your LINE Notify Token";
function lineNotify(token, msg){
url = "https://notify-api.line.me/api/notify"
headers = {
@freelze
freelze / MangaReminder.gs
Last active June 19, 2018 14:36
Scrapes the mangakakalot website and sends you a Telegram message when the manga update.
// reference: https://github.com/ocordova/gas-telegram-bot, https://github.com/we684123/Telegram_bot_example, http://fu7771.blogspot.com/2017/08/google-script-telegram-bot.html
// https://api.telegram.org/botKEY/setWebhook?url=https://...
var id = ""
var key = ""
var sheetID = ""
function doPost(e) {
var update = JSON.parse(e.postData.contents);
// Make sure this update is of type 'message'
if (update.hasOwnProperty('message')) {
var msg = update.message;
@freelze
freelze / YZU_Dorm_Dataflow_Reminder_scheduler.py
Last active June 19, 2018 15:38
Periodically crawls the dorm network traffic and sends a LINE Notify reminder when it exceeds a set threshold.
# You need to change 4 values: studentID, password, LINE_TOKEN, DataFlow
# $pip install schedule
import requests
from bs4 import BeautifulSoup
from lxml import html
import re
import schedule
import time
def job():
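The preview stops at the job() definition. For context, a minimal sketch of the two moving parts, the LINE Notify POST (real endpoint, Bearer-token header, form field named message) and the schedule loop that re-runs the check, is shown below; the 30-minute interval, the line_notify and check_dataflow helpers, and the token placeholder are assumptions rather than the gist's actual values, and the sketch reuses the requests, schedule, and time imports above.

# Sketch only: LINE Notify call plus the schedule loop that would drive job().
LINE_TOKEN = 'your LINE Notify token'   # placeholder

def line_notify(message):
    # LINE Notify expects a Bearer token and a form field named 'message'.
    requests.post('https://notify-api.line.me/api/notify',
                  headers={'Authorization': 'Bearer ' + LINE_TOKEN},
                  data={'message': message})

def check_dataflow():
    # In the gist, job() scrapes the dorm-traffic page with BeautifulSoup and
    # compares it against DataFlow; here it just sends a test message.
    line_notify('dorm traffic check ran')

schedule.every(30).minutes.do(check_dataflow)   # interval is an assumption
while True:
    schedule.run_pending()
    time.sleep(60)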
@freelze
freelze / YZU_dorm_dataFlow_Reminder(Selenium).py
Last active June 19, 2018 09:08
Periodically crawls the dorm network traffic and sends a LINE Notify reminder when it exceeds a set threshold (using Selenium).
# You need to change 5 values: Chrome location, studentID, password, LINE_TOKEN, DataFlow
# Put chromedriver.exe in the same location as this Python program.
# Download chromedriver.exe: http://chromedriver.chromium.org/downloads
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import schedule # Python job scheduling for humans.
import time
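The preview ends at the imports. A minimal sketch of how they are usually wired together, headless Chrome driven by chromedriver and a login form filled via send_keys, is below; the login URL and element IDs are placeholders rather than the real dorm-traffic portal, and it uses the current Selenium 4 style (webdriver.Chrome(options=...)) instead of the 2018-era executable_path form.

# Sketch only: headless Chrome login flow using the imports above.
# The URL and element IDs are placeholders, not the real portal's markup.
options = Options()
options.add_argument('--headless')
# options.binary_location = r'C:\path\to\chrome.exe'   # Chrome location, if needed

driver = webdriver.Chrome(options=options)   # expects chromedriver on PATH or next to this script
try:
    driver.get('https://example.edu/login')                                  # placeholder URL
    driver.find_element(By.ID, 'studentID').send_keys('your student ID')     # placeholder element id
    driver.find_element(By.ID, 'password').send_keys('your password' + Keys.RETURN)
    print(driver.find_element(By.TAG_NAME, 'body').text)                     # scraped page text
finally:
    driver.quit()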