A script for beeminding a thesis: it posts line-count and focus-time datapoints to Beeminder and schedules weekends off.
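In outline: the script counts the non-comment lines in every .tex file in `folder`, diffs those counts against the last run recorded in `state_file`, and posts the total change as a datapoint on the thesis goal; it likewise totals the focus-session times in `focus_file` and posts them (in hours) to a "focusing" goal, then inserts flat weekend segments into both goals' roads. Passing -d or --dry-run prints what would be posted without writing to Beeminder or the state files. For example (the file name here is illustrative; use whatever you saved the script as):

    python3 beemind_thesis.py --dry-run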
#!/usr/local/bin/python3

import requests
import hashlib
import time as tt
import ast
import os
import sys

# configuration: fill these in before running.
folder = '<where_your_thesis_is>'  # directory holding the thesis .tex files
state_file = 'some_path/Beeminder_Thesis/working_hard.csv'  # line counts from previous runs
focus_file = "some_path/focus_times.csv"  # one focus-session length (seconds) per line
auth_token = {'auth_token': "<your_auth_token_here>"}
username = '<your_username_here>'
goal = 'thesis'

class DictDiffer(object):
    """
    from: http://stackoverflow.com/questions/1165352/calculate-difference-in-keys-contained-in-two-python-dictionaries
    Calculate the difference between two dictionaries as:
    (1) items added
    (2) items removed
    (3) keys same in both but changed values
    (4) keys same in both and unchanged values
    """
    def __init__(self, current_dict, past_dict):
        self.current_dict, self.past_dict = current_dict, past_dict
        self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
        self.intersect = self.set_current.intersection(self.set_past)
    def added(self):
        return self.set_current - self.intersect
    def removed(self):
        return self.set_past - self.intersect
    def changed(self):
        return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
    def unchanged(self):
        return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
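
# A quick illustration of DictDiffer on per-file line counts (the file names
# and numbers here are made up):
#   d = DictDiffer({'a.tex': 10, 'b.tex': 5}, {'a.tex': 8, 'c.tex': 2})
#   d.added()   -> {'b.tex'}
#   d.removed() -> {'c.tex'}
#   d.changed() -> {'a.tex'}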

def strip_and_count_latex(text_string, **kwargs):
    """Count the LaTeX lines that look like real content: skip comments and
    lines shorter than 5 characters (blank lines, lone braces, etc.)."""
    count = 0
    for l in text_string.splitlines():
        stripped = l.strip()
        if not stripped.startswith("%") and len(stripped) >= 5:
            count += 1
    return count
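
# For example (illustrative), the string "% a comment\n\\section{Intro}\nhi\n"
# counts as one line: the comment and the 2-character line are both skipped.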

def load_file(filename, **kwargs):
    with open(filename, 'r') as f:
        return f.read()

def hash_files(file_text_list, **kwargs):
    m = hashlib.md5()
    for t in file_text_list:
        m.update(t.encode())
    return m.hexdigest()
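
# The md5 digest of the current file contents is used below as the Beeminder
# "requestid", which makes datapoint creation idempotent: re-running the
# script on unchanged files will not create a duplicate datapoint.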

def rollback_state_file(dry_run=False, **kwargs):
    """Delete the last line of the state file (used to undo a failed post)."""
    if os.path.getsize(state_file) == 0:
        return
    # from: http://stackoverflow.com/questions/1877999/delete-final-line-in-file-via-python
    # Scan backwards from just before the trailing newline, looking for the
    # newline that ends the second-to-last line.
    with open(state_file, 'rb+') as f:
        f.seek(0, os.SEEK_END)
        pos = f.tell() - 2
        while pos > 0:
            f.seek(pos, os.SEEK_SET)
            if f.read(1) == b"\n":
                break
            pos -= 1
        if dry_run:
            print(pos)
            return
        # Keep everything up to and including that newline; if none was found
        # the file held a single line, so empty it completely.
        f.truncate(pos + 1 if pos > 0 else 0)

def append_to_state_file(date, hashtext, data, dry_run=False, **kwargs):
    text = str(date) + "," + str(hashtext) + "," + str(data) + "\n"
    if not dry_run:
        with open(state_file, 'a') as f:
            f.write(text)
    else:
        print(text)

def read_from_state_file(**kwargs):
    if os.path.getsize(state_file) == 0:
        raise FileEmptyException(state_file)
    with open(state_file, 'r') as f:
        for line in f:
            pass
        last = line  # only the final line matters
    date, hashtext, data = last.split(',', maxsplit=2)
    data = ast.literal_eval(data)
    return date, hashtext, data
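
# Each state-file line is "timestamp,md5,counts-dict"; an illustrative example:
#   1492486800.0,5d41402abc4b2a76b9719d911017c592,{'intro.tex': 120, 'results.tex': 300}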

def beeminder_create_datapoint(username, goal, value, comment, hashtext, dry_run=False, **kwargs):
    url = "https://www.beeminder.com/api/v1/users/" + username + "/goals/" + goal + "/datapoints.json"
    payload = {"value": float(value), "comment": str(comment), "requestid": hashtext}
    payload.update(auth_token)
    if not dry_run:
        r = requests.post(url, data=payload)
        r.raise_for_status()
        return r
    from collections import namedtuple
    X = namedtuple("X", ['text'])
    return X("this was just a dry run")
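
# On a dry run, the namedtuple above stands in for requests.Response: it only
# carries the .text attribute, which is all that main() reads from the result.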

def beeminder_weekends_off(username, goal, dry_run=False, **kwargs):
    from datetime import date, time, datetime, timedelta
    url = "https://www.beeminder.com/api/v1/users/" + username + "/goals/" + goal + ".json"
    # get the road:
    with requests.Session() as s:
        r = s.get(url, params=auth_token)
        data = r.json()
        road = data['roadall']
        # one week ahead: find the Friday after next (weekday 4).
        current_weekday = date.fromtimestamp(tt.time()).weekday()
        editable_next_fri = timedelta(7 - current_weekday + 4) if current_weekday > 4 else timedelta(4 - current_weekday)
        date_wanted = date.fromtimestamp(tt.time()) + editable_next_fri + timedelta(7)
        wanted_slope = 0
        # read the whole road backwards, figure out what the slope should be on
        # the date we want, and what it should be after the break.
        for i, (t, val, slope) in reversed(list(enumerate(road))):
            if date.fromtimestamp(t) == date_wanted + timedelta(3) and slope == 0:
                return  # we already have a break...
            if date.fromtimestamp(t) < date_wanted:
                break  # the last point we recorded contains the slope we want.
            else:
                wanted_slope = slope
            if i == 0:
                raise RuntimeError("no road segment found before " + str(date_wanted))
        # we now have the wanted slope and the index to insert at (i), so we
        # can splice in the flat weekend segment:
        road = data["roadall"][0:i + 1] + \
            [[int(datetime.combine(date_wanted, time(0)).timestamp()), None, wanted_slope]] + \
            [[int(datetime.combine(date_wanted + timedelta(3), time(0)).timestamp()), None, 0]] + \
            data["roadall"][i + 1:]
        payload = {"roadall": road}
        payload.update(auth_token)
        print(payload)
        if not dry_run:
            r = s.put(url, json=payload)
            print(r.request.body)
            r.raise_for_status()
        else:
            from collections import namedtuple
            X = namedtuple("X", ['text'])
            r = X("this was just a dry run")
    return r
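
# "roadall" rows are [unix-timestamp, value, rate] segments of the goal's
# yellow brick road; splicing in a rate-0 segment from that Friday to the
# following Monday flattens the road over the weekend.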

# def prepare_file_for_diff(filename):
#     strings = []
#     with open(filename, 'r') as f:
#         for line in f:
#             # ignore commented lines:
#             if line.strip().startswith('%') \
#                     or line.strip().startswith("\\begin") \
#                     or line.strip().startswith("\\end"):
#                 strings += [line]
#     # now we have a string containing the non-commented lines, lets
#     # combine the "just text" blocks into single lines:

# def diff_files(newfile, folder):
#     # we want to look at every different line in these files and make sure
#     # it isn't in another file somewhere
#     import difflib
#     # so, first we get a list of lines from the new file:
#     lines = []
#     with open(newfile, 'r') as nf:
#         for line in nf:
#             lines += [line]
#     # then we diff this file with its corresponding one in the folder, assuming it exists:
#     oldfilelines = []
#     if os.path.exists(os.path.join(folder, os.path.basename(newfile))):
#         with open(os.path.join(folder, os.path.basename(newfile)), 'r') as of:
#             for line in of:
#                 oldfilelines += [line + '\n']
#     diff = difflib.ndiff(lines, oldfilelines, linejunk=lambda x: x.strip().startswith("%"))
#     newlines = difflib.restore(diff, 1)
#     # diff these diffed lines with other ones in the folder:

class FileEmptyException(Exception):
    def __init__(self, filename):
        super().__init__(filename)  # so printing the exception shows the file
        self.filename = filename

def read_focus_times(filename, dry_run=False, **kwargs):
    """Read one focus-session length (in seconds) per line, then empty the
    file so the same sessions are not counted twice."""
    total = 0
    lines = []
    if os.path.getsize(filename) == 0:
        raise FileEmptyException(filename)
    with open(filename, 'r') as f:
        for line in f:
            lines += [line]
            total += int(line)
    sessions = len(lines)
    if dry_run:
        return sessions, total, lines
    with open(filename, 'w') as f:
        f.truncate(0)
    return sessions, total, lines
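
# The focus file holds one integer (seconds of focused work) per line; an
# illustrative example with two sessions:
#   1500
#   2700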

def prepare_focus_info(filename, dry_run=False, **kwargs):
    focus_sessions, time_focused, focus_lines = read_focus_times(filename, dry_run=dry_run)
    focus_requestid = hash_files([str(focus_sessions), str(time_focused)] + focus_lines)
    # time focused needs to be in hours, not seconds.
    time_focused = float(time_focused) / (60. * 60.)
    focus_comment = "Focused for " + str(time_focused) + " hours over " + str(focus_sessions) + " sessions."
    return time_focused, focus_comment, focus_requestid, focus_lines

def rollback_focus_file(lines, filename, dry_run=False, **kwargs):
    """Write the session lines back if posting the datapoint failed."""
    if dry_run:
        return
    with open(filename, 'w') as f:
        f.writelines(lines)

def prepare_lines_info(dry_run=False, **kwargs):
    text_list = []
    nums = {}
    for entry in filter(lambda x: x.is_file() and x.name.endswith(".tex") and not x.name.startswith("_"), os.scandir(folder)):
        try:
            text = load_file(entry.path)
        except FileNotFoundError:
            continue  # the file vanished between scandir() and open(); skip it
        text_list += [text]
        nums[entry.name] = strip_and_count_latex(text)
    requestid = hash_files(text_list)
    try:
        _, _, nums_old = read_from_state_file()
    except FileEmptyException:
        # we are done... this is an initial value:
        if not dry_run:
            append_to_state_file(tt.mktime(tt.localtime()), requestid, nums)
        raise
    if not dry_run:
        append_to_state_file(tt.mktime(tt.localtime()), requestid, nums)
    diff = DictDiffer(nums, nums_old)
    added_files = dict(filter(lambda x: x[0] in diff.added(), nums.items()))
    # removed files only exist in the *old* counts, so look them up there:
    removed_files = dict(filter(lambda x: x[0] in diff.removed(), nums_old.items()))
    changed_files = dict(map(lambda x: (x, abs(nums[x] - nums_old[x])), diff.changed()))
    comment = ""
    if len(added_files) != 0:
        comment += "Added " + str(len(added_files)) + " files for a total of " + str(sum(added_files.values())) + " lines added. "
    if len(removed_files) != 0:
        comment += "Removed " + str(len(removed_files)) + " files for a total of " + str(sum(removed_files.values())) + " lines removed. "
    if len(changed_files) != 0:
        comment += "Changed " + str(len(changed_files)) + " files for a total of " + str(sum(changed_files.values())) + " lines changed. "
    value = abs(sum(removed_files.values())) + abs(sum(changed_files.values())) + abs(sum(added_files.values()))
    comment += "A total of " + str(value) + " lines were changed."
    return value, comment, requestid
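
# The datapoint value is the total number of lines that changed across all
# .tex files: lines in newly added files, lines that were in removed files,
# and the absolute change in line count for every edited file.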

def main(dry_run):
    # thesis goal: count changed LaTeX lines.
    try:
        value, comment, requestid = prepare_lines_info(dry_run=dry_run)
        print(comment)
        if value > 0:
            r = beeminder_create_datapoint(username, goal, value, comment, requestid, dry_run=dry_run)
            print(r.text)
    except requests.HTTPError as e:
        print("error: ", e)
        rollback_state_file(dry_run=dry_run)
    except FileEmptyException as e:
        print("no history to add...", e)
    # focusing goal: report logged focus sessions.
    try:
        time_focused, focus_comment, focus_requestid, focus_lines = prepare_focus_info(focus_file, dry_run=dry_run)
        print(focus_comment)
        r = beeminder_create_datapoint(username, "focusing", time_focused, focus_comment, focus_requestid, dry_run=dry_run)
        print(r.text)
    except requests.HTTPError as e:
        print("error: ", e)
        rollback_focus_file(focus_lines, focus_file, dry_run=dry_run)
    except FileEmptyException as e:
        print("nothing to add: ", e)
        # focus file doesn't have any problems...
    # weekends off:
    try:
        r = beeminder_weekends_off(username, "focusing", dry_run=dry_run)
        if r:
            print("successfully made the weekend off for focusing")
    except requests.HTTPError as e:
        print("error: ", e)
    try:
        r = beeminder_weekends_off(username, goal, dry_run=dry_run)
        if r:
            print("successfully made the weekend off for thesis")
    except requests.HTTPError as e:
        print("error: ", e)

if __name__ == '__main__':
    dry_run = False
    if "-d" in sys.argv or "--dry-run" in sys.argv:
        dry_run = True
    main(dry_run)