Skip to content

Instantly share code, notes, and snippets.

@SqrtRyan
Created April 24, 2025 12:39
Show Gist options
  • Save SqrtRyan/3bac1f23046b2970f3ea31cee3b1776f to your computer and use it in GitHub Desktop.
Save SqrtRyan/3bac1f23046b2970f3ea31cee3b1776f to your computer and use it in GitHub Desktop.
This file has been truncated, but you can view the full file.
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
#THINGS TO DO BEFORE OFFICIAL RELEASE:
# Rename "path" functions to "2d-somethings" idk what, but it conflicts with file-paths...
# Rename "display" functions to "plot" functions. "display" functions should be very simple and library-agnostic, while plot can be matplotlib-based.
# Remove useless functions, and categorize them. Probably should split into multiple files; but that's kinda messy...
# These functions don't have to be removed from r.py, they just have to be deleted from rp.py (after using from r import *, use something like 'del useless_function')
#TODO: Turn the comments at the beginning of each function into docstrings so they can be read by the builtin help function
# python /Users/Ryan/PycharmProjects/Py27RyanStandard2.7/Groupie.py ftF11dwbP61OfPf9QsXBfS5usCdQdBkkMieObdvZ -g 'The Think Tank'
# Imports that are necessary for the 'r' module:
# Imports I tend to use a lot and include so they their names can be directly imported from th:
# region Import
# This is useful for running things on the terminal app or in blender
# import r# For rinsp searches for functions in the r module, so I don't need to keep typing 'import r' over and over again
# Places I want to access no matter where I launch r.py
# sys.path.append('/Users/Ryan/PycharmProjects/RyanBStandards_Python3.5')
# sys.path.append('/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages')
# endregion
# region [entuple, detuple]
import sys
import threading
from builtins import *#For autocompletion with pseudo_terminal
from time import sleep
sys.path.append(__file__[:-len("r.py")])
import rp
import os
import time
import shlex
import sys
import random
import warnings
import pickle
import tempfile
import contextlib
import itertools
import math
import random
import re
from itertools import product as cartesian_product, combinations as all_combinations
from functools import lru_cache
from multiprocessing.dummy import Pool as ThreadPool # ⟵ par_map uses ThreadPool. We import it now so we don't have to later, when we use par_map.
from contextlib import contextmanager
from math import factorial
# Make glob usable both as a function and as a module:
# copy the glob module's namespace onto the glob.glob function object, then
# rebind the name 'glob' to that function. Afterwards glob('*.py') works
# directly, while glob.iglob / glob.escape etc. remain reachable as attributes.
import glob
glob.glob.__dict__.update(glob.__dict__)
glob=glob.glob
# Same trick for copy: 'copy' becomes the copy.copy function, with the copy
# module's contents (deepcopy, etc.) attached to it as attributes.
import copy
copy.copy.__dict__.update(copy.__dict__)
copy = copy.copy
# Remember the working directory at import time, before anything chdir's away.
_original_pwd = os.getcwd()
def entuple(x):
    """Wrap x in a 1-tuple unless it already is a tuple (for pesky petty things)."""
    return x if isinstance(x, tuple) else (x,)
def detuple(x):
    """
    Unwrap a length-1 container, returning its sole element; otherwise return x unchanged.
    Primarily used for allowing functions to take either one iterable argument OR a vararg list of items.
    Used commonly throughout RP's library functions to make them more convenient to use.
    EXAMPLE:
        >>> def print_sum(*x):
        ...     x = detuple(x)
        ...     print(sum(x))
        ... print_sum(1,2,3,4,5)   #Prints 15
        ... print_sum([1,2,3,4,5]) #Prints 15
    """
    from contextlib import suppress
    # Anything without len() or indexing (ints, sets, generators...) falls through unchanged
    with suppress(Exception):
        if len(x) == 1:
            return x[0]
    return x
# endregion
# region [enlist, delist]
def enlist(x):
    """Return x itself if it is already a list, else a new single-element list containing x."""
    return x if isinstance(x, list) else [x]
def delist(x):
    """Return the sole element of a length-1 container; anything else comes back unchanged."""
    from contextlib import suppress
    # Objects without len()/indexing simply fall through
    with suppress(Exception):
        if len(x) == 1:
            return x[0]
    return x
# endregion
# region rCode: [itc‚ run‚ fog‚ scoop‚ seq_map‚ par_map‚ seq‚ par‚ rev‚ pam‚ identity,list_flatten,summation,product]
# ∞
# ∫𝓍²∂𝓍
# ﹣∞
def itc(f, x):
    """
    Iterate To Convergence: repeatedly apply f until a fixed point is reached,
    i.e. until f(x) == x, then return that fixed point.
    """
    previous, current = x, f(x)
    while current != previous:
        previous, current = current, f(current)
    return current
# region [run‚ fog]
def run_func(f, *g, **kwg):
    """Immediately call f with the given args/kwargs and return its result. Pops () ⟶ )("""
    return f(*g, **kwg)
call = run_func  # Alias: 'call' reads more naturally in some contexts
def fog(f, *g, **kwg):
    """Freeze a call: return a zero-argument callable that invokes f(*g, **kwg). Encapsulates )( ⟶ (). 'fog' ≣ ƒ ∘ g, where g can be any number of parameters."""
    def frozen():
        return f(*g, **kwg)
    return frozen
# endregion
# region[scoop]
# scoop could have been implemented with seq. I chose not to.
def scoop(funcⵓscoopˏnew, list_in, init_value=None):
    """
    Left-fold list_in with the two-argument function funcⵓscoopˏnew, starting
    from a best-effort copy of init_value.
    The deepcopy→copy→as-is fallback chain guards against mutating a
    caller-supplied init (e.g. a list) when the fold function mutates its accumulator.
    """
    from copy import copy, deepcopy
    # Try the strongest copy first; fall back gracefully for uncopyable values
    for make_copy in (deepcopy, copy, lambda value: value):
        try:
            accumulator = make_copy(init_value)
            break
        except Exception:
            continue
    for element in list_in:
        accumulator = funcⵓscoopˏnew(accumulator, element)
    return accumulator
# endregion
# region [seq_map‚ par_map]
def seq_map(func, *iterables):
    """
    Eagerly-evaluated map: exactly like the built-in map, but forces full
    evaluation and returns a list. (Built-in map is lazy — map(print,['hello'])
    prints nothing until consumed, whereas seq_map evaluates immediately.)
    """
    return [func(*args) for args in zip(*iterables)]
def _legacy_par_map(func,*iterables,number_of_threads=None,chunksize=None):
    """
    THE OLD IMPLEMENTATION of par_map, kept for compatibility (e.g. python3.5).
    Multi-threaded map via multiprocessing.dummy's ThreadPool.
    Falls back to a sequential seq_map if new threads cannot be started.
    REST IN PEACE OLD FRIEND! (Made early freshman year, ~7 years before 2023)
    """
    try:
        par_pool=ThreadPool(number_of_threads)
        try:
            # Zip the iterables so func may take multiple arguments — a fancier version of par_pool.map(func,iterable,chunksize=chunksize)
            out=par_pool.map(lambda args:func(*args),zip(*iterables),chunksize=chunksize)
        except Exception:
            # Fallback path: hand the iterables tuple straight to the pool
            out=par_pool.map(func,iterables,chunksize=chunksize)
        par_pool.terminate() # Without this, threading.active_count() keeps growing after each call, eventually triggering the RuntimeError handled below
        return out
    except RuntimeError: # "RuntimeError: can't start new thread" → compute sequentially instead; same result, just not parallel
        return seq_map(func,*iterables)
def par_map(func, *iterables, num_threads=None, buffer_limit=0):
    """
    Parallel map over iterables; see lazy_par_map for full documentation.
    buffer_limit defaults to 0 (unbounded) because every result is returned at
    once anyway — there is nothing to gain from throttling the buffer, and all
    outputs must be held in memory regardless.
    """
    lazy_results = lazy_par_map(func, *iterables, num_threads=num_threads, buffer_limit=buffer_limit)
    return list(lazy_results)
def lazy_par_map(func, *iterables, num_threads=None, buffer_limit=None):
    """
    A parallelized, lazy version of the built-in map using ThreadPoolExecutor.
    Yields results in input order as tasks complete.
    Parameters:
        - func: The function to apply to the items.
        - *iterables: Input iterables. func is applied to the results of zipping these.
        - num_threads (optional): The number of worker threads to use.
              If 0, works synchronously like the built-in map.
              If not provided, defaults to 32.
        - buffer_limit (optional): The maximum number of in-flight/stored results.
              If 0, there is no constraint and it will try to precalculate everything
              (not very lazy!). A finite limit conserves memory — e.g. lazily loading
              1,000,000 images from URLs without issuing 1,000,000 http requests at
              once and without holding every image in memory before it's needed.
              If not provided, defaults to num_threads.
    Returns:
        - An iterator that yields func's results, in input order.
    Example:
        def test_par_map():
            def func(index):
                time=random_float(0,1)
                sleep(time)
                print(index)
                return index
            return list(lazy_par_map(func,range(10),buffer_limit=3,num_threads=3))
        ans=test_par_map()
        assert ans==sorted(ans),'lazy_par_map failed to preserve order with a non-zero buffer_limit'
    """
    from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED
    # Validate arguments before starting any threads
    if num_threads is not None and (not isinstance(num_threads, int) or num_threads < 0):
        raise ValueError("num_threads must be None or an integer >= 0")
    if buffer_limit is not None and (not isinstance(buffer_limit, int) or buffer_limit < 0):
        raise ValueError("buffer_limit must be None or an integer >= 0")
    if num_threads is None:
        num_threads = 32
    if buffer_limit is None:
        buffer_limit = num_threads
    if num_threads == 0:
        # Synchronous fallback: behave exactly like the built-in map
        yield from map(func, *iterables)
        return
    iterable = zip(*iterables)
    iterator = enumerate(iter(iterable))  # enumerate tags each arg-tuple with its input index
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        if not buffer_limit:
            # Unbounded buffer: executor.map already preserves input order
            yield from executor.map(func, *iterables)
            return
        futures = set()
        # We need to preserve the order of the inputs
        results = {}  # Maps index -> result, holding out-of-order completions until their turn
        yield_index = 0  # Index of the next result we are allowed to yield
        def wrapper(index):
            # Tag each result with its input index so completions can be re-ordered
            def new_func(*args):
                value = func(*args)
                return index, value
            return new_func
        # Prime the pipeline with the first buffer_limit tasks
        for _ in range(buffer_limit):
            try:
                iterator_index, args = next(iterator)
                futures.add(executor.submit(wrapper(iterator_index), *args))
            except StopIteration:
                break
        while futures:
            done, _ = wait(futures, return_when=FIRST_COMPLETED)  # Wait for any future to complete
            for future in done:
                result_index, value = future.result()
                results[result_index] = value
                futures.remove(future)
                try:
                    # Backfill: submit one new task per completed one, keeping <= buffer_limit in flight
                    iterator_index, args = next(iterator)
                    futures.add(executor.submit(wrapper(iterator_index), *args))
                except StopIteration:
                    pass
            # Yield every buffered result that is next in input order
            while yield_index in results:
                yield results[yield_index]
                del results[yield_index]  # Don't leak memory!
                yield_index += 1
# endregion
# region [seq‚ par]
def seq(funcs,*init):
    """
    The flagship rCode combinator: thread `init` through a pipeline of functions.
    Each func is called on the current value(s); if it returns a non-None result,
    that result becomes the new value, otherwise the old value is kept (so void
    functions like print can sit in the pipeline without destroying the value).
    If `funcs` is a single callable rather than an iterable, it is simply called:
    seq(print,'hello world') ≣ seq([print],'hello world').
    """
    try:
        funcs=list(funcs) # Normalize any iterable of funcs (tuple, generator...) into a list
    except TypeError: # 'funcs' was not iterable; ∴ it must be a single callable
        return funcs(*init) # We have not iterated yet, so 'init' is still certainly a tuple
    for func in funcs:
        # After the first iteration init may no longer be a tuple, so only splat when it is
        temp=func(*init) if isinstance(init,tuple) else func(init)
        if temp is not None: # None return means "void function": keep the previous value
            init=temp
    return init
def par(funcsᆢvoids, *params):
    """
    seq's little sister: run each function in funcsᆢvoids (in parallel threads,
    via par_map) on the SAME fixed set of params.
    NOTE: params never changes, and nothing useful is returned — all functions
    are treated as voids; only their side effects matter (inherent to multi-threading).
    """
    apply_params = lambda func: func(*params)
    par_map(apply_params, funcsᆢvoids)
# endregion
# region [rev]
rev=lambda f,n:lambda *𝓍_:seq([f] * n,*𝓍_) # Compose f with itself n times ("pseudo-revolutions", technically iterations). Ex: rev(lambda x:x+1,5)(0) == 5
# endregion
# region [pam]
def pam(funcs, *args, **kwargs):
    """
    'map' spelt backwards: apply MANY functions to ONE set of arguments
    (whereas map applies one function to many argument sets).
    Returns the list of results, one per function.
    """
    assert is_iterable(funcs),str(funcs) + " ≣ funcs,is NOT iterable. Don't bother using pam! Pam is meant for mapping multiple functions onto one set of arguments; and from what I can tell you only have one function."
    results = []
    for func in funcs:
        results.append(func(*args, **kwargs))
    return results
# endregion
# region [identity]
def identity(*args):
    """
    The identity function. ƒ﹙𝓍﹚﹦ 𝓍 where ƒ ≣ identity
    Examples:
        identity(2) == 2
        identity('Hello World!') == 'Hello World!'
        identity(1,2,3) == (1,2,3) #When given multiple args, returns a tuple
    Better than "lambda x:x" because it appears in stack traces as "identity"
    rather than an anonymous lambda — used throughout RP to ease debugging.
    """
    # Inlined detuple: a single positional arg comes back bare, several come back as a tuple
    if len(args) == 1:
        return args[0]
    return args
# endregion
# region [list_flatten]
#FORMERLY CALLED list_pop (a bit of a misnomer; I know that now, after having taken CSE214.)
def list_roll(x, shift=0):
    """
    Return list(x) rotated right by `shift` positions (negative shift rotates left).
    Demo:
        >>> for _ in range(10):
                print(list_roll(range(10),_))
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        [9, 0, 1, 2, 3, 4, 5, 6, 7, 8]
        [8, 9, 0, 1, 2, 3, 4, 5, 6, 7]
        ...
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    Efficiency Test/Comparison: https://chatgpt.com/share/7de702fb-3aef-4f7a-b3f6-2ecc0d2c9fec
    FIX: previously an empty input with a nonzero shift raised ZeroDivisionError
    (from `shift %= len(x)`); now it returns [] like numpy.roll would.
    Also generalized: converting to a list first means any iterable works, not
    just sliceable sequences.
    """
    out = list(x)
    if not shift or not out:  # Nothing to rotate (also avoids len(out)==0 division)
        return out
    shift %= len(out)  # Normalize shift into [0, len); handles negatives too
    return out[-shift:] + out[:-shift]
def list_flatten(list_2d):
    """
    Concatenate one level of nesting: [[a,b],[c,d]] -> [a,b,c,d].
    Vastly faster than the old scoop-based fold — itertools.chain runs at C speed
    (see https://gist.github.com/SqrtRyan/91dd2edd469c0cef1a545bc576efabf0 for timings:
    chain ~0.016s vs scoop ~64.7s on a 21000-element list).
    Old, MUCH SLOWER version: list_flatten=lambda list_2d:scoop(lambda old,new:list(old) + list(new),list_2d,[])
    """
    from itertools import chain
    flattened = chain.from_iterable(list_2d)
    return list(flattened)
list_pop = list_flatten  # Historical alias: thought of 'popping' the inner brackets of [[a,b],[c,d]] like bubbles — no relation to stack/queue pop!
# endregion
# region [summation,product]
def product(x):
    """
    Multiply all elements of x together, literally using the '*' operator over
    and over (so it works for any type implementing __mul__, not just numbers).
    Returns 1 (the multiplicative identity) for an empty input.
    BUGFIX: the previous scoop-based implementation seeded the fold with x[0]
    and then ALSO iterated over x[0], double-counting the first element
    (product([2,3]) returned 12 instead of 6). This matches the commented-out
    intended implementation that accompanied it.
    """
    if not len(x):
        return 1  # Empty product is the multiplicative identity
    result = x[0]
    for element in x[1:]:
        result = result * element
    return result
def summation(x, start=None):
    """
    Add all elements of x, literally using the '+' operator over and over, so it
    works on anything with __add__ — not just numbers:
        summation([1,2,3])   == 6      # like sum(x)
        summation([[1],[2]]) == [1,2]  # like list_flatten(l) or sum(x,[])
    If `start` is given the fold begins from it; for empty x, `start` (possibly
    None) is returned, preserving the original interface.
    BUGFIX: the previous scoop-based implementation seeded the fold with x[0]
    and then ALSO iterated over x[0], double-counting the first element when no
    start was given (summation([1,2,3]) returned 7 instead of 6). This matches
    the commented-out intended implementation that accompanied it.
    """
    if not len(x):
        return start
    if start is None:
        result = x[0]  # No explicit start: seed with the first element...
        rest = x[1:]   # ...and fold over the remainder only
    else:
        result = start
        rest = x
    for element in rest:
        result = result + element
    return result
def unique(iterable, *, key=identity, lazy=False):
    """
    Removes duplicates but preserves order.
    Works with things that aren't conventionally hashable, like numpy arrays
    (this is because it uses handy_hash).
    EXAMPLE:
        >>> list(unique([4,3,5,4,3,2]))
        ans = [4, 3, 5, 2]
    EXAMPLE:
        >>> list(unique('alpha beta delta alpha'.split()))
        ans = ['alpha', 'beta', 'delta']
        >>> list(unique('alpha beta delta alpha'.split(),key=len))
        ans = ['alpha', 'beta']
    """
    def helper():
        # Generator: yield each item whose key-hash hasn't been seen before
        seen = set()
        for item in iterable:
            tag = handy_hash(key(item)) # handy_hash copes with unhashable-ish values (e.g. numpy arrays)
            if tag not in seen:
                seen.add(tag)
                yield item
    output = helper()
    if not lazy:
        output = list(output) # Materialize eagerly unless the caller asked for a lazy generator
    return output
# endregion
# endregion
# region Time:[gtoc,tic‚ toc‚ ptoc‚ ptoctic‚ millis,micros,nanos]
_global_tic=time.time() # Timestamp of module import / the most recent tic(); toc() measures from here
gtoc=time.time # global toc: the current time in seconds since the epoch
def tic() -> callable:
    """
    Start (or restart) the global stopwatch used by toc()/ptoc(), AND return a
    local toc function bound to this particular tic, so each tic can serve as
    an independent timer: a=tic(); ...; a() -> seconds since that tic.
    The returned function also carries a .tic attribute that resets ONLY this
    local timer (without touching the global one).
    """
    global _global_tic
    _global_tic=local_tic=time.time()
    def local_toc(): # Gives a permanent toc to this tic, specifically
        return gtoc() - local_tic
    def reset_timer(): # Restart just this local timer
        nonlocal local_tic
        local_tic=time.time()
    local_toc.tic=reset_timer
    return local_toc
def toc() -> float:
    """ Seconds elapsed since the most recent tic() (or since module import). """
    return gtoc() - _global_tic
def ptoc(title='',*,new_line=True) -> None:
    """ Print `title` followed by the global stopwatch reading, toc(), in seconds. """
    print(str(title) + ": %05f seconds" % toc(), end="\n" if new_line else "")
def ptoctic(label='') -> None:
    """ ptoc then tic: print the elapsed time, then restart the global stopwatch. """
    ptoc(label)
    tic()
# ⎧ ⎫
# ⎪ ⎧ ⎫⎪
# ⎪ ⎪⎧ ⎫ ⎪⎪
_milli_micro_nano_converter=lambda s,n:int(round((s() if callable(s) else s) * n))
# ⎪ ⎪⎩ ⎭ ⎪⎪
# ⎪ ⎩ ⎭⎪
# ⎩ ⎭
# You can do millis(tic()) ⟵ Will probably be about 0, millis(toc), millis(1315), millis() ⟵ Gets global time by default
def seconds(seconds=gtoc) -> int:
    """ Return seconds since common epoch (rounded to the nearest integer). Accepts a number or a 0-arg callable (e.g. toc, tic()); defaults to the current time. """
    return _milli_micro_nano_converter(seconds,10 ** 0)
def millis(seconds=gtoc) -> int:
    """ Return milliseconds since common epoch (rounded to the nearest integer). Accepts a number or a 0-arg callable; defaults to the current time. """
    return _milli_micro_nano_converter(seconds,10 ** 3)
def micros(seconds=gtoc) -> int:
    """ Return microseconds since common epoch (rounded to the nearest integer). Accepts a number or a 0-arg callable; defaults to the current time. """
    return _milli_micro_nano_converter(seconds,10 ** 6)
def nanos(seconds=gtoc) -> int:
    """ Return nanoseconds since common epoch (rounded to the nearest integer). Accepts a number or a 0-arg callable; defaults to the current time. """
    return _milli_micro_nano_converter(seconds,10 ** 9)
# endregion
# region Files and such: [get_current_directory‚ get_all_file_names]
def get_process_cwd(pid):
    """Return the current working directory of the process with the given PID (via psutil)."""
    pip_import('psutil')
    import psutil
    return psutil.Process(pid).cwd()
def get_current_directory(pid=None):
    """
    Get the result of 'cd' in a shell — the folder RP saves to / loads from by default.
    SUMMARY: get_current_directory() ≣ sys.path[0] ≣ (default folder_path) ≣ (current directory)
    If `pid` is given, return the working directory of THAT process instead.
    Returns '.' when the current directory no longer exists — a simple but
    technically correct answer that prevents errors elsewhere.
    """
    if pid is not None:
        assert isinstance(pid,int),pid
        # BUGFIX: previously called _get_process_cwd, which is not defined in this
        # module — the helper defined above is get_process_cwd. (If an
        # _get_process_cwd exists in the truncated remainder of the file, revisit.)
        return get_process_cwd(pid)
    try:
        import os
        return os.getcwd()
    except FileNotFoundError:
        # The directory we're sitting in was deleted out from under us.
        # (The old unreachable `raise` after this return has been removed.)
        return '.'
def set_current_directory(path):
    """Change the process's current working directory to `path` (like 'cd' in a shell)."""
    from os import chdir
    chdir(path)
class SetCurrentDirectoryTemporarily:
    """
    Context manager: cd into `directory` for the duration of the with-block,
    then cd back to wherever we were. directory=None means "don't move".
    Example:
        print(get_current_directory())
        with SetCurrentDirectoryTemporarily('/home'):
            print(get_current_directory())
        print(get_current_directory())
    """
    def __init__(self, directory: str = None):
        self.directory = directory
    def __enter__(self, directory: str = None):
        # Record where we are so __exit__ can restore it
        self.original_dir = get_current_directory()
        if self.directory is None:
            return
        set_current_directory(self.directory)
    def __exit__(self, *args):
        set_current_directory(self.original_dir)
class TemporarilySetAttr:
    """
    Context manager that sets attributes on an object for the duration of a
    with-block, then restores the previous state on exit: attributes that
    already existed get their old values back, and attributes that were newly
    created are deleted again.
    Usage:
        with TemporarilySetAttr(obj, attr1=value1, attr2=value2):
            ...  # obj temporarily has the given attribute values
    Example (drawing with temporary styles):
        with TemporarilySetAttr(shape, fill_color='red', stroke_width=3):
            draw_shape(shape)
    Example (temporary units in a physics simulation):
        with TemporarilySetAttr(physics_object, unit_system='imperial'):
            compute_gravitational_force(physics_object)
    Example (temporarily changing request headers):
        with TemporarilySetAttr(request, headers={'User-Agent': 'Custom-UA'}):
            response = fetch_data(request)
    Testable example:
        class Test:
            def __init__(self):
                self.attr = 0
        instance = Test()
        with TemporarilySetAttr(instance, attr=42, new_attr=100):
            assert instance.attr == 42
            assert instance.new_attr == 100
        assert instance.attr == 0
        assert not hasattr(instance, 'new_attr')
    Written with the aid of GPT4: https://sharegpt.com/c/ZXG65TG
    """
    def __init__(self, instance, **kwargs):
        self.instance = instance
        self.old_attrs = {}   # Previous values of attributes that existed before entry
        self.new_attrs = kwargs
    def __enter__(self):
        for name, value in self.new_attrs.items():
            if hasattr(self.instance, name):
                self.old_attrs[name] = getattr(self.instance, name)
            setattr(self.instance, name, value)
    def __exit__(self, exc_type, exc_value, traceback):
        # Restore pre-existing attributes; delete ones we introduced
        for name in self.new_attrs:
            if name in self.old_attrs:
                setattr(self.instance, name, self.old_attrs[name])
            else:
                delattr(self.instance, name)
class TemporarilySetItem:
    """
    Context manager that sets items of a container (dict, list, etc.) for the
    duration of a with-block, then restores the container on exit: keys that
    already existed get their previous values back, and keys that were newly
    added are deleted again.
    Usage:
        with TemporarilySetItem(container, {key1: value1, key2: value2}):
            ...  # container temporarily holds the given values
    Example (temporarily changing words in a list):
        words = ['The', 'quick', 'brown', 'fox']
        with TemporarilySetItem(words, {1: 'slow', 2: 'red'}):
            process_text(words)
    Example (temporarily changing settings in a configuration dictionary):
        config = {'mode': 'production', 'log_level': 'info'}
        with TemporarilySetItem(config, {'mode': 'development', 'log_level': 'debug'}):
            run_tests(config)
    Testable examples:
        my_dict = {'a': 1, 'b': 2}
        with TemporarilySetItem(my_dict, {'a': 42, 'c': 3}):
            assert my_dict['a'] == 42
            assert my_dict['c'] == 3
        assert my_dict['a'] == 1
        assert 'c' not in my_dict
    NOTE(review): for LIST containers, the `key in container` test below checks
    membership of VALUES, not validity of indices — an index key whose value
    happens not to appear in the list is treated as "new" and deleted on exit.
    This mirrors the original behavior; confirm whether that is intended.
    Written with the aid of GPT4: https://sharegpt.com/c/ZXG65TG
    """
    def __init__(self, container, mapping):
        self.container = container
        self.old_items = {}   # Previous values of keys that existed before entry
        self.new_items = mapping
    def __enter__(self):
        for key, value in self.new_items.items():
            if key in self.container:
                self.old_items[key] = self.container[key]
            self.container[key] = value
    def __exit__(self, exc_type, exc_value, traceback):
        # Restore pre-existing items; delete keys we introduced
        for key in self.new_items:
            if key in self.old_items:
                self.container[key] = self.old_items[key]
            else:
                del self.container[key]
def ConditionalContext(condition, context_manager, *args, **kwargs):
    """
    Conditionally enter another context manager, avoiding deep if/else nesting.
    Parameters:
        - condition (bool or callable): A flag, or a zero-arg function returning
              a bool, deciding whether the context should be entered.
        - context_manager (callable): A function, lambda, or class that
              returns/provides a context manager.
        - *args, **kwargs: Forwarded to context_manager when it is entered.
    Usage:
        ```python
        with ConditionalContext(some_condition, some_context, arg1, arg2, key=value):
            # Your code here...
        ```
    This is equivalent to the following, with less branching and nesting:
        ```python
        if some_condition:
            with some_context(arg1, arg2, key=value):
                ...  # body
        else:
            ...  # body (same code, no context)
        ```
    Note: context_manager is only instantiated (and args only passed) when the
    condition is True (or evaluates to True).
    Returns:
        - A context manager based on the condition.
    Aided by GPT4: https://chat.openai.com/share/36186fdf-fc23-4c82-8394-d29e5cbbd32d
    """
    from contextlib import contextmanager
    @contextmanager
    def _maybe_enter():
        should_enter = condition() if callable(condition) else condition
        if not should_enter:
            yield
            return
        with context_manager(*args, **kwargs):
            yield
    return _maybe_enter()
class PrintBeforeAfter:
    """
    A context manager that prints the value of an expression or callable before
    and after the with-block executes.
    Args:
        target: Either a string expression (evaluated in the caller's scope)
            or a zero-argument callable.
    Example:
        >>> x = 10
        ... with PrintBeforeAfter("x"):
        ...     x += 5
        ...
        ... def get_value():
        ...     return x * 2
        ...
        ... with PrintBeforeAfter(get_value):
        ...     x *= 3
        --> BEFORE: x == 10
        <-- AFTER: x == 15
        --> BEFORE: 30
        <-- AFTER: 90
    Example:
        >>> with rp.PrintBeforeAfter('transformer.patch_embed.proj'):
        ...     transformer.patch_embed.proj = rp.libs.torch_tools.resize_conv2d_channels(
        ...         transformer.patch_embed.proj,
        ...         in_channels=64,
        ...     )
        --> BEFORE: transformer.patch_embed.proj == Conv2d(32, 3072, kernel_size=(2, 2), stride=(2, 2))
        <-- AFTER: transformer.patch_embed.proj == Conv2d(64, 3072, kernel_size=(2, 2), stride=(2, 2))
    """
    def __init__(self, target):
        # Capture the caller's scope so string targets can be evaluated there
        # (get_scope and exeval are rp helpers defined elsewhere in this file)
        self._scope = get_scope(frames_back=1)
        self._target = target
        if not (isinstance(target, str) or callable(target)):
            raise AssertionError("Target must be a string or callable")
    def _get_value(self):
        # Evaluate the target: exec/eval a string in the captured scope, or call it
        if isinstance(self._target, str):
            out = exeval(self._target, self._scope)
        elif callable(self._target):
            out = self._target()
        else:
            assert False # Unreachable: __init__ validated the target type
        return str(out)
    def _get_name(self):
        # String targets are labeled "expr == "; bare callables get no label
        if isinstance(self._target, str):
            return self._target + " == "
        elif callable(self._target):
            return ""
        else:
            assert False # Unreachable: __init__ validated the target type
    def __enter__(self):
        fansi_print(
            " --> BEFORE: " + self._get_name() + self._get_value(),
            "bold green",
        )
    def __exit__(self, exc_type, exc_val, exc_tb):
        fansi_print(
            " <-- AFTER: " + self._get_name() + self._get_value(),
            "bold green",
        )
#THIS IS DEPRECATED IN FAVOR OF get_all_paths
# def get_all_file_names(file_name_ending: str = '',file_name_must_contain: str = '',folder_path: str = get_current_directory(),show_debug_narrative: bool = False):
# # SUMMARY: This method returns a list of all file names files in 'folder_path' that meet the specifications set by 'file_name_ending' and 'file_name_must_contain'
# # Leave file_name_ending blank to return all file names in the folder.
# # To find all file names of a specific extension, make file_name_ending ﹦ '.jpg' or 'png' etc.
# # Note: It does not matter if you have '.png' vs 'png'! It will return a list of all files whose name's ends…
# # …with file_name_ending (whether that comes from the file type extension or not). Note that you can use this to search…
# # …for specific types of file names that YOU made arbitrarily, like 'Apuppy.png','Bpuppy.png' ⟵ Can both be found with…
# # …file_name_ending ﹦ 'puppy.png'
# # file_name_must_contain ⟶ all names in the output list must contain this character sequence
# # show_debug_narrative ⟶ controls whether to print out details about what this function is doing that might help to debug something.
# # …By default this is disabled to avoid spamming the poor programmer who dares use this function.
# # ;;::O(if)OOO
# os.chdir(folder_path)
# if show_debug_narrative:
# print(get_all_file_names.__name__ + ": (Debug Narrative) Search Directory ﹦ " + folder_path)
# output=[]
# for file_name in glob.glob("*" + file_name_ending):
# if file_name_must_contain in file_name:
# output.append(file_name) # I tried doing it with the '+' operator, but it returned a giant list of individual characters. This way works better.
# if show_debug_narrative:
# print(get_all_file_names.__name__ + ": (Debug Narrative) Found '" + file_name + "'")
# if show_debug_narrative:
# print(get_all_file_names.__name__ + ' (Debug Narrative) Output ﹦ ' + str(output))
# return output
# endregion
# region String ⟷ Integer List: [int_list_to_string‚ string_to_int_list]
def int_list_to_string(int_list):
    """Convert a list of Unicode code points into the corresponding string."""
    return "".join(chr(i) for i in int_list)
def string_to_int_list(string):
    """Convert a string into the list of its characters' Unicode code points."""
    return [ord(character) for character in string]
# USAGE EXAMPLE:
# print((lambda x:int_list_to_string(range(ord(x)-500,ord(x)+500)))("⚢"))
# print(int_list_to_string([*(a+1 for a in string_to_int_list("♔"))]))
# #♈♉♊♋♌♍♎♏♐♑♒♓ ♔♕♖♗♘♙♚♛♜♝♞♟ gen
# #⟦⟧⟨⟩⟪⟫⟬⟭⟮⟯ ❨❩❪❫❬❭❮❯❰❱❲❳❴❵ ⚀⚁⚂⚃⚄⚅ ♔♕♖♗♘♙♚♛♜♝♞♟
# endregion
# region Fansi:[fansi,fansi_print,print_fansi_reference_table,fansi_syntax_highlighting] (Format-ANSI colors and styles for the console)
# noinspection PyShadowingBuiltins
def currently_running_windows():
    """ True iff we are running on Windows (os.name == 'nt'). """
    import os
    return os.name=='nt'
def currently_running_posix():
    """ True iff the OS is POSIX-style (Linux, macOS, BSD, ...; os.name == 'posix'). """
    import os
    return os.name=='posix'
def currently_running_mac():
    """ True iff we are running on macOS (platform.system() == 'Darwin'). """
    import platform
    return platform.system()=='Darwin'
def currently_running_linux():
    """ True iff we are running on Linux. """
    import platform
    return platform.system()=='Linux'
currently_running_unix=currently_running_posix # Technically posix != unix, but realistically any posix OS that rp runs on is unix-like
def terminal_supports_ansi():
    """
    Best-effort check for ANSI escape-code support in the current terminal.
    On Windows, tries to enable ANSI via colorama (a side effect!); reports
    False if colorama is unavailable. On all other platforms, assumes support.
    """
    if currently_running_windows():
        try:
            from colorama import init
            init() # Side effect: enables ANSI coloring on the Windows console
            return True
        except Exception:
            return False # colorama missing/broken -> assume no ANSI support
    return True
    # return sys.stdout.isatty()# There are probably more sophistacated, better ways to check, but I don't know them.
def terminal_supports_unicode():
    """
    Best-effort check for Unicode rendering support in the current terminal.
    On Windows, tries to enable it via win_unicode_console (a side effect!);
    reports False if that package is unavailable. Elsewhere, assumes support.
    """
    if currently_running_windows():# Try to enable unicode, but report failure if we can't
        try:
            from win_unicode_console import enable
            enable() # Side effect: enables unicode characters on the Windows console
            return True
        except Exception:
            return False
    # ∴ we are not running Windows
    return True# I don't know how to check whether you can render characters such as ⮤, ✔, or ⛤ etc
def fansi_is_enabled():
    """ Returns true IFF fansi (ANSI styling) is currently enabled """
    return not _disable_fansi
def fansi_is_disabled():
    """ Returns true IFF fansi is currently disabled """
    return _disable_fansi
_disable_fansi=False # Module-wide kill switch for ANSI styling; toggled by the functions below and by without_fansi()
def disable_fansi():
    """ Globally disable fansi styling (fansi output becomes plain text). """
    global _disable_fansi
    _disable_fansi=True
def enable_fansi():
    """ Globally re-enable fansi styling. """
    global _disable_fansi
    _disable_fansi=False
@contextmanager
def without_fansi():
    """
    Context to run a block of code without using fansi (ANSI styling).
    Saves the current _disable_fansi flag, forces it True for the duration of
    the with-block, and restores the previous value afterwards — even on error.
    Example:
        f=lambda:fansi_print("Hello World",'cyan','bold','red')
        f()#With fansi
        with without_fansi():
            f()#Without fansi
    """
    global _disable_fansi
    old_disable_fansi=_disable_fansi
    _disable_fansi=True
    try:
        yield
    finally:
        _disable_fansi=old_disable_fansi
_fansi_styles = {
"normal": 0,
"bold": 1,
"faded": 2,
"italic": 3,
"underlined": 4,
"blinking": 5,
"invert": 7,
"hide": 8,
"strike": 9,
"sub": 74,
"super": 73,
#https://ryantravitz.com/blog/2023-02-18-pull-of-the-undercurl/
'underline' : '4:1', #Same as underlined, but supports custom colors!
'underdouble': '4:2',
'undercurl' : '4:3',
'underdots' : '4:4',
'underdash' : '4:5',
}
def _transform_fansi_arg(spec):
""" Allow for 'yellow green underlined on blue bold' """
spec = spec.lower()
style = []
color = []
background = []
on = False
for x in spec.split():
if x == 'on':
on = True
elif x in _fansi_styles:
style.append(x)
elif on:
background.append(x)
else:
color.append(x)
style = ' '.join(style) or None
color = ' '.join(color) or None
background = ' '.join(background) or None
return color, style, background
def fansi(
    text_string="",
    text_color=None,
    style=None,
    background_color=None,
    underline_color=None,
    *,
    per_line=True,
    reset=True,
    truecolor=False,
    link=None
):
    """
    'fansi' is a pun, referring to ANSI and fancy
    Uses ANSI formatting to give the terminal styled color outputs.
    The 'per_line' option applies fansi to each line separately, which is useful for multi-line strings. It is enabled by default.
    The 'truecolor' option enables 24-bit truecolor support if the terminal supports it. It is disabled by default.
    The 'underline_color' option allows specifying a color for underlines independent of the text color. It is None by default.
    The 'link' option creates a hyperlink to the provided URL. It is None by default.
    Note on terminal hyperlink support:
    - iTerm2, GNOME Terminal, Konsole: Directly clickable hyperlinks
    - Wezterm: Requires Ctrl+click or similar modifier (configurable)
    - Alacritty: Highlights links, but requires additional configuration for clicking
      (Check .alacritty.yml documentation for mouse.url settings)
    STYLES:
                                                                 Alacritty   Terminal.app   Wezterm
        - 'normal': No styling (default)                       |    yes    |     yes      |   yes   |
        - 'bold': Bold text                                    |    yes    |     yes      |   yes   |
        - 'faded': Faint text                                  |    yes    |     yes      |   yes   |
        - 'italic': Italic text                                |    yes    |     yes      |   yes   |
        - 'underlined': Underlined text                        |    yes    |     yes      |   yes   |
        - 'blinking': Blinking text                            |    no     |     yes      |   yes   |
        - 'invert': Swap foreground and background colors      |    yes    |     yes      |   yes   |
        - 'hide': Hidden text (useful for passwords)           |    yes    |     yes      |   yes   |
        - 'strike': Strikethrough text                         |    yes    |     no       |   yes   |
        - 'super': Superscript text                            |    no     |     no       |   yes   |
        - 'sub': Subscript text                                |    no     |     no       |   yes   |
    COLORS:
        The basic color options for text_color and background_color are:
        - 'black': ANSI color 0
        - 'red': ANSI color 1
        - 'green': ANSI color 2
        - 'yellow': ANSI color 3
        - 'blue': ANSI color 4
        - 'magenta': ANSI color 5
        - 'cyan': ANSI color 6
        - 'gray'/'grey': ANSI color 7
        - 'white': ANSI color 8
        Any other colors will be displayed in either 256-color form, or 24-bit color form if truecolor==True
            If text_color or background_color is given as an integer, it will be interpreted as a 256-color code.
            Any color compatible with rp.as_rgba_float_color will work too, and will be mapped to the nearest 256-color code.
            If truecolor=True, assumes terminal has 24-bit color support. Otherwise, 256 color support will be assumed.
            See the below example!
    LINK:
        - If link is provided, the text becomes a clickable hyperlink in terminals that support hyperlinks
        - Example: fansi("Click me", "blue", "underlined", link="https://example.com")
    EXAMPLES:
        >>> #Shorthand: You can combine multiple styles together, foreground and background separated by 'on'!
        ... fansi_print("HELLO WORLD!",'bold yellow green on red')
        ... fansi_print("HELLO WORLD!",'bold yellow green on red red underlined blinking')
        ... fansi_print("HELLO WORLD!",'yellow on white')
        ... fansi_print("HELLO WORLD!",'bold green')
        ... fansi_print("HELLO WORLD!",'on blue cyan')
        ... fansi_print("HELLO WORLD!",'on blue cyan bold')
        ... fansi_print("HELLO WORLD!",'on bold')
        ... fansi_print("HELLO WORLD!",'bold')
        >>> #Adding styles together via setting reset=False
        ... print(
        ...     fansi("hello ", "red", "sub", reset=False)
        ...     + fansi("underline ", "green", "underlined", reset=False)
        ...     + fansi("blinking ", "blue", "blinking", reset=False)
        ...     + fansi("italic ", "yellow", "italic", reset=False)
        ...     + fansi("strike ", "cyan", "strike", reset=False)
        ...     + fansi("bold ", "magenta", "bold", reset=False)
        ...     + fansi("invert ", "orange", "invert", reset=False)
        ...     + fansi(reset=True)
        ...     + "After reset..."
        ...     + fansi("All at once!", 'hot pink', 'underlined blinking italic strike bold super')
        ... )
        >>> #Using hyperlinks
        ... print(fansi("Click here to visit example.com", "blue", "underlined", link="https://example.com"))
        ... print(fansi("Documentation", "green", link="https://docs.python.org"))
        >>> #Display an image (display_image_in_terminal_color is faster - but this is to show how fansi works)
        ... for truecolor in [True,False]:
        ...     image=load_image('https://images.unsplash.com/photo-1507146426996-ef05306b995a?fm=jpg&q=60&w=3000&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxzZWFyY2h8Mnx8cHVwcHl8ZW58MHx8MHx8fDA%3D')
        ...     image=resize_image_to_fit(image,100,100)
        ...     string=""
        ...     for row in image:
        ...         for pixel in row:
        ...             string+=fansi('██',text_color=tuple(pixel/255),truecolor=truecolor)
        ...         string+='\n'
        ...     print('truecolor = ',truecolor)
        ...     print(string)
        >>> #Adding styles together in a single call
        ... for style_a, style_b in all_combinations("normal bold italic underlined invert strike".split(), 2):
        ...     print(fansi("\tCombined Style: " + style_a + " " + style_b, style=style_a + " " + style_b))
        >>> #An overview of what you can do with fansi
        ... fansi_print("Fansi Styles:", style="underlined")
        ... for style in 'normal bold faded italic underlined blinking invert hide strike'.split():
        ...     print(fansi("\tStyle: "+style, style=style))
        ...
        ... fansi_print("Traditional Terminal Colors", style="underlined")
        ... for color in "black red green yellow blue magenta cyan gray".split():
        ...     for style in [None, "bold"]:
        ...         print(fansi("\t█████ " + color + " " + str(style), color, style))
        ...
        ... fansi_print("Ansi256 Terminal Colors", style="underlined")
        ... print("\tPerfect RGB matches")
        ... for color in ["green green", "blue blue", "red red", "yellow yellow", "cyan cyan", "magenta magenta", "gray gray", "black black", "white white"]:
        ...     print(fansi("\t█████ " + str(color) + " ", color))
        ...
        ... print("\tSpecial color names")
        ... for color in ["green cyan", "blue cyan", "navy blue", "hot pink"]:
        ...     print(fansi("\t█████ " + str(color) + " ", color))
        ...
        ... print("\tHex codes")
        ... for color in ["#0055AB"]:
        ...     print(fansi("\t█████ " + str(color) + " ", color))
        ...
        ... print("\tGrayscale floats")
        ... for color in [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]:
        ...     print(fansi("\t█████ " + str(color) + " ", color))
        ...
        ... print("\tAnsi256 integer color codes")
        ... for color in [12, 34, 56, 78, 90]:
        ...     print(fansi("\t█████ " + str(color) + " ", color))
        ...
        ... print("\tRGB float tuples (truecolor=True)")
        ... for color in [random_rgb_float_color() for _ in range(10)]:
        ...     print(fansi("\t█████ " + str(color) + " ", color, truecolor=True))
        ...
        ... fansi_print("Background Colors:", style="underlined")
        ... for color in ["green cyan", "blue cyan", "navy blue", "hot pink"]:
        ...     print(fansi("\tXXXXX " + str(color) + " ", background_color=color))
        ... fansi_print("Hyperlink Examples:", style="underlined")
        ... print(fansi("\tPython Documentation", "blue", "underlined", link="https://docs.python.org"))
        ... print(fansi("\tGoogle Search", "green", link="https://google.com"))
        ... print(fansi("\tGitHub Repository", "magenta", "bold", link="https://github.com"))
        ... print("Note: In Wezterm, use Ctrl+click on links. In Alacritty, hyperlinks may need configuration.")
    """
    if isinstance(text_color, str) and style is None and background_color is None:
        text_color, style, background_color = _transform_fansi_arg(text_color)
    # Ensure text_string is a string
    text_string = str(text_string)
    # Handle per_line option
    if per_line and text_string:
        lines = text_string.splitlines(keepends=True)
        lines = [fansi(line, text_color, style, background_color, underline_color=underline_color, per_line=False, reset=reset, truecolor=truecolor, link=link) for line in lines]
        return ''.join(lines)
    # Check if ANSI formatting is disabled
    if globals().get('_disable_fansi', False):
        return text_string
    # Check if terminal supports ANSI codes
    if not terminal_supports_ansi():
        return text_string
    # Define color and style mappings
    color_codes = {'black': 0, 'red': 1, 'green': 2, 'yellow': 3,
                   'blue': 4, 'magenta': 5, 'cyan': 6, 'gray': 7, 'grey': 7}
    #To see all styles supported for your terminal:
    #    >>> for style in range(100): print(fansi('Hello World! '+str(style),style=style))
    #    ... #Reference: https://en.wikipedia.org/wiki/ANSI_escape_code
    legacy_styles = {'outlined':7} #Older versions of RP used these keys instead
    styles = dict(_fansi_styles) #Copy before adding legacy aliases, so we don't mutate the module-level _fansi_styles dict on every call
    styles.update(legacy_styles)
    format_codes = []
    # Convert style from string to code
    if isinstance(style, str):
        for style_lower in style.lower().split():
            if style_lower in styles:
                style = styles[style_lower]
                #You can try a custom integer and see what happens in your terminal! Not all terminals respond the same to style codes - some implement more than others.
                #For example, terminal.app doesn't support 'strike' but does support 'blinking', which Alacritty doesn't
                format_codes.append(str(style))
            else:
                print("ERROR: fansi: Invalid style '{}'. Valid options are: {}".format(style, list(styles.keys())))
                style = None
    if isinstance(text_color, str):
        text_color = text_color.lower()
    # Handle text_color
    if text_color is not None:
        if isinstance(text_color,str) and text_color in color_codes:
            color_code = color_codes[text_color] + 30
            format_codes.append(str(color_code))
        else:
            try:
                text_color=as_rgb_float_color(text_color)
                if truecolor and not isinstance(text_color,int):
                    r, g, b = float_color_to_byte_color(text_color)
                    format_codes.append('38;2;%i;%i;%i'%(r,g,b))
                else:
                    color_code = text_color if isinstance(text_color, int) else float_color_to_ansi256(text_color)
                    format_codes.append('38;5;'+str(color_code))
            except Exception:
                print("ERROR: fansi: Invalid text_color '%s'. Valid options are: %s, or any RGB float color compatible with rp.as_rgb_float_color" % (text_color, list(color_codes.keys())))
    # Handle background_color
    if background_color is not None:
        if isinstance(background_color,str) and background_color in color_codes:
            bg_color_code = color_codes[background_color] + 40
            format_codes.append(str(bg_color_code))
        else:
            try:
                background_color=as_rgb_float_color(background_color)
                if truecolor and not isinstance(background_color,int):
                    r, g, b = float_color_to_byte_color(background_color)
                    format_codes.append('48;2;%i;%i;%i'%(r,g,b))
                else:
                    color_code = background_color if isinstance(background_color, int) else float_color_to_ansi256(background_color)
                    format_codes.append('48;5;'+str(color_code))
            except Exception:
                print("ERROR: fansi: Invalid background_color '%s'. Valid options are: %s, or any RGB float color compatible with rp.as_rgb_float_color" % (background_color, list(color_codes.keys())))
    # Apply hyperlink if provided
    hyperlink_start = ""
    hyperlink_end = ""
    if link is not None:
        # OSC 8 hyperlink format: ESC]8;params;URI\BEL text ESC]8;;\BEL
        # Where ESC is \033, and BEL is \007
        # The empty string before the URI is where additional parameters can go
        # For maximum compatibility across terminals (Wezterm/Alacritty/iTerm/etc.)
        hyperlink_params = ""  # Could add params like "id=identifier" here if needed
        hyperlink_start = "\033]8;" + hyperlink_params + ";" + str(link) + "\007"
        hyperlink_end = "\033]8;;\007"
    # Handle underline_color if provided
    underline_color_code = ""
    if underline_color is not None:
        if isinstance(underline_color, str) and underline_color in color_codes:
            color_code = color_codes[underline_color]
            underline_color_code = '\033[58;5;{}m'.format(color_code)
        else:
            try:
                underline_color = as_rgb_float_color(underline_color)
                if truecolor and not isinstance(underline_color, int):
                    r, g, b = float_color_to_byte_color(underline_color)
                    underline_color_code = '\033[58;2;{};{};{}m'.format(r, g, b)
                else:
                    color_code = underline_color if isinstance(underline_color, int) else float_color_to_ansi256(underline_color)
                    underline_color_code = '\033[58;5;{}m'.format(color_code)
            except Exception:
                print("ERROR: fansi: Invalid underline_color '{}'. Valid options are: {}, or any RGB float color compatible with rp.as_rgb_float_color".format(
                    underline_color, list(color_codes.keys())))
    # Apply ANSI formatting
    format_sequence = ';'.join(format_codes)
    # Insert underline color after the format codes if needed
    output = "\x1b[{}m{}{}{}{}".format(format_sequence, underline_color_code, hyperlink_start, text_string, hyperlink_end)
    if reset:
        output += '\x1b[0m'
    return output
def _fansi_fix(string):
"""
Fixes nested ANSI formatting issues in a string by restoring outer formatting after inner resets.
When nested fansi calls are used, the inner reset code (\x1b[0m) cancels all formatting.
This function ensures that after an inner reset, the outer formatting is restored.
Example:
fansi("Hello "+fansi("World",'yellow')+"!",'green')
Original output: \x1b[32mHello \x1b[33mWorld\x1b[0m!\x1b[0m
Fixed output: \x1b[32mHello \x1b[33mWorld\x1b[0m\x1b[32m!\x1b[0m
Args:
string (str): The string with ANSI formatting to fix
Returns:
str: The fixed string with proper nested formatting
"""
import re
# Regular expression to find ANSI escape sequences
ansi_pattern = re.compile(r'\x1b\[((?:\d+;)*\d+)m')
# Process the string
result = []
last_pos = 0
format_stack = [] # Stack to track active format codes
for match in ansi_pattern.finditer(string):
# Add text before this sequence
if match.start() > last_pos:
result.append(string[last_pos:match.start()])
code = match.group(1)
if code == '0': # Reset code
# Add the reset
result.append('\x1b[0m')
# Pop from format stack
if format_stack:
format_stack.pop()
# Restore previous format if there's any
if format_stack:
result.append('\x1b[%sm'%format_stack[-1])
else:
# Add the format code
result.append(match.group(0))
# Push to format stack
format_stack.append(code)
# Update last position
last_pos = match.end()
# Add remaining text
if last_pos < len(string):
result.append(string[last_pos:])
return ''.join(result)
def _legacy_fansi(text_string,text_color=None,style=None,background_color=None,*,per_line=True):
    """
    TODO: Fix bug: PROBLEM is that '\n' not in fansi('Hello\n','gray')
    This function uses ANSI escape sequnces to make colored text in a terminal.
    It can also make bolded, underlined, or highlighted text.
    It uses ANSI escape sequences to do this...
    ...and so calling it 'fansi' is a pun on 'fancy' and 'ansi'
    'fansi' is a pun, referring to ANSI and fancy
    Uses ANSI formatting to give the terminal color outputs.
    There are only 8 possible choices from each category, in [0‚7]⋂ ℤ
    Adding 0,30,and 40 because of the ANSI codes. Subtracting 1 later on because the syntax
       of this def says that '0' is the absence of any style etc, whereas 1-8 are active styles.
    The 'per_line' option applies fansi to every line, which is useful when trying to draw tables and such
    Some terminals cant handle ansi escape sequences and just print garbage, so if _disable_fansi is turned on this function just returns unformatted text.
        (This is usually only the case with more obscure terminals, such as one I have for ssh'ing on my phone. But they do exist)
    To undo the effect of this function on a string (aka to un-format a string) use rp.strip_ansi_escapes() (see its documentation for more details)
    EXAMPLE: print(fansi('ERROR:','red','bold')+fansi(" ATE TOO MANY APPLES!!!",'blue','underlined','yellow'))
    """
    #This function was replaced by fansi at 12:00AM Oct28 2024 - and now has better color functionality and cleaner code!
    #NOTE(review): kept byte-for-byte for reference; note that the per_line branch delegates to the NEW fansi, not to itself.
    text_string=str(text_string)
    if per_line:
        lines=line_split(text_string)
        lines=[fansi(line,text_color,style,background_color,per_line=False) for line in lines]
        return line_join(lines)
    if _disable_fansi:
        return text_string#This is for terminals that dont support colors. I don't have a method wrapper for this yet, though.
    if not terminal_supports_ansi():# We cannot guarentee we have ANSI support; we might get ugly crap like '\[0Hello World\[0' or something ugly like that!
        return text_string# Don't format it; just leave it as-is
    if text_string=='':# Without this, print(fansi("",'blue')+'Hello World'
        return ''
    if isinstance(text_color,str): # if text_color is a string, convert it into the correct integer and handle the associated exceptions
        text_colors={'black':0,'red':1,'green':2,'yellow':3,'blue':4,'magenta':5,'cyan':6,'gray':7,'grey':7}
        try:
            text_color=text_colors[text_color.lower()]
        except Exception:
            print("ERROR: def fansi: input-error: text_color = '{0}' BUT '{0}' is not a valid key! Replacing text_color as None. Please choose from {1}".format(text_color,str(list(text_colors))))
            text_color=None
    if isinstance(style,str): # if background_color is a string, convert it into the correct integer
        styles={'bold':1,'faded':2,'underlined':4,'blinking':5,'outlined':7}
        try:
            style=styles[style.lower()] # I don't know what the other integers do.
        except Exception:
            print("ERROR: def fansi: input-error: style = '{0}' BUT '{0}' is not a valid key! Replacing style as None. Please choose from {1}".format(style,str(list(styles))))
            style=None
    if isinstance(background_color,str): # if background_color is a string, convert it into the correct integer
        background_colors={'black':0,'red':1,'green':2,'yellow':3,'blue':4,'magenta':5,'cyan':6,'gray':7,'grey':7}
        try:
            background_color=background_colors[background_color.lower()]
        except Exception:
            print("ERROR: def fansi: input-error: background_color = '{0}' BUT '{0}' is not a valid key! Replacing background_color as None. Please choose from {1}".format(background_color,str(list(background_colors))))
            background_color=None
    format=[]
    if style is not None:
        assert 0 <= style <= 7,"style == " + str(style) + " ∴ ¬﹙0 <= style <= 7﹚ ∴ AssertionError"
        style+=0 # No-op: style codes are used as-is (unlike colors, which get offset below); kept for symmetry
        format.append(str(style))
    if text_color is not None:
        assert 0 <= text_color <= 7,"text_color == " + str(text_color) + " ∴ ¬﹙0 <= text_color <= 7﹚ ∴ AssertionError"
        text_color+=30 # ANSI foreground color codes are 30-37
        format.append(str(text_color))
    if background_color is not None:
        assert 0 <= background_color <= 7,"background_color == " + str(background_color) + " ∴ ¬﹙0 <= background_color <= 7﹚ ∴ AssertionError"
        background_color+=40 # ANSI background color codes are 40-47
        format.append(str(background_color))
    return "\x1b[%sm%s\x1b[0m" % (';'.join(format),str(text_string)) # returns a string with the appropriate formatting applied
# region fansi Examples
# print(fansi('ERROR:','red','bold')+fansi(" ATE TOO MANY APPLES!!!",'blue','underlined','yellow'))
# from random import randint
# print(seq([lambda old:old+fansi(chr(randint(0,30000)),randint(0,7),randint(0,7),randint(0,7))]*100,''))
# endregion
def fansi_print(
    text_string: object,
    text_color: object = None,
    style: object = None,
    background_color: object = None,
    underline_color=None,
    *,
    link=None,
    new_line=True,
    reset=True,
    truecolor=True
):
    """
    Print *text_string* in the terminal with ANSI styling (color, bold, underline,
    highlight, hyperlink, etc), by delegating to fansi().
    ('fansi' is a pun on 'fancy' and 'ANSI' - it uses ANSI escape sequences.)
    Example: print(fansi('ERROR:','red','bold')+fansi(" ATE TOO MANY APPLES!!!",'blue','underlined','yellow'))
    """
    styled_text = fansi(
        text_string,
        text_color=text_color,
        style=style,
        background_color=background_color,
        reset=reset,
        truecolor=truecolor,
        underline_color=underline_color,
        link=link,
    )
    line_ending = "\n" if new_line else ""
    print(styled_text, end=line_ending, flush=True)
def fansi_printed(x, *args, **kwargs):
    """ Prints x with fansi_print styling, then returns x unchanged - handy for inline debugging in expression pipelines """
    fansi_print(x, *args, **kwargs)
    return x
def print_fansi_reference_table() -> None:
    """
    Print a reference table of every style/foreground/background ANSI code combination,
    each cell rendered in its own formatting - useful when picking fansi arguments.
    On unix-like systems, also prints a sweep of all 256 extended terminal colors.
    """
    for style_code in range(8):
        for foreground_code in range(30, 38):
            row = ''
            for background_code in range(40, 48):
                codes = ';'.join([str(style_code), str(foreground_code), str(background_code)])
                row += '\x1b[%sm %s \x1b[0m' % (codes, codes)
            print(row)
    if currently_running_unix():
        print("ALSO PRINTING ALL 256 COLORS")
        # From https://superuser.com/questions/285381/how-does-the-tmux-color-palette-work/285400
        os.system('bash -c \'for i in {0..255}; do printf "\\x1b[38;5;${i}mcolor%-5i\\x1b[0m" $i ; if ! (( ($i + 1 ) % 8 )); then echo ; fi ; done\'')
def _old_fansi_syntax_highlighting(code: str,namespace=(),style_overrides={}):
"""
PLEASE NOTE THAT I DID NOT WRITE SOME OF THIS CODE!!! IT CAME FROM https://github.com/akheron/cpython/blob/master/Tools/scripts/highlight.py
Assumes code was written in python.
Method mainly intended for rinsp.
I put it in the r class for convenience.
Works when I paste methods in but doesn't seem to play nicely with rinsp. I don't know why yet.
See the highlight_sourse_in_ansi module for more stuff including HTML highlighting etc.
"""
default_ansi={
'comment':('\033[0;31m','\033[0m'),
'string':('\033[0;32m','\033[0m'),
'docstring':('\033[0;32m','\033[0m'),
'keyword':('\033[0;33m','\033[0m'),
'builtin':('\033[0;35m','\033[0m'),
'definition':('\033[0;33m','\033[0m'),
'defname':('\033[0;34m','\033[0m'),
'operator':('\033[0;33m','\033[0m'),
}
default_ansi.update(style_overrides)
try:
import keyword,tokenize,cgi,re,functools
try:
import builtins
except ImportError:
import builtins as builtins
def is_builtin(s):
'Return True if s is the name of a builtin'
return hasattr(builtins,s) or s in namespace
def combine_range(lines,start,end):
'Join content from a range of lines between start and end'
(srow,scol),(erow,ecol)=start,end
if srow == erow:
return lines[srow - 1][scol:ecol],end
rows=[lines[srow - 1][scol:]] + lines[srow: erow - 1] + [lines[erow - 1][:ecol]]
return ''.join(rows),end
def analyze_python(source):
'''Generate and classify chunks of Python for syntax highlighting.
Yields tuples in the form: (category, categorized_text).
'''
lines=source.splitlines(True)
lines.append('')
readline=functools.partial(next,iter(lines),'')
kind=tok_str=''
tok_type=tokenize.COMMENT
written=(1,0)
for tok in tokenize.generate_tokens(readline):
prev_tok_type,prev_tok_str=tok_type,tok_str
tok_type,tok_str,(srow,scol),(erow,ecol),logical_lineno=tok
kind=''
if tok_type == tokenize.COMMENT:
kind='comment'
elif tok_type == tokenize.OP and tok_str[:1] not in '{}[](),.:;@':
kind='operator'
elif tok_type == tokenize.STRING:
kind='string'
if prev_tok_type == tokenize.INDENT or scol == 0:
kind='docstring'
elif tok_type == tokenize.NAME:
if tok_str in ('def','class','import','from'):
kind='definition'
elif prev_tok_str in ('def','class'):
kind='defname'
elif keyword.iskeyword(tok_str):
kind='keyword'
elif is_builtin(tok_str) and prev_tok_str != '.':
kind='builtin'
if kind:
if written != (srow,scol):
text,written=combine_range(lines,written,(srow,scol))
yield '',text
text,written=tok_str,(erow,ecol)
yield kind,text
line_upto_token,written=combine_range(lines,written,(erow,ecol))
yield '',line_upto_token
def ansi_highlight(classified_text,colors=default_ansi):
'Add syntax highlighting to source code using ANSI escape sequences'
# http://en.wikipedia.org/wiki/ANSI_escape_code
result=[]
for kind,text in classified_text:
opener,closer=colors.get(kind,('',''))
result+=[opener,text,closer]
return ''.join(result)
return ansi_highlight(analyze_python(code))
except Exception:
return code # Failed to highlight code, presumably because of an import error.
def fansi_syntax_highlighting(code: str,
                              namespace=(),
                              style_overrides:dict={},
                              line_wrap_width:int=None,
                              show_line_numbers:bool=False,
                              lazy:bool=False,
                              ):
    """ Apply syntax highlighting to 'code', a given string of python code. Returns an ANSI-styled string for printing in a terminal. Provides extra arguments such as including line numbers, line wrapping stuff, custom styling via style_overrides, and lazy for processing super large amounts of code without having to wait for it to all finish.
        TODO: Because of the way it was programmed, it now included an extraneous new empty line on the top of the output. Feel free to remove that later brutishly lol (just lob it off the final output)
        If lazy==True, this function returns a generator of strings that should be printed sequentially without new lines
        If line_wrap_width is an int, it will wrap the whole output to that width - this is suprisingly tricky to do because of the ansi escape codes
        show_line_numbers, if true, will also display a line number gutter on the side
        EXAMPLE USING LAZY:
            #Lazy can make syntax highlighting of things like rp start instantly
            code=get_source_code(r)
            for chunk in fansi_syntax_highlighting(code,lazy=True,show_line_numbers=True,line_wrap_width=get_terminal_width()):
                print(end=chunk)
            print()
            The result is that it has a shorter delay to start ; but it also might take longer in total
        EXAMPLE:
            print(fansi_syntax_highlighting(get_source_code(load_image),line_wrap_width=30,show_line_numbers=False))
        PLEASE NOTE THAT I DID NOT WRITE SOME OF THIS CODE!!! IT CAME FROM https://github.com/akheron/cpython/blob/master/Tools/scripts/highlight.py
        Assumes code was written in python.
        Method mainly intended for rinsp.
        I put it in the r class for convenience.
        Works when I paste methods in but doesn't seem to play nicely with rinsp. I don't know why yet.
        See the highlight_sourse_in_ansi module for more stuff including HTML highlighting etc.
    """
    if not lazy and not show_line_numbers and not line_wrap_width:
        return _old_fansi_syntax_highlighting(code,namespace,style_overrides) #This one is less glitchy. Use it when we can until the new one is fixed.
    default_ansi={
        'comment':('\033[0;31m','\033[0m'),
        'string':('\033[0;32m','\033[0m'),
        'docstring':('\033[0;32m','\033[0m'),
        'keyword':('\033[0;33m','\033[0m'),
        'builtin':('\033[0;35m','\033[0m'),
        'definition':('\033[0;33m','\033[0m'),
        'defname':('\033[0;34m','\033[0m'),
        'operator':('\033[0;33m','\033[0m'),
    }
    default_ansi.update(style_overrides)
    #BUGFIX: this used to also import 'cgi' and 're' (both unused) inside a pointless
    #'try: ... except Exception: raise' wrapper. 'cgi' was removed from the stdlib in
    #Python 3.13 (PEP 594), so the import raised ImportError and broke this function.
    import keyword,tokenize,functools
    import builtins
    def is_builtin(s):
        'Return True if s is the name of a builtin'
        return hasattr(builtins,s) or s in namespace
    def combine_range(lines,start,end):
        'Join content from a range of lines between start and end'
        (srow,scol),(erow,ecol)=start,end
        if srow == erow:
            return lines[srow - 1][scol:ecol],end
        rows=[lines[srow - 1][scol:]] + lines[srow: erow - 1] + [lines[erow - 1][:ecol]]
        return ''.join(rows),end
    def analyze_python(source):
        '''Generate and classify chunks of Python for syntax highlighting.
           Yields tuples in the form: (category, categorized_text).
        '''
        lines=source.splitlines(True)
        lines.append('')
        readline=functools.partial(next,iter(lines),'')
        kind=tok_str=''
        tok_type=tokenize.COMMENT
        written=(1,0)
        for tok in tokenize.generate_tokens(readline):
            prev_tok_type,prev_tok_str=tok_type,tok_str
            tok_type,tok_str,(srow,scol),(erow,ecol),logical_lineno=tok
            kind=''
            if tok_type == tokenize.COMMENT:
                kind='comment'
            elif tok_type == tokenize.OP and tok_str[:1] not in '{}[](),.:;@':
                kind='operator'
            elif tok_type == tokenize.STRING:
                kind='string'
                if prev_tok_type == tokenize.INDENT or scol == 0:
                    kind='docstring'
            elif tok_type == tokenize.NAME:
                if tok_str in ('def','class','import','from'):
                    kind='definition'
                elif prev_tok_str in ('def','class'):
                    kind='defname'
                elif keyword.iskeyword(tok_str):
                    kind='keyword'
                elif is_builtin(tok_str) and prev_tok_str != '.':
                    kind='builtin'
            if kind:
                if written != (srow,scol):
                    text,written=combine_range(lines,written,(srow,scol))
                    yield '',text
                text,written=tok_str,(erow,ecol)
                yield kind,text
        line_upto_token,written=combine_range(lines,written,(erow,ecol))
        yield '',line_upto_token
    def ansi_highlight(classified_text,colors=default_ansi):
        'Add syntax highlighting to source code using ANSI escape sequences'
        # http://en.wikipedia.org/wiki/ANSI_escape_code
        nonlocal line_wrap_width, show_line_numbers
        if line_wrap_width is None:
            line_wrap_width = 9999999
        num_code_lines = code.count('\n')+1
        num_digits = len(str(num_code_lines)) #Max number of digits in the line numbers
        max_width = line_wrap_width
        if show_line_numbers:
            #Should always return strings of the same width if done correctly
            #TODO: Make this customizable through the args
            def line_number_prefix_generator( line_number):
                return (('%%%ii│ ')%num_digits)%line_number
            line_prefix_length = len(line_number_prefix_generator(num_code_lines))
            if line_wrap_width >= line_prefix_length:
                max_width = line_wrap_width-line_prefix_length
            else:
                #We have to not show line numbers, or else we'd be showing literally nothing but them!
                show_line_numbers=False
        def wrapped_line_tokens(tokens,max_width):
            #Wrap the string, respecting token boundaries when possible
            #Tokens is a list of [(kind,text), (kind,text), ... ] tuples
            #Output is a generator of [(kind,text,line_number) ... ] tuples
            #EXAMPLE TEST:
            #
            #     >>> list(wrapped_line_tokens([(11,'Hello\nWorld!\n123\nab\nc'),(22,'d'),(33,'e'),(44,'f')],2))
            #    [
            #        (11  , 'He', 0),
            #        (None, '\n', 1),
            #        (11  , 'll', 1),
            #        (None, '\n', 2),
            #        (11  , 'o' , 2),
            #        (None, '\n', 3),
            #        (11  , 'Wo', 3),
            #        (None, '\n', 4),
            #        (11  , 'rl', 4),
            #        (None, '\n', 5),
            #        (11  , 'd!', 5),
            #        (None, '\n', 6),
            #        (11  , '12', 6),
            #        (None, '\n', 7),
            #        (11  , '3' , 7),
            #        (None, '\n', 8),
            #        (11  , 'ab', 8),
            #        (None, '\n', 9),
            #        (11  , 'c' , 9),
            #        (22  , 'd' , 9),
            #        (33  , ''  , 9),
            #        (None, '\n', 10),
            #        (33  , 'e' , 10),
            #        (44  , 'f' , 10)
            #    ]
            #
            line_length=0
            line_number=0
            line_skip=0
            for kind,text in tokens:
                subtokens=split_including_delimiters(text,'\n')
                subtokens=subtokens[::-1]
                while subtokens:
                    assert max_width>=line_length
                    subtoken=subtokens.pop()
                    if subtoken=='\n':
                        if not line_skip:
                            line_number+=1
                        line_skip=max(0,line_skip-1)
                        line_length=0
                        #Probably can eliminate typehere....
                        yield None,subtoken,line_number
                    elif line_length+len(subtoken)>max_width:
                        index=max_width-line_length
                        token_right=subtoken[index:]
                        subtoken  =subtoken[:index]
                        line_length=0
                        subtokens.append(token_right)
                        subtokens.append('\n')
                        yield kind,subtoken,line_number
                        line_skip+=1
                    else:
                        line_length+=len(subtoken)
                        yield kind,subtoken,line_number
        digit_remover=str.maketrans('0123456789', '          ')
        prev_line_number=None
        from itertools import chain
        for kind,text,line_number in chain([[None,'\n',0]],wrapped_line_tokens(classified_text,max_width=max_width)):
            opener,closer=colors.get(kind,('',''))
            if show_line_numbers and text.endswith('\n'):
                prefix=line_number_prefix_generator(line_number+1)
                prefix=fansi(prefix,'cyan','bold')#,'black')
                if line_number==prev_line_number:
                    #https://stackoverflow.com/questions/19084443/replacing-digits-with-str-replace
                    prefix=prefix.translate(digit_remover)
                text=text+prefix
            yield from [opener,text,closer]
            prev_line_number=line_number
    output=(ansi_highlight(analyze_python(code)))
    if lazy:
        return output
    else:
        return ''.join(output)
def fansi_highlight_path(path):
    """ Syntax-highlights a path like "/path/to/thing/" - it colors the /'s differently from the rest. Returns a string with ansi escapes for printing in a terminal. """
    #BUGFIX: removed an unreachable duplicate return statement that followed the first return
    color='cyan'
    path=strip_ansi_escapes(path) #Remove any pre-existing styling so escapes don't nest
    parts=path.split('/')
    #Slashes in bold blue, path components in cyan
    return fansi('/','blue','bold').join(fansi(x,color) for x in parts)
_fansi_highlight_path = fansi_highlight_path
def fansi_pygments(
    code,
    language=None,
    *,
    style=None,
    color_mode=None
):
    """
    Highlight code using pygments and return a string with ANSI escape codes for colors.
    If language is not provided, it will attempt to autodetect the language.
    The style parameter allows specifying a color scheme for the highlighting.
    The color_mode parameter specifies the color mode for the output ('basic', '256', or 'true').
    Returns code unchanged when fansi is disabled.
    Raises ValueError for an unknown language, style, or color_mode.
    """
    if fansi_is_disabled():
        return code
    pip_import("pygments")
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import Terminal256Formatter, TerminalTrueColorFormatter, TerminalFormatter
    from pygments.lexers import guess_lexer
    from pygments.styles import get_style_by_name, get_all_styles
    from pygments.lexers import get_all_lexers

    #Handle defaults - Windows terminals are assumed less capable
    if currently_running_windows():
        if color_mode is None: color_mode = 'basic'
        if style is None:style='default'
    else:
        if color_mode is None:color_mode='256'
        if style is None:style='monokai'

    if language:
        try:
            lexer = get_lexer_by_name(language)
        except Exception: #BUGFIX: was a bare 'except:', which also swallowed KeyboardInterrupt/SystemExit
            available_languages = [lexer_info[0] for lexer_info in get_all_lexers()]
            raise ValueError("Invalid language '{}' specified. Available languages: {}".format(language, ', '.join(available_languages)))
    else:
        lexer = guess_lexer(code)

    try:
        style = get_style_by_name(style)
    except Exception: #BUGFIX: was a bare 'except:'
        available_style = list(get_all_styles())
        raise ValueError("Invalid color style '{}' specified. Available style: {}".format(style, ', '.join(available_style)))

    if color_mode == 'basic':
        formatter = TerminalFormatter()
    elif color_mode == '256':
        formatter = Terminal256Formatter(style=style)
    elif color_mode == 'true':
        formatter = TerminalTrueColorFormatter(style=style)
    else:
        raise ValueError("Invalid color mode '{}' specified. Available modes: basic, 256, true".format(color_mode))

    highlighted_code = highlight(code, lexer, formatter)
    return highlighted_code
def fansi_pygments_demo(code=None):
    """ Displays all themes for fansi_pygments """
    if code is None:
        code=unindent("""
            @decorator
            def f(x, *y):
                print("HELLO", 1+2.3 <= [])
            """).strip()
    pip_import('pygments')
    from pygments.styles import get_all_styles
    banner_width = string_width(code) + 10
    for theme_name in get_all_styles():
        #A centered dotted banner naming the theme, followed by the sample in that theme
        print(theme_name.center(banner_width, '·'))
        print(fansi_pygments(code, 'python', style=theme_name))
# endregion
# region Copy/Paste: [string_to_clipboard,string_from_clipboard]
#Fallback clipboard storage, used when the OS clipboard is unavailable (e.g. headless SSH sessions)
_local_clipboard_string=''#if we can't access a system OS clipboard, try and fake it with a local clipboard istead. Of course, you need to use the string_to_clipboard and clipboard_to_string functions to make this work, but that's ok
_local_clipboard_string_path=__file__+'.rp_local_clipboard' #File used to persist the fallback clipboard across rp processes/sessions
def _get_local_clipboard_string():
    """
    Read rp's fallback clipboard.
    Prefer the on-disk copy, so the fake clipboard is shared across rp processes and survives
    restarts (important over SSH into headless systems without a real clipboard).
    If the file can't be read, fall back to the in-process _local_clipboard_string as a last resort.
    """
    result = _local_clipboard_string
    try:
        result = text_file_to_string(_local_clipboard_string_path)
    except OSError:
        pass #Couldn't read the file - keep the in-process value
    return result
def _set_local_clipboard_string(string):
    """
    Write rp's fallback clipboard: always set the in-process variable, and persist it to
    disk when possible (so the fallback clipboard is shared across rp processes).
    """
    global _local_clipboard_string
    _local_clipboard_string=string
    try:
        string_to_text_file(_local_clipboard_string_path,string)
    except OSError:
        #BUGFIX: previously an unwritable file (e.g. read-only filesystem) raised here,
        #even though _get_local_clipboard_string documents an in-memory last resort.
        #The in-memory value set above still works for this process.
        pass
def string_to_clipboard(string):
    """
    Copies a string to the clipboard so you can paste it later
    First tries to copy the string to the system clipboard.
    If that doesn't work, it falls back to writing your string to a local file called '.rp_local_clipboard', and uses that to copy/paste along with the string_from_clipboard function. This is useful over SSH where pyperclip fails on linux systems. Because it uses a file, it's synced across rp processes and is persistent even after we close and reopen rp, even while over ssh on a system whose clipboard we can't modify for some reason.
    If that doesn't work, it falls back to reading/writing to a global variable called _local_clipboard_string. This string is lost if rp is closed.
    I decided not to label this function 'copy' because 'copy' could refer to copying objects such as lists etc, like [1,2,3].copy()
    """
    global _local_clipboard_string
    _copy_text_over_terminal(string) #Experimental
    _set_local_clipboard_string(string)
    try:
        try:
            from rp.Pyperclip import paste,copy
            assert not running_in_ssh() #This is a patch for Ryan Burgert's desktop computer, which doesn't like using the clipboard over ssh for some reason.
            copy(string)
        except Exception:
            assert currently_running_mac()
            import shlex
            #BUGFIX: this used to be "echo '%_s' | pbcopy" % string, which always raised
            #ValueError ('_' is not a valid format character), so this fallback never worked.
            #shlex.quote also prevents shell injection from clipboard contents.
            os.system("echo %s | pbcopy" % shlex.quote(string))
    except Exception:
        #BUGFIX: the logic here was inverted - the error used to be printed on SUCCESS
        #(after the try block completed), while failures returned silently.
        fansi_print("string_to_clipboard: error: failed to copy a string to the clipboard",'red')
def _copy_text_over_terminal(string):
"""
Encodes a given string in base64 and sends it to the terminal to be copied to the clipboard via OSC 52 ANSI escape codes.
Does this via "OSC52" (you can google that)
Doesn't work in all terminals - for example TMUX blocks it with default settings, and it doesn't work in Jupyter's terminals.
However, it does work when TMUX is configured with "set -g set-clipboard on", and works in Alacritty
Args:
string (str): The string to be copied to the clipboard.
Raises:
TypeError: If the input is not a string.
Note:
This function relies on the terminal's ability to interpret OSC 52 escape sequences.
"""
if not isinstance(string, str):
raise TypeError("Input must be a string")
# Import necessary modules
import sys
import base64
# Base64 encode the string
encoded_string = base64.b64encode(string.encode()).decode()
# Create the OSC 52 ANSI escape sequence
escape_sequence = "\033]52;c;{}\a".format(encoded_string)
# Write the escape sequence to stdout directly
sys.stdout.write(escape_sequence)
sys.stdout.flush()
def string_from_clipboard():
    """
    Pastes the string from the clipboard and returns that value
    First tries to paste the string from the system clipboard.
    If that doesn't work, it falls back to reading your string from a local file called '.rp_local_clipboard'
    If that doesn't work, it falls back to reading a global variable called _local_clipboard_string
    """
    try:
        from rp.Pyperclip import paste,copy
        assert not running_in_ssh() #This is a patch for Ryan Burgert's desktop computer, which doesn't like using the clipboard over ssh for some reason.
        return paste()
    except Exception:
        #Fall back to rp's fake clipboard (file-backed, or in-memory as a last resort)
        return _get_local_clipboard_string()
    #BUGFIX: a dead error-printing line after this try/except was removed - both paths return
def accumulate_clipboard_text(*, wipe=False, unique=False):
    """
    Automatically accumulates and combines text copied to the clipboard.
    This function continuously monitors your clipboard for any new text that you copy.
    Run this function, then start copying text using your system clipboard. The copied text will get longer and longer.
    Whenever you copy a piece of text, it will be automatically added to a running collection
    of text items. All the accumulated text items will be combined together and copied back
    to your clipboard, allowing you to easily collect and combine multiple pieces of text.
    Parameters:
        wipe (bool, optional): If True, will clear your clipboard before running.
        unique (bool, optional): If set to True, duplicate items will be ignored.
            Defaults to False.
    Returns:
        list: A list containing all the accumulated text items.
    Notes:
        - The function will continue running until you manually interrupt it using Ctrl+C (KeyboardInterrupt).
        - The accumulated text items will be combined using a newline character as the separator.
        - Each time a new text item is added, the updated collection will be copied back to your clipboard.
        - Upon exiting, the function will print a message indicating that it is terminating.
    Example:
        >>> accumulation = accumulate_clipboard_text(unique=True)
        # Start the clipboard text accumulator
        # Copy various pieces of text to accumulate them
        # Press Ctrl+C to stop the accumulator
        # The accumulated text will be available on your clipboard, with duplicates removed
        # The function will return a list containing all the accumulated text items
    """
    if wipe:
        string_to_clipboard('')
    accumulation = []
    separator = "\n"
    def get_accumulation_string():
        #All accumulated items joined into the single string we keep on the clipboard
        return separator.join(accumulation)
    colors = ["yellow", "green"] #Alternate print colors so consecutive items are distinguishable
    try:
        while True:
            combined = get_accumulation_string()
            clip = string_from_clipboard()
            if clip != combined:
                #Something new was copied (the clipboard no longer equals our combined string)
                if not unique or clip not in accumulation:
                    accumulation.append(clip)
                combined = get_accumulation_string()
                string_to_clipboard(combined)
                # sleep(.1) #We might need this if it breaks in the future if system lags...so far so good though.
                fansi_print(clip, colors[len(accumulation) % len(colors)])
    except KeyboardInterrupt:
        #BUGFIX: the exit message used to reference a nonexistent 'run_clipboard_text_accumulator'
        fansi_print("rp.accumulate_clipboard_text: Exiting", "cyan", "bold")
    return accumulation
# endregion
# region pseudo_terminal
# EXAMPLE CODE TO USE pseudo_terminal:
# The next 3 lines are used to import pseudo_terminal
# region pseudo_terminal definition
# #from r import make_pseudo_terminal
# def pseudo_terminal():pass # Easiest way to let PyCharm know that this is a valid def. The next line redefines it.
# exec(make_pseudo_terminal)
# endregion
# NOTE: In my PyCharm Live Templates, I made a shortcut to create the above three lines.
# make pseudo terminal ⟵ The template keyword.
# print("Result = "+str(pseudo_terminal()))
# endregion
# region 2d Methods:[width,height,_rgb_to_grayscale,gauss_blur,flat_circle_kernel,med_filter,med_filter,med_filter,grid2d,grid2d_map,resize_image]
# noinspection PyShadowingNames
#The following functions are very, very deprecated. Please don't use them.
# def width(image) -> int:
# return len(image)
# def height(image) -> int:
# return len(image[0])
def _rgb_to_grayscale(image): # A demonstrative implementation of this pair
    """
    Takes an image with multiple color channels
    Takes a 3d tensor as an input (X,Y,RGB)
    Outputs a matrix (X,Y ⋀ Grayscale value)
    Calculated by taking the average of the three channels.
    """
    try:
        #Fast path: average over the channel axis, preserving the input dtype
        image=as_numpy_array(image)
        assert image.ndim==3 #HWC - a failed assertion deliberately falls through to the legacy path below
        return image.mean(2).astype(image.dtype)
        # return np.average(image,2).astype(image.dtype) # Very fast if possible
    except Exception:
        # The old way, when I used nested lists to represent images
        # (Only doing this if the numpy way fails so my older scripts don't break)
        # 'z' denotes the grayscale channel.
        # z ﹦﹙r﹢g﹢b﹚÷3
        x,y,r,g,b=image_to_xyrgb_lists(image)
        # z=[*map(lambda a,b,c:(a+b+c)/3.,r,g,b)] ⟵ Got overflow errors!
        #NOTE(review): this legacy path divides by 256, so it also rescales byte values to ~[0,1) floats -
        #unlike the numpy path above, which keeps the input dtype. Presumably intentional for old scripts.
        z=list(range(assert_equality(len(x),len(y),len(r),len(g),len(b))))
        for i in z:
            z[i]=(float(r[i]) / 256 + float(g[i]) / 256 + float(b[i]) / 256) / 3
        return xyrgb_lists_to_image(x,y,z.copy(),z.copy(),z.copy())
def grayscale_to_rgb(matrix,number_of_channels=3):
    """ Turn a single-channel matrix into an HWC image by repeating it along a new last axis """
    channels = [matrix] * number_of_channels
    return np.stack(channels, axis=-1)
def gauss_blur(image,σ,single_channel: bool = False,mode: str = 'reflect',shutup: bool = False):
    """
    Gaussian-blur an image with standard deviation σ.
    Works in RGB, RGBA, or any other number of color channels - each channel is blurred independently.
    Parameters:
        image: a matrix (grayscale) or an HWC tensor (multichannel)
        σ: the gaussian standard deviation; σ == 0 returns the input unchanged
        single_channel: set True when the last axis is NOT a color-channel axis.
            IMPORTANT: This determines the difference between
                [1,2,3,4,5]           (when True )
                [[1],[2],[3],[4],[5]] (when False)
        mode: how pixels beyond the image border are handled; passed to scipy
            (one of 'constant','nearest','reflect','mirror','wrap')
        shutup: suppress the many-channels warning below
    """
    if σ == 0:
        return image
    mode=mode.lower()
    #BUGFIX: this assertion message used to say 'r.med_filter' - a copy/paste leftover
    assert mode in {'constant','nearest','reflect','mirror','wrap'},"r.gauss_blur: Invalid mode for blurring edge-areas of image. mode=" + str(mode)
    #BUGFIX: import from scipy.ndimage - the scipy.ndimage.filters namespace is deprecated
    #and removed in modern scipy versions
    from scipy.ndimage import gaussian_filter
    gb=lambda x:gaussian_filter(x,sigma=σ,mode=mode)
    tp=np.transpose
    sh=np.shape(image)
    assert isinstance(sh,tuple)
    if not single_channel and not sh[-1] <= 4 and not shutup: # Generally if you have more than 4 channels you are using a single_channel image.
        fansi_print("r.gauss_blur: Warning: Last channel has length of " + str(sh[-1]) + "; you results might be weird. Consider setting optional parameter 'single_channel' to True?",'red')
    s=list(range(len(sh)))
    if len(s) == 1 or single_channel: # No separate color-channel axis: blur the whole array at once
        return gb(image)
    #Move the channel axis to the front, blur each channel individually, then move it back to the end.
    #E.g. for a 3-axis image: transpose by [2,0,1], blur each channel, transpose back by [1,2,0].
    return tp([gb(x) for x in tp(image,[s[-1]] + list(s[:-1]))],list(s[1:]) + [s[0]])
# def med_filter(image,σ):
# # Works in RGB, RGBA, or any other number of color channels!
# from scipy.ndimage.filters import gaussian_filter as gb
# tp=np.transpose
# return tp([gb(x,σ) for x in tp(image,[2,0,1])],[1,2,0])# Blur each channel individually.
# # region Works with RGB but fails on single channels
# # cv2=pip_import('cv2')
# # # noinspection PyUnresolvedReferences
# # return cv2.GaussianBlur(image,(radius,radius),0)
# # endregion
_flat_circle_kernel_cache={}
def flat_circle_kernel(diameter):
    """ Returns a binary grayscale image (aka boolean matrix) with a circle in the middle with the given diameter """
    if diameter not in _flat_circle_kernel_cache:
        size=int(diameter)
        #Squared normalized distance from the center, along one axis
        axis_sq=np.linspace(-1,1,size) ** 2
        #Broadcast to a full grid of squared radial distances, then threshold at radius 1
        distance_sq=axis_sq[:,None]+axis_sq[None,:]
        _flat_circle_kernel_cache[diameter]=distance_sq<=1
    return _flat_circle_kernel_cache[diameter]
_gaussian_circle_kernel_cache={}
def gaussian_kernel(size=21, sigma=3,dim=2):
    """Returns a normalized 2D Gaussian kernel.
    Please note that increasing 'size' does NOT increase 'sigma': you must manually increase sigma proportionally if you want a bigger blur!
    Parameters
    ----------
    size : float, the kernel size (will be square)
    sigma : float, the sigma Gaussian parameter
    Returns
    -------
    out : array, shape = (size, size)
        an array with the centered gaussian kernel
    """
    key = size, sigma, dim
    if key not in _gaussian_circle_kernel_cache:
        assert dim==2 or dim==1,'Only 1d and 2d gaussians are supported right now'
        #Sample positions centered at 0, scaled so that exp(-coords²) is the gaussian
        coords = np.linspace(- (size // 2), size // 2, num=size)
        coords /= np.sqrt(2)*sigma
        sq = coords**2
        if dim==2:
            raw = np.exp(- sq[:, None] - sq[None, :])
        else:
            raw = np.exp(-sq)
        _gaussian_circle_kernel_cache[key] = raw / raw.sum() #Normalize so the kernel sums to 1
    return _gaussian_circle_kernel_cache[key]
def get_max_image_dimensions(*images):
    """ Given a set of images, return the maximum height and width seen across all of them """
    images = detuple(images)
    if is_numpy_array(images) or is_torch_tensor(images):
        #Efficiency shortcut: a tensor's images all share the same height and width
        return get_image_dimensions(images[0])
    max_height = max(get_image_height(image) for image in images)
    max_width  = max(get_image_width (image) for image in images)
    return max_height, max_width
def get_max_video_dimensions(*images):
    """ Given a set of videos, return the maximum height and width seen across all of them """
    images = detuple(images)
    if is_numpy_array(images) or is_torch_tensor(images):
        #Efficiency shortcut: a tensor's frames all share the same height and width
        return get_image_dimensions(images[0,0])
    max_height = max(get_video_height(video) for video in images)
    max_width  = max(get_video_width (video) for video in images)
    return max_height, max_width
def get_min_video_dimensions(*images):
    """ Given a set of videos, return the minimum height and width seen across all of them """
    images = detuple(images)
    if is_numpy_array(images) or is_torch_tensor(images):
        #Efficiency shortcut: a tensor's frames all share the same height and width
        return get_image_dimensions(images[0,0])
    min_height = min(get_video_height(video) for video in images)
    min_width  = min(get_video_width (video) for video in images)
    return min_height, min_width
def get_min_image_dimensions(*images):
    """ Given a set of images, return the minimum height and width seen across all of them """
    images = detuple(images)
    if is_numpy_array(images) or is_torch_tensor(images):
        #Efficiency shortcut: a tensor's images all share the same height and width
        return get_image_dimensions(images[0])
    min_height = min(get_image_height(image) for image in images)
    min_width  = min(get_image_width (image) for image in images)
    return min_height, min_width
#A video's dimensions are the max dimensions over its frames
get_video_dimensions = get_max_image_dimensions
def uniform_float_color_image(height:int,width:int,color:tuple=(0,0,0,0)):
    """
    Returns an image with the given height and width, where all pixels are the given color
    If the given color is a number, it returns a grayscale image
    Otherwise, the given color must be either an RGB or RGBA float color (a tuple with 3 or 4 floats between 0 and 1)
    EXAMPLE:
        for _ in range(16):
            height=randint(10,30)
            width=randint(10,30)
            color=random_rgb_float_color() #Color is like (.1235, .5742, .8652)
            tile=uniform_float_color_image(height,width,color)
            random_color_tiles.append(tile)
        image=tiled_images(random_color_tiles,border_thickness=0)
        display_image(image) #The result will look like https://i.imgur.com/COlmGRT.png
    """
    #NOTE(review): as_rgba_float_color presumably normalizes any input (including numbers and
    #color names) to a 4-tuple - if so, the is_number branch below is unreachable. Confirm.
    color = as_rgba_float_color(color)
    assert height>=0 and width>=0
    assert is_number(color) or is_color(color) and len(color) in {3,4}, 'Color should be a number, an RGB float color, or an RGBA float color'
    if is_number(color):
        #Grayscale: broadcast the scalar over an HW matrix
        output = np.ones((height,width),dtype=float)*color
        assert is_grayscale_image(output)
        return output
    else:
        #Color: broadcast the tuple over an HWC tensor (C = 3 or 4)
        output = np.ones((height,width,len(color)),dtype=float)*as_numpy_array([[[*color]]])
        assert len(color)==3 and is_rgb_image(output) or len(color)==4 and is_rgba_image(output)
        return output
def blend_images(bot, top, alpha=1, mode="normal"):
    """
    Blends two images together using various blending modes.
    Args:
        bot (Union[numpy.ndarray, float, Tuple[float, float, float], Tuple[float, float, float, float]]):
            The bottom image to blend. Can be an image, a float (treated as a grayscale value),
            or a color (RGB or RGBA tuple with values between 0 and 1).
        top (Union[numpy.ndarray, float, Tuple[float, float, float], Tuple[float, float, float, float]]):
            The top image to blend. Can be an image, a float (treated as a grayscale value),
            or a color (RGB or RGBA tuple with values between 0 and 1).
        alpha (Union[numpy.ndarray, float], optional):
            The alpha mask or value to use for blending.
            If an image, it will be converted to grayscale. Where alpha is closer to 1, top will be
            more opaque. Where alpha is closer to 0, top will be more transparent and bot will show more.
            Defaults to 1.
        mode (str, optional): The blend mode to use. Can be one of:
            - "normal"   : Blends images like in Photoshop
            - "add"      : Adds pixel values of the two images
            - "multiply" : Multiplies pixel values of the two images
            - "subtract" : Subtracts pixel values of top from bot
            - "min"      : Takes the minimum pixel value from top and bot
            - "max"      : Takes the maximum pixel value from top and bot
            - "contrast" : Multiplies the bot image by top, centered at .5.
                Here, top can be any floating point such as > 1.0 or even -1.0 to invert the image.
            Some modes also support "clip" at the end to force pixel outputs to be between 0 and 1.
            - "add clip"
            - "multiply clip"
            - "subtract clip"
            - "contrast clip"
            These modes are inspired by photoshop.
            NOTE:
                The specific implementation of alpha with respect to some of them is subject to change (except 'normal', that's set in stone).
                Don't rely on any blending except 'normal' mode to have a specific calculation with respect to alpha values!
            Defaults to "normal".
    Returns:
        numpy.ndarray: The blended image.
            Will always return a float rgba image as defined by rp.is_float_image and rp.is_rgba_image
    Examples:
        Example 1: Blending multiple images
            >>> dice    ='https://bellard.org/bpg/3.png'
            >>> penguin ='https://www.gstatic.com/webp/gallery3/2_webp_a.png'
            >>> mountains='https://cdn.britannica.com/67/19367-050-885866B4/Valley-Taurus-Mountains-Turkey.jpg'
            >>> checkerboard='https://static8.depositphotos.com/1176848/894/i/450/depositphotos_8945283-stock-photo-checkerboard-chess-background.jpg'
            >>> dice    =load_image(dice    ) #Has alpha channel
            >>> penguin =load_image(penguin ) #Has alpha channel
            >>> mountains=load_image(mountains) #Has no alpha channel
            >>> checkerboard=load_image(checkerboard) #Has no alpha channel
            >>> composite=blend_images(mountains,penguin,.5) #Penguin is slightly transparent
            >>> composite=blend_images(composite,dice,alpha=checkerboard) #Mix the dice on with a checkerboard mask
            >>> display_image(composite) #Result should look like https://i.imgur.com/lF8Sxuc.jpeg
        Example 2: Blending with colors and alpha
            >>> dice = 'https://bellard.org/bpg/3.png'
            >>> dice = load_image(dice) #Has alpha channel
            >>> display_image(blend_images((0,1,0),dice)) #Should look like https://i.imgur.com/iu6Z8bk.png
            >>> display_image(blend_images((0,1,0),(1,0,1),alpha=dice)) #Should look like https://i.imgur.com/gxaauuD.png
            >>> display_image(blend_images(1,1/2,alpha=dice)) #Should look like https://i.imgur.com/f0sKWY5.png
        Example 3: Numeric blending examples
            >>> blend_images(0,.5,.5)
            [[[0.25 0.25 0.25 1.  ]]]
            >>> blend_images(0,(0,1,0),.5)
            [[[0.  0.5 0.  1. ]]]
            >>> blend_images(1,(0,1,0),.5)
            [[[0.5 1.  0.5 1. ]]]
            >>> blend_images(1,(0,1,0,.5),.5)
            [[[0.75 1.   0.75 1.  ]]]
        Example 4: Complex blending with text and blur
            >>> dog=load_image('https://i.insider.com/5484d9d1eab8ea3017b17e29?width=600&format=jpeg.jpg')
            >>> nebula=load_image('https://spaceplace.nasa.gov/nebula/en/nebula1.en.jpg')
            >>> nebula,dog=crop_images_to_min_size(nebula,dog)
            >>> text=cv_text_to_image("OUTER\\nSPACE\\nDOGGO!!",scale=4,thickness=20)
            >>> composite=blend_images(dog,nebula,alpha=text)
            >>> display_image(composite) #Should look like https://i.imgur.com/wEc1t8e.png
            >>> composite=blend_images(dog,nebula,alpha=cv_gauss_blur(text,sigma=15)) #Should look like https://i.imgur.com/YtPtR1p.png
        Example 5: Different blending modes
            >>> display_image(blend_images(mountains,penguin,mode='add')) #Add mode example
            >>> display_image(blend_images(mountains,penguin,mode='multiply')) #Multiply mode example
        Example 5: Live webcam demo with multiple blend modes
            >>> lena_image = load_image(
            >>>     "https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png",
            >>>     use_cache=True,
            >>> )
            >>> while True:
            >>>     cam_image = load_image_from_webcam()
            >>>     display_image(
            >>>         blend_images(
            >>>             blend_images(
            >>>                 cam_image,
            >>>                 rotate_image(lena_image, toc() * 10),
            >>>                 mode="multiply",
            >>>             ),
            >>>             lena_image,
            >>>             mode="add",
            >>>         ),
            >>>     )
    Note:
        Most of the code here is to handle the different types of inputs (top and bot can be floats, images or colors etc)
        If the alpha, top or bot image dimensions don't match - its ok! It will choose the size of the larger one and align their top left corners.
    """
    #Input validation
    assert is_image(top) or is_color(top) and len(top) in {3,4} or is_number(top) or isinstance(top,str)
    assert is_image(bot) or is_color(bot) and len(bot) in {3,4} or is_number(bot) or isinstance(bot,str)
    assert is_image(alpha) or is_number(alpha)
    #Resolve color names / hex codes up front
    if isinstance(top, str):top=as_rgba_float_color(top)
    if isinstance(bot, str):bot=as_rgba_float_color(bot)
    blend_modes = {
        "normal"  ,
        "min"     ,
        "max"     ,
        "add"     , "add clip"     ,
        "multiply", "multiply clip",
        "subtract", "subtract clip",
        "contrast", "contrast clip",
    }
    assert mode in blend_modes, 'Please choose a blend mode from the following options: '+str(blend_modes)
    #Determine the height and width of the output
    input_images = [x for x in (bot,top,alpha) if is_image(x)]
    if input_images:
        #If we have at least one image as an input, the output size is the max of all of them
        height,width = get_max_image_dimensions(input_images)
    else:
        #Otherwise, the output image will have a height and width of 1x1
        height,width = 1, 1
    #BUGFIX: a second string-conversion pass used to live here; it was dead code (strings were
    #already converted above) and contained a typo that assigned bot's color to top. Removed.
    #If top or bot are numbers, turn them into grayscale RGB float colors
    if is_number(top):top=float(top);top=(top,top,top);assert is_float_color(top)
    if is_number(bot):bot=float(bot);bot=(bot,bot,bot);assert is_float_color(bot)
    #If top or bot are colors, turn them into solid-colored images
    if is_color (top  ):top  =uniform_float_color_image(height, width, top  )
    if is_color (bot  ):bot  =uniform_float_color_image(height, width, bot  )
    if is_number(alpha):alpha=uniform_float_color_image(height, width, alpha)
    #Whatever top, bot and alpha started as, by this point they should all be images
    assert is_image(top  )
    assert is_image(bot  )
    assert is_image(alpha)
    #Make sure all images are now the same size
    bot,top,alpha=crop_images_to_max_size(bot,top,alpha)
    top  =as_rgba_image     (as_float_image(top  ,copy=False),copy=False)
    bot  =as_rgba_image     (as_float_image(bot  ,copy=False),copy=False)
    alpha=as_grayscale_image(as_float_image(alpha,copy=False),copy=False)
    top_alpha=top[:,:,3] #The alpha channel of the top image
    bot_alpha=bot[:,:,3] #The alpha channel of the bot image
    #BUGFIX: this used to be 'alpha*=top_alpha', which mutated the caller's array when alpha
    #was passed in as a float grayscale image (the conversions above use copy=False)
    alpha=alpha*top_alpha #Take the top image's alpha channel into consideration
    output_alpha=1-((1-alpha)*(1-bot_alpha)) #The alpha channel of the output image. The output image is always more opaque than the input
    alpha=alpha[:,:,None]
    if   "normal"   in mode.split(): output = bot * (1 - alpha) + top * alpha
    elif "add"      in mode.split(): output = bot + top * alpha
    elif "multiply" in mode.split(): output = bot * (1 - alpha + top * alpha)
    elif "subtract" in mode.split(): output = bot - top * alpha
    elif "min"      in mode.split(): output = np.minimum(bot, top)
    elif "max"      in mode.split(): output = np.maximum(bot, top)
    elif "contrast" in mode.split(): output = bot * (1 - alpha) + ((bot - 0.5) * top + 0.5) * alpha
    if 'clip' in mode.split():
        #If we specify we want to clip the output, it will be constrained between 0 and 1. Useful for some blend modes such as "add clip" etc
        output = np.clip(output, 0, 1)
    output[:,:,3]=output_alpha
    return output
# def blend_videos(bot, top, alpha=1, mode="normal", *, show_progress=False):
# is_numpy
# bot, top = trim_videos_to_max_length(bot, top)
# is_numpy = is_numpy_array(bot) and is_numpy_array(top) and get_video_dimensions(bot)==get_video_dimensions(top)
# return
def overlay_images(*images,mode='normal'):
    """
    Blends all the given images on top of one another; the last one being on top
    It takes into consideration any alpha channels, if the images are RGBA
    """
    if len(images)==1:
        #A single argument may be a lone image, or a collection of images/colors
        images=images[0]
    if is_image(images):
        #Only one image total: nothing to blend - return a copy
        return images.copy()
    bottom=images[0]
    #An image bottom layer is copied (+0 copies); a float or color tuple is used as-is
    result = bottom+0 if is_image(bottom) else bottom
    for layer in images[1:]:
        result = blend_images(result, layer, mode=mode)
    return result
def get_checkerboard_image(height=64,
                           width=64,
                           *,
                           tile_size=8,
                           first_color=(1.0, 1.0, 1.0, 1.0),
                           second_color=(0.0, 0.0, 0.0, 1.0)
    ):
    """
    Generate a checkerboard image as a numpy array in HWC form.
    Default parameters look like an actual game checkerboard.
    Parameters:
    - height: int
        The height of the output image.
    - width: int
        The width of the output image.
    - tile_size: int or tuple of int (tile_height, tile_width)
        The size of each checkerboard tile. If an int is given, the tiles are square.
    - first_color: tuple of float (r, g, b, a)
        The color of the top left tile (and every other even tile)
    - second_color: tuple of float (r, g, b, a)
        The color of the second tile (and every other odd tile)
    Returns:
    - img: ndarray
        A numpy array of shape (height, width, 4), representing the checkerboard image.
        Each pixel is a 4-tuple of float values representing an RGBA color.
    Example:
        >>> img = get_checkerboard_image(100, 200, (20, 30), (1, 1, 1, 1), (0, 0, 0, 1))
        >>> img.shape
        (100, 200, 4)
    Example:
        # Alpha image with checkerboard background
        rgba_image=load_image('https://bellard.org/bpg/2.png')
        height,width=get_image_dimensions(rgba_image)
        background=get_checkerboard_image(height,width, second_color=.75) #You can use floats as colors
        composite=blend_images(background,rgba_image)
        display_image(composite)
    Example:
        for h in range(1,32):
            for w in range(1,32):
                display_image(get_checkerboard_image(256,256,(h,w)))
    Written with GPT4's help.
    """
    import numpy as np
    import math
    # Handle both int and tuple tile_size
    tile_height, tile_width = (
        (tile_size, tile_size) if isinstance(tile_size, int) else tile_size
    )
    # Create the base tiles
    # CONSISTENCY FIX: call as_rgba_image directly (like the rest of this file) instead of
    # through the rp self-import
    second_tile = as_rgba_image(uniform_float_color_image(tile_height, tile_width, second_color))
    first_tile  = as_rgba_image(uniform_float_color_image(tile_height, tile_width, first_color ))
    # Create the base 2x2-tile pattern
    base_tile_row1 = np.hstack((first_tile, second_tile))
    base_tile_row2 = np.hstack((second_tile, first_tile))
    base_tile = np.vstack((base_tile_row1, base_tile_row2))
    # Calculate the number of repetitions needed to cover the requested size
    reps_y = math.ceil(height / (2 * tile_height))
    reps_x = math.ceil(width / (2 * tile_width))
    # Repeat the base tile to cover the image size
    img = np.tile(base_tile, (reps_y, reps_x, 1))
    # Crop to the desired image size
    img = img[:height, :width, :]
    return img
def with_drop_shadow(
    image,
    *,
    x=0,
    y=0,
    color=(0, 0, 0, 1),
    blur=10,
    opacity=1
):
    """
    Applies a drop shadow to an image
    **DEFAULT ARGUMENT VALUES ARE SUBJECT TO CHANGE**
    """
    image = as_float_image(as_numpy_image(image, copy=False), copy=False)
    #The shadow's mask is the image's alpha channel, offset by (x,y) and blurred
    shadow_mask = shift_image(get_image_alpha(image), x, y, allow_growth=False)
    shadow_mask = cv_gauss_blur(shadow_mask, sigma=blur)
    h, w = get_image_dimensions(image)
    #A solid-color layer carrying the shadow mask (scaled by opacity) as its alpha
    shadow_layer = with_alpha_channel(
        uniform_float_color_image(h, w, color),
        shadow_mask * opacity,
        copy=False,
    )
    return blend_images(shadow_layer, image)
def with_drop_shadows(images,**kwargs):
    """ Plural of with_drop_shadow: applies the same drop shadow to every image """
    output = []
    for image in images:
        output.append(with_drop_shadow(image, **kwargs))
    return output
def with_corner_radius(image, radius, *, antialias=True, background=None):
    """
    Applies an alpha mask to round off the corners of an image
    Radius is, of course, measured in pixels
    NOTE(review): the 'background' parameter is accepted but never used in this body - confirm
    whether it was meant to be composited behind the rounded image.
    EXAMPLE:
        image = load_image(
            "https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png"
        )
        for radius in resize_list(range(max(get_image_dimensions(image)) // 2), 30):
            display_alpha_image(
                with_drop_shadow(
                    crop_image(
                        with_corner_radius(image, radius),
                        get_image_height(image) + 200,
                        get_image_width(image) + 200,
                        origin="center",
                    ),
                    opacity=0.8,
                    blur=100,
                    x=50,
                    y=50,
                )
            )
    """
    from PIL import Image, ImageDraw
    radius = round(radius)
    assert radius>=0
    mask_size = tuple(get_image_dimensions(image)[::-1]) # PIL uses (width, height)
    if antialias:
        #Draw the mask at 2x resolution, then downsample - PIL's rounded_rectangle is not antialiased
        antialias_upsampling_factor = 2
        radius *= antialias_upsampling_factor
        original_mask_size = mask_size
        mask_size = tuple(x * antialias_upsampling_factor for x in mask_size)
    mask = Image.new("L", mask_size, 0)
    draw = ImageDraw.Draw(mask)
    draw.rounded_rectangle([(0, 0), mask_size], radius, fill=255)
    if antialias:
        mask = mask.resize(original_mask_size)
    #Multiply the image's existing alpha by the rounded-rectangle mask
    alpha = as_float_image(get_image_alpha(image),copy=False) * as_float_image(mask,copy=False)
    return with_image_alpha(image, alpha)
def with_image_glow(image, *, blur=None, strength=None):
    """
    Adds a bloom effect to an image with a given blur and strength.
    The default values are subject to change - they're purely aesthetic!
    EXAMPLE:
        >>> for i in range(1000):
        ...     display_image(
        ...         with_image_glow(
        ...             resize_image_to_fit(pil_text_to_image(
        ...                 "Hello World\n0123456789\n" + str(i),
        ...                 font="https://github.com/ctrlcctrlv/lcd-font/raw/master/otf/LCD14.otf",
        ...                 color=(.5,.9,.1),
        ...                 size=256,
        ...                 align='center',
        ...             ),height=512),
        ...             blur=50,
        ...             strength=1.5,
        ...         )
        ...     )
    EXAMPLE:
        >>> url = "https://www.shutterstock.com/shutterstock/videos/1077886106/preview/stock-footage-jun-hong-kong-china-asia-drone-hyperlapse-of-hong-kong-international-financial-centre.webm"
        ... video = load_video(url)
        ... frames = []
        ... for frame in eta(video):
        ...     frame = with_image_glow(frame)
        ...     frames.append(frame)
        ... display_video(
        ...     vertically_concatenated_videos(
        ...         labeled_videos([video, frames], ["Input Video", "with_image_glow"])
        ...     )
        ... )
    """
    #Aesthetic defaults
    if blur is None:
        blur = 10
    if strength is None:
        strength = 1
    image = as_float_image(image)
    image = as_rgba_image(image)
    #The glow layer: an alpha-weighted blur of the image, scaled by its blurred alpha
    blurred = cv_gauss_blur(image, blur, alpha_weighted=True)
    glow = as_rgb_image(blurred) * as_rgb_image(get_image_alpha(blurred))
    #Additively brighten the image's RGB by the glow; alpha is preserved via with_image_rgb
    rgb = as_rgb_image(image) + glow * strength
    return with_image_rgb(image, rgb)
def with_image_glows(*images, blur=None, strength=None):
    """Plural of with_image_glow"""
    images = detuple(images)
    output = []
    for image in images:
        output.append(with_image_glow(image, blur=blur, strength=strength))
    return output
def with_corner_radii(*images, radius, antialias=True):
    """Plural of with_corner_radius: rounds the corners of every given image"""
    images = detuple(images)
    output = []
    for image in images:
        output.append(with_corner_radius(image, radius, antialias=antialias))
    return output
def get_alpha_outline(image,*,inner_radius=0,outer_radius=0,include_edges=True,allow_growth=False):
    """
    Computes an outline of an image's alpha channel (via dilation minus erosion) and returns it as an alpha mask.

    You should set inner_radius>0 or outer_radius>0
    include_edges (bool): Has an effect when inner_radius>0 - if True, it assumes there's 0 alpha outside the image - and as a result can create an outline around the edges of the image
    allow_growth (bool): If True and outer_radius>0, pads the image with transparent pixels so the outline can extend past the original bounds
    Returns an alpha mask
    """
    if include_edges:
        # Add a 1px transparent border so the outline can hug the image edges
        image=bordered_image_solid_color(image,color=(0,0,0,0))
    if allow_growth and outer_radius:
        #BUGFIX: this line previously referenced an undefined name `color` (NameError);
        #grow with transparent padding, matching with_alpha_outline's growth behavior
        image=bordered_image_solid_color(image,color=(0,0,0,0),thickness=outer_radius)
    mask=get_alpha_channel(image)
    mask=as_binary_image(mask)
    dilated=cv_dilate(mask,diameter=outer_radius,circular=True)
    eroded=cv_erode(mask,diameter=inner_radius,circular=True)
    outline=dilated&~eroded
    if include_edges:
        # Remove the 1px border we added above
        outline=outline[1:-1,1:-1]
    outline = as_grayscale_image(outline)
    return outline
def with_alpha_outline(image,*,inner_radius=0,outer_radius=0,include_edges=True,color=(1,1,1,1),allow_growth=False):
    """
    Draws a colored outline around the non-transparent region of an image.
    See rp.get_alpha_outline for the meaning of the radius/edge arguments.
    """
    if allow_growth and outer_radius:
        # Pad with transparent pixels so the outline has room to extend past the image bounds
        image = bordered_image_solid_color(image, color=(0, 0, 0, 0), thickness=outer_radius)

    outline_mask = get_alpha_outline(
        image,
        inner_radius=inner_radius,
        outer_radius=outer_radius,
        include_edges=include_edges,
        allow_growth=False,  # Any growth was already applied above
    )

    outline_color = as_rgba_float_color(color)
    return blend_images(image, outline_color, outline_mask)
def with_alpha_outlines(*images,**kwargs):
    """Plural of with_alpha_outline - applies the same outline settings to every given image"""
    return [with_alpha_outline(image, **kwargs) for image in detuple(images)]
def get_progress_bar_image(
    progress,
    *,
    height=10,
    width=100,
    bar_color="white",
    background_color="black",
    reverse=False
):
    """
    Generate a rectangular RGBA progress bar image.

    Args:
        progress (float): Progress value between 0 and 1 (values outside are clamped).
        height (int): Height of the progress bar image in pixels. Default is 10.
        width (int): Width of the progress bar image in pixels. Default is 100.
        bar_color (str, tuple[float], float): Color of the progress bar.
        background_color (str, tuple[float], float): Color of the background.
        reverse (bool): If True, the bar fills from right to left instead of left to right.
        Colors can be given as a string, as a float, or a tuple of RGB or RGBA floats (see as_rgba_float_color)

    Returns:
        numpy.ndarray: RGBA image of the progress bar.

    EXAMPLE:
        >>> N=1000
        ... for i in range(N):
        ...     display_image(get_progress_bar_image(i/N,width=10))
    """
    bar_color = as_rgba_float_color(bar_color)
    background_color = as_rgba_float_color(background_color)

    progress = clamp(progress, 0, 1)

    #BUGFIX: removed a dead `background = uniform_float_color_image(...)` call
    #whose result was never used (blend_images receives background_color directly)

    alpha = np.zeros((height, width))
    bar_width = progress * width
    bar_floor = int(bar_width)
    bar_remainder = bar_width - bar_floor

    alpha[:, :bar_floor] = 1
    if bar_remainder and bar_floor < width:
        # A bit of antialiasing: the partially-filled column gets fractional alpha
        alpha[:, bar_floor] = bar_remainder

    bar_image = blend_images(background_color, bar_color, alpha)

    if reverse:
        bar_image = horizontally_flipped_image(bar_image)

    return bar_image
def image_with_progress_bar(
    image,
    progress,
    *,
    size=10,
    bar_color="white",
    background_color="black",
    position='top',
    reverse=False
):
    """
    Adds a progress bar to an image.

    size: thickness of the bar in pixels. Positive sizes attach the bar outside the
          image (the output grows); negative sizes overlay the bar on top of the image;
          size==0 returns a copy of the image unchanged.
    position: which edge the bar is drawn on - 'top', 'bottom', 'left' or 'right'
    reverse: flips the fill direction of the bar

    See rp.get_progress_bar_image for further documentation.

    EXAMPLE:
        >>> image = load_image(
        ...     "https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png",
        ...     use_cache=True,
        ... )
        ... N = 1000
        ... for i in range(N):
        ...     display_alpha_image(
        ...         labeled_image(
        ...             image_with_progress_bar(
        ...                 image,
        ...                 i / N,
        ...                 bar_color="white",
        ...                 background_color="dark blue",
        ...             ),
        ...             f"{i}",
        ...             font="G:Chivo Mono",
        ...             background_color="dark blue",
        ...         ),
        ...     )
    """
    assert isinstance(position, str)
    assert is_number(size)

    if size==0:
        return as_numpy_image(image, copy=True)

    if position in ['top', 'bottom']:
        bar_width = get_image_width(image)
        bar_height = size

        # NOTE(review): gather_args_call appears to fill the remaining parameters
        # (progress, bar_color, background_color, reverse) by name from this scope's
        # locals - renaming these local variables would change behavior. TODO confirm.
        progress_bar_image = gather_args_call(
            get_progress_bar_image,
            height=abs(bar_height),
            width=bar_width,
        )

        images = progress_bar_image, image

        if bar_height>0:
            # Positive size: attach the bar outside the image
            if position=='top':
                return vertically_concatenated_images(images)
            else:
                return vertically_concatenated_images(images[::-1])
        else:
            #Overlay the bar over the image
            images = crop_images_to_max_size(
                images,
                origin={
                    "top": "top left",
                    "bottom": "bottom right",
                }[position],
            )
            return blend_images(*images[::-1])

    elif position=='left':
        # Rotate so the left edge becomes the top edge, recurse, then rotate back
        image = rotate_image(image, 90)
        image = gather_args_call(image_with_progress_bar, position='top', reverse=not reverse)
        image = rotate_image(image, -90)
        return image

    elif position=='right':
        image = rotate_image(image, 90)
        image = gather_args_call(image_with_progress_bar, position='bottom', reverse=not reverse)
        image = rotate_image(image, -90)
        return image

    else:
        assert False, 'rp.image_with_progress_bar: position should be "top", "bottom", "left" or "right", not '+repr(position)
def video_with_progress_bar(
    video,
    *,
    size=10,
    bar_color="white",
    background_color="black",
    reverse=False,
    position='top',
    lazy=False
):
    """
    Adds a progress bar to the top of a video to see how far into it you are.

    lazy: if True, returns a generator (wrapped in IteratorWithLen when the input
          has a known length) instead of materializing all frames up-front.
    The video must have at least 2 frames, since progress spans frame 0 to frame N-1.

    See rp.get_progress_bar_image for further documentation.

    EXAMPLE:
        >>> display_video(
        ...     video_with_progress_bar(
        ...         load_video(
        ...             "https://www.shutterstock.com/shutterstock/videos/1070160847/preview/stock-footage-electric-car-drive-on-the-wind-turbines-background-car-drives-along-a-mountain-road-electric-car.webm",
        ...             use_cache=True,
        ...         ),
        ...         lazy=True,
        ...     )
        ... )

    EXAMPLE (slightly crazier):
        >>> video = load_video(
        ...     "https://www.shutterstock.com/shutterstock/videos/1056263531/preview/stock-footage-cctv-ai-facial-recognition-camera-zoom-in-recognizes-person-elevated-security-camera-surveillance.webm",
        ...     use_cache=True,
        ... )
        ... video=resize_list(video,30)
        ... video=resize_images_to_hold(video,height=256)
        ... video = video_with_progress_bar(
        ...     video,
        ...     position="right",
        ...     size=-20,
        ...     background_color="translucent orange",
        ...     lazy=True,
        ... )
        ... video = video_with_progress_bar(
        ...     video,
        ...     position="right",
        ...     size=-5,
        ...     background_color="green",
        ...     bar_color='translucent black',
        ...     lazy=True,
        ...     reverse=True
        ... )
        ... video = video_with_progress_bar(
        ...     video,
        ...     position="top",
        ...     bar_color="white",
        ...     background_color="translucent black",
        ...     size=10,
        ...     lazy=True,
        ... )
        ... video = video_with_progress_bar(
        ...     video,
        ...     position="bottom",
        ...     lazy=True,
        ... )
        ... video = video_with_progress_bar(
        ...     video,
        ...     position="left",
        ...     bar_color='white',
        ...     reverse=False,
        ...     lazy=True,
        ... )
        ... video = video_with_progress_bar(
        ...     video,
        ...     position="left",
        ...     bar_color='blue white',
        ...     reverse=True,
        ...     lazy=True,
        ... )
        ... video = video_with_progress_bar(
        ...     video,
        ...     position="left",
        ...     bar_color='blue blue white',
        ...     reverse=False,
        ...     lazy=True,
        ... )
        ... video = video_with_progress_bar(
        ...     video,
        ...     position="left",
        ...     bar_color='blue blue blue white',
        ...     reverse=True,
        ...     lazy=True,
        ... )
        ...
        ... display_video(with_alpha_checkerboards(video,lazy=True),loop=True)
    """
    def helper():
        nonlocal size, bar_color, background_color, position, reverse
        length = len(video)
        #BUGFIX: was `assert length > 0`, which let 1-frame videos through to a
        #ZeroDivisionError at `index / (length - 1)` below; the message always meant >1
        assert length > 1, 'Cannot make progress bar on video with only one frame - length='+str(length)
        for index, image in enumerate(video):
            progress = index / (length - 1)
            # gather_args_call pulls image, progress, size, bar_color, background_color,
            # position and reverse from these locals by name - do not rename them
            frame = gather_args_call(image_with_progress_bar)
            yield frame

    output = helper()

    if lazy:
        if hasattr(video, '__len__'):
            # Preserve the length so downstream code can still len() the lazy video
            length = len(video)
            output = IteratorWithLen(output, length)
    else:
        output = list(output)

    return output
def boomerang_video(video):
    """
    Makes a video play forwards then backwards ("boomerang" style), dropping the
    duplicated endpoint frames so the result loops seamlessly.
    If given a path/URL string, the video is loaded first.
    """
    if isinstance(video, str):
        video = load_video(video)
    frames = list(video)
    forwards = frames[:-1]
    backwards = frames[::-1][:-1]
    return forwards + backwards
def _get_executable(name, download_urls, executable_name):
    """
    Downloads (once, cached in rp's downloads folder) and unzips a release archive
    for the current platform, then returns the path to the named executable inside it.

    download_urls: dict with 'macos', 'linux' and 'windows' keys mapping to zip URLs
    """
    download_dir = make_directory(path_join(_rp_downloads_folder, name))

    if currently_running_mac():
        platform_key = "macos"
    elif currently_running_linux():
        platform_key = "linux"
    else:
        platform_key = "windows"
    url = download_urls[platform_key]

    zip_file = download_url(url, download_dir, skip_existing=True, show_progress=True)

    folder = strip_file_extension(zip_file)
    if not folder_exists(folder):
        unzip_to_folder(zip_file)
    assert folder_exists(folder)

    executable = path_join(folder, executable_name)
    assert file_exists(executable), executable
    return executable
def _get_rife_executable():
    """Returns the path to the rife-ncnn-vulkan executable or if it doesn't exist in rp downloads it"""
    # Release page: https://github.com/nihui/rife-ncnn-vulkan/releases
    urls = {
        "macos"  : "https://github.com/nihui/rife-ncnn-vulkan/releases/download/20221029/rife-ncnn-vulkan-20221029-macos.zip",
        "linux"  : "https://github.com/nihui/rife-ncnn-vulkan/releases/download/20221029/rife-ncnn-vulkan-20221029-ubuntu.zip",
        "windows": "https://github.com/nihui/rife-ncnn-vulkan/releases/download/20221029/rife-ncnn-vulkan-20221029-windows.zip",
    }
    return _get_executable("rife", urls, "rife-ncnn-vulkan")
def _get_esrgan_executable():
    """Returns the path to the realesrgan-ncnn-vulkan executable or if it doesn't exist in rp downloads it"""
    #TODO: Make a function to use this on images
    # Release page: https://github.com/xinntao/Real-ESRGAN/releases/tag/v0.2.5.0
    urls = {
        "macos"  : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip",
        "linux"  : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip",
        "windows": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip",
    }
    return _get_executable("esrgan", urls, "realesrgan-ncnn-vulkan")
def slowmo_video_via_rife(video):
    """ Doubles the framerate of a given video. Can be a list of images as defined by rp.is_image, or a numpy array etc. Anything compatible with save_image."""
    executable = _get_rife_executable()

    # RIFE works on folders of image files, so round-trip through temp directories
    input_dir = make_directory(temporary_file_path())
    output_dir = make_directory(temporary_file_path())
    try:
        save_images(video, input_dir, show_progress=True)

        quoted = [shlex.quote(x) for x in (executable, input_dir, output_dir)]
        command = "{} -i {} -o {}".format(*quoted)
        fansi_print(command, 'yellow')
        _run_sys_command(command)

        result = load_images(output_dir, show_progress=True)
        result = as_numpy_array(result)
    finally:
        # Always clean up the temp folders, even if RIFE failed
        for directory in (input_dir, output_dir):
            if folder_exists(directory):
                delete_folder(directory)
    return result
def _crop_images_to_max_or_min_size(*images,origin='top left',criterion=max,copy=True,do_height=True,do_width=True):
    """Shared implementation behind crop_images_to_{max,min}_{size,height,width}"""
    images = detuple(images)

    # Array inputs already share one shape - skip all the per-image work
    if is_numpy_array(images) or is_torch_tensor(images):
        return images + 0 if copy else images

    sizes = [get_image_dimensions(image) for image in images]
    if len(set(sizes)) == 1:
        # All frames already match - no cropping necessary
        return list(images)

    heights, widths = zip(*sizes)
    target_height = criterion(heights) if do_height else None
    target_width  = criterion(widths ) if do_width  else None

    return [
        crop_image(image, target_height, target_width, origin=origin, copy=copy)
        for image in images
    ]
def crop_images_to_max_size(*images,origin='top left',copy=True):
    """
    Gives every image the same height and width: the largest height and largest
    width found among them, padding with blank space where needed (via crop_image).

    EXAMPLE:
        ans='https://i.ytimg.com/vi/MPV2METPeJU/maxresdefault.jpg https://i.insider.com/5484d9d1eab8ea3017b17e29?width=600&format=jpeg&auto=webp https://s3.amazonaws.com/cdn-origin-etr.akc.org/wp-content/uploads/2017/11/13002248/GettyImages-187066830.jpg https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/best-small-dog-breeds-cavalier-king-charles-spaniel-1598992577.jpg?crop=0.468xw:1.00xh;0.259xw,0&resize=480:*'.split()
        ans=load_images(ans)
        display_image_slideshow(ans)
        print("DI")
        display_image_slideshow(crop_images_to_max_size(ans))
        display_image_slideshow(crop_images_to_max_size(ans,origin='center'))
    """
    return _crop_images_to_max_or_min_size(*images, criterion=max, origin=origin, copy=copy)
def crop_images_to_min_size(*images,origin='top left',copy=True):
    """
    Gives every image the same height and width: the smallest height and smallest
    width found among them, cutting away image edges where needed (via crop_image).

    EXAMPLE:
        ans='https://i.ytimg.com/vi/MPV2METPeJU/maxresdefault.jpg https://i.insider.com/5484d9d1eab8ea3017b17e29?width=600&format=jpeg&auto=webp https://s3.amazonaws.com/cdn-origin-etr.akc.org/wp-content/uploads/2017/11/13002248/GettyImages-187066830.jpg https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/best-small-dog-breeds-cavalier-king-charles-spaniel-1598992577.jpg?crop=0.468xw:1.00xh;0.259xw,0&resize=480:*'.split()
        ans=load_images(ans)
        display_image_slideshow(ans)
        print("DI")
        display_image_slideshow(crop_images_to_min_size(ans))
        display_image_slideshow(crop_images_to_min_size(ans,origin='center'))
    """
    return _crop_images_to_max_or_min_size(*images, criterion=min, origin=origin, copy=copy)
def crop_images_to_max_height(*images,origin='top left',copy=True):
    """Gives all images the tallest height among them, leaving widths untouched (see rp.crop_image for the args)"""
    return _crop_images_to_max_or_min_size(*images, criterion=max, do_width=False, origin=origin, copy=copy)
def crop_images_to_max_width(*images,origin='top left',copy=True):
    """Gives all images the widest width among them, leaving heights untouched (see rp.crop_image for the args)"""
    return _crop_images_to_max_or_min_size(*images, criterion=max, do_height=False, origin=origin, copy=copy)
def crop_images_to_min_height(*images,origin='top left',copy=True):
    """Gives all images the shortest height among them, leaving widths untouched (see rp.crop_image for the args)"""
    return _crop_images_to_max_or_min_size(*images, criterion=min, do_width=False, origin=origin, copy=copy)
def crop_images_to_min_width(*images,origin='top left',copy=True):
    """Gives all images the narrowest width among them, leaving heights untouched (see rp.crop_image for the args)"""
    return _crop_images_to_max_or_min_size(*images, criterion=min, do_height=False, origin=origin, copy=copy)
def crop_image_to_square(image, *, origin="center", grow=False, copy=True):
    """
    Returns a square version of the given image.

    When grow is False, the longer side is cut down to the shorter one.
    When grow is True, the shorter side is padded up to the longer one
    (crop_image fills the new space with black transparent pixels).
    """
    assert is_image(image)
    assert isinstance(origin, str)

    chooser = max if grow else min
    side = chooser(get_image_dimensions(image))

    return crop_image(image, height=side, width=side, origin=origin, copy=copy)
def crop_images_to_square(images, *, origin="center", grow=False, copy=True):
    """
    Plural of rp.crop_image_to_square.
    TODO: Optimize me!
    """
    result = []
    for image in images:
        result.append(crop_image_to_square(image, origin=origin, grow=grow, copy=copy))
    if is_numpy_array(images):
        # Preserve arrayness: array in, array out
        result = as_numpy_array(result)
    return result
def crop_image_at_random_position(image, height, width, include_position=False):
    """
    Returns a randomly-positioned crop of the input image with the given height and width.
    Useful for data augmentation: many different crops can be drawn from one image.

    Parameters:
        image (numpy.ndarray): The input image, as defined by rp.is_image
        height (int): Height of the crop
        width (int): Width of the crop
        include_position (bool): If True, returns a tuple (cropped_image, (x, y)) instead

    Raises:
        ValueError: If the crop dimensions exceed the image dimensions.

    Examples:
        >>> img = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
        >>> crop_image_at_random_position(img, 100, 100).shape
        (100, 100, 3)
    """
    assert is_image(image)

    image_height, image_width = get_image_dimensions(image)
    if height > image_height or width > image_width:
        raise ValueError("Crop dimensions must be smaller than or equal to the original image dimensions.")

    y = randint(0, image_height - height)
    x = randint(0, image_width - width)

    crop = image[y : y + height, x : x + width]

    return (crop, (x, y)) if include_position else crop
def get_random_crop_bounds(image_dimensions, crop_dimensions):
    """
    Picks a uniformly-random axis-aligned crop window inside an n-dimensional array.

    Parameters:
        image_dimensions (tuple of int): Size of the original array along each axis.
        crop_dimensions (tuple of int): Size of the crop along each axis; must have
            the same number of entries as image_dimensions, each no larger than the
            corresponding image dimension.

    Returns:
        tuple of (start, end) index pairs, one per axis, such that end - start equals
        the requested crop size and the window lies fully inside the array.

    Raises:
        ValueError: On mismatched dimensionality or oversized crop dimensions.

    Example:
        >>> (top, bottom), (left, right) = get_random_crop_bounds((100, 200), (50, 80))
        >>> cropped = image[top:bottom, left:right]  # a random 50x80 window
    """
    from random import randint

    if len(image_dimensions) != len(crop_dimensions):
        raise ValueError("The number of dimensions in image_dimensions and crop_dimensions must be the same.")
    if any(crop_dim > image_dim for crop_dim, image_dim in zip(crop_dimensions, image_dimensions)):
        raise ValueError("Crop dimensions must be smaller than or equal to the original image dimensions.")

    def _axis_bounds(image_dim, crop_dim):
        # Random offset leaving room for the full crop along this axis
        offset = randint(0, image_dim - crop_dim)
        return offset, offset + crop_dim

    return tuple(_axis_bounds(i, c) for i, c in zip(image_dimensions, crop_dimensions))
def get_center_crop_bounds(image_dimensions, crop_dimensions):
    """
    Computes the bounds of a centered crop window inside an n-dimensional array.

    Parameters:
        image_dimensions (tuple of int): Size of the original array along each axis.
        crop_dimensions (tuple of int): Size of the crop along each axis; must have
            the same number of entries as image_dimensions, each no larger than the
            corresponding image dimension.

    Returns:
        tuple of (start, end) index pairs, one per axis. Odd margins are split with
        integer division, so the extra pixel (if any) goes to the end side.

    Raises:
        ValueError: On mismatched dimensionality or oversized crop dimensions.

    Example:
        >>> (top, bottom), (left, right) = get_center_crop_bounds((100, 200), (50, 80))
        >>> (top, bottom), (left, right)
        ((25, 75), (60, 140))
        >>> cropped = image[top:bottom, left:right]
    """
    if len(image_dimensions) != len(crop_dimensions):
        raise ValueError("The number of dimensions in image_dimensions and crop_dimensions must be the same.")
    if any(crop_dim > image_dim for crop_dim, image_dim in zip(crop_dimensions, image_dimensions)):
        raise ValueError("Crop dimensions must be smaller than or equal to the original image dimensions.")

    def _axis_bounds(image_dim, crop_dim):
        margin = (image_dim - crop_dim) // 2
        return margin, margin + crop_dim

    return tuple(_axis_bounds(i, c) for i, c in zip(image_dimensions, crop_dimensions))
def trim_video(video,length:int,copy=True,mode='extend'):
    """
    This function takes a video and a length, and returns a video with that length
    If the desired length is longer than the video, extra frames are appended:
        mode='extend': the last frame is repeated (the default)
        mode='zeros' : black (all-zero) frames are appended
    copy: only consulted when the video already has exactly the requested length;
          if False, the original video may be returned without copying
    TODO: Add examples for all use-cases, including:
        -Decreasing video length
        -Increasing video length for lists of images
        -Increasing video length for numpy-array videos
    """
    assert length>=0,'Cannot trim a video to a negative length'
    assert is_numpy_array(video) or isinstance(video, list), 'Only list-videos and numpy-videos are supported right now'

    if len(video)==length and not copy:
        return video

    if len(video)>=length:
        # NOTE(review): for numpy videos this slice is a view, not a copy - the
        # `copy` flag isn't honored on this path. TODO confirm that's intended.
        return video[:length]

    number_of_extra_frames=length-len(video)

    assert len(video),'Cannot extend a video with no frames - we need an example frame to determine the width and height'
    last_frame=video[-1]
    assert is_image(last_frame)

    # Pick the frame to replicate for the padding
    # extra_frames=np.asarray([np.zeros_like(last_frame)]*number_of_extra_frames)
    if mode=='zeros':
        zero_frame = np.zeros_like(last_frame)
    elif mode=='extend':
        zero_frame = last_frame
    else:
        assert False,'Invalid mode: '+mode
    extra_frames = np.repeat(zero_frame[None], number_of_extra_frames, axis=0)

    if isinstance(video,list):
        if not length:
            # Unreachable in practice: length==0 is already handled by the slice above
            return []
        return list(video)+list(extra_frames)
    elif isinstance(video,np.ndarray):
        return np.concatenate((video,np.asarray(extra_frames)))
    else:
        raise TypeError('Unsupported video type: %s'%type(video))
def trim_videos(*videos, length: int):
    """Plural of rp.trim_video - trims/extends every given video to the given length"""
    return [trim_video(video, length) for video in detuple(videos)]
def _trim_videos_to_same_length(*videos,mode=max,copy=True):
    """
    If mode = max, adds extra frames to the end of videos to make sure they're all the same number of frames
    If mode = min, cuts off every video to become the shortest of all lengths
    If possible, returns a numpy array instead of a list
    If copy=False, it might return the original tensor without copying
    """
    videos=detuple(videos)

    if is_numpy_array(videos) or is_torch_tensor(videos): #If its an array skip the extra compute
        if copy: return videos+0
        else   : return videos

    lengths = list(map(len, videos))
    out_length = mode(lengths)
    videos = trim_videos(videos, length=out_length)

    #BUGFIX: this previously compared `set(...) == 1` (a set vs an int, always False),
    #so the numpy fast-path below could never trigger
    if all(map(is_numpy_array, videos)) and len(set(x.shape for x in videos)) == 1:
        #If possible, return a numpy array
        videos = as_numpy_array(videos)

    return videos
def trim_videos_to_max_length(*videos, copy=True):
    """Extends every given video (via trim_video) so all match the longest one"""
    return _trim_videos_to_same_length(*videos, copy=copy, mode=max)
def trim_videos_to_min_length(*videos, copy=True):
    """Truncates every given video (via trim_video) so all match the shortest one"""
    return _trim_videos_to_same_length(*videos, copy=copy, mode=min)
def _concatenated_videos(image_method,videos,origin):
    """Shared implementation behind horizontally/vertically_concatenated_videos"""
    videos = detuple(videos)

    # Drop empty videos, then normalize frame sizes and video lengths
    videos = [video for video in videos if len(video)]
    videos = [crop_images_to_max_size(video, copy=False) for video in videos]
    videos = trim_videos_to_max_length(videos, copy=False)

    # Concatenate the videos frame-by-frame
    output = []
    for frames in zip(*videos):
        output.append(image_method(*frames, origin=origin))
    return output
def horizontally_concatenated_videos(*videos,origin=None):
    """Concatenates videos side-by-side, frame by frame"""
    #TODO: Optimize this to not use horizontally_concatenated_images (which is slow)
    return _concatenated_videos(horizontally_concatenated_images, videos, origin=origin)
def vertically_concatenated_videos(*videos,origin=None):
    """Concatenates videos top-to-bottom, frame by frame"""
    #TODO: Optimize this to not use vertically_concatenated_images (which is slow)
    return _concatenated_videos(vertically_concatenated_images, videos, origin=origin)
def max_filter(image,diameter,single_channel: bool = False,mode: str = 'reflect',shutup: bool = False):
    """
    Applies a flat circular maximum filter of the given diameter to an image.

    image          : single-channel or multi-channel array (works in RGB, RGBA, or any other number of color channels)
    diameter       : diameter of the flat circular footprint; 0 is a no-op
    single_channel : IMPORTANT: determines the difference between [1,2,3,4,5]
                     and [[1],[2],[3],[4],[5]] (when False) - set True when the
                     last axis is spatial rather than color channels
    mode           : edge handling - one of 'constant','nearest','reflect','mirror','wrap'
    shutup         : suppress the many-channels warning
    """
    if diameter == 0:
        return image
    mode=mode.lower()
    assert mode in {'constant','nearest','reflect','mirror','wrap'},"r.max_filter: Invalid mode for max-filtering edge-areas of image. mode=" + str(mode)
    # Import from scipy.ndimage directly: the scipy.ndimage.filters namespace is deprecated
    from scipy.ndimage import maximum_filter as filter
    kernel=flat_circle_kernel(diameter)
    f=lambda x:filter(x,footprint=kernel,mode=mode)
    tp=np.transpose
    sh=np.shape(image)
    assert isinstance(sh,tuple)
    if not single_channel and not sh[-1] <= 4 and not shutup: # Generally if you have more than 4 channels you are using a single_channel image.
        #BUGFIX: this warning previously said "r.med_filter" (copy-paste error)
        fansi_print("r.max_filter: Warning: Last channel has length of " + str(sh[-1]) + "; you results might be weird. Consider setting optional parameter 'single_channel' to True?",'red')
    s=list(range(len(sh)))
    if len(s) == 1 or single_channel: # We don't have channels of colors, we only have 1 color channel (AKA we extracted the red of an image etc)
        return f(image)
    # Move the channel axis to the front, filter each channel individually, then move it back.
    return tp([f(x) for x in tp(image,[s[-1]] + list(s[:-1]))],list(s[1:]) + [s[0]])
    # NOTE:
    #     >>> _s=(0,1,2)
    #     >>> [_s[-1]] + list(_s[:-1])
    #    ans=[2,0,1]
    #     >>> list(_s[1:]) + [_s[0]]
    #    ans=[1,2,0]
def min_filter(image,diameter,single_channel: bool = False,mode: str = 'reflect',shutup: bool = False):
    """
    Applies a flat circular minimum filter of the given diameter to an image.

    image          : single-channel or multi-channel array (works in RGB, RGBA, or any other number of color channels)
    diameter       : diameter of the flat circular footprint; 0 is a no-op
    single_channel : IMPORTANT: determines the difference between [1,2,3,4,5]
                     and [[1],[2],[3],[4],[5]] (when False) - set True when the
                     last axis is spatial rather than color channels
    mode           : edge handling - one of 'constant','nearest','reflect','mirror','wrap'
    shutup         : suppress the many-channels warning
    """
    if diameter == 0:
        return image
    mode=mode.lower()
    #BUGFIX: the valid-mode set previously contained the typo 'mir3ror', so mode='mirror' was wrongly rejected
    assert mode in {'constant','nearest','reflect','mirror','wrap'},"r.min_filter: Invalid mode for min-filtering edge-areas of image. mode=" + str(mode)
    # Import from scipy.ndimage directly: the scipy.ndimage.filters namespace is deprecated
    from scipy.ndimage import minimum_filter as filter
    kernel=flat_circle_kernel(diameter)
    f=lambda x:filter(x,footprint=kernel,mode=mode)
    tp=np.transpose
    sh=np.shape(image)
    assert isinstance(sh,tuple)
    if not single_channel and not sh[-1] <= 4 and not shutup: # Generally if you have more than 4 channels you are using a single_channel image.
        #BUGFIX: this warning previously said "r.med_filter" (copy-paste error)
        fansi_print("r.min_filter: Warning: Last channel has length of " + str(sh[-1]) + "; you results might be weird. Consider setting optional parameter 'single_channel' to True?",'red')
    s=list(range(len(sh)))
    if len(s) == 1 or single_channel: # We don't have channels of colors, we only have 1 color channel (AKA we extracted the red of an image etc)
        return f(image)
    # Move the channel axis to the front, filter each channel individually, then move it back.
    return tp([f(x) for x in tp(image,[s[-1]] + list(s[:-1]))],list(s[1:]) + [s[0]])
    # NOTE:
    #     >>> _s=(0,1,2)
    #     >>> [_s[-1]] + list(_s[:-1])
    #    ans=[2,0,1]
    #     >>> list(_s[1:]) + [_s[0]]
    #    ans=[1,2,0]
def med_filter(image,diameter,single_channel: bool = False,mode: str = 'reflect',shutup: bool = False):
    """
    Applies a flat circular median filter of the given diameter to an image.

    image          : single-channel or multi-channel array (works in RGB, RGBA, or any other number of color channels)
    diameter       : diameter of the flat circular footprint; 0 is a no-op
    single_channel : IMPORTANT: determines the difference between [1,2,3,4,5]
                     and [[1],[2],[3],[4],[5]] (when False) - set True when the
                     last axis is spatial rather than color channels
    mode           : edge handling - one of 'constant','nearest','reflect','mirror','wrap'
    shutup         : suppress the many-channels warning
    """
    if diameter == 0:
        return image
    mode=mode.lower()
    assert mode in {'constant','nearest','reflect','mirror','wrap'},"r.med_filter: Invalid mode for med-filtering edge-areas of image. mode=" + str(mode)
    # Import from scipy.ndimage directly: the scipy.ndimage.filters namespace is deprecated
    from scipy.ndimage import median_filter as filter
    kernel=flat_circle_kernel(diameter)
    f=lambda x:filter(x,footprint=kernel,mode=mode)
    tp=np.transpose
    sh=np.shape(image)
    assert isinstance(sh,tuple)
    if not single_channel and not sh[-1] <= 4 and not shutup: # Generally if you have more than 4 channels you are using a single_channel image.
        fansi_print("r.med_filter: Warning: Last channel has length of " + str(sh[-1]) + "; you results might be weird. Consider setting optional parameter 'single_channel' to True?",'red')
    s=list(range(len(sh)))
    if len(s) == 1 or single_channel: # We don't have channels of colors, we only have 1 color channel (AKA we extracted the red of an image etc)
        return f(image)
    # Move the channel axis to the front, filter each channel individually, then move it back.
    return tp([f(x) for x in tp(image,[s[-1]] + list(s[:-1]))],list(s[1:]) + [s[0]])
    # NOTE:
    #     >>> _s=(0,1,2)
    #     >>> [_s[-1]] + list(_s[:-1])
    #    ans=[2,0,1]
    #     >>> list(_s[1:]) + [_s[0]]
    #    ans=[1,2,0]
def range_filter(image,diameter,single_channel: bool = False,mode: str = 'reflect',shutup: bool = False):
    """Morphological gradient: per-pixel difference between the circular max filter and min filter"""
    common_args = (image, diameter, single_channel, mode, shutup)
    maxed = max_filter(*common_args)
    mined = min_filter(*common_args)
    return maxed - mined
def grid2d(width: int,height: int,fᆢrowˏcolumn=lambda r,c:None) -> list:
    """
    Builds a width-by-height nested list where element [row][column] is fᆢrowˏcolumn(row,column).
    The default function fills the grid with None.
    """
    #IMPROVEMENT: removed an unused `from copy import deepcopy` import
    # Perhaps I'll make a future version that extends this to n-dimensions, like rmif in MatLab
    out=deepcopy_multiply([[[None]] * height],width)
    for column in range(height):
        for row in range(width):
            out[row][column]=fᆢrowˏcolumn(row,column)
    return out
def grid2d_map(grid2d_input,value_func=identity) -> list:
    """
    Applies value_func to every element of a 2d grid (a list of lists), returning
    a new grid of the same shape.
    Similar to rmvf (ryan matrix value function), except restricted to just 2d grids.
    """
    num_rows = len(grid2d_input)
    num_columns = len(grid2d_input[0])
    return grid2d(num_rows, num_columns, lambda row, col: value_func(grid2d_input[row][col]))
def _auto_interp_for_resize_image(resize_func, image, new_size, copy_attr='copy'):
    """
    A private function used by image resizing functions in rp when their interp=='auto'

    'area' interpolation is good for shrinking images
    'bilinear' interpolation is good for growing images
    This function lets you automatically choose the best interp method.
    If one dimension grows whereas the other dimension shrinks, we should use two resizings: one for 'bilinear' and the other 'area'

    resize_func: is expected to be an image resizing function from rp, such as rp.resize_image, rp.cv_resize_image or rp.torch_resize_image
        it should be able to handle sizes such as (None, width) to indicate one dimension should be unchanged
        This function is meant to be called by those functions, when the interp method is 'auto'
    image : should be an image type compatible with rp.get_image_dimensions and the given resize_func
    new_size : is expected to be a tuple containing two integers (height, width)
    copy_attr : name of the no-op copy method on the image type (e.g. 'copy' for numpy, 'clone' for torch)

    EXAMPLE:
        >>> def demo_resize_interp(interp='auto'):
        ...     import numpy as np
        ...     import time
        ...
        ...     # Assuming get_checkerboard_image returns an image and other functions are available
        ...     c = get_checkerboard_image(128,128)
        ...     original_height = get_image_height(c)
        ...     original_width = get_image_width(c)
        ...     frames = 360  # Total number of frames for a smoother and longer animation
        ...     start_angle = np.pi / 4  # Starting at 45 degrees
        ...     max_multiplier = 3  # Maximum size multiplier
        ...
        ...     for _ in range(2):
        ...         for frame in range(frames):
        ...             # Calculate the new dimensions using sine and cosine with a 45-degree offset
        ...             # Amplitude is adjusted to vary directly from 1 to max_multiplier times the original size
        ...             new_height = int((np.sin(2 * np.pi * frame / frames + start_angle) + 1) / 2 * (max_multiplier - 1) * original_height + 1)
        ...             new_width = int((np.cos(2 * np.pi * frame / frames + start_angle) + 1) / 2 * (max_multiplier - 1) * original_width + 1)
        ...
        ...             # Resize and display the image
        ...
        ...             resized_image = cv_resize_image(c, (new_height, new_width), interp=interp)
        ...             #resized_image = resize_image(c, (new_height, new_width), interp=interp)
        ...
        ...             resized_image = crop_image(resized_image, height=300, width=300)
        ...             resized_image=blend_images((0,.25,.5),resized_image)
        ...             resized_image=labeled_image(resized_image,interp,background_color=(0,64,128))
        ...             resized_image=shift_image_hue(resized_image,len(interp)/6) #Different background colors so we can easily see when interp changes
        ...             display_image(resized_image)
        ...
        ... demo_resize_interp('auto')
        ... demo_resize_interp('nearest')
        ... demo_resize_interp('bilinear')
        ... demo_resize_interp('area')
    """
    assert isinstance(new_size, tuple) and len(new_size) == 2 and all(isinstance(x,int) for x in new_size)

    old_size = get_image_dimensions(image)
    old_height, old_width = old_size
    new_height, new_width = new_size

    growth_interp = "bilinear"
    shrink_interp = "area"

    out = image
    #In the case that both dimensions grow, or both dimensions shrink, we only need to use the resize_func once
    if   new_height == old_height and new_width == old_width: out = getattr(out, copy_attr)()  # No resize needed; return a copy for consistency
    elif new_height >= old_height and new_width >= old_width: out = resize_func(out, size=(new_height, new_width), interp=growth_interp)
    elif new_height <= old_height and new_width <= old_width: out = resize_func(out, size=(new_height, new_width), interp=shrink_interp)
    else:
        #In the case that one dimension grows and the other shrinks, we use the resize_func twice to use both interp methods
        #Do the potential shrink operation first then the growth operation for speed's sake
        if new_height < old_height: out = resize_func(out, size=(new_height, None     ), interp=shrink_interp)
        if new_width  < old_width : out = resize_func(out, size=(None      , new_width), interp=shrink_interp)
        if new_height > old_height: out = resize_func(out, size=(new_height, None     ), interp=growth_interp)
        if new_width  > old_width : out = resize_func(out, size=(None      , new_width), interp=growth_interp)

    return out
def resize_image(image,scale,interp='bilinear'):
    """
    Stretchy-squishy image resizing.
    :param image: a numpy array, preferably. It can also handle pure-python list-of-lists if that fails.
    :param scale: either a scalar multiplier, or a tuple of integers specifying the new dimensions like (128,128).
                  In tuple form, one entry may be None, meaning "scale that dimension proportionally".
    :param interp: ONLY APPLIES FOR numpy arrays! interp ∈ {'auto','nearest','bilinear','bicubic'} ('auto' is treated as 'bilinear')
    :return: the resized image
    """
    if interp=='auto': interp='bilinear'
    assert interp in {'nearest','bilinear','bicubic'}
    if scale == 1:
        return image #Nothing to do
    try:
        #Legacy path: scipy.misc.imresize was removed in scipy>=1.3, so this usually falls through to skimage below
        from scipy.misc import imresize
        return imresize(image,float(scale),interp)
    except Exception:pass
    try:
        assert is_image(image)
        pip_import("skimage")
        from skimage.transform import resize
        if not isinstance(scale,tuple):
            height,width=image.shape[:2]
            height=int(height*scale)
            width =int(width *scale)
        else:
            height,width=scale
            if not height or not width:
                #If the user specifies (100,None) it means to rescale the image to a height of 100, and scale the width proportionally
                from math import ceil
                assert height or width
                if not height: height=ceil(get_image_height(image)/get_image_width (image)*width )
                if not width : width =ceil(get_image_width (image)/get_image_height(image)*height)
        order={'nearest':0,'bilinear':1,'bicubic':3}[interp]
        return resize(image,(height,width),order=order)
    except Exception:pass
    if is_number(scale):
        #Janky fallback territory...it still works, but it's slow because it runs in pure python
        try:
            #BUGFIX: this used to pass an undefined name 'dog' instead of 'image' (NameError)
            return cv_apply_affine_to_image(image,scale_affine_2d(scale),output_resolution=scale)#Doesn't support 'interp'
        except Exception:pass
        return grid2d(int(len(image) * scale),int(len(image[0]) * scale),lambda x,y:image[int(x / scale)][int(y / scale)])#The slowest method of all...doesn't support 'interp'
# endregion
# region xyrgb lists ⟷ image:[image_to_xyrgb_lists,xyrgb_lists_to_image,xyrgb_normalize,image_to_all_normalized_xy_rgb_training_pairs,extract_patches] (Invertible Pair)
# try:from sklearn.feature_extraction.image import extract_patches
# except Exception:pass
def image_to_xyrgb_lists(image):
    """
    Flatten an image (nested lists like [[[r,g,b],...],...]) into five parallel lists: x, y, r, g, b.
    Inverse of xyrgb_lists_to_image.
    """
    xs,ys,rs,gs,bs=[],[],[],[],[]
    for x,row in enumerate(image):
        for y,pixel in enumerate(row):
            xs.append(x)
            ys.append(y)
            rs.append(pixel[0])
            gs.append(pixel[1])
            bs.append(pixel[2])
    return xs,ys,rs,gs,bs
def xyrgb_lists_to_image(*xyrgb_lists_as_tuple):
    """
    Rebuild an image (nested lists of [R,G,B] pixels) from five parallel lists: x, y, r, g, b.
    Accepts either five separate list arguments, or a single tuple holding all five.
    Inverse of image_to_xyrgb_lists.
    """
    xyrgb_lists_as_tuple=detuple(xyrgb_lists_as_tuple) # Accept 5 arguments or one 5-element tuple
    assert len(xyrgb_lists_as_tuple) == 5,"One element:list for each channel: X Y R G B"
    x,y,r,g,b=xyrgb_lists_as_tuple
    assert len(x) == len(y) == len(r) == len(g) == len(b),"An outside-noise assumption. If this assertion fails then there is something wrong with the input parameters --> this def is not to blame."
    out_image=deepcopy_multiply([[None] * (max(y) + 1)],(max(x) + 1)) # Pre-allocate the pixel grid; each [R,G,B] is filled in below
    for xi,yi,ri,gi,bi in zip(x,y,r,g,b):
        out_image[xi][yi]=[ri,gi,bi]
    return out_image
def xyrgb_normalize(*xyrgb,rgb_old_max=255,rgb_new_max=1,x_new_max=1,y_new_max=1):
    """
    Rescale parallel x,y,r,g,b lists:
      - x and y (originally integer pixel indices) become floats in [0, x_new_max] / [0, y_new_max]
      - r, g and b are scaled from [0, rgb_old_max] into [0, rgb_new_max]
    """
    x,y,r,g,b=detuple(xyrgb)
    x_factor  =x_new_max   / max(x)
    y_factor  =y_new_max   / max(y)
    rgb_factor=rgb_new_max / rgb_old_max
    x=[value * x_factor   for value in x]
    y=[value * y_factor   for value in y]
    r=[value * rgb_factor for value in r]
    g=[value * rgb_factor for value in g]
    b=[value * rgb_factor for value in b]
    return x,y,r,g,b
def image_to_all_normalized_xy_rgb_training_pairs(image):
    """Return ([(x,y),...], [(r,g,b),...]) for every pixel of image, with all values normalized to [0,1]."""
    xs,ys,rs,gs,bs=xyrgb_normalize(image_to_xyrgb_lists(image))
    return list(zip(xs,ys)),list(zip(rs,gs,bs))
# NOTE: This def exists for efficiency purposes.
# To create a training batch from the image, the minimal syntax would be:
# random_parallel_batch(*image_to_all_normalized_xy_rgb_training_pairs(image),a,b)
# BUT NOTE: It is very inefficient to recalculate this def over and over again.
# Store the output of this as a variable, and use like so:
# precalculated=image_to_all_normalized_xy_rgb_training_pairs(image)
# new_batch=random_parallel_batch(*precalculated,a,b)
# region Explanatory Example:
# # Goal: create input and output from XY to RGB from image and turn them into a random batch for NN input outputs
# #from r import *
# x=['x₁','x₂','x₃']
# y=['y₁','y₂','y₃']
# r=['r₁','r₂','r₃']
# g=['g₁','g₂','g₃']
# b=['b₁','b₂','b₃']
#
# inputs=list(zip(x,y))
# outputs=list(zip(r,g,b))
# io_pairs=list(zip(inputs,outputs))
#
# ⎧ ⎫
# ⎪ ⎧ ⎫⎪
# ⎪ ⎪ ⎧ ⎫⎪⎪
# print(list(zip(*random_batch(io_pairs,2))))
# ⎪ ⎪ ⎩ ⎭⎪⎪
# ⎪ ⎩ ⎭⎪
# ⎩ ⎭
#
# ⎧ ⎫
# ⎪⎧ ⎫ ⎧ ⎫⎪
# # [(('x₂', 'y₂'), ('x₃', 'y₃')), (('r₂', 'g₂', 'b₂'), ('r₃', 'g₃', 'b₃'))]
# ⎪⎩ ⎭ ⎩ ⎭⎪
# ⎩ ⎭
# endregion
# endregion
def xy_float_images(
    height=256,
    width=256,
    *,
    min_x=0,
    max_x=1,
    min_y=0,
    max_y=1
):
    """
    Return a stacked pair of grayscale images (x, y), each ramping linearly along its axis.
    Args:
        height (int): Height of the output images.
        width (int): Width of the output images.
        min_x, max_x, min_y, max_y (float, optional): Value ranges of x and y in the output. Default to [0, 1].
    Returns:
        np.ndarray: A tensor of shape (2, height, width) holding the x and y images.
    EXAMPLES:
        >>> #Radius example
        >>> distance = (((2*xy_float_images()-1)**2).sum(0))**.5
        >>> display_image(distance)
        >>> #Animation example
        >>> for angle in range(360):
        >>>     angle*=tau/360
        >>>     x,y=2 * xy_float_images(height=256,width=256) - 1
        >>>     rotated=x*np.cos(angle)+y*np.sin(angle)
        >>>     display_image(2*full_range(rotated**5)-1)
    """
    x_values = np.linspace(min_x, max_x, num=width )
    y_values = np.linspace(min_y, max_y, num=height)
    x_image, y_image = np.meshgrid(x_values, y_values)
    return np.stack([x_image, y_image])
_xy_torch_matrices_cache={} #Cache for xy_torch_matrices(use_cache=True): maps argument tuples to precomputed tensors
def xy_torch_matrices(
    height=256,
    width=256,
    *,
    dtype=None,
    device=None,
    min_x=0,
    max_x=1,
    min_y=0,
    max_y=1,
    use_cache=False
):
    """
    Sister function of xy_float_images, but this one uses torch tensors
    Returns a pair of matrices: x, y - each increasing from min to max along its axis
    Args:
        height (int): Height of the output images.
        width (int): Width of the output images.
        dtype (torch.dtype, optional): Data type of the output tensors. Defaults to None, corresponding to torch.float32
        device (torch.device, optional): Device to create the output tensors on. Defaults to None, corresponding to "cpu"
        min_x, max_x, min_y, max_y (float, optional): The ranges of x and y in the output. Defaults to between 0 and 1.
        use_cache: Useful when bottlenecked by excessive CPU/GPU transfers (seeing a lot of Tensor.to's in the profiler)
    Returns:
        torch.Tensor: A tensor of shape (2, height, width) representing the x and y images.
    EXAMPLES:
        >>> #Radius example
        >>> distance = (((2*xy_torch_matrices()-1)**2).sum(0)).sqrt()
        >>> display_image(distance)
    """
    if use_cache:
        #Prevent excessive transfers between CPU and GPU by memoizing the result per argument set.
        #BUGFIX: this branch used to read an undefined name 'args_hash' (a typo for 'kwargs_hash'),
        #raising NameError whenever use_cache=True. We now key the cache on a plain argument tuple.
        cache_key = (height, width, dtype, device, min_x, max_x, min_y, max_y)
        if cache_key not in _xy_torch_matrices_cache:
            _xy_torch_matrices_cache[cache_key] = xy_torch_matrices(
                height=height, width=width, dtype=dtype, device=device,
                min_x=min_x, max_x=max_x, min_y=min_y, max_y=max_y,
            )
        return _xy_torch_matrices_cache[cache_key]
    try:
        import torch
    except ImportError:
        #Only pay for pip_import's install-if-missing machinery when torch really is missing
        pip_import('torch')
        import torch
    if dtype is None: dtype = torch.float32
    if device is None: device = "cpu"
    y, x = torch.meshgrid(torch.linspace(min_y, max_y, height, dtype=dtype, device=device),
                          torch.linspace(min_x, max_x, width , dtype=dtype, device=device),
                          indexing="ij", #We have to add this for new torch versions
                         )
    return torch.stack([x, y])
def _is_instance_of_module_class(x, module_name: str, class_name: str) -> bool:
"""
Determines if 'x' (object) is an instance of a class (specified by 'class_name')
in a module (specified by 'module_name') efficiently, without importing the module.
Ideal for environments where importing large libraries like numpy or torch is costly.
Safely returns False, avoiding import errors if the library is not installed.
Example Usage:
_is_instance_of_module_class(x, 'numpy', 'ndarray' ) # Checks if x is a numpy array
_is_instance_of_module_class(x, 'torch', 'Tensor' ) # Checks if x is a torch Tensor
_is_instance_of_module_class(x, 'pandas', 'DataFrame') # Checks if x is a pandas DataFrame
_is_instance_of_module_class(image, 'PIL.Image', 'Image' ) # Checks if x is a PIL Image
"""
if module_name in sys.modules:
module = sys.modules[module_name]
class_ = getattr(module, class_name, None)
return isinstance(x, class_) if class_ is not None else False
else:
return False
def is_numpy_array(x):
    """True iff x is a numpy.ndarray (checked without importing numpy)."""
    return _is_instance_of_module_class(x, 'numpy', 'ndarray')
_is_numpy_array = is_numpy_array #Backwards compatibility with code that expected _is_numpy_array
def is_torch_tensor(x):
    """True iff x is a torch.Tensor (checked without importing torch)."""
    return _is_instance_of_module_class(x, 'torch', 'Tensor')
def is_torch_image(image):
    """Returns True if image could be a CHW torch image: a torch tensor with exactly 3 dimensions."""
    if not is_torch_tensor(image):
        return False
    return image.ndim == 3
def is_torch_module(x) -> bool:
    """True iff x is a torch.nn.Module (checked without importing torch)."""
    return _is_instance_of_module_class(x, 'torch.nn', 'Module')
def _is_pandas_dataframe(x) -> bool:
    """True iff x is a pandas.DataFrame (checked without importing pandas)."""
    return _is_instance_of_module_class(x, 'pandas', 'DataFrame')
def _is_pandas_series(x) -> bool:
    """True iff x is a pandas.Series (checked without importing pandas)."""
    return _is_instance_of_module_class(x, 'pandas', 'Series')
def _is_pandas_iloc_iterable(x) -> bool:
    """True iff x supports pandas positional indexing via .iloc (i.e. is a Series or DataFrame)."""
    return _is_pandas_series(x) or _is_pandas_dataframe(x)
def is_pil_image(image) -> bool:
    """True iff image is a PIL.Image.Image (checked without importing PIL)."""
    return _is_instance_of_module_class(image, 'PIL.Image', 'Image')
def _is_easydict(x) -> bool:
    """True iff x is an easydict.EasyDict (checked without importing easydict)."""
    return _is_instance_of_module_class(x, 'easydict', 'EasyDict')
# region Randomness:[random_index,random_element,random_permutation,randint,random_float,random_chance,random_batch,shuffled,random_parallel_batch]
def random_index(arr_or_len):
    """
    Return a random index appropriate for the given input.
    - dict (or dict subclass): returns a random element from its values (legacy behavior)
    - int n: returns a random integer in [0, n)
    - anything with a __len__: returns a random valid index into it
    :param arr_or_len: The array, array length, or dict-like object.
    :return: A random index or element.
    """
    from random import randrange
    if isinstance(arr_or_len, dict) or issubclass(type(arr_or_len), dict):
        return random_element(list(arr_or_len.values()))
    if isinstance(arr_or_len, int):
        assert arr_or_len != 0, "Array length cannot be zero."
        return randrange(arr_or_len) # randrange(n) === randint(0, n-1)
    if has_len(arr_or_len):
        return random_index(len(arr_or_len))
    raise TypeError("Input must be an integer, an array with a len, or dict-like object but got type "+str(type(arr_or_len)))
#OLD VERSION: Works perfectly fine, I just wanted to make the code a bit cleaner to read
# def random_index(array_length_or_array_itself):
# if isinstance(array_length_or_array_itself, dict):
# return random_element(list(array_length_or_array_itself))
# # Basically a random integer generator suited for generating array indices.
# # Returns a random integer ∈ ℤ ⋂ [0‚array_length)
# if isinstance(array_length_or_array_itself,int):
# assert array_length_or_array_itself != 0
# from random import randrange #randrange(0,x) ==== randint(0,x-1)
# return randint(0,array_length_or_array_itself - 1)
# else:
# return random_index(len(array_length_or_array_itself))
def random_element(x):
    """
    Return a random element from an iterable, dictionary-like, or set-like object.
    - Mappings: a random value (not a key)
    - Sets: a random member
    - Other iterables / indexables: a random item (pandas objects are indexed via .iloc)
    Parameters:
        x (iterable or Mapping or Set): The collection to choose from.
    Returns:
        object: A randomly chosen element from x.
    EXAMPLES:
        >>> assert random_element({'a': 1, 'b': 2}) in [1, 2]
        >>> assert random_element({1, 2, 3}) in [1, 2, 3]
        >>> assert random_element([1, 2, 3]) in [1, 2, 3]
        >>> assert random_element(np.array([1, 2, 3])) in [1, 2, 3]
        >>> assert random_element(torch.tensor([1, 2, 3])) in [1, 2, 3]
        >>> assert random_element(pd.Series([1, 2, 3])) in [1, 2, 3]
    """
    from collections.abc import Mapping, Set, Iterable
    if isinstance(x, Mapping):
        keys = list(x.keys())
        return x[keys[random_index(keys)]]
    if isinstance(x, Set):
        return random_element(list(x))
    if isinstance(x, Iterable) or has_len(x) and hasattr(x, "__getitem__"):
        index = random_index(x)
        container = x.iloc if _is_pandas_iloc_iterable(x) else x #pandas wants positional indexing
        return container[index]
    raise ValueError("Input must be iterable, dictionary-like, or set-like.")
def random_choice(*choices):
    """Return one of the given arguments at random, e.g. random_choice('heads','tails')."""
    return random_element(choices)
def random_permutation(n) -> list or str:
    """
    If n is an iterable, return a shuffled copy of it:  random_permutation([1,2,3,4,5]) ⟶ [3, 2, 4, 5, 1]
    If n is an integer, return a random permutation of range(n):  random_permutation(5) ⟶ [3, 2, 1, 4, 0]
    """
    if is_iterable(n):
        return shuffled(n)
    return list(np.random.permutation(n))
def is_a_permutation(permutation):
    """
    True iff 'permutation' is a reordering of the ints 0 .. len(permutation)-1,
    i.e. it specifies a valid reordering of an array.
    """
    expected_indices = set(range(len(permutation)))
    return expected_indices == set(permutation)
def inverse_permutation(permutation):
    """
    Return the permutation that undoes the given one.
    EXAMPLE:
        a=list(range(100))
        p=random_permutation(100)
        assert a==gather(p,inverse_permutation(p))
    """
    assert is_a_permutation(permutation)
    inverse = [0] * len(permutation)
    for position, value in enumerate(permutation):
        #The element sent to 'value' by the permutation comes from 'position'
        inverse[value] = position
    return inverse
def randint(a_inclusive,b_inclusive=0):
    """
    Random integer from the inclusive range [min(a,b), max(a,b)] ⋂ ℤ.
    Argument order doesn't matter; with one argument, picks from [0, a] (both ends inclusive).
    """
    import random
    low,high=sorted([a_inclusive,b_inclusive])
    return random.randint(low,high)
random_int=randint
def randints(N,a_inclusive=99,b_inclusive=0):
    """
    Generate N random integers, each drawn from the inclusive range [min(a,b), max(a,b)].
    Example: randints(10) ==== [9, 36, 82, 49, 13, 9, 62, 81, 80, 66]
    Returns a numpy array when numpy is available, otherwise a plain list.
    (A convenience function originally written for use with pseudo_terminal)
    """
    assert N>=0 and N==int(N),'Cannot have a non-counting-number length: N='+repr(N)
    out=[randint(a_inclusive,b_inclusive) for _ in range(N)]
    try:
        out=np.asarray(out) #Purely for convenience, when numpy is available
    except Exception:
        pass
    return out
random_ints=randints
def randint_complex(*args,**kwargs):
    """
    Like 'randint', but returns a random complex number: the real and imaginary
    parts are each drawn independently via randint(*args,**kwargs).
    EXAMPLE:
        >>> randint_complex(1)
        ans = (1+1j)
        >>> randint_complex(1)
        ans = 0j
        >>> randint_complex(1)
        ans = (1+0j)
    """
    real_part=randint(*args,**kwargs)
    imag_part=randint(*args,**kwargs)
    return real_part+imag_part*1j
random_int_complex=randint_complex
def randints_complex(*args,**kwargs):
    """
    Like 'randints', but returns random complex numbers: the real and imaginary
    parts are each drawn independently via randints(*args,**kwargs).
    EXAMPLE:
        >>> randints_complex(10)
        ans = [56.+64.j 61. +9.j 58.+42.j 93.+71.j 67.+57.j 67.+67.j 24. +3.j 14.+98.j 92.+96.j 32.+29.j]
    """
    real_parts=randints(*args,**kwargs)
    imag_parts=randints(*args,**kwargs)
    return real_parts+imag_parts*1j
random_ints_complex=randints_complex
def random_float(exclusive_max: float = 1,inclusive_min=0) -> float:
    """Uniform random float in [min(args), max(args)). Argument order doesn't matter - they're sorted first."""
    low,high=sorted([inclusive_min,exclusive_max])
    return random.random()*(high-low)+low
def random_float_complex(exclusive_max: float = 1,inclusive_min=0) -> float:
    """Random complex number whose real and imaginary parts are each uniform in the given range."""
    real_part=random_float(exclusive_max=exclusive_max,inclusive_min=inclusive_min)
    imag_part=random_float(exclusive_max=exclusive_max,inclusive_min=inclusive_min)
    return real_part+1j*imag_part
def random_floats(N,exclusive_max=1,inclusive_min=0):
    """
    Generate N uniformly distributed random floats in [min(bounds), max(bounds)).
    Example: random_floats(10) ==== [0.547 0.516 0.421 0.698 0.732 0.885 0.947 0.668 0.857 0.237]
    Returns a numpy array when numpy is available, otherwise a plain list.
    (A convenience function originally written for use with pseudo_terminal)
    """
    assert N>=0 and N==int(N),'Cannot have a non-counting-number length: N='+repr(N)
    inclusive_min,exclusive_max=sorted([inclusive_min,exclusive_max])
    try:
        #Fast vectorized path, used whenever numpy is available
        return (np.random.rand(N))*(exclusive_max-inclusive_min)+inclusive_min
    except Exception:pass
    #BUGFIX: this fallback used to reference undefined names a_inclusive/b_inclusive (NameError)
    return [random_float(exclusive_max,inclusive_min) for _ in range(N)]
def random_floats_complex(*args,**kwargs):
    """
    Like 'random_floats', but returns random complex numbers: the real and imaginary
    parts are each drawn independently via random_floats(*args,**kwargs).
    EXAMPLE:
        >>> random_floats_complex(10)
        ans = [0.611+0.569j 0.371+0.036j 0.469+0.336j 0.615+0.069j 0.329+0.16j 0.896+0.22j 0.22 +0.668j 0.901+0.741j 0.827+0.937j 0.619+0.513j]
        >>> random_floats_complex(10,0,100)
        ans = [40.909+10.029j 51.376+61.357j 15.713+25.714j 99.301+76.956j 5.253+21.822j 8.723+75.36j 15.964+85.891j 20.968+12.191j 37.997+92.09j 87.132+89.107j]
    """
    real_parts=random_floats(*args,**kwargs)
    imag_parts=random_floats(*args,**kwargs)
    return real_parts+imag_parts*1j
def random_chance(probability: float = .5) -> bool:
    """Return True with the given probability, e.g. random_chance(.25) is True about a quarter of the time."""
    return random_float() < probability
def random_batch(full_list,batch_size: int = None,*,retain_order: bool = False):
    """
    Pick a random subset of size batch_size from a list, tuple, set, dict, numpy array, torch tensor, or pandas object.
    - No index is chosen twice, so 0 <= batch_size <= len(full_list) is required.
    - If batch_size is None, the whole input is used (so this acts like a shuffle).
    - If retain_order is True, elements keep their original relative order.
    - The output container matches the input where possible: dicts give dicts,
      numpy arrays give numpy arrays (fast), torch tensors give torch tensors,
      pandas objects are indexed via .iloc; everything else gives a list.
    EXAMPLES:
        >>> random_batch({1:2,3:4,5:6,7:8},2)                  --> {5: 6, 3: 4}
        >>> random_batch({1:2,3:4,5:6,7:8},3,retain_order=True)--> {1: 2, 5: 6, 7: 8}
        >>> random_batch([1,2,3,4,5],3,retain_order=True)      --> [1, 2, 4]
        >>> random_batch([1,2,3,4,5],3,retain_order=False)     --> [5, 2, 1]
        >>> random_batch([1,2,3,4,5],retain_order=False)       --> [4, 2, 5, 3, 1]
        >>> random_batch([1,2,3,4,5],0)                        --> []
        >>> random_batch(np.arange(5),3)                       --> [2 3 4]
        >>> random_batch(torch.arange(5),3)                    --> tensor([2, 1, 3])
    rCode: Let ⨀ ≣ random_batch ∴
        ⨀ a None b ≣ ⨀ a len a b
        list a     ≣ ⨀ a None True
        b          ≣ len ⨀ a b
    """
    #Pandas Series/DataFrames: choose random row indices, then gather with .iloc
    if _is_pandas_dataframe(full_list) or _is_pandas_iloc_iterable(full_list):
        if batch_size is None:
            batch_size = len(full_list)
        assert 0 <= batch_size <= len(full_list), "batch_size must be between 0 and the number of rows in the DataFrame"
        row_indices = random_batch(range(len(full_list)), batch_size, retain_order=retain_order)
        return full_list.iloc[row_indices]
    #Numpy arrays and torch tensors: gather with fancy indexing (fast, keeps the container type)
    if is_torch_tensor(full_list) or _is_numpy_array(full_list):
        chosen_indices = random_batch(range(len(full_list)), batch_size, retain_order=retain_order)
        return full_list[chosen_indices]
    from collections.abc import Mapping
    #Dict-likes: choose a random subset of keys, return a sub-dict
    if isinstance(full_list, Mapping):
        chosen_keys = random_batch(list(full_list.keys()), batch_size=batch_size, retain_order=retain_order)
        return {key: full_list[key] for key in chosen_keys}
    if isinstance(full_list, set):
        full_list = list(full_list)
    if batch_size is None:
        #No batch_size means "use everything" - i.e. a shuffle of the whole input
        if retain_order:
            return list(full_list) #Shuffling indices then re-sorting them is just the identity
        batch_size = len(full_list)
    else:
        assert 0 <= batch_size <= len(full_list),"batch_size == " + str(batch_size) + " ⋀ len(full_list) == " + str(len(full_list)) + ",∴ ¬ (0 <= batch_size <= len﹙full_list﹚) Explanation: We do not allow duplicates, ∴ we cannot generate a larger batch than we have elements to choose from full_list"
    indices = list(range(len(full_list)))
    random.shuffle(indices) #Shuffles in place
    indices = indices[:batch_size]
    if retain_order:
        indices.sort()
    return [full_list[i] for i in indices]
def random_batch_up_to(full_list, max_batch_size=None, retain_order=False):
    """
    Like random_batch, but caps the batch size at len(full_list) instead of raising
    when max_batch_size exceeds the number of available elements.
    """
    if max_batch_size is None:
        batch_size = None #Defer to random_batch's default (use everything)
    else:
        assert isinstance(max_batch_size, int)
        batch_size = min(len(full_list), max_batch_size)
    return random_batch(full_list, batch_size, retain_order=retain_order)
def random_batch_with_replacement(full_list, batch_size: int = None, method: str = "balanced"):
    """
    Like random_batch, but it handles batch_size larger than len(full_list) by nicely repeating elements.
    It supports different sampling methods specified by the 'method' argument.
    Args:
        full_list: The list to sample from.
        batch_size: The desired size of the output batch. Defaults to len(full_list) when None.
        method: The sampling method to use. Defaults to "balanced".
            - "balanced": Distribute elements as evenly as possible: whole shuffled copies of
                          full_list cover the quotient, and a random non-repeating batch covers
                          the remainder. Duplicates are at most len(full_list) apart.
            - "independent": Classic sampling with replacement. Every draw is uniform and
                             independent, like drawing balls from a hat with replacement.
                             Duplicates may appear anywhere (or not at all).
    Returns:
        A list of size batch_size containing elements sampled from full_list.
    Examples:
        >>> random_batch_with_replacement([1, 2, 3], 10, method="balanced")
        [1, 2, 3, 3, 2, 1, 2, 3, 1, 3]
        >>> random_batch_with_replacement([1, 2, 3], 10, method="independent")
        [3, 3, 1, 3, 3, 2, 3, 1, 2, 2]
    Raises:
        ValueError: If an unsupported sampling method is specified.
        AssertionError: If the code reaches an unreachable state.
    """
    full_list = list(full_list)
    if batch_size is None:
        #BUGFIX: the documented default batch_size=None used to crash on 'None >= 0' below.
        #Mirror random_batch's behavior: None means "one of everything".
        batch_size = len(full_list)
    assert batch_size >= 0
    assert len(full_list) > 0
    valid_methods = ["balanced", "independent"]
    if method not in valid_methods:
        raise ValueError("Unsupported sampling method: %s. Valid methods are: %s" % (method, ', '.join(valid_methods)))
    if method == "balanced":
        # Whole shuffled copies cover the quotient; a non-repeating random batch covers the remainder
        repeats = batch_size // len(full_list)
        remainder = batch_size % len(full_list)
        output = list_flatten(shuffled(full_list) for _ in range(repeats))
        output += random_batch(full_list, remainder)
    elif method == "independent":
        # Perform independent sampling with replacement
        output = [random.choice(full_list) for _ in range(batch_size)]
    else:
        assert False, "This code is unreachable"
    return output
def random_substring(string:str,length:int=None):
    """
    Return a random contiguous substring of the given length.
    If length is None, a random length in [0, len(string)] is chosen first.
    """
    if length is None:
        length=random_int(0,len(string))
    assert len(string)>=length
    assert length>=0
    start=random_index(len(string)-length+1) #Number of valid starting positions
    return string[start:start+length]
def shuffled(l):
    """Return a randomly shuffled copy of a list (strings come back as strings, e.g. shuffled("ABCDE") ⟶ 'EDBCA')."""
    if isinstance(l,str):
        #Special-case strings so we can jumble letters in words etc. and get a string back
        return ''.join(shuffled(list(l)))
    return random_batch(l) #random_batch with no batch_size is exactly a shuffle (an rCode identity)
def random_parallel_batch(*full_lists,batch_size: int = None,retain_order: bool = False):
    """
    Select a random batch from several parallel lists at once, keeping them synchronized:
    the same indices are chosen from every list. Created for machine learning
    input/output training-pair generation.
    EXAMPLE:
        >>> random_parallel_batch(['a','b','c','d'],[1,2,3,4],batch_size=3) --> [('c','b','d'), (3,2,4)]
    rCode identity:
        list(zip(*random_batch(list(zip(*a)),b,c))) ≣ random_parallel_batch(*a,b,c)
    NOTE: all lists in full_lists are assumed to have the same length.
    """
    #Choose one set of random indices, shared across every list so the outputs stay aligned
    batch_indexes=random_batch(list(range(len(full_lists[0]))),batch_size=batch_size,retain_order=retain_order)
    return [tuple(full_list[i] for i in batch_indexes) for full_list in full_lists]
@contextlib.contextmanager
def temporary_random_seed(seed=None):
    """
    Context manager that temporarily seeds the stdlib 'random' module, restoring
    the previous RNG state on exit. With seed=None the RNG is left unseeded
    (but its state is still restored afterwards, making the context a no-op overall).
    Parameters:
        seed (int, optional): The seed value to use for generating random numbers.
                              If None, the random state is not altered.
    Example:
        >>> random.seed(42)
        >>> print("First random number:", random.random())
        >>> with temporary_random_seed(seed=99):
        ...     print("Random number in context:", random.random())  # deterministic
        >>> print("Second random number:", random.random())  # continues exactly where it left off
    """
    import random
    saved_state = random.getstate()
    try:
        if seed is not None:
            random.seed(seed)
        yield
    finally:
        random.setstate(saved_state)
@contextlib.contextmanager
def temporary_numpy_random_seed(seed=None):
    """
    Context manager that temporarily seeds numpy's global RNG, restoring the
    previous state on exit. With seed=None the RNG is left unseeded
    (but its state is still restored afterwards, making the context a no-op overall).
    Parameters:
        seed (int, optional): The seed value to use for generating random numbers.
                              If None, the random state is not altered.
    Example:
        >>> np.random.seed(42)
        >>> print("First random number:", np.random.rand())
        >>> with temporary_numpy_random_seed(seed=99):
        ...     print("Random number in context:", np.random.rand())  # deterministic
        >>> print("Second random number:", np.random.rand())  # continues exactly where it left off
    """
    pip_import('numpy')
    import numpy as np
    saved_state = np.random.get_state()
    try:
        if seed is not None:
            np.random.seed(seed)
        yield
    finally:
        np.random.set_state(saved_state)
# endregion
# region rant/ranp: [run_as_new_thread,run_as_new_process]
def run_as_new_thread(func,*args,**kwargs):
    """
    Fire-and-forget threading: start func(*args,**kwargs) on a fresh thread and return that thread.
    Used when we simply don't need/want all the complexities of the threading module.
    Usable as a decorator, too:
        >>> @run_as_new_thread
        ... def _():
        ...     for _ in range(5) :
        ...         sleep(1)
        ...         print(_)
        >>> run_as_new_thread(save_image, image, 'image.jpg')
    """
    thread=threading.Thread(target=func,args=args,kwargs=kwargs)
    thread.start()
    return thread
def run_as_new_process(func,*args,**kwargs):
    """
    Fire-and-forget multiprocessing helper, for when we don't need the full
    complexity of the multiprocessing module: runs func(*args, **kwargs) in
    a fresh process and returns the already-started Process object.
    Example:
        >>> @run_as_new_process
        ... def _():
        ...     for _ in range(5) :
        ...         sleep(1)
        ...         print(_)
        >>> run_as_new_process(save_image, image, 'image.jpg')
    """
    from multiprocessing import Process
    worker = Process(target=func, args=args, kwargs=kwargs)
    worker.start() # can't tell the difference between start and run
    return worker
# endregion
def is_valid_url(url:str)->bool:
    """ Return true iff the url string is syntactically valid """
    from urllib.parse import urlparse
    # Non-strings can never be valid URLs
    if not isinstance(url, str):
        return False
    try:
        parts = urlparse(url)
    except Exception:
        return False
    # A syntactically valid URL needs both a scheme (http, ftp, ...) and a host
    return bool(parts.scheme) and bool(parts.netloc)
def _erase_terminal_line():
""" erase and go to beginning of line https://stackoverflow.com/questions/5290994/remove-and-replace-printed-items """
sys.stdout.write('\033[2K\033[1G')
def load_files(
    load_file,
    file_paths,
    *,
    num_threads:int=None,
    show_progress=False,
    strict=True,
    lazy=False,
    buffer_limit=None
):
    """
    Load a list of files with optional multithreading.
    - load_file (function): A function to load a single file. Expected signature: load_file(str) -> any.
    - file_paths (iterable): Paths to the files to be loaded. It's ok if this iterator is slow - paths are loaded concurrently with files.
    - num_threads (int, optional): Number of threads for concurrent loading. Defaults to 32 if set to None. If set to 0, runs on the main thread.
    - show_progress (True, False, 'eta' or 'tqdm'): Whether to show a progress bar. If set to 'tqdm', uses tqdm library. If set to 'eta', uses rp.eta. Defaults to False.
    - strict (True, False, or None): Behavior if a file fails to load. True throws an error, False skips the file, None yields None.
    - lazy (bool): If True, returns a generator; if False (default), returns a fully-materialized list.
    Returns either a generator or a list of loaded file contents, depending on 'lazy'.
    TODO: This function is more powerful than the arg names and function name imply. This could be used for general-purpose parallelism with loading bars and error handling. Make a more generalized version of this function and then re-implement a shorter version of this function that uses it.
    TODO: Make a save_files version of this - big question: can a save_files function be implemented with the load_files function (and more elegantly with the above generalized version?)
    TODO: The eta can't display a specific message like "loading images" etc - it's locked to load_files right now. How can I *elegantly* allow this naming but also allow it to use tqdm?
    TODO: Make convert_image_files take advantage of this function
    """
    # All the real work happens in the _load_files generator
    generator = _load_files(
        load_file,
        file_paths,
        num_threads,
        show_progress,
        strict,
        buffer_limit,
    )
    return generator if lazy else list(generator)
def _load_files(
    load_file,
    file_paths,
    num_threads:int=None,
    show_progress=False,
    strict=True,
    buffer_limit=None,
):
    """
    Helper generator for load_files: lazily yields load_file(path) for each path.

    Parameters mirror load_files:
      - load_file: callable taking a path string and returning the loaded value.
      - file_paths: iterable of path strings (materialized into a list only when showing progress).
      - num_threads: None -> default of 32 worker threads; 0 -> run everything on the calling thread.
      - show_progress: False, True, 'tqdm', 'eta', or 'eta:<title>' (internal variant that customizes the title).
      - strict: True -> re-raise the first load error; False -> silently skip failed files; None -> yield None in their place.
      - buffer_limit: forwarded to lazy_par_map to bound how far ahead the workers may run.
    """
    assert strict is True or strict is False or strict is None, "The 'strict' parameter must be set to either True, False, or None."
    assert show_progress in {True, False, "tqdm", "eta"} or isinstance(show_progress, str) and starts_with_any(show_progress, 'eta:'), "The 'show_progress' parameter must be either True, False, or 'tqdm'."
    assert num_threads is None or isinstance(num_threads, int) and num_threads >= 0, "Must have at least 1 thread, or set num_threads=0 to run in the main thread only"
    assert is_iterable(file_paths), 'rp.load_files: file_paths must be iterable, but type(file_paths) is '+str(type(file_paths))
    assert callable(load_file), 'rp.load_files: load_file must be a function that takes a file path and returns a value, but type(load_file) is '+str(type(load_file))
    if num_threads is None:
        # Choose a nice default value
        num_threads = 32
    SKIP = object() # Special object indicating a skipped file
    cancelled = None # Will be set if any thread, including the main thread, throws an error
    # Build progress_func(action), where action is "update" (one item finished) or "done" (all items finished)
    if show_progress:
        file_paths = list(file_paths) # Materialize so we can count the total number of items
        assert hasattr(file_paths,"__len__"), "Cannot show progress because file_paths doesnt have a length"
        num_paths = len(file_paths)
        if show_progress == "tqdm":
            pip_import('tqdm') # Ensures tqdm is installed
            from tqdm import tqdm
            pbar = tqdm(total=num_paths)
            def progress_func(action):
                if action == "update":
                    pbar.update(1)
                elif action == "done":
                    pbar.close()
        elif show_progress == True or isinstance(show_progress, str) and (show_progress=='eta' or show_progress.startswith('eta:')):
            eta_title = 'Loading Files'
            if isinstance(show_progress, str) and show_progress.startswith('eta:'):
                # This is currently-undocumented functionality, used internally in rp. Maybe I'll document it in the future
                eta_title = show_progress[len('eta:'):]
            show_eta = eta(num_paths, title=eta_title)
            num_yielded = 0
            start_time = gtoc()
            def progress_func(action):
                nonlocal num_yielded
                if action == "update":
                    num_yielded += 1
                    show_eta(num_yielded)
                elif action == "done":
                    elapsed_time = gtoc() - start_time # NOTE(review): currently unused because the status print below is commented out
                    # _print_status("%s: Done! Did %i items in %.3f seconds"%(eta_title, num_yielded, elapsed_time))#This is here because of the specifics of the eta function we're using to display progress
    else:
        # No progress display requested: use a no-op reporter
        def progress_func(action):
            pass
    def _load_file(path):
        # Wraps load_file with cancellation, the 'strict' error policy, and progress reporting
        nonlocal cancelled
        if cancelled:
            raise cancelled # Another worker already failed in strict mode; abort quickly
        try:
            content = load_file(path)
        except BaseException as e:
            if strict is True:
                cancelled = e # Signal all other workers to stop
                raise
            elif strict is False:
                content = SKIP # Will be filtered out of the output entirely
            else:
                assert strict is None
                content = None # Placeholder preserves the failed file's position in the output
        progress_func("update")
        return content
    def skip_filter(iterable):
        # Drop the SKIP sentinels produced for failed files under strict=False
        return filter(lambda x: x is not SKIP, iterable)
    try:
        if not num_threads:
            # Load all the files in the main thread
            yield from skip_filter(map(_load_file, file_paths))
        else:
            # Load files with multiple threads
            yield from skip_filter(lazy_par_map(
                _load_file,
                file_paths,
                num_threads=num_threads,
                buffer_limit=buffer_limit,
            ))
        progress_func("done")
    except BaseException as e:
        # Also covers errors raised by whoever is consuming this generator
        cancelled = e
        raise
# region Saving/Loading Images: [load_image,load_image_from_url,save_image,save_image_jpg]
# Maps absolute file path / URL -> numpy array of RGB frames; read by load_animated_gif when use_cache=True
_load_animated_gif_cache={}
def load_animated_gif(location,*,use_cache=True):
    """
    Location should be a url or a file path pointing to a GIF file
    Loads an array of frames of an RGB animated GIF
    Can load from a file or from a URL
    EXAMPLE:
        while True:
            url = 'https://i.pinimg.com/originals/80/26/71/80267166501067a9da5e6b9412bdd9df.gif'
            for frame in load_animated_gif(url,use_cache=True):
                display_image(frame)
                sleep(1/20)
    """
    location = get_absolute_path(location) #Important for caching
    if use_cache and location in _load_animated_gif_cache:
        return _load_animated_gif_cache[location]
    pip_import('PIL')
    from PIL import Image, ImageSequence
    if is_valid_url(location):
        try:
            from urllib.request import urlopen
            gif = Image.open(urlopen(location))
        except Exception:
            #Sometimes the above method doesn't work.
            #When it doesn't, often downloading the image and loading it from the hard drive will still work; so we'll try that before giving up.
            temp_file=temporary_file_path()
            try:
                download_url(location,temp_file)
                output=load_animated_gif(temp_file)
            finally:
                delete_file(temp_file)
            #Fix: cache the fallback result under the original URL too - previously this
            #path returned before the cache write, so repeated calls re-downloaded the GIF
            _load_animated_gif_cache[location]=output
            return output
    else:
        assert file_exists(location), 'No such file exists: ' + repr(location)
        gif = Image.open(location)
    frames = [as_numpy_array(frame.convert('RGB')) for frame in ImageSequence.Iterator(gif)]
    frames = as_numpy_array(frames)
    _load_animated_gif_cache[location]=frames
    return frames
# Maps absolute file path / URL -> loaded image array; read and written by load_image when use_cache=True
_load_image_cache={}
def load_image_from_clipboard():
    """ Grab an image copied from your clipboard and return it as an image (via a temporary PNG round-trip, so the result matches load_image's output) """
    pip_import('PIL')
    from PIL import ImageGrab
    assert currently_running_windows() or currently_running_mac(),'load_image_from_clipboard() only works on Mac and Windows right now; sorry. This is because of PIL.'
    clipboard_image=ImageGrab.grabclipboard()
    path=temporary_file_path('.png')
    try:
        clipboard_image.save(path)
        return load_image(path)
    finally:
        #Fix: the temp file used to leak if save/load raised; clean it up best-effort, consistent with copy_image_to_clipboard
        squelch_call(delete_file,path)
def copy_image_to_clipboard(image):
    """
    Takes an image or a path/url to an image and copies that image to your system clipboard. If you're using Ubuntu, you must install xclip.
    Note that it only operates on RGB Jpg images right now - so alpha channels will be discarded. In the future, this will be fixed and ideally we will save png images by default.
    EXAMPLE:
        >>> ans = get_youtube_video_thumbnail('https://www.youtube.com/watch?v=iu54gTucsiE')
        ... copy_image_to_clipboard(ans)
        ... #Try pasting into photoshop or something
    """
    pip_import('pyjpgclipboard')
    #Strings are treated as paths/urls and loaded first
    if isinstance(image, str):
        image = load_image(image)
    jpg_path = temporary_file_path('jpg')
    try:
        save_image_jpg(image, jpg_path)
        import pyjpgclipboard
        pyjpgclipboard.clipboard_load_jpg(jpg_path)
    finally:
        #Best-effort cleanup of the temporary jpg
        squelch_call(delete_file, jpg_path)
def load_image(location,*,use_cache=False):
    """ Automatically detect if location is a URL or a file path and try to smartly choose the appropriate function to load the image """
    assert isinstance(location,str),'load_image error: location should be a string representing a URL or file path. However, location is not a string. type(location)=='+repr(type(location))+' and location=='+repr(location)
    if path_exists(location):
        location=get_absolute_path(location) #This is important for caching. ./image.jpg might mean different things when we're running in different directories.
    #Fix: the cache-hit condition used to test 'use_cache' twice redundantly
    if use_cache and location in _load_image_cache:
        #Return a copy so callers can't mutate the cached array
        return _load_image_cache[location].copy()
    if is_valid_url(location):
        out = load_image_from_url (location)
    else:
        out = load_image_from_file(location)
    if use_cache:
        #Only save to the cache if we're using use_cache, otherwise loading thousands of images with this method might run out of memory
        _load_image_cache[location]=out
    return out
def load_rgb_image(location,*,use_cache=False):
    """
    Like load_image, but guarantees the result has no alpha channel.
    This is just a convenience wrapper so you don't have to compose the two calls yourself.
    """
    image = load_image(location, use_cache=use_cache)
    return as_rgb_image(image)
class LazyLoadedImages:
    """
    A read-only sequence of images that defers the load_image calls until indexing time.
    Any extra *args/**kwargs given to the constructor are forwarded to load_image on every access.
    """
    def __init__(self, image_paths: list, *args, **kwargs):
        self.image_paths = image_paths
        self.args = args
        self.kwargs = kwargs

    def __getitem__(self, index):
        #Images are loaded lazily - nothing is read from disk until here
        path = self.image_paths[index]
        return load_image(path, *self.args, **self.kwargs)

    def __len__(self):
        return len(self.image_paths)
def load_images(*locations,use_cache=False,show_progress=False,num_threads=None,strict=True):
    """
    Simply the plural form of load_image
    This is much faster than using load_image sequentially because it's multithreaded. I've had performance boosts of up to 8x speed
    This function will throw an error if any one of the images fails to load
    If given a folder as the input path, will load all image files from that folder
    The locations parameter:
        Can be a list of images:    load_images(['img1.png','img2.jpg','img3.bmp'])
        Can be a varargs of images: load_images( 'img1.png','img2.jpg','img3.bmp' )
        Can be a folder of images:  load_images( 'path/to/image/folder' )
    The strict parameter controls what this function should do when an image fails to load. This is useful when loading a folder full of images, some of which might be corrupted.
        If strict==True, this function will throw an error if any one of the images fails to load
        If strict==False, this function will skip any images that fail to load (so you might not have as many images in the output as you did paths in the input)
        If strict==None, this function will replace any images that failed to load with 'None' instead of a numpy array. So the output might look like [image0. image1, image2, None, image4] where image1, image0 etc are numpy arrays
    """
    assert strict in {True, False, None}, 'load_images: The \'strict\' parameter must be set to either True, False, or None. See the documentation for this function to see what that means.'
    if len(locations)==1:
        #Unwrap the single vararg: it may be a list of paths, a folder, or one path string
        locations=locations[0]
    if isinstance(locations,str) and is_a_folder(locations):
        locations=get_all_image_files(locations,sort_by='number')
        #Fix: forward num_threads on the recursive call (it used to be dropped)
        return load_images(locations,use_cache=use_cache,show_progress=show_progress,strict=strict,num_threads=num_threads)
    if isinstance(locations,str):
        #A single path string becomes a one-element list
        locations=[locations]
    if show_progress in ['eta',True]: show_progress='eta:Loading Images'
    return load_files(lambda path:load_image(path,use_cache=use_cache), locations, show_progress=show_progress, strict=strict, num_threads=num_threads)
    #NOTE: A large block of dead code (an unreachable string literal containing the old
    #pre-load_files implementation, plus commented-out drafts) was removed from here.
# output=[]
# for i,location in enumerate(locations):
# image=load_image(location,use_cache=use_cache)
# output.append(image)
# if display_progress:
# show_time_remaining(i)
# return [load_image(location,use_cache=use_cache) for location in locations]
#def load_images_in_parallel(*locations,use_cache=False):
# #This is like load_images, except it runs faster.
# locations=delist(detuple(locations))
# output=[]
# show_time_remaining=eta(len(locations))
def load_image_from_file(file_name):
    """
    Load an image file into an array, trying several backends in order.
    Backend order: (exr/heic special-cases) -> imageio -> scipy -> opencv -> PIL.
    Raises if the file does not exist, or if every backend fails (the final PIL error propagates).
    """
    assert file_exists(file_name),'No such image file exists: '+repr(file_name)
    extension = get_file_extension(file_name) #Hoisted: it was computed repeatedly before
    if extension=='exr':
        #Imageio doesn't load exr files well consistently across my computers. Sometimes it gives incorrect results because of some glitch in the freeimage library. I don't know why this is...
        #That being said, the load_openexr_image is more versatile anyway...and loads exr files properly
        try :return load_openexr_image(file_name)
        except Exception:pass
    if extension.upper()=='HEIC':
        #Apple photo format - the PIL function can do this
        return _load_image_from_file_via_PIL(file_name)
    try :return _load_image_from_file_via_imageio(file_name)#Imageio will not forget the alpha channel when loading png files
    except Exception :pass #Don't cry over spilled milk...if imageio didn't work we'll try the other libraries.
    try :return _load_image_from_file_via_scipy (file_name)#Expecting that scipy.misc.imread doesn't exist on the interpereter for whatever reason
    except ImportError:pass
    try :return _load_image_from_file_via_opencv (file_name)#OpenCV is our last choice here, because when loading png files it forgets the alpha channel...
    except Exception :pass
    #Last resort: let any PIL failure propagate to the caller
    #(Fix: this used to be wrapped in a pointless 'try: ... except Exception: raise')
    return _load_image_from_file_via_PIL(file_name)
#Tracks whether pillow_heif's opener has already been registered with PIL
_init_pillow_heif_called=False
def _init_pillow_heif():
    """ Idempotently register pillow_heif so PIL.Image.open can read .heic files. """
    #https://stackoverflow.com/questions/54395735/how-to-work-with-heic-image-file-types-in-python
    global _init_pillow_heif_called
    if _init_pillow_heif_called:
        return
    pip_import('pillow_heif')
    import pillow_heif
    pillow_heif.register_heif_opener()
    _init_pillow_heif_called = True
def _load_image_from_file_via_PIL(file_name):
    """ Load an image with PIL, returning an array. Registers the pillow_heif opener first for .heic files. """
    if file_name.upper().endswith('.HEIC'):
        _init_pillow_heif()
    pip_import('PIL')
    from PIL import Image
    result = as_numpy_array(Image.open(file_name))
    assert is_image(result),'Sometimes when PIL fails to load an image it doesnt throw an exception, and returns a useless object. This might be one of those times.'
    return result
def _load_image_from_file_via_imageio(file_name):
    """
    Load an image with the imageio library.
    NOTE if this method fails try the following function:
        imageio.plugins.freeimage.download() #https://github.com/imageio/imageio/issues/334 #This helps when it fails. I don't know why it sometimes fails and sometimes doesn't...
    NOTE that even though this made it not crash, it's still sometimes reading exr files wrong...depending on the computer....
    """
    pip_import('imageio')
    import imageio
    return imageio.imread(file_name)
def _load_image_from_file_via_scipy(file_name):
    """ Load an image via scipy.misc.imread. Only exists on old scipy versions - the
    ImportError raised by modern scipy is deliberately how callers detect 'try the next backend'. """
    from scipy.misc import imread
    return imread(file_name)
def _load_image_from_file_via_opencv(file_name):
    """ Load an image via OpenCV, converting its BGR channel order to RGB. Raises AssertionError if the file can't be read. """
    cv2=pip_import('cv2')
    flags = cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH
    bgr_image = cv2.imread(file_name, flags)
    # image=cv2.imread(file_name)
    #By default, opencv doesn't raise an error when the file isn't found, and just returns None....which is dumb. It should act like scipy.misc.imread, which throws a FileNotFoundError when given an invalid path.
    assert bgr_image is not None, "OpenCV failed to load image file at the path: "+file_name
    #OpenCV is really weird and doesn't use RGB: It uses BGR for some strange legacy reason. We have to swap the channels to make it useful.
    return cv_bgr_rgb_swap(bgr_image)
@contextlib.contextmanager
def _disable_insecure_request_warning():
    """ Context manager that temporarily silences urllib3's InsecureRequestWarning (as raised by requests.get(..., verify=False)). """
    pip_import('requests')
    from requests.packages.urllib3.exceptions import InsecureRequestWarning
    with warnings.catch_warnings():
        #The warning filter change is scoped to this with-block only
        warnings.simplefilter("ignore", InsecureRequestWarning)
        yield
def load_image_from_url(url: str):
    """
    Url should either be like http://website.com/image.png or like data:image/png;base64,iVBORw0KGgoAAAANSUhEUg...
    Returns a numpy image
    """
    assert url.startswith('data:image') or is_valid_url(url), 'load_image_from_url error: invalid url: ' + repr(url)
    pip_import('PIL')
    from PIL import Image
    from io import BytesIO
    import numpy as np
    if url.startswith('data:image'):
        #Fix: requests can't fetch data: URIs (it raises InvalidSchema), so decode the
        #inline payload ourselves. Assumes a base64 data URI as shown in the docstring.
        import base64
        payload = url.split(',', 1)[1]
        content = base64.b64decode(payload)
    else:
        pip_import('requests')
        import requests
        with _disable_insecure_request_warning():
            #NOTE: verify=False skips TLS certificate validation - convenient but insecure
            response = requests.get(url, verify=False)
        content = response.content
    return np.add(Image.open(BytesIO(content)), 0) # Converts it to a numpy array by adding 0 to it.
def load_image_from_matplotlib(*,dpi:int=None,fig=None):
    """
    Return matplotlib's current display as an image
    You can increase the DPI to get a higher resolution. Set it to something like 360 or higher.
    Example:
        line_graph(random_ints(10))
        cv_imshow(load_image_from_matplotlib())
    """
    import io
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    #Default to the current figure and its own dpi
    figure = plt.gcf() if fig is None else fig
    resolution = figure.dpi if dpi is None else dpi
    #Render the figure to an in-memory PNG...
    with io.BytesIO() as buffer:
        figure.savefig(buffer, format="png", dpi=resolution)
        buffer.seek(0)
        png_bytes = np.frombuffer(buffer.getvalue(), dtype=np.uint8)
    #...then decode it and convert OpenCV's BGR output to RGB
    decoded = cv2.imdecode(png_bytes, 1)
    return cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
#def load_openexr_image(file_path:str):
# #NOTE: This function is unnecessary for loading EXR files with full quality...assuming they're RGB. load_image works fine; I haven't tested it with more than 4 channels yet though
#
# #Takes .exr image file with a depth map, and returns an RGBAZ image (where Z is depth, as opposed to an RGBA image.)
# #Because of the way .exr files work, the output of this function is not an image as defined by rp.is_image, because it has 5 channels (all floating point)
# #This function exists because load_image ignores the depth-map channel, which is important information but is ignored by OpenCV's importer as well as Snowy's and all other libraries I've tried so far
# #This function requires a python package called 'openexr'. It can be annoying to install.
# pip_import('OpenEXR') # This package can be a bit of a pain-in-the-ass to get working; it requires apt-installs on Ubuntu and brew-installs on Mac. On ubuntu, try 'sudo apt install openexr ; sudo apt install libopenexr-dev' and if that fails try 'sudo apt remove libopenexr22' and try installing openexr and libopenexr-dev again
#
# assert file_exists(file_path),'File not found: '+file_path
#
# import OpenEXR, Imath, numpy
#
# #Below code adapted from: https://www.blender.org/forum/viewtopic.php?t=24549
#
# exrimage = OpenEXR.InputFile(file_path)
#
# dw = exrimage.header()['dataWindow']
# (width, height) = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
#
# def fromstr(s):
# mat = numpy.fromstring(s, dtype=numpy.float16)
# mat = mat.reshape (height,width)
# return mat
#
# pt = Imath.PixelType(Imath.PixelType.HALF)
# (r, g, b, a, z) = [fromstr(s) for s in exrimage.channels('RGBAZ', pt)]
# return np.dstack((r,g,b,a,z)).astype(float)
def _get_openexr_image_dimensions(file_path)->tuple:
    """
    Return (height, width) of an .exr image file.
    Accepts either a path string or an already-open OpenEXR.InputFile.
    Fix: the return annotation used to say 'set', but this returns a (height, width) tuple.
    """
    pip_import('OpenEXR')
    import OpenEXR
    if isinstance(file_path,OpenEXR.InputFile):
        #This is to save a bit of time when calling this function from other rp functions:
        #Preferably don't re-read a file more than once
        input_file = file_path
    else:
        input_file = OpenEXR.InputFile(file_path)
    #The data window encodes inclusive pixel bounds, hence the +1's
    dw = input_file.header()['dataWindow']
    width, height= (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
    return height, width
def is_valid_openexr_file(file_path):
    """
    Returns True iff the file path points to an exr file
    """
    OpenEXR = pip_import('OpenEXR')
    return OpenEXR.isOpenExrFile(file_path)
def get_openexr_channels(file_path)->set:
    """
    Gets a set of strings indicating what channels are in a given .exr file
    Note that .exr files are floating point images that can contain arbitrary numbers of named channels
    EXAMPLE:
        >>> get_openexr_channels('Image0032.exr')
       ans = {'A', 'G', 'R', 'B'}
    """
    pip_import('OpenEXR')
    import OpenEXR
    #Accept an already-open InputFile to save time when called from other rp functions
    #(preferably don't re-read a file more than once)
    if not isinstance(file_path, OpenEXR.InputFile):
        file_path = OpenEXR.InputFile(file_path)
    channel_names = file_path.header()['channels']
    return set(channel_names)
def load_openexr_image(file_path,*,channels=None):
    """
    Will load a floating point openexr image as a numpy array
    The 'channels' argument is used to specify the channel names that are loaded
    By default this works only with RGB or RGBA floating point .exr images, but note that .exr files are interesting: they can have arbitrarily many *named* channels, and blender can exploit this.
    For example, if you look at the readme on https://github.com/cheind/py-minexr (a library that loads .exr files), they have a demo showing blender outputting both normals, depth, and color in a single exr file
    To access these channels, or if you have alpha etc, simply list those channels' names in the 'channels' argument
    If you're not sure what channels an openexr image has (maybe it's RGB maybe it's RGBA? Maybe it has depth? Etc) then call get_openexr_channels(file_path)
    EXAMPLES:
        >>> load_openexr_image('Image0032.exr',channels=None).shape #It happens to be the case that Image0032.exr is RGBA. This was detected automatically because channels=None
        ans = (716, 862, 4)
        >>> load_openexr_image('Image0032.exr',channels=('R','G','B','A')).shape #You can specify the channels in any order you want manually
        ans = (716, 862, 4)
        >>> load_openexr_image('Image0032.exr',channels=('R','G','B')).shape #If you want, you can even exclude certain channels
        ans = (716, 862, 3)
        >>> load_openexr_image('Image0032.exr',channels=('R','G','B','B','B','B')).shape #...or even use some channels more than once, just to prove my point...
        ans = (716, 862, 6)
        >>> load_openexr_image('Image0032.exr',channels=('R','G','B','asdf')).shape #It will throw errors if the channels you give aren't in the exr file
        ERROR: AssertionError: load_openexr_image: OpenEXR file is missing the following channels: {'asdf'}
        >>> get_openexr_channels('Image0032.exr') #If you're not sure what channels are contained in an OpenEXR image, use this function
        ans = {'A', 'G', 'R', 'B'}
    This code was originally from https://www.programcreek.com/python/example/124985/OpenEXR.InputFile
    Imageio used to do this well...but for some reason, as of April 27 2022, it suddenly stopped working correctly and gave wrong values along the blue channel
    This function has been checked to make sure the resulting floating point numbers are correct
    """
    pip_import('OpenEXR')
    pip_import('Imath')
    pip_import('numpy')
    import OpenEXR
    import Imath
    import numpy as np
    # Read OpenEXR file
    if not is_valid_openexr_file(file_path):
        assert False, 'rp.load_openexr_image: Image %s is not a valid OpenEXR file'%file_path
    input_file = OpenEXR.InputFile(file_path)
    pixel_type = Imath.PixelType(Imath.PixelType.FLOAT)
    height, width = _get_openexr_image_dimensions(input_file)
    #Handle the 'channels' argument if it's None
    input_channels = get_openexr_channels(input_file)
    if channels is None:
        #If channels is None, assume the image is either RGB or RGBA
        if   input_channels=={'R','G','B'    }: channels=('R','G','B'    )
        elif input_channels=={'R','G','B','A'}: channels=('R','G','B','A')
        else:
            assert False, 'rp.load_openexr_image: This image (aka %s) is neither RGB nor RGBA. Please specify the channels manually, such as channels==%s'%(file_path,str(get_openexr_channels(input_file)))
    assert set(channels)<=input_channels, 'rp.load_openexr_image: OpenEXR file is missing the following channels: '+repr(set(channels) - input_channels)
    # Read into tensor
    image = np.zeros((height, width, len(channels)))
    for i, channel in enumerate(channels):
        #Fix: np.fromstring is deprecated and was removed in NumPy 2.0; np.frombuffer is the supported equivalent
        rgb32f = np.frombuffer(input_file.channel(channel, pixel_type), dtype=np.float32)
        image[:, :, i] = rgb32f.reshape(height, width)
    return image
# File extensions (without dots) that cv2.imencode/imdecode can handle; used to validate 'filetype' arguments
_opencv_supported_image_formats='bmp dib exr hdr jp2 jpe jpeg jpg pbm pfm pgm pic png pnm ppm pxm ras sr tif tiff webp'.split()
def _encode_image_to_bytes(image,filetype:str,quality):
    """
    Helper function for encode_image_to_bytes.
    Encodes a numpy image into the bytes of an image file of the given type,
    using OpenCV. 'quality' is a percentage applied to lossy formats.
    """
    assert is_image(image)
    assert isinstance(filetype,str)
    cv2=pip_import('cv2')
    filetype=filetype.lower() #Make filetype not case sensitive
    if not filetype.startswith('.'):
        #Allow filetype to be 'png' which gets turned into '.png'
        filetype='.'+filetype
    assert filetype.startswith('.')
    assert filetype[1:] in _opencv_supported_image_formats, 'Unsupported image format: '+repr(filetype)+', please choose from [.'+', .'.join(_opencv_supported_image_formats)+']'
    image=as_byte_image(image)
    if filetype in '.png .tif .tiff .webp'.split(): #All filetypes that support transparency should go here
        if not is_rgb_image(image): #But don't add transparency unless we have to
            image=as_rgba_image(image)
    else:
        image=as_rgb_image(image)
    image=cv_rgb_bgr_swap(image)
    #Pick the right quality flag per format.
    #Fix: .webp used to be given IMWRITE_JPEG_QUALITY, which OpenCV ignores for webp.
    if filetype=='.webp':
        encode_params=[int(cv2.IMWRITE_WEBP_QUALITY), quality]
    elif filetype in '.jpg .jpeg .jp2 .jpe'.split():
        encode_params=[int(cv2.IMWRITE_JPEG_QUALITY), quality]
    else:
        encode_params=[]
    success, encoded_image = cv2.imencode(filetype, image, encode_params)
    if not success:
        raise IOError('Failed to encode image to '+filetype+' bytes')
    return encoded_image.tobytes()
def encode_image_to_bytes(image,filetype=None,quality=100):
    """
    Encodes an image into a bytestring, without actually saving it to your harddrive
    The bytes are the same as if you saved an image to a file with this filetype and quality
    Args:
        image (str or object): Image location, or an image as defined by rp.is_image
        filetype (str, optional): The type of image file we encode it to. If not specified, automatically determined.
            * Supported options: bmp, dib, exr, hdr, jp2, jpe, jpeg, jpg, pbm, pfm, pgm, pic, png, pnm, ppm, pxm, ras, sr, tif, tiff, webp
        quality (int): If applicable, defines the image quality (useful when filetype=='jpg' for instance). Specified as a percentage.
            * Supported filetypes: webp, jpeg, jpg, jpe, jp2
            * NOTE: Even at quality=100, it is not currently lossless. TODO: Fix this
    Returns:
        str: a bytestring that contains the encoded image file
    EXAMPLE:
        ans='https://upload.wikimedia.org/wikipedia/commons/6/6e/Golde33443.jpg'
        ans=load_image(ans)
        ans=encode_image_to_bytes(ans,'png')
        ans=decode_image_from_bytes(ans)
        display_image(ans)
    TODO: Add PIL support too, in case cv2 fails. PIL can also do this.
    TODO: Add image quality parameters for jpg
    Reference: https://jdhao.github.io/2019/07/06/python_opencv_pil_image_to_bytes/
    """
    assert is_image(image) or isinstance(image, str)
    #(Fix: removed an unused 'import base64' here)
    if filetype is None:
        if isinstance(image,str):
            #Paths/urls keep their own extension
            filetype=get_file_extension(image)
        else:
            if is_rgba_image(image):
                #If we have alpha, use a png
                filetype='png'
            else:
                #Otherwise, might as well save a bit of bandwidth
                #Assumes we will encode losslessly when quality=100
                #filetype='jpg'
                #But actually, right now that assumption is false, so...
                filetype='png'
    if isinstance(image,str) and get_file_extension(image)==filetype and quality==100:
        #Save some time and just return the bytestring directly
        return curl_bytes(image)
    if isinstance(image, str):
        image = load_image(image)
    #gather_args_call forwards the local variables (image, filetype, quality) by name
    byte_data = gather_args_call(_encode_image_to_bytes)
    return byte_data
def encode_images_to_bytes(images, filetype=None, quality=100):
    """ Plural form of encode_image_to_bytes: encodes each image, then packs the whole list into a single bytestring. """
    #Fix: the local variable used to shadow the builtin 'object'
    encoded_list = [encode_image_to_bytes(x, filetype=filetype, quality=quality) for x in images]
    return object_to_bytes(encoded_list)
def decode_images_from_bytes(encoded_images):
    """ Inverse of encode_images_to_bytes. Accepts either the packed bytestring, or an already-unpacked list of encoded images. """
    if isinstance(encoded_images, bytes):
        encoded_images = bytes_to_object(encoded_images)
    images = [decode_image_from_bytes(encoded) for encoded in encoded_images]
    unique_shapes = set(image.shape for image in images)
    if len(unique_shapes) == 1:
        #All images share one shape, so we can stack them into a single numpy array
        images = as_numpy_array(images)
    return images
def encode_image_to_base64(image,filetype=None,quality=100):
    """
    Encodes an image into a base64 string. Useful for HTTP requests, or displaying HTML images in jupyter.
    Args:
        image (str or object): Image location, or an image as defined by rp.is_image
        filetype (str, optional): The type of image file we encode it to
        quality (int): If applicable, defines the image quality (useful when filetype=='jpg' for instance)
    Returns:
        str: a base-64 string containing the image
    """
    import base64
    #Fix: 'quality' used to be accepted but silently dropped; it is now forwarded to the encoder
    byte_data = encode_image_to_bytes(image, filetype, quality)
    return base64.b64encode(byte_data).decode("utf-8")
def encode_images_to_base64(images,filetype=None,quality=100):
    """ Plural form of encode_image_to_base64: returns a list of base-64 strings, one per image. """
    output = []
    for image in images:
        output.append(encode_image_to_base64(image, filetype, quality))
    return output
def decode_image_from_bytes(encoded_image:bytes):
    """
    Decode an encoded image bytestring (such as produced by encode_image_to_bytes) back into a numpy image.
    Supports any filetype in r._opencv_supported_image_formats, including jpg, bmp, png, exr and tiff
    TODO: Fix support for opencv, I suspect it will be faster.
    EXAMPLE:
        ans='https://upload.wikimedia.org/wikipedia/commons/6/6e/Golde33443.jpg'
        ans=load_image(ans)
        ans=encode_image_to_bytes(ans)
        ans=decode_image_from_bytes(ans)
        display_image(ans)
    """
    pip_import('PIL')
    from PIL import Image
    from io import BytesIO
    buffer = BytesIO(encoded_image)
    pil_image = Image.open(buffer)
    return np.array(pil_image)
# #TODO: Fix this if PIL is too slow
#pip_import('cv2')
#import cv2
#success, decoded_image = cv2.imdecode(encoded_image,cv2.IMREAD_ANYCOLOR)
#if not success:
# raise IOError('Failed to decode image')
#return decoded_image
def save_image(image,file_name=None,add_png_extension: bool = True):
    """
    Save a numpy image to a file, returning the path it was saved to.
    - If file_name is None, a unique path like 'image.png' (or 'image (copy).png' etc) is chosen.
    - Format-specific writers are tried first for exr/jpg/jxl/avif/webp; everything else falls
      through to a generic writer (imageio -> scipy -> skimage -> opencv, first one that imports).
    - add_png_extension: if the path has no extension, '.png' is appended (legacy behavior kept
      for compatibility with older scripts).
    Todo: Add support for imageio, which can also write images
    """
    if file_name==None:
        # file_name=temporary_file_path('png')
        file_name=get_unique_copy_path('image.png')
    if file_name.startswith('~'):
        # Expand '~' to the user's home directory
        file_name=get_absolute_path(file_name)
    #If the specified path's folders don't exist, make them. Don't whine and throw errors.
    make_parent_directory(file_name)
    if get_file_extension(file_name)=='exr':
        #Note that exr filetypes must have a float32 dtype
        # image=as_float_image(image).astype(np.float32)
        save_openexr_image(image, file_name)
        return file_name
    elif get_file_extension(file_name)=='jpg':
        #Try to save using this jpg-specific method - it guarentees 100% quality
        #If it fails, fall through to the generic writer below
        try: return save_image_jpg(image, file_name, quality=100)
        except Exception: pass
    elif get_file_extension(file_name)=='jxl':
        try: return save_image_jxl(image, file_name, quality=100)
        except Exception: pass
    elif get_file_extension(file_name)=='avif':
        try: return save_image_avif(image, file_name, quality=100)
        except Exception: pass
    elif get_file_extension(file_name)=='webp':
        try: return save_image_webp(image, file_name, quality=100)
        except Exception: pass
    else:
        #Suppress any warnings about losing data when coming from a float_image...that's a given, considering that png's only have one byte per color channel...
        image=as_byte_image(image)
    #Pick whichever image-writing backend is available, binding it to 'imsave'
    try:
        try:
            pip_import('imageio') #This is the best library for this task: it handles the most image types, and it does it just as fast as opencv
            if get_file_extension(file_name)=='exr':
                from imageio import imwrite as imsave #Imageio is the best at saving .exr files
            else:
                from imageio import imsave as imsave
        except Exception:
            from scipy.misc import imsave
    except Exception:
        try:
            from skimage.io import imsave
        except Exception:
            try:
                pip_import('cv2')
                from cv2 import imwrite
                imsave=lambda filename,data: imwrite(filename,cv_bgr_rgb_swap(as_rgba_image(as_byte_image(data))))
            except Exception:
                # NOTE(review): if every backend fails, 'imsave' is never bound and the
                # call below raises NameError rather than a clearer error message
                pass
    if add_png_extension and not has_file_extension(file_name):#Save a png file by default
        file_name+=".png"
    if get_file_extension(file_name).lower() in 'jpg jpeg'.split():
        # JPEG can't store alpha, so drop it before writing
        image=as_rgb_image(image)
    imsave(file_name,image)
    return file_name
def save_images(images,paths:list=None,skip_overwrites=False,show_progress=False):
    """
    Save images to specified paths concurrently.
    Parameters:
        - images (list): List of image objects to save.
        - paths (list, str, or None, optional): Determines the file paths for saving images.
            * If None, each image is saved with a random name followed by an index (such as 'Aos8Bs32_00001.png', 'Aos8Bs32_00002.png' ...)
            * If a string:
                - If its a folder, uses same names as setting paths==None, except images are saved in that subfolder
                - Treated as a format string (like 'image_%03i.png') to generate file paths.
            * If a list, each element should be a path corresponding to each image.
        - skip_overwrites (bool, optional): If True, does not overwrite existing files. Default is False.
        - show_progress (bool, optional): If True, shows progress and estimated time remaining. Default is False.
    Returns:
        - list: Paths where images were saved.
    Examples:
        >>> save_images(my_images, 'image_%03i.png', skip_overwrites=True, show_progress=True)
    """
    if paths is None or isinstance(paths,str) and folder_exists(paths):
        #No per-image paths given: generate '<randomhash>_00001.png'-style names (in the given folder, if any)
        new_paths=random_namespace_hash()+'_%05i.png'
        if folder_exists(paths):
            new_paths=path_join(paths,new_paths)
        paths=new_paths
        # paths=[None]*len(images)
        #if show_progress:
            #print("rp.save_images: No paths were specified for your %i images, so their names will be their hash values...calculating the image hash values...",end='',flush=True)
        #
        #paths=[str(handy_hash(image)) for image in images] #By defualt, give each image it's own unique name
        #
        #if show_progress:
            #sys.stdout.write('\033[2K\033[1G')#erase and go to beginning of line https://stackoverflow.com/questions/5290994/remove-and-replace-printed-items
    if isinstance(paths,str):
        #Assume paths is a formattable string that takes an int index
        paths=[paths % i for i in range(len(images))]
    if show_progress:
        number_of_images_saved=0
        show_time_remaining=eta(len(paths),title='Saving Images')
        start_time=gtoc()
    assert len(paths)==len(images),'Must have exactly one path to go with every image'
    assert all(map(is_image,images)),'All images must be images as defined by rp.is_image'
    assert all(isinstance(path,str) or path is None for path in paths),'All paths must be strings. They are where the images are saved to.'
    cancelled=False #This variable is used to make sure that all the other image-saving threads halt if the user of this function throws an exception, such as a KeyboardInterrupt (maybe it was taking too long and they got impatient...)
    def _save_image(image,path):
        #Worker run (possibly concurrently) for each (image, path) pair; returns the final saved path
        if cancelled:
            return
        if path is None:
            path=str(handy_hash(image))+'.png'
        if skip_overwrites and path_exists(path):
            pass #We do nothing, we're skipping this image!
        else:
            maybe_path = save_image(image,path)
            if maybe_path is not None:
                #Sometimes save_image will change the filename, such as adding .png at the end
                path=maybe_path
        if cancelled:
            return
        if show_progress:
            nonlocal number_of_images_saved
            number_of_images_saved+=1
            show_time_remaining(number_of_images_saved)
        return path
    try:
        saved_paths = par_map(_save_image,images,paths)#This is fast because it's multithreaded
    except:
        cancelled=True #Signal the still-running workers to bail out early
        raise
    if show_progress:
        end_time=gtoc()
        elapsed_time=end_time-start_time
        sys.stdout.write('\033[2K\033[1G')#erase and go to beginning of line https://stackoverflow.com/questions/5290994/remove-and-replace-printed-items
        print('rp.save_images: Done! Saved %i images in %.3f seconds'%(len(images),elapsed_time))#This is here because of the specifics of the eta function we're using to display progress
    return saved_paths
def temp_saved_image(image):
    """
    Save `image` to a uniquely-named temporary png file and return that file's path.
    Originally used for google colab to display images nicely:
        from IPython.display import Image
        Image(temp_saved_image(some_numpy_image)) #<-- Displays image at FULL resolution
    """
    image_name="rp_temp_saved_image_"+random_namespace_hash(10)
    image=as_byte_image(as_rgba_image(as_float_image(image)))
    #Fix: trust save_image's returned path instead of re-deriving it by appending '.png'.
    #save_image adds the '.png' itself for extensionless names; hard-coding it here duplicated that logic.
    return save_image(image,image_name)
def save_image_to_imgur(image):
    """
    Takes an image, or an image path
    Returns the url of the saved image
    Note: This function can sometimes take up to 10 seconds, depending on the size of the input image
    """
    assert is_image_file(image) or is_image(image),'The input image must either be a path to an image, or a numpy array representing an image'
    if isinstance(image,str):
        assert file_exists(image),'Cannot find a file at path '+repr(image)
        #Bugfix: this used to say repr(path), but no 'path' variable exists here - it raised NameError instead of the intended AssertionError
        assert is_image_file(image),'There is a file, but its not an image: '+repr(image)
        image_path=image
        pip_import('imgurpython')
        from imgurpython import ImgurClient
        client=ImgurClient(client_id='e5b018ddc6db007',client_secret='2adb606c63637a04a55dfcbe7e929fb64f48b83d')#Please don't abuse this. There are limited uploads per month.
        response = client.upload_from_path(image_path, anon=True)
        return response['link']
    elif is_image(image):
        #Numpy image: round-trip it through a temporary png file, then recurse on the path branch
        temp_image_path=temporary_file_path('.png')
        try:
            save_image(image,temp_image_path)
            return save_image_to_imgur(temp_image_path)
        finally:
            if file_exists(temp_image_path):
                delete_file(temp_image_path)
def save_image_jpg(image,path=None,*,quality=100,add_extension=True):
    """
    Save `image` as a JPEG file and return the path it was written to.
    If add_extension is True, will add a '.jpg' extension to path IFF it doesn't allready end with such an extension (AKA 'a/b/c.jpg' -> 'a/b/c.jpg' BUT 'a/b/c.png' -> 'a/b/c.png.jpg')
    quality is a percentage in [0, 100]; quality==100 also disables chroma subsampling for maximum fidelity.
    """
    if path is None:
        path=get_unique_copy_path('image.jpg')
    make_parent_directory(path) #Make sure the directory exists
    image=as_numpy_image(image)
    image=as_rgb_image(image) #JPEG has no alpha channel
    image=as_byte_image(image)
    assert 0<=quality<=100,'Jpg quality is measured in percent'
    from PIL import Image
    #Bugfix: the add_extension flag used to be ignored - the extension was appended unconditionally
    if add_extension and get_file_extension(path).lower() not in {'jpeg','jpg'}:
        path+='.jpg'
    extra_kwargs={}
    if quality==100:
        #Chroma Subsampling != 0 --> Essentially gives Hue a lower resolution than brightness
        #https://stackoverflow.com/questions/19303621/why-is-the-quality-of-jpeg-images-produced-by-pil-so-poor
        #https://pillow.readthedocs.io/en/stable/reference/JpegPresets.html
        extra_kwargs['subsampling']=0
    Image.fromarray(image).save(path, "JPEG", quality=quality, optimize=False, progressive=True,**extra_kwargs)
    return path
def save_image_webp(image, path=None, *, quality=100, add_extension=True):
    """
    Write `image` as a WebP file and return the path it was saved to.
    A quality of 100 selects lossless WebP encoding; anything lower is lossy.
    If add_extension is True, adds '.webp' extension if not already present.
    """
    pixels = as_numpy_image(image)
    if is_grayscale_image(pixels):
        pixels = as_rgb_image(pixels)
    pixels = as_byte_image(pixels)
    if path is None:
        path = get_unique_copy_path('image.webp')
    make_parent_directory(path)
    assert 0 <= quality <= 100, 'WebP quality is measured in percent'
    if add_extension and get_file_extension(path).lower() != 'webp':
        path += '.webp'
    use_lossless = quality == 100
    pil_image = as_pil_image(pixels)
    pil_image.save(path, "WEBP", lossless=use_lossless, quality=quality)
    return path
def save_image_avif(image, path=None, *, quality=100, add_extension=True):
    """
    Write `image` as an AVIF file and return the path it was saved to.
    A quality of 100 selects lossless encoding; anything lower is lossy.
    If add_extension is True, adds '.avif' extension if not already present.
    """
    pip_import("pillow_avif", "pillow-avif-plugin") #Registers the AVIF codec with PIL
    pixels = as_numpy_image(image)
    if is_grayscale_image(pixels):
        pixels = as_rgb_image(pixels)
    pixels = as_byte_image(pixels)
    if path is None:
        path = get_unique_copy_path('image.avif')
    make_parent_directory(path)
    assert 0 <= quality <= 100, 'AVIF quality is measured in percent'
    if add_extension and get_file_extension(path).lower() != 'avif':
        path += '.avif'
    use_lossless = quality == 100
    pil_image = as_pil_image(pixels)
    pil_image.save(path, "AVIF", lossless=use_lossless, quality=quality)
    return path
def save_image_jxl(image, path=None, *, quality=100, add_extension=True):
    """
    Save image in JPEG XL format. Set quality=100 for lossless compression.
    If add_extension is True, adds '.jxl' extension if not already present.
    EXAMPLE (Comparison video between JPG and JXL quality):
        >>> emma = load_image('https://github.com/RyannDaGreat/Diffusion-Illusions/blob/gh-pages/images/emma.png?raw=true', use_cache=True)
        ... text_image = load_image('https://lh3.googleusercontent.com/EE9uifZsj9rVE4PDHKRx4jaTYUymIDItRbgxCNzKc7o14NJijwvj2uhSC7oKByRfxEF1SRqMUVispOb3W6r340P4KA=w640-h400-e365-rj-sc0x00ffffff',use_cache=True)
        ... text_image=resize_image_to_fit(text_image,width=get_image_width(emma))
        ... emma=vertically_concatenated_images(emma,text_image)
        ... emma = full_range(emma)
        ...
        ... dir = make_directory('jxl_jpg_webp_avif_comparisons')
        ... r._pterm_cd(dir)
        ...
        ... jpg_sizes = []
        ... jxl_sizes = []
        ... wep_sizes = []
        ... avf_sizes = []
        ...
        ... jpg_paths = []
        ... jxl_paths = []
        ... wep_paths = []
        ... avf_paths = []
        ...
        ... qualities = [1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,91,92,93,94,95,96,97,98,99,100]
        ...
        ... for quality in eta(qualities, title='Saving Images'):
        ...     jpg_path = save_image_jpg (emma, path=f'{quality}', quality=quality)
        ...     jxl_path = save_image_jxl (emma, path=f'{quality}', quality=quality)
        ...     wep_path = save_image_webp(emma, path=f'{quality}', quality=quality)
        ...     avf_path = save_image_avif(emma, path=f'{quality}', quality=quality)
        ...
        ...     jpg_paths += [jpg_path]
        ...     jxl_paths += [jxl_path]
        ...     wep_paths += [wep_path]
        ...     avf_paths += [avf_path]
        ...
        ...     jpg_sizes.append(get_file_size(jpg_path))
        ...     jxl_sizes.append(get_file_size(jxl_path))
        ...     wep_sizes.append(get_file_size(wep_path))
        ...     avf_sizes.append(get_file_size(avf_path))
        ...
        ... #########
        ...
        ... comparisons = []
        ...
        ... jpg_index=len(qualities)-1
        ... jxl_index=len(qualities)-1
        ... wep_index=len(qualities)-1
        ... avf_index=len(qualities)-1
        ...
        ... while jpg_index>=0 and jxl_index>=0 and wep_index>=0 and avf_index>=0:
        ...
        ...     print(jpg_index,jxl_index,wep_index,avf_index)
        ...     #Making sure we're always comparing near-equal filesizes
        ...     #We keep popping the largest file of the 4 types
        ...
        ...     jpg_size = jpg_sizes[jpg_index]
        ...     jxl_size = jxl_sizes[jxl_index]
        ...     wep_size = wep_sizes[wep_index]
        ...     avf_size = avf_sizes[avf_index]
        ...
        ...     jpg_path = jpg_paths[jpg_index]
        ...     jxl_path = jxl_paths[jxl_index]
        ...     wep_path = wep_paths[wep_index]
        ...     avf_path = avf_paths[avf_index]
        ...
        ...     jpg_img = load_image(jpg_path, use_cache=True)
        ...     jxl_img = load_image(jxl_path, use_cache=True)
        ...     wep_img = load_image(wep_path, use_cache=True)
        ...     avf_img = load_image(avf_path, use_cache=True)
        ...
        ...     jpg_img = labeled_image(jpg_img, f'JPG\n{jpg_path}\n{human_readable_file_size(jpg_size)}', font='G:Hind', size=20, size_by_lines=True)
        ...     jxl_img = labeled_image(jxl_img, f'JXL\n{jxl_path}\n{human_readable_file_size(jxl_size)}', font='G:Hind', size=20, size_by_lines=True)
        ...     wep_img = labeled_image(wep_img, f'WEBP\n{wep_path}\n{human_readable_file_size(wep_size)}', font='G:Hind', size=20, size_by_lines=True)
        ...     avf_img = labeled_image(avf_img, f'AVIF\n{avf_path}\n{human_readable_file_size(avf_size)}', font='G:Hind', size=20, size_by_lines=True)
        ...
        ...     comparison = tiled_images([jpg_img, jxl_img, wep_img, avf_img], border_color='red')
        ...     comparisons.append(comparison)
        ...
        ...     display_image(comparison)
        ...
        ...     size_max = max(jpg_size, jxl_size, wep_size, avf_size)
        ...
        ...     if jpg_size == size_max: jpg_index-=1
        ...     if jxl_size == size_max: jxl_index-=1
        ...     if wep_size == size_max: wep_index-=1
        ...     if avf_size == size_max: avf_index-=1
        ...
        ... mp4 = save_video_mp4(comparisons, framerate=60, video_bitrate='max')
        ... open_file_with_default_application(mp4)
        ...
        ... #final_grid = tiled_images(comparisons)
        ... #png = save_image(final_grid, 'comparison_grid.png')
    """
    pip_import("pillow_jxl", "pillow-jxl-plugin") #Registers the JXL codec with PIL
    image = as_numpy_image(image)
    if is_grayscale_image(image):
        image = as_rgb_image(image)
    image = as_byte_image(image)
    if path is None:
        path = get_unique_copy_path('image.jxl')
    make_parent_directory(path)
    assert 0 <= quality <= 100, 'JpgXL quality is measured in percent'
    if add_extension and not get_file_extension(path).lower() == 'jxl':
        path += '.jxl'
    #quality==100 requests true mathematically-lossless encoding, not just quality=100 lossy
    kwargs = dict(lossless=True) if quality == 100 else dict(quality=quality)
    as_pil_image(image).save(path, "JXL", **kwargs)
    return path
def save_animated_webp(video, path=None, *, framerate=60, quality=100, loop=True, add_extension=True):
    """
    Write a sequence of frames as an animated WebP file and return its path.
    If add_extension is True, adds '.webp' extension if not already present.
    """
    if path is None:
        path = get_unique_copy_path('video.webp')
    make_parent_directory(path)
    assert 0 <= quality <= 100, 'WebP quality is measured in percent'
    if add_extension and get_file_extension(path).lower() != 'webp':
        path += '.webp'
    frames = as_pil_images(as_byte_images(as_rgba_images(video)))
    first_frame = frames[0]
    first_frame.save(
        path,
        "WEBP",
        save_all=True,
        append_images=frames[1:],
        duration=1000 // framerate, #Per-frame duration in milliseconds
        loop=0 if loop else 1,      #0 means loop forever
        quality=quality,
    )
    return path
save_video_webp=save_animated_webp
def save_openexr_image(image, file_path):
    """
    Counterpart to load_openexr_image: writes `image` to `file_path` as an OpenEXR file.
    Grayscale inputs are promoted to RGB; pixel data is written as 32-bit floats.
    TODO: Add support for custom channels
    This code is based on https://stackoverflow.com/questions/65605761/write-pil-image-to-exr-using-openexr-in-python
    """
    pip_import('OpenEXR')
    pip_import('Imath')
    pip_import('numpy')
    import OpenEXR
    import Imath
    import numpy as np

    #Input assertions
    assert is_image(image)
    assert isinstance(file_path,str)

    #EXR wants an RGB or RGBA floating point image
    if is_grayscale_image(image):
        image=as_rgb_image(image)
    image=as_float_image(image,copy=False)
    assert len(image.shape)==3 #(height, width, num_channels)
    height, width, num_channels = image.shape
    assert num_channels in (3,4) #Guaranteed: rp.is_image and not grayscale
    channel_names=('R','G','B','A')[:num_channels]

    #Build the EXR header with one float channel per color component
    float_type=Imath.PixelType(Imath.PixelType.FLOAT)
    header=OpenEXR.Header(width,height)
    header['channels']={name:Imath.Channel(float_type) for name in channel_names}
    #print(header) #Shows interesting info. Useful for debugging the OpenEXR library - it doesnt have much documentation

    exr_file=OpenEXR.OutputFile(file_path,header)
    pixel_data={name:image[:,:,index].astype(np.float32).tobytes() for index,name in enumerate(channel_names)}
    exr_file.writePixels(pixel_data)
    exr_file.close()
def _get_files_from_paths(paths, get_files=None):
    """
    Normalize `paths` (a folder, a file, a glob pattern, or a mixed list of these) into a flat list of file paths.
    Glob patterns are expanded first; then every folder is replaced by the files `get_files` finds inside it.
    This function is used in other functions to preprocess some argument that aims to specify a bunch of files.
    It will always return a list of paths, none of which should be folders
    """
    from glob import glob
    if get_files is None:
        get_files = rp.get_all_files
    if isinstance(paths,str):
        paths = [paths]
    globbed = list_flatten(glob(pattern) for pattern in paths)
    output = []
    for entry in globbed:
        if is_a_folder(entry):
            output.extend(get_files(entry))
        else:
            output.append(entry)
    return output
def convert_image_file(
    input_file,
    new_extension=None,
    output_folder=None,
    *,
    skip_overwrite=False,
    image_transform=lambda image, image_file: image,
    name_transform=lambda file_name: file_name,
    load_image=lambda path : rp.load_image(path ),
    save_image=lambda image, path: rp.save_image(image, path),
    delete_original=False
):
    """
    Converts an image file to a specified format and saves it to the provided output folder.
    It can also be used to resize image files or recolor them or whatever - beyond just filetype conversions.
    Args:
        input_file (str): Path to the image file to be converted.
        new_extension (str, optional): Desired extension for the output files. If None, the extension is not changed.
        output_folder (str, optional): Path to the folder where the converted image will be saved.
            If not provided, the image is saved in the same folder as the input file.
        skip_overwrite (bool, optional): If True, won't overwrite existing files. Defaults to False.
        image_transform (func, optional): A function that modifies the image before it's saved again.
            It should take in an image and a path string and return an image.
            It also works if you pass it a 1-argument function like lambda image:image.
        name_transform (func, optional): Modifies the image name (without extension). Takes in the old name and returns a new one.
            Defaults to no change. But you might want to have something like lambda x:"new_"+x etc.
            Note that it will receive a file name like "image" and not "image.png" or "/path/to/image.png"
            It should return an image name in the same way (no /'s or file extensions)
        load_image (func, optional): A function to load the image file. Defaults to rp.load_image(path).
        save_image (func, optional): A function to save the image file. Defaults to rp.save_image(image, path).
        delete_original (bool, optional): USE WITH CAUTION! If true, will delete the original input image after converting it.
            (If the output path turns out to be the same file as the input, nothing is deleted.)
    Returns:
        str: Path to the output file.
    Raises:
        TypeError: If the input file, output folder, or new extension is not a string.
    """
    if output_folder is None:
        output_folder = get_parent_folder(input_file)
    if new_extension is None:
        new_extension = get_file_extension(input_file)
    if not isinstance(input_file , str): raise TypeError("Input file must be a string, but got %s." %type(input_file ))
    if not isinstance(new_extension, str): raise TypeError("New extension must be a string, but got %s."%type(new_extension))
    if not isinstance(output_folder, str): raise TypeError("Output folder must be a string, but got %s."%type(output_folder))

    #Derive the output path: same base name, run through name_transform, with the new extension
    input_file_name = get_file_name(input_file, include_file_extension=False)
    output_file_name = input_file_name
    output_file_name = name_transform(output_file_name)
    output_file_name = with_file_extension(output_file_name, new_extension)
    output_file = path_join(output_folder, output_file_name)
    assert isinstance(output_file, str)

    if file_exists(output_file) and skip_overwrite:
        return output_file

    image = load_image(input_file)
    try:
        image = image_transform(image, input_file)
    except TypeError:
        #Support 1-argument transforms like lambda image: image
        image = image_transform(image)

    output = save_image(image, output_file)

    if delete_original and file_exists(output) and file_exists(input_file):
        #Bugfix: when the conversion writes back to the exact same path as the input
        #(same folder, name and extension), deleting the 'original' would destroy
        #the output we just created - so only delete when the paths truly differ.
        if get_absolute_path(output) != get_absolute_path(input_file):
            delete_file(input_file)

    return output
def convert_image_files(
    input_files=".",
    new_extension=None,
    output_folder=None,
    *,
    strict=True,
    parallel=True,
    show_progress=False,
    skip_overwrite=False,
    image_transform=lambda image, image_file: image,
    name_transform=lambda file_name: file_name,
    load_image=lambda path : rp.load_image(path ),
    save_image=lambda image, path: rp.save_image(image, path),
    delete_original=False
):
    """
    Converts multiple image files to a specified format and saves them to the provided output folder. The function
    leverages concurrent processing for enhanced performance. The function also includes a strict mode that controls
    how the function behaves when an image fails to convert.
    It can also be used to resize image files or recolor them or whatever - beyond just filetype conversions.
    Args:
        input_files (list or str): Paths to the image files to be converted, or a folder containing image files.
            Defaults to '.', aka all images in the current working directory.
        new_extension (str, optional): Desired extension for the output files. If None, the extension is not changed.
        output_folder (str, optional): Path to the folder where the converted images will be saved.
            If not provided, the images are saved in the same folder as the input files.
        strict (bool, optional): Controls what happens when an image fails to convert.
            If True (default), an error is raised.
            If False, images that fail to convert are skipped.
            If None, positions of images that fail to convert are filled with None in the output.
        parallel (bool, optional): If True, runs multithreaded. Otherwise computes in main thread.
        show_progress (bool, optional): If True, shows a progress bar. Defaults to False.
        skip_overwrite (bool, optional): If True, won't overwrite existing files. Defaults to False.
        image_transform (func, optional): A function that modifies the images before they're saved again.
            It should take in an image and a path string and return an image.
            It also works if you pass it a 1-argument function like lambda image:image.
        name_transform (func, optional): Modifies the image name (without extension). Takes in the old name and returns a new one.
            Defaults to no change. But you might want to have something like lambda x:"new_"+x etc.
            Note that it will receive a file name like "image" and not "image.png" or "/path/to/image.png"
            It should return an image name in the same way (no /'s or file extensions)
        load_image (func, optional): A function to load the image file. Defaults to rp.load_image(path).
        save_image (func, optional): A function to save the image file. Defaults to rp.save_image(image, path).
        delete_original (bool, optional): USE WITH CAUTION! If true, will delete the original input image after converting it.
    Returns:
        list: Paths to the output files.
    Raises:
        TypeError: If the input files, output folder, or new extension is not of type string.
    Example:
        convert_image_files(
            "photoshop_files/*.psd",
            new_extension="png",
            output_folder="output_files",
            image_transform=lambda image, path: labeled_image(
                resize_image(image, 0.1), text=get_file_name(path)),
            name_transform=lambda name: "smaller_labeled_" + name,
            show_progress=True,
            skip_overwrite=True,
        )
    """
    assert is_iterable(input_files), "Input files should be a list of files or a string (a folder path), but got {}.".format(type(input_files))
    assert output_folder is None or isinstance(output_folder, str), "Output folder must be a string, but got {}.".format(type(output_folder))
    assert strict in {True, False, None}, 'The \'strict\' parameter must be set to either True, False, or None.'
    import functools
    input_files = _get_files_from_paths(input_files, get_files=get_all_image_files)
    #cancelled doubles as a flag and as the exception that triggered cancellation (so sibling workers can re-raise it)
    cancelled = False
    output_files = []
    def _convert_image(input_file):
        #Worker run (possibly concurrently) for each input file; returns the output path or None
        output = None #I'm not sure why but somehow the controlflow can avoid declaring output in some edge case. Instead of debugging it I'll just fix it here.
        nonlocal cancelled
        if cancelled:
            if isinstance(cancelled, Exception):
                raise cancelled
            else:
                return None
        try:
            output = convert_image_file(input_file, new_extension, output_folder,
                skip_overwrite = skip_overwrite,
                image_transform = image_transform,
                name_transform = name_transform,
                load_image = load_image,
                save_image = save_image,
                delete_original = delete_original)
        except Exception as e:
            if strict is True:
                cancelled = e #Signal every other worker to stop, then propagate
                raise
            elif strict is None:
                output = None
            #strict is False: fall through, the None is filtered out below
        if show_progress and not cancelled:
            nonlocal number_of_images_converted
            number_of_images_converted += 1
            show_time_remaining(number_of_images_converted)
        return output
    try:
        if show_progress:
            number_of_images_converted = 0
            show_time_remaining = eta(len(input_files), title='Converting Images')
            start_time = gtoc()
        mapper = functools.partial(par_map, buffer_limit=None) if parallel else seq_map
        output_files = mapper(_convert_image, input_files)
        if strict is False:
            #Drop the entries that failed to convert
            output_files = [output_file for output_file in output_files if output_file is not None]
        if show_progress:
            end_time = gtoc()
            elapsed_time = end_time - start_time
            sys.stdout.write('\033[2K\033[1G') # erase and go to beginning of line
            print('Converted %i images in %.3f seconds' % (len(output_files), elapsed_time))
            #TODO: Sometimes it reports it converted 0 images when it actually worked just fine - it's not keeping track of how many conversions it made?
    except KeyboardInterrupt:
        cancelled = True #Make the remaining workers bail out early
        raise
    return output_files
# endregion
# region Text-To-Speech: [text_to_speech,text_to_speech_via_apple,text_to_speech_via_google,text_to_speech_voices_comparison,text_to_speech_voices_for_apple,text_to_speech_voices_for_google,text_to_speech_voices_all,text_to_speech_voices_favorites]
# region [text_to_speech_via_apple]
#Voice names accepted by macOS's `say -v` command (run `say -v ?` in a terminal to list them on your machine)
text_to_speech_voices_for_apple=['Alex','Alice','Alva','Amelie','Anna','Carmit','Damayanti','Daniel','Diego','Ellen','Fiona','Fred','Ioana','Joana','Jorge','Juan','Kanya','Karen','Kyoko','Laura','Lekha','Luca','Luciana','Maged','Mariska','Mei-Jia','Melina','Milena','Moira','Monica','Nora','Paulina','Samantha','Sara','Satu','Sin-ji','Tessa','Thomas','Ting-Ting','Veena','Victoria','Xander','Yelda','Yuna','Yuri','Zosia','Zuzana'] # The old voices (that don't work on sierra. They used to work on el-capitan though): ["Samantha",'Bad News','Bahh','Bells','Boing','Bubbles','Cellos','Deranged','Good News','Hysterical','Pipe Organ','Trinoids','Whisper','Zarvox','Agnes','Kathy','Princess','Vicki','Victoria','Alex','Bruce','Fred','Junior','Ralph','Albert']
# Favorites (in this order): Samantha, Alex, Moira, Tessa, Fiona, Fred
def text_to_speech_via_apple(text: str,voice="Samantha",run_as_thread=True,rate_in_words_per_minute=None,filter_characters=True):
    # Speak `text` aloud via macOS's `say` command. Only works on macs.
    # region All text_to_speech_via_apple voices along with their descriptions (type 'say -v ?' into terminal to get this):
    """
    Alex                en_US    # Most people recognize me by my voice.
    Alice               it_IT    # Salve, mi chiamo Alice e sono una voce italiana.
    Alva                sv_SE    # Hej, jag heter Alva. Jag är en svensk röst.
    Amelie              fr_CA    # Bonjour, je m’appelle Amelie. Je suis une voix canadienne.
    Anna                de_DE    # Hallo, ich heiße Anna und ich bin eine deutsche Stimme.
    Carmit              he_IL    # שלום. קוראים לי כרמית, ואני קול בשפה העברית.
    Damayanti           id_ID    # Halo, nama saya Damayanti. Saya berbahasa Indonesia.
    Daniel              en_GB    # Hello, my name is Daniel. I am a British-English voice.
    Diego               es_AR    # Hola, me llamo Diego y soy una voz española.
    Ellen               nl_BE    # Hallo, mijn naam is Ellen. Ik ben een Belgische stem.
    Fiona               en-scotland # Hello, my name is Fiona. I am a Scottish-English voice.
    Fred                en_US    # I sure like being inside this fancy computer
    Ioana               ro_RO    # Bună, mă cheamă Ioana . Sunt o voce românească.
    Joana               pt_PT    # Olá, chamo-me Joana e dou voz ao português falado em Portugal.
    Jorge               es_ES    # Hola, me llamo Jorge y soy una voz española.
    Juan                es_MX    # Hola, me llamo Juan y soy una voz mexicana.
    Kanya               th_TH    # สวัสดีค่ะ ดิฉันชื่อKanya
    Karen               en_AU    # Hello, my name is Karen. I am an Australian-English voice.
    Kyoko               ja_JP    # こんにちは、私の名前はKyokoです。日本語の音声をお届けします。
    Laura               sk_SK    # Ahoj. Volám sa Laura . Som hlas v slovenskom jazyku.
    Lekha               hi_IN    # नमस्कार, मेरा नाम लेखा है.Lekha मै हिंदी मे बोलने वाली आवाज़ हूँ.
    Luca                it_IT    # Salve, mi chiamo Luca e sono una voce italiana.
    Luciana             pt_BR    # Olá, o meu nome é Luciana e a minha voz corresponde ao português que é falado no Brasil
    Maged               ar_SA    # مرحبًا اسمي Maged. أنا عربي من السعودية.
    Mariska             hu_HU    # Üdvözlöm! Mariska vagyok. Én vagyok a magyar hang.
    Mei-Jia             zh_TW    # 您好,我叫美佳。我說國語。
    Melina              el_GR    # Γεια σας, ονομάζομαι Melina. Είμαι μια ελληνική φωνή.
    Milena              ru_RU    # Здравствуйте, меня зовут Milena. Я – русский голос системы.
    Moira               en_IE    # Hello, my name is Moira. I am an Irish-English voice.
    Monica              es_ES    # Hola, me llamo Monica y soy una voz española.
    Nora                nb_NO    # Hei, jeg heter Nora. Jeg er en norsk stemme.
    Paulina             es_MX    # Hola, me llamo Paulina y soy una voz mexicana.
    Samantha            en_US    # Hello, my name is Samantha. I am an American-English voice.
    Sara                da_DK    # Hej, jeg hedder Sara. Jeg er en dansk stemme.
    Satu                fi_FI    # Hei, minun nimeni on Satu. Olen suomalainen ääni.
    Sin-ji              zh_HK    # 您好,我叫 Sin-ji。我講廣東話。
    Tessa               en_ZA    # Hello, my name is Tessa. I am a South African-English voice.
    Thomas              fr_FR    # Bonjour, je m’appelle Thomas. Je suis une voix française.
    Ting-Ting           zh_CN    # 您好,我叫Ting-Ting。我讲中文普通话。
    Veena               en_IN    # Hello, my name is Veena. I am an Indian-English voice.
    Victoria            en_US    # Isn't it nice to have a computer that will talk to you?
    Xander              nl_NL    # Hallo, mijn naam is Xander. Ik ben een Nederlandse stem.
    Yelda               tr_TR    # Merhaba, benim adım Yelda. Ben Türkçe bir sesim.
    Yuna                ko_KR    # 안녕하세요. 제 이름은 Yuna입니다. 저는 한국어 음성입니다.
    Yuri                ru_RU    # Здравствуйте, меня зовут Yuri. Я – русский голос системы.
    Zosia               pl_PL    # Witaj. Mam na imię Zosia, jestem głosem kobiecym dla języka polskiego.
    Zuzana              cs_CZ    # Dobrý den, jmenuji se Zuzana. Jsem český hlas."""
    # endregion
    # Only works on macs
    assert voice in text_to_speech_voices_for_apple
    text=str(text)
    if filter_characters: # So you don't have to worry about confusing the terminal with command characters like '|', which would stop the terminal from reading anything beyond that.
        text=''.join(list(c if c.isalnum() or c in ".," else " " for c in text)) # remove_characters_that_confuse_the_terminal
    if rate_in_words_per_minute is not None and not 90 <= rate_in_words_per_minute <= 720:
        fansi_print("r.text_to_speech_via_apple: The rate you chose is ineffective. Empirically, I found that only rates between 90 and 720 have any effect in terminal, \n and you gave me a rate of " + str(rate_in_words_per_minute) + " words per minute. This is the same thing as not specifying a rate at all, as it won't cap off at the max or min.")
    # Build the `say` shell command (with an optional -r rate flag), wrap it with fog for deferred execution,
    # then either launch it on a new thread (run_as_thread=True) or run it synchronously.
    (run_as_new_thread if run_as_thread else run_func)(fog(shell_command,("say -v " + voice + ((" -r " + str(rate_in_words_per_minute)) if rate_in_words_per_minute else"") + " " + text)))
# OLD, DIRTIER CODE: (for example, it references shell_command twice!! The new one of course doesn't do that.)
# def text_to_speech_via_apple(msg:str,voice="Samantha",run_as_thread=True,filter_characters=True):
# if filter_characters:
# msg=''.join(list(c if c.isalnum() or c in ".," else " " for c in msg))# remove_characters_that_confuse_the_terminal
# # Only works on macs
# assert voice in text_to_speech_voices_for_apple
# if run_as_thread:
# run_as_new_thread(lambda :shell_command("say -v "+voice+" "+msg))
# else:
# shell_command("say -v " + voice + " " + msg)
# endregion
# region [text_to_speech_via_google]
#Language/locale codes accepted by Google's text-to-speech API (see the gTTS.LANGUAGES table below for full names)
text_to_speech_voices_for_google=['fr','es-us','el','sr','sv','la','af','lv','zh-tw','sq','da','en-au','ko','cy','mk','id','hy','es','ro','is','zh-yue','hi','zh-cn','th','ta','it','de','ca','sw','ar','nl','pt','cs','sk','ja','tr','zh','hr','es-es','eo','pt-br','pl','fi','hu','en','ru','en-uk','bn','no','en-us','vi']
_text_to_speech_via_google_sound_cache={} #NOTE(review): presumably caches fetched audio keyed by the spoken text - confirm against its usage
#def text_to_sound(text):
#    #Takes a string, turns it into audio (a numpy vector with range [-1,1]) via google's text-to-speech api
def text_to_speech_via_google(text: str,voice='en',*,play_sound: bool = True,run_as_thread: bool = True):
    """
    Speak `text` out loud using Google's online text-to-speech API.

    Parameters:
        text: the string to pronounce
        voice: a language code from text_to_speech_voices_for_google (e.g. 'en', 'da', 'fr')
        play_sound: if True, play the synthesized audio through the speakers
        run_as_thread: if True, do the network request and playback on a background thread

    This only works when online, and has a larger latency than the native OSX text-to-speech function.
    Favorite voices: da
    Synthesized audio is cached in _text_to_speech_via_google_sound_cache, so repeated phrases skip the network.
    Note that this method has to save a temporary mp3 file in order to work; it is deleted afterwards.
    """
    # region gTTS: My own version of https://github.com/pndurette/gTTS (I modified it so that it can actually play voices from other languages, which it couldn't do before. I put that functionality in a comment because I don't know how to use Github yet (Feb 2017))
    pip_import('requests')
    import re,requests
    pip_import('gtts_token')
    from gtts_token.gtts_token import Token
    mp3_file_path=temporary_file_path('mp3')
    class gTTS:
        """ gTTS (Google Text to Speech): an interface to Google'_s Text to Speech API """
        GOOGLE_TTS_URL='https://translate.google.com/translate_tts'
        MAX_CHARS=100 # Max characters the Google TTS API takes at a time
        LANGUAGES={
            'af':'Afrikaans',
            'sq':'Albanian',
            'ar':'Arabic',
            'hy':'Armenian',
            'bn':'Bengali',
            'ca':'Catalan',
            'zh':'Chinese',
            'zh-cn':'Chinese (Mandarin/China)',
            'zh-tw':'Chinese (Mandarin/Taiwan)',
            'zh-yue':'Chinese (Cantonese)',
            'hr':'Croatian',
            'cs':'Czech',
            'da':'Danish',
            'nl':'Dutch',
            'en':'English',
            'en-au':'English (Australia)',
            'en-uk':'English (United Kingdom)',
            'en-us':'English (United States)',
            'eo':'Esperanto',
            'fi':'Finnish',
            'fr':'French',
            'de':'German',
            'el':'Greek',
            'hi':'Hindi',
            'hu':'Hungarian',
            'is':'Icelandic',
            'id':'Indonesian',
            'it':'Italian',
            'ja':'Japanese',
            'ko':'Korean',
            'la':'Latin',
            'lv':'Latvian',
            'mk':'Macedonian',
            'no':'Norwegian',
            'pl':'Polish',
            'pt':'Portuguese',
            'pt-br':'Portuguese (Brazil)',
            'ro':'Romanian',
            'ru':'Russian',
            'sr':'Serbian',
            'sk':'Slovak',
            'es':'Spanish',
            'es-es':'Spanish (Spain)',
            'es-us':'Spanish (United States)',
            'sw':'Swahili',
            'sv':'Swedish',
            'ta':'Tamil',
            'th':'Thai',
            'tr':'Turkish',
            'vi':'Vietnamese',
            'cy':'Welsh'
        }
        def __init__(self,text,lang='en',debug=False):
            self.debug=debug
            if lang.lower() not in self.LANGUAGES:
                raise Exception('Language not supported: %s' % lang)
            else:
                self.lang=lang.lower()
            if not text:
                raise Exception('No text to speak')
            else:
                self.text=text
            # Split text in parts
            if len(text) <= self.MAX_CHARS:
                text_parts=[text]
            else:
                text_parts=self._tokenize(text,self.MAX_CHARS)
            # Clean
            def strip(x):
                return x.replace('\n','').strip()
            text_parts=[strip(x) for x in text_parts]
            text_parts=[x for x in text_parts if len(x) > 0]
            self.text_parts=text_parts
            # Google Translate token
            self.token=Token()
        def save(self,savefile):
            """ Do the Web request and save to `savefile` """
            with open(savefile,'wb') as f:
                self.write_to_fp(f)
        def write_to_fp(self,fp):
            """ Do the Web request and save to a file-like object """
            # (An unused duplicate of the LANGUAGES dict used to live here - it was dead code and has been removed)
            for idx,part in enumerate(self.text_parts):
                payload={'ie':'UTF-8',
                         'q':part,
                         'tl':self.lang,
                         'total':len(self.text_parts),
                         'idx':idx,
                         'client':'tw-ob',
                         'textlen':len(part),
                         'tk':self.token.calculate_token(part)}
                headers={
                    "Referer":"http://translate.google.com/",
                    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"
                }
                if self.debug: print(payload)
                r=requests.get(self.GOOGLE_TTS_URL,params=payload,headers=headers)
                if self.debug:
                    print("Headers: {}".format(r.request.headers))
                    print("Reponse: {}, Redirects: {}".format(r.status_code,r.history))
                r.raise_for_status() # Raises requests.HTTPError on a bad response
                for chunk in r.iter_content(chunk_size=1024):
                    fp.write(chunk)
        def _tokenize(self,text,max_size):
            """ Tokenizer on basic roman punctuation """
            punc="¡!()[]¿?.,;:—«»\n"
            punc_list=[re.escape(c) for c in punc]
            pattern='|'.join(punc_list)
            parts=re.split(pattern,text)
            min_parts=[]
            for p in parts:
                min_parts+=self._minimize(p," ",max_size)
            return min_parts
        def _minimize(self,thestring,delim,max_size):
            """ Recursive function that splits `thestring` in chunks
            of maximum `max_size` chars delimited by `delim`. Returns list. """
            if len(thestring) > max_size:
                idx=thestring.rfind(delim,0,max_size)
                return [thestring[:idx]] + self._minimize(thestring[idx:],delim,max_size)
            else:
                return [thestring]
    # endregion
    # endregion
    if run_as_thread:
        # BUGFIX: this used to CALL the function synchronously and hand its result to
        # run_as_new_thread, while also passing a nonexistent mp3_file_path keyword
        # (an instant TypeError). Pass the function itself, like text_to_speech does.
        return run_as_new_thread(text_to_speech_via_google,text=text,voice=voice,play_sound=play_sound,run_as_thread=False)
    lang=voice
    assert lang in text_to_speech_voices_for_google,'r.text_to_speech_via_google: The language you input, "' + lang + '", is not a valid option! Please choose one of the following values for lang instead: ' + ', '.join(text_to_speech_voices_for_google) # These are the available languages we can choose from.
    if not (text,lang) in _text_to_speech_via_google_sound_cache:
        gTTS(text=text,lang=lang).save(mp3_file_path) # gTTS is a class, and .save is a function of an instance of that class.
        _text_to_speech_via_google_sound_cache[text,lang]=load_sound_file(mp3_file_path,samplerate=True)
    samples,samplerate=_text_to_speech_via_google_sound_cache[text,lang]
    if play_sound:
        play_sound_from_samples(samples,samplerate)
    if file_exists(mp3_file_path):
        delete_file(mp3_file_path) # The cache holds the decoded samples, so the temp mp3 is no longer needed
# endregion
# Every voice we can dispatch to, regardless of backend (Apple names + Google language codes)
text_to_speech_voices_all=text_to_speech_voices_for_apple + text_to_speech_voices_for_google
# Hand-picked favorites; used as the 'random' pool in text_to_speech and as the first voices tried in text_to_speech_voices_comparison
text_to_speech_voices_favorites=['da','en-au','zh-yue','hi','sk','zh','en','it','Samantha','Alex','Moira','Tessa','Fiona','Fred']
def text_to_speech_voices_comparison(text="Hello world",time_per_voice=2,voices=None):
    """
    Will cycle through different voices so you can choose which one you like best. I selected my favorite voices to be the beginning, and it will cycle through all available voices by the end.

    Parameters:
        text: the phrase spoken by each voice
        time_per_voice: seconds to wait before moving on to the next voice
        voices: iterable of voice names to audition; defaults to the favorites followed by all voices in random order
    """
    if voices is None:
        # BUGFIX: this used to be a default argument, so shuffled(...) was evaluated only
        # once at import time - every call got the exact same "random" order.
        voices = text_to_speech_voices_favorites + shuffled(text_to_speech_voices_all)
    for voice in voices:
        print("Voice: " + voice)
        text_to_speech(text=text,voice=voice,run_as_thread=True)
        sleep(time_per_voice)
def text_to_speech(text: str,voice: str = None,run_as_thread=True):
    """
    An abstract combination of the other two text-to-speech methods that automatically selects the right one depending on platform compatiability/whether you specified a compatiable voice etc.
    Feel free to add more methods into this one: This is what makes the r module so generalizable.
    """
    if run_as_thread:
        # Re-enter this function on a worker thread so the caller never blocks
        run_as_new_thread(text_to_speech, text=text, voice=voice, run_as_thread=False)
        return
    call_kwargs = {'text': text, 'run_as_thread': False}
    if voice is not None:
        if voice.lower() == 'random':  # A little tidbit i decided to throw in
            voice = random_element(text_to_speech_voices_favorites)
        call_kwargs['voice'] = voice
    # Prefer the native backend on macs; everywhere else use Google's online TTS
    backend = text_to_speech_via_apple if currently_running_mac() else text_to_speech_via_google
    backend(**call_kwargs)
# endregion
# region Audio/Sound Functions: [load_sound_file,play_sound_from_samples,play_sound_file,play_sound_file_via_afplay,play_sound_file_via_pygame,stop_sound,mp3_to_wav]
np=None # Module-level alias for numpy, filled in by _module_loader (stays None if numpy is unavailable)
def _module_loader():
    """
    Best-effort eager import of numpy into the module-level `np` alias.
    Importing numpy costs bootup time and it might not be installed at all,
    so any failure is silently swallowed and `np` is simply left as None.
    """
    global np
    try:
        import numpy
        np=numpy
        # My personal default print option preference: I don't want to see all those digits.
        np.set_printoptions(precision=3)
    except:
        pass
fig=None # Singleton matplotlib figure, created lazily by _fig()
def _fig():
    #initialize the fig singleton (also initializes the module-level plt alias on first use)
    global fig
    if fig is None:
        global plt
        plt=get_plt()
        fig=plt.gcf()#to Get Current Figure
        fig=plt.figure()# NOTE(review): the old comment claimed this line was "commented out because it created a second figure" and must run on the main thread to avoid a Mac OS Mojave crash - yet the line is active here; confirm whether it should be disabled
    return fig
def set_numpy_print_options(**kwargs):
    """
    Set individual numpy print options without clobbering the rest.

    np.set_printoptions formats the printed output of arrays, but it has a flaw:
    any option you *don't* pass gets reset to its default value. So a call like
        np.set_printoptions(precision=3,suppress=True,edgeitems=123,linewidth=get_terminal_width())
    silently resets every unspecified parameter. This function takes the same
    arguments np.set_printoptions does, but changes ONLY the ones you give it.
    See np.set_printoptions?/ for documentation of the individual options.

    EXAMPLE: set_numpy_print_options(precision=8) #Prints floating points with up to 8 decimals of precision
    """
    import numpy as np
    current_options = np.get_printoptions()
    for option_name in kwargs:
        # Reject anything np.set_printoptions wouldn't recognize, with a helpful message
        assert option_name in current_options,'set_numpy_print_options: '+repr(option_name)+' is not a valid argument name. Available print options: '+repr(np.get_printoptions())
    merged_options = dict(current_options)
    merged_options.update(kwargs)
    np.set_printoptions(**merged_options)
# Eagerly import numpy at module load time (doing it on a thread broke image display for a ~.1s saving)
_module_loader()# run_as_new_thread(_module_loader) <--- This caused problems when I tried to show images, so the bootup speed increase (like .1 seconds) is definately not worth it
def load_mp3_file(path):
    """
    Takes an mp3 file path, and returns a bunch of samples as a numpy array
    Returns floating-point samples in the range [-1.0 , 1.0], paired with the samplerate.
    Stereo files come back as an (N, 2) array; mono files as a flat (N,) array.
    Adapted from https://stackoverflow.com/questions/53633177/how-to-read-a-mp3-audio-file-into-a-numpy-array-save-a-numpy-array-to-mp3
    TODO: Use this same answer to create a save_mp3_file function
    """
    pip_import('pydub')
    import pydub
    segment = pydub.AudioSegment.from_mp3(path)
    raw = np.array(segment.get_array_of_samples())
    if segment.channels == 2:
        # Interleaved stereo -> one column per channel
        raw = raw.reshape((-1, segment.channels))
    samples = np.float32(raw) / 2**15  # Normalize 16-bit ints into [-1, 1]
    samplerate = segment.frame_rate
    return samples, samplerate
def load_wav_file(path):
    """
    Takes a wav file path, and returns a bunch of samples as a numpy array
    Integer-typed samples are rescaled into the floating-point range [-1.0 , 1.0];
    files already stored as floats are returned untouched. Returns (samples, samplerate).
    """
    pip_import('scipy')
    import scipy.io.wavfile as wav
    rate, data = wav.read(path)
    try:
        # Divide by the dtype's max value so every sample lands in [-1, 1]
        peak = np.iinfo(data.dtype).max
        data = np.ndarray.astype(data, float) / peak
    except Exception:
        pass  # Float dtypes make np.iinfo raise - the data is already in range
    return data, rate
def adjust_samplerate(samples,original_samplerate:int,new_samplerate:int):
    """
    Used to change the samplerate of an audio clip (for example, from 9600hz to 44100hz)

    Parameters:
        samples: numpy array of audio samples (1d mono, or 2d for multi-channel)
        original_samplerate: the samplerate `samples` is currently at, in Hz
        new_samplerate: the samplerate to resample to, in Hz
    Returns the resampled samples as a numpy array.
    """
    pip_import('scipy') # BUGFIX: was pip_install, inconsistent with the rest of this file
    from scipy.signal import resample
    length_in_seconds=len(samples) / original_samplerate # BUGFIX: referenced undefined name 'old_samplerate' (NameError on every call)
    new_number_of_samples=int(length_in_seconds * new_samplerate)
    return resample(samples,num=new_number_of_samples)
def load_sound_file(file_path:str, samplerate:int=None):
    """
    Returns the contents of a sound file at file_path as a numpy array of floats in the range [-1, 1]
    samplerate: either True, None or an int. If True, returns (samples, samplerate). If None, returns (samples at original samplerate). If int, returns (samples converted to samplerate).
    TODO: Add conversion functions between stereo and mono, and add parameters to this function that use them
    """
    # BUGFIX: supported_filetypes must exist before the asserts below, whose failure
    # messages mention it (it used to be defined one line too late, so a missing file
    # extension raised NameError instead of the intended AssertionError).
    supported_filetypes=['mp3','wav']
    #Make sure we support the requested file type
    assert isinstance(file_path,str),'r.load_sound_file: file_path must be a string, but you gave it a %s'%str(type(file_path))
    assert has_file_extension(file_path), 'r.load_sound_file: Your file doesnt have an extension, so I\'m not sure what to do with it. Your file path: %s. Supported filetypes include: %s'%(repr(file_path),', '.join(supported_filetypes))
    # BUGFIX: lowercase the extension once. Previously only the assert used .lower(), so
    # an uppercase extension like 'WAV' passed the assert but matched neither branch
    # below, leaving `samples` unbound.
    filetype=get_file_extension(file_path).lower()
    assert filetype in supported_filetypes, 'r.load_sound_file: Sorry, but this function doesnt support %s files. It only supports the following filetypes: %s'%(filetype,', '.join(supported_filetypes))
    #Load the specific filetype
    if filetype=='wav': samples, original_samplerate = load_wav_file(file_path)
    elif filetype=='mp3': samples, original_samplerate = load_mp3_file(file_path)
    #Handle the samplerate parameter
    if samplerate is True:
        return samples, original_samplerate
    elif samplerate is None:
        return samples
    elif is_number(samplerate):
        if samplerate!=original_samplerate:
            samples=adjust_samplerate(samples, original_samplerate, samplerate)
        return samples
    else:
        assert False,'r.load_sound_file: samplerate must either be True (which will return both the samples and the samplerate), None (which will return the audio at its original samplerate), or an integer representing the desired samplerate.'
#def load_sound_file(file_path: str,samplerate_adjustment=False,override_extension: str = None) :
# #TODO: Integrate this function with load_mp3_file
# #TODO: Use the 'audioread' library to decode more than just .wav files, using more than just ffmpeg. This will make this function more robust. https://github.com/beetbox/audioread
# # Opens sound files and turns them into numpy arrays! Unfortunately right now it only supports mp3 and wav files.
# # Supports only .mp3 and .wav files.
# # samplerate_adjustment:
# # If true, your sound will be re-sampled to match the default_samplerate.
# # If false, it will leave it as-is.
# # If it'_s None, this function will output a tuple containing (the original sound, the original samplerate)
# # Otherwise, it should be a number representing the desired samplerate it will re-sample your sound to match the given samplerate.
# # Set override_extension to either 'mp3' or 'wav' to ignore the extension of the file name you gave it. For example, using override_extension='mp3' on 'music.wav' will force it to read music as an mp3 file instead.
# if file_path.endswith(".mp3") or override_extension is not None and 'mp3' in override_extension:
# return load_mp3_file(file_path)
# file_path=mp3_to_wav(file_path)
# else:
# assert file_path.endswith(".wav") or 'wav' in override_extension,'sound_file_to_samples: ' + file_path + " appears to be neither an mp3 nor wav file." + " Try overriding the extension?" * (override_extension is None)
# pip_import('scipy')
# import scipy.io.wavfile as wav
# samplerate,samples=wav.read(file_path)
# try:
# samples=np.ndarray.astype(samples,float) / np.iinfo(samples.dtype).max # ⟶ All samples ∈ [-1,1]
# except Exception:
# pass
# if samplerate_adjustment is False:
# return samples
# if samplerate_adjustment is None:
# return samples,samplerate
# new_samplerate=default_samplerate if samplerate_adjustment is True else samplerate_adjustment
# if new_samplerate == samplerate: # Don't waste time by performing unnecessary calculations.
# return samples
# from scipy.signal import resample
# length_in_seconds=len(samples) / samplerate
# new_number_of_samples=int(length_in_seconds * new_samplerate)
# return resample(samples,num=new_number_of_samples)
def save_wav(samples,path,samplerate=None) -> None:
    """
    Write `samples` (usually floats between -1 and 1) to a wav file at `path`.
    When samplerate is None, default_samplerate is used.
    """
    pip_import('scipy')
    from scipy.io import wavfile
    if samples.dtype == np.float64:
        # Downcast so the wav file doesn't store 64-bit samples
        samples = samples.astype(np.float32)
    rate = samplerate or default_samplerate
    wavfile.write(path, rate, samples)
# Fallback samplerate used whenever a caller doesn't specify one
default_samplerate=44100 # In (Hz ⨯ Sample). Used for all audio methods in the 'r' class.
def play_sound_from_samples(samples,samplerate=None,blocking=False,loop=False,**kwargs):
    """
    Play raw audio samples, like matlab's 'sound' method (except this one doesn't let you play sounds on top of one-another).
    For stereo, use a np matrix
    Example: psfs((x%100)/100 for x in range(100000))
    Each sample should ∈ [-1,1] or else it will be clipped (if it wasn't clipped it would use modular arithmetic on the int16, which would be total garbage for sound)

    Parameters:
        samples: iterable of floats in [-1,1] (matrix for stereo)
        samplerate: playback rate in Hz; defaults to default_samplerate
        blocking: wait until playback finishes (not supported when running in Jupyter)
        loop: repeat forever (not supported when running in Jupyter)
    """
    try:
        pip_import('sounddevice')
    except OSError as error:
        # BUGFIX: this used to test OSError.args (a class attribute, never the actual
        # message) and the bare function currently_running_linux (always truthy without
        # parentheses), so this install hint could never fire correctly.
        if error.args==('PortAudio library not found',) and currently_running_linux():
            fansi_print("Error importing sounddevice; try running\n\tsudo apt-get install libportaudio2","red")
        raise
    if not running_in_ipython():
        import sounddevice
        wav_wave=np.array(np.minimum(2 ** 15 - 1,2 ** 15 * np.maximum(-1,np.minimum(1,np.matrix(list(samples)))).transpose()),dtype=np.int16) # ⟵ Converts the samples into wav format. I tried int32 and above: None of them worked. 16-bit seems to be the highest resolution available.
        sounddevice.play(wav_wave,samplerate=samplerate or default_samplerate,blocking=blocking,loop=loop,**kwargs)
    else:
        #This works in google colab!
        from IPython.display import Audio
        assert not loop,'This function cannot currently play looped audio when running in Jupyter'
        assert not blocking,'This function cannot currently block while playing audio when running in Jupyter'#This might change in future versions of rp
        # NOTE(review): the Audio object is created but never passed to display(); confirm it actually plays outside of Colab
        Audio(samples,rate=samplerate,autoplay=True)
def play_sound_file(path):
    """
    THIS Function is an abstraction of playing sound files. Just plug in whatever method works on your computer into this one to make it work
    Picks a backend by platform, and falls back to pygame if that backend fails for any reason.
    NOTE: These functions should all run on separate threads from the main thread by default!
    """
    try:
        if currently_running_linux():
            sound, rate = load_sound_file(path, samplerate=True)
            play_sound_from_samples(sound, rate)
        elif currently_running_mac():
            play_sound_file_via_afplay(path)
        elif currently_running_windows():
            pip_import('playsound')
            from playsound import playsound
            playsound(path)  # Worked on windows, but didn't work on my mac
    except Exception:
        # Last-ditch fallback: pygame works on every platform it installs on
        play_sound_file_via_pygame(path)
def play_sound_file_via_afplay(absolute_file_path_and_name: str,volume: float = None,rate: float = None,rate_quality: float = None,parallel: bool = True,debug: bool = True):
    """
    Play a sound file on a mac using the 'afplay' terminal command. Use stop_sound to stop it.
    If parallel==False, the code will pause until the song is finished playing.
    If parallel==True the sound is run in a new process, and returns this process so you can .terminate() it later. It lets things continue as usual (no delay before the next line of code)
    This seems to be a higher quality playback. On the other hand, I can't figure out any way to stop it.
    This version doesn't require any dependencies BUT doesn't work on windows and doesn't let us play .mp3 files. The new version uses pygame and DOES allow us to.
    Only tested on my MacBook. Might not work with windows or linux.
    """
    import shlex
    # BUGFIX/security: shlex.quote the path (consistent with mp3_to_wav) instead of
    # wrapping it in naive single quotes, which broke on - and allowed injection via -
    # paths containing a single quote.
    command="afplay " + shlex.quote(absolute_file_path_and_name)
    if rate is not None:
        assert rate > 0,"r.play_sound_file_via_afplay: Playback rate cannot rate=" + str(rate)
        command+=' -r ' + str(rate)
    if rate_quality is not None:
        if rate is None and debug:
            # BUGFIX: the message used to say "turn the debug parameter ... to True", but debug=True is what ENABLES this message
            print("r.play_sound_file_via_afplay: There'_s no reason for rate_quality not to be none: rate==None, so rate_quality doesn't matter. Just sayin'. To make me shut up, turn the debug parameter in my method to False.")
        command+=' -q ' + str(rate_quality)
    if volume is not None:
        command+=' -v ' + str(volume)
    return (run_as_new_thread if parallel else run_func)(shell_command,command) # If parallel==True, returns the process so we can terminate it later.
def play_sound_file_via_pygame(file_name: str,return_simple_stopping_function=True):
    """
    Backup sound-file player built on pygame.mixer.Sound.
    This is the old way: pygame.mixer.music accepts more file types and has more controls than pygame.mixer.Sound does.
    Though, audio and file things are weird - I'm keeping this in case the other playback methods fail for some reason. Other than being a backup like that, this method serves no purpose.
    Returns sound.stop when return_simple_stopping_function is True (play has already been called, so stop is the only other method you'd want); otherwise returns the Sound object itself for a little more control.
    noinspection PyUnresolvedReferences
    """
    pip_import('pygame')
    import pygame
    pygame.init()
    pygame.mixer.init()
    clip = pygame.mixer.Sound(file_name)
    assert isinstance(clip, pygame.mixer.Sound)
    clip.play()
    return clip.stop if return_simple_stopping_function else clip
def stop_sound():
    """
    Stop sounds from all sources I know of that the 'r' module can make.
    So far I have been unsuccessful in stopping
    """
    try:
        # Stops audio started by play_sound_file_via_afplay
        shell_command("killall afplay") # Used with 'play_sound_file_via_afplay' on macs.
    except ImportError: # NOTE(review): shell_command seems unlikely to raise ImportError - was a broader except intended here? Confirm.
        pass
    # try:run_as_new_thread(shell_command,"killall com.apple.speech.speechsynthesisd")# ⟵ Works when I enter the command in terminal, but doesn't work when called from python! It'_s not very important atm though, so I'm not gonna waste time over it.
    # except Exception:pass
    try:
        # Stops audio started by play_sound_from_samples
        import sounddevice
        sounddevice.stop()
    except ImportError: # sounddevice isn't installed, so it can't be playing anything
        pass
    try:
        # Stops audio started by play_sound_file_via_pygame
        import pygame
        pygame.mixer.stop()
    except ImportError: # pygame isn't installed, so it can't be playing anything
        pass
def mp3_to_wav(mp3_file_path: str,wav_output_path: str = None,samplerate=None) -> str:
    """
    This is an audio file converter that converts mp3 files to wav files.
    You must install 'lame' to use this function.
    Saves a new wav file derived from the mp3 file you gave it, and returns the path it was written to.
    When wav_output_path is None, the output path is the input path with its extension swapped to .wav
    shell_command('lame --decode '+mp3_file_path+" "+wav_output_path)# From https://gist.github.com/kscottz/5898352
    """
    if wav_output_path is None:
        wav_output_path = os.path.splitext(mp3_file_path)[0] + ".wav"
    # NOTE(review): the samplerate is inserted as a bare token with no option flag in
    # front of it - lame may parse it as a filename rather than a samplerate; confirm
    # against lame's CLI documentation.
    _run_sys_command(
        "lame "
        + str(samplerate or default_samplerate)
        + " -V 0 -h --decode "
        + shlex.quote(mp3_file_path)
        + " "
        + shlex.quote(wav_output_path)
    ) # From https://gist.github.com/kscottz/5898352
    return wav_output_path
def wav_to_mp3(wav_file_path: str, mp3_output_path: str = None, samplerate: int = 44100) -> str:
    """
    This is an audio file converter that converts wav files to mp3 files.
    You must install 'ffmpeg' to use this function.
    Saves a new mp3 file derived from the wav file you gave it, and returns its path.
    When mp3_output_path is None, the output path is the input path with its extension swapped to .mp3
    """
    _ensure_ffmpeg_installed()
    import os
    import subprocess
    if mp3_output_path is None:
        mp3_output_path = os.path.splitext(wav_file_path)[0] + ".mp3"
    # BUGFIX: '-hide_banner -loglevel error' used to be appended AFTER the output path,
    # where ffmpeg treats them as trailing options and ignores them. They must precede
    # the input/output arguments to take effect.
    cmd = ['ffmpeg', '-hide_banner', '-loglevel', 'error', '-i', wav_file_path]
    if samplerate:
        cmd.extend(['-ar', str(samplerate)])  # Resample the audio stream
    cmd.extend(['-y', mp3_output_path])  # -y: overwrite the output without prompting
    subprocess.run(cmd, check=True)
    return mp3_output_path
def convert_audio_file(input_file, output_file, *, skip_existing=False):
    """
    Convert an audio file to a different format using FFmpeg.
    Args:
        input_file (str): Path to the input audio file.
        output_file (str): Desired output format or path. If only extension is provided
            (e.g., 'mp3', 'wav'), output will use input filename with new extension.
            Supported formats: wav, mp3, ogg, mp4.
        skip_existing (bool): If True and the output file already exists, return it
            without re-converting.
    Returns:
        str: Path to the converted audio file.
    Raises:
        RuntimeError: If FFmpeg encounters an error or is not installed.
        FileNotFoundError: If the input file does not exist or output file could not be created.
    Notes:
        - Requires FFmpeg to be installed and available in PATH.
        - Automatically creates a unique filename if output path already exists.
    EXAMPLE:
        >>> convert_audio_file('/Users/ryan/Downloads/Diffusion Illusions: SIGGRAPH 2024 Talk.mp4','wav')
        ans = /Users/ryan/Downloads/Diffusion Illusions: SIGGRAPH 2024 Talk_copy.wav
    """
    import subprocess
    import os
    if not os.path.exists(input_file):
        raise FileNotFoundError(f"Input file not found: {input_file}")
    _ensure_ffmpeg_installed()
    supported_output_filetypes = "wav ogg mp3 mp4".split()
    # BUGFIX: also accept dotted extensions like '.mp3' (the old check compared
    # "." + output_file against undotted names, so it could never match).
    if output_file.lstrip(".") in supported_output_filetypes:
        output_file = rp.with_file_extension(input_file, output_file.lstrip("."), replace=True)
    if output_file==input_file:
        #Converting a file to its own type...take a shortcut and just return the original file!
        return input_file
    # BUGFIX: skip_existing must be checked BEFORE get_unique_copy_path, because
    # get_unique_copy_path always returns a path that doesn't exist yet - the old
    # ordering made the skip_existing branch dead code.
    if skip_existing and os.path.exists(output_file):
        return output_file
    output_file = rp.get_unique_copy_path(output_file)
    make_parent_directory(output_file)
    try:
        subprocess.run(
            ["ffmpeg", "-i", input_file, "-y", output_file],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except subprocess.CalledProcessError as e:
        raise RuntimeError("Error converting audio file: " + e.stderr.decode()) from e
    except FileNotFoundError:
        raise RuntimeError("FFmpeg not found. Please install FFmpeg to convert audio files.")
    if not os.path.exists(output_file):
        raise FileNotFoundError("Failed to create output file: " + output_file)
    return output_file
# endregion
# region Matplotlib: [display_image,brutish_display_image,display_color_255,display_grayscale_image,line_graph,block,clf]
def _display_image_in_notebook_via_ipyplot(image):
    """Render a single image inline in a Jupyter notebook using the ipyplot package, at its native width."""
    assert is_image(image)
    rgb_image = as_rgb_image(as_byte_image(image))  # ipyplot wants byte-valued RGB
    width = get_image_width(rgb_image)
    ipyplot = pip_import('ipyplot')
    ipyplot.plot_images(images=[rgb_image], img_width=width, labels=[''])
def _display_image_in_notebook_via_ipython(image):
    """Render an image inline in a Jupyter notebook via IPython's raw PNG display."""
    from IPython.display import display_png
    png_bytes = encode_image_to_bytes(image, 'png')
    return display_png(png_bytes, raw=True)
def add_ipython_kernel(kernel_name: str = None, display_name: str = None):
    """
    Add the current Python interpreter as a Jupyter IPython kernel.
    Parameters:
    - kernel_name: The name for the kernel, as it would appear in the command to start it. For example, "python3.9".
      If None, the user is prompted interactively (defaulting to the session title with whitespace replaced by dashes).
    - display_name: The name as it appears in the Jupyter UI. Defaults to kernel_name.
      Note that "Python " is prepended to it when installing.
    Raises:
        RuntimeError: if the ipykernel install subprocess can't be launched or exits nonzero.
    Usage:
        add_ipython_kernel("python39", "3.9.5")
    """
    pip_import('ipykernel')
    if kernel_name is None:
        print("Please enter the title of the new iPython kernel:")
        default = _get_session_title()
        # Kernel names can't have whitespace
        default = default.strip()
        default = '-'.join(default.split())
        kernel_name = input_default(' > ', default)
    import sys
    import subprocess
    assert isinstance(kernel_name, str)
    assert display_name is None or isinstance(display_name, str)
    if display_name is None:
        display_name = kernel_name
    command = [
        sys.executable, "-m", "ipykernel", "install", "--user",
        "--name", kernel_name,
        "--display-name", "Python " + display_name  # NOTE(review): "Python " is always prepended, so display_name="Python 3.9.5" yields "Python Python 3.9.5" - confirm intent
    ]
    try:
        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stderr = result.stderr.decode()
    except Exception as e:
        raise RuntimeError("Error running subprocess: " + str(e))
    if result.returncode != 0:
        raise RuntimeError("Error adding kernel: " + stderr)
    print("Successfully added Python " + display_name + " as a Jupyter kernel.")
def display_video(video,framerate=30,*,loop=False):
    """
    Play a video - inline when running in a Jupyter notebook, otherwise frame-by-frame via display_image.
    Video can either be a string, or a video (aka a 4d tensor or iterable of images)

    Parameters:
        video: a file path / URL string, or the frames themselves
        framerate: target playback speed, in frames per second
        loop: if True, repeat the video forever (terminal playback only)

    Example: display_video('https://www.youtube.com/watch?v=jvipPYFebWc')
    TODO: Implement loop for jupyter
    """
    def loop_wrapper(video):
        # Yield the video's frames forever. Indexable videos are re-read in place;
        # one-shot iterators are cached on the first pass so they can be replayed.
        if hasattr(video, '__getitem__') and hasattr(video, '__len__'):
            length = len(video)
            index = 0
            while True:
                # BUGFIX: the index used to be incremented BEFORE the first yield,
                # which skipped frame 0 on the first pass through the video.
                yield video[index % length]
                index += 1
        else:
            seen_frames = []
            for frame in video:
                yield frame
                seen_frames.append(frame)
            while True:
                yield from seen_frames
    if running_in_jupyter_notebook():
        display_video_in_notebook(video,framerate=framerate)
    else:
        #Todo: Add keyboard controls to play, pause, rewind, restart, next frame, prev frame, go to frame, adjust framerate
        #It would be much like display_image_slideshow (maybe even add functionality to display_image_slideshow and use that?)
        if isinstance(video,str):
            if not is_valid_url(video):
                if not file_exists(video):
                    raise FileNotFoundError(video)
                assert is_video_file(video),repr(video)+' is not a video file'
            video=load_video_stream(video)
        if loop:
            video = loop_wrapper(video)
        time_start=gtoc()
        time_per_frame=1/framerate
        for i, frame in enumerate(video):
            try:
                time_before_display = gtoc()
                display_image(frame)
                time_after_display = gtoc()
                # Sleep off whatever part of the frame budget displaying didn't consume
                sleep(max(0, time_per_frame - (time_after_display - time_before_display)))
            except KeyboardInterrupt:
                fansi_print("rp.display_video: Received KeyboardInterrupt - stopping playback", 'cyan', 'bold')
                break
# def display_video_in_notebook(video,framerate=30):
# """
# Video can be either a string pointing to the path of a video, or the video itself. If it is the video itself, it will be embedded as a gif and displayed that way.
# This function can also display gif's and other video URL's we find on the web
# """
# if isinstance(video,str):
# if file_exists(video) or is_valid_url(video):
# filetype=get_file_extension(video)
#
# video_filetypes='webm mp4 ogg'.split() #These are the only video filetypes officially supported by the HTML standard (see https://www.w3schools.com/html/html_media.asp)
# image_filetypes='gif'.split()
#
# assert filetype in video_filetypes+image_filetypes,'Invalid filetype: '+repr(video)+', video must be one of these types: '+str(video_filetypes+image_filetypes).replace("'",'')
#
# if filetype in video_filetypes:
# from IPython.display import Video,display_html
# if is_valid_url(video):
# display_html(Video(url=video))
# else:
# display_html(Video(data=video))
# else:
# assert filetype in image_filetypes
# from IPython.display import Image,display_html
# if is_valid_url(video):
# display_html(Image(url=video))
# else:
# display_html(Image(data=video))
# else:
# raise FileNotFoundError(video)
# else:
# display_embedded_video_in_notebook(video)
def _make_video_dimensions_even(video):
    """
    Make the video have an even height and width. Used for saving MP4's.
    Without this, if a video with odd height or odd width is displayed with mediapy, it renders as a black rectangle.
    If you download that video, it can be viewed with more niche video viewers like Videoloupe, but it cannot be viewed in Vivaldi
    """
    video = rp.crop_images_to_max_size(video)
    height, width = rp.get_video_dimensions(video)
    if height % 2 == 0 and width % 2 == 0:
        return video  # Already MP4-friendly
    # Round each dimension up to the next even number
    even_height = math.ceil(height / 2) * 2
    even_width  = math.ceil(width  / 2) * 2
    return rp.crop_images(video, even_height, even_width)
def _display_video_via_mediapy(video, framerate):
    """ Use mediapy to display a video in a Jupyter notebook """
    rp.pip_import('mediapy')
    import mediapy
    # Normalize to even-dimensioned byte-valued RGB numpy frames, which mediapy can render
    frames = rp.as_numpy_images(video)
    frames = rp.as_rgb_images(frames)
    frames = rp.as_byte_images(frames)
    frames = _make_video_dimensions_even(frames)
    return mediapy.show_video(frames, fps=framerate)
def display_video_in_notebook(video, filetype='mp4', *, embed=True, framerate=60):
    """
    Display a video or image in a Jupyter notebook.
    Args:
        video: The video object to display.
            - Can be a video: i.e. a list of images as defined by rp.is_image (such as a list of PIL images), or a TCHW torch tensor with values between 0 and 1, or a THWC numpy array
            - Can be an existing file path: Such as /path/to/video.mp4
            - Can be a URL: Such as https://file-examples.com/storage/feaef0a3ad67b78fd9cc1df/2017/04/file_example_MP4_480_1_5MG.mp4
        filetype (str, optional): The filetype of the video or image. Supported filetypes are 'gif', 'png', 'mp4', 'webp' and 'avi'. Defaults to 'mp4'.
        framerate (int, optional): The framerate of the video. Defaults to 60.
        embed (bool, optional): If true, encodes the video as a base-64 string into the notebook itself - so that the video is saved with the notebook.
            If false, it will display it as a reference to some file on the host's computer. Good for decreasing filesize of the notebooks.
    Raises:
        ValueError: If an unsupported filetype is provided.
    Examples:
        >>> video = create_video(...)
        >>> display_video_in_notebook(video, filetype='mp4', embed=True)
    """
    # All of the real work happens in _display_video_in_notebook; gather_args_call forwards this function's arguments to it by name.
    return gather_args_call(_display_video_in_notebook)
def _display_video_in_notebook(video, filetype, *, embed, framerate, save_video=None):
    """
    Implementation behind rp.display_video_in_notebook - see that function's docstring.

    Args:
        video: A video (list of images / THWC array / TCHW tensor), an existing file path, or a URL.
        filetype (str): Target container/extension, e.g. 'mp4', 'gif', 'webp', 'avi', 'png'.
        embed (bool): If True, base64-encode the media directly into the notebook.
        framerate (int): Frames per second used when the video must be encoded to disk.
        save_video (callable, optional): Function with signature (video, path, framerate=...)
            used to write the video to a temporary file. Defaults to the module-level rp.save_video.

    Raises:
        ValueError: If `filetype` is not one of the supported image/video filetypes.
    """
    if save_video is None:
        #BUGFIX: this used to read `save_video=save_video` - a no-op self-assignment that
        #left save_video as None and crashed when called below. The parameter shadows the
        #module-level function, so reach it through the rp module instead.
        save_video = rp.save_video

    from IPython.display import Image, display, HTML, Video

    filetype = filetype.strip('.').lower()
    image_filetypes = 'gif png webp'.split()
    video_filetypes = 'mp4 avi'.split()

    #Fast path: mediapy gives nicer embedded mp4 players when it's installed
    if embed and not isinstance(video, str) and filetype=='mp4':
        try:
            return _display_video_via_mediapy(video, framerate)
        except ImportError:
            #If we can't import mediapy, it's ok! We have a fallback, seen below.
            pass

    temp_path = None #Guard so the finally-block is safe even if we fail before creating a file
    try:
        if isinstance(video, str):
            assert is_valid_url(video) or file_exists(video), 'rp.display_video_in_notebook: Video file {0} does not exist'.format(video)
            temp_path = video
            filetype = get_file_extension(temp_path)
        else:
            temp_path = temporary_file_path(filetype)
            save_video(video, temp_path, framerate=framerate)

        if not embed and filetype in video_filetypes and not is_valid_url(temp_path):
            #We need a url like http://0.0.0.0:5678/files/TEMP/video.png
            #For some reason this is needed on videos and NOT images??
            temp_path = get_relative_path(
                temp_path,
                _original_pwd,
            )

        if filetype in image_filetypes:
            if embed:
                image_hex = file_to_base64(temp_path)
                display_object = HTML('<img src="data:image/{0};base64,{1}">'.format(filetype, image_hex))
                # display_object = Image(filename=temp_path, embed=embed) #Equivalent - but the former offers more control if we need it later
            else:
                assert is_valid_url(temp_path), "I wasn't able to get embed=False to work with image paths yet: temp_path=%s"%temp_path
                display_object = Image(filename=temp_path, embed=embed)
        elif filetype in video_filetypes:
            if embed:
                video_hex = file_to_base64(temp_path)
                display_object = HTML('<video loop autoplay controls><source src="data:video/{0};base64,{1}" type="video/{0}"></video>'.format(filetype, video_hex))
            else:
                display_object = Video(filename=temp_path, html_attributes='autoplay loop controls')
        else:
            raise ValueError("rp.display_video_in_notebook: Unsupported filetype: {0}. Supported filetypes are {1}".format(filetype, ', '.join(image_filetypes + video_filetypes)))

        return display(display_object)
    finally:
        #Only clean up temp files we created ourselves (never a caller-provided path)
        if not isinstance(video, str) and embed and temp_path is not None and file_exists(temp_path):
            delete_file(temp_path)
def display_video_in_notebook_webp(video, quality=100, framerate=60):
    """
    Displays an animated webp in a Jupyter notebook with a specified quality and framerate
    See rp.display_video_in_notebook's docstring for explanations of what the args do

    EXAMPLE:
        >>> import rp
        ... video_url = "https://file-examples.com/storage/feaef0a3ad67b78fd9cc1df/2017/04/file_example_MP4_480_1_5MG.mp4"
        ... video = rp.load_video(video_url, use_cache=True)
        ...
        ... for quality in [1, 10, 25, 50, 90, 95, 100]:
        ...     #Ranges from ~500KB to 14MB
        ...     print(quality)
        ...     rp.display_video_in_notebook_webp(video, quality)
    """
    def save_video(video, path, framerate):
        #Closure over `quality`: writes the frames as an animated webp at that quality
        return save_video_webp(video, path, quality=quality, framerate=framerate)
    #NOTE(review): gather_args_call appears to forward this frame's locals to the matching
    #parameters of _display_video_in_notebook - so the local `save_video` defined above is
    #what gets used as its save_video argument. Renaming it would break that - TODO confirm.
    return gather_args_call(_display_video_in_notebook, filetype='webp', embed=True)
# def display_embedded_video_in_notebook(video,framerate:int=30,filetype:str='gif'):
# """
# This will embed a video into the jupyter notebook you're using
# Warning: This function is still experimental, and sometimes the videos are messed up a bit
# Warning: This can make your notebooks very large, so please be careful to only use small videos with this function
# """
# assert running_in_jupyter_notebook(),'display_embedded_video_in_notebook: This function only works in a jupyter notebook, such as Google Colab or Jupyter Lab'
#
# video_filetypes='webm mp4 ogg'.split() #These are the only video filetypes officially supported by the HTML standard (see https://www.w3schools.com/html/html_media.asp)
# image_filetypes='gif'.split()
# assert filetype in video_filetypes+image_filetypes,'Invalid filetype: '+repr(filetype)+', please choose from '+str(video_filetypes+image_filetypes).replace("'",'')
#
# from IPython.display import HTML, display_html
# from base64 import b64encode
#
#
# video_encoded = b64encode(encode_video_to_bytes(video,filetype,framerate=framerate)).decode()
#
# if filetype in video_filetypes:
# html = '<video controls alt="test" src="data:video/{0};base64,{1}">'.format(filetype, video_encoded)
# else:
# assert filetype in image_filetypes
# html = video_tag = '<img src="data:image/{0};base64,{1}" />'.format(filetype, video_encoded)
#
# display_html(html,raw=True)
def _display_downloadable_image_in_notebook_via_ipython(image, file_name:str):
    """
    Display `image` in a Jupyter notebook wrapped in a download link.
    When the displayed image is clicked, it is downloaded as a PNG named `file_name`.
    """
    pip_import("IPython")
    file_name=with_file_extension(file_name,'png')
    import base64
    from IPython.display import HTML,display
    img_str = base64.b64encode(encode_image_to_bytes(image,'png')).decode('utf-8')
    #BUGFIX: this used to call .replace('{image_str}', image_str) - a placeholder that did
    #not exist in the template and an undefined variable (NameError at runtime). It also
    #hard-coded the download name as 'displayed_image.png', ignoring file_name.
    html = (
        '<a href="data:image/png;base64,{img_str}" download="{file_name}">'
        '<img src="data:image/png;base64,{img_str}" /></a>'
    ).format(img_str=img_str, file_name=file_name)
    display(HTML(html))
def display_image_in_notebook(image):
    """
    Display an image at full resolution in a jupyter notebook.

    Returns:
        JupyterDisplayChannel: an updatable channel - call .update(new_image) on it
        to replace the displayed image in-place.
    """
    #BUGFIX: this function previously contained a large unreachable tail after this
    #return, which referenced a `file_name` variable that was no longer a parameter
    #(it would have raised NameError had it ever run). That dead code was removed.
    channel = JupyterDisplayChannel()
    channel.update(image)
    channel.display()
    return channel
#def display_image_in_notebook(image):
# #Display an image at full resolution in a jupyter notebook
# assert is_image(image)
# image=as_rgb_image(as_byte_image(image))
# pip_import('ipyplot').plot_images([image],img_width=width(image))
def _image_to_html(image):
    """Return an <img> HTML tag with `image` base64-embedded as a PNG data URI."""
    base64_image = rp.encode_image_to_base64(image)
    #BUGFIX: the MIME type used to be 'content/png', which is not a valid media type;
    #the correct one for PNG data URIs is 'image/png'.
    return '<img src="data:image/png;base64,%s"/>' % base64_image
class JupyterDisplayChannel:
    def __init__(self):
        """
        Used for displaying and updating content in Jupyter notebooks.
        It's analagous to a bunch of televisions, all subscribed to this channel.
        The JupyterDisplayChannel allows you to create multiple viewports and efficiently
        update them with various types of content, including text, numbers, images, and grids of these.

        First, create a channel. Then, display it whever you want (can be multiple places in your notebook).
        Then, push updates to it to show content with update() or grid_update()
        See the self-contained examples below for how to do this.

        TODO: Support more update methods, such as side-by-side images and slideshows etc
        TODO: Fully support more content types, such as video and audio

        EXAMPLE:
            >>> from rp import *
            >>> channel = JupyterDisplayChannel()
            >>> #You can have multiple viewports for a given channel
            >>> print("First viewport:")
            >>> channel.display()
            >>> print("Second viewport:")
            >>> channel.display()
            >>> #You can efficiently animate images this way
            >>> image = rp.cv_text_to_image("Hello\nWorld!")
            >>> for angle in range(360 * 3):
            >>>     channel.update(rp.rotate_image(image, angle))
            >>> #You can update anything that Jupyter can display
            >>> for num in range(45):
            >>>     channel.update(list(range(num%15)))
            >>>     rp.sleep(.1)
            >>> #Here's a demo showing how the grid works...
            >>> rows = [range(i) for i in range(10)]
            >>> for _ in range(30):
            >>>     rows = rows[1:] + [rows[0]]
            >>>     channel.grid_update(rows)
            >>>     rp.sleep(.1)
            >>> #And here's a demo showing how the grid can have images in it too...
            >>> colors = "red green blue cyan magenta yellow black".split()
            >>> rows = [
            >>>     [
            >>>         rp.rotate_image(
            >>>             rp.cv_text_to_image(color, background_color=rp.color_name_to_byte_color(color)),
            >>>             rp.random_int(-90, 90)
            >>>         ) for color in colors
            >>>     ],
            >>>     [
            >>>         rp.rotate_image(
            >>>             rp.cv_text_to_image(color, color=rp.color_name_to_byte_color(color)),
            >>>             rp.random_int(-90, 90)
            >>>         ) for color in colors
            >>>     ],
            >>>     colors
            >>> ]
            >>> for _ in range(30):
            >>>     rows = [row[1:] + [row[0]] for row in rows]
            >>>     channel.grid_update(rows)
            >>>     rp.sleep(0.1)
        """
        rp.pip_import("IPython")
        #Unique id shared by all viewports of this channel; update_display targets it
        self._display_id = rp.random_namespace_hash()
        self._update(None)

    @staticmethod
    def _convert_content(content):
        """Coerce `content` into something IPython.display can render."""
        from IPython.display import HTML
        if content is None:
            #Return nothing
            return HTML("")
        elif rp.is_image(content):
            #Return an image
            #BUGFIX: an unreachable `return HTML(_image_to_html(content))` that followed
            #this return was removed (an HTML <img> would also work here if ever needed)
            return rp.as_pil_image(content)
        else:
            #Return whatever you gave it
            return content

    @staticmethod
    def _convert_content_grid(content_grid):
        """Render a 2D grid of content (text/numbers/images) as an HTML table."""
        rp.pip_import('pandas')
        from pandas import DataFrame
        from IPython.display import HTML

        #If this errors you gave it an invalid grid
        grid = [list(row) for row in content_grid]

        #Make the grid rectangular by padding each row to max length
        width = max(map(len,grid))
        grid = [row + [None] * (width - len(row)) for row in grid]

        def convert_grid_item(item):
            #Images become inline <img> tags; None becomes an empty cell
            if rp.is_image(item):
                item = _image_to_html(item)
            elif item is None:
                item = ""
            return item

        grid = [list(map(convert_grid_item, row)) for row in grid]

        df = DataFrame(grid)
        html = df.to_html(escape=False)
        return HTML(html)

    def _update(self, converted_content):
        """Push already-converted content to every viewport sharing this display id."""
        from IPython.display import update_display
        self._converted_content = converted_content
        update_display(self._converted_content, display_id=self._display_id)

    def display(self):
        """Adds a new viewport"""
        from IPython.display import display
        display(self._converted_content, display_id=self._display_id)

    def clear(self):
        """Clears the viewports"""
        self.update(None)

    def update(self, content):
        """Updates all viewports spawned from this channel"""
        self._update(self._convert_content(content))

    def grid_update(self, content_grid):
        """
        Updates all viewports spawned from this channel with a grid of content
        Pass it like [[x0y0, x1y0, x2y0], [x0y1, x1y1, x2y1] ... ]
        Supports text, numbers and images as elements
        """
        self._update(self._convert_content_grid(content_grid))

    def row_update(self, content_row):
        """A row of content gets displayed"""
        self.grid_update([content_row])
#Module-level kill switch: when True, display_image() prints a warning and shows nothing.
_disable_display_image=False #Set rp.r._disable_display_image=True to disable the display_image function. Right now this is undocumented functionality, might make it documented later on via helper functions like enable_display_image() and disable_display_image()
def display_image(image,block=False):
    """
    Very simple to understand: this function displays an image.
    At first, it tries to use matplotlib and if that errors it falls back to opencv's imshow function.
    By default this function will not halt your code, but if you set block=True, it will.
    This function works in Jupyter Notebooks such as google colab, and will automatically scale the DPI of the output such that the full-resolution image is shown (don't take this for granted)
    You can pass this function binary, rgb, rgba, grayscale matrices -- most types of images (see rp.is_image() for more information)
    """
    #Global kill switch (see _disable_display_image above)
    if _disable_display_image:
        return fansi_print("rp.display_image: Currently disabled; no image displayed",'yellow')
    #SSH terminal without a notebook: fall back to ANSI-art rendering in the terminal
    if currently_in_a_tty() and running_in_ssh() and not running_in_ipython() :
        # if currently_in_a_tty() and not currently_running_desktop() and not running_in_ipython() : #THIS MIGHT BE BETTER - this one is more recent, but I decided to make minimal changes today. Maybe uncomment this in the future if you want.
        #Let display_image work in terminals too, when in ssh and we have no GUI or ipynb options
        return display_image_in_terminal_color(image)
    if not running_in_ipython() and not currently_running_desktop():
        fansi_print("rp.display_image: Warning, no image was displayed - not in desktop environment.",'yellow') #Please note that cv2.imshow will usually segfault if there's no desktop environment!
        return
    #Convenience: a string argument is treated as a path/URL and loaded first
    if isinstance(image,str):
        fansi_print("display_image usually meant for use with numpy arrays, but you passed it a string, so we'll try to load the image load_image("+repr(image)+") and display that.")
        image=load_image(image)
    if is_pil_image(image) or is_torch_tensor(image):
        image=as_numpy_image(image,copy=False)
    #Last-resort torch conversion for anything the check above missed
    if not isinstance(image,np.ndarray) and not isinstance(image,list):
        try:
            import torch
            if isinstance(image,torch.autograd.Variable):
                image=image.data
            elif isinstance(image,torch.Tensor):
                image=image.cpu().numpy()
        except Exception:pass
    if running_in_ipython():
        #Use the ipyplot library to display images at full resultion while in a jupyter notebook
        return display_image_in_notebook(image)
    elif module_exists('cv2'):
        try:
            #Personally, I think cv_imshow is better because it's faster.
            #If we have opencv installed, try to use that.
            #If not, then oh well - we'll just continue on and try matplotlib instead
            return cv_imshow(image,wait=10 if not block else 10000000,label='rp.display_image()')
        except Exception:#Only excepting exceptions because KeyboardInterrupt is a BaseException, and we want to be able to interrupt while True:display_image(load_image_from_webcam()) without tryiggering matplotlib
            pass #Oh well, we tried!
    #Matplotlib fallback path
    global plt
    plt=get_plt()
    if is_image(image):
        image=as_rgb_image(as_float_image(image))
    try:
        plt.clf()
        if running_in_ipython():
            fig=plt.figure()#Make a new figure. When jupyter, this makes sense; but normally we don't want this (it will make a bazillion windows)
            mpl=pip_import('matplotlib')
            # import matplotlib as mpl
            #region Set the jupyter resolution to the true image size (it usually squashes the image too small for comfort)
            old_dpi,old_figsize=mpl.rcParams['figure.dpi'],mpl.rcParams['figure.figsize']#
            arbitrary_number=100
            mpl.rcParams['figure.dpi'] = arbitrary_number
            #NOTE(review): matplotlib figsize is (width, height) but shape[0] is the image
            #height - these may be swapped. TODO confirm intended behavior before changing.
            mpl.rcParams['figure.figsize']=[image.shape[0]/arbitrary_number,image.shape[1]/arbitrary_number]
        else:
            fig = _fig()
        #Borderless axes covering the whole figure, so the image fills the frame
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        ax.imshow(image, aspect='equal')
        plt.show(block=block)
        if not block:
            plt.pause(0.0001)
        if running_in_ipython():
            plt.close(fig)#I don't know if this is necessary. It's a hunch it might make it faster in the long term if we have 239239872 figures opened in jupyter. It doesn't hurt, though, so I'm keeping it here.
            mpl.rcParams['figure.dpi'],mpl.rcParams['figure.figsize']=old_dpi,old_figsize
    except Exception:
        if not running_in_google_colab():
            image=np.asarray(image)
            #The above seems not to work anymore, so the next thing to try is opencv's image display (in the event that it fails)...
            ndim=len(image.shape)
            assert ndim in {2,3},'Image tensor must have either two or three dimensions (either a grayscale image or RGB or RGBA image)'
            if ndim==2:
                image=grayscale_to_rgb(image)
            if image.dtype==bool:
                image=image.astype(float)
            return cv_imshow(image,wait=10 if not block else 1000000)#Hit esc in the image to exit it
def with_alpha_checkerboard(image, *, tile_size=8, first_color=1.0, second_color=0.75):
    """
    Composite `image` over a checkerboard background and return the opaque result.
    For RGBA inputs this makes transparent regions visibly checkered.
    """
    height, width = get_image_dimensions(image)
    background = get_checkerboard_image(
        height,
        width,
        tile_size=tile_size,
        first_color=first_color,
        second_color=second_color,
    )
    return blend_images(background, image)
def with_alpha_checkerboards(*images, tile_size=8, first_color=1.0, second_color=.75, lazy=False):
    """
    Plural form of rp.with_alpha_checkerboard - composites each image over a checkerboard.

    Accepts either multiple image arguments or a single iterable of images.
    If lazy=True a generator is returned; otherwise a list, or a numpy array
    when the (detupled) input itself was a numpy array.
    """
    images = detuple(images)
    input_was_numpy = is_numpy_array(images)

    def checkered(single_image):
        return with_alpha_checkerboard(
            single_image,
            tile_size=tile_size,
            first_color=first_color,
            second_color=second_color,
        )

    results = (checkered(single_image) for single_image in images)

    if lazy:
        return results

    results = list(results)
    if input_was_numpy:
        #In future: Optimize
        results = as_numpy_array(results)
    return results
def display_alpha_image(image, block=False, tile_size=8, first_color=1.0, second_color=0.75):
    """Display `image` with its transparent regions shown over a checkerboard background."""
    composited = with_alpha_checkerboard(
        image,
        tile_size=tile_size,
        first_color=first_color,
        second_color=second_color,
    )
    display_image(composited, block=block)
def _display_image_slideshow_animated(images):
    """
    Display `images` as an embedded, scrubbable matplotlib animation.

    This works best on Jupyter notebooks right now
    It technically works without a jupyter notebook...but at that rate you might as well use display_video...
    ...this is because jupyter notebooks display nice controls for the video, while default matplotlib doesn't
    """
    if not running_in_jupyter_notebook():
        #BUGFIX: previously this branch fell through and ALSO ran the matplotlib animation
        #below, displaying the images twice. display_video is the complete non-notebook fallback.
        return display_video(images) #This is objectively better at the moment

    pip_import('matplotlib')
    import matplotlib
    import matplotlib.pyplot as plt
    import matplotlib.animation as animation

    assert len(images)>0, 'Must have at least one image to display, but len(images)==%i'%len(images)

    height,width=get_image_dimensions(images[0])

    try:
        #Adjust the size of matplotlib's display of the image to match it's resolution
        old_dpi,old_figsize=matplotlib.rcParams['figure.dpi'],matplotlib.rcParams['figure.figsize']
        arbitrary_number=100
        matplotlib.rcParams['figure.dpi'] = arbitrary_number
        figsize=[height/arbitrary_number,width/arbitrary_number]
        matplotlib.rcParams['figure.figsize']=figsize

        #Make sure all the images are standardized
        images=[as_rgb_image(as_byte_image(image)) for image in images]

        #Remove matplotlib's white border around the image
        fig = plt.figure(figsize=figsize[::-1])
        plt.axis("off")
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)

        #Display the animation
        ims = [[plt.imshow(image, animated=True)] for image in images]
        ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)

        #Note: In jupyter notebook, this animation will be embedded.
        #This can make the .ipynb files quite large if you're not careful to keep the videos small
        from IPython.display import HTML, display_html
        matplotlib.rcParams['animation.embed_limit'] = 2**128
        html=HTML(ani.to_jshtml())
        display_html(html)
        plt.close() #We have to call plt.close, or else it will display an additional image under the animation
    finally:
        matplotlib.rcParams['figure.dpi'],matplotlib.rcParams['figure.figsize']=old_dpi,old_figsize
def display_qr_code_in_terminal(text):
    """
    Print a QR code encoding `text` into the terminal.

    EXAMPLE:
        #Done in Alacritty or the default Mac Terminal
        display_qr_code_in_terminal('https://google.com')
    EXAMPLE:
        #This one is really annoying (funny prank): it will cover the entire camera of the iPhone that sees it for a brief moment
        display_qr_code_in_terminal('a'*2300)
    """
    pip_import('qrcode')
    import qrcode

    qr = qrcode.QRCode()
    qr.add_data(text)

    #Use tty rendering when attached to a terminal, otherwise plain ascii
    if currently_in_a_tty():
        qr.print_tty()
    else:
        qr.print_ascii()
def display_website_in_terminal(url):
    """Fetch `url`, convert its HTML to readable text, and rich-print it in the terminal."""
    assert is_valid_url(url),'Invalid url: %s'%url
    page_html=curl(url)

    pip_import('html2text')
    import html2text
    rendered=html2text.html2text(page_html)

    rp.r._rich_print(rendered)
def display_image_slideshow(images='.',display=None,use_cache=True):
    """
    Enters an interactive image slideshow
    Useful for exploring large folders/lists of images
    images:
        images can be a path to a folder containing images
        images can be a list of images as defined by r.is_image()
        images can be a list of image file paths
    display:
        if you set display=display_image_in_terminal, you can view the slideshow entirely over SSH
    use_cache:
        if True, loaded images/URLs are cached between loads
    EXAMPLE:
        display_image_slideshow(list(map(cv_text_to_image,'abcdefghijklmnopqrstuvwxyz')),display=display_image_in_terminal)
    EXAMPLE:
        images=line_split('''https://upload.wikimedia.org/wikipedia/commons/4/41/Left_side_of_Flying_Pigeon.jpg
        https://d17fnq9dkz9hgj.cloudfront.net/uploads/2020/04/shelter-dog-cropped-1.jpg
        https://i.pinimg.com/736x/4d/8e/cc/4d8ecc6967b4a3d475be5c4d881c4d9c.jpg
        https://www.dictionary.com/e/wp-content/uploads/2018/03/doge-300x300.jpg
        https://i.pinimg.com/originals/cb/e9/b4/cbe9b4280f390636e4d9432a02159528.jpg
        https://i.insider.com/5989fc4eefe3df1f008b48b9?width=1100&format=jpeg&auto=webp
        https://pyxis.nymag.com/v1/imgs/cd8/804/e0f612fa12d17e68e3d68ccf55f93cac4f-06-rick-morty.rsquare.w700.jpg
        https://assets.bwbx.io/images/users/iqjWHBFdfxIU/iXusLDq1QUac/v1/1000x-1.jpg
        https://i0.wp.com/huskerchalktalk.com/wp-content/uploads/2016/09/chessboard.jpg?fit=698%2C400&ssl=1https://www.colorado.edu/mcdb/sites/default/files/styles/medium/public/article-image/logo-blm.png?itok=sbQ6vxqb''')
        display_image_slideshow(images,display_image_in_terminal)
    """
    #Pick a default display backend based on the current environment
    if display is None:
        if running_in_jupyter_notebook():
            #If we're in a jupyter notebook, by default display a gui.
            #However, if we want that default functionality, we can set display=display_image to override this
            _display_image_slideshow_animated(images)
            return
        elif running_in_ssh() and currently_in_a_tty():
            print('Currently running in SSH, so we will print the images into the terminal')
            display=display_image_in_terminal
        else:
            display=display_image
    #Wrap the terminal backends (hook point for cursor handling)
    if display in [display_image_in_terminal, display_image_in_terminal_color]:
        old_display = display
        def display(image):
            # _terminal_move_cursor_to_top_left()
            old_display(image)
    #Normalize `images` into a list of images and/or image paths/URLs
    if isinstance(images,str) and is_a_folder(images):
        images=get_all_paths(images,sort_by='number',include_files=True,include_folders=False)
        images=[path for path in images if is_image_file(path)]
    if len(images) and isinstance(images[0],str):
        assert all(isinstance(path,str) for path in images)
        images=[path for path in images if is_image_file(path) or is_valid_url(path)]
        #Todo: Make the images load lazily, but also somehow in parallel
        # images=load_images(images,use_cache=use_cache,strict=False)
    assert all(is_image(image) or is_image_file(image) for image in images)
    assert len(images)>0,'Must have at least one image to create a slideshow'
    index=0
    def display_help():
        #Print the interactive keymap
        print('r.image_slideshow: Displaying a slideshow of %i images'%len(images))
        print(' Use the following keymap:')
        print(' n: Go to the next image')
        print(' p: Go to the prev image')
        print(' r: Go to a random image')
        print(' #: Go to a selected image')
        print(' a: Play Animation - press any key to stop')
        print(' +: Zoom In')
        print(' -: Zoom Out')
        print(' l: Pan Right')
        print(' k: Pan Up')
        print(' j: Pan Down')
        print(' h: Pan Left')
        print(' q: Quit the slideshow')
        print(' ?: Display this help text')
    display_help()
    #Viewer state: pan origin, zoom scale, and a per-image cache of scaled copies
    skip_load=False
    origin_x=0
    origin_y=0
    scale=1
    scales={}
    def zoom_crop_origin(image):
        #Return the currently-visible crop of `image` at the current zoom/pan state
        #TODO: Don't waste time when scale=1
        #TODO: Fix the issue where it resets if scale is too large (play aronud with zoom pan to see what I mean)
        if scale not in scales:
            #Do a bit of memoization to speed things up
            if scale==1:
                scaled_image=image
            else:
                scaled_image=cv_resize_image(image,scale,interp='nearest')#Todo: Memoize this
            scales[scale]=scaled_image
        new_image=scales[scale]
        new_image=new_image[origin_y*scale:origin_y*scale+get_image_height(image), origin_x*scale:origin_x*scale+get_image_width(image)]
        return new_image
    autoplay=0
    def stop_autoplay_on_keypress():
        #Runs on a background thread: any keypress halts autoplay
        nonlocal autoplay
        input_keypress(handle_keyboard_interrupt=True)
        autoplay=0
    #Main interactive loop: draw current image, then react to one keypress
    while True:
        if not skip_load:
            #Moving to a new image: reset zoom/pan state and the scale cache
            index%=len(images)
            image=images[index]
            origin_x=0
            origin_y=0
            scale=1
            scales={}
        skip_load=False
        try:
            image_path=None
            if isinstance(image,str):
                image_path=image
                try:
                    image=load_image(image,use_cache=use_cache)
                except Exception:
                    print("Failed to load image: "+repr(image))
                    raise
            display(zoom_crop_origin(image))
            if scale!=1 or origin_x!=0 or origin_y!=0:
                print('Zoom Factor: %i X: %i Y:%i'%(scale,origin_x,origin_y))
            if image_path is not None:
                print("Image Location:",image_path)
            print('Displaying image #%i/%i, %ix%i'%(index+1,len(images),get_image_width(image),get_image_height(image)))
        except Exception as e:
            print('Failed to display image #%i/%i'%(index+1,len(images)))
            # print_stack_trace(e)
        if autoplay:
            #Autoplay mode: keep advancing without waiting for a keypress
            index += autoplay
            continue
        if currently_in_a_tty():
            key=input_keypress()
        else:
            key=input('Enter a key: ')
        #Image Navigation
        if key=='n':
            index+=1
        elif key=='p':
            index-=1
        elif key=='r':
            index=random_index(images)
        elif key=='#':
            print("Which image would you like to view?")
            index=input_integer(0,len(images)-1)
        elif key=='a':
            autoplay=1
            run_as_new_thread(stop_autoplay_on_keypress)
        #Panning and zooming
        elif key=='+':
            scale+=1
            scale=max(scale,1)
            skip_load=True
        elif key=='-':
            scale-=1
            scale=max(scale,1)
            skip_load=True
        elif key=='j':
            origin_y+=1
            origin_y+=int(max(1,get_image_height(image)/scale/10))
            origin_y=min(get_image_height(image)-1,max(origin_y,0))
            skip_load=True
        elif key=='k':
            origin_y-=1
            origin_y-=int(max(1,get_image_height(image)/scale/10))
            origin_y=min(get_image_height(image)-1,max(origin_y,0))
            skip_load=True
        elif key=='h':
            origin_x-=1
            origin_x-=int(max(1,get_image_width(image)/scale/10))
            origin_x=min(get_image_width(image)-1,max(origin_x,0))
            skip_load=True
        elif key=='l':
            origin_x+=1
            origin_x+=int(max(1,get_image_width(image)/scale/10))
            origin_x=min(get_image_width(image)-1,max(origin_x,0))
            skip_load=True
        #Exiting
        elif key=='q':
            break
        #Help
        elif key=='?':
            display_help()
def brutish_display_image(image):
    """
    Clamp every channel of `image` into [0,1] using plain Python loops, then
    display it and block until the matplotlib window is dismissed.
    """
    from copy import deepcopy
    global plt
    plt=get_plt()

    clamped=deepcopy(image)
    for row_index,row in enumerate(clamped):
        for pixel_index,pixel in enumerate(row):
            for channel_index,channel_value in enumerate(pixel):
                clamped[row_index][pixel_index][channel_index]=max(0,min(1,channel_value))

    display_image(clamped)
    plt.show(block=True)
def display_color_255(*color: list):
    """ Example: display_color_255(255,0,0)# ⟵ Displays Red """
    values = detuple(color)
    pixel_rows = (np.matrix(values) / 256).tolist()  #Scale 0-255 bytes into 0-1 floats
    display_image([pixel_rows])
def display_float_color(*color):
    """Display a single float color as a 128x128 swatch (alpha shown over black/white checkers)."""
    color=detuple(color)
    swatch=uniform_float_color_image(height=128, width=128, color=color)
    display_alpha_image(swatch,first_color=1,second_color=0)
def display_grayscale_image(matrix,pixel_interpolation_method_name: str = 'bicubic',refresh=True):
    """
    Show `matrix` as a grayscale matplotlib image.

    pixel_interpolation_method_name selects matplotlib's imshow interpolation; see
    http://stackoverflow.com/questions/14722540/smoothing-between-pixels-of-imagesc-imshow-in-matlab-like-the-matplotlib-imshow/14728122#14728122
    If refresh is True the plot window is redrawn immediately (non-blocking).
    """
    pixel_interpolation_method_name=str(pixel_interpolation_method_name).lower() # Note that None⟶'none'
    valid_methods=[None,'none','nearest','bilinear','bicubic','spline16','spline36','hanning','hamming','hermite','kaiser','quadric','catrom','gaussian','bessel','mitchell','sinc','lanczos']
    assert pixel_interpolation_method_name in valid_methods

    global plt
    plt=get_plt()

    #cmap='gray' renders black/white instead of matplotlib's default colormap
    plt.imshow(matrix,cmap=plt.get_cmap('gray'),interpolation=pixel_interpolation_method_name)

    if refresh:
        plt.draw()
        plt.show(block=False) # You can also use the r.block() method at any time if you want to make the plot usable.
        plt.pause(0.0001) # Necessary to let the GUI event loop breathe; removing it has caused crashes.
def bar_graph(values,*,width=.9,align='center',block=False,xlabel=None,ylabel=None,title=None,label_bars=False,**kwargs):
    """
    Draw a bar graph of the given bar heights with matplotlib.

    Args:
        values: list of real-valued bar heights
        width: the width of each bar
        align: 'center', 'left' or 'right' - where each bar sits relative to its integer index
        label_bars: if True, write each bar's value above it (works best with integers, as opposed to floats)
        xlabel, ylabel, title: optional axis/figure labels
        **kwargs: forwarded to plt.bar

    EXAMPLE: bar_graph(randints(10))
    """
    pip_import('matplotlib')
    plt=get_plt()

    assert align in {'center','left','right'}
    #Matplotlib only understands 'center' and 'edge':
    #  'right' becomes 'edge' with a negated width (like a right-riemann-sum)
    #  'left' becomes plain 'edge'
    if align=='right':
        width*=-1
        align='edge'
    if align=='left':
        align='edge'

    positions=list(range(len(values)))

    plt.clf()
    plt.bar(positions,values,width=width,align=align,**kwargs)

    if xlabel is not None: plt.xlabel(xlabel)
    if ylabel is not None: plt.ylabel(ylabel)
    if title is not None: plt.title(title)

    if label_bars:
        for position,value in enumerate(values):
            plt.text(x=position,y=value+1,s=str(value),size=10,ha='center')
def histogram_in_terminal(values,sideways=False):
    """
    Print a histogram of `values` into the terminal using plotille.

    Currently very simple (no bin-count control etc); meant for interactive use,
    not serious code. The 'sideways' argument may be renamed in the future
    (e.g. direction='horizontal').
    """
    pip_import('plotille')
    import plotille

    flat_values=as_numpy_array(values).flatten()

    #Note: `sideways==True` (not truthiness) is kept deliberately to preserve behavior
    if sideways==True:
        rendered=plotille.hist(flat_values,width=get_terminal_width()-33,bins=get_terminal_height()-0)
    else:
        rendered=plotille.histogram(flat_values,width=get_terminal_width()-20,height=get_terminal_height()-15)

    print(rendered)
def line_graph_via_plotille(
    y_values,
    x_values=None,
    width=None,
    height=None,
    y_min=None,
    y_max=None,
    x_min=None,
    x_max=None,
    background_color=None,
    line_color=None,
    xlabel="X",
    ylabel="Y",
    silent=False,
):
    """
    Draw a line graph in the terminal using Plotille.

    Args:
        y_values (list): The y-values of the data points.
        x_values (list, optional): The x-values; defaults to the indices of y_values.
        width (int, optional): Graph width in characters; defaults to terminal width minus margin.
        height (int, optional): Graph height in characters; defaults to terminal height minus margin.
        y_min, y_max, x_min, x_max (float, optional): Axis limits; default to the data extents.
        background_color (str, optional): Background color; None means transparent.
        line_color (str, optional): Line color; None means the default terminal text color.
        xlabel (str, optional): X-axis label. Defaults to "X".
        ylabel (str, optional): Y-axis label. Defaults to "Y".
        silent (bool, optional): If True, don't print the graph; just return it.

    Returns:
        str: The string representation of the graph.

    Example:
        >>> import math
        >>> x_values = [x / 10.0 for x in range(-50, 50, 1)]
        >>> y_values = [math.sin(x) for x in x_values]
        >>> graph = line_graph_via_plotille(y_values, x_values, line_color='blue', xlabel='Angle', ylabel='Sine', silent=True)
        >>> print(graph)
    """
    pip_import("plotille")
    import plotille

    if x_values is None:
        x_values = list(range(len(y_values)))

    #Fit the plot to the terminal when no explicit size is given
    plot_width  = width  if width  else get_terminal_width () - 20
    plot_height = height if height else get_terminal_height() - 13

    graph = plotille.plot(
        x_values,
        y_values,
        bg=background_color,
        lc=line_color,
        width=plot_width,
        height=plot_height,
        x_min=x_min,
        x_max=x_max,
        y_min=y_min,
        y_max=y_max,
        X_label=xlabel,
        Y_label=ylabel,
    )

    if not silent:
        print(graph)

    return graph
def line_graph_live(func, *, length=None, framerate=60, graph=None):
    """
    Poll `func` forever and redraw a line graph of its most recent return values.

    Args:
        func (callable): Called with no arguments each frame; returns the next number to plot.
        length (int, optional): Maximum number of values kept on screen. When set, the value
            list acts as a sliding window (like an oscilloscope). None keeps everything.
        framerate (int, optional): Target redraws per second. Defaults to 60.
        graph (callable, optional): Takes the list of values and draws it. Defaults to
            line_graph_via_plotille in a tty, otherwise line_graph.

    Examples:
        >>> #Using matplotlib
        ... line_graph_live(get_mouse_y, graph=line_graph, length=100)
        >>> #Using cv_line_graph
        ... line_graph_live(
        ...     get_mouse_y,
        ...     graph=lambda values: display_image(cv_line_graph(values, height=200, width=500)),
        ...     length=100,
        ... )
        >>> #Use plotille to plot everything
        ... line_graph_live(
        ...     get_mouse_y,
        ...     graph=lambda values:line_graph_via_plotille(values,line_color='cyan',background_color='red'),
        ...     length=100,
        ...     framerate=30,
        ... )
    """
    if graph is None:
        if currently_in_a_tty():
            graph = line_graph_via_plotille
        else:
            # TODO: Use JupyterDisplayChannel where applicable
            graph = line_graph

    seconds_per_frame = 1 / framerate
    history = []

    while True:
        frame_start = time.time()

        history.append(func())
        if length is not None and len(history) > length:
            history = history[-length:]

        graph(history)

        #Sleep off whatever time is left in this frame's budget
        frame_duration = time.time() - frame_start
        sleep(max(0, seconds_per_frame - frame_duration))
def line_graph_in_terminal(y):
    """
    Print a line graph of the values `y` to the terminal (via plotille).

    Args:
        y: An iterable of numbers to plot; x-values are their indices.

    Returns:
        str: The string representation of the graph.
            (Bugfix: the return value of line_graph_via_plotille used to be
            discarded, even though callers - see line_graph_via_plotille's
            doctest - expect the graph string back.)
    """
    return line_graph_via_plotille(y)
def line_graph(*y_values,
               show_dots: bool = False,
               clf: bool = True,
               ylabel: str = None,
               xlabel: str = None,
               use_dashed_lines: bool = False,
               line_color: str = None,
               title = None,
               block: bool = False,
               background_image = None,
               logx:float = None,
               logy:float = None) -> None:
    """
    Plot one or more line graphs with matplotlib.pyplot.

    This is mainly here as a simple reference for how to create a line-graph with matplotlib.pyplot.
    There are plenty of options you can configure for it, such as the color of the line, label of the
    axes etc. For more information on this, see http://matplotlib.org/users/pyplot_tutorial.html

    Args:
        *y_values: Either a single iterable of y-values, or several iterables
            (each iterable is drawn as its own line on the same axes).
        show_dots: If True, mark every data point with a dot.
        clf: If True, clear the current figure before plotting.
        ylabel, xlabel: Optional axis labels.
        use_dashed_lines: If True, draw with a dashed line style.
        line_color: Matplotlib color name such as 'red', 'green', 'cyan', 'blue'.
        title: Optional figure title.
        block: Passed through to display_update - True blocks until the window closes.
        background_image: NOTE(review): accepted but unused in this implementation - confirm intent.
        logx, logy: If truthy, use a log scale on that axis with the given base
            (passing True means base 2).
    """
    pip_import('matplotlib')
    global plt  # Module-level pyplot handle; other functions in this file (e.g. clf) read it
    plt=get_plt()
    if clf:
        plt.clf()
    def plot(values):
        # Plot a single curve, translating our keyword options into matplotlib kwargs
        kwargs={}
        if show_dots:
            # Put a dot on each point on the line-graph.
            kwargs['marker']='o'
        if use_dashed_lines:
            kwargs['linestyle']='--'
        if line_color:
            kwargs['color']=line_color # could be 'red' 'green' 'cyan' 'blue' etc
        plt.plot(values,**kwargs)
    try:
        plot(*y_values) # If this works, then y_values must have been a single-graph.
    except Exception: # y_values must have been an iterable of iterables, so we will graph each one on top of each other.
        # old_hold_value=plt.ishold() #This uses deprecated matplotlib stuff: https://github.com/matplotlib/matplotlib/issues/12337/
        # plt.hold(True) # This lets us plot graphs on top of each other.
        for y in y_values:
            plot(y)
        # plt.hold(old_hold_value)
    if ylabel:
        plt.ylabel(ylabel)
    if xlabel:
        plt.xlabel(xlabel)
    if title:
        plt.title(title)
    if logy:
        if logy is True:
            logy=2  # Default log base when logy=True
        plt.yscale('log',base=logy)
    if logx:
        if logx is True:
            logx=2  # Default log base when logx=True
        plt.xscale('log',base=logx)
    plt.draw()
    display_update(block=block)
    plt.pause(.001)
def display_polygon(path,*,
                    filled    =True,
                    fill_color=None,
                    line_width=1,
                    line_style='solid',
                    line_color=None,
                    clear     =False,
                    block     =False,
                    alpha     =1):
    """
    Draw a filled and/or outlined polygon using matplotlib.

    Parameters:
        line_width: The width of the border around the polygon (set to 0 for no border)
        line_style: Please see https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html
        line_color: The color of the outline aka border of the polygon (like (1,0,0) for red, etc)
        filled    : boolean whether we should fill the object or just use an outline
        fill_color: The color of the area of the polygon (like (1,0,0) for red, etc). Random if None.
        alpha     : The transparency value (1 is opaque, 0 is completely transparent)
        clear     : Whether we should clear the plot before drawing this polygon
        block     : True for an interactive plot that blocks the current python code; False to display immediately and continue python code; None to just plot it and skip the displaying step (which is faster and useful if you want to plot a lot of polygons at once)

    EXAMPLE: display_polygon(random_floats_complex(5),alpha=.5)

    Bugfix: the figure-clearing and polygon-construction code used to be
    copy-pasted three times in a row (clearing the figure and creating the
    Polygon patch repeatedly); it now runs exactly once. Also uses the
    keyword form Polygon(..., closed=True), since the positional `closed`
    argument is deprecated in newer matplotlib.
    """
    pip_import('matplotlib')
    from matplotlib.patches import Polygon
    from matplotlib import pyplot as plt

    path=as_points_array(path)
    if fill_color is None:
        fill_color=random_rgb_float_color()  # Pick a random fill color when unspecified

    if clear:
        plt.clf()

    if len(path): #Prevent edge case errors on empty paths
        #Setting up the polygon
        polygon=Polygon(path, closed=True)
        polygon.set_fill     (filled    )
        polygon.set_alpha    (alpha     )
        polygon.set_facecolor(fill_color)
        polygon.set_linewidth(line_width)
        polygon.set_linestyle(line_style)
        polygon.set_edgecolor(line_color)
        plt.axes().add_patch(polygon)

        #Autoscaling
        bounding_points=np.row_stack((np.max(path,axis=0),np.min(path,axis=0)))#Get two points representing the bounding box of path
        plt.plot(*bounding_points,marker='o')[0].set_visible(False)#Plot two invisible points on this bounding box, so that matplotlib will automatically rescale to accomidate whatever path you gave it

    #Displaying
    if block is not None:
        plt.show(block=block)
        if not block:
            plt.pause(.01)
def block(on_click=None,on_unclick=None):
    """
    Make the current matplotlib plot interactive, blocking the python script
    until the user closes the graph window.

    Args:
        on_click: optional callback invoked as on_click(x, y, button, dblclick)
            on mouse press (only when the click lands inside the axes)
        on_unclick: optional callback with the same signature, invoked on mouse release
    """
    _fig()#Initialize fig
    # You may specify methods you would like to overwrite here.
    # Makes the plot interactive, but also prevents python script from running until the user clicks closes the graph window.
    pip_import('matplotlib')
    import matplotlib.backend_bases
    def handler(function,event_data: matplotlib.backend_bases.MouseEvent):
        # Unpack the matplotlib event into plain args; skip events outside the
        # axes, where xdata/ydata come back as None
        args=event_data.xdata,event_data.ydata,event_data.button,event_data.dblclick
        if None not in args:
            function(*args)
    handler_maker=lambda function:lambda event:handler(function,event)
    if on_click is not None:
        assert callable(on_click)
        # def on_click(x,y,button,dblclick)
        _fig.canvas.mpl_connect('button_press_event',handler_maker(on_click))
    if on_unclick is not None:
        assert callable(on_unclick)
        # def on_unclick(x,y,button,dblclick)
        _fig.canvas.mpl_connect('button_release_event',handler_maker(on_unclick))
    # PLEASE NOTE THAT MORE METHODS CAN BE ADDED!!!!! A LIST OF THEM IS IN THE BELOW COMMENT:
    # - 'button_press_event'
    # - 'button_release_event'
    # - 'draw_event'
    # - 'key_press_event'
    # - 'key_release_event'
    # - 'motion_notify_event'
    # - 'pick_event'
    # - 'resize_event'
    # - 'scroll_event'
    # - 'figure_enter_event',
    # - 'figure_leave_event',
    # - 'axes_enter_event',
    # - 'axes_leave_event'
    # - 'close_event'
    # NOTE(review): relies on a module-level `plt` being set elsewhere (e.g. by line_graph) - confirm
    plt.show(True)
def display_update(block=False,time=.01):
    """
    Flush pending matplotlib drawing to the screen.
    This should be preferred over the older block() function above.

    block=True : show the figure and halt python until the window is closed
    block=False: draw immediately and keep the script running (pauses `time` seconds)
    block=None : do nothing - the convention used by display functions that want
                 to skip the (slow) display step, e.g. when batching many plots
    Note: If `time` is too low, you can try setting it to a higher value
    """
    pip_import('matplotlib')
    if block is None:
        # "None" means the caller doesn't actually want a display right now (speed convention)
        return
    import matplotlib.pyplot as plt
    if not block:
        plt.gcf().canvas.blit()
        plt.pause(time)
    else:
        plt.show(block=block)
update_display=display_update#Synonyms
def display_clear():
    """Clear the current matplotlib figure."""
    pip_import('matplotlib')
    from matplotlib import pyplot as plt
    plt.gcf().clf()
clear_display=display_clear#Synonyms
def clf():
    """
    Clear the current matplotlib figure (shorthand for pyplot.clf).

    Bugfix: this used to reference a module-level `plt` that only exists after
    line_graph() has run (which does `global plt`), raising NameError otherwise.
    It now imports pyplot itself, consistent with the sibling display_clear().
    """
    pip_import('matplotlib')
    import matplotlib.pyplot as plt
    plt.clf()
def display_cv_color_histogram(
    image,
    *,
    channels="rgb",
    linestyle="-",
    alpha=1,
    block=False,
    clf=True
):
    """
    Plot per-channel color histograms of an image using OpenCV and Matplotlib.

    Args:
        image (str or numpy.ndarray): The input image - a file path or a numpy array.
        channels (str, optional): Which channels to plot, as any combination of
            "r", "g", "b" and "a" (e.g. "r", "rgba", "bgr"). Defaults to "rgb".
        linestyle (str, optional): Linestyle of the plotted curves. Defaults to "-".
        alpha (float, optional): Transparency of the plotted curves. Defaults to 1.
        block (bool, optional): True blocks for interactive viewing; False displays
            without blocking; None skips the display step entirely. Defaults to False.
        clf (bool, optional): If True, clear the display before drawing. Defaults to True.
    """
    pip_import("cv2")
    pip_import("numpy")
    pip_import("matplotlib")

    if isinstance(image, str):
        image = load_image(image)

    if clf:
        display_clear()

    import numpy as np
    import cv2 as cv
    from matplotlib import pyplot as plt

    image = as_rgba_image(image)
    image = as_byte_image(image)

    channel_indices = {"r": 0, "g": 1, "b": 2, "a": 3}
    for channel in channels:
        # Alpha has no natural plot color, so draw it in black
        curve_color = "black" if channel == "a" else channel
        histogram = cv.calcHist([image], [channel_indices[channel]], None, [256], [0, 256])
        plt.plot(histogram, color=curve_color, linestyle=linestyle, alpha=alpha)

    plt.xlim([0, 256])
    plt.draw()
    display_update(block=block)
    plt.pause(0.001)
def display_cv_color_histograms(
    image1,
    image2,
    channels="rgb",
    block=False,
    clf=True,
):
    """
    Overlay the color histograms of two images for easy comparison.

    Args:
        image1 (str or numpy.ndarray): First input image - a file path or numpy array.
        image2 (str or numpy.ndarray): Second input image - a file path or numpy array.
        channels (str, optional): Which channels to plot, e.g. "rg" or "rgb". Defaults to "rgb".
        block (bool, optional): True blocks for interactive viewing; False displays
            without blocking; None skips drawing the plot. Defaults to False.
        clf (bool, optional): If True, clear the display first. Defaults to True.

    Example:
        >>> image1 = "/path/to/image1.jpg"
        >>> image2 = "/path/to/image2.png"
        >>> display_cv_color_histograms(image1, image2, channels="rg", block=True)
    """
    # First image: draw but defer display (block=None skips the display step)
    display_cv_color_histogram(
        image1,
        block=None,
        clf=clf,
        channels=channels,
    )
    # Second image: drawn semi-transparent on top of the first, then displayed
    display_cv_color_histogram(
        image2,
        block=block,
        clf=False,
        alpha=0.5,
        channels=channels,
    )
# endregion
# region Min/Max Indices/Elements:[min_valued_indices,max_valued_indices,min_valued_elements,max_valued_elements,max_valued_index,min_valued_index]
def _minmax_indices(l,f=None,key=None)->list:
    """Shared helper for the min/max index functions below; f is the builtin min or max."""
    if not len(l):
        return []  # Nothing to search
    if isinstance(l,dict):
        # For dicts, return the keys whose values are extremal
        return matching_keys(f(l.values(),key=key),l,key=key)
    return matching_indices(f(l,key=key),l,key=key)
def min_valued_indices(l,key=None)->list:
    """
    Return every index (or dict key) whose element has the minimum value
    TODO: Make this work properly with dicts, like max_valued_index does
    """
    return _minmax_indices(l,min,key=key)
def max_valued_indices(l,key=None)->list:
    """
    Return every index (or dict key) whose element has the maximum value
    TODO: Make this work properly with dicts, like min_valued_index does
    EXAMPLE:
        >>> max_valued_indices({'a':123,'b':23424})
        ans = ['b']
    """
    return _minmax_indices(l,max,key=key)
def min_valued_elements(l,key=None):
    """Return all elements tied for the smallest value"""
    return gather(l,min_valued_indices(l,key=key))
def max_valued_elements(l,key=None):
    """Return all elements tied for the largest value"""
    return gather(l,max_valued_indices(l,key=key))
def max_valued_index(l,key=None):
if isinstance(l,dict):
#Let this function work with dictionaries, such that max_valued_index({'a':1,'b':3,'c':2})=='b'
inverted_dict=invert_dict(l)
return inverted_dict[max(inverted_dict,key=key)]
return list(l).index(max(l)) # Gets the index of the maximum value in list 'l'. This is a useful def by rCode standards because it references 'l' twice.
def min_valued_index(l,key=None):
    """
    Return the index (for sequences) or key (for dicts) of the smallest element.

    Args:
        l: a sequence or dict to search
        key: optional function applied to elements before comparison

    Bugfixes versus the previous version:
        - `key` is now an actual parameter; the dict branch used to reference
          an undefined name `key` and crash with NameError
        - `key` is also respected on the sequence path, mirroring max_valued_index
    """
    if isinstance(l,dict):
        #Let this function work with dictionaries, such that min_valued_index({'a':1,'b':3,'c':2})=='a'
        inverted_dict=invert_dict(l)
        return inverted_dict[min(inverted_dict,key=key)]
    l=list(l)
    return l.index(min(l,key=key) if key is not None else min(l))
# endregion
# region Blend≣Lerp/sign: [blend,iblend,lerp,interp,linterp]
def blend(𝓍,𝓎,α): # Also known as 'lerp'
    """Linear interpolation: α=0 gives 𝓍, α=1 gives 𝓎, values between mix them (more α → more 𝓎, less 𝓍)"""
    weight_x = 1 - α
    weight_y = α
    return weight_x * 𝓍 + weight_y * 𝓎
def iblend(z,𝓍,𝓎): # iblend≣inverse blend. Solves for α, given 𝓏﹦blend(𝓍,𝓎,α)
    """Inverse of blend(): return the α such that blend(𝓍,𝓎,α)==z"""
    offset = z - 𝓍
    span   = 𝓎 - 𝓍
    return offset / span
def interp(x,x0,x1,y0,y1): # 2 point interpolation
    """Linearly map x from the range [x0,x1] onto [y0,y1]. See https://www.desmos.com/calculator/bqpv7tfvpy"""
    t = (x - x0) / (x1 - x0)
    return t * (y1 - y0) + y0
def linterp(values: list, index: float, *, cyclic=False, blend_func=blend):
    """
    Linearly interpolates between different values with fractional indices.
    This is written in pure python, so any values that implement addition, subtraction and multiplication will work
    (This includes floats, vectors, and even images)
    Note that linterp(values,some_integer) == values[some_integer] for any valid integer some_integer
    Args:
        values (list): A list of values to interpolate between. These values should support addition, subtraction,
                       and scalar multiplication.
        index (float): The fractional index at which to interpolate. If cyclic=False, index should be in the range
                       [0, len(values)-1]. If cyclic=True, index can be any real number and will be wrapped around
                       to fall within the valid range.
        cyclic (bool, optional): If True, the interpolation will treat the values list as cyclic, wrapping around
                                 from the last element back to the first. Default is False.
        blend_func (function, optional): The function used to blend between two adjacent values. Default is the
                                         blend function, which performs a linear interpolation.
    Returns:
        The interpolated value at the specified index. The type of the returned value will match the type of the
        elements in the values list.
    Raises:
        AssertionError: If index is not a number or values is not an iterable.
        IndexError: If index is out of bounds and cyclic is False.
    EXAMPLE: INTERPOLATING VECTORS
        >>> as_numpy_array([ linterp( as_numpy_array([[0,1], [0,0], [1,0]]), index) for index in [0, .5, 1, 1.5, 2] ])
        ans = [[0.  1. ]
               [0.  0.5]
               [0.  0. ]
               [0.5 0. ]
               [1.  0. ]]
    EXAMPLE: INTERPOLATING IMAGES
        >>> mountain=load_image('https://cdn.britannica.com/67/19367-050-885866B4/Valley-Taurus-Mountains-Turkey.jpg')
        ... chicago=load_image('https://pbs.twimg.com/media/EeqFCjvWkAI-rv_.jpg')
        ... doggy=load_image('https://s3-prod.dogtopia.com/wp-content/uploads/sites/142/2016/05/small-dog-at-doggy-daycare-birmingham-570x380.jpg')
        ... images=[resize_image(image,(256,256)) for image in [mountain,chicago,doggy]]
        With cyclic=True, it will loop through the images
        >>> for index in np.linspace(0,10,num=100):
        ...     frame=linterp(images,index,cyclic=True)
        ...     display_image(frame)
        ...     sleep(1/30)
        With cyclic=False, it will play the animation only once
        >>> for index in np.linspace(0,2,num=100):
        ...     frame=linterp(images,index,cyclic=False)
        ...     display_image(frame)
        ...     sleep(1/30)

    Bugfixes versus the previous version:
        - The second assertion's error message printed type(index) where it
          meant type(values)
        - The IndexError hint ("did you mean to enable the 'cyclic' parameter?")
          was printed only when cyclic was already True; the condition was
          inverted - the hint only makes sense when cyclic is False
    """
    assert is_number(index),'The \'index\' parameter should be a single number (which can be a float, but doesnt have to be), but got type '+str(type(index))
    assert is_iterable(values),'The \'values\' parameter should be a list of values you\'d like to interpolate between, but type '+str(type(values))+' is not iterable and does not have numerical indices'
    l=values
    x=index
    try:
        if cyclic:
            x%=len(l)
            l=list(l)
            l=l+[l[0]]# Don't use append OR += (which acts the same way apparently); this will mutate l!
        assert x>=0
        x0=int(np.floor(x))
        x1=int(np.ceil(x))
        if x0==x1:
            # x is a whole number - no blending needed
            return l[int(x)]
        return blend_func(l[x0],l[x1],iblend(x,x0,x1))
    except IndexError:
        if not cyclic:
            fansi_print("ERROR: r.linterp: encountered an index error; did you mean to enable the 'cyclic' parameter?",'red')
        raise
# def sign(x):
# return 1 if x>0 else (0 if x==0 else -1)
# endregion
# region Gathering/Matching: [matching_indices,gather,pop_gather]
def matching_keys(x,d:dict,check=lambda x,y:x==y,key=None)->list:
    """
    Returns a list [k0,k1,...] of every key ki such that d[ki] matches x
    EXAMPLE:
        matching_keys('a',{3:'c','q':'a',():'a'}) ==== ['q',()]

    Args:
        x: the value to search for
        d: the dict whose values are searched
        check: predicate deciding whether two (key-transformed) values match
        key: optional function applied to values before comparison

    Bugfixes versus the previous version:
        - The loop variable was named `key`, shadowing the `key` function and
          crashing with TypeError for any non-callable dict key
        - The `check` predicate was accepted but silently ignored; it is now
          honored, consistent with matching_indices
    """
    assert isinstance(d,dict)
    if key is None:key=lambda value:value  # Default: compare raw values (identity)
    out=[]
    for dict_key,value in d.items():
        if check(key(x),key(value)):
            out.append(dict_key)
    return out
def matching_indices(x,l,check=lambda x,y:x == y,key=None)->list:
"""
Retuns a list [x0,x1,...] such that for all xi, l[xi]=x
EXAMPLE:
matching_indices('a',['a','b','c','a','t']) ==== [0,4]
matching_indices('a','abcat') ==== [0,4]
matching_indices('a',{3:'c','q':'a',():'a'}) ==== ['q',()]
Returns the matching indices of element 'x' in list 'l'
"""
if key is None:key=identity
if isinstance(l,dict):
#Let this function work for dicts too
return matching_keys(x,l,check=check)
out=[]
for i,y in enumerate(l):
if check(key(x),key(y)):
out.append(i)
return out
def gather(iterable,*indices,as_dict=False):
    """
    Pick out the elements of `iterable` at the given indices (or keys).

    Args:
        iterable: anything supporting __getitem__ with the given indices
        *indices: the indices/keys to select (a single list/tuple of them also works)
        as_dict: if True, return {index: value} instead of a list of values
    TODO: Add skip_missing or strict option (idk which yet but probably skip_missing if following in lines with gather_vars)
    """
    indices=detuple(indices)
    if isinstance(indices,str):
        # A lone string is one key - don't iterate it character-by-character
        indices=[indices]
    assert is_iterable(iterable),"The 'iterable' parameter you fed in is not an iterable!"
    assert is_iterable(indices),"You need to feed in a list of indices, not just a single index. indices == " + str(indices)
    if as_dict:
        return {index:iterable[index] for index in indices}
    return [iterable[index] for index in indices]
def pop_gather(x,*indices):
    """
    Remove and return the elements of `x` at the given indices
    ('pop' in the CSE214 stack sense).

    Simultaneously deleting multiple indices from a list is tricky because
    every deletion shifts the indices after it. This walks the sorted unique
    indices in order, compensating by subtracting how many deletions have
    already happened.

    Example:
        >>> ⵁ = ['0', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
        >>> pop_gather(ⵁ,1,3,5,7,9)
        ans = ['a', 'c', 'e', 'g', 'i']
        >>> ⵁ
        ans = ['0', 'b', 'd', 'f', 'h']
    """
    indices=detuple(indices)
    popped=gather(x,indices)
    for shift,index in enumerate(sorted(set(indices))):
        del x[index - shift]
    return popped
def gather_vars(*var_names, frames_back=1, skip_missing=False, as_dict=True):
    """
    TODO: Elaborate on frames_back = ... functionality for getting ALL frames back - we want a min_frames_back and max_frames_back
        Better yet use slice objects, and gather_args_wrap(func)[2:]() lets us specify the frames_back via slices
        Also perhaps we would have [2,...,callable] or even [2,...,func_name,...func_name] -- or something likke that....its complicated :(
    Collect the given variable names from the specified scope into an EasyDict.
    This function takes any number of variable names in different formats as arguments and
    collects them into an EasyDict. The variable names can be provided as separate strings,
    space-separated strings, lists of strings, or any combination of these formats.
    Args:
        *var_names: Variable names to be gathered. These can be provided in various formats.
        frames_back: An integer specifying the number of frames to go back to find the correct scope. Default is 1.
        skip_missing: If True, the output will simply omit any variables it can't find. Otherwise, it will throw an error if any variables are missing.
    Returns:
        An EasyDict containing the variables specified by the given names.
    Examples:
        a = 1
        b = 2
        c = 3
        d = 4
        e = 5
        # Different input formats for gather_vars
        result1 = gather_vars('a', 'b', 'c', 'd', 'e')
        result2 = gather_vars('a b c d e')
        result3 = gather_vars(['a', 'b', 'c', 'd', 'e'])
        result4 = gather_vars('a', 'b', ['c', 'd', 'e'])
        result5 = gather_vars(['a', 'b', 'c'], 'd', 'e')
        result6 = gather_vars('a b', ['c', 'd'], {'e'})
        # All results are equivalent and have the same values
        assert result1 == result2 == result3 == result4 == result5 == result6
    Examples:
        >>> a=5
        >>> b=6
        >>> del c
        >>> gather_vars('a b c',skip_missing=True)
        ans = {'a': 5, 'b': 6}
        >>> gather_vars('a b c',skip_missing=False)
        ERROR: KeyError: "Can't find variable 'c'"
    Written partially with GPT4: https://shareg.pt/g9N1X3U
    """
    if not as_dict:
        # A list result can't represent "holes", so skip_missing is incompatible
        assert not skip_missing, 'rp.gather_vars: Cannot have as_dict and skip_missing. If we return a list of vars instead of a dict, there cannot be any gaps'
    assert frames_back==... or frames_back>=1, 'gather_vars is useless if we don\'t look at least one frame back'
    # frames_back==... means "search all frames"; still start one frame up
    min_frames_back = 1 if frames_back==... else frames_back
    pip_import('easydict')
    import itertools
    import inspect
    from easydict import EasyDict
    # Normalize the mixed input formats into a flat list of names
    flattened_var_names = list(itertools.chain.from_iterable(
        (arg.split() if isinstance(arg, str) else arg) for arg in var_names))
    frame = inspect.currentframe()
    for _ in range(min_frames_back):
        frame = frame.f_back
    #local_vars = frame.f_locals <-- Old version only looked for vars in one frame
    def is_comprehension(frame):
        #Check if a frame is in a list comprehension, or dict comprehension etc
        #https://chat.openai.com/share/5cd8b897-402c-4e64-97c9-e8bf32e6f930
        if frame is None: return False
        name=frame.f_code.co_name
        return name.startswith('<') and name.endswith('>') and 'comp' in name
    #Get variables from all frames equal to or behind frames_back
    #QUESTION: Is this equivalent to f_globals? ANSWER: No, after testing - not all vars are in globals
    frame_locals = []
    needed_vars = set(flattened_var_names)
    while frame is not None and needed_vars:
        was_comprehension = is_comprehension(frame)
        needed_vars -= set(frame.f_locals) # In deep stacks, avoid traversing if we already have what we need
        frame_locals.append(frame.f_locals)
        frame=frame.f_back
        if was_comprehension or frames_back==...:
            #I can't think of an instance where I want the search to stop in the list comprehension - this seems confusing to read and debug - asking 'why does the scope stop here??'
            continue
        else:
            break
    # Earlier (closer) frames take precedence over outer frames
    local_vars =merged_dicts(frame_locals,precedence='first')
    result_dict = {}
    for name in flattened_var_names:
        if name in local_vars:
            result_dict[name] = local_vars[name]
        elif not skip_missing:
            raise KeyError("Can't find variable '%s'"%name)
    if not as_dict:
        assert not skip_missing
        # Return values in the same order the names were requested
        return gather(result_dict, flattened_var_names)
    return EasyDict(result_dict)
def bundle_vars(*args, **kwargs):
    """
    Collect the given variables from the calling scope into an EasyDict.
    This function takes any number of local variable names as arguments and
    collects them into an EasyDict. Optionally, you can also pass additional
    key-value pairs as keyword arguments to be included in the output.
    Note: This function will raise a ValueError if an expression is passed as an argument,
    as it only supports variable names (e.g., bad: bundle_vars(a + b), good: bundle_vars(a, b)).
    Examples:
        x = 1
        y = 2
        result = bundle_vars(x, y)
        print(result.x)  # Output: 1
        print(result.y)  # Output: 2
        a = 5
        b = 6
        c = 11
        result = bundle_vars(a, b, c, extra_var=123)
        print(result.a)  # Output: 5
        print(result.b)  # Output: 6
        print(result.c)  # Output: 11
        print(result.extra_var)  # Output: 123
    Raises:
        ValueError: If an expression is passed as an argument (e.g., bad: bundle_vars(a + b), good: bundle_vars(a, b))
    Written partially with GPT4: https://shareg.pt/g9N1X3U
    """
    #TODO: This currently only works when the arguments are put *ON THE SAME LINE*. It also can't handle when two bundle_vars are on the same line.
    #TODO: Look into the implementation of icecream to figure out how. Icecream has some nice classes like Source, that when you use pudb for an icecream.ic call, you'll see
    pip_import('easydict')
    pip_import('astor')
    import ast
    import inspect
    import astor
    from easydict import EasyDict
    frame = inspect.currentframe().f_back  # Get the previous frame (the calling function's frame)
    line = inspect.getframeinfo(frame).code_context[0].strip()  # Get the line of code that called bundle_vars
    parsed_code = ast.parse(line)  # Parse the line of code into an AST (Abstract Syntax Tree)
    # Find the bundle_vars function call node in the AST
    call_node = None
    for node in ast.walk(parsed_code):
        if isinstance(node, ast.Call) and hasattr(node.func, "id") and node.func.id == "bundle_vars":
            call_node = node
            break
    if call_node is not None:
        # Check if any argument is an expression and raise an error if so
        for arg in call_node.args:
            if not isinstance(arg, ast.Name):
                raise ValueError("Only variable names are supported, expressions are not allowed (e.g., bad: bundle_vars(a + b), good: bundle_vars(a, b))")
        variable_names = [astor.to_source(arg).strip() for arg in call_node.args]  # Get the variable names passed to bundle_vars
        # frames_back=2: one for gather_vars itself, one for this function, landing in our caller's scope
        result_dict = gather_vars(*variable_names, frames_back=2)  # Use gather_vars to get the variables from the calling scope
        result_dict.update(kwargs)  # Add any extra keyword arguments to the dictionary
        return EasyDict(result_dict)  # Return the result as an EasyDict
    else:
        raise RuntimeError("Couldn't find the variable names")
def gather_attrs(x, *attrs, as_dict=False):
    """
    Fetch several attributes of `x` by name.

    Names may be given as separate arguments and/or space-separated strings:
        li, si = gather_attrs(rp, 'load_image save_image')

    Args:
        x: the object to read attributes from
        *attrs: attribute names (strings, each possibly containing several space-separated names)
        as_dict: if True, return an EasyDict of {name: value}; otherwise a list of values
    """
    names = ' '.join(attrs).split()
    if as_dict:
        return as_easydict({name: getattr(x, name) for name in names})
    return [getattr(x, name) for name in names]
def destructure(d: dict) -> tuple:
    """
    Extracts values from a dictionary based on the variable names in the
    assignment expression in the calling line of code.
    Mimics Javascript's destructuring assignment feature.
    The main purpose of this function is to make your code just a little shorter and less redundant.
    It compliments rp.bundle_vars and rp.gather_vars quite nicely.
    Note: This function should be considered voodoo, as it's very strange!
    It's extremely convenient though and can make your life easier.
    It can make your code less redundant, but relies on being able to find
    you source code - an assumption which doesnt always hold (for example,
    in ptpython or the default python repl. Jupyter and rp work fine though.)
    Parameters
    ----------
    d : dict
        The dictionary from which to extract values.
    Returns
    -------
    tuple or value
        A tuple of extracted values, or a single value if only one is extracted.
    Examples
    --------
    d = {'x': 1, 'y': 2, 'z': 3}
    # Destructuring into multiple variables
    >>> x, y = destructure(d)
    >>> print(x, y)
    1 2
    # Destructuring into a single variable
    >>> z = destructure(d)
    >>> print(z)
    3
    # Useful for getting kwargs out
    def make_color(**kwargs):
        red,green,blue = destructure(kwargs)
    Pitfalls
    --------
    # Variables on the left-hand side must match keys in the dictionary.
    >>> a, b = destructure(d)
    KeyError: 'Key not found in the provided dictionary.'
    # The function must be used within an assignment operation.
    >>> destructure(d)
    ValueError: 'Destructuring must be used within an assignment operation.'
    # The function doesn't support nested destructuring.
    >>> d = {'p': {'q': 4}}
    >>> p.q = destructure(d)
    AttributeError: 'tuple' object has no attribute 'q'
    # Multi-line assignments are not supported
    >>> a, \\
    ... b = destructure(d)
    TypeError: 'Cannot unpack non-iterable int object.'

    Bugfix: the no-assignment case used to `raise Error(...)` - an undefined
    name, which produced a NameError instead of the documented ValueError.
    """
    import inspect
    import ast
    # Get the source code of the line that called this function
    frame = inspect.currentframe().f_back
    info = inspect.getframeinfo(frame)
    code = info.code_context[0].strip()
    # Use the ast module to parse the source code into a syntax tree
    tree = ast.parse(code)
    try:
        # Find the Assign node (i.e., the assignment operation)
        assign_node = next(node for node in ast.walk(tree) if isinstance(node, ast.Assign))
        # Check if there are multiple assignment targets
        if isinstance(assign_node.targets[0], ast.Tuple):
            # Extract the variable names from the left-hand side of the assignment
            var_names = [target.id for target in assign_node.targets[0].elts]
        else:  # Single target
            var_names = [assign_node.targets[0].id]
    except StopIteration:
        # No Assign node found - destructure() wasn't used in an assignment
        raise ValueError("Destructuring must be used within an assignment operation.")
    # Use the variable names as keys to get the corresponding values from the dictionary
    values = tuple(d[name] for name in var_names)
    # Return single value instead of a tuple if there is only one value
    if len(values) == 1:
        return values[0]
    return values
def gather_args(func, *args, frames_back=1, **kwargs):
"""
Gathers the necessary positional arguments and keyword arguments to call the given function.
This function collects the required arguments and keyword arguments for the specified function
based on a priority ordering. It retrieves the values from the caller's scope, default values,
and overridden values provided through *args and **kwargs.
Priority ordering (highest to lowest):
1. Overridden keyword arguments (**kwargs)
2. Overridden positional arguments (*args)
3. Varargs and varkw:
Overridden keyword arguments' varkw variable's keyword arguments
Overridden keyword arguments' varargs variable's positional arguments
4. Gathered arguments and keyword arguments from the caller's scope
5. Varargs and varkw:
Gathered varkw variable's keyword arguments
Gathered varargs variable's positional arguments
6. Default arguments and keyword arguments from the function signature
Args:
func (callable): The function for which to gather arguments and keyword arguments.
*args: Positional arguments to override the gathered values.
frames_back (int, optional): The number of frames to go back in the caller's scope to gather variables. Defaults to 1.
NOTE: frames_back will not be inferred from the environment, it's the only special keyword argument here!
**kwargs: Keyword arguments to override the gathered values.
Returns:
tuple: A tuple containing the gathered positional arguments and keyword arguments.
- out_args (list): The gathered positional arguments.
- out_kwargs (dict): The gathered keyword arguments.
Raises:
TypeError: If the number of positional arguments provided exceeds the number of positional arguments
in the function signature and the function does not have a *varargs parameter.
TypeError: If a required argument is missing from the gathered variables and is not provided
through *args, **kwargs, or the function's default values.
TODO: Use inspect.signature instead of inspect.getfullargspec, because getfullargspec is old - and functools.wrap doesn't change those signatures
If we want to use this with memoized or stack this on other decorators, it has to read these arguments more robustly
EXAMPLE USE CASE:
# gather_args can be used to greatly simplify code where functions need a lot of each other's variables
def f(a,b,c,d,e,f,g):
return a+b+c+d+e+f+g
#Without gather_args
def g(a,b,c,d,e,f,g):
print(f(a,b,c,d,e,f,g))
#With gather_args (equivalent)
def g(a,b,c,d,e,f,g):
args, kwargs = gather_args(f)
print(f(*args,**kwargs))
#With our sister function, gather_args_wrap:
def g(a,b,c,d,e,f,g):
print(gather_args_wrap(f)())
#With our other sister function, gather_args_call:
def g(a,b,c,d,e,f,g):
print(gather_args_call(f))
EXAMPLES:
def example_func(a, b=2, /, c=3, *d, e, f, g=1, h=2, i=3, **kw):
pass
# Gather arguments and keyword arguments
a = 10
e = 50
f = 60
kw = dict(z=123)
args, kwargs = gather_args(example_func, frames_back=1)
# args: [10, 2, 3]
# kwargs: {'e': 50, 'f': 60, 'g': 1, 'h': 2, 'i': 3, 'z': 123}
# Override arguments and keyword arguments
args, kwargs = gather_args(example_func, 100, e=500, f=600, j=1000, kw=dict(q=321), frames_back=1)
# args: [100, 2, 3]
# kwargs: {'e': 500, 'f': 600, 'g': 1, 'h': 2, 'i': 3, 'j': 1000, 'q': 321}
EXAMPLES:
# These examples are meant to cover many edge cases
>>> def no_kwargs_func(a,b,c,d):
pass
a=1
b=2
>>> gather_args(no_kwargs_func)
ERROR: AssertionError: Missing variables for function call: c, d
>>> gather_args(no_kwargs_func,c=3,d=4)
ans = ([1, 2, 3, 4], {})
>>> gather_args(no_kwargs_func,3,4)
ERROR: AssertionError: Missing variables for function call: c, d
>>> gather_args(no_kwargs_func,None,None,3,4)
ans = ([None, None, 3, 4], {})
>>> gather_args(no_kwargs_func,None,None,3,4,a=a,b=b) #Overriding the None's with kwargs, which have higher priority
ans = ([1, 2, 3, 4], {})
>>> gather_args(no_kwargs_func,c=3,d=4,e=5)
ERROR: AssertionError: Too many keyword arguments given to a function without **kwargs: e
>>> gather_args(no_kwargs_func,5,6,7,8)
ans = ([5, 6, 7, 8], {})
>>> gather_args(no_kwargs_func,5,6,7,8,9)
ERROR: AssertionError: Too many args specified for a function without varargs!
EXAMPLES:
# Showing how priority ordering affects *args for a func
>>> def varargs_func(a,b,*varargs):
pass
a=1
b=2
>>> gather_args(varargs_func)
ans = ([1, 2], {})
>>> gather_args(varargs_func,3,4,5)
ans = ([3, 4, 5], {})
>>> gather_args(varargs_func,varargs=[3,4,5])
ans = ([1, 2, 3, 4, 5], {})
>>> varargs=[6,7]
>>> gather_args(varargs_func)
ans = ([1, 2, 6, 7], {})
>>> gather_args(varargs_func,8,9) #Varargs can be inferred from the scope
ans = ([8, 9, 6, 7], {})
>>> gather_args(varargs_func,8,9,10) #If varargs are directly specified, they take precedent
ans = ([8, 9, 10], {})
>>> gather_args(varargs_func,8,9,varargs=[1,2])
ans = ([8, 9, 1, 2], {})
>>> gather_args(varargs_func,8,9,varargs=[1,2],b=None)
ans = ([8, None, 1, 2], {})
### Continued: What if specified varargs isn't iterable? Answer: It will be ignored
>>> varargs=None #The variable exists but it no longer iterable, so it won't be used for varargs. Something else can override it.
>>> gather_args(varargs_func,8,9)
ans = ([8, 9], {})
>>> gather_args(varargs_func,8,9,varargs='ABC')
ans = ([8, 9, 'A', 'B', 'C'], {})
>>> args=[1,2,3]
>>> gather_args(varargs_func,8,9,args=None) #Similarily, if the manually specified args isn't iterable - that will also be ignored
ans = ([8, 9, 1, 2, 3], {})
FOR REFERENCE: EXAMPLE OF HOW WE GET ARGS AND KWARGS FROM FUNC:
>>> def f(a,b=2,/,c=3,*d,e,f,g=1,h=2,i=3,**kwargs):pass
>>> inspect.getfullargspec(f)
ans = FullArgSpec(
args=['a', 'b', 'c'],
varargs='d', #Or None
varkw='kwargs', #Or None
defaults=(2, 3),
kwonlyargs=['e', 'f', 'g', 'h', 'i'],
kwonlydefaults={'g': 1, 'h': 2, 'i': 3},
annotations={}
)
>>> inspect.getfullargspec(f)
>>> get_positional_only_arg_names(f)
ans = ['a', 'b']
"""
assert frames_back>=1, 'gather_args is useless if we don\'t look at least one frame back'
import inspect
# Get the full argument specification of the function
fullargspec = inspect.getfullargspec(func)
func_arg_names = fullargspec.args
func_kwarg_names = fullargspec.kwonlyargs
#These are optional and might be none, but cannot be empty string
varkw = fullargspec.varkw
varargs = fullargspec.varargs
#Priority #6: Get default variables: the default values given in the function signature
pos_arg_defaults = fullargspec.defaults or []
num_pos_arg_defaults = len(pos_arg_defaults)
default_arg_vars = {name:value for name,value in zip(func_arg_names[-num_pos_arg_defaults:],pos_arg_defaults)}
default_kwarg_vars = dict(fullargspec.kwonlydefaults or {})
assert not (set(default_arg_vars) & set(default_kwarg_vars)), 'This should be impossible - args and kwargs should not have a name conflict in a function signature'
default_vars = {**default_arg_vars, **default_kwarg_vars}
varargs_value = [] #There cant be default value for varargs
#Used by gather_args_bind to allow us to construct partial outputs
do_replace_missing=hasattr(func, 'gather_args_placeholder')
if do_replace_missing:
placeholder = func.gather_args_placeholder
def maybe_add_varkw(variables:dict):
if varkw in variables:
varkw_variables = variables[varkw]
del variables[varkw] #Don't keep it - it cant be passed as a kwarg to the func directly, it has to be expanded
try:
#varkw gets lower priority than originals
varkw_variables = dict(varkw_variables) #If this line errors, skip it
return {**varkw_variables, **variables}
except Exception:
#The gathered varkw simply wasn't a valid set of kwargs
#Dont throw an error for this though - right?
pass
return variables
def maybe_replace_varargs(variables:dict):
nonlocal varargs_value
if varargs in variables:
try:
varargs_value = tuple(variables[varargs])
except Exception:
#The varargs_value wasn't iterable, its ok - ignore it
#Let something else take priority
pass
del variables[varargs]
return varargs_value
#Priority #4: Get gathered variables: the variables in the scope of the caller
gathered_vars = gather_vars(
func_arg_names,
func_kwarg_names,
[varargs] * bool(varargs),
[varkw] * bool(varkw),
skip_missing=True,
frames_back=frames_back+1,
)
gathered_vars = maybe_add_varkw(gathered_vars) #Priority #5
varargs_value = maybe_replace_varargs(gathered_vars)
##Priority #1: Get variable overrides: the *args and **kwargs passed to this function
override_args = {name:value for name,value in zip(func_arg_names,args)} #Priority #1
override_kwargs = dict(kwargs) #Priority #2
override_vars = {**override_args, **override_kwargs}
override_vars = maybe_add_varkw(override_vars) #Priority #3
varargs_value = maybe_replace_varargs(override_vars)
if len(args)>len(func_arg_names):
assert varargs, 'Too many args specified for a function without varargs!' #TODO: This shouldn't be an assertion, should be an error
varargs_value = args[len(func_arg_names):]
#Compose the priorities together into available_vars - varags will be handles at the end after validation
available_vars = {}
available_vars.update(default_vars)
available_vars.update(gathered_vars)
available_vars.update(override_vars)
#Get all variables we need but don't have yet
get_missing_names = lambda: (set(func_arg_names) | set(func_kwarg_names)) - set(available_vars)
#Used by gather_args_bind
if do_replace_missing:
#Replace any missing values with the default value if applicable
for name in get_missing_names():
available_vars[name]=placeholder
#Validation: Make sure we have enough args - TODO: this should be a custom exception not an assertion though
assert not get_missing_names(), 'Missing variables for function call: '+', '.join(sorted(get_missing_names()))
out_args = []
out_kwargs = {}
for arg_name in func_arg_names:
out_args.append(available_vars[arg_name])
del available_vars[arg_name]
out_args += varargs_value
out_kwargs.update(available_vars)
if not varkw:
assert not len(out_kwargs) < len(func_kwarg_names), 'This should be impossible at this point'
assert len(out_kwargs) == len(func_kwarg_names), 'Too many keyword arguments given to a function without **kwargs: '+', '.join(sorted(set(out_kwargs)-set(func_kwarg_names)))
return out_args, out_kwargs
def gather_args_call(func, *args, frames_back=1, **kwargs):
    """
    Gather *func*'s arguments from the caller's scope with rp.gather_args, then call it.

    See the docstring of `rp.gather_args` for the full priority rules.

    Args:
        func (callable): The function to invoke with the gathered arguments.
        *args: Positional overrides for the gathered values.
        frames_back (int, optional): How many frames up to look for variables. Defaults to 1.
        **kwargs: Keyword overrides for the gathered values.

    Returns:
        Any: Whatever *func* returns.

    Note:
        gather_args_call will not grab from globals - if you want that, call
        gather_args_call on the module-level function that itself calls
        gather_args_call. This design choice encourages clean usage.

    Example:
        >>> def example_func(a, b, c):
        ...     print(f"a={a}, b={b}, c={c}")
        >>> a, b, c = 1, 2, 3
        >>> gather_args_call(example_func)
        a=1, b=2, c=3

    Example:
        # Configuration variables defined once in scope...
        db_host = 'localhost'
        db_port = 5432
        api_url = 'https://api.example.com'
        # ...are routed to each function automatically:
        db_connection  = gather_args_call(connect_to_database)
        api_data       = gather_args_call(load_data_from_api)
        processed_data = gather_args_call(process_data)
    """
    # +1 so gather_args inspects our caller's frame, not this wrapper's frame
    gathered_args, gathered_kwargs = gather_args(func, *args, frames_back=frames_back + 1, **kwargs)
    return func(*gathered_args, **gathered_kwargs)
def gather_args_wrap(func, *, frames_back=1):
    """
    Decorator: make *func* gather its arguments from the scope where it is CALLED.

    Unlike gather_args_bind, values are looked up freshly on every invocation —
    the scope at call time, not at wrap time, is what matters.
    See the docstring of `rp.gather_args` for the full priority rules.

    Args:
        func (callable): The function to wrap.
        frames_back (int, optional): Extra frames to skip when gathering. Defaults to 1.

    Returns:
        callable: A wrapper that calls *func* with gathered arguments.

    TODO: Make this play nice with rp.memoized, right now they don't like each other
    TODO: Make gather_args_call implemented with gather_args_wrap, not the other way
          around - that way we can still use the frames_back argument in gather_args_wrap

    Example:
        >>> @gather_args_wrap
        ... def example_func(a, b, c):
        ...     print(f"a={a}, b={b}, c={c}")
        >>> a, b, c = 1, 2, 3
        >>> example_func()
        a=1, b=2, c=3
        >>> c = 999
        >>> example_func()
        a=1, b=2, c=999
    """
    import functools

    @functools.wraps(func)
    def gathered(*call_args, **call_kwargs):
        # +1 compensates for this wrapper frame itself
        return gather_args_call(func, *call_args, frames_back=frames_back + 1, **call_kwargs)

    return gathered
def gather_args_bind(func, *args, frames_back=1, **kwargs):
    """
    Like gather_args_wrap, but binds the values in the namespace upon creation.

    Any arguments that cannot be found at bind time are recorded with a unique
    placeholder object; calling the bound function without supplying them raises
    a descriptive TypeError listing exactly which arguments are still missing.

    TODO: Use inspect.signature instead of inspect.getfullargspec, because getfullargspec is old - and functools.wrap doesn't change those signatures
        If we want to use this with memoized or stack this on other decorators, it has to read these arguments more robustly

    EXAMPLE:
        >>> def f(x,y):
               print(x,y)
            x=1
            y=2
            b=gather_args_bind(f)
            w=gather_args_wrap(f)
        >>> b()
        1 2
        >>> w()
        1 2
        >>> x=3
        >>> b() # bound values are frozen at bind time
        1 2
        >>> w() # wrap re-gathers at every call
        3 2

    EXAMPLE:
        >>> def f(x,y,z):
               print(x,y,z)
            y=123
            g=gather_args_bind(f)
        >>> g(1,z=99)
        1 123 99
        >>> g(1)
        ERROR: TypeError: rp.gather_args_bind(f): 1 missing arguments:
            Missing Positional Arguments:
                #3: z
        >>> g(z=99)
        ERROR: TypeError: rp.gather_args_bind(f): 1 missing arguments:
            Missing Positional Arguments:
                #1: x

    EXAMPLE:
        >>> def f(x,y,z,*,u,v):
               print(x,y,z,u,v)
            y=123
            u=444
            b=gather_args_bind(f)
        >>> b()
        ERROR: TypeError: rp.gather_args_bind(f): 3 missing arguments:
            Missing Positional Arguments:
                #1: x
                #3: z
            Missing Keyword Arguments:
                v
        >>> b(1,z=2,v=3)
        1 123 2 444 3
    """
    import functools
    import inspect

    fullargspec = inspect.getfullargspec(func)
    func_arg_names = fullargspec.args
    func_name = func.__name__

    placeholder = object()  # Unique sentinel: only ever identical to itself

    # gather_args detects this attribute and substitutes `placeholder` for any
    # argument it can't find, letting us bind a *partial* set of values now
    with TemporarilySetAttr(func, gather_args_placeholder = placeholder):
        saved_args, saved_kwargs = gather_args(func, *args, **kwargs, frames_back=frames_back+1)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Start from the bound values, then overlay the call-time overrides
        new_kwargs = dict(saved_kwargs)
        new_kwargs.update(kwargs)

        new_args = list(saved_args)
        new_args[:len(args)] = args

        for name in set(new_kwargs) & set(func_arg_names):
            #Turn any kwargs in to args
            new_args[func_arg_names.index(name)] = new_kwargs.pop(name)

        #Check for placeholder values that haven't been replaced - indicating we didn't fill enough function arguments
        missing_arg_indices = [i for i,a in enumerate(new_args) if a is placeholder]
        missing_kwarg_names = [n for n,k in new_kwargs.items() if k is placeholder]
        missing_arg_names = gather(func_arg_names, missing_arg_indices)
        missing_names = missing_arg_names + missing_kwarg_names

        if missing_names:
            indent = " "
            error_message_lines = []
            error_message_lines.append("rp.gather_args_bind(%s): %i missing arguments:"%(func_name,len(missing_names)))
            if missing_arg_names:
                error_message_lines.append(indent + "Missing Positional Arguments:")
                for index,name in zip(missing_arg_indices, missing_arg_names):
                    error_message_lines.append(2*indent + "#%i: %s"%(index+1,name))
            if missing_kwarg_names:
                error_message_lines.append(indent + "Missing Keyword Arguments:")
                for name in missing_kwarg_names:
                    error_message_lines.append(2*indent + name)
            error_message = line_join(error_message_lines)
            raise TypeError(error_message)

        return func(*new_args, **new_kwargs)

    return wrapper
def get_current_function(frames_back=0):
    """
    Retrieves the function object from the specified number of frames back in the call stack.

    With frames_back=0 it returns the function this call appears inside of;
    frames_back=1 returns that function's caller, and so on.

    Args:
        frames_back (int, optional): The number of frames to go back in the call stack. Defaults to 0.
    Returns:
        function: The function object from the specified frame.
    Raises:
        TypeError: If frames_back is not an integer.
        ValueError: If frames_back is negative.
        RuntimeError: If the call stack is too shallow, or the target frame does not correspond
            to a function found in its module's globals (e.g. a nested or local function).
    NOTE: The lookup searches the target frame's f_globals by name, so only
        module-level functions can be found - lambdas, closures and methods won't be.
    TODO: Handle being inside list comprehensions etc - like gather_vars does. Also perhaps any other edge cases?
    EXAMPLE:
        >>> def z(): return get_current_function()
            print(z)
            print(z())
        <function z at 0x127709900>
        <function z at 0x127709900>
    EXAMPLE:
        >>> def f(i): return get_current_function(i)
            def g(i): return f(i)
            def h(i): return g(i)
            print(f)
            print(g)
            print(h)
            print(h(1))
            print(h(2))
            print(h(3))
            print(h(4))
        <function f at 0x130c811b0>
        <function g at 0x130c82050>
        <function h at 0x130c80160>
        <function f at 0x130c811b0>
        <function g at 0x130c82050>
        <function h at 0x130c80160>
        ERROR: RuntimeError: The frame 4 levels back does not correspond to a function.
    """
    import inspect
    frames_back+=1
    if not isinstance(frames_back, int):
        raise TypeError("frames_back must be an integer.")
    if frames_back < 1:
        raise ValueError("frames_back must be greater than or equal to 1.")
    # Access the call stack
    stack = inspect.stack()
    # Ensure there are enough frames in the stack
    if frames_back >= len(stack):
        raise RuntimeError("No sufficient frames in the call stack.")
    # The target frame is `frames_back` levels up the stack
    target_frame = stack[frames_back].frame
    # Get function name from the target frame
    func_name = target_frame.f_code.co_name
    # Iterate through the stack to find the function object
    for frame_info in stack:
        # Check function name and code object to ensure the correct function is found
        if frame_info.function == func_name and frame_info.frame.f_code == target_frame.f_code:
            output = frame_info.frame.f_globals.get(func_name)
            if output is not None:
                return output
    raise RuntimeError("The frame %i levels back does not correspond to a function."%frames_back)
def get_current_function_name(frames_back=0):
    """Return the __name__ of the function `frames_back` frames up the call stack (0 = the function this is called inside of)."""
    # +1 compensates for this helper's own frame
    current = get_current_function(frames_back + 1)
    return current.__name__
def gather_args_recursive_call(*args, frames_back=0, **kwargs):
    """Re-call the function we're currently inside of, gathering its arguments from the current scope via rp.gather_args_call."""
    depth = frames_back + 1  # Skip this helper's own frame
    current = get_current_function(depth)
    return gather_args_call(current, *args, frames_back=depth + 1, **kwargs)
def replace_if_none(value):
    """
    TODO: Make this work with older versions of python, using destructure's strategy
    Used to replace default values in a concise way
    Please read the examples - this function uses introspection
    and the context where this function is called matters!
    Parameters
    ----------
    value: any
        A value that will be returned if the left-hand of assignment is None
    Returns
    -------
    any
        Returns either value, or the value of the left-hand of the assignment
        operation where this function is called
    Examples
    --------
        a = None
        b = 123
        default = 'Hello'
        # Plebian, redundant way to write code:
        a = default if a is None else a
        b = default if b is None else b
        # Equivalent way to write it with this func
        a = replace_if_none(default)
        b = replace_if_none(default)
        # Array assignment
        arr = [None, 2, 3]
        arr[0] = replace_if_none(42)
        # Use-case
        >>> def f(x=None):
               x=replace_if_none('default')
               return x
        >>> f()
        ans = default
        >>> f(123)
        ans = 123
    Pitfalls
    --------
        # The function must be used within an assignment operation.
        >>> replace_if_none(default)
        ValueError: 'replace_if_none must be used within an assignment operation.'
        # Chained assignments are not supported
        >>> a = b = replace_if_none(42)
        ValueError: 'replace_if_none only supports single assignment targets.'
        # Augmented assignments are not supported
        >>> a += replace_if_none(42)
        ValueError: 'replace_if_none only supports simple assignments.'
        # Tuple unpacking assignments are not supported
        >>> a, b = replace_if_none((1, 2))
        ValueError: 'replace_if_none only supports single assignment targets.'
        # Assignment must be on one line, or this function will get confused
        >>> a = \\
               replace_if_none(123)
        ValueError: 'replace_if_none must be used within an assignment operation.'
    """
    import inspect
    import ast
    # Get the source code of the line that called this function
    frame = inspect.currentframe().f_back
    info = inspect.getframeinfo(frame)
    code = info.code_context[0].strip()
    # Use the ast module to parse the source code into a syntax tree
    tree = ast.parse(code)
    # Augmented assignments (a += ...) parse to ast.AugAssign, not ast.Assign,
    # so they must be rejected explicitly to give the documented error message
    if any(isinstance(node, ast.AugAssign) for node in ast.walk(tree)):
        raise ValueError("rp.replace_if_none only supports simple assignments.")
    try:
        # Find the Assign node (i.e., the assignment operation)
        assign_node = next(node for node in ast.walk(tree) if isinstance(node, ast.Assign))
    except StopIteration:
        raise ValueError("rp.replace_if_none must be used within an assignment operation.")
    # Check for chained assignments (a = b = ...): multiple targets
    if len(assign_node.targets) > 1:
        raise ValueError("rp.replace_if_none only supports single assignment targets.")
    target_node = assign_node.targets[0]
    # Tuple/list unpacking (a, b = ...) is a single Assign target that is a
    # Tuple/List node - reject it rather than eval-ing a bogus tuple expression
    if isinstance(target_node, (ast.Tuple, ast.List)):
        raise ValueError("rp.replace_if_none only supports single assignment targets.")
    # Extract the assignment target (requires Python 3.9+ for ast.unparse)
    target = ast.unparse(target_node)
    # Evaluate the assignment target in the caller's frame
    target_value = eval(target, frame.f_locals)
    # Return value if the target value is None, otherwise return the target value
    return value if target_value is None else target_value
# Should probably use current_module = __import__(__name__) instead where this is used
# def get_current_module():
# """
# Traverse up the call stack and return the first module found.
# """
# import inspect
#
# frame = inspect.currentframe()
# while frame is not None:
# frame = frame.f_back
# output = inspect.getmodule(frame)
# if output is not None:
# return output
# raise Exception('get_current_module(): failed to get the current module')
def squelch_call(func, *args, exception_types=(Exception,), on_exception=identity, **kwargs):
    """
    Call *func*, suppressing the given exception type(s).

    Args:
        func (callable): The function to call.
        *args: Positional arguments forwarded to *func*.
        exception_types (type or iterable of types, optional): Exception class(es)
            to catch. Defaults to (Exception,), which catches all ordinary exceptions.
        on_exception (callable, optional): Called with the caught exception; its
            return value becomes this function's return value. Defaults to identity,
            so by default the exception object itself is returned.
        **kwargs: Keyword arguments forwarded to *func*.

    Returns:
        Whatever *func* returns, or on_exception(exception) if a listed exception was raised.

    Raises:
        AssertionError: If exception_types is neither an exception class nor an
            iterable of exception classes.

    Example:
        >>> def divide(a, b):
        ...     return a / b
        >>> squelch_call(divide, 10, 2)
        5.0
        >>> squelch_call(divide, 10, 0, exception_types=ZeroDivisionError, on_exception=lambda e: "Cannot divide by zero")
        'Cannot divide by zero'
    """
    def _is_exception_class(candidate):
        return isinstance(candidate, type) and issubclass(candidate, BaseException)

    # Normalize: a single exception class is used directly; anything else must
    # be an iterable of exception classes, which we freeze into a tuple
    if not _is_exception_class(exception_types):
        assert is_iterable(exception_types)
        exception_types = tuple(exception_types)
        assert all(_is_exception_class(et) for et in exception_types)

    try:
        result = func(*args, **kwargs)
    except exception_types as err:
        return on_exception(err)
    else:
        return result
def squelch_wrap(func, exception_types=(Exception,), on_exception=identity):
    """
    Wrap *func* so calls to it are routed through squelch_call (usable as a decorator).

    squelch_wrap is to squelch_call as gather_args_wrap is to gather_args_call.

    TODO: Make squelch_call implemented with squelch_wrap, not the other way around - that way we can still use the on_exception and exception_types arguments in squelch_wrap

    EXAMPLE:
        >>> def f():
        ...    1/0
        >>> f()
        ERROR: ZeroDivisionError: division by zero
        >>> print(repr(squelch_call(f)))
        ZeroDivisionError('division by zero')
        >>> g=squelch_wrap(f)
        >>> print(repr(g()))
        ZeroDivisionError('division by zero')
    """
    import functools

    @functools.wraps(func)
    def squelched(*args, **kwargs):
        return squelch_call(
            func,
            *args,
            exception_types=exception_types,
            on_exception=on_exception,
            **kwargs,
        )

    return squelched
def rebind_globals_to_module(module, *, monkey_patch=False):
    """
    Decorator to change the global environment of functions and classes to another module's namespace.
    If monkey_patch is True, the function is also added to the module.
    The result: the decorated function is as good as if it were created in that module's source code,
    allowing it to both read and write from that module's globals. As a consequence, it can no longer
    read or write from the module where this decorator is called.
    Args:
        module: The target module to bind the globals to.
        monkey_patch: If True, adds the object to the module.
    Returns:
        An object with its globals rebound to the target module's namespace.
    EXAMPLE:
        import rp.r as r
        @rebind_globals_to_module(r)
        def f():
            #returns r._BundledPath
            return _BundledPath
        some_var=123
        @rebind_globals_to_module(r)
        def g():
            #This crashes as it can no longer see names from the current module.
            return some_var
    """
    assert is_a_module(module), 'rebind_globals_to_module is a decorator'
    import types

    def decorator(obj):
        if isinstance(obj, types.FunctionType):
            # Rebuild the function with the target module's __dict__ as its globals
            bound_func = types.FunctionType(
                obj.__code__,
                module.__dict__,
                obj.__name__,
                obj.__defaults__,
                obj.__closure__,
            )
            # types.FunctionType() does not carry these over - copy them so
            # keyword-only defaults and attached attributes survive the rebind
            bound_func.__kwdefaults__ = obj.__kwdefaults__
            bound_func.__dict__.update(obj.__dict__)
            if monkey_patch:
                # Honor the documented monkey_patch option: install into the module
                setattr(module, obj.__name__, bound_func)
            return bound_func
        elif isinstance(obj, type): # It's a class
            raise NotImplementedError("rebind_globals_to_module has not been tested/verified on classes yet")
        else:
            raise TypeError("rebind_globals_to_module can only be applied to functions or classes.")

    return decorator
def globalize_locals(func):
    """
    Decorator that makes a function's local variables available globally,
    allowing a function to effectively act as a macro.
    Useful for making reusable cells in a Jupyter notebook.

    When a function decorated with @globalize_locals is called, all local variables created
    within the function become available in the caller's global namespace after execution.
    This includes both function parameters and local variables.

    The global namespace modification is the primary feature of this decorator,
    intentionally creating side effects by design. Variables defined inside the
    function will persist after execution - even if the function raises, every
    local assigned before the error is still published.

    Example (Normal Execution):
        >>> @globalize_locals
        ... def f(x, y):
        ...     z = x + y
        >>> f(1, 2)
        >>> x, y, z
        (1, 2, 3)

    Example (With Error):
        >>> @globalize_locals
        ... def f(x, y):
        ...     1/0          # Division by zero error
        ...     z = x + y    # This line never executes
        >>> try:
        ...     f(100, 200)
        ... except ZeroDivisionError:
        ...     print("Error caught")
        Error caught
        >>> x, y         # Parameters are still preserved in the global scope
        (100, 200)
        >>> 'z' in globals()
        False

    Example (Notebook-style reusable cells):
        >>> @globalize_locals
        ... def part_0():
        ...     url = "https://mortenhannemose.github.io/assets/img/Lena_1024.png"
        >>> @globalize_locals
        ... def part_1():
        ...     image = load_image(url)
        >>> part_0(); part_1()  # `url` and `image` are now globals
    """
    import inspect
    import functools
    import sys

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # The caller's globals are the publication target
        caller_globals = sys._getframe(1).f_globals

        # Accumulates every local seen in the function's frame
        func_locals = {}

        # Bind the arguments first, so parameters are captured even if the
        # body raises before its first traced line
        sig = inspect.signature(func)
        bound_args = sig.bind(*args, **kwargs)
        bound_args.apply_defaults()
        func_locals.update(bound_args.arguments)

        def trace_func(frame, event, arg):
            # Only trace the decorated function's own frame
            if frame.f_code == func.__code__:
                # Snapshot the frame's locals on every event
                func_locals.update(frame.f_locals)
                return trace_func
            # Don't trace other functions
            return None

        # Install our tracer, remembering whatever was there (e.g. a debugger)
        original_trace = sys.gettrace()
        sys.settrace(trace_func)
        try:
            return func(*args, **kwargs)
        finally:
            # Always restore tracing and publish the captured locals -
            # this runs on both the return path and the exception path,
            # and any in-flight exception propagates naturally afterwards
            sys.settrace(original_trace)
            caller_globals.update(func_locals)

    return wrapper
def _filter_dict_via_fzf(input_dict,*,preview=None):
    """Uses fzf to interactively select a subset of a dict and returns that sub-dict (empty dict if the user cancels)."""
    #Refactored using GPT4 from a mess: https://chat.openai.com/share/66251028-75eb-4c55-960c-1e7477e34060
    pip_import('iterfzf')
    import iterfzf

    # Extract dictionary items, sorted by key so the fzf display is stable
    items_list = list(input_dict.items())
    keys, values = list_transpose(items_list)
    sorted_keys, sorted_values = sync_sorted(keys, values)

    def format_string(item):
        # Collapse multi-line values with repr so each entry is a single fzf row
        item=str(item)
        if '\n' in item:
            return repr(item)
        return item

    formatted_values = map(format_string, sorted_values)
    formatted_keys = map(format_string, sorted_keys)

    # Join lines and format the display: a rectangular key column, then " | ", then values
    joined_values = line_join(formatted_values)
    joined_keys = line_join(formatted_keys)
    separator = ' | '
    for char in separator:
        joined_keys = make_string_rectangular(joined_keys + char, fillchar=char)
    display_str = horizontally_concatenated_strings(joined_keys, joined_values)
    display_lines = display_str.splitlines()

    # Use fzf to select lines; None means the user cancelled
    selected_lines = _iterfzf(display_lines, multi=True, exact=True, preview=preview)
    if selected_lines is None:
        fansi_print("r._ISM: Canceled!",'cyan','bold')
        selected_lines=[]

    # NOTE: .index returns the first match, so duplicate display rows map to the first key
    selected_indices = [display_lines.index(line) for line in selected_lines]

    # Extract the selected keys and return the dictionary subset
    # (the old `if selected_indices is None` branch was dead code: a list
    # comprehension can never produce None, so it was removed)
    selected_keys = [sorted_keys[index] for index in selected_indices]
    return gather(input_dict, selected_keys, as_dict=True)
# endregion
# region List/Dict Functions/Displays: [list_to_index_dict,invert_dict,invert_dict,invert_list_to_dict,dict_to_list,list_set,display_dict,display_list]
def list_to_index_dict(l: list) -> dict:
    """ ['a','b','c'] ⟶ {0: 'a', 1: 'b', 2: 'c'} """
    return dict(enumerate(l))
def invert_dict(d: dict, bijection=True) -> dict:
    """
    Inverts a dictionary, reversing the mapping of keys to values.

    Args:
        d (dict): The dictionary to invert.
        bijection (bool, optional): When True, assumes d maps one-to-one and inverts
            it directly (later duplicate values silently overwrite earlier ones).
            When False, keys sharing a value are grouped into a tuple. Defaults to True.

    Returns:
        dict: The inverted dictionary.

    Examples:
        >>> invert_dict({0: 'a', 1: 'b', 2: 'c'})
        {'a': 0, 'b': 1, 'c': 2}

        >>> invert_dict({0: 'a', 1: 'a', 2: 'b'}, bijection=False)
        {'a': (0, 1), 'b': (2,)}
    """
    if bijection:
        # {0: 'a', 1: 'b', 2: 'c'} ⟶ {'a': 0, 'b': 1, 'c': 2}
        return {value: key for key, value in d.items()}

    # {0: 'a', 1: 'a', 2: 'b'} ⟶ {'a': (0,1), 'b': (2,)}
    inverted = {}
    for key, value in d.items():
        inverted[value] = inverted.get(value, ()) + (key,)
    return inverted
def invert_list_to_dict(l: list) -> dict:
    """ ['a','b','c'] ⟶ {'c': 2, 'a': 0, 'b': 1} """
    # Duplicates would make value-to-index ambiguous, so they're forbidden
    assert len(set(l)) == len(l),'r.dict_of_values_to_indices: l contains duplicate values, so we cannot return a 1-to-1 function; and thus ∄ a unique dict that converts values to indices for this list!'
    index_to_value = list_to_index_dict(l)
    return invert_dict(index_to_value)
def dict_to_list(d: dict) -> list:
    """ Assumes keys should be in ascending order """
    ordered_keys = sorted(d.keys())
    return gather(d, ordered_keys)
def list_set(x):
    """
    Similar to performing list(set(x)), except that it preserves the original order of the items.
    You could also think of it as list_set≣remove_duplicates

    Works with unhashable elements too (falls back to an O(n²) equality scan).

    Demo:
        >>> l=[5,4,4,3,3,2,1,1,1]
        >>> list(set(l))
       ans=[1,2,3,4,5]
        >>> list_set(l) ⟵ This method
       ans=[5,4,3,2,1]
    """
    # Materialize first: x may be a one-shot iterator, and we might need two passes
    items = list(x)
    try:
        # Fast path: dict preserves insertion order (Python 3.7+), giving an
        # order-preserving dedup in C without the more_itertools dependency
        return list(dict.fromkeys(items))
    except TypeError:
        # Unhashable elements: deduplicate by == comparison instead
        seen = []
        for item in items:
            if item not in seen:
                seen.append(item)
        return seen
# ――――――――――――――――――――――
# Three fansi colors (see the fansi function for all possible color names):
# Default colorizers used by display_dict/display_list rows (see the fansi function for all possible color names)
default_display_key_color=lambda x123:fansi(x123,'cyan')
default_display_arrow_color=lambda x123:fansi(x123,'green')
default_display_value_color=lambda x123:fansi(x123,'blue')
def display_dict(d: dict,
                 key_color = default_display_key_color,
                 arrow_color = default_display_arrow_color,
                 value_color = default_display_value_color,
                 clip_width = False,
                 post_processor = identity,
                 key_sorter = sorted,
                 print_it = True,
                 arrow = " --> "
                 # arrow = " ⟶ "
                 ) -> None:
    """
    Made by Ryan Burgert for the purpose of visualizing large dictionaries.

    Parameters:
        d:              the dictionary to display
        key_color, arrow_color, value_color: callables applied to each line's key, arrow and value
        clip_width:     if True, clip very long lines so they don't flood the terminal
        post_processor: applied to the entire joined string before clipping/printing
        key_sorter:     determines the display order of the keys (default: sorted)
        print_it:       if True, print the text and return None; if False, return the text
        arrow:          separator string drawn between each key and value

    EXAMPLE DISPLAY:
        >>> display_dict({'name': 'Zed', 'age': 39, 'height': 6 * 12 + 2})
        age ⟶ 39
        height ⟶ 74
        name ⟶ Zed
    """
    # Of course, in the console you will see the appropriate colors for each section.
    # (The original implementation crammed all of this into one nested-lambda return
    #  statement; it is rewritten below as sequential steps with identical behavior.)
    lines = (key_color(key) + arrow_color(arrow) + value_color(d[key]) for key in key_sorter(d.keys()))
    text = post_processor('\n'.join(lines))
    if clip_width:
        # Keep very long values from flooding the terminal
        text = clip_string_width(text, max_wraps_per_line=2, clipped_suffix='………')
    if print_it:
        print(text)
        return None  # print() returns None, matching the original behavior
    return text
def display_list(l: list,
                 key_color = default_display_key_color,
                 arrow_color = default_display_arrow_color,
                 value_color = default_display_value_color,
                 print_it = True) -> None:
    """
    Pretty-print a list (tuples etc. work too), one element per line, using each
    element's index as the 'key'. See display_dict for the formatting details.
    """
    indexed = list_to_index_dict(l)
    return display_dict(d=indexed,
                        key_color=key_color,
                        arrow_color=arrow_color,
                        value_color=value_color,
                        print_it=print_it)
def display_markdown(markdown:str):
    """
    Render markdown text in either a Jupyter notebook (via IPython's display)
    or a terminal (via the `rich` package, installed on demand).

    markdown : str
        Markdown source text, or a path to an existing .md file (loaded first).

    Handles the usual markdown elements: headings, bold/italic, bullet and
    numbered lists, blockquotes, links, horizontal rules, fenced code blocks,
    tables, task lists, and (in Jupyter) math formulas and mermaid diagrams.

    EXAMPLE:
        >>> display_markdown('# Title\\n\\n**Bold** and *italic*\\n\\n- item')
    """
    # A path to a .md file is loaded; anything else is treated as literal markdown.
    if markdown.endswith(".md") and file_exists(markdown):
        markdown = load_text_file(markdown)

    if running_in_jupyter_notebook():
        from IPython.display import display, Markdown
        display(Markdown(markdown))
        return

    # Terminal fallback: rich renders markdown with ANSI styling
    pip_import("rich")
    from rich import print
    from rich.markdown import Markdown
    print(Markdown(markdown))
def _get_carbon_url(code):
"""
Generate a Carbon URL to visualize code snippets with syntax highlighting.
code : str
The code to display
Returns str:
URL that can be opened in a browser to display the code
"""
import urllib.parse
import re
params = {
"code": code,
"l": "python",
"t": "monokai",
}
encoded_params = urllib.parse.urlencode(params)
return "https://carbon.now.sh/?"+str(encoded_params)
def display_code_cell(code, *, title="Code Cell", language=None):
    """
    Print code cell with formatting, line numbers, and syntax highlighting.
    In a terminal, it displays a clickable link to bring you to the source code copyable online via carbon.sh!
    In Jupyter, it displays a custom HTML cell with a copy button and macOS-style window controls!
    Parameters:
    -----------
    code : str
        The code to display
    title : str
        The cell number to display in the title
    language: str
        If specified, can be like 'py' or 'md' or 'python3' or 'markdown' or 'JSX' or 'javascript' etc
        If not specified, defaults to 'python3'
        Right now, only python syntax highlighting is supported in the Jupyter version...though this could change.
    EXAMPLE:
        >>> display_code_cell(get_source_code(load_image))
    """
    # IMPORTANT: Do not use f-strings in this function to maintain compatibility
    code = code.rstrip() #We have to for the printer...
    language = language or 'python3'
    if not running_in_jupyter_notebook():
        # --- Terminal path: ANSI-colored box with line numbers and a clickable Carbon link in the title ---
        num_prefix = "%s│"
        mln = number_of_lines(code)
        mln = len(num_prefix % mln)  # width of the widest line-number gutter, e.g. len("42│")
        print(
            "\n"
            + indentify(
                " " * (mln - 1)
                + fansi(
                    # Top border, with the title centered and hyperlinked to carbon.now.sh
                    "┌"
                    + (title).center(
                        string_width(code) + 1,
                        "─",
                    ),
                    "bold dark white white dark white",
                    link=_get_carbon_url(code),
                )
                + "\n"
                + with_line_numbers(
                    fansi_pygments(code, language),  # pygments does the actual syntax coloring
                    align=True,
                    prefix=fansi(num_prefix, "dark white white dark white"),
                    start_from=1,
                ),
                " ",
            )
            + "\n"
        )
    else:
        # --- Jupyter path: build a self-contained interactive HTML widget (highlight.js for coloring) ---
        from IPython.display import display, HTML, Javascript
        import re    # NOTE(review): `re` and `Javascript` appear unused in this branch — kept unchanged in this doc-only pass
        import uuid
        import html
        # First, make sure highlight.js is loaded (if it's not already)
        display(
            HTML(
                """
                <script>
                if (typeof hljs === 'undefined') {
                    var link = document.createElement('link');
                    link.rel = 'stylesheet';
                    link.href = 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.7.0/styles/monokai.min.css';
                    document.head.appendChild(link);
                    var script = document.createElement('script');
                    script.src = 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.7.0/highlight.min.js';
                    document.head.appendChild(script);
                }
                </script>
                """
            )
        )
        # Split the code into lines for line numbers
        code_lines = code.splitlines()
        num_lines = len(code_lines)
        line_num_width = len(str(num_lines))
        # Escape HTML characters in the code
        escaped_code = (
            code.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
        )
        # Create HTML with line numbers and code content
        line_numbered_code = []
        for i, line in enumerate(code_lines, 1):
            escaped_line = (
                line.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
            )
            if not escaped_line: # Handle empty lines
                escaped_line = " "
            # Avoid f-strings
            line_numbered_code.append(
                '<div class="code-line">'
                '<span class="line-number">' + str(i) + '</span>'
                '<span class="code-content"></span>' # Empty content to be filled by JS
                "</div>"
            )
        # Combine all lines
        code_with_line_numbers = "\n".join(line_numbered_code)
        # Store the original code in a data attribute (safely escaped)
        code_for_attr = html.escape(code)
        # Generate a unique ID for this code cell (avoid hyphens for JS compatibility)
        cell_id = "codecell" + uuid.uuid4().hex[:8]
        # Create style and HTML content using string formatting instead of f-strings
        # ({0} below is the cell id; doubled braces {{ }} are literal CSS braces)
        style_template = """
        <style>
        #{0} .code-cell-container {{
            border: 1px solid #2d2d2d;
            border-radius: 4px;
            margin: 10px 0;
            overflow: hidden;
            font-size: 8pt;
            font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', 'Consolas', 'source-code-pro', monospace;
        }}
        #{0} .code-cell-header {{
            background-color: #272822;
            color: #f8f8f2;
            padding: 5px;
            text-align: center;
            font-weight: bold;
            display: flex;
            align-items: center;
            position: relative;
        }}
        #{0} .window-controls {{
            display: flex;
            position: absolute;
            left: 10px;
            gap: 6px;
        }}
        #{0} .control-button {{
            width: 12px;
            height: 12px;
            border-radius: 50%;
            border: none;
            cursor: pointer;
        }}
        #{0} .close-button {{
            background-color: #ff5f56;
        }}
        #{0} .minimize-button {{
            background-color: #ffbd2e;
        }}
        #{0} .control-button:hover {{
            filter: brightness(90%);
        }}
        #{0} .title-text {{
            flex: 1;
            text-align: center;
        }}
        #{0} .copy-button, #{0} .hamburger-menu {{
            background-color: #49483e;
            border: none;
            color: #f8f8f2;
            padding: 3px 8px;
            border-radius: 3px;
            cursor: pointer;
            font-size: 12px;
            display: flex;
            align-items: center;
            gap: 5px;
            transition: background-color 0.2s;
        }}
        #{0} .copy-button {{
            position: absolute;
            right: 50px;
        }}
        #{0} .hamburger-menu {{
            position: absolute;
            right: 10px;
            padding: 3px 5px;
        }}
        #{0} .copy-button:hover, #{0} .hamburger-menu:hover {{
            background-color: #75715e;
        }}
        #{0} .copy-icon, #{0} .hamburger-icon {{
            width: 14px;
            height: 14px;
            fill: currentColor;
            transition: transform 0.3s ease;
        }}
        #{0} .hamburger-icon.active {{
            transform: rotate(90deg);
        }}
        #{0} .hamburger-icon rect {{
            fill: currentColor;
            transition: y 0.3s ease, transform 0.3s ease, opacity 0.3s ease;
        }}
        #{0} .secondary-menu {{
            max-height: 0;
            overflow: hidden;
            background-color: #272822;
            padding: 0 10px;
            border-top: 1px solid #49483e;
            display: flex;
            justify-content: flex-end;
            gap: 10px;
            transition: max-height 0.3s ease, padding 0.3s ease, opacity 0.3s ease;
            opacity: 0;
        }}
        #{0} .secondary-menu.visible {{
            max-height: 40px;
            padding: 5px 10px;
            opacity: 1;
        }}
        #{0} .secondary-menu.closing {{
            max-height: 0;
            padding: 0 10px;
            opacity: 0;
        }}
        #{0} .secondary-menu button {{
            background-color: #49483e;
            border: none;
            color: #f8f8f2;
            padding: 3px 8px;
            border-radius: 3px;
            cursor: pointer;
            font-size: 12px;
            transition: background-color 0.2s;
        }}
        #{0} #minimize-all-{0}:hover, #{0} #maximize-all-{0}:hover {{
            background-color: #ffbd2e;
            color: #272822;
        }}
        #{0} #close-all-{0}:hover {{
            background-color: #ff5f56;
            color: #272822;
        }}
        #{0} #minimize-all-{0}.active, #{0} #maximize-all-{0}.active {{
            background-color: #ffbd2e;
            color: #272822;
        }}
        #{0} #close-all-{0}.active {{
            background-color: #ff5f56;
            color: #272822;
        }}
        #{0} .code-cell-content {{
            padding: 10px;
            background-color: #272822;
            overflow-x: auto;
            line-height: 1.5;
            color: #f8f8f2;
            max-height: 2000px;
            transition: all 0.4s cubic-bezier(0.19, 1, 0.22, 1);
            transform-origin: top;
            opacity: 1;
        }}
        #{0}.minimized .code-cell-content {{
            max-height: 0;
            padding-top: 0;
            padding-bottom: 0;
            opacity: 0.7;
            transform: scaleY(0.01);
            overflow: hidden;
        }}
        #{0} .code-line {{
            display: flex;
            white-space: pre;
            min-height: 1.5em;
        }}
        #{0} .code-content {{
            flex: 1;
            padding-left: 0.5em;
            white-space: pre;
        }}
        #{0} .line-number {{
            color: #75715e !important;
            border-right: 1px solid #49483e;
            padding-right: 0.5em;
            text-align: right;
            user-select: none;
            min-width: {1}ch;
            display: inline-block;
        }}
        </style>
        """
        # {0}=cell id, {1}=escaped original code (data attribute), {2}=title,
        # {3}=escaped code for highlight.js, {4}=pre-built line-numbered markup
        content_template = """
        <div id="{0}" data-original-code="{1}">
            <div class="code-cell-container">
                <div class="code-cell-header">
                    <div class="window-controls">
                        <button class="control-button close-button" id="close-button-{0}" title="Close"></button>
                        <button class="control-button minimize-button" id="minimize-button-{0}" title="Minimize"></button>
                    </div>
                    <div class="title-text">{2}</div>
                    <button class="copy-button" id="copy-button-{0}">
                        <svg class="copy-icon" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
                            <path d="M16 1H4c-1.1 0-2 .9-2 2v14h2V3h12V1zm3 4H8c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h11c1.1 0 2-.9 2-2V7c0-1.1-.9-2-2-2zm0 16H8V7h11v14z"/>
                        </svg>
                        Copy
                    </button>
                    <button class="hamburger-menu" id="hamburger-menu-{0}" title="More options">
                        <svg class="hamburger-icon" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 14 14">
                            <rect x="2" y="3" width="10" height="2" rx="1"/>
                            <rect x="2" y="6" width="10" height="2" rx="1"/>
                            <rect x="2" y="9" width="10" height="2" rx="1"/>
                        </svg>
                    </button>
                </div>
                <div class="secondary-menu" id="secondary-menu-{0}">
                    <button id="minimize-all-{0}">Minimize All</button>
                    <button id="maximize-all-{0}">Maximize All</button>
                    <button id="close-all-{0}">Close All</button>
                </div>
                <div class="code-cell-content">
                    <div style="display:none">
                        <pre><code class="python">{3}</code></pre>
                    </div>
                    <div class="line-numbers-code">
                        {4}
                    </div>
                </div>
            </div>
        </div>
        """
        # Per-cell JS: wires up copy/close/minimize buttons, the hamburger menu
        # (minimize/maximize/close ALL cells), and applies highlight.js line by line.
        script_template = """
        <script>
        (function() {{
            // Setup copy button functionality
            function setupCopyButton() {{
                const copyButton = document.getElementById('copy-button-{0}');
                if (copyButton) {{
                    copyButton.addEventListener('click', function() {{
                        try {{
                            // Get the original code directly from our data attribute
                            const codeContainer = document.getElementById('{0}');
                            const originalCode = codeContainer.getAttribute('data-original-code');
                            // Create a temporary textarea element to copy the text
                            const textarea = document.createElement('textarea');
                            textarea.value = originalCode;
                            textarea.setAttribute('readonly', '');
                            textarea.style.position = 'absolute';
                            textarea.style.left = '-9999px';
                            document.body.appendChild(textarea);
                            // Select the text and copy it
                            textarea.select();
                            document.execCommand('copy');
                            // Remove the textarea
                            document.body.removeChild(textarea);
                            // Update the button to show feedback
                            const originalText = copyButton.innerHTML;
                            copyButton.innerHTML = `
                                <svg class="copy-icon" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
                                    <path d="M9 16.17L4.83 12l-1.42 1.41L9 19 21 7l-1.41-1.41L9 16.17z"/>
                                </svg>
                                Copied!
                            `;
                            copyButton.style.backgroundColor = '#a6e22e';
                            setTimeout(() => {{
                                copyButton.innerHTML = originalText;
                                copyButton.style.backgroundColor = '';
                            }}, 2000);
                        }} catch (err) {{
                            console.error('Failed to copy code: ', err);
                            alert('Failed to copy code to clipboard: ' + err.message);
                        }}
                    }});
                }}
            }}
            // Setup the window control buttons
            function setupWindowControls() {{
                const closeButton = document.getElementById('close-button-{0}');
                const minimizeButton = document.getElementById('minimize-button-{0}');
                const codeContainer = document.getElementById('{0}');
                if (closeButton) {{
                    closeButton.addEventListener('click', function() {{
                        // Hide the entire code cell
                        codeContainer.style.display = 'none';
                    }});
                }}
                if (minimizeButton) {{
                    minimizeButton.addEventListener('click', function() {{
                        // Toggle minimized class to show/hide content
                        const isMinimized = codeContainer.classList.toggle('minimized');
                        // Add visual indicator to the minimize button
                        if (isMinimized) {{
                            minimizeButton.style.boxShadow = 'inset 0 0 0 1px rgba(0, 0, 0, 0.3)';
                            minimizeButton.setAttribute('title', 'Maximize');
                        }} else {{
                            minimizeButton.style.boxShadow = '';
                            minimizeButton.setAttribute('title', 'Minimize');
                            // If maximizeing, scroll into view after animation completes
                            setTimeout(() => {{
                                const content = document.querySelector('#{0} .code-cell-content');
                                content.scrollIntoView({{ behavior: 'smooth', block: 'nearest' }});
                            }}, 400);
                        }}
                    }});
                }}
            }}
            // Setup hamburger menu and secondary controls
            function setupHamburgerMenu() {{
                const hamburgerButton = document.getElementById('hamburger-menu-{0}');
                const hamburgerIcon = hamburgerButton.querySelector('.hamburger-icon');
                const secondaryMenu = document.getElementById('secondary-menu-{0}');
                const minimizeAllButton = document.getElementById('minimize-all-{0}');
                const maximizeAllButton = document.getElementById('maximize-all-{0}');
                const closeAllButton = document.getElementById('close-all-{0}');
                if (hamburgerButton && secondaryMenu) {{
                    // Toggle secondary menu visibility when hamburger is clicked
                    hamburgerButton.addEventListener('click', function() {{
                        // If menu is visible, start closing animation
                        if (secondaryMenu.classList.contains('visible')) {{
                            // Add closing class for animation
                            secondaryMenu.classList.add('closing');
                            secondaryMenu.classList.remove('visible');
                            // Reset the hamburger icon
                            hamburgerIcon.classList.remove('active');
                            hamburgerButton.style.backgroundColor = '';
                            // When animation completes, remove the closing class
                            setTimeout(() => {{
                                secondaryMenu.classList.remove('closing');
                            }}, 300); // Match the transition duration
                        }} else {{
                            // Show the menu
                            secondaryMenu.classList.add('visible');
                            secondaryMenu.classList.remove('closing');
                            // Animate the hamburger icon
                            hamburgerIcon.classList.add('active');
                            hamburgerButton.style.backgroundColor = '#75715e';
                        }}
                    }});
                }}
                // Secondary menu controls - Minimize All button
                if (minimizeAllButton) {{
                    minimizeAllButton.addEventListener('click', function() {{
                        // Get all code cells on the page
                        const allCodeCells = document.querySelectorAll('[id^="codecell"]');
                        // Mark the button as active temporarily
                        minimizeAllButton.classList.add('active');
                        maximizeAllButton.classList.remove('active');
                        // Remove active state after a short delay
                        setTimeout(() => {{
                            minimizeAllButton.classList.remove('active');
                        }}, 1000);
                        // Minimize all cells
                        allCodeCells.forEach(cell => {{
                            // Minimize if not already minimized
                            if (!cell.classList.contains('minimized')) {{
                                cell.classList.add('minimized');
                                // Also update the minimize button's visual state
                                const minButton = document.querySelector('#' + cell.id + ' .minimize-button');
                                if (minButton) {{
                                    minButton.style.boxShadow = 'inset 0 0 0 1px rgba(0, 0, 0, 0.3)';
                                    minButton.setAttribute('title', 'Maximize');
                                }}
                            }}
                        }});
                    }});
                }}
                // Maximize All button
                if (maximizeAllButton) {{
                    maximizeAllButton.addEventListener('click', function() {{
                        // Get all code cells on the page
                        const allCodeCells = document.querySelectorAll('[id^="codecell"]');
                        // Mark the button as active temporarily
                        maximizeAllButton.classList.add('active');
                        minimizeAllButton.classList.remove('active');
                        // Remove active state after a short delay
                        setTimeout(() => {{
                            maximizeAllButton.classList.remove('active');
                        }}, 1000);
                        // Maximize all cells
                        allCodeCells.forEach(cell => {{
                            // Maximize if minimized
                            if (cell.classList.contains('minimized')) {{
                                cell.classList.remove('minimized');
                                // Update the minimize button's visual state
                                const minButton = document.querySelector('#' + cell.id + ' .minimize-button');
                                if (minButton) {{
                                    minButton.style.boxShadow = '';
                                    minButton.setAttribute('title', 'Minimize');
                                }}
                            }}
                        }});
                    }});
                }}
                if (closeAllButton) {{
                    // Close all code cells
                    closeAllButton.addEventListener('click', function() {{
                        // Get all code cells
                        const allCodeCells = document.querySelectorAll('[id^="codecell"]');
                        // Hide all cells
                        allCodeCells.forEach(cell => {{
                            cell.style.display = 'none';
                        }});
                    }});
                }}
            }}
            // Setup all interactive elements
            setTimeout(() => {{
                setupCopyButton();
                setupWindowControls();
                setupHamburgerMenu(); // Setup the new hamburger menu and secondary controls
            }}, 300);
            function applyHighlighting() {{
                if (typeof hljs !== 'undefined') {{
                    try {{
                        // Get the code element and highlight it
                        const codeElement = document.querySelector('#{0} code');
                        hljs.highlightElement(codeElement);
                        // Get the highlighted code
                        const highlightedCode = codeElement.innerHTML;
                        // Process the highlighted code line by line
                        const processedLines = [];
                        let currentLine = '';
                        let inTag = false;
                        let tagContent = '';
                        // Parse the highlighted HTML to extract properly tagged lines
                        for (let i = 0; i < highlightedCode.length; i++) {{
                            const char = highlightedCode[i];
                            if (char === '<') {{
                                inTag = true;
                                tagContent = char;
                            }} else if (inTag && char === '>') {{
                                inTag = false;
                                tagContent += char;
                                currentLine += tagContent;
                            }} else if (inTag) {{
                                tagContent += char;
                            }} else if (char === '\\n') {{
                                processedLines.push(currentLine || ' ');
                                currentLine = '';
                            }} else {{
                                currentLine += char;
                            }}
                        }}
                        if (currentLine) {{
                            processedLines.push(currentLine);
                        }}
                        // Apply the highlighted lines to our line-numbered code
                        const codeContentElements = document.querySelectorAll('#{0} .code-content');
                        for (let i = 0; i < codeContentElements.length; i++) {{
                            if (i < processedLines.length) {{
                                codeContentElements[i].innerHTML = processedLines[i];
                            }} else {{
                                codeContentElements[i].innerHTML = ' ';
                            }}
                        }}
                        // Hide the original code block
                        document.querySelector('#{0} [style="display:none"]').style.display = 'none';
                    }} catch (e) {{
                        console.error('Error applying highlighting:', e);
                    }}
                }} else {{
                    // If highlight.js is not loaded yet, wait and try again
                    setTimeout(applyHighlighting, 100);
                }}
            }}
            // Start the highlighting process
            setTimeout(applyHighlighting, 300);
        }})();
        </script>
        """
        # Apply the style and content templates
        style = style_template.format(cell_id, line_num_width + 1)
        content = content_template.format(
            cell_id,
            code_for_attr,
            title,
            escaped_code,
            code_with_line_numbers
        )
        script = script_template.format(cell_id)
        # Combine all HTML parts
        html_output = style + content + script
        # Display the complete HTML
        display(HTML(html_output))
# endregion
# region 'youtube_dl'﹣dependent methods: [rip_music,rip_info]
# noinspection SpellCheckingInspection
# Default base filename (no extension) used by rip_music when the caller does not supply one.
default_rip_music_output_filename="rip_music_temp"
def rip_music(URL: str,output_filename: str = default_rip_music_output_filename,desired_output_extension: str = 'wav',quiet=False):
    """
    Ryan Burgert Jan 15 2017
    Download the audio track of a streaming URL and convert it to the requested format,
    returning the name of the file that was created.

    URL:                        Anything youtube_dl supports (youtube, vimeo, soundcloud — 400+ sites).
    output_filename:            Base name without an extension; saved to the default directory.
    desired_output_extension:   'wav', 'mp3', 'ogg', etc. Conversion is automatic regardless of
                                the site's original format.
    quiet:                      If True, suppress youtube_dl's console chatter during download/conversion.

    NOTE: ffmpeg (e.g. 'brew install ffmpeg') is required for some output extensions.
    NOTE: youtube_dl can do far more — titles/authors/cover art, whole playlists, video rips, etc.
    EXAMPLE: play_sound_file_via_afplay(rip_music('https://www.youtube.com/watch?v=HcgEHrwdSO4'))
    """
    pip_import('youtube_dl')
    import youtube_dl

    download_options = {
        'format': 'bestaudio/best',  # grab the highest audio quality available
        # Do NOT change outtmpl: see https://github.com/rg3/youtube-dl/issues/7870
        # (other templates kept corrupting the audio files)
        'outtmpl': output_filename + ".%(ext)s",
        'postprocessors': [
            {
                'key': 'FFmpegExtractAudio',
                'preferredcodec': desired_output_extension,
                # 'preferredquality': '192',
            }
        ],
        'quiet': quiet,  # a 'verbose' option also exists in YoutubeDL if even more output is wanted
        'noplaylist': True,  # only download the single song, never a whole playlist
    }

    with youtube_dl.YoutubeDL(download_options) as downloader:
        downloader.download([URL])

    return output_filename + "." + desired_output_extension
def rip_info(URL: str):
    """
    Companion to rip_music: fetch the metadata of a youtube/vimeo/soundcloud/etc.
    item as a dictionary, WITHOUT downloading the media itself.

    Known keys include:
        [abr,acodec,age_limit,alt_title,annotations,automatic_captions,average_rating,
         categories,creator,description,dislike_count,display_id,duration,end_time,ext,
         extractor,extractor_key,format,format_id,formats,fps,height,id,is_live,license,
         like_count,playlist,playlist_index,requested_formats,requested_subtitles,resolution,
         start_time,stretched_ratio,subtitles,tags,thumbnail,thumbnails,title,upload_date,
         uploader,uploader_id,uploader_url,vbr,vcodec,view_count,webpage_url,webpage_url_basename,width]
    """
    pip_import('youtube_dl')
    from youtube_dl import YoutubeDL
    downloader = YoutubeDL()
    return downloader.extract_info(URL, download=False)
# endregion
# region Sending and receiving emails: [send_gmail_email,gmail_inbox_summary,continuously_scan_gmail_inbox]
#This region is commented out because it's broken
## from rp.r_credentials import default_gmail_address # ⟵ The email address we will send emails from and whose inbox we will check in the methods below.
## from rp.r_credentials import default_gmail_password # ⟵ Please don't be an asshole: Don't steal this account! This is meant for free use!
# default_gmail_address=''
# default_gmail_password=''
# default_max_ↈ_emails=100 # ≣ _default_max_number_of_emails to go through in the gmail_inbox_summary method.
# def send_gmail_email(recipientⳆrecipients,subject: str = "",body: str = "",gmail_address: str = default_gmail_address,password: str = default_gmail_password,attachmentⳆattachments=None,shutup=False):
# # For attachmentⳆattachments, include either a single string or iterable of strings containing file paths that you'd like to upload and send.
# # param recipientⳆrecipients: Can be either a string or a list of strings: all the emails we will be sending this message to.
# # Heavily modified but originally from https://www.linkedin.com/pulse/python-script-send-email-attachment-using-your-gmail-account-singh
# from email.mime.text import MIMEText
# from email.mime.application import MIMEApplication
# from email.mime.multipart import MIMEMultipart
# import smtplib
# emaillist=[x.strip().split(',') for x in enlist(recipientⳆrecipients)]
# msg=MIMEMultipart()
# msg['Subject']=subject
# # msg['From']='[email protected]'# ⟵ I couldn't find any visible effect from keeping this active, so I decided to remove it.
# # msg['Reply-to']='[email protected]' # ⟵ I couldn't find any visible effect from keeping this active, so I decided to remove it.
# # msg.preamble='Multipart massage mushrooms.\n' # ⟵ I couldn't find any visible effect from keeping this active, so I decided to remove it.
# msg.attach(MIMEText(body))
# if attachmentⳆattachments:
# for filename in enlist(attachmentⳆattachments):
# assert isinstance(filename,str) # These should be file paths.
# part=MIMEApplication(open(filename,"rb").read())
# part.add_header('Content-Disposition','attachment',filename=filename) # ⟵ I tested getting rid of this line. If you get rid of the line, it simply lists the attachment as a file on the bottom of the email, …
# # … and wouldn't show (for example) an image. With it, though, the image is displayed. Also, for files it really can't display (like .py files), it will simply act as if this line weren't here and won't cause any sort of error.
# msg.attach(part)
# try:
# with smtplib.SMTP("smtp.gmail.com:587") as server:
# server.ehlo()
# server.starttls()
# server.login(gmail_address,password)
# server.sendmail(gmail_address,emaillist,msg.as_string())
# server.close()
# if not shutup:
# print('r.send_gmail_email: successfully sent your email to ' + str(recipientⳆrecipients))
# except Exception as E:
# if not shutup:
# print('r.send_gmail_email: failed to send your email to ' + str(recipientⳆrecipients) + ". Error message: " + str(E))
# # region Old version of send_gmail_email (doesn't support attachments):
# """def send_gmail_email(recipientⳆrecipients, subject:str="", body:str="",gmail_address:str=default_gmail_address,password:str=default_gmail_password,shutup=False):
# # param recipientⳆrecipients: Can be either a string or a list of strings: all the emails we will be sending this message to.
# import smtplib
# FROM = gmail_address
# TO = enlist(recipientⳆrecipients)# Original code: recipient if type(recipient) is list else [recipient]
# SUBJECT = subject
# TEXT = body
# # Prepare actual message
# message = "From: %s\nTo: %s\nSubject: %s\n\n%s\n" % (FROM, ", ".join(TO), SUBJECT, TEXT)
# try:
# server = smtplib.SMTP("smtp.gmail.com", 587)
# server.ehlo()
# server.starttls()
# server.login(gmail_address, password)
# server.sendmail(FROM, TO, message)
# server.close()
# if not shutup:
# print('r: send_gmail_email: successfully sent the mail')
# except:
# if not shutup:
# print( "r: send_gmail_email: failed to send mail")"""
# # endregion
# def gmail_inbox_summary(gmail_address: str = default_gmail_address,password: str = default_gmail_password,max_ↈ_emails: int = default_max_ↈ_emails,just_unread_emails: bool = True):
# # Parameters captured in this summary include the fields (for the dicts in the output list) of
# # TODO[millis,sender,receiver,subject,sender_email,sender_name] (Just using a TODO so that it's a different color in the code so it stands out more) (all accessed as strings, of course)
# # returns a list of dictionaries. The length of this list ﹦ the number of emails in the inbox (both read and unread).
# # max_ↈ_emails ≣ max_number_of_emails --> caps the number of emails in the summary, starting with the most recent ones.
# '''Example output:
# [{'sender_email': '[email protected]', 'sender': '"Richard McKenna" <[email protected]>', 'millis': 1484416777000, 'sender_name': '"Richard McKenna"', 'subject': '[Stony Brook Computing Society] 10 games in 10 days. Today\'s game is "Purple...', 'receiver': 'Stony Brook Computing Society <[email protected]>'},
# {'sender_email': '[email protected]', 'sender': '"Richard McKenna" <[email protected]>', 'millis': 1484368779000, 'sender_name': '"Richard McKenna"', 'subject': '[Stony Brook Game Developers (SBGD)] New link', 'receiver': '"Stony Brook Game Developers (SBGD)" <[email protected]>'},
# {'sender_email': '[email protected]', 'sender': 'Google <[email protected]>', 'millis': 1484366367000, 'sender_name': 'Google', 'subject': 'New sign-in from Safari on iPhone', 'receiver': '[email protected]'},
# {'sender_email': '[email protected]', 'sender': '"Richard McKenna" <[email protected]>', 'millis': 1484271805000, 'sender_name': '"Richard McKenna"', 'subject': '[Stony Brook Computing Society] 10 games in 10 days. Today\'s game is "Jet LIfe"....', 'receiver': 'Stony Brook Computing Society <[email protected]>'},
# {'sender_email': '[email protected]', 'sender': 'imitone sales <[email protected]>', 'millis': 1484240836000, 'sender_name': 'imitone sales', 'subject': 'A new version of imitone is available!', 'receiver': '[email protected]'}]'''
# # The following code I got of the web somewhere and modified a lot, I don't remember where though. Whatevs.
# import datetime
# import email
# import imaplib
# with imaplib.IMAP4_SSL('imap.gmail.com') as mail:
# # ptoc()
# mail.login(gmail_address,password)
# # ptoc()
# mail.list()
# # ptoc()
# mail.select('inbox')
# # ptoc()
# result,data=mail.uid('search',None,"UNSEEN" if just_unread_emails else "ALL") # (ALL/UNSEEN)
# # ptoc()
# email_summaries=[] # A list of dictionaries. Will be added to in the for loop shown below.
# ↈ_emails=len(data[0].split())
# for x in list(reversed(range(ↈ_emails)))[:min(ↈ_emails,max_ↈ_emails)]:
# latest_email_uid=data[0].split()[x]
# result,email_data=mail.uid('fetch',latest_email_uid,'(RFC822)')
# # result, email_data = conn.store(num,'-FLAGS','\\Seen')
# # this might work to set flag to seen, if it doesn't already
# raw_email=email_data[0][1]
# raw_email_string=raw_email.decode('utf-8')
# email_message=email.message_from_string(raw_email_string)
# # Header Details
# date_tuple=email.utils.parsedate_tz(email_message['Date'])
# if date_tuple:
# local_date=datetime.datetime.fromtimestamp(email.utils.mktime_tz(date_tuple))
# # local_message_date=local_date.ctime()# formats the date in a nice readable way
# local_message_date=local_date.timestamp() # Gets seconds since 1970
# local_message_date=int(1000 * local_message_date) # millis since 1970
# email_from=str(email.header.make_header(email.header.decode_header(email_message['From'])))
# email_to=str(email.header.make_header(email.header.decode_header(email_message['To'])))
# subject=str(email.header.make_header(email.header.decode_header(email_message['Subject'])))
# # noinspection PyUnboundLocalVariable
# email_summaries.append(dict(millis=local_message_date,sender=email_from,receiver=email_to,subject=subject,sender_email=email_from[1 + email_from.find('<'):-1] if '<' in email_from else email_from,sender_name=email_from[:email_from.find('<') - 1]))
# # print('\n'.join(map(str,email_summaries)))//⟵Would display all email summaries in console
# return email_summaries
# def _default_what_to_do_with_unread_emails(x):
# # An arbitrary default used as an example so that 'continuously_scan_gmail_inbox' can be run with no arguments
# # Example: continuously_scan_gmail_inbox()
# # By default, the continuous email scan will print out the emails and also read their subjects aloud via text-to-speech. (Assumes you're using a mac for that part).
# print(x)
# text_to_speech_via_apple(x['subject'],run_as_thread=False)
# send_gmail_email(x['sender_email'],'EMAIL RECEIVED: ' + x['subject'])
# def continuously_scan_gmail_inbox(what_to_do_with_unread_emails: callable = _default_what_to_do_with_unread_emails,gmail_address: str = default_gmail_address,password: str = default_gmail_password,max_ↈ_emails: int = default_max_ↈ_emails,include_old_but_unread_emails: bool = False):
# # returns a new thread that is ran constantly unless you kill it. It will constantly scan the subjects of all emails received
# # …AFTER the thread has been started. When it received a new email, it will run the summary of that email through the
# # …'what_to_do_with_unread_emails' method, as a triggered event. It returns the thread it's running on so you can do stuff with it later on.
# # …Unfortunately, I don't know how to make it stop though...
# # include_old_but_unread_emails: If this is false, we ignore any emails that were sent before this method was called. Otherwise, if include_old_but_unread_emails is true, …
# # …we look at all emails in the inbox (note: this is only allowed to be used in this context because python marks emails as 'read' when it accesses them, …
# # …and we hard-code just_unread_emails=True in this method so that we never read an email twice.)
# return run_as_new_thread(_continuously_scan_gmail_inbox,what_to_do_with_unread_emails,gmail_address,password,max_ↈ_emails,include_old_but_unread_emails)
# def _continuously_scan_gmail_inbox(what_to_do_with_unread_emails,gmail_address,password,max_ↈ_emails,include_old_but_unread_emails):
# # This is a helper method because it loops infinitely and is therefore run on a new thread each time.
# exclusive_millis_min=millis()
# # times=[] # ⟵ For debugging. Look at the end of the while loop block to see more.
# while True:
# tic()
# # max_millis=exclusive_millis_min
# for x in gmail_inbox_summary(gmail_address,password,max_ↈ_emails):
# assert isinstance(x,dict) # x's type is determined by gmail_inbox_summary, which is a blackbox that returns dicts. This assertion is for type-hinting.
# if x['millis'] > exclusive_millis_min or include_old_but_unread_emails:
# # if x['millis']>max_millis:
# # max_millis=x['millis']
# what_to_do_with_unread_emails(x)
# # exclusive_millis_min=max_millis
# # times.append(toc())
# # line_graph(times)
# # ptoctic()# UPDATE: It's fine. Original (disproved) thought ﹦ (I don't know why, but the time here just keeps growing and growing...)
# endregion
# region Suppress/Restore all console output/warnings: [suppress_console_output,restore_console_output,force_suppress_console_output,force_restore_console_output,force_suppress_warnings,force_restore_warnings]
# b=sys.stdout.write;sys.stdout.write=None;sys.stdout.write=b
# Saved reference to the genuine sys.stdout.write, used to restore output after muting.
_original_stdout_write=sys.stdout.write # ⟵ DO NOT ALTER THIS! It will cause your code to crash.
def _muted_stdout_write(x: str):
assert isinstance(x,str) # ⟵ The original method only accepts strings.
return len(x) # ⟵ The original method returns the length of the string; I don't know why. '
# Re-entrant suppression counter: stdout is muted while this is < 1.
# suppress_console_output() decrements it; restore_console_output() increments it;
# the force_* variants reset it outright.
_console_output_level=1
def suppress_console_output():
    """Mute ALL stdout output until a matching restore_console_output() call.

    Calls nest: each call lowers the module-level counter, and stdout stays
    muted for as long as the counter is below 1.
    """
    global _console_output_level
    _console_output_level = _console_output_level - 1
    if not _console_output_level >= 1:
        sys.stdout.write = _muted_stdout_write
def restore_console_output():
    """Undo one suppress_console_output() call.

    Raises the nesting counter; once it reaches 1 again, the original
    sys.stdout.write is reinstated and printing resumes.
    """
    global _console_output_level
    _console_output_level = _console_output_level + 1
    if not _console_output_level < 1:
        sys.stdout.write = _original_stdout_write
def force_suppress_console_output():
    """Immediately mute ALL stdout output, ignoring any suppression nesting.

    Resets the counter to the fully-suppressed state (0); undo with
    restore_console_output() or force_restore_console_output().
    """
    global _console_output_level
    _console_output_level = 0
    sys.stdout.write = _muted_stdout_write
def force_restore_console_output():
    """Immediately un-mute stdout, ignoring any suppression nesting.

    Resets the counter to the fully-restored state (1) and reinstates the
    original sys.stdout.write.
    """
    global _console_output_level
    _console_output_level = 1
    sys.stdout.write = _original_stdout_write
def force_suppress_warnings():
    """Globally silence all Python warnings (inserts an 'ignore' filter)."""
    warnings.filterwarnings("ignore")
def force_restore_warnings():
    """Re-enable Python's default warning behaviour (inserts a 'default' filter)."""
    warnings.filterwarnings("default")
def TemporarilySuppressConsoleOutput():
    """Return a context manager that mutes stdout inside a with-block.

    Wraps TemporarilySetAttr so that sys.stdout.write is swapped for the no-op
    _muted_stdout_write on entry and automatically restored on exit.
    """
    return TemporarilySetAttr(sys.stdout, write=_muted_stdout_write)
# def toggle_console_output ⟵ I was going to implement this, but then decided against it: it could get really annoying/confusing if used often.
# endregion
# region Ryan's Inspector: [rinsp]
#def get_bytecode(obj):
# #Commented this function out because it's broken, even though it's a good idea
# import dis
# return dis.Bytecode(lambda x:x + 1).dis()
#def format_date(date)->str:
# """
# This function formats datetimes the way I personally like to read them.
#
# EXAMPLE:
# >>> get_current_date()
# ans = 2023-08-22 14:06:01.764838
# >>> format_date(ans)
# ans = Tue Aug 22, 2023 at 2:06:01PM
#
# TODO: In the future, only if I want to, I'll add another argument to let you customize the date string. But I really like this format lol
# """
# import datetime
# assert isinstance(date,datetime.datetime)
#
# assert isinstance(date, datetime.datetime), "Input must be a datetime object"
#
# # Format the date string and append the timezone abbreviation
# formatted_date = date.strftime('%a %b %d, %Y at %-I:%M:%S%p')
#
# if date.tzinfo is not None:
# #If the date has a timezone, add it to the output
# formatted_date += ' ' + date.tzname() #PST, EST, Etc
#
# return formatted_date
# Maps common timezone abbreviations (matched case-insensitively by
# _translate_timezone) to IANA/Olson zone names usable with pytz.timezone().
# NOTE(review): Python dict literals keep the LAST occurrence of a repeated
# key, and several abbreviations below repeat ("CST", "IST", "AST"). So "CST"
# resolves to Asia/Shanghai (not America/Chicago), "IST" to Europe/Dublin
# (not Asia/Kolkata or Asia/Jerusalem), and "AST" to Asia/Riyadh (not
# America/Puerto_Rico) — confirm this is intended.
_timezone_translations = {
    # North America
    "PST": "America/Los_Angeles", # Pacific Standard Time
    "PDT": "America/Los_Angeles", # Pacific Daylight Time
    "PT" : "America/Los_Angeles", # Pacific Time
    "MST": "America/Denver", # Mountain Standard Time
    "MDT": "America/Denver", # Mountain Daylight Time
    "MT" : "America/Denver", # Mountain Time
    "CST": "America/Chicago", # Central Standard Time  (NOTE(review): overridden by the Asia "CST" entry below)
    "CDT": "America/Chicago", # Central Daylight Time
    "CT" : "America/Chicago", # Central Time
    "EST": "America/New_York", # Eastern Standard Time
    "EDT": "America/New_York", # Eastern Daylight Time
    "ET" : "America/New_York", # Eastern Time
    "HST": "Pacific/Honolulu", # Hawaii Standard Time
    "AKST": "America/Anchorage", # Alaska Standard Time
    "AKDT": "America/Anchorage", # Alaska Daylight Time
    "AST": "America/Puerto_Rico", # Atlantic Standard Time  (NOTE(review): overridden by the Middle East "AST" entry below)
    "ADT": "America/Halifax", # Atlantic Daylight Time
    "UTC": "Etc/UTC", # Coordinated Universal Time
    "AOE": "Etc/UTC", # Anywhere on Earth
    # Asia
    "IST": "Asia/Kolkata", # Indian Standard Time  (NOTE(review): overridden by later "IST" entries)
    "CST": "Asia/Shanghai", # China Standard Time
    "JST": "Asia/Tokyo", # Japan Standard Time
    "KST": "Asia/Seoul", # Korea Standard Time
    "IDT": "Asia/Jerusalem", # Israel Daylight Time
    "IST": "Asia/Jerusalem", # Israel Standard Time  (NOTE(review): overridden by the Europe "IST" entry below)
    # Europe
    "BST": "Europe/London", # British Summer Time
    "GMT": "Europe/London", # Greenwich Mean Time
    "CET": "Europe/Berlin", # Central European Time
    "CEST": "Europe/Berlin", # Central European Summer Time
    "EET": "Europe/Athens", # Eastern European Time
    "EEST": "Europe/Athens", # Eastern European Summer Time
    "MSK": "Europe/Moscow", # Moscow Standard Time
    # Australia
    "AEST": "Australia/Sydney", # Australian Eastern Standard Time
    "AEDT": "Australia/Sydney", # Australian Eastern Daylight Time
    "ACST": "Australia/Adelaide", # Australian Central Standard Time
    "ACDT": "Australia/Adelaide", # Australian Central Daylight Time
    "AWST": "Australia/Perth", # Australian Western Standard Time
    # More of North America
    "NST": "America/St_Johns", # Newfoundland Standard Time
    "NDT": "America/St_Johns", # Newfoundland Daylight Time
    # Central and South America
    "ART": "America/Buenos_Aires", # Argentina Time
    "BRT": "America/Sao_Paulo", # Brasilia Time
    "BRST": "America/Sao_Paulo", # Brasilia Summer Time
    "CLT": "America/Santiago", # Chile Standard Time
    "CLST": "America/Santiago", # Chile Summer Time
    "COT": "America/Bogota", # Colombia Time
    # More of Europe
    "WET": "Europe/Lisbon", # Western European Time
    "WEST": "Europe/Lisbon", # Western European Summer Time
    "IST": "Europe/Dublin", # Irish Standard Time
    # Africa
    "EAT": "Africa/Nairobi", # East Africa Time
    "CAT": "Africa/Harare", # Central Africa Time
    "WAT": "Africa/Lagos", # West Africa Time
    "WAST": "Africa/Windhoek", # West Africa Summer Time
    "SAST": "Africa/Johannesburg", # South Africa Standard Time
    # More of Asia
    "SGT": "Asia/Singapore", # Singapore Time
    "HKT": "Asia/Hong_Kong", # Hong Kong Time
    "MYT": "Asia/Kuala_Lumpur", # Malaysia Time
    "WIT": "Asia/Jakarta", # Western Indonesia Time
    "PHT": "Asia/Manila", # Philippine Time
    "THA": "Asia/Bangkok", # Thailand Standard Time
    # Middle East
    "AST": "Asia/Riyadh", # Arabian Standard Time
    "GST": "Asia/Dubai", # Gulf Standard Time
    # Pacific
    "NZST": "Pacific/Auckland", # New Zealand Standard Time
    "NZDT": "Pacific/Auckland", # New Zealand Daylight Time
    "FJT": "Pacific/Fiji", # Fiji Time
    "FJST": "Pacific/Fiji", # Fiji Summer Time
    "TOT": "Pacific/Tongatapu", # Tonga Time
    "CHAST": "Pacific/Chatham", # Chatham Standard Time
    "CHADT": "Pacific/Chatham", # Chatham Daylight Time
    "LINT": "Pacific/Kiritimati", # Line Islands Time
    # GMT Offsets
    # (The Etc/GMT zone names use inverted signs by POSIX convention: Etc/GMT+8 is 8 hours BEHIND UTC.)
    "GMT-11": "Etc/GMT+11",
    "GMT-10": "Etc/GMT+10",
    "GMT-9" : "Etc/GMT+9",
    "GMT-8" : "Etc/GMT+8",
    "GMT-7" : "Etc/GMT+7",
    "GMT-6" : "Etc/GMT+6",
    "GMT-5" : "Etc/GMT+5",
    "GMT-4" : "Etc/GMT+4",
    "GMT-3" : "Etc/GMT+3",
    "GMT-2" : "Etc/GMT+2",
    "GMT-1" : "Etc/GMT+1",
    "GMT+1" : "Etc/GMT-1",
    "GMT+2" : "Etc/GMT-2",
    "GMT+3" : "Etc/GMT-3",
    "GMT+4" : "Etc/GMT-4",
    "GMT+5" : "Etc/GMT-5",
    "GMT+6" : "Etc/GMT-6",
    "GMT+7" : "Etc/GMT-7",
    "GMT+8" : "Etc/GMT-8",
    "GMT+9" : "Etc/GMT-9",
    "GMT+10": "Etc/GMT-10",
    "GMT+11": "Etc/GMT-11",
    "GMT+12": "Etc/GMT-12",
}
def _translate_timezone(x):
    """Translate a timezone abbreviation like 'pst' (case-insensitive) into an
    IANA zone name such as 'America/Los_Angeles'.

    Raises KeyError, listing every supported abbreviation, when *x* is unknown.
    """
    assert isinstance(x, str)
    key = x.upper()
    if key in _timezone_translations:
        return _timezone_translations[key]
    supported = ", ".join(_timezone_translations)
    raise KeyError(
        "Invalid timezone string: " + repr(key) + ". Please choose from: " + supported
    )
# Optional global override (an abbreviation like 'UTC' or 'PST'). When set,
# format_date()/format_current_date() fall back to it whenever their timezone
# argument is None. Presumably configured from the user's rprc — see the
# format_date docstring example that assigns r._default_timezone.
_default_timezone=None
def format_date(date, timezone=None, *, align=False):
    """Render a datetime the way Ryan likes reading them.

    date: a datetime.datetime instance (asserted).
    timezone: optional abbreviation such as 'PST', 'EDT', 'UTC' (see
        _timezone_translations). None falls back to the module-level
        _default_timezone; an empty string means "format as-is, no conversion".
    align: right-justify the clock portion so stacked dates line up in columns.

    EXAMPLES:
        >>> get_current_date()
       ans = 2024-07-11 01:12:59.200330
        >>> print(type(ans)) ––> <class 'datetime.datetime'>
        >>> print(format_date(ans)) ––> Thu Jul 11, 2024 at 1:12:59AM
        >>> print(format_date(ans,'pdt')) ––> Thu Jul 11, 2024 at 1:12:59AM
        >>> print(format_date(ans,'cdt')) ––> Thu Jul 11, 2024 at 3:12:59PM CDT
        >>> print(format_date(ans,'edt')) ––> Thu Jul 11, 2024 at 4:12:59AM EDT
        >>> print(format_date(ans,'aoe')) ––> Thu Jul 11, 2024 at 8:12:59AM UTC
        >>> print(format_date(ans,'')) ––> Thu Jul 11, 2024 at 1:12:59AM
        >>> print(format_date(ans,None)) ––> Thu Jul 11, 2024 at 1:12:59AM
        >>> r._default_timezone='UTC'
        >>> print(format_date(ans,None)) ––> Thu Jul 11, 2024 at 8:12:59AM UTC
        >>> print(format_date(ans,'')) ––> Thu Jul 11, 2024 at 8:12:59AM
    ALIGN IS SO YOU CAN DO THINGS LIKE THIS:
        35: b06bb58256 [[Fri Feb 28, 2025 at 11:55:05PM]] Ficx eta
        34: 04cf83f8d2 [[Sat Mar 01, 2025 at 12:37:04AM]] Fixes and get_nested_attr
        33: aedd490a14 [[Sat Mar 01, 2025 at  1:11:26AM]] ETA better!
        32: 710eca8bf6 [[Sat Mar 01, 2025 at  2:15:10AM]] Updoot
    """
    import datetime

    if timezone is None:
        timezone = _default_timezone

    if timezone:
        pip_import('pytz')
        import pytz

    assert isinstance(date, datetime.datetime), "Input must be a datetime object"

    if timezone:
        # Shift into the requested zone; this attaches tzinfo, so the zone
        # suffix below will be appended too.
        date = date.astimezone(pytz.timezone(_translate_timezone(timezone)))

    # Build the 12-hour clock by hand: strftime's no-leading-zero hour flag
    # ('%-I') isn't portable across platforms.
    twelve_hour = date.hour % 12 or 12
    am_pm = "PM" if date.hour >= 12 else "AM"
    clock = "{hour}:{minute:02d}:{second:02d}{am_pm}".format(
        hour=twelve_hour, minute=date.minute, second=date.second, am_pm=am_pm
    )
    if align:
        clock = clock.rjust(len(" 2:32:45AM"))

    result = "{date_part} at {time_part}".format(
        date_part=date.strftime("%a %b %d, %Y"), time_part=clock
    )

    if date.tzinfo is not None:
        # Timezone-aware dates get their abbreviation appended (PDT, UTC, ...)
        result += " " + date.strftime("%Z")

    return result
def format_current_date(timezone=None):
    """format_date() applied to right now.

    timezone: optional abbreviation forwarded to format_date(); None uses the
        _default_timezone fallback.

    EXAMPLES:
        >>> format_current_date()#I'm in California
       ans = "Thu Jun 27, 2024 at 8:15:24PM"
        >>> format_current_date('PST')
       ans = "Thu Jun 27, 2024 at 8:15:29PM PDT"
        >>> format_current_date('EST')
       ans = "Thu Jun 27, 2024 at 11:15:31PM EDT"
    """
    # TODO: See format_date todo
    now = get_current_date()
    return format_date(now, timezone=timezone)
_format_datetime = format_date # For compatibility - older code used rp.r._format_datetime
def get_current_timezone():
    """Best-effort guess of the local timezone abbreviation.

    Honors the _default_timezone override when set (e.g. from rprc); otherwise
    renders "now" without conversion and returns the first abbreviation in
    _timezone_translations whose rendering matches it. Falls through (returns
    None) if nothing matches.

    EXAMPLE:
        >>> get_current_timezone()
       ans = PST
    """
    if _default_timezone is not None:
        # Manual override takes precedence over detection
        return _default_timezone
    now = get_current_date()
    local_rendering = format_date(now, "")
    for abbreviation in _timezone_translations:
        if format_date(now, abbreviation).startswith(local_rendering):
            return abbreviation
# Scratch slot used by rinsp(): attribute access is performed via
# eval('_rinsp_temp_object.<name>'), so the object under inspection is parked here.
_rinsp_temp_object=None
# Handle on the real print(); rinsp() shadows print locally to capture its output.
_builtin_print=print
def rinsp(object,search_or_show_documentation:bool=False,show_source_code:bool=False,show_summary: bool = False,max_str_lines: int = 5,*,fansi=fansi) -> None: # r.inspect
    # This method is really uglily written (by Cthulu Himself, would ya believe!) because I made no attempt to refactor it. But it works and its really useful.
    # search_or_show_documentation: If this is a string, it won't show documentation UNLESS show_source_code ⋁ show_summary. BUT it will limit dir⋃dict to entries that contain search_or_show_documentation. Used for looking up that function name you forgot.
    """
    Print an "rinsp report" inspecting *object*: its entries (dir ∪ __dict__),
    str() preview, len/shape/dtype/memory stats, class ancestry, type, source
    file, call signature, and — on request — a per-attribute summary, the
    source code, and __doc__.

    object: anything — module, class, function, numpy array / torch tensor,
        file-path string, int (also interpreted as PID / port / byte count), dict, etc.
    search_or_show_documentation: True prints __doc__; a *string* instead
        filters the ENTRIES listing to names containing that substring.
    show_source_code: also print the source via inspect.getsource.
    show_summary: also print a one-line summary of every attribute.
    max_str_lines: clip long str()/doc output to roughly this many lines.
    fansi: keyword-only override for the ANSI-coloring helper.

    Returns None. Everything printed is also collected and handed to
    _maybe_display_string_in_pager at the end.
    """
    printed_lines=[]
    # Shadow print() locally so every line is both echoed live and captured for the pager.
    def print(*x,end='\n',flush=False):
        out=' '.join(map(str,x))+end
        printed_lines.append(out)
        _builtin_print(end=out,flush=flush)
    """
    rinsp report (aka Ryan's Inspection):
        OBJECT: rinsp(object, show_source_code=False, max_str_lines:int=5)
        TYPE: class 'function'
        FILE: module '__main__' from '/Users/Ryan/PycharmProjects/RyanBStandards_Python3.5/r.py'
        STR: <function rinsp at 0x109eb10d0>"""
    search_filter=isinstance(search_or_show_documentation,str) and search_or_show_documentation or ''
    if search_filter:
        search_or_show_documentation=False or show_source_code or show_summary
    import inspect as i
    # Clip x to about max_str_lines terminal lines, appending a cyan "…continues…" note.
    def linerino(x,prefix_length=0):
        max_string_length=max(0,max_str_lines*get_terminal_width()-prefix_length)
        number_of_lines=x.count("\n") + 1
        if len(x)<=max_string_length:
            new_x='\n'.join(x.split('\n')[:max_str_lines])
            continuation=fansi("\n" + tab + "\t………continues for " +str(number_of_lines - max_str_lines) + " more lines and "+str(len(x)- len(new_x)) + " more characters………",colour)
            return new_x + (continuation if (number_of_lines > max_str_lines + 1) else "") # max_str_lines+1 instead of just max_str_lines so we dont get '………continues for 1 more lines………'
        else:
            new_x=x[:max_string_length]
            continuation=fansi("\n" + tab + "\t………continues for " +str(number_of_lines - new_x.count('\n')-1) + " more lines and "+str(len(x)-max_string_length) + " more characters………",colour)
            return new_x + continuation
    tab=' '
    colour='cyan'
    col=lambda x:fansi(x,colour,'bold')  # bold cyan: used for all report headers
    # _=col('rinsp report (aka Ryan\'s Inspection):')
    _=col('rinsp report (aka Ryan\'s Inspection):')
    print(_)
    if True:
        #Display ENTRIES
        temp=object
        try: # noinspection PyStatementEffect
            object.__dict__
            print(col(tab + "ENTRIES: "),end="",flush=False)
            # print(col(tab + "DIR⋃DICT: "),end="",flush=False) # <---- ORIGINAL CODE
        except:
            temp=type(object)
            print(col(tab + "ENTRIES: "),end="",flush=False) # If we can't get the dict of (let's say) a numpy array, we get the dict of it's type which gives all its parameters' names, albeit just their default values.
            # print(col(tab + "DIR⋃TYPE.DICT: "),end="",flush=False) # If we can't get the dict of (let's say) a numpy array, we get the dict of it's type which gives all its parameters' names, albeit just their default values. # <---- ORIGINAL CODE
        dict_used=set(temp.__dict__)
        dict_used=dict_used.union(set(dir(object)))
        d=dict_used
        if search_filter:
            print(fansi(tab + "FILTERED: ",'yellow','bold'),end="",flush=False)
            d={B for B in d if search_filter in B}
        # Sort order: public names first, then _private, then __dunders__.
        def sorty(d):
            A=sorted([x for x in d if x.startswith("__") and x.endswith("__")]) # Moving all built-ins and private variables to the end of the list
            B=sorted([x for x in d if x.startswith("_") and not x.startswith("__") and not x.endswith("__")])
            C=sorted(list(set(d) - set(A) - set(B)))
            return C + B + A
        dict_used=sorty(d)
        if len(dict_used) != 0:
            global _rinsp_temp_object
            _rinsp_temp_object=object
            attrs={}
            for attrname in dict_used:
                try:
                    attrs[attrname]=(eval('_rinsp_temp_object.' + attrname))
                except:
                    attrs[attrname]=(fansi("ERROR: Cannot evaluate",'red'))
            # Color legend for entries: green=callable, blue=module, red=attribute access raised.
            def color(attr):
                try:
                    attr=eval('_rinsp_temp_object.' + attr) # callable(object.__dir__.__get__(attr))
                except:
                    return ('red',None)
                if callable(attr):
                    return ('green', ) # Green if callable
                def is_module(x):
                    import types
                    return isinstance(x,types.ModuleType)
                if is_module(attr):
                    return ('blue',)
                return [None] # Plain and boring if else
            dict_used_with_callables_highlighted_green=[fansi(x,*color(x)) for x in dict_used]
            print_string=(str(len(dict_used)) + ' things: [' + ', '.join(dict_used_with_callables_highlighted_green) + "]") # Removes all quotes in the list so you can rad ) +" ⨀ ⨀ ⨀ "+str(dict_used).replace("\n","\\n"))
            print_string=print_string.replace('\x1b[0m','')#Make rendering large amounts of commas etc faster (switching between formats seems to make terminal rendering slow and even crashes windows)
            if currently_running_windows():
                print_string=strip_ansi_escapes(print_string)#This is to prevent really slowwww crashes on windows cause windows sucks lol
            print(end=print_string)
            print()
        else:
            print(end="\r") # Erase the previous line (aka "DICT: " or "TYPE.DICT: ")
    str_on_top=True
    if str_on_top: #ENABLE THIS REGION TO PRINT 'STR:' on the TOP instead of the bottom
        try:
            # GETTING CHARACTER FOR TEMP
            def is_module(x):
                import types
                return isinstance(x,types.ModuleType)
            if not is_module(object):
                prefix=tab + "STR: "
                print((col(prefix) + linerino(str(object),len(prefix))))
        except:
            pass
        # try:
        #     # GETTING CHARACTER FOR TEMP
        #     def is_module(x):
        #         import types
        #         return isinstance(x,types.ModuleType)
        #     if not is_module(object):
        #         print(end=col(tab + "STR: ") + linerino(str(object)))
        #         print()
        # except Exception as e:
        #     # print_verbose_stack_trace(e)
        #     pass
    if False:
        pass
        # _=col(tab + 'OBJECT: ')
        # ⵁ_errored=False
        # try:
        #     _+=object.__name__
        # except Exception as e:
        #     _+='[cannot obtain object.__name__ without error: ' + str(e) + ']'
        #     ⵁ_errored=True
        # try:
        #     _+=str(i.signature(object))
        # except:
        #     pass
        # if not ⵁ_errored and _.strip():
        #     # print()
        #     print(end=_)
        #     print()
    # Best-effort one-line stats (LEN / LINES / SHAPE / DTYPE / MEMORY / DEVICE / min / max / mean).
    # The whole section is wrapped in try/except: any failure just skips the stats.
    try:
        temp=object
        from types import ModuleType
        neednewline=False
        try:
            print(col(tab+"LEN: ")+str(len(object)),end=' ')
            if isinstance(object,bytes) or isinstance(object,str):
                print(col('aka '+human_readable_file_size(len(object))),end=' ')
            neednewline=True
        except Exception:pass
        if isinstance(object,str):
            print(col(tab + "LINES: ")+repr(number_of_lines(object)),flush=False,end='')
        if hasattr(object,'shape'):
            print(col(tab + "SHAPE: ")+repr(object.shape),flush=False,end='')
            neednewline=True
        if hasattr(object,'dtype'):
            print(col(tab + "DTYPE: ")+repr(object.dtype),flush=False,end='')
        if is_numpy_array(object):
            print(col(tab + "MEMORY: ")+human_readable_file_size(object.size*object.dtype.itemsize),flush=False,end='')
        if is_torch_tensor(object):
            # NOTE(review): Tensor.itemsize requires a fairly recent torch; older versions only expose element_size() — confirm minimum supported torch version.
            print(col(tab + "MEMORY: ")+human_readable_file_size(object.numel()*object.itemsize),flush=False,end='')
        if hasattr(object,'device'):
            try: print(col(tab + "DEVICE: ")+str(object.device),flush=False,end='')
            except Exception: pass
        if hasattr(object,'min') and callable(object.min):
            try: print(col(tab + "min:")+" %.6f"%(object.min()),flush=False,end='')
            except Exception: pass
        if hasattr(object,'max') and callable(object.max):
            try: print(col(tab + "max:")+" %.6f"%(object.max()),flush=False,end='')
            except Exception: pass
        if hasattr(object,'mean') and callable(object.mean):
            try: print(col(tab + "mean:")+" %.6f"%(object.mean()),flush=False,end='')
            except Exception: pass
        if neednewline:
            print(flush=False)
        if isinstance(object,ModuleType):
            submodulenames=[x.split('.')[-1] for x in get_all_submodule_names(object)]
            if submodulenames:
                print(col(tab + "SUBMODULES: ")+(', '.join(submodulenames)),end="\n",flush=False) # If we can't get the dict of (let's say) a numpy array, we get the dict of it's type which gives all its parameters' names, albeit just their default values.
        if hasattr(object,'__version__'):
            print(col(tab + "VERSION: ")+str(object.__version__),end="\n",flush=False) # If we can't get the dict of (let's say) a numpy array, we get the dict of it's type which gives all its parameters' names, albeit just their default values.
        # try: # noinspection PyStatementEffect
        #     object.__dict__
        #     print(col(tab + "ENTRIES: "),end="",flush=False)
        #     # print(col(tab + "DIR⋃DICT: "),end="",flush=False) # <---- ORIGINAL CODE
        # except:
        #     temp=type(object)
        #     print(col(tab + "ENTRIES: "),end="",flush=False) # If we can't get the dict of (let's say) a numpy array, we get the dict of it's type which gives all its parameters' names, albeit just their default values.
        #     # print(col(tab + "DIR⋃TYPE.DICT: "),end="",flush=False) # If we can't get the dict of (let's say) a numpy array, we get the dict of it's type which gives all its parameters' names, albeit just their default values. # <---- ORIGINAL CODE
        # dict_used=set(temp.__dict__)
        # dict_used=dict_used.union(set(dir(object)))
        # d=dict_used
        # if search_filter:
        #     print(fansi(tab + "FILTERED: ",'yellow','bold'),end="",flush=False)
        #     d={B for B in d if search_filter in B}
        # def sorty(d):
        #     A=sorted([x for x in d if x.startswith("__") and x.endswith("__")]) # Moving all built-ins and private variables to the end of the list
        #     B=sorted([x for x in d if x.startswith("_") and not x.startswith("__") and not x.endswith("__")])
        #     C=sorted(list(set(d) - set(A) - set(B)))
        #     return C + B + A
        # dict_used=sorty(d)
        # if len(dict_used) != 0:
        #     global _rinsp_temp_object
        #     _rinsp_temp_object=object
        #     attrs={}
        #     for attrname in dict_used:
        #         try:
        #             attrs[attrname]=(eval('_rinsp_temp_object.' + attrname))
        #         except:
        #             attrs[attrname]=(fansi("ERROR: Cannot evaluate",'red'))
        #     def color(attr):
        #         try:
        #             attr=eval('_rinsp_temp_object.' + attr) # callable(object.__dir__.__get__(attr))
        #         except:
        #             return 'red',None
        #         if callable(attr):
        #             return 'green', # Green if callable
        #         return [None] # Plain and boring if else
        #     dict_used_with_callables_highlighted_green=[fansi(x,*color(x)) for x in dict_used]
        #     print_string=(str(len(dict_used)) + ' things: [' + ', '.join(dict_used_with_callables_highlighted_green) + "]") # Removes all quotes in the list so you can rad ) +" ⨀ ⨀ ⨀ "+str(dict_used).replace("\n","\\n"))
        #     print_string=print_string.replace('\x1b[0m','')#Make rendering large amounts of commas etc faster (switching between formats seems to make terminal rendering slow and even crashes windows)
        #     if currently_running_windows():
        #         print_string=strip_ansi_escapes(print_string)#This is to prevent really slowwww crashes on windows cause windows sucks lol
        #     print(print_string)
        # else:
        #     print(end="\r") # Erase the previous line (aka "DICT: " or "TYPE.DICT: ")
    except:
        pass
    def parent_class_names(x,exclude={'object'}):
        #returns a set of strings containing the names of x's parent classes, exclu
        if not isinstance(x,type):
            x=type(x)
        return {y.__name__ for y in x.__bases__}-exclude
    parents=parent_class_names(object)
    parent_string=''
    if parents:
        prefix='PARENT'
        if len(parents)>1:
            prefix+='S'
        parent_string=col(', '+prefix+': ')+', '.join(sorted(parents))
    def get_full_class_name(class_object):
        # Prefer the fully-qualified "module.Class" spelling extracted from repr().
        out=repr(class_object)
        if out.startswith('<class \'') and out.endswith("'>"):
            return out[len('<class \''):-len("'>")]
        return class_object.__name__
    def get_parent_hierarchy(object):
        # Recursively build {parent_name: {grandparent_name: {...}}} from __bases__.
        from collections import OrderedDict
        # out=OrderedDict()
        out={}
        if not isinstance(object,type):
            object=object.__class__
        for parent in object.__bases__:
            # out[parent.__name__]=get_parent_hierarchy(parent)
            out[get_full_class_name(parent)]=get_parent_hierarchy(parent)
        return out
    def format_parent_hierarchy(hierarchy:dict,spaces=len(' ANCESTRY: ')):
        import pprint
        ans=pprint.pformat(hierarchy)
        ans=ans.replace("{'object': {}}",'object')
        ans=ans.replace("'",' ')
        ans=ans.splitlines()
        if len(ans)>1:
            ans[1:]=[' '*spaces+line for line in ans[1:]]
        ans=line_join(ans)
        return ans
    print(col(tab + 'ANCESTRY: ') + format_parent_hierarchy(get_parent_hierarchy(object)))#This is presented in an ugly format right now and should eventually replace 'parent'. But this can be done later.
    print(col(tab + 'TYPE: ') + str(type(object))[1:-1]+parent_string)
    if i.getmodule(object) is not None:
        # print(col(tab + 'FILE: ') + str(i.getmodule(object))[1:-1])
        try:
            print(col(tab + 'FILE: ') + str(get_source_file(object)))
        except TypeError as e:
            print(col(tab + 'FILE: ') + str(e))
    # Integers get extra interpretations: as a PID, as a port number, and as a byte count.
    if isinstance(object, int) and process_exists(object):
        print_process_info(object)
    if isinstance(object, int) and get_port_is_taken(object):
        try:
            process=get_process_using_port(object)
            by_string = 'BY PROCESS %i'%(process)
            print(col(tab + 'PORT %i IS TAKEN '%object + by_string))
            print_process_info(process)
        except Exception as e:
            by_string = '(unknown process: %s)'%repr(e)
            print(col(tab + 'PORT %i IS TAKEN '%object + by_string))
    if isinstance(object, int):
        filesize_string = human_readable_file_size(object)
        print(col(tab + 'Filesize:'),filesize_string)
    if is_symlink(object):
        print(col(tab + 'SYMLINK --> '+read_symlink(object)))
        if symlink_is_broken(object):
            print(tab+tab+col("(symlink is broken)"))
    def is_dictlike(x):
        return issubclass(type(x),dict)
    if is_dictlike(object) and all((isinstance(x, str) and not " " in x) for x in object):
        print(col(tab + 'KEYS (%i): '%len(object)) + ' '.join(object))
    # Strings that name an existing path get FILE STATS / FOLDER STATS.
    if isinstance(object,str) and path_exists(object):
        stats=[]
        def append_stat(title,stat=''):
            stats.append(col(title+':')+str(stat))
        try:
            path=object
            if file_exists(path):
                append_stat('FILE STATS')
                append_stat('size',get_file_size(path,human_readable=True))
                if is_image_file(path):
                    append_stat('resolution',str(get_image_file_dimensions(path)))
                if is_utf8_file(path):
                    append_stat('#lines',number_of_lines_in_file(path))
                if is_video_file(path):
                    append_stat('duration',str(get_video_file_duration(path))+'s')
            else:
                append_stat('FOLDER STATS')
                append_stat('#files',len(get_all_files(path)))
                append_stat('#subfolders',len(get_all_folders(path)))
            append_stat('date_modified',str(_format_datetime(date_modified(path))))
        except Exception as e:
            print_stack_trace(e)
            pass
        print(col(tab + ' '.join(stats)))
    def errortext(x):
        return fansi(x,'red','underlined')
    # if not str_on_top:
    #     try:
    #         # GETTING CHARACTER FOR TEMP
    #         def is_module(x):
    #             import types
    #             return isinstance(x,types.ModuleType)
    #         if not is_module(object):
    #             prefix=tab + "STR: "
    #             print((col(prefix) + linerino(str(object),len(prefix))))
    #     except:
    #         pass
    # else:
    #     pass
    if True:
        _=col(tab + 'OBJECT: ')
        ⵁ_errored=False
        try:
            _+=object.__name__
        except Exception as e:
            _+='[cannot obtain object.__name__ without error: ' + str(e) + ']'
            ⵁ_errored=True
        try:
            # Pretty-print the callable's signature (black-formatted when available).
            def format_signature(item):
                assert callable(item)
                import inspect
                def autoformat_python_via_black(code:str):
                    if sys.version_info>(3,6):
                        pip_import('black')
                        import black
                        return black.format_str(code,mode=black.Mode())
                    #Python versions older than 3.6 don't support black
                    return code
                sig=inspect.signature(item)
                sig=item.__name__+str(sig)
                sig='def '+sig+':pass'
                sig=autoformat_python_via_black(sig)
                sig=sig[len('def '):]
                sig=sig.strip()
                sig=sig[:-len('pass')]
                sig=sig.strip()
                sig=sig[:-len(':')]
                return sig
            def indentify_all_but_first_line(string,indent):
                lines=line_split(string)
                if len(lines)<1:
                    return string
                lines[1:]=[indent+line for line in lines[1:]]
                return line_join(lines)
            try:
                signature=format_signature(object)
                # signature=signature[len(object.__name__):]
                signature=indentify_all_but_first_line(signature,' '*len(' SIGNATURE: '))
            except Exception:
                signature=object.__name__+str(i.signature(object))
            _=col(tab+'SIGNATURE: ')+fansi_syntax_highlighting(str(signature))
        except:
            pass
        if not ⵁ_errored and _.strip():
            # print()
            print(end=_)
            print()
    if show_summary:
        # NOTE(review): 'attrs' and 'sorty' are only bound when the ENTRIES listing ran with a
        # non-empty name set — confirm show_summary can't be reached otherwise.
        def to_str(x):
            # Render one attribute value: prefer str(), fall back to __doc__ for '<...>' reprs.
            if x is None:
                return str(x)
            outtype='str()'
            out=str(x)
            if out and out[0] == '<' and out[-1] == '>':
                out=x.__doc__
                if out is None:
                    try:
                        out=i.getcomments(object)
                        outtype='doc()'
                    except:
                        out=str(out)
                        outtype='str()'
                else:
                    outtype='doc()'
            typestr=str(type(x))
            if typestr.count("'") >= 2:
                typestr=typestr[typestr.find("'") + 1:]
                typestr=typestr[:typestr.find("'")]
            elif typestr.count('"') >= 2:
                typestr=typestr[typestr.find('"') + 1:]
                typestr=typestr[:typestr.find('"')]
            out=fansi('[' + typestr + " : " + outtype + "]",'green') + " " + fansi(out,'blue')
            if '\n' in out:
                indent_prefix='' # '···'
                out='\n'.join((indent_prefix + x) for x in out.split('\n'))
                while '\n\n' in out:
                    out=out.replace('\n\n','\n')
            out=linerino(out)
            out=out.lstrip()
            out=out.rstrip()
            return out
        print(col(tab + "SUMMARY:"))
        print_string=display_dict(attrs,print_it=False,key_sorter=sorty,value_color=to_str,arrow_color=lambda x:fansi(x,'green'),key_color=lambda x:fansi(x,'green','bold'),clip_width=True,post_processor=lambda x:'\n'.join(2 * tab + y for y in x.split('\n')))
        if currently_running_windows():
            print_string=strip_ansi_escapes(print_string)#To avoid crashing windows terminals, cut down on the terminal colorings...
        print(print_string)
    if show_source_code:
        sourcecodeheader=tab + "SOURCE CODE:"
        print(col(sourcecodeheader) + fansi("―"*max(0,get_terminal_width()-len(sourcecodeheader)),'cyan','blinking'))
        _=code_string_with_comments=''
        _+=i.getcomments(object) or '' # ≣i.getc omments(object) if i.getcomments(object) is not None else ''
        _=fansi_syntax_highlighting(_)
        try:
            try:
                _+=fansi_syntax_highlighting(str(i.getsource(object)))
            except:
                # Fall back to the source of the object's class when the object itself has none
                _+=fansi_syntax_highlighting(str(i.getsource(object.__class__)))
        except Exception as e:
            _+=2 * tab + errortext('[Cannot retrieve source code! Error: ' + linerino(str(e)) + "]")
        print(_)
    if search_or_show_documentation:
        print(col(tab + "DOCUMENTATION: "))
        try:
            if object.__doc__ and not object.__doc__ in _:
                print(fansi(str(object.__doc__),'gray'))
            else:
                if not object.__doc__:
                    print(2 * tab + errortext("[__doc__ is empty]"))
                else: # ∴ object.__doc__ in _
                    print(2 * tab + errortext("[__doc__ can be found in source code, which has already been printed]"))
        except Exception as e:
            print(2 * tab + errortext("[Cannot retrieve __doc__! Error: " + str(e) + "]"))
    _maybe_display_string_in_pager(''.join(printed_lines),with_line_numbers=False)
# endregion
# region Arduino: [arduino,read_line]
def arduino(baudrate: int = 115200,port_description_keywords:list=['arduino','USB2.0-Serial'],timeout: float = .1,manually_chosen_port: str = None,shutup: bool = False,return_serial_instead_of_read_write=False,marco_polo_timeout=0) -> (callable,callable):# 'USB2.0-Serial' is for a cheap knock-off arduino I got
    """
    NOTE: This function uses a library called 'serial', got from 'pip install pyserial'.
    BUT THERE'S A SECOND LIBRARY: 'pip install serial' will give errors, as it's module is also called 'serial'. If you get this error, uninstall 'pip uninstall serial' then 'pip install pyserial'
    Finds an arduino, connects to it, and returns the read/write methods you use to communicate with it.
    Example: read,write=arduino()
    read() ⟵ Returns a single byte (of length 1)
    write(x:bytes) ⟵ Writes bytes to the arduino, which reads them as individual characters (the 'char' primitive)
    If you don't want this method to automatically locate an arduino, set manually_chosen_port to the port name you wish to connect to.
    marco_polo_timeout is optional: It's used for a situation where the arduino responds marco-polo style with the python code

    Parameters:
        baudrate: serial speed; must match Serial.begin() in the sketch.
        port_description_keywords: substrings matched (case-insensitively) against
            port descriptions to auto-detect the arduino's port.
        timeout: read timeout in seconds; read() returns b'' when nothing arrives in time.
        manually_chosen_port: skip auto-detection and connect to this port instead.
        shutup: silence the status messages this function prints.
        return_serial_instead_of_read_write: return the raw serial.Serial object
            instead of the (read, write) pair.
        marco_polo_timeout: seconds spent pinging and then draining the connection
            before returning, to confirm the arduino answers.
    """
    # NOTE(review): the mutable list default for port_description_keywords is never
    # mutated here, so it's harmless — but a tuple default would be safer.
    '''
    //Simple example code for the arduino to go along with this method: It simply parrots back the bytes you write to it.
    void setup()
    {
    Serial.begin(115200);// set the baud rate
    }
    void loop()
    {
    if (Serial.available())// only send data back if data has been sent
    {
    char inByte = Serial.read(); // read the incoming data
    Serial.write(inByte); // send the data back as a single byte.
    }
    }
    '''
    serial=pip_import('serial','pyserial')
    # Status printer that honors the shutup flag.
    def speak(x: str) -> None:
        if not shutup:
            print("r.arduino: " + x)
    def find_arduino_port(keywords: list = port_description_keywords) -> str:
        # Attempts to automatically determine which port the arduino is on.
        import serial.tools.list_ports
        port_list=serial.tools.list_ports.comports()
        port_descriptions=[port.description for port in port_list]
        keyword_in_port_descriptions=[any(keyword.lower() in port_description.lower()for keyword in keywords) for port_description in port_descriptions]
        number_of_arduinos_detected=sum(keyword_in_port_descriptions)
        assert number_of_arduinos_detected > 0,'r.arduino: No arduinos detected! Port descriptions = ' + str(port_descriptions)
        arduino_port_indices=max_valued_indices(keyword_in_port_descriptions) # All ports that have 'arduino' in their description.
        if number_of_arduinos_detected > 1:
            speak("Warning: Multiple arduinos detected. Choosing the leftmost of these detected arduino ports: " + str(gather(port_descriptions,arduino_port_indices)))
        chosen_arduino_device=port_list[arduino_port_indices[0]]
        speak("Chosen arduino device: " + chosen_arduino_device.device)
        return chosen_arduino_device.device
    ser=serial.Serial(manually_chosen_port or find_arduino_port(),baudrate=baudrate,timeout=timeout) # Establish the connection on a specific port. NOTE: manually_chosen_port or find_arduino_port() ≣ manually_chosen_port if manually_chosen_port is not None else find_arduino_port()
    if return_serial_instead_of_read_write:
        return ser
    read_bytes,_write_bytes=ser.read,ser.write # NOTE: If read_bytes()==b'', then there is nothing to read at the moment.
    # Accepts bytes or anything str()-able; optionally appends a newline before sending.
    def write_bytes(x,new_line=False):
        _write_bytes(printed((x if isinstance(x,bytes) else str(x).encode())+(b'\n'if new_line else b'')))
    start=tic()
    # (next 4 lines) Make sure that the arduino is able to accept write commands before we release it into the wild (the return function):
    arbitrary_bytes=b'_' # It doesn't matter what this is, as long as it's not empty
    assert arbitrary_bytes != b'' # ⟵ This is the only requirement for that read_bytes must be.
    if marco_polo_timeout:
        while not read_bytes() and start()<marco_polo_timeout: write_bytes(arbitrary_bytes) # ≣ while read_bytes()==b''
        while read_bytes() and start()<marco_polo_timeout: pass # ≣ while read_bytes()!=b''. Basically the idea is to clear the buffer so it's primed and ready-to-go as soon as we return it.
    if start()>marco_polo_timeout and not shutup:
        print("Marco Polo Timed Out")
    speak("Connection successful! Returning read and write methods.")
    return read_bytes,write_bytes # Returns the methods that you use to read and write from the arduino
    # NOTE: read_bytes() returns 1 byte; but read_byte(n ∈ ℤ) returns n bytes (all in one byte―string)!
    # Future: Possibly helpful resources: http://stackoverflow.com/questions/24420246/c-function-to-convert-float-to-byte-array ⨀ ⨀ ⨀ http://forum.arduino.cc/index.php?topic=43222.0
def read_line(getCharFunction,return_on_blank=False) -> bytes:
    """
    Read single bytes from getCharFunction until a newline (b'\\n') is seen, and
    return everything accumulated before it (the newline is not included).
    If return_on_blank is True, an empty read (b'') also terminates the line —
    useful for non-blocking sources that return b'' when no data is available.
    NOTE: if return_on_blank is False and the source never yields b'\\n', this loops forever.
    Example: read,write=arduino();print(read_line(read))
    """
    # CLEANUP: removed the unused locals `t=tic()` (timer was created but never consulted)
    # and the redundant `new` alias for the freshly-read byte.
    out = b''
    while True:
        char = getCharFunction()
        if char == b'\n' or return_on_blank and char == b'':
            return out
        out += char
# endregion
# region Webcam: [load_image_from_webcam, load_image_from_webcam_in_jupyter_notebook]
#Under construction...
#class CVCamera:
# #This class is a wrapper for OpenCV's camera class
# #
# def __init__(self, index=0):
# pip_import('cv2')
# import cv2
#
# self.index=index
# self.cap=cv2.VideoCapture(index)
#
# def read(self):
# success, img = self.cap.read()
# if not success:
# assert img==None,'This assertion is not important - its just that every time it fails, img seems to be None'
# raise IOError('r.CVCamera: Failed to take a picture using camera %s'%str(self))
#
# return img
#
# def __repr__(self):
# return 'CVCamera(index=%i)'%self.index
#
# def __call__(self):
_cameras=[]  # Module-level cache of opened cv2.VideoCapture objects; filled lazily by _cv_initialize_cameras
def _cv_initialize_cameras():
    """
    Lazily open every OpenCV camera device, starting from index 0 and stopping
    at the first index that fails to deliver a frame. Opened captures are
    appended to the module-level _cameras list; calling this again is a no-op.
    """
    if _cameras:
        return  # Already initialized
    fansi_print("r._cv_initialize_cameras: Initializing camera feeds; this will take a few seconds...",'green',new_line=False)
    # noinspection PyUnresolvedReferences
    pip_import('cv2')
    from cv2 import VideoCapture
    index=0
    while True:
        cam=VideoCapture(index)
        if not cam.read()[0]:
            cam.release()  # BUG FIX: release the failed capture handle instead of leaking the device
            break
        _cameras.append(cam)
        fansi_print("\rr._cv_initialize_cameras: Added camera #" + str(index),'green',new_line=False)
        index+=1
    fansi_print("\rr._cv_initialize_cameras: Initialization complete!",'green')
def _cv_print_cam_props(index=0):
    """
    Print every non-zero OpenCV capture property for the camera at the given index.
    EXAMPLE:
        >>> _cv_print_cam_props(1)
        CAP_PROP_BACKEND	1200.0
        CAP_PROP_FORMAT	16.0
        CAP_PROP_FPS	5.0
        CAP_PROP_FRAME_HEIGHT	1080.0
        CAP_PROP_FRAME_WIDTH	1920.0
    """
    import cv2
    capture = cv2.VideoCapture(index)
    property_names = [name for name in dir(cv2) if name.startswith('CAP_PROP_')]
    for name in property_names:
        value = capture.get(getattr(cv2, name))
        if value:
            print(name + '\t' + str(value))
    capture.release()
def load_image_from_webcam(webcam_index: int = 0,
                           *,
                           width:int=None,
                           height:int=None,
                           shutup=False
                           ):
    """
    Capture a single RGB frame from the webcam at webcam_index, as a numpy array.
    If your camera supports multiple resolutions, input the dimensions in the height and width parameters
    For example, Xiang's raspberry pi webcam was taking pictures at 720p even though the camera could take 1080p pictures.
    When I set width=1920 and height=1080, it fixed the problem, letting it take 1080p pictures
    Note: this can be finicky! You have to set both the height and width correctly for this to work.
    Note that when you set width and height using this method, they will stay like that until changed again.
    These properties can be discovered using the r._cv_print_cam_props function
    To discover which resolutions are supported by your webcam, see this tutorial:
        https://www.learnpythonwithrune.org/find-all-possible-webcam-resolutions-with-opencv-in-python/
    Change webcam_index if you have multiple cameras
    EXAMPLE: while True: display_image(med_filter(load_image_from_webcam(1),σ=0));sleep(0);clf()#⟵ Constant webcam display
    """
    if running_in_google_colab():return _load_image_from_webcam_in_jupyter_notebook()
    pip_import('cv2')
    import cv2
    _cv_initialize_cameras()
    cap=_cameras[webcam_index]
    if width  is not None: cap.set(cv2.CAP_PROP_FRAME_WIDTH , width )
    if height is not None: cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    success,img=False,None
    for _ in range(2):
        # Up to this many tries: the first frame after initialization is sometimes all-black
        success,img=cap.read()
        # BUG FIX: check success/None BEFORE calling img.any() — a failed read returns img=None,
        # and the original code crashed with AttributeError instead of reaching the asserts below.
        if success and img is not None and img.any():
            break
    assert success          , 'Failed to take a photo with webcam #%i'%webcam_index
    assert img is not None  , 'Failed to take a photo with webcam #%i'%webcam_index
    img=np.add(img,0) # Turns it into numpy array
    img=cv_bgr_rgb_swap(img) # OpenCV returns BGR; convert to RGB
    return img
def load_webcam_stream():
    """Infinite generator yielding successive frames from the default webcam (see load_image_from_webcam)."""
    while True:
        yield load_image_from_webcam()
def load_image_from_screenshot():
    """
    Take a screenshot and return the result as a numpy-array-style image.
    EXAMPLE: display_image(load_image_from_screenshot())
    TODO: Make this faster. The 'mss' package from pypi got much better performance, so if you need higher FPS try using that for the inner workings of this function.
    """
    screenshot_module = pip_import('pyscreenshot')
    screenshot = screenshot_module.grab(childprocess=False)
    return np.asarray(screenshot)
def _load_image_from_webcam_in_jupyter_notebook():
    """
    Capture a single webcam frame from inside Google Colab, where cv2.VideoCapture
    cannot see the client's camera. Shows a live <video> element; the user clicks it
    to snap a photo, which is saved to 'photo.jpg' and returned as an image array.
    BUG FIX: the original version defined take_photo but never called it, so this
    function always returned None even though load_image_from_webcam expects an image.
    It also used display() without importing it.
    """
    from IPython.display import HTML, Image, display  # BUG FIX: display was used but never imported
    from google.colab.output import eval_js
    from base64 import b64decode
    VIDEO_HTML = """
  <video autoplay
  width=800 height=600></video>
  <script>
  var video = document.querySelector('video')
  navigator.mediaDevices.getUserMedia({ video: true })
    .then(stream=> video.srcObject = stream)
  var data = new Promise(resolve=>{
    video.onclick = ()=>{
      var canvas = document.createElement('canvas')
      var [w,h] = [video.offsetWidth, video.offsetHeight]
      canvas.width = w
      canvas.height = h
      canvas.getContext('2d')
            .drawImage(video, 0, 0, w, h)
      video.srcObject.getVideoTracks()[0].stop()
      video.replaceWith(canvas)
      resolve(canvas.toDataURL('image/jpeg', %f))
    }
  })
  </script>
  """
    def take_photo(filename='photo.jpg', quality=0.8):
        # Blocks until the user clicks the video element, then saves the jpeg to filename
        display(HTML(VIDEO_HTML % quality))
        data = eval_js("data")
        binary = b64decode(data.split(',')[1])
        with open(filename, 'wb') as f:
            f.write(binary)
        return filename
    return load_image(take_photo())  # load_image is defined elsewhere in this module
# endregion
# region Audio Recording: [record_mono_audio]
default_audio_stream_chunk_size=1024 # Frames per read; recording time is quantized to whole chunks, so this sets the time resolution of record_mono_audio
default_audio_mono_input_stream=None # Shared pyaudio input stream; lazily created inside record_mono_audio on first use
def record_mono_audio(time_in_seconds,samplerate=default_samplerate,stream=None,chunk_size=default_audio_stream_chunk_size) :
    """
    Record mono audio from the default input device; returns float samples in [-1, 1].
    You can count on this method having a delay (between when you call the method and when it actually starts recording) on the order of magnitude of 10⁻⁵ seconds
    PLEASE NOTE: time_in_seconds is not interpreted precisely (it is rounded UP to a whole number of chunks)
    EXAMPLE: play_sound_from_samples(record_mono_audio(2))
    """
    pip_import('pyaudio')
    if stream is None: # then use default_audio_mono_input_stream instead
        global default_audio_mono_input_stream
        if default_audio_mono_input_stream is None: # Initialize it lazily on first use.
            import pyaudio # You need this module to use this function. Download it if you don't have it.
            # NOTE(review): the shared stream is opened at default_samplerate, not the samplerate argument — confirm that's intended
            default_audio_mono_input_stream=pyaudio.PyAudio().open(format=pyaudio.paInt16,channels=1,rate=default_samplerate,input=True,frames_per_buffer=default_audio_stream_chunk_size)
        stream=default_audio_mono_input_stream
    number_of_chunks_needed=np.ceil(time_in_seconds * samplerate / chunk_size) # Rounding up so we never record less than requested.
    # BUG FIX: np.fromstring is deprecated (and removed for binary input in modern numpy); np.frombuffer is the supported equivalent.
    out=np.hstack([np.frombuffer(stream.read(num_frames=chunk_size,exception_on_overflow=False),dtype=np.int16) for _ in range(int(number_of_chunks_needed))]) # Record the audio
    out=np.ndarray.astype(out,float) # Because by default it's an integer (not a floating point thing)
    out/=2 ** 15 # paInt16 samples → scale into [-1, 1]
    # stream.stop_stream();stream.close() ⟵ Is slow. Takes like .1 seconds. Kept open deliberately so repeated calls respond in ~0.01 ms.
    return out
# endregion
# region MIDI Input/Output: [MIDI_input,MIDI_output]
__midiout=None # Lazily-created rtmidi.MidiOut shared by all the MIDI_* output helpers below
def MIDI_output(message: list):
    """
    Send a raw MIDI message (a list of ints) on the first available output port,
    opening a virtual port if no hardware port exists. The port is opened once and reused.
    Key:
        NOTE_OFF            = [0x80, note, velocity]
        NOTE_ON             = [0x90, note, velocity]
        POLYPHONIC_PRESSURE = [0xA0, note, velocity]
        CONTROLLER_CHANGE   = [0xB0, controller, value]
        PROGRAM_CHANGE      = [0xC0, program]
        CHANNEL_PRESSURE    = [0xD0, pressure]
        PITCH_BEND          = [0xE0, value-lo, value-hi]
    For more: see http://pydoc.net/Python/python-rtmidi/0.4.3b1/rtmidi.midiconstants/
    """
    # CLEANUP: removed a large commented-out legacy implementation that used the old RtMidiOut API.
    pip_import('rtmidi')
    import rtmidi
    global __midiout
    if not __midiout:
        # First call: open the first hardware port, or fall back to a virtual port
        __midiout = rtmidi.MidiOut()
        if __midiout.get_port_count():
            __midiout.open_port(0)
            print("MIDI_output: Port Output Name: '" + __midiout.get_port_name(0) + "'")
        else:
            __midiout.open_virtual_port("My virtual output")
    try:
        __midiout.send_message(message)
    except OverflowError as e:
        # rtmidi raises OverflowError when a byte is out of range; report but don't crash
        print("ERROR: MIDI_Output: " + str(e) + ": ", end="")
        print(message)
def MIDI_control(controller_number: int,value: float): # Controller_number is custom integer, and value is between 0 and 1
    """Send a control-change message (status 176 = 0xB0): value is clamped to [0,1] and scaled to 0-127."""
    MIDI_output([176,controller_number,int(float_clamp(value,0,1) * 127)])
def MIDI_control_precisely(coarse_controller_number: int,fine_controller_number: int,value: float): # TWO bytes of data!!
    """Send a high-resolution control value split across two CC messages: a coarse byte and a fine (fractional) byte."""
    scaled = float_clamp(value, 0, 1) * 127
    whole_part, fraction = divmod(scaled, 1)
    MIDI_output([176, coarse_controller_number, int(whole_part)])
    MIDI_output([176, fine_controller_number, int(fraction * 127)])
def MIDI_jiggle_control(controller_number: int): # Controller_number is custom integer, and value is between 0 and 1
    """Wiggle a controller: send 0, wait 100ms, then send 1 — handy for making a DAW reveal/link the knob."""
    MIDI_control(controller_number,0)
    sleep(.1)
    MIDI_control(controller_number,1)
def MIDI_note_on(note: int,velocity: float = 1): # velocity ∈ [0,1]
    """Send note-on (status 144 = 0x90) with velocity in [0,1] scaled to 0-127.
    NOTE(review): the note is clamped to 0-255 here, but standard MIDI data bytes only go to 127 — confirm intent."""
    MIDI_output([144,int_clamp(note,0,255),int(velocity * 127)]) # Notes can only be between 0 and 255, inclusively
def MIDI_note_off(note: int,velocity: float = 0):
    """Send note-off (status 128 = 0x80) for the given note; release velocity in [0,1] scaled to 0-127."""
    MIDI_output([128,note,int(velocity * 127)])
MIDI_pitch_bend_min=-2 # Measured in Δsemitones.
MIDI_pitch_bend_max=6 # Note: These min/max numbers are based on the limitations of the pitch bender, which is DAW dependent (observed in FL Studio on my computer).
def MIDI_pitch_bend(Δsemitones: float): # Δsemitones ∈ [MIDI_pitch_bend_min, MIDI_pitch_bend_max] ⟵ ACCORDING TO FL STUDIO
    """Send a pitch-bend message (status 224 = 0xE0); Δsemitones is clamped to [MIDI_pitch_bend_min, MIDI_pitch_bend_max]."""
    Δsemitones=float_clamp(Δsemitones,MIDI_pitch_bend_min,MIDI_pitch_bend_max)
    # CONSISTENCY FIX: derive the mapping from the min/max constants instead of hard-coding +2 and /8.
    # Numerically identical to the old (Δ+2)/8*255 for the current constant values.
    bend_range=MIDI_pitch_bend_max-MIDI_pitch_bend_min
    coarse=int(((Δsemitones - MIDI_pitch_bend_min) / bend_range) * 255)
    fine=0 # ∈ [0,255] Note that fine is...REALLY REALLY FINE...So much so that I can't really figure out a good way to use it
    # NOTE(review): standard MIDI data bytes are 7-bit (0-127); scaling to 255 relies on DAW-specific behavior — confirm
    MIDI_output([224,fine,coarse])
def MIDI_all_notes_off():
    """Send a note-off for every note number 0..255.
    NOTE(review): standard MIDI note numbers only go to 127; values ≥128 may be rejected downstream — confirm."""
    for n in range(256):
        MIDI_note_off(n)
def MIDI_breath(value: float):
    """
    Send a breath-controller message; value in [0,1] is clamped and scaled to 0-127.
    BUG FIX: the original sent [0x02, value], which has no MIDI status byte (0x02 is a data
    byte, not a valid status). Breath control is continuous controller #2, so the correct
    message is a control change: [0xB0, 0x02, value].
    """
    MIDI_output([0xB0,0x02,int(float_clamp(value,0,1) * 127)])
#
__midiin=None # Module-level reference so the garbage collector doesn't gobble up your midi input if you decide not to keep the returned close method
def MIDI_input(ƒ_callback: callable = print) -> callable:
    """
    Start listening to a MIDI input port; ƒ_callback receives each incoming message.
    Returns the function that closes the port (call it to stop listening).
    Perfect example:
        close_midi=MIDI_input(MIDI_output) # ⟵ This simply regurgitates the midi-piano's input to a virtual output. You won't be able to tell the difference ;)
    Then, when you're bored of it...
        close_midi()# ⟵ This stops the midi from doing anything.
    """
    print("r.MIDI_input: Please specify the details of your request:")
    pip_import('rtmidi')
    from rtmidi.midiutil import open_midiport # pip3 install python-rtmidi; open_midiport may prompt the user to pick a port
    global __midiin
    __midiin,port_name=open_midiport()
    __midiin.set_callback(lambda x,y:ƒ_callback(x[0])) # presumably x is (message, delta_time); only the message is forwarded — verify against rtmidi docs
    return __midiin.close_port # Returns the method needed to kill the thread
# endregion
# region Comparators: [cmp_to_key,sign]
def cmp_to_key(mycmp):
    """
    Convert an old-style cmp(a, b) comparator into a key= callable usable with sorted()/min()/max().
    From: http://code.activestate.com/recipes/576653-convert-a-cmp-function-to-a-key-function/
    Instead of the py2-era sorted(xs, cmp=f) — a syntax error in py3 — write sorted(xs, key=cmp_to_key(f)).
    """
    class _ComparatorKey(object):
        def __init__(self, obj, *args):
            self.obj = obj
        def _compare(self, other):
            # Delegates to the wrapped comparator: negative / zero / positive result
            return mycmp(self.obj, other.obj)
        def __lt__(self, other): return self._compare(other) <  0
        def __gt__(self, other): return self._compare(other) >  0
        def __eq__(self, other): return self._compare(other) == 0
        def __le__(self, other): return self._compare(other) <= 0
        def __ge__(self, other): return self._compare(other) >= 0
        def __ne__(self, other): return self._compare(other) != 0
    return _ComparatorKey
# noinspection PyShadowingNames
def sign(x,zero=0):
    """
    Signum with a configurable comparison point: returns 1 when x > zero, -1 when x < zero,
    and the `zero` argument itself otherwise (so sign(0) == 0 by default).
    Redefining `zero` effectively turns this into a comparator against that value.
    """
    return 1 if x > zero else (-1 if x < zero else zero)
# endregion
# region Pickling:[load_pickled_value,save_pickled_value]
# Pickling is just a weird name the python devs came up with to descript putting the values of variables into files, essentially 'pickling' them for later use
def load_pickled_value(file_name: str):
    """Unpickle and return the value stored in file_name (path is relative to the current directory)."""
    # BUG FIX: the original left the file handle open; use a context manager to close it.
    with open(file_name, "rb") as file:
        return pickle.load(file)
def save_pickled_value(file_name: str,*variables):
    """Pickle the given value(s) to file_name; a single variable is stored bare (detupled), multiple as a tuple."""
    # BUG FIX: the original left the file handle open; use a context manager to close (and flush) it.
    with open(file_name, 'wb') as file:
        pickle.dump(detuple(variables), file)
# load_pickled_value=lambda file_name:pickle.load(open(file_name,"rb"))
# endregion
# region .txt ⟷ str: [string_to_text_file,text_file_to_string]
def string_to_text_file(file_path: str,string: str) -> str:
    """
    string_to_text_file(file_path, string) writes text file and returns its path.
    Falls back to an explicit utf-8 reopen if the default-encoding write fails.
    Raises FileNotFoundError with a helpful message if the parent folder doesn't exist.
    """
    file_path=get_absolute_path(file_path)#Make sure it recognizes ~/.vimrc AKA with the ~ attached
    try:
        file=open(file_path,"w")
    except Exception:
        if not folder_exists(get_parent_folder(file_path)):
            raise FileNotFoundError("Parent folder does not exist: "+str(get_parent_folder(file_path)))
        raise
    try:
        try:
            file.write(string)
        except Exception:
            # Default-encoding write failed (e.g. UnicodeEncodeError): retry explicitly with utf-8.
            # BUG FIX: close the first handle before reopening, instead of leaking it.
            file.close()
            file=open(file_path,"w",encoding='utf-8')
            file.write(string)
    finally:
        file.close()  # BUG FIX: always close, even if the utf-8 retry also fails
    return file_path
def save_text_file(string, file_path):
    """save_text_file(string, file_path) writes text file — argument-swapped convenience wrapper around string_to_text_file; returns the path."""
    return string_to_text_file(file_path, string)
_text_file_to_string_cache={}  # Maps absolute path -> (date_modified, contents); used when use_cache=True
def text_file_to_string(file_path: str,use_cache=False) -> str:
    """
    text_file_to_string(file_path) reads text file (or fetches a URL via curl).
    Only reason not to use use_cache is if you're worried about memory consumption.
    The cache entry is refreshed if the file's modification date changed, so use_cache is safe for changing files.
    TODO: Make this happen on load_image, etc...all other functions that read from a file and have use_cache as an option
    """
    if is_valid_url(file_path):
        #TODO: Add caching for when we use urls
        return curl(file_path)
    file_path=get_absolute_path(file_path)#Make sure it recognizes ~/.vimrc AKA with the ~ attached. Also, don't cache the same file twice under a relative and absolute path
    assert file_exists(file_path),'File %s does not exist'%file_path
    if use_cache:
        # CLEANUP: removed a redundant second get_absolute_path call (already done above)
        current_date=date_modified(file_path)
        if file_path in _text_file_to_string_cache:
            cached_date,cached_text=_text_file_to_string_cache[file_path]
            if current_date<=cached_date:
                return cached_text
        current_text=text_file_to_string(file_path,use_cache=False)
        _text_file_to_string_cache[file_path]=current_date,current_text
        return current_text
    try:
        # BUG FIX: use context managers so the file handles are closed instead of leaked
        with open(file_path) as file:
            return file.read()
    except UnicodeDecodeError:
        #UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 4781: ordinal not in range(128)
        with open(file_path, encoding='latin') as file:
            return file.read()
#Alias: reads better when paired with save_text_file
load_text_file = text_file_to_string
def load_file_lines(file_path, use_cache=False):
    """ Returns all the lines in a file """
    contents = text_file_to_string(file_path, use_cache)
    return line_split(contents)
def save_file_lines(lines, file_path):
    """Write an iterable of lines (each str()-converted) to a text file; returns the path."""
    assert is_iterable(lines)
    stringified = [str(line) for line in lines]
    return string_to_text_file(file_path, line_join(stringified))
def load_text_files(*paths, use_cache=False, strict=True, num_threads=None, show_progress=False, lazy=False):
    """
    Plural of text_file_to_string
    Please see load_files and rp_iglob for more information
    Yields the strings as a generator
    """
    # NOTE(review): this folder shortcut presumably only applies when a single folder path is given;
    # with multiple args detuple returns the whole tuple — confirm folder_exists tolerates that.
    if folder_exists(detuple(paths)):
        paths = detuple(paths)+'/*'  # A bare folder means "every file directly inside it"
    paths = rp_iglob(paths)
    load_file = lambda path: text_file_to_string(path, use_cache=use_cache)
    if show_progress in ['eta',True]: show_progress='eta:Loading text files'
    return load_files(load_file, paths, show_progress=show_progress, strict=strict, num_threads=num_threads, lazy=lazy)
def append_line_to_file(line:str,file_path:str):
    """Append a line to the end of a text file, creating the file if it doesn't exist yet; returns the path."""
    if file_exists(file_path):
        with open(file_path, 'a') as handle:
            handle.write('\n' + line)
    else:
        string_to_text_file(file_path, line)
    return file_path
def as_easydict(*args, **kwargs):
    """Build an easydict.EasyDict (a dict with attribute-style access) from the given args, installing easydict on demand."""
    pip_import('easydict') #I might make this a pip requirement of rp...its so useful!
    from easydict import EasyDict
    return EasyDict(*args, **kwargs)
_load_json_cache={}  # Maps path -> parsed dict; only dict-valued JSON files are ever cached
def load_json(path, *, use_cache=False):
    """Load a JSON file: dicts come back as EasyDicts, any other JSON value is returned as-is (and never cached)."""
    if use_cache and path in _load_json_cache:
        from copy import deepcopy
        # Cache hits are deep-copied so callers can mutate the result without corrupting the cache
        return as_easydict(deepcopy(_load_json_cache[path]))
    text=text_file_to_string(path, use_cache=use_cache)
    import json
    out = json.loads(text)
    if not isinstance(out, dict):
        return out
    if use_cache or path in _load_json_cache:
        # Also refreshes an existing (stale) entry even when use_cache is False
        _load_json_cache[path]=out
    return as_easydict(out)
def load_jsons(*paths, use_cache=False, strict=True, num_threads=None, show_progress=False, lazy=False):
    """
    Plural of load_json
    Please see load_files and rp_iglob for more information
    Yields the jsons as an iterator
    """
    paths = rp_iglob(paths)
    if show_progress in ['eta',True]:
        show_progress='eta:Loading JSON files'
    loader = lambda file: load_json(file, use_cache=use_cache)
    return load_files(loader, paths, show_progress=show_progress, strict=strict, num_threads=num_threads, lazy=lazy)
def save_json(data,path,*,pretty=False,default=None):
    """Serialize data as JSON and write it to path; pretty=True uses tab indentation. Returns the path."""
    import json
    options = dict(default=default)
    if pretty:
        options['indent'] = '\t'
        options['separators'] = (",", ": ")
    serialized = json.dumps(data, **options)
    return string_to_text_file(path, serialized)
_load_tsv_cache={}  # Maps handy_hash((file_path, header, sep)) -> DataFrame
def load_tsv(file_path, *, show_progress=False, header=0, use_cache=False, sep="\t"):
    """
    Read a TSV file with optional progress tracking and flexible header handling.
    By default tries to be robust - skipping all bad lines.
    Parameters:
        file_path (str): Path to the TSV file.
        show_progress (bool): Whether to display a progress bar. Default is False.
        header (str, int, list, or None): Header row handling.
            - 0 (default): Use the first row as column names.
            - None: Use no column names, only integer indices.
            - List: Use the provided list as column names, assuming no header row in the file.
            - str: Like list, but uses str.split so you can specify headers like 'col1 col2name whatIcall_Col3' etc
        use_cache: If True, will cache the result so you only have to load from drive once
        sep (str): Field separator character. Default is a tab.
    Returns:
        pandas.DataFrame: The loaded TSV data as a DataFrame.
    EXAMPLE:
        >>> load_tsv('urls_oct6.tsv', header='id size url title', use_cache=True, show_progress=True)
        ... ans =              id          size                          url                                             title
        ... 0       2RH8A49  7.088479e+08  https://video-previews.cont...   Joyful Excitement Dancing Woman Meme Expression
        ... ...         ...           ...                          ...                                               ...
        ... [699926 rows x 4 columns]
    """
    #Future Parameters:
    #    mode (str): File reading mode. Use 'robust' to skip bad lines. Default is 'robust'.
    mode = 'robust'
    pip_import("pandas")
    import pandas as pd
    import csv
    # BUG FIX: include `sep` in the cache key — previously two calls differing only in the
    # separator collided and the second silently returned the first's DataFrame.
    args_hash = handy_hash((file_path, header, sep))
    if use_cache and args_hash in _load_tsv_cache:
        return _load_tsv_cache[args_hash]
    chunk_size = 1000  # Rows per chunk; also the granularity of the progress bar
    if isinstance(header,str):
        header=header.strip().split()  # 'a b c' -> ['a', 'b', 'c']
    kwargs = {
        "sep": sep,
        "chunksize": chunk_size,
        "header": header if isinstance(header, int) else None,
        "names": header if isinstance(header, list) else None,
    }
    if mode == "robust":
        kwargs.update({"quoting": csv.QUOTE_NONE, "on_bad_lines": "skip"})
    iterator = pd.read_csv(file_path, **kwargs)
    if show_progress:
        pip_import("tqdm")
        from tqdm import tqdm
        total_lines = number_of_lines_in_file(file_path)
        if isinstance(header, int):
            total_lines -= 1  # The header row isn't a data row
        iterator = tqdm(iterator, total=total_lines // chunk_size)
    df = pd.concat(iterator, ignore_index=True)
    if show_progress:
        _erase_terminal_line()
    # CLEANUP: removed the dead commented-out fix_dataframe_nans helper.
    if use_cache:
        _load_tsv_cache[args_hash]=df
    return df
_load_parquet_cache={}  # Maps handy_hash((file_path,)) -> DataFrame
def load_parquet(file_path, *, show_progress=False, use_cache=False):
    """
    Read a Parquet file with optional progress tracking.
    Parameters:
        file_path (str): Path to the Parquet file.
        show_progress (bool): Whether to display a progress bar. Default is False.
        use_cache: If True, will cache the result so you only have to load from drive once
    Returns:
        pandas.DataFrame: The loaded Parquet data as a DataFrame.
    EXAMPLE:
        >>> load_parquet('data.parquet', use_cache=True, show_progress=True)
        ... ans =              id          size                          url                                             title
        ... 0       2RH8A49  7.088479e+08  https://video-previews.cont...   Joyful Excitement Dancing Woman Meme Expression
        ... ...         ...           ...                          ...                                               ...
        ... [699926 rows x 4 columns]
    """
    pip_import("pandas")
    pip_import("pyarrow")
    import pandas as pd
    import pyarrow.parquet as pq
    if use_cache:
        # BUG FIX: (file_path) was not a tuple — added the trailing comma so the key is built
        # consistently with _load_tsv_cache (handy_hash of a tuple of the arguments).
        args_hash = handy_hash((file_path,))
        if args_hash not in _load_parquet_cache:
            value = gather_args_call(load_parquet, use_cache=False)
            _load_parquet_cache[args_hash] = value
        return _load_parquet_cache[args_hash]
    parquet_file = pq.ParquetFile(file_path)
    num_row_groups = parquet_file.num_row_groups
    dfs = []
    indices = range(num_row_groups)
    if show_progress:
        pip_import("tqdm")
        from tqdm import tqdm
        indices = tqdm(indices)
    for i in indices:
        # Row groups are Parquet's native chunking unit — reading one at a time lets us show progress
        df = parquet_file.read_row_group(i, use_threads=True).to_pandas()
        dfs.append(df)
    df = pd.concat(dfs, ignore_index=True)
    return df
def load_yaml_file(path, use_cache=False):
    """
    Load a YAML file into an EasyDict (a dict with attribute-style access).
    EXAMPLE:
        >>> load_yaml_file('alphablock_without_ssim_256.yaml')
        ans = {'max_iter': 300000, 'batch_size': 5, 'image_save_iter': 250, ...(etc)... }
    """
    pip_import('yaml')
    import yaml
    # BUG FIX: removed the unused `from easydict import EasyDict` — it wasn't guarded by
    # pip_import('easydict') and could fail needlessly; as_easydict handles that import itself.
    assert file_exists(path)
    text=text_file_to_string(path, use_cache=use_cache)
    data=yaml.safe_load(text)
    data=as_easydict(data)
    return data
load_yaml = load_yaml_file #Alias
def load_yaml_files(*paths, use_cache=False, strict=True, num_threads=None, show_progress=False, lazy=False):
    """
    Plural of load_yaml_file
    Please see load_files and rp_iglob for more information
    Yields the jsons as an iterator
    """
    paths = rp_iglob(paths)
    if show_progress in ['eta',True]:
        show_progress='eta:Loading YAML files'
    loader = lambda file: load_yaml_file(file, use_cache=use_cache)
    return load_files(loader, paths, show_progress=show_progress, strict=strict, num_threads=num_threads, lazy=lazy)
def parse_yaml(string):
    """Parse a YAML string and return the result as an EasyDict."""
    pip_import('yaml')
    import yaml
    parsed = yaml.safe_load(string)
    return as_easydict(parsed)
def parse_dyaml(code:str)->dict:
    """
    This is like DJSON, except for YAML
    TODO: Migrate this function into its own module.
    Look at the test_parse_dyaml_junctions() function to see how this language works, it's pretty simple
    The only differences between this and YAML:
        - When a key has multiple colons in it, like a:b:c:, it's equivalent to multiple lines of keys
        - When a key has commas in it, its value is duplicated
    This was used in the TRITON codebase's config files!
    Pipeline: parse to Junction pairs (duplicate keys preserved) -> split colon keys ->
    expand comma keys -> fold all pairs into a plain dict as successive deltas.
    """
    assert isinstance(code,str)
    class Junction:
        # A single key/value pair; value may itself be a JunctionList (a nested mapping)
        def __init__(self,key,value):
            self.key  =key
            self.value=value
        def __repr__(self):
            # Colored "key:value" rendering (pairs nicely with parenthesizer_automator)
            return fansi(str(self.key) + ":", "cyan") + str(self.value)
        def __iter__(self):
            # Allows `key, value = junction` unpacking
            yield self.key
            yield self.value
        @property
        def is_leaf(self):
            # Leaf = the value is a plain YAML scalar/collection, not a nested JunctionList
            return not isinstance(self.value, JunctionList)
    class JunctionList(list):
        #In this module, every JunctionList created is a list of Junction instances
        pass
    def handle_key_colons(junction)->Junction:
        #If we have a key like "a:b:c",
        #Make [a:b:c: z] into [a:[b:[c:z]]]
        #EXAMPLE:
        #     >>> handle_key_colons(Junction('a:b:c','z'))
        #    ans = a:[b:[c:z]]
        key,value=junction
        assert isinstance(key,str)
        path=key.split(':')
        output=value
        for sub_key in path[::-1]:
            # Wrap from the innermost key outward
            output=Junction(sub_key,output)
            output=[output]
            output=JunctionList(output)
        return output[0]
    def split_colon_keys(junctions)->JunctionList:
        # Recursively apply handle_key_colons to every junction in the tree
        output=JunctionList()
        for junction in junctions:
            if not junction.is_leaf:
                junction.value=split_colon_keys(junction.value)
            junction=handle_key_colons(junction)
            output.append(junction)
        return output
    def parse_dyaml_junctions(src)->JunctionList:
        # https://stackoverflow.com/questions/44904290/getting-duplicate-keys-in-yaml-using-python
        # We deliberately define a fresh class inside the function,
        # because add_constructor is a class method and we don't want to
        # mutate pyyaml classes.
        pip_import('yaml','PyYAML')
        import yaml
        class PreserveDuplicatesLoader(yaml.loader.Loader):
            pass
        def map_constructor(loader, node):
            """Walk the mapping, recording any duplicate keys."""
            deep=False
            mapping=JunctionList()
            for key_node, value_node in node.value:
                key = loader.construct_object(key_node, deep=deep)
                value = loader.construct_object(value_node, deep=deep)
                #mapping.setdefault(key,[]).append(value)
                mapping.append(Junction(key,value))
            return mapping
        PreserveDuplicatesLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor)
        return yaml.load(src, PreserveDuplicatesLoader)
    def expand_comma_keys(junctions) -> JunctionList:
        #Note that there may be duplicate lists in multiple places
        #This saves memory
        assert isinstance(junctions, JunctionList)
        from copy import deepcopy
        output = JunctionList()
        for key,value in junctions:
            if isinstance(value, JunctionList):
                value=expand_comma_keys(value)
            for sub_key in key.split(','):
                # Each comma-separated sub-key receives its own (deep-copied) copy of the value
                value=deepcopy(value)
                junction = Junction(sub_key, value)
                output.append(junction)
        return output
    def apply_deltas_from_junctions(junctions:JunctionList,recipient:dict):
        #Apply all the junctions as deltas; later duplicate keys override/merge into earlier ones
        for key,value in junctions:
            if isinstance(value,JunctionList):
                apply_deltas_from_junctions(value,recipient.setdefault(key,{}))
            else:
                recipient[key]=value
        return recipient
    def junctions_to_dict(junctions:JunctionList)->dict:
        output={}
        return apply_deltas_from_junctions(junctions,{})
    def parse_dyaml(src)->dict:
        # The full pipeline, composed end-to-end
        junctions=parse_dyaml_junctions(src)
        junctions=split_colon_keys(junctions)
        junctions=expand_comma_keys(junctions)
        return junctions_to_dict(junctions)
    return parse_dyaml(code)
def test_parse_dyaml_junctions():
    """Demo/self-test for parse_dyaml: prints each intermediate stage of the pipeline (see the expected output in the comments below) and returns the final dict."""
    code="""
    a:
        b:
            c: boochy
        b,q:
            c,d: creepy
        b:
            c: cri
    a:b:
        e: {"Hil":87}
    w,x:y,z: pup
    """
    print(code)
    print(parenthesizer_automator(str( parse_dyaml_junctions(code) )))
    print(parenthesizer_automator(str( split_colon_keys(parse_dyaml_junctions(code)) )))
    print(parenthesizer_automator(str(expand_comma_keys(split_colon_keys(parse_dyaml_junctions(code))))))
    print(parenthesizer_automator(str(parse_dyaml(code))))
    # RESULT:
    #     a:
    #         b:
    #             c: boochy
    #         b,q:
    #             c,d: creepy
    #         b:
    #             c: cri
    #     a:b:
    #         e: {"Hil":87}
    #     w,x:y,z: pup
    #
    #   ┌                                            ┐
    #   │   ┌                                    ┐     ┌          ┐
    #   │   │  ┌        ┐      ┌          ┐    ┌     ┐│     │    ┌        ┐│
    # [a:[b:[c:boochy], b,q:[c,d:creepy], b:[c:cri]], a:b:[e:[Hil:87]], w,x:y,z:pup]
    #   │   │  └        ┘      └          ┘    └     ┘│     │    └        ┘│
    #   │   └                                    ┘     └          ┘
    #   └                                            ┘
    #   ┌                                                ┐
    #   │                                          ┌           ┐
    #   │   ┌                                    ┐ │  ┌        ┐│        ┌       ┐
    #   │   │  ┌        ┐      ┌          ┐    ┌ ┐│ │  │  ┌   ┐││     ┌  ┌     ┐│
    # [a:[b:[c:boochy], b,q:[c,d:creepy], b:[c:cri]], a:[b:[e:[Hil:87]]], w,x:[y,z:pup]]
    #   │   │  └        ┘      └          ┘    └ ┘│ │  │  └   ┘││     └  └     ┘│
    #   │   └                                    ┘ │  └        ┘│        └      ┘
    #   │                                          └           ┘
    #   └                                                ┘
    #   (expand_comma_keys duplicates each comma-separated key with its own copy of the value)
    # [a:[b:[c:boochy], b:[c:creepy, d:creepy], q:[c:creepy, d:creepy], b:[c:cri]], a:[b:[e:[Hil:87]]], w:[y:pup, z:pup], x:[y:pup, z:pup]]
    #
    #   (junctions_to_dict then folds the duplicate keys as successive deltas)
    # {'a': {'b': {'c': 'cri', 'd': 'creepy', 'e': {'Hil': 87}}, 'q': {'c': 'creepy', 'd': 'creepy'}}, 'w': {'y': 'pup', 'z': 'pup'}, 'x': {'y': 'pup', 'z': 'pup'}}
    return parse_dyaml(code)
def load_dyaml_file(path:str)->dict:
    """ Load a dyaml file (a yaml file with some additional syntax features I added). Stands for "Delta Yaml" """
    assert file_exists(path)
    return parse_dyaml(text_file_to_string(path))
def touch_file(path):
    """Equivalent to the 'touch' command - creates a file if it doesnt exist and if it does updates its date_modified"""
    from pathlib import Path
    make_folder(get_parent_folder(path))  # Ensure the parent directory exists first
    Path(path).touch()
    return path
# endregion
# region MATLAB Integration: [matlab_session,matlab,matlab_pseudo_terminal]
def matlab_session(matlabroot: str = '/Applications/MATLAB_R2016a.app/bin/matlab',print_matlab_stdout: bool = True): # PLEASE NOTE: this 'matlabroot' was created on my Macbook Pro, and is unlikely to work on your computer unless you specify your own matlab path!
    """
    This method is used as an easy-to-use wrapper for creating MATLAB sessions using the pymatbridge module
    Worth noting: There's a legit purpose for creating a new matlab session before using it:
    Each session you create will be separate and will have a separate namespace!
    In other words, you can run them simultaneously/separately. For example:
    >>> sess1=matlab_session();sess2=matlab_session();
    >>> sess1.run_code("x=1");sess2.run_code("x=1");
    >>> sess1.get_variable("x"),sess2.get_variable("x")
    ans=(1,2)
    Also worth noting: You can use whatever functions you normally use in MATLAB, including .m files that you wrote and kept in your default matlab function/script saving directory.

    Returns a callable 'wrapper' where:
        wrapper()              --> the raw pymatbridge session object
        wrapper('some code')   --> runs the code in MATLAB (returns its value when MATLAB produces one)
        wrapper(name=value)    --> assigns exactly one variable in the MATLAB namespace
    The wrapper is also decorated with .enable_stdout / .disable_stdout / .reboot / .stop / .start helpers.
    """
    fansi_print("(A message from Ryan): About to try connecting to MATLAB. Please be a patient, this can take a few seconds! (There is a timeout though, so you won't be kept waiting forever if it fails). Another message will be printed when it's done loading.",None,'bold')
    pip_import('pymatbridge')
    import pymatbridge # pip3 install pymatbridge (see https://arokem.github.io/python-matlab-bridge/ )
    session=pymatbridge.Matlab(executable=matlabroot,maxtime=60) # maxtime=60-->Wait 1 minute to get a connection before timing out. I got this 'matlabroot' parameter by running "matlabroot" ﹙without quotes﹚in my Matlab IDE (and copy/pasting the output)
    session.start() # If wait_for_matlab_to_load is true, then this method won't return anything until it'_s made a connection, which will time out if it takes more than max_loading_time_before_giving_up_in_seconds seconds.
    assert session.is_connected(),'(A message from Ryan): MATLAB failed to connect! (So we gotta stop here). I made this assertion error to prevent any further confusion if you try to write methods that use me. If I get too annoying, feel free to delete me (the assertion). \n' \
                                  'Troubleshooting: Perhaps the path you specified in the "matlabroot" argument of this method isn\'t really your matlab root? See the comments in this method for further information.'
    print_matlab_stdout=[print_matlab_stdout] # Turn the value into a list make it mutable (enable_stdout/disable_stdout below flip element [0])
    def handle_matlab_stdout(x: dict):
        # x will look something like this: ans = {'result': [], 'success': True, 'content': {'datadir': '/private/tmp/MatlabData/', 'stdout': 'a =\n 5\n', 'figures': []}}
        nonlocal print_matlab_stdout
        is_error=not x['success'] # Is a boolean.
        if print_matlab_stdout[0]:
            if is_error:
                fansi_print("MATLAB ERROR: ",'red','bold',new_line=False)
            fansi_print(x['content']['stdout'],'red' if is_error else'gray')
        else:
            return x # If we're not printing out the output, we give them ALL the data
    def wrapper(code: str = '',**assignments):
        assert isinstance(code,str),'The "Code" parameter should always be a string. If you wish to assign values to variables in the MATLAB namespace, use this method\'_s kwargs instead.'
        assert len(assignments) == 1 or not assignments,'Either one variable assignment or no variable assignments.'
        assert not (code and assignments),'You should either use this method as a way to get values/execute code, XOR to assign variables to non-strings like numpy arrays. NOT both! That could be very confusing to read, and make it difficult for new people to learn how to use this function of the r class. NOTE: This method limits you to a single variable assignment because sessions returns things when you do that, and this wrapper has to return that output. '
        # Note that code and va can be used like booleans, because we know that code is a string and we know that va is a dict that has string-based keys (because of the nature of kwargs).
        nonlocal session,handle_matlab_stdout
        if code:
            eval_attempt=session.get_variable(code)
            return handle_matlab_stdout(session.run_code(code)) if eval_attempt is None else eval_attempt # If eval_attempt is None, it means MATLAB didn't return a value for the code you gave it (like saying disp('Hello World')), or resulted in an error or something (like saying a=1/0).
        if assignments:
            for var_name in assignments:
                return handle_matlab_stdout(session.set_variable(var_name,assignments[var_name]))
        return session # If we receive no arguments, return the raw session (generated by the pymatbridge module).
    session.print_matlab_stdout=[print_matlab_stdout] # A list to make it mutable. NOTE(review): print_matlab_stdout is already a list at this point, so this double-wraps it - confirm whether anything actually reads session.print_matlab_stdout.
    def enable_stdout(): # Enables the pseudo-matlab to print out, on the python console, what a real matlab would print.
        nonlocal print_matlab_stdout
        print_matlab_stdout[0]=True
    def disable_stdout():
        nonlocal print_matlab_stdout
        print_matlab_stdout[0]=False
    wrapper.disable_stdout=disable_stdout
    wrapper.enable_stdout=enable_stdout
    wrapper.reboot=lambda *_:[fansi_print("Rebooting this MATLAB session...",None,'bold'),session.stop(),session.start(),fansi_print("...reboot complete!",None,'bold')] and None # wrapper.reboot() in case you accidentally call an infinite loop or something
    wrapper.stop=session.stop # I put this here explicitly, so you don't have to hunt around before figuring out that wrapper().stop() does the same thing as (what now is) wrapper.stop()
    wrapper.start=session.start # This exists for the same reason that the one above it exists.
    return wrapper
_static_matlab_session=matlab_disable_stdout=matlab_enable_stdout=matlab_reboot=matlab_stop=matlab_start=None # Should be None by default. This is the default Matlab session, which is kept in the r module.
# noinspection PyUnresolvedReferences
def _initialize_static_matlab_session():
    """Create the module-wide singleton MATLAB session and wire up its module-level convenience aliases."""
    global _static_matlab_session,matlab_disable_stdout,matlab_enable_stdout,matlab_reboot,matlab_stop,matlab_start
    session = matlab_session()
    _static_matlab_session = session
    matlab_disable_stdout  = session.disable_stdout
    matlab_enable_stdout   = session.enable_stdout
    matlab_reboot          = session.reboot
    matlab_stop            = session.stop
    matlab_start           = session.start
# noinspection PyUnresolvedReferences
def matlab(*code,**assignments): # Please note: you can create simultaneous MATLAB sessions by using the matlab_session method!
    """
    Convenience front-end to a single, lazily-created module-wide MATLAB session.
    No need to create a session yourself - the first call boots one automatically,
    then all arguments are forwarded to that session's wrapper.
    """
    global _static_matlab_session,matlab_disable_stdout,matlab_enable_stdout,matlab_reboot,matlab_stop,matlab_start
    if _static_matlab_session is None:
        # First use: boot the shared session (also populates the matlab_* aliases)
        fansi_print("r.matlab: Initializing the static matlab session...",None,'bold')
        _initialize_static_matlab_session()
    return _static_matlab_session(*code,**assignments)
def matlab_pseudo_terminal(pseudo_terminal):
    """
    Give a MATLAB flavour to a given pseudo_terminal function: every line typed
    is wrapped in a matlab('...') call before evaluation.
    Example usage: matlab_pseudo_terminal(pseudo_terminal)
    """
    _initialize_static_matlab_session()
    def as_matlab_call(command):
        return "matlab('" + command + "')"
    pseudo_terminal("pseudo_terminal() --> Entering interactive MATLAB console! (Running inside of the 'r' module)",as_matlab_call)
# endregion
# region Mini-Terminal: [mini_terminal:str]
# PLEASE READ: This is not meant to be called from the r class.
# Example usage: import r;exec(r.mini_terminal)
# Intended for use everywhere; including inside other functions (places with variables that pseudo_terminal can't reach)
mini_terminal="""#from r import fansi,fansi_print,string_from_clipboard,fansi_syntax_highlighting
_history=[]
fansi_print("Ryan's Mini-Terminal: A miniature pseudo-terminal for running inside functions!",'blue','bold')
fansi_print("\\tValid commands: [PASTE,END,HISTORY]",'blue')
while True:
try:
_header="--> "
_s=input(fansi(_header,'cyan','bold')).replace(_header,"").lstrip()
if not _s:
continue
if _s == "PASTE":
fansi_print("PASTE ⟶ Entering command from clipboard",'blue')
_s=string_from_clipboard
if _s == 'END':
fansi_print("END ⟶ Ending mini-terminal session",'blue')
break
elif _s == 'HISTORY':
fansi_print("HISTORY ⟶ Printing out list of commands you entered that didn't cause errors",'blue')
fansi_print(fansi_syntax_highlighting('\\n'.join(_history)))
else:
try:
_temp=eval(_s)
if _temp is not None:
_ans=_temp
fansi_print('_ans = ' + str(_ans),'green')
_history.append(_s)
except:
try:
exec(_s)
_history.append(_s)
except Exception as _error:
print(fansi("ERROR: ",'red','bold') + fansi(_error,'red'))
except KeyboardInterrupt:
print("Miniterminal: Caught keyboard interrupt (type END to exit)")
"""
# endregion
# region socketWrapper: [socket_writer,socket_reader,socket_read,socket_write,socket_reading_thread,get_my_ip]
# Default UDP port used by socket_writer/socket_reader when no port is specified
default_socket_port=13000
_socket_writers={}# A whole bunch of singletons: maps (targetIP,port) -> cached write function
def socket_writer(targetIP: str,port: int = None):
    """
    Return a send-function that writes ascii strings over UDP to targetIP:port.
    Writers are cached: repeated calls with the same (targetIP,port) pair return
    the same function. Find your own IP with print_my_ip. A falsy port means
    default_socket_port.
    """
    cache_key=(targetIP,port)
    if cache_key in _socket_writers:
        return _socket_writers[cache_key]
    from socket import AF_INET,SOCK_DGRAM,socket
    resolved_port=port or default_socket_port
    destination=(targetIP,resolved_port)
    udp_socket=socket(AF_INET,SOCK_DGRAM) # udp_socket.close()
    def write(asciiData: str):
        udp_socket.sendto(str(asciiData).encode("ascii"),destination)
    # A bit of decorating...
    write.targetIP=targetIP
    write.port=resolved_port
    _socket_writers[(targetIP,resolved_port)]=write
    assert socket_writer(targetIP,resolved_port) is write # Should have been added to _socket_writers
    return write
def socket_write(targetIP,port,message):
    """Send *message* to targetIP:port over UDP, reusing the cached writer singleton for that destination."""
    writer=socket_writer(targetIP,port)
    writer(message)
_socket_readers={}# A whole bunch of singletons
def socket_reader(port: int = None):# Blocks current thread until it gets a response
    """
    Return a blocking read-function bound to the given UDP port.
    Readers are cached per port; a falsy port means default_socket_port.
    The returned read() blocks until a datagram arrives and returns either the
    ascii payload, or (payload, sender_ip) when called with False.
    """
    if port in _socket_readers:
        return _socket_readers[port]
    from socket import AF_INET,socket,SOCK_DGRAM
    resolved_port=port or default_socket_port
    buffer_size=1024
    udp_socket=socket(AF_INET,SOCK_DGRAM) # udp_socket.close()
    udp_socket.bind(("",resolved_port)) # "" means listen on all interfaces
    def read(just_data_if_true_else_tuple_with_data_then_ip_addr:bool=True):
        data,sender=udp_socket.recvfrom(buffer_size)
        text=data.decode("ascii")
        if just_data_if_true_else_tuple_with_data_then_ip_addr:
            return text
        return (text,sender[0])# sender[0] is the sender's ip string; sender=tuple(string,int)
    read.port=resolved_port# A bit of decorating
    _socket_readers[resolved_port]=read
    assert socket_reader(resolved_port) is read
    return read
def socket_read(port,just_data_if_true_else_tuple_with_data_then_ip_addr:bool=True):
    """Blocking read of one UDP datagram from *port*, via the cached reader singleton."""
    reader=socket_reader(port)
    return reader(just_data_if_true_else_tuple_with_data_then_ip_addr)
def socket_reading_thread(handler,port:int=None,just_data_if_true_else_tuple_with_data_then_ip_addr:bool=True):
    """
    Spawn a background thread that forever reads UDP datagrams from *port* and
    feeds each one to *handler*. Returns the thread object.
    """
    read=socket_reader(port)
    def loop_forever():
        while True:
            message=read(just_data_if_true_else_tuple_with_data_then_ip_addr=just_data_if_true_else_tuple_with_data_then_ip_addr)
            handler(message)
    return run_as_new_thread(loop_forever)
def get_my_local_ip_address() -> str:
    """
    Return this machine's local (LAN) IPv4 address, e.g. '192.168.1.5'.
    Works by 'connecting' a UDP socket towards a public address (8.8.8.8) -
    UDP connect sends no packets - then reading the socket's own address.
    FIX: connect() is now inside the try block, so the socket is always
    closed even when connect() raises (e.g. no network route); previously
    that path leaked the socket.
    """
    import socket
    s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8",80))
        return s.getsockname()[0]
    finally:
        s.close()
get_my_ip=get_my_local_ip_address #Legacy: Some of my old code might depend on this function. It's deprecated because it's a bad name
def get_my_mac_address()->str:
    """
    Return this machine's MAC address as a colon-separated string.
    EXAMPLE:
        >> get_my_mac_address()
       ans = 28:cf:e9:17:d9:a5
    """
    if currently_running_linux():
        #If we're running linux, this solution works - and we don't have to pip install get-mac
        #    (pip install get-mac also works, but this saves you the trouble of installing another package)
        #Returned as a string
        def get_default_iface_name_linux():
            #Find the network interface used by the default route, by parsing the kernel routing table
            #https://stackoverflow.com/questions/20908287/is-there-a-method-to-get-default-network-interface-on-local-using-python3
            route = "/proc/net/route"
            with open(route) as f:
                for line in f.readlines():
                    try:
                        iface, dest, _, flags, _, _, _, _, _, _, _, = line.strip().split()
                        if dest != '00000000' or not int(flags, 16) & 2:
                            # Skip everything but the default route (destination 0.0.0.0 with the 0x2 gateway flag set)
                            continue
                        return iface
                    except Exception:
                        continue
            # NOTE(review): implicitly returns None when no default route exists; getmac() below then
            # fails its open() and returns the all-zero placeholder - confirm that's the intended fallback.
        def getmac(interface):
            #Read the interface's MAC address from sysfs
            #https://stackoverflow.com/questions/159137/getting-mac-address
            try:
                mac = open('/sys/class/net/' + interface + '/address').readline()
            except Exception:
                mac = "00:00:00:00:00:00"
            return mac[0:17] # A MAC address is exactly 17 characters (6 hex octets + 5 colons); drops the trailing newline
        return getmac(get_default_iface_name_linux())
    else:
        # Non-linux platforms: fall back to the get-mac pip package
        pip_import('getmac','get-mac')
        import getmac
        return getmac.get_mac_address()
def get_my_public_ip_address():
    """
    Return this machine's public-facing IP address as a string, as reported by an
    external web service. Tries icanhazip.com first, then falls back to ipgrab.io.
    Requires an internet connection (asserted up-front).
    """
    assert connected_to_internet(),'Cannot get our public IP address because we are not connected to the internet'
    pip_import('requests')
    import requests
    try:
        response=requests.get('https://icanhazip.com')
    except Exception:
        # Primary service unreachable - use the fallback provider
        response=requests.get('http://ipgrab.io')
    return response.text.strip()
# endregion
# region OSC≣'Open Sound Control' Output [OSC_output]:
# Default destination port for OSC messages sent by OSC_output
default_OSC_port=12345
# Best-effort default destination IP: our own LAN address at import time.
# NOTE(review): if this fails, default_OSC_ip is never bound, and OSC_output's
# 'default_OSC_ip or ...' would raise NameError on first use - confirm intent.
try:default_OSC_ip=get_my_local_ip_address()
except Exception:pass
_OSC_client=None# This is a singleton: lazily-created UDP client shared by all OSC_output calls
_OSC_values={}# Last value sent per OSC address (best-effort mirror; may drift out of sync)
def OSC_output(address,value):
    """
    Send an OSC (Open Sound Control) message of *value* to the given address,
    normalizing the address to start with '/'. Uses a lazily-created singleton
    UDP client aimed at default_OSC_ip:default_OSC_port, and records the value
    sent per address in _OSC_values (best-effort bookkeeping).
    """
    global default_OSC_ip,_OSC_client
    address=str(address)
    if address[0]!='/':
        # OSC addresses conventionally begin with a slash
        address='/'+address
    if not default_OSC_ip:
        default_OSC_ip=get_my_local_ip_address()
    from rp.TestOSC import SimpleUDPClient
    if not _OSC_client:
        _OSC_client=SimpleUDPClient(address=default_OSC_ip,port=default_OSC_port)
    _OSC_client.send_message(address=address,value=value)
    _OSC_values[address]=value# Attempt to keep track of them (though it might sometimes drift out of sync etc idk i haven't tested it as of writing this)
def OSC_jiggle(address):
    """
    Pulse an OSC address: send 1, then 0 (100ms apart), then restore the
    previously-recorded value for that address if one is known in _OSC_values.
    """
    address=str(address)
    if address in _OSC_values:
        previous=_OSC_values[address]
    OSC_output(address,1)
    sleep(.1)
    OSC_output(address,0)
    sleep(.1)
    if address in _OSC_values:
        # noinspection PyUnboundLocalVariable
        OSC_output(address,previous)
# endregion
# Intended for use everywhere; including inside other functions (places with variables that pseudo_terminal can't reach)
mini_terminal_for_pythonista="""
_history=[]
print("Ryan's Mini-Terminal For Pythonista: A microscopic pseudo-terminal for running inside functions; optimized for Pythonista!")
print("\\tValid commands: [PASTE,END,HISTORY]")
while True:
_header=">>> "
_s=input(_header).replace(_header,"").lstrip()
if not _s:
continue
if _s == "PASTE":
import clipboard
print("PASTE: Entering command from clipboard",'blue')
_s=clipboard.get()
if _s == 'END':
print("END: Ending mini-terminal session",'blue')
break
elif _s == 'HISTORY':
print("HISTORY: Printing out list of commands you entered that didn't cause errors",'blue')
print('\\n'.join(_history))
else:
try:
_temp=eval(_s)
if _temp is not None:
_=_temp
print('_ = ' + str(_))
_history.append(_s)
except:
try:
exec(_s)
_history.append(_s)
except BaseException as _error:
print("ERROR: " + str(_error))"""
# endregion
# Other stuff I don't know which category to put in:
def k_means_analysis(data_vectors,k_or_initial_centroids,iterations,tries):
    """
    Run scipy's k-means clustering *tries* times and keep the attempt with the
    lowest total distortion.

    Returns a 4-tuple:
        centroids                 - cluster centers of the best attempt
        total_distortion          - sum of squared distances between each sample and its cluster's mean
        parent_centroid_indexes   - for each sample, the index of its assigned centroid
        parent_centroid_distances - for each sample, its distance to that centroid

    Invariants (v=data_vectors, i=indexes, d=distances, c=centroids, k=k_or_initial_centroids):
        len(v) == len(i) == len(d)
        len(c) == k (when k is a count) XOR len(c) == len(k) (when k is initial centroids)
    """
    pip_import('scipy')
    from scipy.cluster.vq import kmeans,vq
    def attempt():
        return kmeans(obs=data_vectors,k_or_guess=k_or_initial_centroids,iter=iterations)
    centroids,total_distortion=attempt()
    for _ in range(tries - 1):
        candidate_centroids,candidate_distortion=attempt()
        if candidate_distortion < total_distortion:
            centroids,total_distortion=candidate_centroids,candidate_distortion
    parent_centroid_indexes,parent_centroid_distances=vq(data_vectors,centroids) # assign each sample to its nearest centroid
    return centroids,total_distortion,parent_centroid_indexes,parent_centroid_distances
def is_iterable(x):
    """
    Return True if *x* looks iterable, else False.
    Something counts as iterable when it's a collections.abc.Iterable instance,
    or defines __iter__, or defines __getitem__ (old-style iteration protocol).
    FIX: the old version implicitly returned None (not False) for non-iterables,
    because its falsy path fell off the end of the function; now an explicit
    bool is always returned.
    """
    try:
        from collections.abc import Iterable
        return bool(isinstance(x,Iterable) or hasattr(x,'__iter__') or hasattr(x,'__getitem__'))
    except Exception:
        # Extremely defensive: if we can't even check, treat it as not iterable
        return False
def space_split(x: str) -> list:
    """ Please don't use this - it's old and made it before I knew python well. Just use x.split(). """
    # Split on single spaces only (NOT all whitespace), discarding empty pieces
    return [piece for piece in x.split(" ") if piece != '']
def deepcopy_multiply(iterable,factor: int):
    """
    Concatenate *factor* deep copies of *iterable* into one list.
    Unlike plain list multiplication ([x]*n), the repeated elements do not
    share addresses - each repetition is an independent deep copy.
    """
    from copy import deepcopy
    result=[]
    for _ in range(factor):
        result.extend(deepcopy(iterable))
    return result
def assert_equality(*args,equality_check=identity):
    """
    Assert that all arguments are equal under equality_check, then return one of them.
    When you have a,b,c,d and e and they're all equal and you just can't choose...when the symmetry is just too much symmetry!
    PLEASE NOTE: Only adjacent pairs are compared - this assumes equality_check is symmetric!
    Returns None when called with no arguments, and the lone argument when called with one.
    """
    if not args:
        return None
    previous=args[0]
    if len(args) == 1:
        return previous
    for current in args:
        previous_check=equality_check(previous)
        current_check=equality_check(current)
        assert previous_check == current_check," assert_equality check failed, because " + str(previous_check) + " ≠ " + str(current_check)
        previous=current
    return previous
def get_nested_value(list_to_be_accessed,*address_int_list,ignore_errors: bool = False):
    """
    Tunnel through nested indexable containers: a[b][c][d] ≣ get_nested_value(a,b,c,d)
    If ignore_errors is True, stop tunneling at the first failed lookup and return
    the deepest value reached so far; otherwise raise IndexError on failure.
    """
    current=list_to_be_accessed
    for index in detuple(address_int_list):
        try:
            current=current[index]
        except Exception:
            if not ignore_errors:
                raise IndexError
            break
    return current
def get_nested_attr(obj, attr):
    """
    Get a nested attribute from an object using dot notation.
    Args:
        obj: The object to get the attribute from
        attr: String with attribute names in dot notation (e.g., "attr1.attr2.attr3")
            Any numeric attrs, like attr.1.2 are treated as indexes - like obj.attr[1][2]
            Any ..'s are meant for dict accesses, so attr..key.value is treated as obj.attr["key"].value
            If an int is passed as attr, such as get_nested_attr(obj, 0), it is equivalent to obj[0]
            Likewise, get_nested_attr(obj, ".key") is equivalent to obj["key"]
    Returns:
        The value of the nested attribute
    Raises:
        AttributeError: If any attribute in the chain doesn't exist
    FIX: previously, a non-numeric name that failed getattr was silently skipped
    (obj was left unchanged), so missing attributes never raised and the
    "users.0..." example below didn't actually work. Now a failed attribute
    lookup falls back to key access (obj[name]); if that also fails, the
    original AttributeError is raised as the docstring promises.
    EXAMPLES:
        >>> class Person:
        ...     def __init__(self):
        ...         self.name = "Alice"
        ...         self.skills = ["Python", "SQL"]
        ...         self.metadata = {"status": "active"}
        ...         self.address = type('Address', (), {'city': 'New York'})()
        ...
        >>> person = Person()
        >>> get_nested_attr(person, "name") --> 'Alice' # person.name
        >>> get_nested_attr(person, "address.city") --> 'New York' # person.address.city
        >>> get_nested_attr(person, "skills.0") --> 'Python' # person.skills[0]
        >>> get_nested_attr(person, "metadata..status") --> 'active' # person.metadata["status"]
        >>> get_nested_attr(["a", "b", "c"], 1) --> 'b' # ["a", "b", "c"][1]
        >>> get_nested_attr({"x": 123}, ".x") --> 123 # {"x": 123}["x"]
        >>> nested = {"users": [{"profile": {"languages": ["English", "Spanish"]}}]}
        >>> get_nested_attr(nested, "users.0..profile..languages.1") --> 'Spanish' # nested["users"][0]["profile"]["languages"][1]
    """
    assert isinstance(attr, (str,int)), type(attr)
    attr = str(attr) # In case an int index was passed, e.g. get_nested_attr(obj, 0)
    assert not '...' in attr, 'attr has syntax error - it has ... in it, only . and .. are allowed: '+repr(attr)
    attrs = attr.split('.')
    as_key = False
    for name in attrs:
        # Handle obj..name as obj["name"]
        if not name:
            # We just crossed a '..' separator - the next token is a key, not an attribute
            as_key = True
            continue
        if as_key:
            as_key = False
            obj = obj[name]
            continue
        try:
            # Normal functionality - object attribute access
            obj = getattr(obj, name)
        except AttributeError as attr_error:
            if name.isnumeric():
                # Numeric index access, e.g. skills.0 --> skills[0]
                obj = obj[int(name)]
            else:
                # Fall back to key access so dict keys also work without '..'
                # (as in the "users.0..." example). If that fails too, re-raise
                # the original AttributeError.
                try:
                    obj = obj[name]
                except Exception:
                    raise attr_error
    return obj
# def shell_command(command: str,as_subprocess=False,return_printed_stuff_as_string: bool = True) -> str or None:
# # region OLD VERSION: had an argument called return_printed_stuff_as_string, which I never really used as False, and run_as_subprocess when True might not return a string anyay. If I recall correctly, I implemented return_printed_stuff_as_string simply because it was sometimes annoying to see the output when using pseudo_terminal
# # def shell_command(command: str,return_printed_stuff_as_string: bool = True,run_as_subprocess=False) -> str or None:
# # if return_printed_stuff_as_string:
# # return (lambda ans:ans[ans.find('\n') + 1:][::-1])(os.popen(command).read()[::-1]) # EX: print(shell_command("pwd")) <-- Gets the current directory
# # from os import system
# # system(command)
# # endregion
# if as_subprocess:
# from subprocess import run
# if return_printed_stuff_as_string:
# stdout=run(command,shell=True).stdout
# if stdout is not None:
# return (lambda ans:ans[ans.find('\n') + 1:][::-1])(stdout[::-1]) # EX: print(shell_command("pwd")) <-- Gets the current directory
# else:
# run(command)
# else:
# if return_printed_stuff_as_string:
# return (lambda ans:ans[ans.find('\n') + 1:][::-1])(os.popen(command).read()[::-1]) # EX: print(shell_command("pwd")) <-- Gets the current directory
# else:
# from os import system
# system(command)
def shell_command(command: str, *, stdin: str = None) -> str:
    """
    Execute a shell command and return what it printed.
    Args:
        command (str): The shell command to execute
        stdin (str): Optional string to pipe into the command's stdin
    Returns:
        (str) The command's stdout, with a single trailing newline stripped
        (commands like "pwd" end their output with one - annoying!)
    """
    import subprocess
    completed = subprocess.run(command, shell=True, capture_output=True, text=True, input=stdin)
    captured = completed.stdout
    if captured and captured[-1] == '\n':
        captured = captured[:-1]
    return captured
def get_system_commands(*,use_cache=False):
    """
    Retrieve a list of executable command names available on the system's PATH.
    Works on UNIX-like systems (Mac and Linux) and on Windows (where both
    "ffmpeg.exe" and its extensionless form "ffmpeg" are included).
    If use_cache is True, a possibly slightly stale cached result is returned
    (it refreshes in the background, usually within a second).
    Returns:
        A list of command-name strings, sorted alphabetically then by length.
    Example:
        get_system_commands() --> ["ls", "pwd", "python3.8", "man", ... ] (on Unix)
                                  ["cmd.exe", "notepad.exe", "python.exe", ... ] (on Windows)
    """
    if use_cache:
        return _get_cached_system_commands()
    import os
    found = set()
    for directory in os.environ['PATH'].split(os.pathsep):
        if not os.path.isdir(directory):
            continue
        for name in os.listdir(directory):
            full_path = os.path.join(directory, name)
            try:
                if not os.path.isfile(full_path):
                    continue
                if os.name == 'nt':
                    # Windows: executability is determined by the extension
                    stem, extension = os.path.splitext(name)
                    if extension.lower() in ['.exe', '.bat', '.cmd']:
                        found.add(name) #EXAMPLE: "ffmpeg.exe"
                        found.add(stem) #EXAMPLE: "ffmpeg" Can be executed without extension as well
                else:
                    # Unix: check the executable permission bit
                    if os.access(full_path, os.X_OK):
                        found.add(name)
            except Exception:
                # Permission errors - ignore them
                pass
    # Sort alphabetically, then (stably) by length - convenient for viewing
    return sorted(sorted(found), key=len)
_get_sys_commands_cache=set()
def _get_cached_system_commands():
    """
    Meant for internal use in pterm! Both kibble and autocomplete.
    rp.get_system_commands can take .05 seconds, so after the first (blocking)
    call this returns the cached set immediately and refreshes it on a
    background thread. The cache only ever grows (set-union updates), which
    should hopefully prevent any thread collision fuckyness.
    """
    global _get_sys_commands_cache
    import rp
    def refresh():
        global _get_sys_commands_cache
        _get_sys_commands_cache |= set(rp.get_system_commands())
    if _get_sys_commands_cache:
        rp.run_as_new_thread(refresh)
    else:
        refresh() # First call: populate synchronously so we never return an empty cache
    return _get_sys_commands_cache
def _add_system_commands_to_pterm_bash_highlighter():
    """
    This function lets us syntax-highlight any system commands in the !<shell stuff> in pterm seen upon boot
    It can't update them over time right now, it's a one-time thing
    """
    import pygments.lexers.shell as shell
    import re
    Name=shell.Name
    # '!' is included so pterm's shell-escape prefix itself highlights too
    commands=get_system_commands()+['!']
    # Append one rule to pygments' BashLexer 'basic' state: any known command name
    # (re.escape'd so metacharacters stay literal), preceded by start/'!'/word
    # boundary and followed by whitespace/')'/backtick/end, is colored Name.Function.
    shell.BashLexer.tokens['basic']+=[(r'(^|!|\b)(' + '|'.join(re.escape(x) for x in commands) + r')(?=[\s)\`]|$)', Name.Function),]
# Run once at import time so the highlighter knows this system's commands from boot
_add_system_commands_to_pterm_bash_highlighter()
_system_command_exists_cache = {}
def system_command_exists(command, *, use_cache=False):
    """
    Checks if a system command exists; returns True if it does, False otherwise.

    Args:
        command: The system command to check (string).
        use_cache: Whether to reuse previously-computed answers (default: False).

    Returns:
        bool: True if the command exists, False otherwise.

    Implemented with shutil.which, which benchmarked ~200x faster than spawning
    `which` via subprocess and ~1150x faster than `command in get_system_commands()`
    (1000-iteration benchmark: which ~0.038ms mean vs subprocess ~6.057ms vs
    full PATH scan ~43.680ms). Unlike the PATH-scan approach it also handles
    absolute paths like '/opt/homebrew/bin/python3'.

    Examples:
        >>> system_command_exists("nonexistentcommand12345")
        False
        >>> system_command_exists("ls")
        True
        >>> system_command_exists("python3")
        True
        >>> system_command_exists('/opt/homebrew/bin/python3')
        True
    """
    import shutil
    if not isinstance(command, str):
        raise TypeError("Command must be a string.")
    cache = _system_command_exists_cache
    if use_cache and command in cache:
        return cache[command]
    found = shutil.which(command) is not None
    cache[command] = found
    return found
def add_to_env_path(path):
    """
    Adds a directory to the system's PATH environment variable.
    Appends path to $PATH using os.pathsep (':' on Unix, ';' on Windows).
    If the provided `path` is a file, its parent directory is added instead.
    Empty strings are ignored, and directories already on the PATH are not re-added.

    Args:
        path (str): The file or directory path to add to PATH. Must be a string.

    Raises:
        TypeError: If `path` is not a string.
    """
    if not isinstance(path, str):
        raise TypeError("Path must be a string but got "+repr(type(path)))
    if not path:
        return
    if os.path.isfile(path):
        path = os.path.dirname(path)
    existing = os.environ.get("PATH", "")
    if path in existing.split(os.pathsep):
        return # Already on the PATH - nothing to do
    os.environ["PATH"] = existing + os.pathsep + path if existing else path
def printed(message,value_to_be_returned=None,end='\n'):
    """
    Debugging helper: print something, then pass a value through.
    Prints value_to_be_returned when it's not None, otherwise message.
    Returns value_to_be_returned, falling back to message when it's falsy
    (note: that fallback is a long-standing quirk - a falsy value like 0
    yields message, not 0).
    """
    shown=message if value_to_be_returned is None else value_to_be_returned
    print(str(shown),end=end)
    return value_to_be_returned or message
def blob_coords(image,small_end_radius=10,big_start_radius=50):
    """
    Find (x,y) pixel coordinates of a bright blob via coarse-to-fine hill-climbing:
    blur heavily (big_start_radius), take the global max, then repeatedly sharpen
    the blur one radius at a time down to small_end_radius, re-climbing the local
    max at each level.
    """
    #TODO: wtf is this? lollll should I delete it?
    # small_end_radius is the 'wholeness' that we look for. Without it we might-as-well pickthe global max pixel we start with, which is kinda junky.
    assert big_start_radius >= small_end_radius
    if len(image.shape) == 3:
        # Collapse RGB input to a single float grayscale channel
        image=tofloat(_rgb_to_grayscale(image))
    def global_max(image):
        # Finds max-valued coordinates. Randomly chooses if multiple equal maximums. Assumes image is SINGLE CHANNEL!!
        assert isinstance(image,np.ndarray)
        assert len(image.shape) == 2 # SHOULD BE SINGLE CHANNEL!!
        return random_element(np.transpose(np.where(image == image.max()))).tolist()
    def get(x,y):
        # Reads that raise IndexError count as 0. NOTE(review): this outer 'get' is shadowed
        # by the identical one inside local_max and appears unused - candidate for removal.
        try:
            return image[x,y]
        except IndexError:
            return 0
    def local_max(image,x0,y0):
        # Gradient ascent pixel-wise. Assumes image is SINGLE CHANNEL!!
        assert isinstance(image,np.ndarray)
        assert len(image.shape) == 2 # SHOULD BE SINGLE CHANNEL!!
        def get(x,y):
            # IndexError reads count as 0 (NOTE: negative indices silently wrap, per numpy indexing)
            try:
                return image[x,y]
            except IndexError:
                return 0
        def step(x,y): # A single gradient ascent step
            best_val=0 # We're aiming to maximize this
            best_x=x
            best_y=y
            for Δx in [-1,0,1]:
                for Δy in [-1,0,1]:
                    if get(x + Δx,y + Δy) > best_val:
                        best_val=get(x + Δx,y + Δy)
                        best_x,best_y=x + Δx,y + Δy
            return best_x,best_y
        while step(x0,y0) != (x0,y0):
            # Keep climbing until we reach a fixed point (a local maximum)
            x0,y0=step(x0,y0)
        return x0,y0
    # image is now a single channel.
    def blurred(radius):
        return gauss_blur(image,radius,single_channel=True) # ,mode='constant')
    x,y=global_max(blurred(big_start_radius))
    for r in reversed(range(small_end_radius,big_start_radius)):
        # Sharpen one blur level at a time, re-climbing from the previous estimate
        x,y=local_max(blurred(r + 1),x,y)
    return x,y
def tofloat(ndarray):
    """
    Normalize an integer numpy array into floats by dividing by its dtype's max value.
    Things like np.int16 or np.int64 will all be scaled down by their max values; resulting in
    elements that in sound files would be floats ∈ [-1,1] and in images [0,255] ⟶ [0-1]
    """
    dtype_max=np.iinfo(ndarray.dtype).max
    return ndarray.astype(float) / dtype_max
def get_plt():
    """
    Import and return matplotlib.pyplot (installing matplotlib on demand),
    publishing it as the module-level global `plt` as a side effect.
    FIX: removed the old `locals()['plt']=plt` line - writing into locals()
    inside a function is a documented no-op in CPython, so it was dead code.
    """
    pip_import('matplotlib')
    global plt
    import matplotlib.pyplot as plt
    return plt
def display_dot(x,y=None,color='red',size=3,shape='o',block=False):
    """
    Plot a single marker at (x,y) on the current matplotlib figure.
    When y is omitted, x is interpreted as one 2d point.
    (Used to be called 'dot', in-case any of my old code breaks...)
    EXAMPLE: for theta in np.linspace(0,tau): display_dot(np.sin(theta),np.cos(theta));sleep(.1)
    """
    if y is None:
        # A single point was passed as x - unpack it into coordinates
        point=as_points_array([x])[0]
        x,y=point
    get_plt().plot([x],[y],marker=shape,markersize=size,color=color)
    display_update(block=block)
def display_path(path,*,color=None,alpha=1,marker=None,linestyle=None,block=False,**kwargs):
    """
    Displays a 'path' - a series of 2d vectors - as a matplotlib line plot.
    When color is None, matplotlib cycles to a new color on each call.
    """
    import matplotlib.pyplot as plt
    points=as_points_array(path)
    xs,ys=points.T #Separate the path into its x and y coordinate lists
    plt.plot(xs,ys,color=color,alpha=alpha,marker=marker,linestyle=linestyle,**kwargs)
    update_display(block)
def _translate_offline(text,to_language='ru'):
    """
    Offline transliteration via the 'transliterate' pip package.
    Private because right now it only supports russian and nearby scripts - too
    niche to expose as a general translation function:
        ka Georgian, sr Serbian, mn Mongolian, el Greek, bg Bulgarian,
        mk Macedonian, ru Russian, hy Armenian, l1 Latin1Supplement, uk Ukrainia
    Runs INSANELY fast compared to google - translating every line in RP to
    russian in just .6 seconds - but I can't vouch for its quality.
    TODO: Refine this
    """
    pip_import('transliterate')
    from transliterate import translit
    return translit(text,to_language)
def translate(to_translate,to_language="en",from_language="auto"):
    # I DID NOT WRITE THIS!! I GOT IT FROM https://github.com/mouuff/mtranslate/blob/master/mtranslate/core.py
    """Returns the translation using google translate
    you must shortcut the language you define
    (French = fr, English = en, Spanish = es, etc...)
    if not defined it will detect it or use english by default
    Example:
    print(translate("salut tu vas bien?", "en"))
    hello you alright?
    """
    # Known language codes - used only to validate the arguments below
    LANGUAGES={
        'af' :'Afrikaans',
        'sq' :'Albanian',
        'ar' :'Arabic',
        'hy' :'Armenian',
        'bn' :'Bengali',
        'ca' :'Catalan',
        'zh' :'Chinese',
        'zh-cn' :'Chinese (Mandarin/China)',
        'zh-tw' :'Chinese (Mandarin/Taiwan)',
        'zh-yue':'Chinese (Cantonese)',
        'hr' :'Croatian',
        'cs' :'Czech',
        'da' :'Danish',
        'nl' :'Dutch',
        'en' :'English',
        'en-au' :'English (Australia)',
        'en-uk' :'English (United Kingdom)',
        'en-us' :'English (United States)',
        'eo' :'Esperanto',
        'fi' :'Finnish',
        'fr' :'French',
        'de' :'German',
        'el' :'Greek',
        'hi' :'Hindi',
        'hu' :'Hungarian',
        'is' :'Icelandic',
        'id' :'Indonesian',
        'it' :'Italian',
        'ja' :'Japanese',
        'ko' :'Korean',
        'la' :'Latin',
        'lv' :'Latvian',
        'mk' :'Macedonian',
        'no' :'Norwegian',
        'pl' :'Polish',
        'pt' :'Portuguese',
        'pt-br' :'Portuguese (Brazil)',
        'ro' :'Romanian',
        'ru' :'Russian',
        'sr' :'Serbian',
        'sk' :'Slovak',
        'es' :'Spanish',
        'es-es' :'Spanish (Spain)',
        'es-us' :'Spanish (United States)',
        'sw' :'Swahili',
        'sv' :'Swedish',
        'ta' :'Tamil',
        'th' :'Thai',
        'tr' :'Turkish',
        'vi' :'Vietnamese',
        'cy' :'Welsh'
    }
    from_language=from_language.lower()
    to_language=to_language.lower()
    # 'auto' is accepted in addition to the explicit codes (google detects the source language)
    assert from_language in set(LANGUAGES)|{'auto'}
    assert to_language in set(LANGUAGES)|{'auto'}
    def translate(text,dest='en',src='auto'):
        # Shadows the outer name on purpose; does the actual work via the googletrans package
        pip_import('googletrans','googletrans==4.0.0-rc1')#https://stackoverflow.com/questions/52455774/googletrans-stopped-working-with-error-nonetype-object-has-no-attribute-group
        import googletrans
        return googletrans.Translator().translate(text,dest,src).text
    return translate(to_translate,to_language,from_language)
#OLD VERSION (NO LONGER WORKS)
# LANGUAGES['auto']='(automatic)'
# valid_languages=set(LANGUAGES)
# is_valid=lambda x:x in valid_languages
# assert is_valid(to_language) and is_valid(from_language),'Invalid language! Cannot translate. Valid languages: \n'+strip_ansi_escapes(indentify(display_dict(LANGUAGES,print_it=False,arrow=' --> ')))
# import sys
# import re
# if sys.version_info[0] < 3:
# # noinspection PyUnresolvedReferences
# import urllib2
# import urllib
# # noinspection PyUnresolvedReferences
# import HTMLParser
# else:
# import html.parser
# import urllib.request
# import urllib.parse
# agent={'User-Agent':
# "Mozilla/4.0 (\
# compatible;\
# MSIE 6.0;\
# Windows NT 5.1;\
# SV1;\
# .NET CLR 1.1.4322;\
# .NET CLR 2.0.50727;\
# .NET CLR 3.0.04506.30\
# )"}
# def unescape(text):
# if sys.version_info[0] < 3:
# parser=HTMLParser.HTMLParser()
# else:
# parser=html.parser.HTMLParser()
# try:
# # noinspection PyDeprecation
# return parser.unescape(text)
# except:
# return html.unescape(text)
# base_link="http://translate.google.com/m?hl=%s&sl=%s&q=%s"
# if sys.version_info[0] < 3:
# # noinspection PyUnresolvedReferences
# to_translate=urllib.quote_plus(to_translate)
# link=base_link % (to_language,from_language,to_translate)
# request=urllib2.Request(link,headers=agent)
# raw_data=urllib2.urlopen(request).read()
# else:
# to_translate=urllib.parse.quote(to_translate)
# link=base_link % (to_language,from_language,to_translate)
# request=urllib.request.Request(link,headers=agent)
# raw_data=urllib.request.urlopen(request).read()
# data=raw_data.decode("utf-8")
# expr=r'class="t0">(.*?)<'
# re_result=re.findall(expr,data)
# if len(re_result) == 0:
# result=""
# else:
# result=unescape(re_result[0])
# return result
def sync_sorted(*lists_in_descending_sorting_priority, key=None, reversed=False):
    """
    Sorts the first list and reorders all other lists to have the same order as the sorted first list.

    Parameters:
        *lists_in_descending_sorting_priority: One or more equal-length sequences.
            The first is the main sequence; later ones break ties and are reordered in sync with it.
        key (function or list of functions, optional): A single key function, or a list of key
            functions applied positionally (one per input list). None means the identity function,
            both as the whole argument and as an entry in the list. Defaults to None.
        reversed (bool, optional): If set to True, sorts in descending order. Defaults to False.

    Returns:
        tuple: A tuple of lists sorted and reordered in sync with the first list.

    Examples:
        >>> sync_sorted([1, 1, 2], ['c', 'a', 'b'])
        ([1, 1, 2], ['a', 'c', 'b'])
        >>> sync_sorted([1, 1, 2], ['c', 'a', 'b'], reversed=True)
        ([2, 1, 1], ['b', 'c', 'a'])
        >>> sync_sorted([1, 1, 2], ['c', 'a', 'b'], [3, 2, 1], key=[None, str, None])
        ([1, 1, 2], ['a', 'c', 'b'], [2, 3, 1])
        >>> sync_sorted([], [], [])
        ([], [], [])

    Fixes vs prior version:
        - The equal-length assertion checked `len(set(map(len, ...)))` for truthiness, which is
          true for ANY non-empty argument list, so mismatched lengths were silently accepted
          (zip would then truncate). It now asserts there is at most one distinct length.
        - All-empty inputs returned () because zip(*[]) yields nothing; they now return a tuple
          of empty lists, as the docstring always claimed.
    """
    # Input assertions (key may be None, a callable, or an iterable of callables/None)
    assert key is None or callable(key) or hasattr(key, '__iter__') and all(callable(x) or x is None for x in key), 'The given key must be None, a key function, or a list of keys'
    assert len(set(map(len, lists_in_descending_sorting_priority))) <= 1, 'All lists must have the same length'

    # Normalize `key` into a positional list of per-list key functions
    if key is None or callable(key):
        keys = [key]
    else:
        keys = key

    # Combine lists element-wise into rows (tuples), then sort the rows
    combined_rows = zip(*lists_in_descending_sorting_priority)

    def sorting_key(row):
        # Apply keys[i] to row[i] where a key was given; identity otherwise
        for i in range(len(row)):
            if i < len(keys) and keys[i] is not None:
                yield keys[i](row[i])
            else:
                yield row[i]

    sorted_rows = sorted(
        combined_rows, key=lambda row: tuple(sorting_key(row)), reverse=reversed
    )

    # Unzip the sorted rows back into separate columns
    columns = list(zip(*sorted_rows))
    if not columns:
        # zip(*[]) yields nothing - reconstruct the correct number of empty lists
        return tuple([] for _ in lists_in_descending_sorting_priority)
    return tuple(list(column) for column in columns)
sync_sort=sync_sorted#For backwards compatibility
def by_number(x):
    """
    Sort key: orders items by length first, then by value, so e.g. '2' sorts before '10'.
    Example: paths=sorted(paths, key=by_number)
    """
    return len(x), x
def sorted_by_number(x, *, reverse=False):
    """Return a new list of x sorted with the by_number key (length first, then value)."""
    ordering = by_number
    return sorted(x, key=ordering, reverse=reverse)
def sorted_by_len(x, *, reverse=False):
    """Return a new list of x sorted by element length (stable; reverse for longest-first)."""
    return sorted(x, key=len, reverse=reverse)
def sorted_by_attr(x, attr, *, key=None, reverse=False):
    """
    Return x sorted by the (possibly nested, dotted) attribute `attr` of each element.
    An optional `key` is applied to the attribute value before comparison.
    """
    def attr_key(element):
        value = get_nested_attr(element, attr)
        return value if key is None else key(value)
    return sorted(x, key=attr_key, reverse=reverse)
# def sync_sorted(*lists_in_descending_sorting_priority,key=identity):
# # Sorts main_list and reorders all *lists_in_descending_sorting_priority the same way, in sync with main_list
# return tuple(zip(*sorted(zip(*lists_in_descending_sorting_priority),key=lambda x:tuple(map(key,x)))))
def _string_with_any(string, substrings, match_func, return_match=False):
    "Helper: test string against each substring with match_func; return the first match (or a bool)."
    substrings = detuple(substrings)
    if isinstance(substrings, str):
        substrings = [substrings]
    match = next((candidate for candidate in substrings if match_func(string, candidate)), None)
    if return_match:
        return match
    return match is not None
def starts_with_any(string, *prefixes, return_match=False):
    "Returns True if string begins with any prefix. With return_match=True, returns the matching prefix, or None."
    matcher = str.startswith
    return _string_with_any(string, prefixes, matcher, return_match)
def ends_with_any(string, *suffixes, return_match=False):
    "Returns True if string ends with any suffix. With return_match=True, returns the matching suffix, or None."
    matcher = str.endswith
    return _string_with_any(string, suffixes, matcher, return_match)
def _contains_func_y(y):
    """Normalize the *y varargs used by contains_any/contains_all/in_any/in_all into a list of candidates."""
    y = detuple(y)
    stringlike = type(y) in (str, bytes)
    if stringlike or not hasattr(y, '__contains__'):
        # Wrap strings and scalars so contains_any('abc','axyz') doesn't iterate letters
        y = [y]
    return y
def contains_any(x,*y):
    """
    Returns True if x contains at least one element of y.
    TODO: Add a return_match=False optional arg, like in starts_with_any and ends_with_any
    EXAMPLES:
        assert contains_any('texture','tex')          == True
        assert contains_any('tex','texture')          == False
        assert contains_any('texture',['tex'])        == True
        assert contains_any('texture','abc')          == False
        assert contains_any('texture','abc','tex')    == True
        assert contains_any('texture',['abc','tex'])  == True
        assert contains_any([1,2,3,4],1)              == True
        assert contains_any([1,2,3,4],5)              == False
        assert contains_any([1,2,3,4],5,6,1)          == True
        assert contains_any([1,2,3,4],[5,6,1,2])      == True
        assert contains_any([1,2,3,4],[5,6])          == False
    """
    assert hasattr(x,'__contains__'),'x cannot contain anything. type(x)=='+repr(type(x))
    candidates=_contains_func_y(y)
    for candidate in candidates:
        if candidate in x:
            return True
    return False
def contains_all(x,*y):
    """
    Returns True if x contains every element of y.
    EXAMPLES:
        assert contains_all('texture','t', 'e', 'x')   == True
        assert contains_all('texture','z')             == False
        assert contains_all('texture',['t', 'e', 'x']) == True
        assert contains_all([1,2,3,4],1, 2)            == True
        assert contains_all([1,2,3,4],1, 5)            == False
        assert contains_all([1,2,3,4],[1,2])           == True
        assert contains_all([1,2,3,4],[5,6])           == False
    """
    assert hasattr(x,'__contains__'),'x cannot contain anything. type(x)=='+repr(type(x))
    candidates=_contains_func_y(y)
    for candidate in candidates:
        if candidate not in x:
            return False
    return True
def in_any(x,*y):
    """
    Returns True if x is a member of at least one container in y.
    TODO: Add a return_match=False optional arg, like in starts_with_any and ends_with_any
    EXAMPLES:
        assert in_any('tex','texture', 'textbook') == True
        assert in_any('abc','texture', 'textbook') == False
        assert in_any(1,[1,2,3], [2,3,4])          == True
        assert in_any(5,[1,2,3], [2,3,4])          == False
    """
    containers=_contains_func_y(y)
    assert all(hasattr(z,'__contains__') for z in containers), 'Not all y can contain things: '+str(set(map(type,containers)))
    for container in containers:
        if x in container:
            return True
    return False
def in_all(x,*y):
    """
    Returns True if x is a member of every container in y.
    EXAMPLES:
        assert in_all('tex','texture', 'textbook')   == False
        assert in_all('t','texture', 'textbook')     == True
        assert in_all(1,[1,2,3], [1,3,4])            == True
        assert in_all(5,[1,2,3], [2,3,4])            == False
        assert in_all(5,[5,1,2,3], [2,3,4])          == False
        assert in_all(5,[5,1,2,3], [5,2,3,4])        == True
    """
    containers=_contains_func_y(y)
    assert all(hasattr(z,'__contains__') for z in containers), 'Not all y can contain things: '+str(set(map(type,containers)))
    for container in containers:
        if x not in container:
            return False
    return True
def contains_sort(array, *, key=lambda x: x, contains=lambda x, y: y in x, reverse=False):
    """
    Sort strings so that any string that is contained in another comes before it;
    pairs with no containment relation fall back to comparing key(a) vs key(b)
    (lexicographic by default). The `contains` predicate is pluggable.

    Parameters:
        array: iterable of strings (or any values usable with `contains` and `key`)
        key: fallback comparison key, applied when neither element contains the other
        contains: contains(x, y) should return True when y is "inside" x
        reverse: sort in descending order

    Examples (verified):
        >>> contains_sort(["abc", "ab", "abcd"])
        ['ab', 'abc', 'abcd']
        >>> contains_sort(["abc", "aabc", "aaabc"])
        ['abc', 'aabc', 'aaabc']
    """
    import functools
    def compare(left, right):
        # Substring-of relation dominates; key comparison breaks unrelated pairs
        if contains(right, left):
            return -1
        if contains(left, right):
            return 1
        left_key = key(left)
        right_key = key(right)
        if left_key < right_key:
            return -1
        if left_key > right_key:
            return 1
        return 0
    return sorted(array, key=functools.cmp_to_key(compare), reverse=reverse)
contains_sorted=contains_sort
def sync_shuffled(*lists):
    """
    Shuffles several sequences with the same random permutation, keeping them in sync.
    Returns a list of tuples (one tuple per input sequence).
    EXAMPLE:
        >>> sync_shuffled([1,2,3,4,5],'abcde')
        ans = [(1, 3, 5, 2, 4), ('a', 'c', 'e', 'b', 'd')]
    """
    lists = detuple(lists)
    rows = list(zip(*lists))
    shuffled_rows = shuffled(rows)
    return list(zip(*shuffled_rows))
# noinspection PyAugmentAssignment
def full_range(x,min=0,max=1):
    """
    Linearly rescale x so its values span [min, max].
    Works on numpy arrays and torch-like tensors (anything with .min()/.max()).
    NOTE(review): a constant-valued input divides by zero after the shift - same as before; confirm callers avoid it.
    """
    try:
        # Boolean arrays can't be scaled directly; promote to float first
        if x.dtype==bool:
            x=x.astype(float)
    except AttributeError:
        pass
    try:
        # Numpy path. Plain (non-augmented) assignment is deliberate: x-= / x/= caused numpy errors.
        shifted=x - np.min(x)
        scaled=shifted / np.max(shifted)
        return scaled * (max - min) + min
    except Exception:
        # Generic path for pytorch etc: use the object's own .min()/.max() methods
        shifted=x - x.min()
        scaled=shifted / shifted.max()
        return scaled * (max - min) + min
# region Math constants (based on numpy)
# High-precision literals; float() truncates these to double precision anyway.
π=pi=3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862
τ=tau=2 * π  # tau: one full turn in radians
# endregion
# region Tone Generators
# Note: All Tone Sample Generators have an amplitude of [-1,1]
def sine_tone_sampler(ƒ=None,T=None,samplerate=None):
    """Return a sine tone as samples in [-1, 1]; ƒ=frequency (hz), T=duration (seconds)."""
    frequency = ƒ          or default_tone_frequency
    duration  = T          or default_tone_seconds
    rate      = samplerate or default_samplerate
    number_of_wavelengths = frequency * duration
    number_of_samples     = int(duration * rate)
    return np.sin(np.linspace(0, τ * number_of_wavelengths, number_of_samples))
def triangle_tone_sampler(ƒ=None,T=None,samplerate=None):
    """Return a triangle wave in [-1, 1]: the arcsine of a sine tone, rescaled by 2/π."""
    sine = sine_tone_sampler(ƒ, T, samplerate)
    return (2 / π) * np.arcsin(sine)
def sawtooth_tone_sampler(ƒ=None,T=None,samplerate=None):
    """Return a sawtooth wave in [-1, 1]; ƒ=frequency (hz), T=duration (seconds)."""
    frequency = ƒ          or default_tone_frequency
    duration  = T          or default_tone_seconds
    rate      = samplerate or default_samplerate
    number_of_wavelengths = frequency * duration
    ramp = np.linspace(0, number_of_wavelengths, int(duration * rate)) % 1
    return ramp * 2 - 1
def square_tone_sampler(ƒ=None,T=None,samplerate=None):
    """Return a square wave in [-1, 1]: the sign of a sawtooth tone."""
    sawtooth = sawtooth_tone_sampler(ƒ, T, samplerate)
    return np.sign(sawtooth)
default_tone_frequency=440 # also known as note A4
default_tone_sampler=sine_tone_sampler  # waveform used by play_tone when none is specified
default_tone_seconds=1  # default duration of generated tones, in seconds
def play_tone(hz=None,seconds=None,samplerate=None,tone_sampler=None,blocking=False): # Plays a sine tone
    """Synthesize a tone with tone_sampler (sine by default) and play it through the speakers."""
    frequency = hz      or default_tone_frequency
    duration  = seconds or default_tone_seconds
    sampler   = tone_sampler or default_tone_sampler
    samples   = sampler(frequency, duration)
    play_sound_from_samples(samples, samplerate or default_samplerate, blocking=blocking)
def play_semitone(ↈ_semitones_from_A4_aka_440hz=0,seconds=None,samplerate=None,tone_sampler=None,blocking=False):
    """Play the tone that is the given number of semitones above (or below) A4 = 440hz."""
    hz = semitone_to_hz(ↈ_semitones_from_A4_aka_440hz)
    play_tone(hz, seconds, samplerate, tone_sampler, blocking)
def semitone_to_hz(ↈ):
    """Convert a semitone offset from A4 (440hz) into a frequency, using equal temperament."""
    return 440 * (2 ** (ↈ / 12))
def play_chord(*semitones:list,t=1,block=True,sampler=triangle_tone_sampler):
    """Play several semitone offsets at once: mix the tones, renormalize to [-1, 1], and play."""
    mixed = sum(sampler(semitone_to_hz(semitone), T=t) for semitone in semitones)
    play_sound_from_samples(full_range(x=mixed, min=-1), blocking=block)
# endregion
def mini_editor(out: str = "",namespace=(),message=""):
    """
    A tiny curses-based multiline text editor with syntax highlighting: like input(),
    except multiline and editable. Seeds the editor with `out` and returns the edited
    text once the user presses fn+enter.
    Supported controls: arrow keys, backspace, delete, tab, shift+tab, enter.
    `namespace` is forwarded to fansi_syntax_highlighting for name-aware coloring.
    `message` is printed (without newline) when the editor starts.
    NOTE(review): the original docstring contained the line
        message=message or "Enter text here and then press fn+enter to exit. Supported controls: ..."
    which looks like a default-message assignment accidentally swallowed into the
    docstring; as the code stands, `message` genuinely defaults to "" - confirm intent.
    Please note: You must be using a REAL terminal to run this! Just using pycharm's
    "run" is not sufficient. Using apple's terminal app, for example, IS however.
    """
    import curses
    # NOTE(review): initscr() here plus curses.wrapper(main) below means curses is initialized twice - confirm intended.
    stdscr=curses.initscr()
    # region Initialize curses colors:
    curses.start_color()
    curses.use_default_colors()
    curses.init_pair(0,curses.COLOR_BLACK,curses.COLOR_BLACK)
    black=curses.color_pair(0)
    curses.init_pair(1,curses.COLOR_RED,curses.COLOR_BLACK)
    red=curses.color_pair(1)
    curses.init_pair(2,curses.COLOR_GREEN,curses.COLOR_BLACK)
    green=curses.color_pair(2)
    curses.init_pair(3,curses.COLOR_YELLOW,curses.COLOR_BLACK)
    yellow=curses.color_pair(3)
    curses.init_pair(4,curses.COLOR_BLUE,curses.COLOR_BLACK)
    blue=curses.color_pair(4)
    curses.init_pair(5,curses.COLOR_CYAN,curses.COLOR_BLACK)
    cyan=curses.color_pair(5)
    curses.init_pair(6,curses.COLOR_MAGENTA,curses.COLOR_BLACK)
    magenta=curses.color_pair(6)
    curses.init_pair(7,curses.COLOR_WHITE,curses.COLOR_BLACK)
    gray=curses.color_pair(7)
    # endregion
    def main(stdscr):
        # Event loop: read one key, update the buffer/cursor, then redraw everything.
        print(message,end='',flush=True)
        # region http://colinmorris.github.io/blog/word-wrap-in-pythons-curses-library
        class WindowFullException(Exception):
            pass
        def addstr_wordwrap(window,s,mode=0):
            """ (cursesWindow, str, int, int) -> None
            Add a string to a curses window with given dimensions. If mode is given
            (e.g. curses.A_BOLD), then format text accordingly. We do very
            rudimentary wrapping on word boundaries.
            Raise WindowFullException if we run out of room.
            """
            # TODO Is there really no way to get the dimensions of a window programmatically?
            # passing in height and width feels ugly.
            height,width=window.getmaxyx()
            height-=1
            width-=1
            (y,x)=window.getyx() # Coords of cursor
            # If the whole string fits on the current line, just add it all at once
            if len(s) + x <= width:
                window.addstr(s,mode)
            # Otherwise, split on word boundaries and write each token individually
            else:
                for word in words_and_spaces(s):
                    if len(word) + x <= width:
                        window.addstr(word,mode)
                    else:
                        if y == height - 1:
                            # Can't go down another line
                            raise WindowFullException()
                        window.addstr(y + 1,0,word,mode)
                    (y,x)=window.getyx()
        def words_and_spaces(s):
            import itertools
            """
            >>> words_and_spaces('spam eggs ham')
            ['spam', ' ', 'eggs', ' ', 'ham']
            """
            # Inspired by http://stackoverflow.com/a/8769863/262271
            return list(itertools.chain.from_iterable(zip(s.split(),itertools.repeat(' '))))[:-1] # Drop the last space
        # endregion
        nonlocal out
        cursor_shift=0  # distance (in characters) of the cursor from the END of the buffer
        while True:
            # region Keyboard input:
            stdscr.nodelay(1) # do not wait for input when calling getch
            c=stdscr.getch() # get keyboard input
            typing=False
            updown=None
            if c != -1: # getch() returns -1 if none available
                # text_to_speech(c)
                if chr(c) in "": # ⟵ Up/Down/Left/Right arrow keys (Up/Down ≣ Scroll up down) are not currently implemented. I don't know how.
                    pass
                elif c == ord("Ą"): # left arrow key
                    cursor_shift+=1
                    cursor_shift=min(len(out),cursor_shift)
                elif c == ord("ą"): # right arrow key
                    cursor_shift-=1
                    cursor_shift=max(0,cursor_shift)
                elif c == ord("ă"): # up arrow key
                    updown='up'
                elif c == ord("Ă"): # down arrow key
                    updown='down'
                elif c == ord('ŗ') == 343: # fn+enter was pressed# c==10:# Enter key was pressed
                    return out
                else:
                    typing=True
                # out+=chr(c)
                # out_lines=out.split("\n")
                # cursor_y=len(out_lines)-1
                # while cursor_x<0:
                #     cursor_x+=len(out_lines[cursor_y])
                #     cursor_y-=1
                out_lines=out.split("\n")
                cursor_y=0
                cursor_x=len(out) - cursor_shift  # cursor as an absolute index into out (for now)
                assert cursor_x >= 0
                if updown:
                    # Move the cursor a whole line up or down by locating neighboring newlines
                    if updown == 'up':
                        i0=out[:cursor_x].rfind("\n")
                        i1=out[:i0].rfind("\n")
                        cursor_x=min(len(out) - 1,max(0,min(cursor_x - i0,i0 - i1) + i1))
                        cursor_shift=len(out) - cursor_x
                    else:
                        assert updown == 'down'
                        i0=out[:cursor_x].rfind("\n")
                        i1=out.find("\n",i0 + 1)
                        cursor_x=min(len(out) - 1,max(0,min(cursor_x - i0,i1 - i0) + i1))
                        cursor_shift=len(out) - cursor_x
                elif typing:
                    if c == 127: # Backspace key was pressed
                        if cursor_x:
                            out=out[:cursor_x - 1] + out[cursor_x:]
                    elif c == ord("Ŋ"): # Delete key was pressed
                        if cursor_x < len(out):
                            out=out[:cursor_x] + out[cursor_x + 1:]
                            cursor_shift-=1
                            cursor_x+=1
                    elif c == ord('\t'): # tab
                        out=out[:cursor_x] + "    " + out[cursor_x:] # 4 spaces per tab
                    elif c == ord('š'): # shift+tab
                        if cursor_x:
                            out=out[:max(0,cursor_x - 4)] + out[cursor_x:] # 4 backspaces
                    else:
                        out=out[:cursor_x] + chr(c) + out[cursor_x:]
                for i in range(len(out_lines) - 1):
                    out_lines[i]+="\n" # So that ∑out_lines = out
                # Convert the absolute cursor index into (cursor_y, cursor_x) screen coordinates
                while cursor_x > len(out_lines[cursor_y]):
                    cursor_x-=len(out_lines[cursor_y])
                    cursor_y+=1
                try:
                    if out[len(out) - cursor_shift - 1] == "\n": # c_x+1?
                        cursor_x=0
                        cursor_y+=1
                except:
                    pass
            # endregion
            # region Real-time display:
            stdscr.erase()
            stdscr.move(0,0) # return curser to start position to re-print everything
            height,width=stdscr.getmaxyx()
            height-=1
            width-=1
            def print_fansi_colors_in_curses(stdscr,s: str): # Only supports text colors; DOES NOT support anything else at the moment. Assumes we are given a fansi sequence.
                # Walks the string char-by-char, translating ANSI color escape codes into curses color pairs.
                text_color=None
                while True: # Until string is empty.
                    if s.startswith("\x1b["):
                        while s.startswith("["): # Oddly without this I got -------...... ⭆ ^[[0;33m-^[[0;33m-^[[0;33m-^[[0;33m-^[[0;33m-^[.......
                            s=s[1:]
                        i=s.find('m') # there should always be a m somewhere, print(repr(fansi_print("h",'red','bold'))) for example.
                        ss=s[:i].split(';')
                        s=s[i + 1:] # +1 to take care of the m which is gone now
                        if '30' in ss: # black
                            text_color=black
                        elif '31' in ss: # red
                            text_color=red
                        elif '32' in ss: # green
                            text_color=green
                        elif '33' in ss: # yellow
                            text_color=yellow
                        elif '34' in ss: # blue
                            text_color=blue
                        elif '35' in ss: # magenta
                            text_color=magenta
                        elif '36' in ss: # cyan
                            text_color=cyan
                        elif '37' in ss: # gray
                            text_color=gray
                        else: # if'0'in ss:# clear style
                            text_color=None
                    if not s:
                        break # avoid trying to access indexes in an empty string
                    if text_color is not None:
                        # stdscr.addstr(s[0],text_color)
                        addstr_wordwrap(stdscr,s[0],text_color)
                    else:
                        # stdscr.addstr(s[0])
                        addstr_wordwrap(stdscr,s[0])
                    s=s[1:]
            print_fansi_colors_in_curses(stdscr,fansi_syntax_highlighting(out,namespace))
            assert isinstance(out,str)
            # Clamp the cursor to the visible window before moving it
            while cursor_x > width:
                cursor_y+=1
                cursor_x-=width
            cursor_y=min(height,cursor_y)
            stdscr.move(cursor_y,cursor_x)
            stdscr.refresh()
            # endregion
    curses.wrapper(main)
    return out
def get_terminal_size():
    """
    Returns a (width:int, height:int) tuple for the current terminal.
    Falls back to (80, 25) when the size cannot be determined.
    From http://stackoverflow.com/questions/566746/how-to-get-linux-console-window-width-in-python/14422538#14422538
    Adapted for windows via GPT4: https://chat.openai.com/share/976384cd-69fe-46fa-bc75-9f1ed97c51ef
    """
    import os
    import platform
    current_os = platform.system()
    if current_os == 'Windows':
        # For Windows: query the console screen buffer via kernel32
        try:
            import struct
            from ctypes import windll, create_string_buffer
            # stdin handle is -10
            # stdout handle is -11
            # stderr handle is -12
            # NOTE(review): this queries the STDERR handle (-12), not stdout - confirm that's intended
            h = windll.kernel32.GetStdHandle(-12)
            csbi = create_string_buffer(22)
            res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
            if res:
                # Unpack CONSOLE_SCREEN_BUFFER_INFO; the visible window is (left,top)-(right,bottom)
                (bufx, bufy, curx, cury, wattr,
                left, top, right, bottom,
                maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
                sizex = right - left + 1
                sizey = bottom - top + 1
                return sizex, sizey
        except:
            # Default value if above method fails
            return 80, 25
    else:
        # For Linux (and other POSIX systems): TIOCGWINSZ ioctl on any attached tty fd
        def ioctl_GWINSZ(fd):
            # Returns (rows, cols) for fd, or None if the ioctl fails
            try:
                import fcntl
                import termios
                import struct
                cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
            except:
                return
            return cr
        cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
        if not cr:
            # stdin/stdout/stderr weren't ttys; try the controlling terminal directly
            try:
                fd = os.open(os.ctermid(), os.O_RDONLY)
                cr = ioctl_GWINSZ(fd)
                os.close(fd)
            except:
                pass
        if not cr:
            # Default size or try environment variables
            cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
        # cr is (rows, cols); return (cols, rows)
        return int(cr[1]), int(cr[0])
def get_terminal_width():
    """ Attempts to return the width of the current TTY in characters - otherwise it will return 80 by default """
    width, _ = get_terminal_size()
    return width
def get_terminal_height():
    """ Attempts to return the height of the current TTY in characters - otherwise it will return 25 by default """
    _, height = get_terminal_size()
    return height
def is_namespaceable(c: str) -> bool:
    """
    Returns True if the given string can be used as a python variable name.
    The empty string is also accepted (matching the original exec-based implementation,
    which appended random letters before testing and so passed for '').
    Note: python keywords like 'if' are identifiers per str.isidentifier and also return True.
    Cleanup: the old, unreachable exec-based implementation that followed the return
    statement has been removed (it was dead code).
    """
    return str.isidentifier(c) or c == ''
def is_literal(c: str) -> bool:
    """ If character can be used as the first of a python variable's name """
    keywords = ['False','def','if','raise','None','del','import','return','True','elif','in','try','and','else','is','while','as','except','lambda','with','assert','finally','nonlocal','yield','break','for','not','class','from','or','continue','global','pass']
    if c == ":":
        return True
    looks_like_name = is_namespaceable(c) or c.isalnum()
    return looks_like_name and c.lstrip().rstrip() not in keywords
def clip_string_width(x: str,max_width=None,max_wraps_per_line=1,clipped_suffix='…'):
    """
    Truncate each line of x so it fits in the terminal (or max_width characters),
    appending clipped_suffix to any line that was shortened. Handles multiline input.
    """
    limit = (max_width or get_terminal_width()) * max_wraps_per_line
    clipped_lines = []
    for line in x.split('\n'):
        if len(line) > limit:
            line = line[:limit - len(clipped_suffix)] + clipped_suffix
        clipped_lines.append(line)
    return '\n'.join(clipped_lines)
def properties_to_xml(src_path,target_path): # Found this during my 219 hw4 assignment when trying to quickly convert a .properties file to an xml file to get more credit
    """
    Convert a java .properties file at src_path into java's XML properties format at target_path.
    Values may contain '=' characters; only the first '=' on a line separates key from value.
    SOURCE: https://www.mkyong.com/java/how-to-store-properties-into-xml-file/
    Their code was broken so I had to fix it. It works now.
    Bugfix vs prior version: both file handles are now closed via context managers
    (the source file was previously opened and never closed), and the no-op
    .encode('utf8').decode() round-trip on the value was removed.
    """
    with open(src_path) as src, open(target_path,'w') as target:
        target.write('<?xml version="1.0" encoding="utf-8" standalone="no"?>\n')
        target.write('<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">\n')
        target.write('<properties>\n')
        for line in src.readlines():
            word=line.split('=')
            key=word[0]
            message='='.join(word[1:]).strip()  # re-join so values containing '=' survive
            target.write('\t<entry key="' + key + '"><![CDATA[' + message + ']]></entry>\n')
        target.write('</properties>')
def split_including_delimiters(input: str, delimiter: str):
    """
    Splits an input string while keeping the delimiters in the output.

    Unlike str.split:
      - an empty-string delimiter is allowed (it splits between characters)
      - the output never contains extra empty strings (the only '' entries
        are the delimiter itself, when delimiter=='')
    Consequently len(split_including_delimiters('', d)) == 0 for every delimiter d,
    and ''.join(result) always reconstructs the original input.

    INPUTS:
        input: Can be any string
        delimiter: Can be any string

    EXAMPLES:
        >>> split_including_delimiters('Hello World  ! ',' ')
        ans = ['Hello ', 'World ', ' ', '! ', ' ']
        >>> split_including_delimiters("Hello**World**!***", "**")
        ans = ['Hello', '**', 'World', '**', '!', '**', '*']

    EXAMPLES:
        assert split_including_delimiters('-xx-xx-','xx') == ['-', 'xx', '-', 'xx', '-'] # length 5
        assert split_including_delimiters('xx-xx-' ,'xx') == ['xx', '-', 'xx', '-']      # length 4
        assert split_including_delimiters('-xx-xx' ,'xx') == ['-', 'xx', '-', 'xx']      # length 4
        assert split_including_delimiters('xx-xx'  ,'xx') == ['xx', '-', 'xx']           # length 3
        assert split_including_delimiters('xxxx'   ,'xx') == ['xx', 'xx']                # length 2
        assert split_including_delimiters('xxx'    ,'xx') == ['xx', 'x']                 # length 2
        assert split_including_delimiters('x'      ,'xx') == ['x']                       # length 1
        assert split_including_delimiters(''       ,'xx') == []                          # length 0
        assert split_including_delimiters(''       ,''  ) == []                          # length 0
        assert split_including_delimiters('a'      ,''  ) == ['a']                       # length 1
        assert split_including_delimiters('aa'     ,''  ) == ['a', '', 'a']              # length 3
        assert split_including_delimiters('aaa'    ,''  ) == ['a', '', 'a', '', 'a']     # length 5

    I made this question an answer at
    https://stackoverflow.com/questions/2136556/in-python-how-do-i-split-a-string-and-keep-the-separators/73562313#73562313
    """
    # Input assertions
    assert isinstance(input,str), "input must be a string"
    assert isinstance(delimiter,str), "delimiter must be a string"

    # str.split computes the non-delimiter pieces quickly; an empty delimiter
    # is the edge case where we split between every pair of characters.
    pieces = input.split(delimiter) if delimiter else list(input)

    # Interleave the delimiter back between pieces, skipping empty pieces
    # so the output never gains spurious empty strings.
    result = []
    for index, piece in enumerate(pieces):
        if index:
            result.append(delimiter)
        if piece:
            result.append(piece)
    return result
def split_letters_from_digits(s: str) -> list:
    """
    Splits a string into alternating runs of letters and of digits.
    EXAMPLE: "ads325asd234" -> ['ads', '325', 'asd', '234']
    SOURCE: http://stackoverflow.com/questions/28290492/python-splitting-numbers-and-letters-into-sub-strings-with-regular-expression
    """
    import re
    pattern = re.compile(r'[A-Za-z]+|\d+')
    return pattern.findall(s)
def split_camel_case(s: str) -> list:
    """ Split a CamelCase name into its words. Example: split_camel_case("HelloWorld")==["Hello","World"] """
    import re
    # Lazily match up to a lower->Upper boundary, an ACRONYMWord boundary, or end-of-string
    return re.findall('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', s)
def split_python_tokens(code: str):
    """
    Returns a list of python source tokens, INCLUDING whitespace, newlines and comments,
    such that ''.join(result) reconstructs the original input string.
    Tokenization is done with pygments' Python3Lexer (installed on demand).
    """
    pip_import('pygments')
    from pygments.lexers import Python3Lexer
    lexer = Python3Lexer()
    # get_tokens_unprocessed yields (offset, token_type, token_string) triples
    return [token_string for _, _, token_string in lexer.get_tokens_unprocessed(code)]
def clamp(x,min_value,max_value):
    """Clamp x into [min_value, max_value] (when the bounds cross, max_value wins)."""
    return min(max(x, min_value), max_value)
def int_clamp(x: int,min_value: int,max_value: int) -> int:
    """Integer-annotated clamp: restrict x to [min_value, max_value]."""
    return clamp(x,min_value,max_value)
def float_clamp(x: float,min_value: float,max_value: float) -> float:
    """Float-annotated clamp: restrict x to [min_value, max_value]."""
    # noinspection PyTypeChecker
    return clamp(x,min_value,max_value)
#region stack traces
def get_current_exception():
    'Returns the exception currently being handled, or None if not inside an except block'
    return sys.exc_info()[1]
def pop_exception_traceback(exception,n=1):
    """
    Mutates `exception` by dropping the first n entries of its traceback chain, then returns it.

    Often when writing my repl, tracebacks contain an annoying number of uninteresting
    outer levels (including the 'exec' that ran the code); this trims them off the front
    of the chain (the end printed first by print_stack_trace).

    Edge case: if the traceback has fewer than n entries, it is emptied rather than erroring.
    """
    remaining=n
    while remaining > 0 and exception.__traceback__ is not None:
        exception.__traceback__=exception.__traceback__.tb_next
        remaining-=1
    return exception
def print_verbose_stack_trace(exception=None):
    """Pretty-print exception (current one by default) via stackprinter; fall back to a plain trace if it fails."""
    if exception is None: exception=get_current_exception()
    stackprinter=pip_import('stackprinter')
    try:
        style_kwargs = {} if _disable_fansi else {'style': 'darkbg2'}
        stackprinter.show(exception, file=sys.stdout, **style_kwargs)
    except ValueError as e:
        # stackprinter can't format some objects, e.g.: ValueError: Can't format KeyboardInterrupt(). Expected an exception instance, sys.exc_info() tuple,a frame or a thread object.
        fansi_print("Stackprinter failed to print your verbose stack trace using rp.print_verbose_stack_trace():",'magenta','underlined')
        print_stack_trace(e)
        fansi_print("Here's your error's traceback:",'magenta','underlined')
        print_stack_trace(exception)
def print_stack_trace(error:BaseException=None,full_traceback: bool = True,header='r.print_stack_trace: ERROR: ',print_it=True):
    """
    Print (or return, with print_it=False) a red-colored stack trace for `error`
    (the currently-handled exception by default).
    With full_traceback=False, only the final exception line is shown.
    Note: when print_it=True this returns print(...)'s result, i.e. None - same as always.
    """
    from traceback import format_exception,format_exception_only
    if error is None: error=get_current_exception()
    if full_traceback:
        body=''.join(format_exception(error.__class__,error,error.__traceback__))
    else:
        body=''.join(format_exception_only(error.__class__,error))[:-1]  # drop trailing newline
    message=fansi(header,'red','bold') + fansi(body,'red')
    if print_it:
        return print(message)
    return message
def print_highlighted_stack_trace(error:BaseException=None):
    """
    Print a stack trace for `error` (the currently-handled exception by default),
    syntax-highlighted for the terminal with pygments.
    """
    from traceback import format_exception
    from pygments import highlight
    from pygments.lexers import Python3TracebackLexer
    from pygments.formatters import TerminalTrueColorFormatter
    from pygments.formatters.terminal import TerminalFormatter
    if error is None: error=get_current_exception()
    trace_lines=format_exception(error.__class__,error,error.__traceback__)
    colored=highlight(''.join(trace_lines), Python3TracebackLexer(), TerminalFormatter())
    print(colored)
def print_rich_stack_trace(error_or_frames_back=None, *, extra_lines=5, show_locals=False, width=None, print_output=True):
    """
    Use the 'rich' library to print or return a stack trace.
    This function can handle both exceptions and current execution frames.
    Args:
        error_or_frames_back: Either an exception to display or an integer representing
            how many frames to go back from current execution point.
            If None, uses current exception or current frame.
        extra_lines (int): Number of extra lines of code to show around the trace point.
        show_locals (bool): Whether to display local variables.
        width (int, optional): Width of the traceback output. If None, uses terminal width.
        print_output (bool): Whether to print the traceback (True) or return it as a string (False).
    Returns:
        str or None: If print_output is False, returns the traceback as a string.
            If print_output is True, prints to the console and returns None.
    """
    pip_import('rich')
    import inspect
    import io
    import types
    from rich.console import Console
    from rich.traceback import Traceback, LOCALS_MAX_LENGTH, LOCALS_MAX_STRING
    if width is None:
        width = get_terminal_width()
    # For backward compatibility: if error_or_frames_back is None, try to get current exception
    if error_or_frames_back is None:
        error = get_current_exception()
        # If no current exception, we'll use the current frame (frames_back=0)
        if error is not None:
            error_or_frames_back = error
        else:
            error_or_frames_back = 0
    # Determine if we're dealing with an exception or a frame depth
    if isinstance(error_or_frames_back, BaseException):
        # Handle exception traceback
        error = error_or_frames_back
        exc_type = type(error)
        traceback = error.__traceback__
        # Create the traceback object
        rich_tb = Traceback.from_exception(
            exc_type=exc_type,
            exc_value=error,
            traceback=traceback,
            width=width,
            extra_lines=extra_lines,
            theme=None,
            show_locals=show_locals,
            locals_max_length=LOCALS_MAX_LENGTH,
            locals_max_string=LOCALS_MAX_STRING,
        )
        if print_output:
            import rich
            rich.print(rich_tb)
            return None
        else:
            # Get string representation
            string_io = io.StringIO()
            console = Console(file=string_io, width=width, force_terminal=True, color_system="standard")
            console.print(rich_tb)
            return string_io.getvalue()
    else:
        # Handle current frame traceback (synthetic)
        frames_back = error_or_frames_back
        if not isinstance(frames_back, int):
            frames_back = 0
        # Get the caller's frame
        current_frame = inspect.currentframe()
        # Add 1 to frames_back to account for this function's frame
        total_frames_to_skip = frames_back + 1
        # Skip the desired number of frames
        for _ in range(total_frames_to_skip):
            if current_frame.f_back is not None:
                current_frame = current_frame.f_back
            else:
                # No more frames to skip, so we use the last available frame
                break
        # Now create a synthetic traceback that includes all frames from this point upward
        # We need to capture the frames in reverse order (innermost first)
        frames = []
        frame = current_frame
        while frame:
            frames.append(frame)
            frame = frame.f_back
        # Build the traceback chain from the bottom up (outermost to innermost)
        # (types.TracebackType is constructible at runtime since Python 3.7)
        synthetic_tb = None
        for frame in reversed(frames):
            synthetic_tb = types.TracebackType(
                tb_next=synthetic_tb,
                tb_frame=frame,
                tb_lasti=frame.f_lasti,
                tb_lineno=frame.f_lineno
            )
        # Create a dummy exception with our synthetic traceback
        exc_value = Exception("Traceback capture point")
        # Create a Traceback object
        traceback = Traceback.from_exception(
            Exception,
            exc_value,
            synthetic_tb,
            width=width,
            extra_lines=extra_lines,
            show_locals=show_locals,
            locals_max_length=LOCALS_MAX_LENGTH,
            locals_max_string=LOCALS_MAX_STRING
        )
        # Get string representation
        string_io = io.StringIO()
        console = Console(file=string_io, width=width, force_terminal=True, color_system="standard")
        console.print(traceback)
        # Get the rendered string and filter out the exception message
        # (keep only box-drawing lines, dropping the dummy "Exception: Traceback capture point" footer)
        traceback_str = string_io.getvalue()
        traceback_lines = traceback_str.split('\n')
        filtered_lines = [line for line in traceback_lines if '─' in line or '│' in line]
        result = '\n'.join(filtered_lines)
        if print_output:
            print(result, end='')
            return None
        else:
            return result
# Helper function for those who prefer a more explicit API
def get_rich_traceback_string(frames_back=0, *, extra_lines=5, show_locals=True, width=None):
    """
    Get the current execution frame and format it as a pretty ANSI-colored traceback string.
    Args:
        frames_back (int): Number of frames to go back in the call stack. Defaults to 0.
        extra_lines (int): Number of extra lines of code to show around the trace point.
        show_locals (bool): Whether to display local variables.
        width (int, optional): Width of the traceback output. If None, uses terminal width.
    Returns:
        str: ANSI-formatted traceback string
    """
    # +1 skips this wrapper's own frame so the caller's frame is what gets traced
    return print_rich_stack_trace(
        frames_back + 1,
        extra_lines=extra_lines,
        show_locals=show_locals,
        width=width,
        print_output=False,
    )
#Private right now because it feels a bit redundant. maybe expose it in the future. used be web_evaluator
#https://chatgpt.com/share/ee550199-4242-41c6-88dd-f2a72c8d4c84
def _get_stack_trace_string(exc):
import traceback
# Get the traceback object from the exception
tb = exc.__traceback__
# Create a TracebackException object
traceback_exception = traceback.TracebackException(type(exc), exc, tb)
# Format the traceback as a string
stack_trace_string = ''.join(traceback_exception.format())
return stack_trace_string
#endregion
def audio_stretch(mono_audio, new_number_of_samples):# Does not take into account the last bit of looping audio
    """
    Linearly resample mono_audio to exactly new_number_of_samples samples.
    >>> audio_stretch([1,10],10)
    ans = [1,2,3,4,5,6,7,8,9,10]
    """
    sample_positions = np.linspace(0, len(mono_audio) - 1, new_number_of_samples)
    return [linterp(mono_audio, position) for position in sample_positions]
def cartesian_to_polar(x, y, ϴ_unit=τ)->tuple:
    """Input conditions: x,y ∈ ℝ ⨁ x﹦[x₀,x₁,x₂……]⋀ y﹦[y₀,y₁,y₂……]
    returns: (r, ϴ) where r ≣ radius,ϴ ≣ angle and 0 ≤ ϴ < ϴ_unit. ϴ_unit﹦τ --> ϴ is in radians,ϴ_unit﹦360 --> ϴ is in degrees"""
    radius = np.hypot(x, y)
    # % binds as tightly as * and /, so this wraps the angle into [0, ϴ_unit)
    angle = np.arctan2(y, x) / τ % 1 * ϴ_unit
    return radius, angle
def complex_to_polar(complex,ϴ_unit=τ)->tuple:
    """returns: (r, ϴ) where r ≣ radius,ϴ ≣ angle and 0 ≤ ϴ < ϴ_unit. ϴ_unit﹦τ --> ϴ is in radians,ϴ_unit﹦360 --> ϴ is in degrees.
    Input conditions: c ≣ complex ⋀ c ∈ ℂ ⨁ c﹦[c₀,c₁,c₂……]
    Returns r and ϴ either as numbers OR as two lists: all the r's and then all the ϴ's"""
    # Fixed: ϴ_unit used to be ignored entirely (the raw np.angle result in (-π,π]
    # was returned). The angle is now wrapped into [0, ϴ_unit) as the docstring
    # promises, matching cartesian_to_polar above.
    return np.abs(complex),np.angle(complex)/τ%1*ϴ_unit# np.abs is calculated per number, not vector etc
default_left_to_right_sum_ratio=0# By default, take a left hand sum (0=left, .5=midpoint, 1=right; used by riemann_sum when its ratio argument is None)
def riemann_sum(f,x0,x1,N,left_to_right_sum_ratio=None):# Verified ✔
    """
    Numerically integrate f over [x0,x1) with N rectangular bars.
    Desmos: https://www.desmos.com/calculator/tgyr42ezjq
    left_to_right_sum_ratio﹦0 --> left hand sum
    left_to_right_sum_ratio﹦.5 --> midpoint hand sum
    left_to_right_sum_ratio﹦1 --> right hand sum
    If left_to_right_sum_ratio is None, the module-level default_left_to_right_sum_ratio is used.
    The x1 bound MUST be exclusive as per definition of a left riemann sum
    """
    # Fixed: this used `left_to_right_sum_ratio or default...`, which silently
    # replaced an explicitly-passed 0 with the module default. Use `is None`
    # as the sentinel so 0 (a left-hand sum) is respected.
    c=default_left_to_right_sum_ratio if left_to_right_sum_ratio is None else left_to_right_sum_ratio
    w=(x1-x0)/N# Width of the bars
    return sum(f(x0+w*(i+c))*w for i in range(N))
def riemann_mean(f,x0,x1,N,left_to_right_sum_ratio=None):# To prevent redundancy of the N parameter
    """Average value of f on [x0,x1]: the riemann sum divided by the interval length."""
    interval_length = x1 - x0
    total = riemann_sum(f, x0, x1, N, left_to_right_sum_ratio)
    return total / interval_length
def fourier(cyclic_function,freq,cyclic_period=τ,ↈ_riemann_terms=100):
    """
    Single-frequency fourier analysis of cyclic_function, computed with a riemann-sum integral.
    Returns polar coordinates representing amplitude,phase (AKA r,ϴ) via complex_to_polar.
    Can enter a vector of frequencies to get two vectors of outputs if you so desire.
    With cyclic_period=τ, sin(x) has a freq of 1.
    With cyclic_period=1, sin(x) has a freq of 1/τ.
    """
    # Can enter a vector of frequencies to two vectors of outputs if you so desire
    # Returns polar coordinates representing amplitude,phase (AKA r,ϴ)
    # With period=τ, sin(x) has a freq of 1.
    # With period=1, sin(x) has a freq of 1/τ.
    # ⁠⁠⁠⁠                                  ⎧                                                                                                                      ⎫
    # ⁠⁠⁠⁠                    ⎪             ⎧                                                                             ⎫⎪
    # ⁠⁠⁠⁠                    ⎪             ⎪       ⎧                        ⎫   ⎧                                      ⎫ ⎪⎪
    return complex_to_polar(riemann_mean(lambda x:np.exp(freq * τ * x * 1j) * cyclic_function(x*cyclic_period),0,1,ↈ_riemann_terms))
    # ⁠⁠⁠                    ⎪             ⎪       ⎩                        ⎭   ⎩                                      ⎭ ⎪⎪
    # ⁠⁠⁠                    ⎪             ⎩                                                                             ⎭⎪
    # ⁠⁠⁠                    ⎩                                                                                                                      ⎭
def discrete_fourier(cyclic_vector,freq):# Assuming that cyclic_vector is a single wave-cycle, freq represents the number of its harmonic
    """
    Discrete analogue of fourier(): treats cyclic_vector as one full wave-cycle and analyzes its freq'th harmonic.
    Returns polar coordinates representing amplitude,phase (AKA r,ϴ).
    Can enter a vector of frequencies to get two vectors of outputs if you so desire.
    """
    period = len(cyclic_vector)
    def interpolated(x):
        # Cyclic linear interpolation turns the sample vector into a continuous periodic function
        return linterp(x, cyclic_vector, cyclic=True)
    return fourier(cyclic_function=interpolated, freq=freq, cyclic_period=period, ↈ_riemann_terms=period)
def matrix_to_tuples(m,filter=lambda r,c,val:True):# Filter can significantly speed it up
    """
    Flatten matrix m into a list of (row, column, value) tuples, in row-major order.
    filter(r, c, value) -> bool can exclude entries (and can significantly speed this up).
    WARNING: Can be very slow on large matrices.
    """
    # Fixed: the filter used to be called with m[r,c] (numpy-style indexing),
    # which crashed on plain nested lists; m[r][c] works for both.
    # Also flattens with a double comprehension instead of list_flatten.
    return [(r, c, m[r][c])
            for r in range(len(m))
            for c in range(len(m[r]))
            if filter(r, c, m[r][c])]
def perpendicular_bisector_function(x0,y0,x1,y1):
    """
    Return f(x): the perpendicular bisector of the segment (x0,y0)-(x1,y1), as a linear function of x.
    https://www.desmos.com/calculator/1ykebsqtoa
    NOTE: the segment must not be horizontal (y0 != y1), or the bisector is vertical and not a function of x.
    """
    A,B=x0,y0
    X,Y=x1,y1 # Fixed: this read `Y,X=x1,y1`, which swapped the second point's coordinates and broke the formula
    def linear_function(x):
        # Passes through the midpoint ((A+X)/2, (B+Y)/2) with slope -(X-A)/(Y-B),
        # the negative reciprocal of the segment's slope.
        return ((B+Y)/2)-(X-A)/(Y-B)*(x-(A+X)/2)
    return linear_function
def harmonic_analysis_via_least_squares(wave,harmonics:int):
    """
    My attempt to analyze frequencies by taking the least-squares fit of a bunch of sinusoids to a signal instead of using the fourier transform. It had interesting results, but it's not nearly as fast as a FFT.
    Returns a (2, harmonics) array: row 0 holds the per-harmonic amplitudes, row 1 the phases.
    """
    prod=np.matmul
    inv=np.linalg.inv
    b=wave # In terms of linear algebra in Ax~=b
    samples=len(b)
    # Each row of m is one harmonic's phase ramp: harmonic number times linspace(0, tau)
    m=np.asmatrix(np.linspace(1,harmonics,harmonics)).T*np.matrix(np.linspace(0,tau,samples,endpoint=False))
    # Design matrix: one sin column and one cos column per harmonic
    A=np.asmatrix(np.concatenate([np.sin(m),np.cos(m)])).T
    Api=prod(inv(prod(A.T,A)),A.T) # Api====A pseudo inverse
    out=np.asarray(prod(Api,b))[0]
    out=np.reshape(out,[2,len(out)//2]) # First vector is the sin array second is the cos array
    amplitudes=sum(out**2)**.5 # Per-harmonic amplitude from its sin/cos components
    phases=np.arctan2(*out) # Per-harmonic phase, i.e. arctan2(sin_component, cos_component)
    return np.asarray([amplitudes,phases]) # https://www.desmos.com/calculator/fnlwi71n9x
def cluster_by_key(iterable,key,*,as_dict=False)->list:
    """
    Group the values of iterable by key(value).
    key is a function mapping each value to something hashable.
    Returns a list of groups, in first-seen key order - or, if as_dict,
    an OrderedDict mapping each key to its group.
    """
    assert callable(key)
    assert is_iterable(iterable)
    from collections import OrderedDict
    groups=OrderedDict()
    for item in iterable:
        groups.setdefault(key(item),[]).append(item)
    if as_dict:
        return groups
    return list(groups.values())
def cluster_by_attr(iterable,attr,*,as_dict=False)->list:
    """Like cluster_by_key, but groups elements by the (possibly nested, via get_nested_attr) attribute named attr."""
    def attr_key(item):
        return get_nested_attr(item, attr)
    return cluster_by_key(iterable, attr_key, as_dict=as_dict)
def chunk_by_attr(iterable,attr,*,as_dict=False)->list:
    """
    Like chunk_by_key, but chunks consecutive elements whose attribute `attr`
    (possibly nested, resolved via get_nested_attr) compare equal.
    NOTE(review): the as_dict parameter is accepted but never used - it is not
    forwarded to chunk_by_key (presumably copied from cluster_by_attr); confirm
    before relying on it.
    """
    return chunk_by_key(
        iterable,
        lambda x: get_nested_attr(x, attr),
        compare=lambda x, y: x == y,
    )
def chunk_by_key(iterable, key=lambda x: x, compare=lambda x, y: x == y):
    """
    Divides an iterable into chunks based on the equality of elements, as defined by the compare function.
    The key function is applied to each element to determine what should be compared.
    Args:
    - iterable (iterable): The input iterable to divide into chunks
    - key (function): A function to extract comparison key from each element in the iterable
    - compare (function): A function to compare equality of two consecutive keys, should return bool
    Returns:
    - A list of lists where each sublist is a chunk of equal elements from the iterable.
    Example:
        ```
        >>> list(chunk_by_key('aAAbccdEEefeee'))
       ['a', 'AA', 'b', 'cc', 'd', 'EE', 'e', 'f', 'eee']
        >>> list(chunk_by_key(iter('aAAbccdEEefeee')))
       [['a'] , ['A','A'] , ['b'] , ['c','c'] , ['d'] , ['E','E'] , ['e'] , ['f'] , ['e','e','e']]
        >>> list(chunk_by_key(iter('aAAbccdEEefeee'),key=str.lower))
       [['a','A','A'] , ['b'] , ['c','c'] , ['d'] , ['E','E','e'] , ['f'] , ['e','e','e']]
        >>> list(chunk_by_key([0,5,2,3,7,5,3,4,7,4,2,3,2,1,2,2,5,6],key=lambda x:x%2))
       [[0] , [5] , [2] , [3,7,5,3] , [4] , [7] , [4,2] , [3] , [2] , [1] , [2,2] , [5] , [6]]
        >>> list(chunk_by_key([4,5,6,7,7,8,9,0,3,4,5,6,5,6,7],compare=lambda x,y: x<y))
       [[4,5,6,7] , [7] , [7,8,9] , [0,3,4,5,6] , [5,6,7]]
        ```
    """
    #Input Assertions
    assert is_iterable(iterable)
    assert callable(key)
    assert callable(compare)
    if hasattr(iterable,'__getitem__'):
        #Attempt to chunk via slicing
        #Chunking this way is preferable if possible
        #It yields slices of the original input, instead of a new list
        #That way it's both faster and returns the same type
        #For example, if iterable is a str and we chunk via slices,
        #it will yield substrings instead of lists of chars
        #This method is slightly more complex than the fallback
        #If you don't want to use slicing with your input,
        #wrap it in iter() as seen in the examples
        try:
            chunk_start = 0
            chunk_end = 0
            for element in iterable:
                # Fixed: compare against the chunk's PREVIOUS element
                # (iterable[chunk_end-1]), not its first element - otherwise
                # non-transitive compares (like the x<y example above) behave
                # differently from the fallback branch below.
                if chunk_start < chunk_end and not compare(
                    key(iterable[chunk_end - 1]), key(element)
                ):
                    yield iterable[chunk_start:chunk_end]
                    chunk_start = chunk_end
                chunk_end += 1
            if chunk_start < chunk_end:
                yield iterable[chunk_start:chunk_end]
        except Exception:
            # Fixed: this used `return chunk_by_key(...)`, which inside a
            # generator silently discards the fallback's output. `yield from`
            # actually produces it. (The fallback restarts from the beginning,
            # so it is only correct when the failure happened before any chunk
            # was yielded - the typical case, e.g. a dict-like object whose
            # __getitem__ isn't integer-indexed fails on the first lookup.)
            yield from chunk_by_key(iter(iterable), key=key, compare=compare)
    else:
        #Basic chunking - works with any iterable
        #Will yield lists
        chunk = []
        for element in iterable:
            if not chunk or compare(key(chunk[-1]), key(element)):
                chunk.append(element)
            else:
                yield chunk
                chunk = [element]
        if chunk:
            yield chunk
def cluster_filter(vec,filter=identity): # This has a terrible name...I'm not sure what to rename it so if you think of something, go for it!
    """
    Extract the maximal contiguous runs of vec whose elements pass filter, as slices of vec.
    EXAMPLE: cluster_filter([2,3,5,9,4,6,1,2,3,4],lambda x:x%2==1) --> [[3, 5, 9], [1], [3]] <---- It separated all chunks of odd numbers
    """
    runs = []
    run_start = None  # Index where the current passing run began, or None if not in a run
    for index, element in enumerate(vec):
        if filter(element):
            if run_start is None:
                run_start = index
        elif run_start is not None:
            runs.append(vec[run_start:index])
            run_start = None
    if run_start is not None:
        # The final run extends to the end of vec
        runs.append(vec[run_start:])
    return runs
# region Originally created for the purpose of encoding 3 bytes of precision into a single image via r,g,b being three digits
def proportion_to_digits(value,base=256,number_of_digits=3): # Intended for values between 0 and 1
    """Decompose value (intended to be in [0,1]) into its first number_of_digits fractional digits in the given base, most significant first."""
    digits=[]
    remainder=value
    for _ in range(number_of_digits):
        remainder*=base
        whole=np.floor(remainder)
        digits.append(whole)
        remainder-=whole
    return digits
def digits_to_proportion(digits,base=256): # Intended for values between 0 and 1
    """Inverse of proportion_to_digits: recombine per-digit values (most significant first) into a number in [0,1)."""
    places=np.linspace(1,len(digits),len(digits))  # [1, 2, ..., len(digits)]
    weighted=np.asarray(digits)/base**places       # digit_i / base**(i+1)
    return np.sum(weighted,0)
#def encode_float_matrix_to_rgb_image(m):
# # Encoded precision of values between 0 and 1 as r,g,b (in 8-bit color) values where r g and b are each digits, with b being the most precise and r being the least precise
# #Formerly called 'rgb_encoded_matrix'
# m=np.matrix(m)
# assert len(m.shape)==2,"r.encode_float_matrix_to_rgb: Input should be a matrix of values between 0 and 1, which is not what you gave it! \n m.shape = \n"+str(m.shape)
# r,g,b=proportion_to_digits(m,base=256,number_of_digits=3)
# out=np.asarray([r,g,b])
# out=np.transpose(out,[1,2,0])
# out=out.astype(np.uint8)
# return out
def encode_float_matrix_to_rgba_byte_image(float_matrix):
    """
    Encode a float matrix with values in [0,1] into the 4 channels of an RGBA byte image.
    Each channel holds one base-256 digit, so roughly 32 bits of precision survive.
    The output can be saved as a .png file.
    It's useful for reading and storing floating-point matrices in .png files.
    Formerly called 'rgb_encoded_matrix'.
    """
    m=np.matrix(float_matrix)
    assert is_grayscale_image(m)
    assert is_a_matrix(m),'Please input a two-dimensional floating point matrix with values between 0 and 1. The input you gave is not a matrix.'
    assert len(m.shape)==2,"r.encode_float_matrix_to_rgb: Input should be a matrix of values between 0 and 1, which is not what you gave it! \n m.shape = \n"+str(m.shape)
    channels=proportion_to_digits(m,base=256,number_of_digits=4)  # [r, g, b, a] digit planes
    rgba=np.asarray(channels)          # shape (4, height, width)
    rgba=np.transpose(rgba,[1,2,0])    # -> (height, width, 4)
    return rgba.astype(np.uint8)
def decode_float_matrix_from_rgba_byte_image(image):
    """
    This function is the inverse of encode_float_matrix_to_rgba_image:
    it recovers the float matrix stored in an RGBA byte-image's channels.
    It's useful for reading and storing floating-point matrices in .png files.
    Formerly called 'matrix_decoded_rgb'.
    """
    assert is_rgba_image(image)
    assert is_byte_image(image)
    r,g,b,a=image.transpose([2,0,1])
    # Each channel is one base-256 digit, most significant (r) first
    return sum(channel/256**place for place,channel in enumerate((r,g,b,a),start=1))
def print_all_git_paths():
    """
    Search the user's home directory for git repositories (via `find ~ -name .git`),
    print each repo's folder name and path in color, and return (dirpaths, aliasnames).
    """
    fansi_print("Searching for all git repositories on your computer...",'green','underlined')
    tmp = shell_command("find ~ -name .git")# Find all git repositories on computer
    dirpaths=[x[:-4]for x in tmp.split('\n')]# Strip the trailing '.git' from each found path
    aliasnames=[(lambda s:(s[:s.find("/")])[::-1])((x[::-1])[1:])for x in dirpaths]# The last path component of each dirpath (the repo's folder name)
    dirpaths,aliasnames=sync_sort(dirpaths,aliasnames)
    for x in sorted(zip(aliasnames,dirpaths)):
        print(fansi(x[0],'cyan')+" "*(max(map(len,aliasnames))-len(x[0])+3)+fansi(x[1],None))# Pad names so the paths line up in a column
    return dirpaths,aliasnames
def is_int_literal(s:str):
    """
    True if s is a string of decimal digits, optionally prefixed by '+' or '-'.
    e.g. '123', '-5', '+42' --> True;  '', '+', '1.5', 'abc' --> False
    """
    if not s:
        return False  # Fixed: the empty string used to raise IndexError on s[0]
    if s[0] in ('-', '+'):
        return s[1:].isdigit()
    return s.isdigit()
def is_string_literal(s:str):
    """True if evaluating s as python yields a str (e.g. '"hello"'); False if it errors or yields anything else."""
    try:
        return isinstance(eval(s), str)
    except Exception:
        return False
def indentify(s:str,indent='\t'):
    """Prefix every line of s with indent. An integer indent means that many spaces."""
    prefix = ' ' * indent if isinstance(indent, int) else indent
    lines = s.split('\n')
    return '\n'.join(prefix + line for line in lines)
def unindent(string, indent=" "):
    """
    Removes common leading indentation from a multi-line string.
    Similar to textwrap.dedent - but allows you to specify your own indent characters.
    Fixed: no longer raises ValueError on an empty string, or when every line
    consists purely of indent characters.
    """
    def count_leading(line, char):
        # Number of leading indent units on this line
        return len(line) - len(line.lstrip(char))
    lines = string.splitlines()
    # Lines that are nothing but indent characters don't constrain the common indent
    levels = [count_leading(line, indent) for line in lines if line.strip(indent)]
    indent_level = min(levels, default=0)
    new_lines = [line[indent_level * len(indent) :] for line in lines]
    return line_join(new_lines)
def lrstrip_all_lines(s:str):
    """Strip leading and trailing whitespace from every line of s."""
    stripped = [line.strip() for line in s.split('\n')]
    return '\n'.join(stripped)
#Builds a length-l string of random codepoints (via rp's randint and int_list_to_string) - used as an unlikely-to-collide placeholder token (see search_replace_simul)
random_unicode_hash=lambda l:int_list_to_string([randint(0x110000-1)for x in range(l)])
def search_replace_simul(s:str,replacements:dict):
    """
    Attempts to make multiple simultaneous string .replace() at the same time
    EXAMPLE: search_replace_simul("Hello world",{"Hello":"world","world":"Hello"})
    WARNING: This method is NOT perfect, and sometimes makes errors. TODO: Fix it for all input cases
    """
    if not replacements:
        return s
    # search_replace_simul("Hello world",{"Hello":"world","world":"Hello"})
    l1 = replacements.keys()   # The search strings
    l2 = replacements.values() # The replacement strings
    l3 = [random_unicode_hash(10) for x in replacements] # One unlikely-to-collide placeholder token per rule
    ⵁ,l1,l2,l3=sync_sort([-len(x)for x in l1],l1,l2,l3)# Sort the keys in descending number of characters # Safe replacements: f and fun as keys: f won't be seen as in 'fun'
    # Two passes: first swap each search string for its unique placeholder, then
    # swap each placeholder for its replacement - so one rule's output cannot be
    # re-matched by another rule.
    for a,b in zip(l1,l3):
        s=s.replace(a,b)
    for a,b in zip(l3,l2):
        s=s.replace(a,b)
    return s
def shorten_url(url:str)->str:
    """Shorten url via the tinyurl.com API and return the resulting short link."""
    import contextlib
    import sys
    try:
        from urllib.parse import urlencode
    except ImportError:
        from urllib import urlencode       # Python 2 fallback
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen        # Python 2 fallback
    request_url = 'http://tinyurl.com/api-create.php?' + urlencode({'url': url})
    with contextlib.closing(urlopen(request_url)) as response:
        return response.read().decode('utf-8')
# Update: The following commented code is deprecated, since Google discontinued the ability to create new goo.gl URL's
# # goo.gl links are supposed to last forever, according to https://groups.google.com/forum/#!topic/google-url-shortener/Kt0bc5hx9HE
# # SOURCE: https://stackoverflow.com/questions/17357351/how-to-use-google-shortener-api-with-python
# # API Key source: https://console.developers.google.com/apis/credentials?project=dark-throne-182400
# # >>> goo_shorten_url('ryan-central.org')
# # ans = https://goo.gl/Gkgp86
# import requests
# import json
# post_url = 'https://www.googleapis.com/urlshortener/v1/url?key=AIzaSyBbNJ4ZPCAeDBGAVQKDikwruo3dD4NcsU4'# AIzaSyBbNJ4ZPCAeDBGAVQKDikwruo3dD4NcsU4 is my account's API key.
# payload = {'longUrl': url}
# headers = {'content-type': 'application/json'}
# r = requests.post(post_url, data=json.dumps(payload), headers=headers)
# # RIGHT NOW: r.text==
# # '''{
# # "kind":"urlshortener#url",
# # "id":"https://goo.gl/ZNp1VZ",
# # "longUrl":"https://console.developers.google.com/apis/credentials?project=dark-throne-182400"
# # }'''
# out=eval(r.text)
# assert isinstance(out,dict)
# return out['id']
# def gist(gist_body="Body",gist_filename="File.file",gist_description="Description"):
# # Older version:
# # def gist(code:str,file_name:str='CodeGist.code',username='[email protected]',password='d0gememesl0l'):
# # # Posts a gist with the given code and filename.
# # # >>> gist("Hello, World!")
# # # ans = https://gist.github.com/b5b3e404c414f7974c4ccb12106c4fe7
# # import requests,json
# # r = requests.post('https://api.github.com/gists',json.dumps({'files':{file_name:{"content":code}}}),auth=requests.auth.HTTPBasicAuth(username, password))
# # try:
# # return r.json()['html_url']# Returns the URL
# # except KeyError as e:
# # fansi_print("r.gist ERROR:",'red','bold',new_line=False)
# # fansi_print(" "+str(e)+" AND r.json() = "+str(r.json()),'red')
# from urllib.request import urlopen
# import json
# gist_post_data={'description':gist_description,
# 'public':True,
# 'files':{gist_filename:{'content':gist_body}}}
# json_post_data=json.dumps(gist_post_data).encode('utf-8')
# def upload_gist():
# # print('sending')
# url='https://api.github.com/gists'
# json_to_parse=urlopen(url,data=json_post_data)
# # print('received response from server')
# found_json=(b'\n'.join(json_to_parse.readlines()))
# return json.loads(found_json.decode())['html_url']
# return upload_gist()
# sgist=lambda *x:seq([gist,printed,open_url,shorten_url],*x)# Open the url of a gist and print it
def unshorten_url(shortened_url):
    """
    Takes a shortened URL and returns the long one
    EXAMPLE: unshorten_url('bit.ly/labinacube') --> 'https://oneoverzero.pythonanywhere.com/'
    https://stackoverflow.com/questions/3556266/how-can-i-get-the-final-redirect-url-when-using-urllib2-urlopen/3556287
    """
    from urllib.request import urlopen
    if not is_valid_url(shortened_url):
        # Try prepending a scheme - 'bit.ly/xyz' alone isn't a valid URL
        shortened_url='https://'+shortened_url
    assert is_valid_url(shortened_url),'Please input a valid URL!'
    # urlopen follows redirects; .url is the final destination
    return urlopen(shortened_url).url
def load_gist(gist_url:str):
    """
    Takes the URL of a gist, or the shortened url of a gist (by something like bit.ly), and returns the content inside that gist as a string
    EXAMPLE:
        >>> save_gist('AOISJDIO')
       ans = https://git.io/JI2Ez
        >>> load_gist(ans)
       ans = AOISJDIO
    """
    gist_url=unshorten_url(gist_url) #If we shortened the url, unshorten it first. Otherwise, this function will leave it alone.
    gist_id=[x for x in gist_url.split('/') if len(x)==32 and set(x)<=set('0123456789abcdef')] # A gist_id is like 162d6a7e7f0386208d323d35dd86a669 -- a 32-character lowercase-hex string
    assert len(gist_id)>0,'This is not a valid github GIST url'
    gist_id=gist_id[0] #Assume there's only one key in the url...
    gist_url='https://gist.githubusercontent.com/raw/'+gist_id #NOTE(review): this host looks like a mangled form of gist.githubusercontent.com - verify before relying on it
    gist_url+='/raw'
    pip_import('requests')
    import requests,json
    response=requests.get(gist_url)
    return response.content.decode()
    #Unreachable legacy approach, kept for reference:
    # response_json=json.loads(response.content)
    # file_name=list(response_json['files'])[0]
    # return response_json['files'][file_name]['content']
def shorten_github_url(url,title=None):
    """
    Doesn't work anymore! git.io was discontinued for some god forsaken reason :(
    Use rp.shorten_url instead (for backwards compatibility, this function now simply calls that)
    Uses git.io to shorten a url
    This method specifically only works for Github URL's; it doesn't work for anything else
    If title is specified, it will try to get you a particular name for your url (such as git.io/labinacube)
    NOTE: everything after the first `return` below is unreachable legacy git.io code, kept for reference.
    """
    return shorten_url(url) # git.io was discontinued :(
    #--- Unreachable legacy implementation (the old git.io API) follows ---
    if not is_valid_url(url):
        #Try to make it valid
        url='https://'+url
    assert is_valid_url(url)
    # print(url)
    pip_import('requests')
    import requests
    data = {'url': url, 'code':title}
    if not title: del data['code']
    r = requests.post('https://git.io/', data=data)
    out= r.headers.get('Location')
    if out is None:
        print("rp.shorten_github_url failed! Please update it; github must have changed somehow. Returning the response for debugging purposes.")
        return r
    # print(out)
    return out
#def post_gist(content:str,
# file_name:str='',
# description:str='',
# api_token:str='d65866e83aac7fc09093220a795ca66a5f7cc18d'):
# # Note: Please don't be a dick, this api_token is meant for everybody using this library to share. Don't abuse it.
# # Example:
# # >>> post_gist('Hello World!')
# # ans = https://api.github.com/gists/92d158541ae4f3732267194b1f1ac14d
# # >>> load_gist(ans)
# # ans = Hello World!
# #You can't post the api_token in a gist on github. If you do, github will disable that api_token.
# #To make sure that github doesn't revoke the api_token, we have to make sure it's not in the content string.
# content=content.replace(api_token,api_token[::-1])#Let's just reverse it.
# import urllib
# import json
# import datetime
# import time
# access_url = "https://api.github.com/gists"
# data={
# 'description':description,
# 'public':True,
# 'files':{
# file_name:
# {
# 'content':content
# }
# }
# }
# json_data=bytes(json.dumps(data),'UTF-8');
# req = urllib.request.Request(access_url) #Request
# req.add_header("Authorization", "token {}".format(api_token))
# req.add_header("Content-Type", "application/json")
# res = urllib.request.urlopen(req, data=json_data) #Response
# res_json = json.loads(res.readline())
# return res_json['url']
def save_gist(content:str,*,
              shorten_url=False,
              description:str='',
              filename:str='',
              token:str=None):
    """
    This function takes an input string, posts it as a gist on Github, then returns the URL of the new gist
    I've included a token that anybody using this library is allowed to use. Have fun, but please don't abuse it!
    EXAMPLE:
        >>> save_gist('AOISJDIO')
       ans = https://git.io/JI2Ez
        >>> load_gist(ans)
       ans = AOISJDIO
    You can't post the api_token in a gist on github. If you do, github will disable that api_token.
    To make sure that github doesn't revoke the api_token, we have to make sure it's not in the content string.
    NOTE: if you get a SSL Error that looks like
        URLError: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1123)>
    Then try running rp.r._fix_CERTIFICATE_VERIFY_FAILED_errors()
    """
    if token is None:
        #Token can't be in this code or github will revoke it
        token = 'g h p _ w d e m 3 K P j U G z N V h 7 G c c M J Y b J b s 6 z U 6 i 0 Y z X s o'.replace(' ','')
    import urllib.request, urllib.error, urllib.parse
    import json
    access_url = "https://api.github.com/gists"
    data = {
        "description": description,
        "public": True,
        "files": {
            filename: {
                "content": content
            }
        }
    }
    json_data=json.dumps(data)
    assert token not in json_data,'You cannot put the github API token anywhere in your gist, or else the API token will be revoked!'
    req = urllib.request.Request(access_url)
    req.add_header("Authorization", "token {}".format(token))
    req.add_header("Content-Type", "application/json")
    response=urllib.request.urlopen(req, data=json_data.encode())
    response=json.loads(response.read())
    gist_url=response['html_url']
    if gist_url is None:
        print("Save Gist: Failed! Returning response...")
        return response
    if shorten_url:
        gist_url=shorten_github_url(gist_url)
    try:
        #Try to keep track of all the gists we've created, in case we ever want to go back for some reason
        try:
            old_gists=open(_old_gists_path,'a+')
            old_gists.write(gist_url+'\n')
        finally:
            old_gists.close()
    except Exception as e:
        print(e)
        #It's no big deal if we can't though
        #Fixed: a stray `raise` here (followed by an unreachable `pass`) made any
        #logging failure abort save_gist even though the gist was already created
    return gist_url
def _fix_CERTIFICATE_VERIFY_FAILED_errors():
    """
    Attempt to repair 'CERTIFICATE_VERIFY_FAILED' SSL errors by upgrading certifi
    and symlinking the default OpenSSL CA file to certifi's certificate bundle.
    Adapted from: https://stackoverflow.com/questions/50236117/scraping-ssl-certificate-verify-failed-error-for-http-en-wikipedia-org
    """
    import os
    import os.path
    import ssl
    import stat
    import subprocess
    import sys
    STAT_0o775 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                 | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
                 | stat.S_IROTH | stat.S_IXOTH )
    def main():
        openssl_dir, openssl_cafile = os.path.split(
            ssl.get_default_verify_paths().openssl_cafile)
        print(" -- pip install --upgrade certifi")
        subprocess.check_call([sys.executable,
            "-E", "-s", "-m", "pip", "install", "--upgrade", "certifi"])
        import certifi
        # change working directory to the default SSL directory
        os.chdir(openssl_dir)
        relpath_to_certifi_cafile = os.path.relpath(certifi.where())
        print(" -- removing any existing file or link")
        try:
            os.remove(openssl_cafile)
        except FileNotFoundError:
            pass
        print(" -- creating symlink to certifi certificate bundle")
        os.symlink(relpath_to_certifi_cafile, openssl_cafile)
        print(" -- setting permissions")
        os.chmod(openssl_cafile, STAT_0o775)
        print(" -- update complete")
    # Fixed: this was guarded by `if __name__ == '__main__': main()`, which is
    # never true inside an imported module - so calling this function used to
    # do nothing at all. (save_gist's docstring tells users to call it.)
    main()
def random_namespace_hash(n:int=10,chars_to_choose_from:str="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"):
    """
    Return a random n-character string drawn from chars_to_choose_from.
    EXAMPLE:
        >>> random_namespace_hash(10)
       ans=DZC7B8GV74
    """
    return ''.join(random_element(chars_to_choose_from) for _ in range(n))
def random_passphrase():
    """
    Generates an easy-to-spell easy-to-remember passphrase
    of the form '<prefix-word>_<noun>', e.g. 'happy_hug'.
    EXAMPLE:
        >>> for _ in range(10): print(random_passphrase())
       happy_hug
       help_dart
       crave_crib
       cozy_drone
       shush_chump
       enter_ivory
       equal_essay
       react_morse
       curvy_koala
       blog_lid
    """
    #TODO: Upgrade this to more words
    #The word lists below are whitespace-separated literals; .split() turns each into a list of words
    prefixes="""agile ahead ajar alien alive alone amend ample amuse argue arise ashen avert avoid baggy baked balmy bask bleak bless blog blunt boned bony botch both bring brisk broke busy calm carve chief chomp
    civil clad clap clean clink clump come cozy crave crisp cure curvy cushy darn debug dense dice dizzy drab dried droop drown dusk dwell early eject elope elude emit enter equal erase erupt etch evade
    even evict false fax fend flaky flame fray fresh fried froth glad gooey grasp greet halt happy harm hasty heap hefty help humid hurt icy juicy lance late lazy legal lend lurk marry mousy musky nag
    near neat nutty outer petty plead plus poach polar prank pry quiet quote rant react relax repel rich rigid ripen ritzy romp same savor scoff scowl send shady shaky shine shout showy shun shush sift
    skew sleek slurp snort speak spend spew spoof spool stark stuck swear swim tacky task thank thud tint try tutor tweak twine twirl uncut undo unify untie utter vocal wavy widen wipe wired wiry yelp zoom
    """.split()
    nouns="""acid acorn acre affix aged agent aging agony aide aids aim alarm alias alibi aloe aloha amino angel anger angle ankle apple april apron area arena armor army aroma array arson art atlas atom attic
    audio award axis bacon badge bagel baker banjo barge barn bash basil batch bath baton blade blank blast blaze blend blimp blink bloat blob blot blush boast boat body boil bolt bonus book booth boss
    boxer breed bribe brick bride brim brink broad broil brook broom brush buck bud buggy bulge bulk bully bunch bunny bunt bush bust buzz cable cache cadet cage cake cameo canal candy cane canon cape
    card cargo carol carry case cash cause cedar chain chair chant chaos charm chase cheek cheer chef chess chest chew chili chill chip chop chow chuck chump chunk churn chute cider cinch city claim clamp
    clash clasp class claw clay clear cleat cleft clerk click cling clip cloak clock clone cloud coach coast coat cod coil coke cola cold colt coma comma cone cope copy coral cork cost cot couch cough
    cover craft cramp crane crank crate crawl crazy crepe crib crook crop cross crowd crown crumb crush crust cub cult cupid curl curry curse curve cut cycle dab dad daily dairy daisy dance dandy dart
    dash data date dawn deaf deal dean debit debt decal decay deck decoy deed delay denim dent depth desk dial diary dig dill dime diner disco dish disk ditch dock dodge doll dome donor dose dot dove down
    dowry doze drama draw dress drift drill drive drone drove drum dry duck duct dug duke dust duty dwarf eagle earth easel east ebony echo edge eel elbow elder elf elk elm elves empty emu entry envoy
    error essay evil exit fable fact fade fall fancy fang feast feed femur fence ferry fetch fever fiber fifth fifty film filth final finch fit five flag flap flask flick fling flint flip flirt float
    flock flop floss foam foe fog foil folk food fool found fox frail frame frill frisk front frost frown fruit gag gala game gap gas gear gecko geek gem genre gift gig given giver glass glide gloss glove
    glow glue goal going golf gong good goofy gore gown grab grain grant grape graph grass grave gravy gray green grid grief grill grip grit groom grope growl grub grunt guide gulf gulp guru gut guy habit
    half halo hash hatch hate haven hazel heat heave hedge hub hug hula hull hunk hunt hurry hush hut ice icing icon igloo image ion iron islam issue item ivory ivy jab jam jazz jeep jelly jet job jog
    jolly jolt joy judge juice july jump juror jury keep keg kick kilt king kite kitty kiwi knee koala ladle lady lair lake land lapel large lash lasso last latch left lemon lens lent level lever lid life
    lift lilac lily limb line lint lion lip list liver lunch lung lurch lure lying lyric mace maker malt mama mango manor map march mash match mate mocha mold moody morse motor motto mount mouse mouth
    move movie mud mug mulch mule mull mummy mural muse music mute nacho nail name nanny nap navy neon nerd nest net niece ninth oak oasis oat ocean oil old olive omen onion opal open opera otter ounce
    oven owl ozone pace pagan palm panic paper park party pasta patch path patio payer pecan penny pep perch perm pest petal plank plant plaza plot plow pluck plug pod poem poet point poise poker polka
    polo pond pony poppy pork poser pouch pound pout power press print prior prism prize probe prong proof props prude prune pug pull pulp pulse punch punk pupil puppy purr purse push putt quack quill
    quilt quota race rack radar radio raft rage raid rail rake rally ramp ranch range rank rash raven reach ream rebel relay relic reply rerun reset rhyme rice ride rinse riot rise risk rival river roast
    robe robin rock rogue roman rope rover royal ruby rug ruin rule rush rust rut sage saint salad salon salsa salt satin sauna sax say scale scam scan scare scarf scold scoop scope score scout scrap
    scrub scuff sect sedan self sepia serve set seven shade shaft shape share sharp shed sheep sheet shelf shell ship shirt shock shop shore shove shred shrug shy silk silly silo sip siren sixth size
    skate skid skier skip skirt skit sky slab slack slain slam slang slash slate sled sleep sleet slice slick sling slip slit slob slot slug slum slush small smash smell smile smirk smog snap snare snarl
    sneak sneer sniff snore snout snub snuff speed spill spoil spoke spoon sport spot spout spray spree spur squad squat squid stack staff stage stain stall stamp stand start state steam steep stem step
    stew stick sting stir stock stole stomp stool stoop stop storm stout stove straw stray strut stud stuff stump stunt suds sugar sulk surf sushi swab swan swarm sway sweat sweep swell swing swipe swoop
    syrup taco tag take tall talon tamer tank taper taps tart taste thaw theme thigh thing think thong thorn throb thumb thump tiara tidy tiger tile tilt trace track trade train trait trap trash tray
    treat tree trek trial tribe trick trio trout truck trump trunk tug tulip turf tusk tutu tweet twins twist uncle union unit upper user usher value vapor vegan venue verse vest veto vice video view
    virus visa visor vixen voice void volt voter vowel wad wafer wages wagon wake walk wand wasp watch water wheat whiff whole whoop wick widow width wife wilt wimp wind wing wink wise wish wok wolf wool
    word work worry wound wrath wreck wrist xerox yahoo yam yard year yeast yield yo-yo yodel yoga zebra zero zone
    """.split()
    noun   = random_element(nouns)
    prefix = random_element(prefixes)
    return prefix+'_'+noun
def latex_image(equation: str):
    """
    Returns an rgba image with the rendered latex string on it in numpy form

    Renders via the codecogs web service, so this requires internet access.
    Note: writes a 'temp.png' file into the current working directory.

    Parameters:
        equation (str): LaTeX source, e.g. r"\\sum_{n=3}^7x^2"

    Returns:
        numpy.ndarray: the rendered equation as loaded by load_image
    """
    import os,requests
    def formula_as_file(formula,file,negate=False): # Got this code off the web somewhere but i dont remember where now
        tfile=file
        if negate:
            tfile='tmp.png'
        response=requests.get(r'http://latex.codecogs.com/png.latex?\dpi{300} \huge %s' % formula)
        #Fail loudly on HTTP errors instead of silently writing an error page to disk
        response.raise_for_status()
        #Use a context manager so the file handle is closed even if the write fails
        with open(tfile,'wb') as f:
            f.write(response.content)
        if negate:
            os.system('convert tmp.png -channel RGB -negate -colorspace rgb %s' % file)
    formula_as_file(equation,'temp.png')
    return load_image('temp.png')
def display_image_in_terminal(image,dither=True,auto_resize=True,bordered=False):
    """
    Draw an image in the terminal as black-and-white unicode braille art (via the drawille package).

    image: a numpy image, or a path/URL string that load_image can handle
    dither: whether to dither when thresholding the image to binary
    auto_resize: if True, shrink images wider than twice the terminal width so they fit
    bordered: if True, pad the image so drawille doesn't crop away blank edges

    EXAMPLE: while True: display_image_in_terminal(load_image_from_webcam())
    EXAMPLE: Starfield
        def stars(density=.001,size=256):
            return as_float_image(np.random.rand(size,size)<density)
        def zoom(image,factor):
            return (crop_image(cv_resize_image(image,factor),*get_image_dimensions(image),origin='center'))
        image=stars()
        for _ in range(10000):
            image=image+stars()
            image=zoom(image,1.05)
            image*=.99
            display_image_in_terminal(image**1,bordered=True)
    """
    if isinstance(image,str):
        image=load_image(image)
    image=as_numpy_image(image,copy=False)

    #NOTE: these count rows and columns respectively (the helpers were previously named width/height, swapped)
    def num_rows(img) -> int:
        return len(img)
    def num_cols(img) -> int:
        return len(img[0])

    pip_import('drawille')
    from drawille import Canvas

    if get_image_width(image)>get_terminal_width()*2 and auto_resize==True:
        scale=(max(1,get_terminal_width()*2))/get_image_width(image)
        image=resize_image(image,scale,'nearest')

    binary=as_binary_image(as_grayscale_image(image),dither=dither)
    if bordered:
        #This prevents drawille from cropping the image zeros
        binary=bordered_image_solid_color(binary)
        binary=as_binary_image(as_grayscale_image(binary))

    canvas=Canvas()
    for row in range(num_rows(binary)):
        for col in range(num_cols(binary)):
            if binary[row,col]:
                canvas.set(col,row)
    print(canvas.frame())
_use_rp_timg=True #Prefer rp's bundled c-optimized timg (rp.libs.timg); display_image_in_terminal_color flips this to False when it raises PermissionError (e.g. no write permission to compile)
def display_image_in_terminal_color(image,*,truecolor=True):
    """
    Will attempt to draw a color image in the terminal
    This is slower than display_image_in_terminal, and relies on both unicode and terminal colors

    image: a numpy image, or a file path / URL that will be loaded first
    truecolor: use 24-bit ANSI colors when True, else an 8-bit palette

    EXAMPLE:
        >>> while True:
        ...     _terminal_move_cursor_to_top_left()
        ...     image=load_image_from_webcam()
        ...     display_image_in_terminal_color(image)
    EXAMPLE:
        display_image_in_terminal_color(load_image('https://i.guim.co.uk/img/media/faf20d1b2a98cbca9f5eb2946254566527394e15/78_689_3334_1999/master/3334.jpg?width=1200&height=900&quality=85&auto=format&fit=crop&s=69707184a1b38f36fc077f7cafba1130'))#Display Kim Petras in the terminal
    """
    #Previously this function also imported sys and importlib.util, which were never used
    import rp.libs.timg as timg #A c-optimized version of timg

    if file_exists(image) or is_valid_url(image):
        image=load_image(image)
    assert is_image(image)
    image=as_numpy_image(image,copy=False)
    image=as_rgb_image(image)
    image=as_byte_image(image)
    image=resize_image_to_fit(image,width=get_terminal_width(),allow_growth=False)

    if get_image_height(image)%2:
        #We can only display pixel heights of 2,4,6,8 etc.
        #To prevent it from cutting off the bottom pixel, add some black if it's odd...
        #For example, display_image_in_terminal_color(uniform_float_color_image(5,10,(255,0,255,255)))
        image = bordered_image_solid_color(image, thickness=0, bottom=1, color="black")

    def _helper(timg):
        #Render `image` with the given timg module (either rp's bundled copy or the pip package)
        timg_renderer = timg.Renderer()
        timg_renderer.load_image(rp.as_pil_image(image))

        # Choose the appropriate rendering method based on truecolor flag
        # ┌─────────┬──────────────────────────────────────────────┐
        # │ Method  │ Description                                  │
        # ├─────────┼──────────────────────────────────────────────┤
        # │ sixel   │ use sixels - best quality but lowest support │
        # │ a8f     │ low-resolution ANSI 8-bit palette            │
        # │ a24f    │ low-resolution ANSI 24-bit palette           │
        # │ a8h     │ high-resolution ANSI 8-bit palette           │
        # │ a24h    │ high-resolution ANSI 24-bit palette          │
        # │ ascii   │ ASCII art                                    │
        # └─────────┴──────────────────────────────────────────────┘
        method = 'a24h' if truecolor else 'a8h'

        # Render the image with the selected method
        timg_renderer.render(timg.METHODS[method]['class'])

    global _use_rp_timg
    if _use_rp_timg:
        try:
            _helper(timg)
        except PermissionError:
            #On XCloud, it fails to build because we have no write permission in that directory for the c compilation of my optimized version
            _use_rp_timg=False
    if not _use_rp_timg:
        #Fall back to the pip-installed timg package
        pip_import('timg')
        import timg
        _helper(timg)
def display_image_in_terminal_imgcat(image):
    """
    Can display images in some terminals as actual images
    Works in:
        iterm2
        wezterm
        tmux (if configured properly)
        hyper (with plugin: https://github.com/Rasukarusan/hyper-imgcat)
    Does not work in:
        alacritty
        kitty
        terminal.app
    EXAMPLE:
        while True:
            display_image_in_terminal_imgcat(cv_resize_image(load_image_from_webcam(), 0.1))
    """
    pip_import("imgcat")
    import imgcat

    if isinstance(image, str):
        #A file path - hand imgcat the open file object directly
        image = open(image)
    else:
        assert is_image(image)
        #Normalize to RGB bytes and cap the size (exact limit unknown; 1024x1024 is plenty)
        normalized = as_byte_image(as_rgb_image(image))
        shrunk = resize_image_to_fit(normalized, width=1024, height=1024, allow_growth=False)
        image = as_pil_image(shrunk)

    imgcat.imgcat(image)
def display_video_in_terminal_color(frames, *, loop=True, framerate=None):
    """
    Display a video in the terminal with a progress bar.
    Args:
        frames (list): List of frames to display
        loop (bool): Whether to loop the video indefinitely
        framerate (int, optional): Target frames per second. If set, will sleep to maintain this rate.
    EXAMPLE:
        display_video_in_terminal_color(load_webcam_stream(), framerate=30)
    """
    import time
    while True:
        for i, f in enumerate(frames):
            start_time = time.time() if framerate else None

            display_image_in_terminal_color(f)
            _terminal_move_cursor_to_top_left()

            w = get_terminal_width()
            if has_len(frames):
                #Draw a progress bar across the full terminal width.
                #max(1,...) guards against ZeroDivisionError for single-frame videos.
                fansi_print(
                    unicode_loading_bar(
                        i / max(1, len(frames) - 1) * w * 8, chars="▏▎▍▌▋▊▉█"
                    ).ljust(w),
                    "white green on black blue white",
                )

            # Sleep to maintain target framerate if specified
            if framerate:
                elapsed = time.time() - start_time
                target_frame_time = 1.0 / framerate
                sleep_time = max(0, target_frame_time - elapsed)
                if sleep_time > 0:
                    time.sleep(sleep_time)

        if not loop:
            break
def auto_canny(image,sigma=0.33,lower=None,upper=None):
    """
    Takes an image, returns the canny-edges of it (a binary matrix)

    sigma: controls how far the auto-chosen thresholds spread around the image's median intensity
    lower, upper: explicit Canny thresholds; when given they override the automatic ones
    """
    #Previously pip_import('cv2') was redundantly called twice in a row
    cv2=pip_import('cv2')
    image=as_numpy_image(image,copy=False)
    if image.dtype!=np.uint8:
        image=full_range(image,0,255).astype(np.uint8)

    # compute the median of the single channel pixel intensities
    v=np.median(image)

    # apply automatic Canny edge detection using the computed median
    lower=int(max(0,(1.0 - sigma) * v)) if lower is None else lower
    upper=int(min(255,(1.0 + sigma) * v)) if upper is None else upper
    edged=cv2.Canny(image,lower,upper)

    # return the edged image
    return edged
def skeletonize(image):
    """
    Morphologically skeletonize an image.
    Prefers the scikit-image implementation, falling back to an OpenCV-based one
    when skimage is unavailable or fails.
    """
    try:
        return _skimage_skeletonize(image)
    except Exception:
        #Warning: The current _cv_skeletonize method produces different and inferior results than that of _skimage_skeletonize
        return _cv_skeletonize(image)
def _skimage_skeletonize(image):
    """ Skeletonize via scikit-image. See https://scikit-image.org/docs/dev/auto_examples/edges/plot_skeleton.html """
    pip_import('skimage')
    from skimage.morphology import skeletonize
    binary=as_binary_image(as_grayscale_image(image))
    return skeletonize(binary)
def _cv_skeletonize(img):
    """ OpenCV function to return a skeletonized version of img, a Mat object"""
    # Found this on the web somewhere
    # hat tip to http://felix.abecassis.me/2011/09/opencv-morphological-skeleton/
    cv2=pip_import('cv2')

    work=img.astype(np.uint8).copy() # don't clobber the caller's array
    skeleton=np.zeros_like(work)
    cross=cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))

    #Iteratively erode; the skeleton accumulates the pixels lost to morphological opening
    while cv2.countNonZero(work):
        eroded=cv2.morphologyEx(work,cv2.MORPH_ERODE,cross)
        opened=cv2.morphologyEx(eroded,cv2.MORPH_DILATE,cross)
        skeleton=cv2.bitwise_or(skeleton,cv2.subtract(work,opened))
        work=eroded

    return skeleton
def get_edge_drawing(image):
    """
    Alternative to Canny Edges that's more robust

    Extract edges from an image using EdgeDrawing (ED) algorithm.

    EdgeDrawing is a real-time edge detection algorithm developed by Cihan Topal and Cuneyt Akinlar.
    It works by first identifying anchor points in the image (pixels with high gradient magnitude),
    then linking these anchor points to form continuous edge segments.

    This function uses the Parameter-Free EdgeDrawing (PF-ED) variant, which
    automatically determines gradient and anchor thresholds from the image.

    Reference:
        Topal, C., & Akinlar, C. (2012). Edge drawing: A combined real-time edge and segment
        detector. Journal of Visual Communication and Image Representation, 23(6), 862-872.

    Args:
        image: numpy.ndarray in HW3 format (height, width, 3 channels)

    Returns:
        Binary edge map as numpy.ndarray

    EXAMPLE:
        >>> #Live Demo
        ... stream = load_webcam_stream()
        ... for frame in stream:
        ...     can = auto_canny(frame)
        ...     edg = get_edge_drawing(frame)
        ...     display_image(horizontally_concatenated_images(frame,can, edg))
    """
    #Use pip_import for consistency with the rest of this file (cv2 might not be installed yet)
    #Also removed an unused `import numpy as np`
    cv2 = pip_import('cv2')

    gray = rp.as_byte_image(rp.as_grayscale_image(image))

    # Initialize edge detector in Parameter-Free mode (thresholds derived from the image)
    ed = cv2.ximgproc.createEdgeDrawing()
    params = cv2.ximgproc.EdgeDrawing.Params()
    params.PFmode = True  # use Parameter-Free mode
    ed.setParams(params)

    # Detect edges and get binary edge map
    edges = ed.detectEdges(gray)
    edge_map = ed.getEdgeImage(edges)

    return edge_map
# noinspection PyTypeChecker
def print_latex_image(latex: str):
    r"""
    Renders a latex string and prints it to the console as unicode braille art.

    >>> print_latex_image("\sum_{n=3}^7x^2")
    ⠀⠀⠀⠀⠠⠟⢉⠟
    ⠀⠀⠀⠀⠀⠀⡏
    ⠀⠀⠀⠀⠀⠀⠃
    ⢀⢀⣀⣀⣀⣀⣀⣀⣀⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣠⡀
    ⠀⠙⠄⠀⠀⠀⠀⠀⠀⠈⠉⢦⠀⠀⠀⠀⠀⠀⠀⠛⠀⡸
    ⠀⠀⠈⢢⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⡞⣡
    ⠀⠀⠀⠀⠑⡀⠀⠀⠀⠀⠀⠀⠀⠀⠰⠋⣹⠉⠃⠈⠉⠉
    ⠀⠀⠀⢀⡔⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣠⣏⣠⠆
    ⠀⠀⡠⠊⠀⠀⠀⠀⠀⠀⠀⣠
    ⢀⢼⣤⣤⣤⣤⣤⡤⠤⠤⠴⠁
    ⢀⠀⣀⠀⠀⠀⠀⠀⠀⠐⠏⢹
    ⢣⠏⢨⠃⢘⣛⣛⣛⣋⢀⠈⠙⡄
    ⠘⠀⠘⠊⠀⠀⠀⠀⠀⠘⠒⠚
    """
    #Render, invert (latex comes out black-on-white), then draw in the terminal without dithering
    rendered = latex_image(latex)
    rendered = inverted_image(rendered)
    display_image_in_terminal(rendered, dither=False)
# cd=os.chdir
#Source snippet defining short aliases for common image functions.
#NOTE(review): presumably exec'd somewhere by the pseudo-terminal to set up abbreviations - confirm where this is consumed before editing.
image_acro="""di=display_image
li=load_image
dgi=display_grayscale_image
lg=line_graph
cv2=pip_import('cv2')
"""
# def remove_alpha_channel(image:np.ndarray,shutup=False):
# # Strips an image of its' alpha channel if it has one, otherwise basically leaves the image alone.
# sh=image.shape
# l=len(sh)
# if l==2 and not shutup:
# # Don't break the user's script but warn them: this image is not what they thought it was.
# print("r.remove_alpha_channel: WARNING: You fed in a matrix; len(image.shape)==2")
# return image
# if
# assert l==3,'Assuming that it has color channels to begin with, and that its not just a matrix of numbers'
# assert 3<=sh[2]<=4,'Assuming it has R,G,B or R,G,B,A'
#
# return image[:,:,:2]
# def is_valud_url(url: str) -> bool:
# # PROBLEM:
# # >>> ivu("google.com")
# # ans=False
# # I DID NOT WRITE THIS WHOLE FUNCTION ∴ IT MIGHT NOT WORK PERFECTLY. THIS IS FROM: http://stackoverflow.com/questions/452104/is-it-worth-using-pythons-re-compile
# import re
# regex=re.compile(
# r'^(?:http|ftp)s?://' # http:// or https://
# r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
# r'localhost|' # localhost...
# r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
# r'(?::\d+)?' # optional port
# r'(?:/?|[/?]\S+)$',re.IGNORECASE).match(url)
# return regex is not None and (lambda ans:ans.pos == 0 and ans.endpos == len(url))(g.fullmatch(url))
import rp.rp_ptpython.prompt_style as ps
ps.__all__+=("PseudoTerminalPrompt",) #Export the custom prompt class defined later in this file
_prompt_style_path=__file__+'.rp_prompt_style' #File that persists the user's chosen prompt string
_get_prompt_style_cached=None #Lazy cache for _get_prompt_style; None means not loaded yet
def _get_prompt_style():
    """ Return the saved prompt string, falling back to ' >>> ' if none is saved. Cached after the first read. """
    global _get_prompt_style_cached
    if _get_prompt_style_cached is None:
        try:
            _get_prompt_style_cached=text_file_to_string(_prompt_style_path)
        except Exception:
            _get_prompt_style_cached=' >>> '
    return _get_prompt_style_cached
def _get_cdh_back_names():
    """ Folder names from the CD history, deduplicated case-insensitively - used for autocompleting CDH or B """
    names = get_path_names(_get_cd_history())
    deduped = unique(reversed(names), key=str.lower)
    return [name for name in deduped if name.strip()]
def _user_path_ans(ans):
    """
    Abbreviate a path (or list/tuple of paths) by replacing the home-directory prefix with '~'.

    EXAMPLE:
        >>> ans = /Users/burgert/miniconda3/lib/python3.12/site-packages/rp
        >>> _user_path_ans(ans)
        ans = ~/miniconda3/lib/python3.12/site-packages/rp
    """
    if isinstance(ans, (list, tuple)):
        return [_user_path_ans(item) for item in ans]
    path = get_absolute_path(str(ans))
    home = get_home_directory()
    if path.startswith(home):
        path = '~' + path[len(home):]
    return path
def _cdh_back_query(query):
    """
    Resolve a CDH/B query string to a directory from the CD history.

    Matching is fuzzy and case-insensitive against the *name* (last path component)
    of each history entry. Repeating the same query cycles between matches rather
    than always returning the same directory.

    Raises:
        IndexError: when no (still-existing) history entry matches the query.
    """
    assert isinstance(query, str)
    # Given we CDH to the same query over and over again, we should cycle between matches. That's what all the below logic is to ensure.
    lines = _get_cd_history()
    lines = lines[::-1]  # Newest entries first
    def matches(line):
        name = get_path_name(line)
        return fuzzy_string_match(query, name, case_sensitive=False)
    # consecutive_matches: matching entries at the very top of the reversed history (where we are / just were)
    # other_matches: matching entries further back - preferred so repeated queries cycle
    # deleted_matches: matches skipped because their folder no longer exists
    consecutive_matches = []
    other_matches = []
    non_matches = []
    deleted_matches = []
    for line in lines:
        if matches(line):
            if not non_matches:
                consecutive_matches.append(line)
            else:
                other_matches.append(line)
        else:
            non_matches.append(line)
    if other_matches:
        # Try to choose the first non-consecutive match so we can cycle
        while other_matches and not folder_exists(other_matches[0]):
            deleted_matches.append(other_matches[0])
            del other_matches[0]
        if other_matches:
            return other_matches[0]
    if consecutive_matches:
        # Let us cycle through history - if we choose the first we'll be stuck
        while consecutive_matches and not folder_exists(consecutive_matches[0]):
            deleted_matches.append(consecutive_matches[0])
            del consecutive_matches[0]
        if consecutive_matches:
            return consecutive_matches[-1]
    if deleted_matches:
        raise IndexError("None of the CDH matches for " + repr(query) + " still exist on your filesystem. Use CDH CLEAN to delete them.")
    else:
        raise IndexError("No CDH matches for " + repr(query))
_cd_history_size_limit=100000#To avoid spamming the console when we use CDH, limit the number of recent directories to this amount #UPDATE: I decided to make this effectively limitless (100000 is very big lol). Why limit it?
_cd_history_path=__file__+'.rp_cd_history.txt' #Plain-text file storing one visited directory per line
def _get_cd_history():
    """ Return the list of previously visited directories (oldest first), or [] if the history file is missing/unreadable """
    try:
        entries = line_split(text_file_to_string(_cd_history_path))
        return entries[:_cd_history_size_limit]
    except Exception:
        return []
def _add_to_cd_history(path:str):
    """
    Append `path` to the persistent CD history, deduplicating so only the most
    recent occurrence of each directory is kept. The file write happens on a
    background task so the prompt isn't blocked.
    """
    if path=='.':
        return

    def dedupe(entries:list):
        #Keep only the LAST occurrence of each entry, preserving overall order
        seen=[]
        for entry in reversed(entries):
            if entry not in seen:
                seen.append(entry)
        return seen[::-1]

    import rp.prompt_toolkit.history as h

    @h.run_task
    def task():
        history=_get_cd_history()
        history.append(path)
        history=dedupe(history)
        string_to_text_file(_cd_history_path,line_join(history))
_last_dir=None #The working directory seen by the previous _update_cd_history call
def _update_cd_history():
    """ If the current directory changed since the last call, record it in the CD history and refresh the importable-module cache """
    global _last_dir
    current=get_current_directory()
    if _last_dir==current:
        return
    _last_dir=current
    try:
        _add_to_cd_history(current)
        from rp.rp_ptpython.completer import get_all_importable_module_names
        get_all_importable_module_names()#Refresh
    except FileNotFoundError:
        #This will happen if the folder we're currently working in is deleted. Just skip updating the history...
        pass
cdc_protected_prefixes=[]
def _cdh_folder_is_protected(x):
return (any(x.startswith(prefix) for prefix in cdc_protected_prefixes)) and not folder_exists(x)
def _clean_cd_history():
    """
    Removes all nonexistant paths from the CD history file (i.e. the red entries),
    keeping entries protected by cdc_protected_prefixes.
    """
    kept=[entry for entry in _get_cd_history() if path_exists(entry) or _cdh_folder_is_protected(entry)]
    string_to_text_file(_cd_history_path,line_join(kept))
def set_prompt_style(style:str=None):
    """
    Interactively change (and persist) the REPL prompt string.
    When style is None, presents a menu of preset styles plus custom/cancel options.
    The chosen style is saved to _prompt_style_path and the cached style is invalidated.
    """
    global _get_prompt_style_cached

    print('Running rp.set_prompt_style:')
    default_prompt_styles=[' ⮤ ',' >>> ',' >> ',' > ',' ▶ ',' ▶▶ ',' ►► ' ,' ▷▷ ',' ▷ ',' --> ',' ––> ',' 🠥 ','',' 🡩 ',' ➤ ',' ⮨ ']
    cancel_message='Cancelled setting new prompt style.'

    if style is None:
        custom_option='(custom prompt style)'
        cancel_option='(cancel)'
        choice=input_select('No style was specified. Please select a new prompt style:',[custom_option,cancel_option]+default_prompt_styles)
        if choice==cancel_option:
            print(cancel_message)
            return
        if choice==custom_option:
            print('Enter a custom prompt style:')
            style=input()
        else:
            style=choice

    assert isinstance(repr(style),str)

    #Show the user a before/after comparison
    fansi_print("Displaying current prompt style:",'blue')
    print(repr(_get_prompt_style()))
    fansi_print("Displaying new prompt style:",'blue')
    print(repr(style))
    print(fansi('Some other styles you might want to consider: ','blue')+repr(default_prompt_styles)[1:-1])

    if not input_yes_no("Are you sure you want to switch?"):
        print('...ok. Will not save new prompt style')
        if input_yes_no('Would you like to select a different style instead?'):
            set_prompt_style()
        else:
            print(cancel_message)
        return

    try:
        string_to_text_file(_prompt_style_path,style)
        _get_prompt_style_cached=None#Invalidate the cache, forcing it to reload
    except BaseException as e:
        print("Failed to save new prompt...displaying error")
        print_stack_trace(e)
class PseudoTerminalPrompt(ps.ClassicPrompt):
    """ Prompt style whose input-prompt text comes from the user's saved prompt string (see _get_prompt_style) """
    def in_tokens(self,cli):
        pip_import('pygments')
        from pygments.token import Token
        prompt_text=_get_prompt_style()
        return [(Token.Prompt,prompt_text)]
setattr(ps,'PseudoTerminalPrompt',PseudoTerminalPrompt)
default_python_input_eventloop = None # Singleton for python_input (referenced by the commented-out python_input implementation below)
# def python_input(namespace):
# try:
# from rp.prompt_toolkit.shortcuts import create_eventloop
# from ptpython.python_input import PythonCommandLineInterface,PythonInput as Pyin
# global default_python_input_eventloop
# pyin=Pyin(get_globals=lambda:namespace)
# pyin.enable_mouse_support=False
# pyin.enable_history_search=True
# pyin.highlight_matching_parenthesis=True
# pyin.enable_input_validation=False
# pyin.enable_auto_suggest=False
# pyin.show_line_numbers=True
# pyin.enable_auto_suggest=True
# # exec(mini_terminal)
# pyin.all_prompt_styles['Pseudo Terminal']=ps.PseudoTerminalPrompt()
# # ps.PseudoTerminalPrompt=PseudoTerminalPrompt
# pyin.prompt_style='Pseudo Terminal'
#
# default_python_input_eventloop=default_python_input_eventloop or PythonCommandLineInterface(create_eventloop(),python_input=pyin)
# #
# # try:
# code_obj = default_python_input_eventloop.run()
# if code_obj.text is None:
# print("THE SHARKMAN SCREAMS")
# return code_obj.text
# except Exception as E:
# print_stack_trace(E)
# # except BaseException as re:
# # print_stack_trace(re)
# # print("THE DEMON SCREAMS")
def split_into_sublists(l, sublist_len: int, *, strict=False, keep_remainder=True):
    """
    Split sequence l into consecutive sublists of length sublist_len.

    If strict: sublist_len MUST evenly divide len(l)
    It will return a list of tuples, unless l is a string, in which case it will return a list of strings
    keep_remainder is not applicable if strict
    if not keep_remainder and sublist_len DOES NOT evenly divide len(l), we can be sure that all tuples in the output are of len sublist_len, even though the total number of elements in the output is less than in l.

    NOTE: strict and keep_remainder are keyword-only arguments (the old doctest
    examples passed them positionally, which never worked).

    EXAMPLES:
        >>> split_into_sublists([1,2,3,4,5,6,7,8,9],3)                         -> [(1,2,3),(4,5,6),(7,8,9)]
        >>> split_into_sublists([1,2,3,4,5,6,7,8,9],4)                         -> [(1,2,3,4),(5,6,7,8),(9,)]
        >>> split_into_sublists([1,2,3,4,5,6,7,8,9],5)                         -> [(1,2,3,4,5),(6,7,8,9)]
        >>> split_into_sublists([1,2,3,4,5,6,7,8,9],66)                        -> [(1,2,3,4,5,6,7,8,9)]
        >>> split_into_sublists([1,2,3,4,5,6,7,8,9],66,keep_remainder=False)   -> []
        >>> split_into_sublists([1,2,3,4,5,6,7,8,9],5 ,keep_remainder=False)   -> [(1,2,3,4,5)]
        >>> split_into_sublists([1,2,3,4,5,6,7,8,9],4 ,keep_remainder=False)   -> [(1,2,3,4),(5,6,7,8)]
        >>> split_into_sublists([1,2,3,4,5,6,7,8,9],4 ,strict=True)            -> ERROR: ¬ 4 | 9
    """
    import operator
    try:
        #Normalize any int-like (int, numpy integers) via __index__; floats/strings are rejected.
        #Previously this used is_number(), which let floats through only for them to crash later.
        n = operator.index(sublist_len)
    except TypeError:
        raise AssertionError('sublist_len should be an integer, but got type '+repr(type(sublist_len)))

    if strict:
        assert not len(l)%n,'len(l)=='+str(len(l))+' and sublist_len=='+str(n)+': strict mode is turned on but the sublist size doesnt divide the list input evenly. len(l)%sublist_len=='+str(len(l)%n)+'!=0'

    #zip over n copies of a single iterator yields the full chunks; the slice picks up any remainder
    output=list(zip(*(iter(l),) * n))+([tuple(l[len(l)-len(l)%n:])] if len(l)%n and keep_remainder else [])

    if isinstance(l,str):
        output=[''.join(substring) for substring in output]

    return output
def split_into_n_sublists(l, n):
    """
    Splits the input sequence `l` into `n` sublists as evenly as possible.
    Supports any sequence `l` that implements slicing. Strings are returned
    as a list of `n` strings.

    Parameters:
        l (sequence): The sequence to be split.
        n (int): The number of sublists to split `l` into.

    Returns:
        list: A list containing `n` sublists (or `n` strings when `l` is a string).

    Raises:
        ValueError: If `n` is not a positive integer.

    Examples (corrected to match the actual boundary arithmetic):
        >>> split_into_n_sublists([1, 2, 3, 4, 5], 3)
        [[1], [2, 3], [4, 5]]
        >>> split_into_n_sublists([1, 2, 3, 4, 5, 6], 4)
        [[1], [2, 3], [4], [5, 6]]
        >>> split_into_n_sublists('abcde', 3)
        ['a', 'bc', 'de']
        >>> split_into_n_sublists([], 3)
        [[], [], []]
    """
    if n <= 0:
        raise ValueError("rp.split_into_n_sublists: n must be greater than 0 but n is "+str(n))

    if isinstance(l, str):
        #BUGFIX: this used to be ''.join(split_into_n_sublists(list(l), n)), which crashed
        #with TypeError (joining lists of chars) and would have collapsed the result anyway.
        return [''.join(chunk) for chunk in split_into_n_sublists(list(l), n)]

    L = len(l)
    #Boundary indices spaced as evenly as integer truncation allows
    indices = [int(i * L / n) for i in range(n + 1)]
    return [l[indices[i]:indices[i + 1]] for i in range(n)]
def split_into_subdicts(d, subdict_size: int, strict=False, keep_remainder=True):
    """
    Splits a dictionary into a list of subdictionaries, each with subdict_size keys
    (insertion order preserved; the last subdict may be smaller unless strict).

    If strict: subdict_size MUST evenly divide len(d)
    keep_remainder is not applicable if strict
    if not keep_remainder and subdict_size DOES NOT evenly divide len(d), we can be sure that all subdictionaries in the output are of size subdict_size, even though the total number of elements in the output is less than in d.

    EXAMPLES:
        >>> split_into_subdicts({'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6}, 2)
        [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}, {'e': 5, 'f': 6}]
        >>> split_into_subdicts({'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}, 3)
        [{'a': 1, 'b': 2, 'c': 3}, {'d': 4, 'e': 5}]
        >>> split_into_subdicts({'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}, 3, strict=True)
        AssertionError: len(d)==5 and subdict_size==3: strict mode is turned on but the subdict size doesn't divide the dictionary evenly. len(d)%subdict_size==2!=0
        >>> split_into_subdicts({'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}, 3, keep_remainder=False)
        [{'a': 1, 'b': 2, 'c': 3}]
    """
    assert isinstance(subdict_size, int), 'subdict_size should be an integer, but got type ' + repr(type(subdict_size))
    if strict:
        assert not len(d) % subdict_size, 'len(d)==' + str(len(d)) + ' and subdict_size==' + str(subdict_size) + ': strict mode is turned on but the subdict size doesn\'t divide the dictionary evenly. len(d)%subdict_size==' + str(len(d) % subdict_size) + '!=0'

    key_chunks = split_into_sublists(list(d), subdict_size, strict=strict, keep_remainder=keep_remainder)
    return [dict((key, d[key]) for key in chunk) for chunk in key_chunks]
def split_into_n_subdicts(d, n):
    """
    Splits a dictionary into a list of n subdictionaries as evenly as possible.

    Parameters:
        d (dict): The dictionary to be split.
        n (int): The number of subdictionaries to split `d` into.

    Returns:
        list: A list containing `n` subdictionaries.

    Raises:
        ValueError: If `n` is not a positive integer.

    Examples (corrected to match split_into_n_sublists' actual boundary arithmetic):
        >>> split_into_n_subdicts({'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}, 3)
        [{'a': 1}, {'b': 2, 'c': 3}, {'d': 4, 'e': 5}]
        >>> split_into_n_subdicts({'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6}, 4)
        [{'a': 1}, {'b': 2, 'c': 3}, {'d': 4}, {'e': 5, 'f': 6}]
        >>> split_into_n_subdicts({'a': 1, 'b': 2, 'c': 3}, 5)
        [{}, {'a': 1}, {}, {'b': 2}, {'c': 3}]
        >>> split_into_n_subdicts({}, 3)
        [{}, {}, {}]
    """
    if n <= 0:
        raise ValueError("rp.split_into_n_subdicts: n must be greater than 0, but n is "+str(n))
    keys = list(d)
    keys_sublists = split_into_n_sublists(keys, n)
    return [{key:d[key] for key in key_sublist} for key_sublist in keys_sublists]
def join_with_separator(iterable, separator, *, lazy=False, expand_separator=False):
    """
    Intersperse a separator between elements of an iterable.

    Args:
        iterable (iterable): The iterable to intersperse.
        separator: The separator to intersperse between elements.
        lazy (bool, optional): If True, return a generator. If False, return a list. Defaults to False.
        expand_separator (bool, optional): If True, the separator is expanded into its individual elements
            and interspersed between the elements of the iterable. Defaults to False.

    Returns:
        list or generator: A list or generator with the separator interspersed between elements.

    Examples:
        >>> join_with_separator([], None)
        []
        >>> join_with_separator([1, 2, 3, 4, 5], None)
        [1, None, 2, None, 3, None, 4, None, 5]
        >>> list(join_with_separator([1, 2], None, lazy=True))
        [1, None, 2]
        >>> join_with_separator(['a', 'b', 'c'], '...')
        ['a', '...', 'b', '...', 'c']
        >>> join_with_separator(['a', 'b', 'c'], '...', expand_separator=True)
        ['a', '.', '.', '.', 'b', '.', '.', '.', 'c']
    """
    def interspersed():
        first = True
        for item in iterable:
            if not first:
                if expand_separator:
                    yield from separator
                else:
                    yield separator
            first = False
            yield item

    if lazy:
        return interspersed()
    return list(interspersed())
def rotate_image(image, angle_in_degrees, interp="bilinear"):
    """
    Returns a rotated image by angle_in_degrees, clockwise.

    The output image size is usually not the same as the input size, unless the angle
    is 180 (or, for square images, 90/180/270) - usually it grows to fit the rotated content.
    The alpha channel is rotated alongside the color channels.

    EXAMPLE:
        def create_checkerboard_animation(image_url, D=3):
            img = crop_image_to_square(load_image(image_url, use_cache=True))
            tiles = split_tensor_into_regions(img, D, D)
            frames = crop_images_to_max_size(
                [
                    tiled_images(
                        [
                            rotate_image(tile, angle * (1 if (i // D + i % D) % 2 else -1))
                            for i, tile in enumerate(tiles)
                        ],
                        border_thickness=0,
                    )
                    for angle in [*[0] * 15, *range(91), *[90] * 15]
                ],
                origin="center",
            )
            display_video((frames + frames[::-1]) * 50, framerate=60)
        create_checkerboard_animation(
            "https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png"
        )
    """
    rgba = as_rgba_image(as_numpy_image(image, copy=False), copy=False)
    rotate = lambda channels: _rotate_rgb_image(channels, angle_in_degrees, interp)
    new_alpha = rotate(get_image_alpha(rgba))
    new_rgb = rotate(as_rgb_image(rgba, copy=False))
    return with_alpha_channel(new_rgb, new_alpha, copy=False)
def rotate_images(*images, angle, interp='bilinear', show_progress=False, lazy=False):
    """
    Plural of rotate_image. Arguments are broadcastable.
    Angles are measured in degrees!

    EXAMPLE:
        >>> url = "https://hips.hearstapps.com/hmg-prod/images/dog-puppy-on-garden-royalty-free-image-1586966191.jpg?crop=0.752xw:1.00xh;0.175xw,0&resize=1200:*"
        ... image = load_image(url, use_cache=True)
        ... image = resize_image_to_fit(image, height=256, width=256)
        ... display_video(
        ...     crop_images_to_max_size(
        ...         rotate_images(image, angle=range(360), show_progress=True),
        ...         origin="center",
        ...     ),
        ...     loop=True,
        ... )
    """
    images = detuple(images)

    #Prepare for list-based broadcasting. Todo: Make it possible for the broadcasted args to be lazy too!
    if is_image(images):
        images = [images]
    if is_iterable(angle):
        angle = list(angle)
    if not isinstance(interp, str):
        interp = list(interp)

    kwarg_sets = broadcast_kwargs(
        dict(
            image=images,
            angle_in_degrees=angle,
            interp=interp,
        )
    )

    if show_progress:
        kwarg_sets = eta(kwarg_sets, title='rp.rotate_images')

    results = (rotate_image(**kwargs) for kwargs in kwarg_sets)
    return results if lazy else list(results)
def _rotate_rgb_image(image, angle_in_degrees, interp='bilinear'):
    """
    Rotate an RGB (or grayscale) image clockwise by angle_in_degrees.
    Will return an RGB image, not an RGBA one.

    interp: one of 'bilinear', 'cubic', 'nearest'
    Multiples of 90 degrees are handled exactly with flips/transposes (no OpenCV needed);
    all other angles go through cv2.warpAffine with an enlarged output canvas.
    """
    image=as_numpy_image(image,copy=False)
    assert is_image(image)

    #Handle the edge cases: 0, 90, 180, 270, 360, etc - we don't need OpenCV for this
    if angle_in_degrees%360==0:
        return image.copy()
    if angle_in_degrees%360==180:
        return horizontally_flipped_image(vertically_flipped_image(image))
    if angle_in_degrees%90==0:
        if is_grayscale_image(image):
            if angle_in_degrees%360==270:
                return vertically_flipped_image(image.copy().T)
            else:
                assert angle_in_degrees%360==90
                return horizontally_flipped_image(image.copy().T)
        else:
            assert is_rgb_image(image) or is_rgba_image(image)
            if angle_in_degrees%360==270:
                return vertically_flipped_image(image.transpose(1,0,2))
            else:
                assert angle_in_degrees%360==90
                return horizontally_flipped_image(image.transpose(1,0,2))

    #ALTERNATIVE Implementation that doesn't use OpenCV and instead uses PILLOW (not used in this function):
    #    https://pythonexamples.org/python-pillow-rotate-image-90-180-270-degrees/#:~:text=You%20can%20rotate%20an%20image,to%20the%20size%20of%20output.
    # GOT CODE FROM URL: https://www.pyimagesearch.com/2017/01/02/rotate-images-correctly-with-opencv-and-python/
    #TODO: Make a cv_rotate_image version of this function that handles making the black pixels instead reflected images (there's an option for that in cv2.warp_affine). This is better for data augmentation purposes.
    angle=angle_in_degrees
    cv2=pip_import('cv2')

    interp_methods={'bilinear':cv2.INTER_LINEAR,'cubic':cv2.INTER_CUBIC,'nearest':cv2.INTER_NEAREST}
    #BUGFIX: this error message previously said 'cv_resize_image' (copy-pasted from another function)
    assert interp in interp_methods, '_rotate_rgb_image: Interp must be one of the following: %s'%str(list(interp_methods))
    interp_method=interp_methods[interp]

    # grab the dimensions of the image and then determine the
    # center
    (h, w) = image.shape[:2]
    (cX, cY) = (w // 2, h // 2)

    # grab the rotation matrix (applying the negative of the
    # angle to rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])

    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))

    # adjust the rotation matrix to take into account translation
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY

    # perform the actual rotation and return the image
    return cv2.warpAffine(image, M, (nW, nH), flags=interp_method)
def open_url_in_web_browser(url:str):
    """ Open the given URL in the system's default web browser """
    import webbrowser
    webbrowser.open(url)
def google_search_url(query:str)->str:
    """
    Returns the URL for google-searching the given query.
    Non-string queries are coerced via str() first.

    NOTE: the return annotation used to say -> None, which was wrong (it returns a str).

    EXAMPLE:
        >>> google_search_url('What is a dog?')
        https://www.google.com/search?q=What%20is%20a%20dog%3F
    """
    query=str(query)
    import urllib.parse
    #urllib.parse.quote percent-encodes everything unsafe (spaces, '?', '+', ...) except '/'
    url='https://www.google.com/search?q='+urllib.parse.quote(query)
    return url
def open_google_search_in_web_browser(query:str):
    """
    Opens up the web browser to a google search of a given query.
    Returns the search URL that was opened.
    """
    search_url=google_search_url(query)
    open_url_in_web_browser(search_url)
    return search_url
def restart_python():
    """ Kill all running Python processes and relaunch this file (macOS-flavored: uses killall) """
    from os import system
    command="killall Python\nsleep 2\npython3 "+repr(__file__)
    print(command)
    system(command)
def reload_module(module):
    """ Re-execute an already-imported module's code in place (thin wrapper around importlib.reload; returns None) """
    from importlib import reload
    reload(module)
def reload_rp():
    """
    If rp changes mid-notebook, here's a convenient way to reload it.
    Returns the freshly reloaded rp module.
    """
    import rp as _rp
    reload_module(_rp)
    import rp  #Re-import so we return the reloaded module object
    return rp
#OLD CODE. WORKS FINE! BUT I WANTED TO REFACTOR IT. THIS CODE IS ANCIENT!!!! SOOO OLD!!!!! LIKE OVER 9 YEARS OLD....2016-esque
# def _eta(total_n,*,min_interval=.3,title="r.eta"):
# """
# Example:
# >>> a = eta(2000,title='test')
# ... for i in range(2000):
# ... sleep(.031)
# ... a(i)
#
# This method is slopily written, but works perfectly.
# """
# timer=tic()
# interval_timer=[tic()]
# title='\r'+title+": "
# def display_eta(proportion_completed,time_elapsed_in_seconds,TOTAL_TO_CIMPLET,COMPLETSOFAR,print_out=True):
# if interval_timer[0]()>=min_interval:
# interval_timer[0]=tic()
# # Estimated time of arrival printer
# from datetime import timedelta
# out_method=(lambda x:print(x,end='') if print_out else identity)
# temp=timedelta(seconds=time_elapsed_in_seconds)
# completerey="\tProgress: " + str(COMPLETSOFAR) + "/" + str(TOTAL_TO_CIMPLET)
# if proportion_completed<=0:
# return out_method(title +"NO PROGRESS; INFINITE TIME REMAINING. T=" +str(temp) +(completerey))
# # exec(mini_terminal)
# eta=float(time_elapsed_in_seconds) / proportion_completed # Estimated time of arrival
# etr=eta- time_elapsed_in_seconds # Estimated time remaining
# return out_method(title+(("ETR=" + str(timedelta(seconds=etr)) + "\tETA=" + str(timedelta(seconds=eta)) + "\tT="+str(temp) + completerey if etr > 0 else "COMPLETED IN " + str(temp)+completerey+"\n")))
# def out(n,print_out=True):
# return display_eta(n/total_n,timer(),print_out=print_out,TOTAL_TO_CIMPLET=total_n,COMPLETSOFAR=n)
# return out
_print_status_prev_len = 0  # Length of the last message, so we know how much to blank out
def _print_status(x):
    """ Print a single line in such a way that it will be overwritten if we call _print_status again """
    global _print_status_prev_len
    text = str(x)
    if not running_in_jupyter_notebook():
        _erase_terminal_line()
    # Carriage-return to column 0, blank the previous message, then write the new one
    padding = " " * _print_status_prev_len
    print("\r" + padding + "\r" + text, end="", flush=True)
    _print_status_prev_len = len(text)
def _eta(total_n,*,min_interval=.3,title="r.eta"):
    """
    Build a progress/ETA reporter for `total_n` items.

    Returns a closure; call it with the number of items completed so far and it
    prints a single self-overwriting status line (via _print_status) showing
    estimated time remaining, with a fansi-highlighted progress bar.

    Example:
        >>> a = eta(2000,title='test')
        ... for i in range(2000):
        ...     sleep(.031)
        ...     a(i)
    """
    from datetime import timedelta
    timer = tic()           # Measures total elapsed time since this reporter was created
    interval_timer = tic()  # Throttles how often the status line is redrawn
    title = title + ": "
    shown_done = False      # Guarantees the final 'Done!' line is printed exactly once
    style = 'invert'        # fansi style used to paint the completed fraction of the line
    def fansi_progress(string, proportion):
        """ Used to show a progress bar under the ETA text! """
        string = string.expandtabs() #Jupyter doesn't render underlines over tabs
        num_chars = round(len(string) * proportion)
        return fansi(string[:num_chars], style) + string[num_chars:]
    def display_eta(proportion_completed,time_elapsed_in_seconds,TOTAL_TO_COMPLETE,COMPLETED_SO_FAR):
        nonlocal interval_timer
        nonlocal shown_done
        done = proportion_completed >= 1
        # Redraw if the throttle interval elapsed, OR we just finished and the
        # final line hasn't been shown yet ('and' binds tighter than 'or' here)
        if interval_timer()>=min_interval or done and not shown_done:
            interval_timer=tic()
            # Estimated time of arrival printer
            temp=timedelta(seconds=time_elapsed_in_seconds)
            progress = "\tProgress: " + str(COMPLETED_SO_FAR) + "/" + str(TOTAL_TO_COMPLETE)
            if proportion_completed <= 0:
                # No progress yet - can't extrapolate a finish time
                return _print_status(
                    title
                    + "NO PROGRESS; INFINITE TIME REMAINING. T="
                    + str(temp)
                    + (progress)
                )
            eta = float(time_elapsed_in_seconds) / proportion_completed
            # Estimated time of arrival
            etr = eta - time_elapsed_in_seconds  # Estimated time remaining
            if done:
                shown_done = True
            return _print_status(
                fansi_progress(
                    title
                    + "ETR="
                    + str(timedelta(seconds=etr))
                    + "\tETA="
                    + str(timedelta(seconds=eta))
                    + "\tT="
                    + str(temp)
                    + progress,
                    proportion_completed,
                )
                if not done
                # else title + "COMPLETED IN " + str(temp) + progress + "\n"
                else title + "Done! Did %i items in %s" % (COMPLETED_SO_FAR, temp) + "\n"
            )
    def out(n):
        return display_eta(
            n / total_n,
            timer(),
            TOTAL_TO_COMPLETE=total_n,
            COMPLETED_SO_FAR=n,
        )
    return out
class eta:
    """
    Progress/ETA reporter. Use it either as a callable fed the completed count,
    or as an iterable wrapper that reports progress automatically.

    Example:
        >>> a = eta(2000,title='test')
        ... for i in range(2000):
        ...     sleep(.031)
        ...     a(i)
    Example:
        >>> for i in eta(range(100)):
        ...    sleep(.1)
    """
    def __init__(self, x, title='r.eta', min_interval=.3, length=None):
        # x may be a total count (int), a sized iterable, or - with length given - any iterable
        assert isinstance(x, int) or hasattr(x, '__len__') or length is not None
        if length is not None:
            length = int(length)
            # Wrap so the iterable reports the caller-supplied length
            self.elements = IteratorWithLen(x, length)
            x = length
        elif has_len(x):
            self.elements = x
            x = len(x)
        else:
            assert is_number(x)
            self.elements = range(x)
        # _eta returns a closure that prints the status line when called with a count
        self.display_eta = _eta(x, title=title, min_interval=min_interval)
    def __call__(self, n):
        # Report that n items have completed so far
        self.display_eta(n)
    def __iter__(self):
        if len(self):  # An empty collection yields nothing and prints nothing
            for i,e in enumerate(self.elements):
                self(i)
                yield e
            self(i+1)  # Final update so the 100%/'Done!' line gets shown
    def __len__(self):
        return len(self.elements)
# @memoized
def get_all_submodule_names(module):
    """
    Takes a module and returns a list of its direct submodules' dotted names (strings).

    Example:
        >>> get_all_submodule_names(np)
        ans = ['numpy.core', 'numpy.fft', 'numpy.linalg', ...(etc)... ]

    This function is NOT recursive.
    This function IS safe to run - nothing is actually imported (unlike get_all_submodules).
    """
    import types,pkgutil
    assert isinstance(module,types.ModuleType),'This function accepts a module as an input, but you gave it type '+repr(type(module))
    if not hasattr(module,'__path__'):
        return []  # Plain (non-package) modules have no submodules
    dotted_prefix = module.__name__ + "."
    return [name for _, name, _ in pkgutil.iter_modules(module.__path__, dotted_prefix)]
# def get_all_submodules(module,recursive=True):
# #NOTE: This function is dangerous and may have unintended side-effects if importing a certain module runs unwanted code.
# #Attempt to return as many imported modules as we can...skip all the ones that have errors when importing...
# import types,pkgutil,importlib
# assert isinstance(module,types.ModuleType),'This function accepts a module as an input, but you gave it type '+repr(type(module))
# def recursion_helper(module):
# yield module
# for submodule in all_submodules(module,recursive=False):
# yield from recursion_helper(submodule)
# for submodule_name in all_submodule_names(module):
# try:
# submodule=importlib.import_module(submodule_name)
# except Exception:
# pass
# else:
# if recursive:
# yield from recursion_helper(submodule)
# else:
# yield submodule
# def get_all_submodule_names(module): #OLD CODE
# # SOURCE: https://stackoverflow.com/questions/832004/python-finding-all-packages-inside-a-package
# return [x.split('.')[1] for x in get_all_submodule_names(module)]
# dir = os.path.dirname(module.__file__)
# def is_package(d):
# d = os.path.join(dir, d)
# return os.path.isdir(d) and glob.glob(os.path.join(d, '__init__.py*'))
# return list(filter(is_package, os.listdir(dir)))
# def merged_dicts(*dict_args):
# """
# SOURCE: https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
# Given any number of dicts, shallow copy and merge into a new dict,
# precedence goes to key value pairs in latter dicts.
# """
# # dict_args=detuple(dict_args)
# result = {}
# for dictionary in dict_args:
# result.update(dictionary)
# return result
def merged_dicts(*dicts, precedence='last', mutate=False):
    """
    Merge given dictionaries into a new dictionary or mutate the first one.
    The type of the resulting dictionary will be the same as the type of the first dictionary provided
    if mutate is False. Precedence determines which dictionary's values will take priority in case of key conflicts.
    Args:
        *dicts: Variable length dictionary list or a single iterable of dictionaries.
        precedence (str): Determines precedence. 'first' means dictionaries listed first take precedence,
                          'last' means dictionaries listed last take precedence.
        mutate (bool): If True, the first dictionary provided will be mutated instead of creating a new one.
    Returns:
        A new dictionary that is a merge of the given dictionaries, or the first dictionary if mutate is True.
        If non-dicts are given, such as EasyDict, the type of the first dict given will be used.
    Raises:
        ValueError: If 'precedence' is not 'first' or 'last', or if any provided argument is not a dictionary.
    Examples:
        Example 1: merged_dicts({'a': 1}, {'b': 2}, {'a': 3})  # Output: {'a': 3, 'b': 2}
        Example 2: merged_dicts({'a': 1}, {'b': 2}, {'a': 3}, precedence='first')  # Output: {'a': 1, 'b': 2}
        Example 3: merged_dicts(dict1, {'b': 2}, mutate=True)  # `dict1` is now {'a': 1, 'b': 2}
        Example 4: merged_dicts(dict1, {'b': 2})  # `dict1` remains {'a': 1}, `result` is {'a': 1, 'b': 2}
        Example 5: merged_dicts()  # Output: {}
        Example 6: merged_dicts([{'a': 1}, {'b': 2}])  # Output: {'a': 1, 'b': 2}
        Example 7: Handling non-dict Mapping objects
            from easydict import EasyDict
            edict1 = EasyDict({'a': 1})
            edict2 = EasyDict({'b': 2})
            merged_dicts(edict1, edict2)  # Expected output: EasyDict({'a': 1, 'b': 2})
    """
    from collections.abc import Mapping, Iterable

    # Validate precedence argument
    if precedence not in ('first', 'last'):
        raise ValueError("Invalid precedence value: '{}'. Precedence must be either 'first' or 'last'.".format(precedence))

    # Flatten the input if a single iterable is provided. Materialize it as a tuple:
    # previously a generator argument was consumed by the validation below and then
    # failed when indexed/reversed.
    if len(dicts) == 1 and isinstance(dicts[0], Iterable) and not isinstance(dicts[0], Mapping):
        dicts = tuple(dicts[0])

    # Check to make sure all the dicts are there
    if not all(isinstance(d, Mapping) for d in dicts):
        raise ValueError("All arguments must be Mapping objects, but they weren't. We got types "+repr(list(map(type,dicts))))

    # The 'first' dict - the one whose type we copy and the one we mutate - is the
    # first one PROVIDED. (BUGFIX: it was previously chosen after reordering, so with
    # precedence='first' the LAST dict got mutated/typed, contradicting the docstring.)
    first_dict = dicts[0] if len(dicts) else {}

    # Determine processing order: with 'first' precedence we apply dicts in reverse,
    # so earlier dicts are applied later and therefore win conflicts.
    ordered = list(reversed(dicts)) if precedence == 'first' else list(dicts)

    # If mutate is True, the first dictionary is updated directly.
    result = first_dict if mutate else type(first_dict)()

    if mutate and precedence == 'first' and ordered:
        # Re-applying the (already mutated) target dict to itself would be a no-op,
        # so substitute a pre-merge snapshot to preserve its winning values.
        ordered[-1] = dict(first_dict)

    # Merge the dicts into result
    for d in ordered:
        result.update(d)
    return result
def merged_prefixed_dicts(**kwargs):
    """
    Merge several dicts into one, prefixing each key with the keyword it was passed under.
    Useful for destructuring from multiple dicts.
    EXAMPLE:
        >>> first_output = dict(a=1,b=2,c=3)
        >>> second_output = dict(a=4,b=5,c=6)
        >>> merged_prefixed_dicts(first_=first_output,second_=second_output)
       ans = {'first_a': 1, 'first_b': 2, 'first_c': 3, 'second_a': 4, 'second_b': 5, 'second_c': 6}
        >>> first_a, second_a = destructure(merged_prefixed_dicts(first_=first_output,second_=second_output))
    """
    result = {}
    for prefix, mapping in kwargs.items():
        for key, value in mapping.items():
            result[str(prefix) + str(key)] = value
    return result
def merged_suffixed_dicts(**kwargs):
    """
    Merge several dicts into one, suffixing each key with '_<keyword>' it was passed under.
    Useful for destructuring from multiple dicts.
    EXAMPLE:
        >>> first_output = dict(a=1, b=2, c=3)
        >>> second_output = dict(a=4, b=5, c=6)
        >>> merged_suffixed_dicts(first=first_output, second=second_output)
       ans = {'a_first': 1, 'b_first': 2, 'c_first': 3, 'a_second': 4, 'b_second': 5, 'c_second': 6}
    """
    result = {}
    for suffix, mapping in kwargs.items():
        for key, value in mapping.items():
            result[str(key) + '_' + str(suffix)] = value
    return result
def keys_and_values_to_dict(keys,values):
    """
    Pair two parallel sequences into a dict (stops at the shorter, like zip).
    EXAMPLE:
        >>> keys_and_values_to_dict([1,2,3,4],['a','b','c','d'])
       ans = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}
        >>> {x:y for x,y in zip(keys,values)} #Equivalent
       ans = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}
    """
    return dict(zip(keys, values))
def get_source_code(object):
    """
    Return the source text of a function/class/module; for instances, fall back to their class.
    EXAMPLE:
        >>> get_source_code(get_source_code)
       ans = def get_source_code(object):
            import inspect
            return inspect.getsource(object)
    """
    import inspect
    try:
        source = inspect.getsource(object)
    except TypeError:
        # Instances aren't directly inspectable - use their type instead
        source = inspect.getsource(type(object))
    return source
def get_source_file(object):
    """
    Return the path of the file where `object` (or, failing that, its class) was defined.
    Might throw an exception
    """
    import inspect
    def locate(thing):
        return inspect.getfile(inspect.getmodule(thing))
    try:
        return locate(object)
    except TypeError:  # e.g. inspect.getmodule returned None for an instance - try its class
        return locate(type(object))
# region Editor Launchers
def edit(file_or_object,editor_command='atom'):
    """
    Open a file (or the source file of a python object) in an external editor.

    Args:
        file_or_object: A path string, or any python object whose source file
            can be located via get_source_file.
        editor_command (str): Shell command of the editor (e.g. 'vim', 'subl').

    Returns:
        The os.system exit status (useful since terminal editors like vim,
        suplemon or emacs run synchronously in this shell).
    """
    if isinstance(file_or_object,str):
        # BUGFIX: quote the raw path. The old code quoted repr(path), which handed
        # the editor a file name wrapped in literal quote characters.
        return os.system(editor_command +" " + shlex.quote(file_or_object))
    else:
        return edit(get_source_file(object=file_or_object),editor_command=editor_command)
# Thin named wrappers around edit() for common editors
def sublime(x): return edit(x, 'sublime')
def subl(x):    return edit(x, 'subl')
def vscode(x):  return edit(x, 'code')
def gedit(x):   return edit(x, 'gedit')
def atom(x):    return edit(x, 'atom')
# vim=lambda x:edit(x,'vim') # Later we define a special, custom function for vim
def _static_calldefs(modpath):
    """Statically parse the python file at `modpath` and return a {name: calldef} dict, without importing it."""
    pip_import('xdoctest')
    from xdoctest import static_analysis
    return dict(static_analysis.parse_calldefs(fpath=modpath))
def _get_object_lineno(obj):
    #TODO: Make this still work even if the source file was changed (right now, if you use VIMORE and then edit the file then use VIMORE again, it will bring you to the wrong place the second time because of how python works)
    """Return the 1-based line number where `obj` is defined in its source file."""
    # If a function is wrapped by decorators, report the real definition, not the wrapper
    while hasattr(obj, '__wrapped__'):
        obj = obj.__wrapped__
    try:
        # Plain functions carry their definition line on the code object
        return obj.__code__.co_firstlineno
    except Exception:
        # Classes etc: locate the object's first source line within its module's text
        module_code = text_file_to_string(get_source_file(obj))
        first_line = get_source_code(obj).splitlines()[0]
        index = module_code.find(first_line)
        return module_code[:index].count('\n') + 1
def vim(file_or_object=None,line_number=None):
    """
    Launch vim on a file, the source file of a python object, or a list of either.

    Args:
        file_or_object: A path string, a python object (whose source file is
            located via get_source_file), a list mixing both, or None to just
            open vim with no file.
        line_number (int, optional): Jump the cursor to this line. If omitted
            and a python object was given, its definition line is used when it
            can be determined.
    """
    import subprocess
    args=['vim']
    # In Jupyter Notebook, launching Vim might force you to restart the kernel...very annoying
    assert currently_in_a_tty(),'Cannot start Vim because we are not running in a terminal'
    if isinstance(file_or_object,str):
        path=file_or_object
        path=get_absolute_path(path)
        args.append(path)
    elif isinstance(file_or_object, list):
        #Can specify a list of objects or files and vim will edit all at once
        for path in file_or_object:
            if isinstance(path, str):
                path = get_absolute_path(path)
            else:
                # BUGFIX: resolve THIS item's source file (previously passed the
                # whole list to get_source_file, which always failed)
                path=get_source_file(path)
            args.append(path)
    elif file_or_object is None:
        path=None
    else:
        path=get_source_file(file_or_object)
        args.append(path)
    if line_number is None and not is_a_module(file_or_object):
        try:
            line_number=_get_object_lineno(file_or_object)
        except Exception:
            pass  # Best-effort: no line jump if we can't find the definition
    if line_number is not None:
        #https://stackoverflow.com/questions/3313418/starting-vim-at-a-certain-position-line-and-column-of-a-file
        column_number=0
        args+=['+call cursor(%i,%i)'%(line_number,column_number),'+normal zz']
    # NOTE(review): when file_or_object is None, path is None here - is_a_folder /
    # get_parent_directory must tolerate None for the no-argument case; confirm.
    if is_a_folder(path):
        folder=path
    else:
        folder=get_parent_directory(path)
    original_directory=get_current_directory()
    try:
        set_current_directory(folder) # This step is just for convenience; it's completely optional (might be removed if I don't like it). When editing a file, set vim's pwd to it's folder
        subprocess.call(args)
    finally:
        set_current_directory(original_directory)
# # initialize editor methods. Easier to understand when analyzing this code dynamically; static analysis might be really confusing
# __known_editors=['emacs','suplemon','atom','sublime','subl']# NONE of these names should intersect any methods or varables in the r module or else they will be overwritten!
# # for __editor in __known_editors:
# exec("""
# def X(file_or_object):
# _edit(file_or_object,editor_command='X')""".replace('X',__editor))
# del __known_editors,__editor# This is just a setup section to create methods for us, so get rid of the leftovers. __known_editors and __editor are assumed to be unused anywhere else in our current namespace!dz
def xo(file_or_object):
    """Open a file (or a python object's source file) in 'exofrills' (xo), a console editor."""
    import xo
    try:
        target = file_or_object
        if not isinstance(target, str):
            target = get_source_file(target)
        xo.main([target])
    except Exception:
        print("Failed to start exofrills editor")
del xo  # I don't ever use this lol. Undelete this if I encounter any code that needs it.
# endregion
def graph_resistance_distance(n, d, x, y):
    """
    Originally from Fodor's CSE307 HW 2, Spring 2018
    d is dictionary to contain graph edges
    n is number of nodes
    x is entry node
    y is exit node
    Reference: wikipedia.org/wiki/Resistance_distance
    Example from acmgnyr.org/year2017/problems/G-SocialDist.pdf
    graph_resistance_distance(6,{2:(0,1,3),3:(1,4,5),4:(1,5)},1,0) ⟶ 34/21
    """
    # Expand the one-directional edge dict into symmetric adjacency lists
    e=[[] for _ in range(n)]
    for k in d:
        for i in d[k]:
            e[k].append(i)
            e[i].append(k)
    # Build the graph Laplacian: +1 on row i's diagonal per neighbor (its degree),
    # -1 in each neighbor's column
    c = []
    s = len(e)
    for i, l in enumerate(e):
        v = [0]*s
        for j in l:
            v[i] += 1
            v[j] -= 1
        c.append(v)
    # Current-injection vector: +1 unit of current in at x, -1 out at y
    r = [0] * s
    r[x] = 1
    r[y] = -1
    # The Laplacian is singular: ground node max(x,y) by deleting its column,
    # and drop one redundant row to make the system solvable
    m = max(x,y)
    c = [x[:m] + x[m + 1:] for x in c]
    c.pop(0)
    r.pop(0)
    # Solve the augmented system [L | r] via row reduction; the potential solved
    # for node min(x,y) is the resistance distance between x and y
    M = [c[i] + [r[i]] for i in range(len(c))]
    M=reduced_row_echelon_form(M)
    return abs(M[min(x,y)][-1])
namespace="set(list(locals())+list(globals())+list(dir()))" # eval-uable snippet: the set of every name currently visible
xrange=range # Python-2 compatibility alias, handy when copy-pasting py2 code
term='pseudo_terminal(locals(),globals())'# For easy access: exec(term). Can use in the middle of other methods!
def is_valid_python_syntax(code,mode='exec'):
    """
    Returns True if the code is valid python syntax, False otherwise.
    The 'mode' specifies the type of python code - 'exec' is a superset of 'eval'.

    Args:
        code (str): Python source to check.
        mode (str): Parse mode handed to ast.parse ('exec', 'eval', or 'single').

    Returns:
        bool: True when ast.parse accepts the source.
    """
    assert isinstance(code,str),'Code should be a string'
    import ast  # (removed an unused 'traceback' import here)
    try:
        ast.parse(code,mode=mode)
    # Not just SyntaxError: e.g. "ValueError: source code string cannot contain null bytes"
    except Exception:
        return False
    return True
def _is_valid_exeval_python_syntax(code, mode='exec'):
    """Like is_valid_python_syntax, but first strips exeval's leading %-directive lines."""
    code, _ = _parse_exeval_code(code)
    # BUGFIX: forward the requested parse mode (it was previously accepted but ignored)
    return is_valid_python_syntax(code, mode=mode)
def is_valid_shell_syntax(code,*, silent=True, command=None):
    """
    Returns True if the code is valid shell syntax for your default shell. If command is specified (such as '/bin/zsh' or rp.get_default_shell()), checks that shell instead.

    Args:
        code (str): Shell source to validate.
        silent (bool): When False, print the shell's syntax-error output.
        command (str, optional): Shell executable to check with; defaults to $SHELL.

    EXAMPLE:
        >>> is_valid_shell_syntax('asoidj')
       ans = True
        >>> is_valid_shell_syntax('asoidj("')
       ans = False
    """
    import subprocess
    if command is None:
        command=get_default_shell()
    else:
        assert isinstance(command,str)
    try:
        # '<shell> -n' parses stdin without executing it; a nonzero exit means a syntax error
        subprocess.run(
            [command, "-n"], input=code, text=True, stderr=subprocess.PIPE, check=True
        )
        # If the shell command succeeds without error, the syntax is valid
        return True
    except subprocess.CalledProcessError as e:
        # If there's a syntax error, print the error and return False
        if not silent:
            print("Syntax error:", e.stderr)
        return False
def is_valid_sh_syntax(code, *,silent=True, command="sh"):
    """Returns True if the code is valid sh syntax, False otherwise. If silent=False, will print out more information."""
    return is_valid_shell_syntax(code, command=command, silent=silent)
def is_valid_bash_syntax(code, *,silent=True, command="bash"):
    """Returns True if the code is valid bash syntax, False otherwise. If silent=False, will print out more information."""
    return is_valid_shell_syntax(code, command=command, silent=silent)
def is_valid_zsh_syntax(code, *,silent=True, command="zsh"):
    """Returns True if the code is valid zsh syntax, False otherwise. If silent=False, will print out more information."""
    return is_valid_shell_syntax(code, command=command, silent=silent)
def get_default_shell():
    """Returns the path to the user's default shell (falls back to '/bin/sh' when $SHELL is unset)."""
    fallback = '/bin/sh'
    return os.environ.get('SHELL', fallback)
def _ipython_exeval_maker(scope=None):
    """
    Build an exec/eval-style callable that runs code through an embedded IPython shell.

    Args:
        scope (dict, optional): Namespace the shell executes in. Defaults to a
            fresh dict per call. (BUGFIX: the default used to be a mutable
            `scope={}`, silently shared by every caller that omitted it.)

    Returns:
        A callable (code, _ignored_1, _ignored_2) -> result, which re-raises any
        non-SyntaxError exception the cell produced.
    """
    if scope is None:
        scope = {}
    pip_import('IPython','ipython')#Make sure we have ipython
    from IPython.terminal.embed import InteractiveShellEmbed as Shell
    shell=Shell(user_ns=scope)
    shell.showtraceback = lambda *args,**kwargs:None  # Suppress IPython's own traceback printing
    def ipython_exeval(code,_ignored_1,_ignored_2):
        result=shell.run_cell(code)#,silent=True)#silent=True avoids making variables like _,__ etc that ipython typically does
        exception=result.error_before_exec or result.error_in_exec
        if exception:
            if not isinstance(exception,SyntaxError):#If it is a syntaxerror, ipython will print its own error...and then we would print 2 errors...
                raise exception
        return result.result
    return ipython_exeval
_ipython_exeval=None
# region This section MUST come last! This is for if we're running the 'r' class as the main thread (runs pseudo_terminal)―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
class _ExevalDirective:
    """One parsed '%' directive line from an exeval code block (e.g. 'return x', 'private_scope')."""
    def __init__(self, line: str):
        self.command, self.args = self.parse(line)
    @staticmethod
    def parse(directive_line):
        """
        Validates that all directives are supported by exeval.
        Currently supported:
            %return <var name>
            %private_scope
            %prepend_code <python expression>
            %append_code <python expression>

        Returns:
            (command, args): both strings; args is '' for %private_scope.

        Raises:
            ValueError: If the line is not a recognized, well-formed directive.
        """
        assert isinstance(directive_line, str)
        try:
            if directive_line == "private_scope":
                return directive_line, ""
            command, args = directive_line.split(maxsplit=1)
            if command == "return":
                assert args.strip(), "The '%return ...' directive must specify a variable name"
                return command, args
            if command in "prepend_code append_code".split():
                assert args.strip(), "The '%s ...' directive must specify a python expression, not an empty string"%command
                # BUGFIX: the old message was "...'%%command ...'..."%command - it had no
                # conversion specifier, so building it raised TypeError instead
                assert is_valid_python_syntax(args, 'eval'), "The '%%%s ...' directive was given invalid python eval syntax: "%command+repr(args)
                return command, args
        except Exception:
            raise ValueError("Invalid exeval directive line: " + repr('%'+directive_line))
        raise ValueError("Invalid exeval directive line: " + repr('%'+directive_line))
    def __str__(self):
        return (self.command + " " + self.args).strip()
    def __repr__(self):
        return "rp.r._ExevalDirective(%s)" % repr(str(self))
    def __eq__(self, x):
        """
        Allows checks such as
            assert 'private_scope' in [_ExevalDirective('private_scope')]
        To make code more concise
        """
        return str(self) == str(x)
def _parse_exeval_code(code:str):
    """
    Split an exeval code block into its leading '%' directive lines and the remaining python.

    Directives are single-line commands at the top of the code that start with '%'.
    Blank lines (and bare '%' lines) before/between directives are consumed too;
    parsing stops at the first non-empty line that doesn't start with '%'.

    Parameters:
        - code (str): The full code block, possibly starting with '%'-prefixed directives.

    Returns:
        - tuple: (remaining python code as a string, list of _ExevalDirective objects)

    Examples:
        >>> code_block = '''
        %return 123
        print('Hello, world!')
        for i in range(5):
            print(i)
        '''
        >>> code, directives = _parse_exeval_code(code_block)
        >>> print(directives)
        ['return 123']
        >>> print(code)
        print('Hello, world!')
        for i in range(5):
            print(i)
    """
    lines = code.splitlines()
    directives = []
    remaining = list(lines)
    for line in lines:
        stripped = line.strip()
        if stripped:
            if not stripped.startswith('%'):
                break  # First real code line: stop consuming
            body = stripped[1:]
            if body:  # A bare '%' line is consumed but yields no directive
                directives.append(_ExevalDirective(body))
        # Consume this (blank or directive) line from the code
        del remaining[0]
    return '\n'.join(remaining), directives
def exeval(code:str, scope=None):
    """
    Performs either exec(code) or eval(code) and returns the result
    The code will be patched into the linecache - so you can get informative stack traces from it!
    By default it uses the scope of the caller

    The function supports directives at the top of the code block, prefixed with a '%' symbol.
    Supported directives:
    - 'return <variable_name>': Allows specifying a variable to be returned from the executed code's scope.
        This allows you to use exec code
    - 'private_scope': Creates a private copy of the scope before executing the code for better concurrency.
    - 'prepend_code <python_expression>': Prepends some code to your command, specified by a given python expression
    - 'append_code <python_expression>': Just like like prepend_code, except the code is added to the end instead of the beginning

    Parameters:
    - code (str): The python code string to be executed or evaluated, with possible additional directive lines at the top.
    - scope (dict, optional): The scope in which to execute or evaluate the code.
        If not provided, the caller's scope is used.

    Returns:
    - The result of the executed or evaluated code.
        If the 'return' directive is used, the value of the specified variable is returned.

    Raises:
    - KeyError: If the 'return' directive is used and the specified variable is not found in the scope.
    - Any exception raised during the execution or evaluation of the code.

    Example (return <variable_name> directive):
        >>> code = '''
        %return z
        a = 1
        b = 2
        z = a + b
        '''
        >>> result = exeval(code)
        >>> print(result)
        3

    Example (private_scope directive):
        >>> scope = {'a' : 0}
        >>> exeval('''
        %private_scope
        a = 1
        print(a)
        ''', scope)
        1
        >>> exeval('a', scope)
        0
        >>> exeval('a = 1', scope)
        >>> exeval('a', scope)
        1

    Example (prepend_code directive):
        >>> exeval('%prepend_code rp.load_text_file("code.py")')

    Example (prepend_code directive):
        >>> code = '''
        %prepend_code "def greet(name): return 'Hello, ' + name"
        print(greet("Alice"))
        '''
        >>> exeval(code)
        Hello, Alice

    Example (append_code directive):
        >>> exeval('%prepend_code rp.load_text_file("code.py")')

    Example (append_code directive):
        >>> code = '''
        %append_code "result = multiply(3, 4)"
        %return result
        def multiply(a, b):
            return a * b
        '''
        >>> result = exeval(code)
        >>> print(result)
        12
    """
    # Separate the leading %-directives from the runnable python source
    code, directives = _parse_exeval_code(code)
    if scope is None:
        #Execute code in the scope of the caller
        scope=get_scope(1)
    # Scope/code-shaping directives must be applied BEFORE running the code
    for directive in directives:
        if directive == 'private_scope':
            # Create a private copy of the scope so that we don't change variables. Good for concurrency when returning results
            scope = scope.copy()
        if directive.command in 'prepend_code append_code'.split():
            # The directive's argument is itself a python expression that must evaluate to source code
            sourced_code = exeval(directive.args, scope)
            assert isinstance(sourced_code, str), "The %s directive returned a non-string result: %s"%(repr(directive), type(sourced_code))
            if   directive.command=='prepend_code': code = sourced_code + "\n" + code
            elif directive.command=='append_code' : code = code + "\n" + sourced_code
    # Run the code (exec or eval, with linecache patched for good tracebacks)
    result, error = _pterm_exeval(code, scope)
    if error is not None:
        raise error
    # The 'return' directive (applied after execution) overrides the result
    for directive in directives:
        if directive.command == 'return':
            return_directive_var_name = directive.args
            if return_directive_var_name not in scope:
                raise KeyError("rp.exeval return directive: cannot find variable "+repr(return_directive_var_name))
            result = scope[return_directive_var_name]
    return result
_prev_pterm_profiler = None
def _display_pterm_flamechart(local=False):
    """Render the most recent PROF profiling run as a pyinstrument HTML flamechart, then upload or save it."""
    if _prev_pterm_profiler is None:
        fansi_print("RP Flamechart: Nothing was profiled yet, try running a command after setting PROF ON", 'yellow bold')
        return
    fansi_print("RP Flamechart: Generating HTML...", 'green bold')
    html = _prev_pterm_profiler.output_html()
    fansi_print("RP Flamechart: Uploading HTML...", 'green bold')
    use_web = not local and connected_to_internet()
    if use_web:
        # Push to the rp web clipboard so the chart is viewable from anywhere
        _web_copy(html.encode(), show_progress=True)
        output_location = _web_clipboard_url
    else:
        # Offline / local fallback: write a temp file and point the user at it
        output_location = "file://" + save_text_file(html, temporary_file_path("html"))
    fansi_print(
        "RP Flamechart: Visit flamechart at " + output_location,
        "green bold underdouble",
        link=output_location,
    )
    return html, output_location
def _truncate_string_floats(s, num_sigfigs=4) -> str:
    """
    Rewrite every decimal number in a string to the given number of significant figures.

    Syntax-agnostic and robust: it rewrites ALL substrings matching <digits>.<digits>,
    regardless of the surrounding text (JSON, logs, etc).

    Args:
        s (str): Input text possibly containing floating point literals
        num_sigfigs (int): Significant figures to keep

    Returns:
        str: The text with each float reformatted

    >>> _truncate_string_floats('''"time": 0.001026,"attributes": {"l29952": 0.0010257080430164933},''')
    ans = "time": 0.001026,"attributes": {"l29952": 0.001026},
    """
    import re
    spec = '{{:.{}g}}'.format(num_sigfigs)  # e.g. '{:.4g}'
    def shorten(match):
        return spec.format(float(match.group(0)))
    return re.sub(r'\d+\.\d+', shorten, s)
def _pterm_exeval(code,*dicts,exec=exec,eval=eval,tictoc=False,profile=False,ipython=False):
    """
    Evaluate or execute `code` within a descending hierarchy of scope dicts.

    Originally merged all the dicts (merged_dict=merged_dicts(*reversed(dicts))
    in descending priority); currently patched to assume a single scope dict.

    Args:
        code (str): Python source; eval'd if it parses in 'eval' mode, else exec'd.
        *dicts: Scope dicts in descending priority; defaults to the caller's scope.
        exec, eval: Overridable executors (swapped for the IPython executor when ipython=True).
        tictoc (bool): Print how long the code took to run.
        profile (bool): Profile the run with pyinstrument and print the report.
        ipython (bool): Route execution through the embedded IPython shell.

    Returns:
        (ans, error): ans is the eval result (None for exec, unless ipython),
        error is the caught BaseException or None on success.
    """
    global _prev_pterm_profiler
    if len(dicts)==0:
        dicts=[get_scope(1)]
    merged_dict=dicts[0]
    # endregion
    if profile:
        pyinstrument=pip_import('pyinstrument')#https://github.com/joerick/pyinstrument
        profiler = pyinstrument.Profiler()
        profiler.start()
    if ipython:
        exec=eval=_ipython_exeval
    import rp.patch_linecache as patch
    from time import time as _time
    _end_time=None
    try:
        try:
            # Prefer eval when possible so we can capture and return an answer
            if is_valid_python_syntax(code,mode='eval'):
                _start_time=_time()
                ans=patch.run_code(code,'eval',merged_dict,eval)
                # ans=eval(code,merged_dict,merged_dict)
                _end_time=_time()
            else:
                _start_time=_time()
                ans=patch.run_code(code,'exec',merged_dict,exec)
                # ans=exec(code,merged_dict,merged_dict)# ans = None unless using ipython, in which case it might not be
                _end_time=_time()
        finally:
            if tictoc:
                fansi_print("TICTOC: "+('%.5f'%((_end_time or _time())-_start_time)).ljust(10)[:10]+' seconds','blue','bold')
            for d in dicts:# Place updated variables back in descending order of priority
                temp=set()
                for k in d.copy():
                    if k in merged_dict:
                        d[k]=merged_dict.pop(k)
                    else:
                        temp.add(k)  # Variable was deleted by the executed code
                for k in temp:
                    del d[k]
            for k in merged_dict:# If we declared new variables, put them on the top-priority dict
                dicts[0][k]=merged_dict[k]
        return ans,None
    except BaseException as e:
        if ipython:
            # Hide the internal frames the IPython path adds to the traceback
            pop_exception_traceback(e,1)
            pop_exception_traceback(e,2)
        return None,e
    finally:
        if profile:
            if (_end_time or _time())-_start_time>1/1000:#Only show profiler data if it takes more than a millisecond to run your code...
                profiler.stop()
                prof_display_start_time=_time()
                fansi_print("Preparing the PROF display (the profiler, toggle with PROF)...",'blue','underlined')
                print(profiler.output_text(unicode=True, color=True,timeline=False,show_all=_PROF_DEEP).replace('\n\n','\n')[1:-1])#show_all is useful but SOOO verbose it's almost unbearable...
                fansi_print("...took "+str(_time()-prof_display_start_time)+" seconds to diplay the PROF output",'blue','underlined')
            else:
                profiler.stop()#Something tells me its not a good idea to leave stray _pterm_profilers running...
            global _prev_pterm_profiler
            _prev_pterm_profiler = profiler
_PROF_DEEP=True
# def parse(code):
# # Takes care ofmillisecond to run your code...:
# # - Lazy parsers
# # - Indentation fixes
# # -
# pass
def dec2bin(f):
    """
    Convert a non-negative number (possibly fractional) to a binary string.

    Works with fractions; at most 10 fractional binary digits are emitted.
    A '.' always appears after the integer-part digits (e.g. dec2bin(2) == '10.').
    SOURCE: http://code.activestate.com/recipes/577488-decimal-to-binary-conversion/

    >>> dec2bin(2.5)
    '10.1'
    """
    import math
    if f >= 1:
        g = int(math.log(float(f), 2))  # Index of the highest set bit
    else:
        g = -1
    h = g + 1  # Count of integer-part digits; the '.' is inserted after position h
    ig = math.pow(2, g)  # Current place value; halved each iteration
    st = ""
    while f > 0 or ig >= 1:
        if f < 1:
            if len(st[h:]) >= 10:  # 10 fractional digits max
                break
        if f >= ig:
            st = st + "1"
            f = f - ig
        else:
            st += "0"
        ig /= 2
    st = st[:h] + "." + st[h:]
    return st  # BUGFIX: was 'return sxt', a NameError on every call
def run_until_complete(x):
    """Drive the awaitable `x` to completion on the current asyncio event loop and return its result."""
    import asyncio
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(x)
if __name__=='__main__':fansi_print("Booting rp...",'blue','bold',new_line=False)
import rp.rp_ptpython.prompt_style as ps
from rp.prompt_toolkit.shortcuts import create_eventloop#Unless this can be sped up (inlining just pushes the problem to the next imoprt)
# def create_eventloop(inputhook=None, recognize_win32_paste=True):
# """
# Create and return an
# :class:`~prompt_toolkit.eventloop.base.EventLoop` instance for a
# :class:`~prompt_toolkit.interface.CommandLineInterface`.
# """
# def is_windows():
# """
# True when we are using Windows.
# """
# return sys.platform.startswith('win') # E.g. 'win32', not 'darwin' or 'linux2'
# if is_windows():
# from rp.prompt_toolkit.eventloop.win32 import Win32EventLoop as Loop
# return Loop(inputhook=inputhook, recognize_paste=recognize_win32_paste)
# else:
# from rp.prompt_toolkit.eventloop.posix import PosixEventLoop as Loop
# return Loop(inputhook=inputhook)
from rp.rp_ptpython.python_input import PythonCommandLineInterface,PythonInput as Pyin
default_python_input_eventloop = None # Singleton for python_input
default_ipython_shell = None # Singleton for python_input
pyin=None# huge speed increase when using this as a singleton
_iPython=False # Tracks whether the current `pyin` singleton was built for IPython mode (rebuilt if the mode changes)
_printed_a_big_annoying_pseudo_terminal_error=False # Ensures the ptpython-crash fallback warning is printed only once
# default_pseudo_terminal_settings_file=__file__+".pseudo_terinal_settings"
# _pseudo_terminal_settings={
# "pyin.enable_history_search":True,
# "pyin.highlight_matching_parenthesis":True,
# "pyin.enable_input_validation":False,
# "pyin.enable_auto_suggest":True,
# "pyin.show_line_numbers":True,
# "pyin.show_signature":True,
# }
# def _load_pseudo_terminal_settings_from_file(file=None):
# import ast
# global _pseudo_terminal_settings
# _pseudo_terminal_settings=eval(text_file_to_string(file or default_pseudo_terminal_settings_file))
# return None
# def _save_pseudo_terminal_settings_to_file(file=None):
# import ast
# global _pseudo_terminal_settings
# string_to_text_file(file or default_pseudo_terminal_settings_file,repr(_pseudo_terminal_settings))
# return None
def _multi_line_python_input(prompt):
    #Enter '/' to enter after entering a multiline prompt, and '\' to delete the previous line
    #Fallback line-based input used when the prompt_toolkit UI is disabled or has crashed.
    def mli(p):#multilineinput
        from re import fullmatch as re
        ol=[]#output lines
        mlm= lambda x: re(r'( +.*)|(.*[\;\:] *)',x)#multi line marker: indented lines, or lines ending in ';' or ':' keep the prompt open
        bkl= '\\'#back line: deletes the previously entered line
        ent= '/'#enter: force-submits the multiline input
        st=True#started: True only while reading the very first line
        while True:
            try:
                i=input(p if not ol else '')#input (prompt is only shown for the first line)
            except ValueError:
                # ValueError here means stdin was closed; bail out hard to avoid an infinite input loop
                fansi_print("RP INPUT ERROR: Standard-Input file has been closed, which means you can't input any more text!","red","bold")
                raise BaseException("This exception is being raised to shut down RP so you don't get an infinite loop of spam. Please don't use quit() to exit rp, use control+d or the RETURN command")
                return ""  # NOTE(review): unreachable after the raise above; kept from the original
            if i!=ent and i!=bkl:
                if set(i)<={';',' '}:#Just spaces and ';'s will just be used to create a new line; nothing more
                    ol.append('')
                else:
                    ol.append(i)
            if i==ent or not mlm(i) and st:
                break
            st=False
            if i==bkl and ol:
                # User deleted the previous line; re-echo what remains so the screen matches the buffer
                ol=ol[:-1]
                if ol:
                    print(p+ol[0])
                if len(ol)>1:
                    for l in ol[1:]:
                        print(l)
        if not ol:
            return ''
        return line_join(ol)
    return mli(prompt)
    #Simple version: (NOTE(review): dead code below, kept deliberately as the older reference implementation)
    out=input(prompt)
    if out!=out.lstrip() or out.endswith(':') or out.endswith(';'):
        return out+'\n'+_multi_line_python_input(' '*len(prompt))
    return out
# Default values for every persisted pterm/ptpython UI setting.
# _load_pyin_settings_file applies these (or the saved overrides) as attributes of the `pyin` singleton.
_default_pyin_settings=dict(
    enable_mouse_support=False,
    enable_history_search=True,
    highlight_matching_parenthesis=True,
    enable_input_validation=False,
    enable_auto_suggest=True,
    show_line_numbers=True,
    show_signature=True,
    # _current_ui_style_name='stars',
    _current_ui_style_name='adventure',
    # _current_code_style_name='default',
    _current_code_style_name='dracula',
    show_docstring=False,
    show_realtime_input=False,
    show_vars=False,
    show_meta_enter_message=True,
    completion_visualisation='multi-column' if not currently_running_windows() else 'pop-up',
    completion_menu_scroll_offset=1,
    show_status_bar=True,
    wrap_lines=True,
    complete_while_typing=True,
    vi_mode=False,
    paste_mode=False ,
    confirm_exit=True ,
    accept_input_on_enter=2 ,
    enable_open_in_editor=True,
    enable_system_bindings=True,
    show_all_options=False,
    show_last_assignable=False,
    # Background settings
    ui_bg_fg_contrast=0.0, # Minimum contrast between foreground and background (0.0-1.0)
    background_mode='Default', # Background mode: "Nowhere", "Default", "Black", or "White"
    show_battery_life=False,
    enable_microcompletions=True,
    history_syntax_highlighting=False,
    history_number_of_lines=2500,
    min_bot_space=15,
    top_space=0,
    true_color=False,
    indent_guides_mode='Regular',
    highlight_cursor_line=False ,# Highlight the background of the cursor line
    highlight_cursor_column=False ,# Highlight the background of the cursor column
    highlight_matching_words=True ,# Underline all occurrences of the word under cursor
    show_whitespace=True,
    # Color customization settings
    code_invert_colors=False, # Invert colors for code syntax highlighting
    code_invert_brightness=False, # Invert brightness for code syntax highlighting
    ui_invert_colors=False, # Invert colors for UI elements (hidden by default)
    ui_invert_brightness=False, # Invert brightness for UI elements (hidden by default)
    code_hue_shift=0, # Hue shift for code syntax highlighting (0-355 degrees in 5-degree increments)
    ui_hue_shift=0, # Hue shift for UI elements (0-355 degrees in 5-degree increments, hidden by default)
    code_min_brightness=0.0, # Minimum brightness for code elements (0.0-1.0)
    code_max_brightness=1.0, # Maximum brightness for code elements (0.0-1.0)
    code_ui_min_brightness=0.0, # Minimum brightness for code UI elements (whitespace, indent guides, row/column highlights) (0.0-1.0)
    code_ui_max_brightness=1.0, # Maximum brightness for code UI elements (whitespace, indent guides, row/column highlights) (0.0-1.0)
    ui_min_brightness=0.0, # Minimum brightness for UI elements (0.0-1.0)
    ui_max_brightness=1.0, # Maximum brightness for UI elements (0.0-1.0)
    code_min_saturation=0.0, # Minimum saturation for code elements (0.0-1.0)
    code_max_saturation=1.0, # Maximum saturation for code elements (0.0-1.0)
    ui_min_saturation=0.0, # Minimum saturation for UI elements (0.0-1.0)
    ui_max_saturation=1.0, # Maximum saturation for UI elements (0.0-1.0)
    session_title='',
)
_pyin_settings_file_path=__file__+'.rp_pyin_settings' # Settings are persisted next to this module as a repr'd dict
_globa_pyin=[None] # One-element box holding the active pyin instance (used by _set_session_title)
_rprc_pterm_settings_overrides={}#Modify this as you wish in an rprc.
def _load_pyin_settings_file():
    """
    Load the persisted pterm settings from disk and apply them to the `pyin` singleton.

    Falls back to `_default_pyin_settings` if the file is missing or malformed,
    then applies any `_rprc_pterm_settings_overrides` on top of the loaded settings.
    """
    _globa_pyin[0]=pyin
    try:
        import ast
        # SECURITY: the settings file is written by _save_pyin_settings_file as repr(dict-of-literals),
        # so ast.literal_eval suffices; previously this used eval(), which would execute arbitrary code
        # planted in the settings file. Any parse failure still falls back to the defaults below.
        settings=ast.literal_eval(text_file_to_string(_pyin_settings_file_path))
        assert isinstance(settings,dict)
        settings.update(_rprc_pterm_settings_overrides)
    except Exception:
        settings=_default_pyin_settings.copy()
    def _load_pyin_settings_from_dict(d):
        # Styles must go through their setter methods; everything else is a plain attribute
        pyin.use_ui_colorscheme(d['_current_ui_style_name'])
        pyin.use_code_colorscheme(d['_current_code_style_name'])
        pyin.true_color=d['true_color'] if 'true_color' in d else False
        for attr in _default_pyin_settings:
            if attr in d:
                setattr(pyin,attr,d[attr])
        # Make sure style settings like background_mode and ui_bg_fg_contrast take effect
        pyin._update_style()
    _load_pyin_settings_from_dict(settings)
    _set_default_session_title()
def _save_pyin_settings_file():
    """Persist the current values of every known pyin setting to the settings file as a repr'd dict."""
    current={name:getattr(pyin,name) for name in _default_pyin_settings}
    string_to_text_file(_pyin_settings_file_path,repr(current))
def _delete_pyin_settings_file():
    """Delete the on-disk pterm settings file, so the next load falls back to the defaults."""
    delete_file(_pyin_settings_file_path)
def _set_session_title(title=None):
    """Set the pterm session title; when `title` is None, prompts the user interactively."""
    pyin=_globa_pyin[0]  # NOTE(review): deliberately shadows the module-level `pyin` with the boxed singleton
    if title is None:
        if hasattr(pyin,'session_title'):
            current_title=pyin.session_title
            print("Current session title:",repr(current_title))
        print("Please enter a new session title:")
        title=input_default(" > ", _get_session_title())
    pyin.session_title=title
def _get_session_title():
    """Return the current session title ('' when unset) and mirror it into $RP_SESSION_TITLE."""
    title = pyin.session_title if hasattr(pyin, 'session_title') else ""
    os.environ['RP_SESSION_TITLE'] = title
    return title
def _get_default_session_title():
    """Choose a default session title: the existing one if set, else the conda/venv name padded with spaces, else ''."""
    existing = _get_session_title()
    if existing:
        return existing
    if running_in_conda():
        return ' ' + get_conda_name() + ' '
    if running_in_venv():
        return ' ' + get_venv_name() + ' '
    return ""
def _set_default_session_title():
    """Apply the default session title (existing title, or conda/venv name) to the pyin singleton."""
    _set_session_title(_get_default_session_title())
def _set_pterm_theme(ui_theme_name=None,code_theme_name=None):
    """
    Set the pseudo-terminal's UI and/or code color themes.
    Either argument may be None, in which case the currently-active theme is kept.
    EXAMPLE:
        _set_pterm_theme('saturn','gruvbox-dark')
    """
    import rp.r as r
    import rp.rp_ptpython.style as s
    # BUGFIX: the defaults previously read undefined module globals (_current_ui_style_name /
    # _current_code_style_name), and the second branch assigned ui_theme_name instead of
    # code_theme_name. Default from the live pyin instance instead.
    if ui_theme_name is None:ui_theme_name=r.pyin._current_ui_style_name
    if code_theme_name is None:code_theme_name=r.pyin._current_code_style_name
    #r.pyin._current_code_style_name='vim'
    #r.pyin._current_code_style_name='gruvbox-dark'
    r.pyin._current_code_style_name=code_theme_name
    r.pyin.use_ui_colorscheme(ui_theme_name)
_pt_pseudo_terminal_init_settings=False # Becomes True once the settings file has been loaded (done lazily on the first python_input call)
history_filename=__file__ + ".history.txt" # Shared REPL history file, stored next to this module
def python_input(scope,header='',enable_ptpython=True,iPython=False):
    """
    Read one (possibly multi-line) command from the user for pseudo_terminal.

    scope: zero-arg callable returning the current variable scope (used for autocompletion).
    header: the prompt string (used only by the plain-input fallbacks).
    enable_ptpython: when False, skip the prompt_toolkit UI and use _multi_line_python_input.
    iPython: build the IPython-flavored input UI instead of the plain ptpython one.
    Returns the entered text; returns "RETURN" when the user exits with Control+D.
    """
    import rp.rp_ptpython.completer as completer
    # print(completer.completion_cache_pre_origin_doc.keys())
    completer.completion_cache_pre_origin_doc={'':tuple(scope())}#clear the cache because variables might change between inputs (in fact, almost certainly they will). BUT for a speed boost, we'll pre-calculate the initial autocompletion now, because we know it starts with an empty string and should be the scope when doing that.
    global Pyin
    global pyin,_iPython
    global _printed_a_big_annoying_pseudo_terminal_error
    if not enable_ptpython or _printed_a_big_annoying_pseudo_terminal_error:
        return _multi_line_python_input(header)
    try:
        if iPython:
            from rp.rp_ptpython.ipython import IPythonInput as Pyin,InteractiveShellEmbed
            global default_ipython_shell
            if default_ipython_shell is None:
                default_ipython_shell=InteractiveShellEmbed()
            if not pyin or _iPython!=iPython:
                # Rebuild the singleton if it doesn't exist yet or the mode flipped
                pyin=Pyin(default_ipython_shell,get_globals=scope,history_filename=history_filename)
        else:
            if not pyin or _iPython!=iPython:
                # exec(mini_terminal)
                from rp.rp_ptpython.python_input import PythonCommandLineInterface,PythonInput as Pyin
                pyin=Pyin(get_globals=scope,history_filename=history_filename)
        _iPython=iPython
        global default_python_input_eventloop
        # global _pseudo_terminal_settings
        global _pt_pseudo_terminal_init_settings
        if not _pt_pseudo_terminal_init_settings:
            # Lazily load the saved UI settings exactly once per process
            _load_pyin_settings_file()
            _pt_pseudo_terminal_init_settings=True
        pyin.all_prompt_styles['default']=ps.PseudoTerminalPrompt()
        if not currently_running_windows():
            pyin.prompt_style='default'
        # ps.PseudoTerminalPrompt=PseudoTerminalPrompt
        import warnings
        with warnings.catch_warnings():
            #I don't want anything printed to the console while we're typing...it's super annoying
            #Usually these warnings come from autocomplete stumbling upon some property of some library which is deprecated
            #I don't care about this, and it interrupts the typing experience
            default_python_input_eventloop=default_python_input_eventloop or PythonCommandLineInterface(create_eventloop(),python_input=pyin)
            with no_gc(): #One of the bottlenecks of prompt-toolkit is that it triggers the gc so much, something to do with redraws or somethin' idk. But during input let's disable garbage collection.
                #If this causes memory leaks, make a new context like reduce_gc_frequency(scale_factor=10) etc
                code_obj = default_python_input_eventloop.run()
            return code_obj.text
    except EOFError:
        fansi_print("Caught Control+D; preparing to exit rp.pseudo_terminal() ",'blue','bold')# Presumably in ptpython when you use control+d and then select yes; AKA the exit prompt they built
        return "RETURN"
    except Exception as E:
        # The prompt_toolkit UI crashed: report it once, then permanently fall back to plain input()
        if not _printed_a_big_annoying_pseudo_terminal_error:
            if sys.stdout.isatty():#No reason to scare
                try:
                    print_verbose_stack_trace(E)
                except:
                    print_stack_trace(E)
                fansi_print("The prompt_toolkit version of pseudo_terminal crashed; reverting to the command-line version...",'cyan','bold')
            else:
                if running_in_google_colab():
                    reason="you're running in Google Colab, and not in a terminal."
                elif running_in_ipython():
                    reason="you're running in a Jupyter notebook, and not in a terminal."
                else:
                    reason="you're not running in a terminal"
                fansi_print("Defaulting to the command-line (aka PT OFF) version because "+reason,'cyan','bold')
            _printed_a_big_annoying_pseudo_terminal_error=True
        return input(header)
class pseudo_terminal_style:
    """Provides the welcome banner shown when entering rp.pseudo_terminal()."""
    def __init__(self):
        # NOTE: an earlier plain "Entering interactive session!" lambda was dead code here
        # (it was immediately overwritten below) and has been removed.
        import datetime
        timestamp=lambda:datetime.datetime.now().strftime("%B %d, %Y at %I:%M:%S %p")
        import sys,platform
        version=platform.python_implementation()+' '+str(sys.version_info.major)+'.'+str(sys.version_info.minor)+'.'+str(sys.version_info.micro)
        # message is a callable so the timestamp is fresh every time the banner is shown
        self.message=lambda:"rp.pseudo_terminal() in %s: Welcome! "%version+timestamp()
"""
TODO:
- Does NOT return anything
- Can be used like MiniTerminal
- But should be able to accept arguments for niche areas! Not sure how yet; should be modular though somehow...
- History for every variable
- Scope Hierarchy: [globals(),locals(),
others()]:
- Create new dict that's the composed of all the others then update them accordingly
- HIST: Contains a list of dicts, whose differences can be seen
"""
def _dhistory_helper(history:str)->list:
    #Take some python code, rip out just the function definitions, and return them in a list
    #Returns the def bodies (with their decorators) keyed by name; later defs of the same name win.
    def get_all_function_names(code:str):
        #Return all the names of all functions defined in the given code, in the order that they appear
        from rp import line_split,lrstrip_all_lines
        lines=line_split(lrstrip_all_lines(code))
        import re
        defs=[line for line in lines if re.fullmatch(r'\s*def\s+\w+\s*\(.*',line)]
        func_names=[d[len('def '):d.find('(')].strip() for d in defs]
        return func_names
    def _get_function_name(code):
        #First function name defined in `code`, or None if there are none
        all_func_names=get_all_function_names(code)
        if all_func_names:
            return all_func_names[0]
        return None
    from collections import OrderedDict
    defstate=True   # True while the current chunk started with a top-level `def ...`
    #defstate=False
    nondefchunks=[] # NOTE(review): collected but never returned; kept from the original
    defchunks=[]
    chunk=[]        # Lines of the chunk currently being accumulated
    defs=OrderedDict()
    decorators=[]   # Decorator lines seen since the last def was flushed
    import re
    for line in line_split(history):
        if line.lstrip()==line and line:
            # A new top-level statement starts: flush the previous chunk first
            if defstate==True:
                defcode=line_join(decorators+chunk)
                defchunks.append(defcode)
                defname=_get_function_name(defcode)
                #assert defname is not None
                if defname is not None:
                    defs[defname]=defcode
                decorators=[]
                defstate=False
            if defstate==False:
                if line.startswith('@'):
                    # print(decorators)
                    decorators.append(line)
                else:
                    nondefchunks.append(line_join(chunk))
                    if line.strip() and not bool(re.fullmatch(r'def\s+\w+\s*\(.*',line)):
                        decorators=[]
            chunk=[]
            chunk.append(line)
            defstate = bool(re.fullmatch(r'def\s+\w+\s*\(.*',line))
        else:
            # Indented or blank line: continuation of the current chunk
            chunk.append(line)
    if defstate and chunk:
        # Flush a trailing def at the end of the history
        defcode=line_join(decorators+chunk)
        defchunks.append(defcode)
        defname=_get_function_name(defcode)
        #assert defname is not None
        if defname is not None:
            defs[defname]=defcode
    return defs.values()
class _Module:
def __init__(self,name,module):
from inspect import getsourcefile
self.name=name
self.module=module
self.path=getsourcefile(module)
self.date_last_updated=get_current_date()
if not isinstance(self.path,str):
raise TypeError()
def update(self):
#Will check to see if the module is out of date. If it is, it will reload it.
if date_modified(self.path)>self.date_last_updated:
try:
#We should reload this module
from time import time as __time__
starttime=__time__()
fansi_print('RELOAD: Reloading module '+repr(self.name)+'...','blue','bold',new_line=False)
from importlib import reload
reload(self.module)
fansi_print('done in '+str(__time__()-starttime)[:10]+' seconds!','blue','bold')
except BaseException as e:
fansi_print('RELOAD: ERROR: Failed to reload module '+repr(self.name)+"\nStack trace shown below:",'blue','bold')
print_stack_trace(e)
self.date_last_updated=get_current_date()
def __hash__(self):
return self.name
_modules={} # Maps module name -> _Module tracker; populated lazily by _reload_modules
def _reload_modules():
    #Re-import any modules that have been modified after the last time we called _reload_modules
    for name,module in sys.modules.items():
        if name not in _modules:
            try:
                _modules[name]=_Module(name,module)
            except TypeError:pass  # Module has no watchable source file (builtin/extension); skip it
            except Exception as e:
                print_stack_trace(e)
        else:
            # Already tracked: reload it if its source file changed on disk
            _modules[name].update()
def launch_xonsh():
    #Launch the xonsh shell
    #Temporarily clears sys.argv (keeping argv[0]) because xonsh tries to parse our custom arguments.
    pip_import('xonsh')
    old_sys_argv=sys.argv.copy()
    try:
        sys.argv=old_sys_argv[:1]#Xonsh doesn't like it if we have custom arguments that don't fit xonsh, probably set by using ARG
        import xonsh.main
        try:
            xonsh.main.main()
        except SystemExit as error:
            #This happens when we press control+d to exit the shell; we get "SystemExit: 0"
            pass
        sys.path.append(".")
    finally:
        #We definitely want to restore the old arguments
        sys.argv=old_sys_argv
def with_line_numbers(string, prefix="%i. ", *, start_from=0, align=False):
    """
    Prefix every line of `string` with its line number.

    prefix: a %-format template receiving the line number (default "%i. ").
    start_from: number given to the first line (default 0).
    align: when True, right-justify the number labels so the text columns line up.

    EXAMPLES:
        >>> with_line_numbers('a\nb\nc')
        ans = 0. a
              1. b
              2. c
        >>> with_line_numbers('a\nb\nc', start_from=1)
        ans = 1. a
              2. b
              3. c
        >>> print(with_line_numbers(poem, align=True))
         0. In the Land of
         1. Mordor where
            ...
        14. shadows lie.
    """
    rows = string.splitlines()
    labels = [prefix % (index + start_from) for index in range(len(rows))]
    if align:
        label_width = max(map(len, labels))
        labels = [label.rjust(label_width) for label in labels]
    numbered = [label + row for label, row in zip(labels, rows)]
    return line_join(numbered)
def number_of_lines(string):
    """Return the number of '\\n'-separated lines in `string` (an empty string counts as one line)."""
    # Counting newlines avoids allocating a list of lines; an unreachable
    # len(line_split(...)) fallback after this return has been removed.
    return string.count('\n')+1
def number_of_lines_in_terminal(string):
    """
    Gets the number of lines a string would appear to have when printed in a terminal, assuming the terminal wraps strings
    For example, the string '*'*1000 is technically only one line, but when printed print('*'*1000) might look like several lines in a terminal
    """
    if not currently_in_a_tty():
        #Perhaps just return 1 if we're not in a TTY? Some places, like jupyter notebooks, don't wrap lines
        #For now, we'll ignore this edge case. In the future this block might return something different.
        pass
    terminal_width = get_terminal_width()
    # Each logical line occupies 1 + (length // width) visual rows once wrapped
    return sum(len(row) // terminal_width + 1 for row in line_split(string))
def number_of_lines_in_file(filename):
    """
    Quickly count the number of lines in a given file.
    It's 5-10x faster than text_file_to_string(filename).count('\\n')
    It also appears to take constant memory; it reads the file in 1MB chunks.
    Note that it doesn't care if it's a text file or not; it just counts the number of \\n bytes in the file!
    For example, number_of_lines_in_file('picture.jpg')==280 is a possibility.
    https://stackoverflow.com/questions/845058/how-to-get-line-count-of-a-large-file-cheaply-in-python
    """
    from itertools import (takewhile,repeat)
    # BUGFIX: the file handle was previously opened and never closed (resource leak)
    with open(filename, 'rb') as f:
        bufgen = takewhile(lambda x: x, (f.raw.read(1024*1024) for _ in repeat(None)))
        return sum( buf.count(b'\n') for buf in bufgen )+1
def _all_files_listed_in_exception_traceback(exception:BaseException)->list:
    #Return [(path, line_number), ...] for every existing file mentioned in the exception's formatted traceback.
    from traceback import format_exception,format_exception
    error=exception
    error_string=''.join(format_exception(error.__class__,error,error.__traceback__))
    ans=error_string
    import re
    ans=line_split(ans)
    ans=[line for line in ans if re.fullmatch(r'  File .*, line \d+.*',line)]
    #ans=[line for line in ans if re.fullmatch(r'  File .*, line \d+, in .*',line)]
    def process_line(line):
        #Usually a line will look like this:
        #    ans =   File "/home/ryan/anaconda3/lib/python3.7/copy.py", line 240, in _deepcopy_dict
        #     ➤ split_python_tokens(ans)
        #    ans = ['  ', 'File', ' ', '"', '/home/ryan/anaconda3/lib/python3.7/copy.py', '"', ',', ' ', 'line', ' ', '240', ',', ' ', 'in', ' ', '_deepcopy_dict']
        try:
            tokens=split_python_tokens(line)
            path=tokens[4]   # Token index 4 is the quoted file path (see the example above)
            assert path_exists(path)
            line=int(tokens[10])  # Token index 10 is the line number
            return path,line
        except Exception:
            return None
    ans=list(map(process_line,ans))
    ans=[x for x in ans if x is not None]
    return ans
    #Older version below (which sometimes missed a few files or got the linenumber wrong)
    #NOTE(review): everything below is deliberately-kept dead code (unreachable after the return above)
    tb=exception.__traceback__
    out=[]
    while hasattr(tb,'tb_next'):
        try:
            frame=tb.tb_frame
            code =frame.f_code
            tb=tb.tb_next
            #frame=frame.f_back
            # out.append((code.co_filename,code.co_firstlineno))
            out.append((code.co_filename,tb.tb_lineno))
        except Exception:
            pass
    return out
def read_symlink(path: str, *, recursive=False):
    """
    Resolves the destination of a symlink.
    Args:
        path: Path to the symlink.
        recursive: If True, will keep resolving symlinks until it hits a non-symlink
    Returns:
        Resolved path string.
    Raises:
        AssertionError: If path is not a string, or is not a symlink.
    """
    assert isinstance(path, str)
    if not is_symlink(path): raise AssertionError('Not a symlink: ' + path)
    path = path.rstrip('/')  # A trailing slash would make readlink look past the link itself
    path = os.readlink(path)
    if recursive:
        # BUGFIX: was `is_a_symlink`, which isn't the alias defined in this file; use `is_symlink`.
        # NOTE(review): a relative readlink result is interpreted relative to the cwd here,
        # not the link's parent directory - pre-existing behavior, confirm before changing.
        while is_symlink(path):
            path = os.readlink(path)
    return path
def make_symlink_absolute(symlink_path, *, recursive=False, physical=True):
    """Replace the destination of a symlink with an absolute path instead of a relative one"""
    destination_path = read_symlink(symlink_path, recursive=recursive)
    destination_path = get_absolute_path(destination_path, physical=physical)
    # Rewrites the link in place; returns the symlink path (make_symlink's return value)
    return make_symlink(destination_path, symlink_path, relative=False, replace=True)
def make_symlink_relative(symlink_path, *, recursive=False):
    """Replace the destination of a symlink with a relative path instead of an absolute one"""
    destination_path = read_symlink(symlink_path, recursive=recursive)
    if starts_with_any(destination_path, "/", "~"):
        # Normalize absolute or ~-prefixed destinations first; make_symlink(relative=True) relativizes it
        destination_path = get_absolute_path(destination_path, physical=False)
    make_symlink(destination_path, symlink_path, relative=True, replace=True)
    return symlink_path
def read_symlinks(
    *symlink_paths,
    recursive=False,
    strict=True,
    num_threads=None,
    show_progress=False,
    lazy=False
):
    """ Plural of rp.read_symlink """
    symlink_paths = detuple(symlink_paths)
    if show_progress == True: show_progress = "eta:" + get_current_function_name()
    # NOTE(review): gather_args_call presumably forwards the keyword locals
    # (recursive/strict/num_threads/show_progress/lazy) to load_files - confirm in its definition
    return gather_args_call(load_files, read_symlink, symlink_paths)
def make_symlinks_relative(
    *symlink_paths,
    recursive=False,
    strict=True,
    num_threads=None,
    show_progress=False,
    lazy=False
):
    """ Plural of rp.make_symlink_relative """
    symlink_paths = detuple(symlink_paths)
    if show_progress == True: show_progress = "eta:" + get_current_function_name()
    # NOTE(review): keyword locals are presumably forwarded by gather_args_call - confirm
    return gather_args_call(load_files, make_symlink_relative, symlink_paths)
def make_symlinks_absolute(
    *symlink_paths,
    recursive=False,
    strict=True,
    num_threads=None,
    show_progress=False,
    lazy=False
):
    """ Plural of rp.make_symlink_absolute """
    symlink_paths = detuple(symlink_paths)
    if show_progress == True: show_progress = "eta:" + get_current_function_name()
    # NOTE(review): keyword locals are presumably forwarded by gather_args_call - confirm
    return gather_args_call(load_files, make_symlink_absolute, symlink_paths)
def symlink_is_broken(path:str):
    """ Returns True if the symlink points to a path that doesn't exist """
    assert is_symlink(path)
    # path_exists follows the link, so a dangling link makes it return False
    return not path_exists(path)
# def symlink_works(path:str):
#     return not symlink_is_broken(path)
def make_hardlink(original_path, hardlink_path, *, recursive=False):
    """
    Create a hardlink at `hardlink_path` pointing at `original_path`.
    If `hardlink_path` is a folder, the link is created inside it with the original's file name.
    recursive=True hardlinks an entire folder tree (unix only, via `cp -al`).
    Returns the created hardlink path.
    """
    import os
    if path_exists(hardlink_path) and not path_exists(original_path):
        # If the caller of this function gets the arguments backwards, fix it automatically
        hardlink_path, original_path = original_path, hardlink_path
    if is_a_folder(hardlink_path):
        hardlink_path = path_join(hardlink_path, get_file_name(original_path))
    assert path_exists(original_path), "Can't create hardlink to %s because that path does not exist!" % original_path
    assert not path_exists(hardlink_path), "Can't create hardlink at %s because a file already exists there!" % hardlink_path
    original_is_folder = is_a_folder(original_path)
    make_parent_folder(hardlink_path)
    if recursive and original_is_folder:
        assert currently_running_unix(), 'Recursive hardlinks not implemented in rp for windows yet'
        # Shell out to cp -al (paths are shlex-quoted); os.system returns nonzero on failure
        command = 'cp -al '+shlex.quote(original_path)+' '+shlex.quote(hardlink_path)
        result = os.system(command)
        if result:
            raise RuntimeError("Error with command: "+command)
        return hardlink_path
    else:
        assert not original_is_folder or is_symbolic_link(original_path), "Can't create a hardlink to a folder, only to files: " + original_path
        os.link(original_path, hardlink_path)
        return hardlink_path
def replace_symlink_with_hardlink(symlink_path):
    """Replaces a symlink with a hardlink"""
    assert isinstance(symlink_path,str), 'replace_symlink_with_hardlink: Input path must be a string'
    assert is_symlink(symlink_path), 'replace_symlink_with_hardlink: Path is not a symlink: '+symlink_path
    read_path=read_symlink(symlink_path)
    assert not is_a_folder(read_path), 'Cannot hardlink to a folder from symlink '+symlink_path+' --> '+read_path
    # Delete the link first so the hardlink can take its exact place
    delete_file(symlink_path)
    hardlink_path=symlink_path
    return make_hardlink(read_path,hardlink_path)
def replace_symlinks_with_hardlinks(
    *symlink_paths,
    num_threads: int = None,
    show_progress=False,
    strict=True,
    lazy=False
):
    """Plural of replace_symlink_with_hardlink. TODO: Parallelize this (maybe with load_files), and add strict, num_threads, etc"""
    symlink_paths = rp_iglob(symlink_paths)  # Accepts glob patterns as well as literal paths
    return load_files(replace_symlink_with_hardlink, symlink_paths, lazy=lazy, strict=strict, show_progress=show_progress, num_threads=num_threads)
def make_symlink(original_path, symlink_path=".", *, relative=False, replace=False, strict=True):
    """
    Creates a symbolic link.
    Creates a symlink at `symlink_path` pointing to `original_path`.
    Args:
        original_path: Path to the original file/directory.
        symlink_path: Path for the symlink (default: current directory).
            If a folder, symlink is created inside it with original's name.
        relative: Use a relative symlink, correctly pointing to original_path from the symlink_path (default: False).
        replace: Replace existing symlink if True (default: False, error if exists)
        strict: If true, raises an error if the original_path does not exist. (default: True)
    Returns:
        Path to the created symlink.
    Raises:
        AssertionError: If `original_path` doesn't exist (and strict) or `symlink_path` already exists (and not replaced).
    """
    import os
    if path_exists(symlink_path) and not path_exists(original_path):
        #If the caller of this function gets the arguments backwards, fix it automatically
        symlink_path,original_path=original_path,symlink_path
    if is_a_folder(symlink_path):
        symlink_path=path_join(symlink_path,get_file_name(original_path))
    assert not strict or path_exists(original_path), "Can't create symlink to %s because that path does not exist!"%original_path
    assert replace or not path_exists(symlink_path), "Can't create symlink at %s because a file already exists there!"%symlink_path
    if relative:
        original_path = get_relative_path(original_path, root=get_parent_folder(symlink_path))
    if replace and is_symlink(symlink_path):
        # Only pre-existing symlinks are removed; replace=True does not clobber regular files
        os.remove(symlink_path)
    os.symlink(original_path,symlink_path)
    return symlink_path
def is_symbolic_link(path:str):
    """
    Returns whether or not a given path is a symbolic link.
    Non-string inputs and pathologically long paths simply return False.
    """
    from pathlib import Path
    if not isinstance(path,str):
        return False
    try:
        result = Path(path).is_symlink()
    except OSError:
        # e.g. OSError: [Errno 63] File name too long:
        result = False
    return result
is_symlink=is_symbolic_link
def symlink_move(from_path,to_path,*,relative=False):
    """
    Move a file or folder, but leave a symlink behind so that programs that try to access the original file aren't affected
    """
    from_path=get_absolute_path(from_path)
    to_path=get_absolute_path(to_path)
    assert path_exists(from_path),from_path
    to_path=move_path(from_path,to_path)  # move_path returns the final destination (may differ if to_path was a folder)
    # Leave a link at the old location pointing at the new one
    make_symlink(from_path,to_path,relative=relative)
    return to_path
def _guess_mimetype(file_path)->str:
    """Return the top-level mimetype category of an existing file ('image', 'video', ...), or None when unknown."""
    import mimetypes
    if not file_exists(file_path):
        return None
    guessed, _ = mimetypes.guess_type(file_path)  # guessed is like 'image/jpeg', or None
    if guessed is None:
        return None
    category, _, _ = guessed.partition('/')
    return category
def is_image_file(file_path):
    """True iff the path looks like an image file (by guessed mimetype, plus special-cased extensions like .exr)."""
    if not isinstance(file_path,str):
        return False
    if get_file_extension(file_path) in ('exr',):
        return True
    return _guess_mimetype(file_path)=='image'
def is_video_file(file_path):
    """True iff the path's guessed mimetype is in the 'video' category (see _guess_mimetype)."""
    return _guess_mimetype(file_path)=='video'
def is_sound_file(file_path):
    """True iff the path's guessed mimetype is in the 'audio' category (see _guess_mimetype)."""
    return _guess_mimetype(file_path)=='audio'
def is_utf8_file(path):
    """
    Returns True iff the beginning of the file decodes as UTF-8 (only the first line is checked).
    Faster than trying to use text_file_to_string(path), because it doesn't need to read the whole file.
    """
    if not file_exists(path):
        return False
    import codecs
    try:
        # BUGFIX: the handle was previously leaked (never closed), and empty files were
        # wrongly reported as non-UTF-8 because next(f) raised StopIteration, which the
        # broad except swallowed. next(f, None) keeps empty files valid.
        with codecs.open(path, encoding='utf-8', errors='strict') as f:
            next(f, None)
        return True
    # except UnicodeDecodeError:
    except Exception:
        return False
# is_text_file=is_utf8_file #TODO: Not sure if this is the right way to do it
def display_file_tree(root=None,*,all=False,only_directories=False,traverse_symlinks=False):
    #This code was ripped off of somewhere online, I don't remember where. Search the body of this code on google and you should find it in some github repo that implements the tree command in multiple languages
    #Prints a `tree`-style listing of `root` (default '.') with per-entry stats, paging the output if it's long.
    #  all: include dotfiles.  only_directories: hide files.  traverse_symlinks: recurse into symlinked folders.
    import os
    import sys
    printed_lines=[]
    def print_line(line):
        # Echo immediately AND remember the line so the whole tree can be paged at the end
        print(line)
        printed_lines.append(line)
    def get_stats_string(path):
        #Build the colored "[...]" annotation shown after each entry (file counts, sizes, image dims, line counts)
        def is_hidden_file(file):
            return get_file_name(file).startswith('.')
        stats=[]
        color='blue'
        image_file_extensions='png jpg jpeg bmp gif tiff tga exr png'.split()
        if is_a_folder(path):
            try:
                files=get_all_paths(path,include_files=True,include_folders=False,recursive=False)
            except PermissionError:
                #Skip directories we don't have access to, as opposed to crashing
                files=[]
            all_unhidden_file_extensions=([get_file_extension(file) for file in files if not is_hidden_file(file)])
            if len(set(all_unhidden_file_extensions))==1:
                # Homogeneous folder: report "<N> .<ext> files", and shared image dimensions if applicable
                extension=all_unhidden_file_extensions[0]
                if extension.strip():
                    stats.append('%i .%s file'%(len(all_unhidden_file_extensions),extension)+('s' if len(files)!=1 else ''))
                    if extension.lower() in image_file_extensions:
                        dims=None
                        try:
                            display_dims=True
                            for file in shuffled(files)[:15]:#Take only a random sample size of the image files for the sake of speed. Most likely it will be correct.
                                dim=get_image_file_dimensions(file)
                                if dims is None:
                                    dims=dim
                                if dims!=dim:
                                    display_dims=False
                                    break
                        except Exception:
                            display_dims=False
                        if display_dims:
                            stats.append('x'.join(map(str,dims)))
                        # else:
                        #     stats.append('(mixed sizes)')
                else:
                    stats.append('%i file'%(len(all_unhidden_file_extensions),)+('s' if len(files)!=1 else ''))
                    # stats.append('%i (no file extension) file'%(len(all_unhidden_file_extensions),)+('s' if len(files)!=1 else ''))
            else:
                if len(files)>0:
                    stats.append('%i file'%len(files)+('s' if len(files)!=1 else ''))#Number of files in the folder
            if is_symbolic_link(path):
                color='yellow'
                stats.append('is symlink')
        elif is_a_file(path):
            stats.append(get_file_size(path,human_readable=True))
            extension=get_file_extension(path)
            if extension.lower() in image_file_extensions:
                try:
                    stats.append('x'.join(map(str,get_image_file_dimensions(path))))
                except Exception:pass#Maybe it wasn't actually an image file...
            if get_file_size(path,human_readable=False) < 1024*1024*16 and is_utf8_file(path): #If the file is under 16 megabytes large (an arbitrary threshold I use to make sure it's not too slow)
                #TODO: Check to see if is a UTF-8 file
                # import codecs
                # codecs.open(filename, encoding='utf-8', errors='strict')
                #For small files, display the number of lines in the file (assume it's a text file)
                stats.append('%i lines'%number_of_lines_in_file(path))
            if is_utf8_file(path) and get_file_extension(path)=='csv':
                #If it's a CSV file, display the number of columns in that file
                try:
                    import csv
                    number_of_columns=len(next(csv.reader(open(path,'r'), delimiter=',')))
                    stats.append('%i cols'%number_of_columns)
                except Exception:
                    pass
            #Getting number of lines was too slow on large files;
            #else:
            #    try:
            #        #if it's a text file, say how many lines it has
            #        string=text_file_to_string(path)
            #        sum(1 for i in open(path, 'rb'))#https://stackoverflow.com/questions/9629179/python-counting-lines-in-a-huge-10gb-file-as-fast-as-possible
            #        # stats.append(str(number_of_lines(string))+' lines')
            #    except Exception:pass
        if stats:
            return ' '*4 + '\t' + fansi('['+', '.join(stats)+']',color)
        else:
            return ''
    def highlight_child(child,absolute):
        # Folder names are shown in bold blue
        if is_a_folder(absolute):
            return fansi(child,'blue','bold')
        else:
            return child
    class Tree:
        #Walks the directory recursively, printing the box-drawing prefixes and counting entries
        def __init__(self):
            self.dirCount = 0
            self.fileCount = 0
        def register(self, absolute):
            if os.path.isdir(absolute):
                self.dirCount += 1
            else:
                self.fileCount += 1
        def summary(self):
            return str(self.dirCount) + " directories, " + str(self.fileCount) + " files"
        def walk(self, directory, prefix = ""):
            if not is_a_folder(directory):
                return#??? This hack shouldn't be nessecary...
            try:
                filepaths = sorted([filepath for filepath in os.listdir(directory)])
                if only_directories:
                    # fansi_print("all filepaths:"+str(filepaths),'yellow')
                    filepaths=[filepath for filepath in filepaths if is_a_folder(path_join(directory,filepath))]
            except PermissionError:
                #Just in case we get some access-denied error
                filepaths = []
            for index in range(len(filepaths)):
                if not all and filepaths[index][0] == ".":
                    continue
                absolute = os.path.join(directory, filepaths[index])
                self.register(absolute)
                recurse=os.path.isdir(absolute) and traverse_symlinks or not is_symbolic_link(absolute)
                entry= highlight_child(filepaths[index],absolute)+get_stats_string(absolute)
                if index == len(filepaths) - 1:
                    # Last entry in this folder gets the '└──' connector
                    print_line(prefix + "└── " + entry)
                    if recurse:
                        self.walk(absolute, prefix + "    ")
                else:
                    print_line(prefix + "├── " + entry)
                    if recurse:
                        self.walk(absolute, prefix + "│   ")
    try:
        directory = "." if root is None else root
        #if len(sys.argv) > 1:
        #    directory = sys.argv[1]
        print_line(directory)
        tree = Tree()
        tree.walk(directory)
        print_line("\n" + tree.summary())
    except KeyboardInterrupt:
        #If the user gets tired of waiting and just wants the half-baked results, let them have it...
        print_line(fansi("...(incomplete due to a keyboard interrupt, probably because you pressed Control+C before we finished dipslaying the file tree)...",'red','underlined'))
    _maybe_display_string_in_pager(line_join(printed_lines))
    # if len(printed_lines)>get_terminal_height()*.75 and sys.stdout.isatty():
    #     display=(line_join(printed_lines))
    #     display=_line_numbered_string(display)
    #     display=(fansi("TREE: There were a lot of lines in the output (%i), so we're using rp.string_pager() to show them all. Press 'q' to exit, or press 'h' for more opttions."%len(printed_lines),'blue','bold'))+'\n'+displa
    #     string_pager(display)
def _line_numbered_string(string,foreground='cyan',style='bold',background='blue'):
    """Prefix each line of `string` with a right-aligned, fansi-colored, 1-based line number."""
    rows = line_split(string)
    width = len(str(len(rows)))
    numbered = []
    for index, row in enumerate(rows, 1):
        label = fansi(str(index).rjust(width), foreground, style, background)
        numbered.append(label + row)
    return line_join(numbered)
def _vimore(exception):
    """
    Let the user pick a (line number, file) pair from `exception`'s traceback and open
    it in vim with the cursor centered on that line.

    Returns the chosen file path, or None if the traceback has no editable files.
    """
    try:
        files_and_line_numbers = _all_files_listed_in_exception_traceback(exception)
    except Exception:
        #Fixed: previously the variable was left undefined here, causing a NameError below
        files_and_line_numbers = []
        # print("JOLLY")
        # print_verbose_stack_trace(e)
    files_and_line_numbers = [(lineno,file) for file,lineno in files_and_line_numbers if file_exists(file)]
    if not files_and_line_numbers:
        fansi_print(' (There are no editable files in the error\'s traceback)','red')
        #Fixed: previously fell through and called input_select with zero options
        return None
    def localized_path(path):
        #Return either the global or the local path, whichever is more concise
        rel=get_relative_path(path)
        if rel.count('/')<path.count('/'):
            return rel
        else:
            return path
    colno=0 #I'm not sure how to tell which column number an error occured on
    lineno,path= input_select(
        question =fansi('Please choose a linenumber/file pair from the last traceback:',None,'bold') + '\n' + ' ' + \
            fansi('pwd: ') + fansi(get_current_directory(),'yellow')
        ,
        options =files_and_line_numbers,
        stringify=lambda item: fansi(str(item[0]).rjust(6),'cyan') +' '+ localized_path(item[1])
    )
    import subprocess
    #https://stackoverflow.com/questions/3313418/starting-vim-at-a-certain-position-line-and-column-of-a-file
    subprocess.call(["vim",path,'+call cursor(%i,%i)'%(lineno,colno),'+normal zz'])
    return path
def _load_text_from_file_or_url(location):
    """Return the text content of `location`, which may be either a URL or a path to a text file."""
    if is_valid_url(location):
        pip_import('requests')
        import requests
        return requests.request('GET',location).text
    if file_exists(location):
        return text_file_to_string(location)
    assert False,"Neither a text file nor a url: "+repr(location)+"\nERROR: This is neither a valid url nor a text file"
_warning_ignore_filter=('ignore',None,Warning,None,0)
def _warnings_on():
import warnings
warnings.filters=[x for x in warnings.filters if x!=_warning_ignore_filter]
def _warnings_off():
    """Silence all warnings by installing our blanket 'ignore' filter at highest priority."""
    import warnings
    warnings.filters = [_warning_ignore_filter] + warnings.filters
def _warnings_are_off():
    """Return True iff our blanket 'ignore' filter is currently installed."""
    import warnings
    return any(entry == _warning_ignore_filter for entry in warnings.filters)
def _mv(from_path=None,to_dir=None):
    """Interactively move a file or folder, prompting for whichever argument wasn't supplied. Returns move_file's result."""
    if from_path is None:
        fansi_print("Please select a file or folder to be moved",'yellow','bold')
        from_path=input_select_path()
    if to_dir is None:
        print('\n')
        fansi_print("Please select a destination folder to move %s into"%from_path,'yellow','bold')
        to_dir=input_select_folder()
    announcement=(fansi('Moving','blue','bold'),fansi(from_path,'green'),fansi('to directory','blue'),fansi(to_dir,'green'))
    print(*announcement)
    return move_file(from_path,to_dir)
def _absolute_path_ans(ans):
    """Map `ans` (a single path string, or an iterable of paths) through get_absolute_path."""
    if isinstance(ans,str):
        return get_absolute_path(ans)
    return [get_absolute_path(path) for path in ans]
def _relative_path_ans(ans):
    """
    Map `ans` (a single path string, or an iterable of paths) through get_relative_path.
    A leading '~' is expanded first, since get_relative_path would treat it literally.
    """
    def _localize(path):
        #Expand a leading '~' before computing the relative path
        if path.startswith('~'):
            path=os.path.expanduser(path)
        return get_relative_path(path)
    if isinstance(ans,str):
        return _localize(ans)
    #Fixed: the list branch previously skipped '~' expansion, inconsistent with the str branch
    return [_localize(x) for x in ans]
def _rma(ans):
    """
    Interactively delete the path(s) in `ans` after a yes/no confirmation.

    Args:
        ans: a path string, or a list of path strings.
    Raises:
        TypeError: if ans is neither a str nor a list.
        FileNotFoundError: if a single path does not exist.
    """
    if not isinstance(ans,str) and not isinstance(ans,list):
        raise TypeError('RMA: ans should be a str pointing to a file path or a list of file paths, but ans is a '+str(type(ans)))
    if isinstance(ans,list):
        bad_paths=[x for x in ans if not path_exists(x)]
        if bad_paths:
            #Fixed: previously concatenated str + list, which raised a TypeError
            print("The following paths don't exist:\n"+line_join([' '+x for x in bad_paths]))
        if input_yes_no("Are you sure you want to delete the below paths?\n"+line_join([' '+str(x) for x in ans])):
            for x in ans:
                delete_path(x)
    else:
        if not path_exists(ans):
            raise FileNotFoundError(ans)
        ans=get_absolute_path(ans)
        if input_yes_no("Are you sure you want to delete %s?"%ans):
            delete_path(ans)
            print('Deleted path: '+ans)
        else:
            print('Deletion cancelled.'+ans)
def _cpah(paths,method=None):
    """Copy the given path(s) into the current directory. `paths` may be a (possibly multiline) string or a list; `method` defaults to copy_path."""
    if method is None:
        method=copy_path
    if isinstance(paths,str):
        paths=line_split(paths) if '\n' in paths else [paths]
    for target in paths:
        method(target,'.')
def _get_env_info():
    """
    Collect GPU/CUDA/driver/OS information about this machine.

    Adapted from the pytorch github page; used by pseudo_terminal's LEVEL command.
    The original code: https://gist.github.com/93795ffd6380c79ffc1a709500ed9118

    Returns a SystemEnv namedtuple; any field that cannot be probed is None, e.g.
      SystemEnv(cuda_runtime_version='10.1.243', nvidia_gpu_models='GPU 0: NVIDIA GeForce RTX 3090', nvidia_driver_version='470.103.01', os='Ubuntu 20.04.2 LTS (x86_64)')
    Or, on my macbook:
      ans = SystemEnv(cuda_runtime_version=None, nvidia_gpu_models=None, nvidia_driver_version=None, os='macOS 10.15.7 (x86_64)')
    """
    import locale
    import re
    import subprocess
    import sys
    import os
    from collections import namedtuple
    # System Environment Information
    SystemEnv = namedtuple('SystemEnv', [
        'cuda_runtime_version',
        'nvidia_gpu_models',
        'nvidia_driver_version',
        'os',
    ])
    def run(command):
        """Returns (return-code, stdout, stderr)"""
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, shell=True)
        raw_output, raw_err = p.communicate()
        rc = p.returncode
        if get_platform() == 'win32':
            enc = 'oem'
        else:
            enc = locale.getpreferredencoding()
        output = raw_output.decode(enc)
        err = raw_err.decode(enc)
        return rc, output.strip(), err.strip()
    def run_and_read_all(run_lambda, command):
        """Runs command using run_lambda; reads and returns entire output if rc is 0"""
        rc, out, _ = run_lambda(command)
        if rc != 0:
            return None
        return out
    def run_and_parse_first_match(run_lambda, command, regex):
        """Runs command using run_lambda, returns the first regex match if it exists"""
        rc, out, _ = run_lambda(command)
        if rc != 0:
            return None
        match = re.search(regex, out)
        if match is None:
            return None
        return match.group(1)
    def get_nvidia_driver_version(run_lambda):
        # Driver version via kextstat on macOS, nvidia-smi elsewhere
        if get_platform() == 'darwin':
            cmd = 'kextstat | grep -i cuda'
            return run_and_parse_first_match(run_lambda, cmd,
                                             r'com[.]nvidia[.]CUDA [(](.*?)[)]')
        smi = get_nvidia_smi()
        return run_and_parse_first_match(run_lambda, smi, r'Driver Version: (.*?) ')
    def get_gpu_info(run_lambda):
        if get_platform() == 'darwin':
            # NOTE(review): TORCH_AVAILABLE and torch are not defined in this scope,
            # so this branch raises NameError on macOS — squelch() below swallows it,
            # making the field None. Confirm whether that is intended.
            if TORCH_AVAILABLE and torch.cuda.is_available():
                return torch.cuda.get_device_name(None)
            return None
        smi = get_nvidia_smi()
        uuid_regex = re.compile(r' \(UUID: .+?\)')
        rc, out, _ = run_lambda(smi + ' -L')
        if rc != 0:
            return None
        # Anonymize GPUs by removing their UUID
        return re.sub(uuid_regex, '', out)
    def get_running_cuda_version(run_lambda):
        return run_and_parse_first_match(run_lambda, 'nvcc --version', r'release .+ V(.*)')
    def get_cudnn_version(run_lambda):
        """This will return a list of libcudnn.so; it's hard to tell which one is being used"""
        # NOTE(review): this helper is defined but never called by get_env_info() below
        if get_platform() == 'win32':
            system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
            cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%")
            where_cmd = os.path.join(system_root, 'System32', 'where')
            cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path)
        elif get_platform() == 'darwin':
            # CUDA libraries and drivers can be found in /usr/local/cuda/. See
            # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install
            # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac
            # Use CUDNN_LIBRARY when cudnn library is installed elsewhere.
            cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*'
        else:
            cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev'
        rc, out, _ = run_lambda(cudnn_cmd)
        # find will return 1 if there are permission errors or if not found
        if len(out) == 0 or (rc != 1 and rc != 0):
            l = os.environ.get('CUDNN_LIBRARY')
            if l is not None and os.path.isfile(l):
                return os.path.realpath(l)
            return None
        files_set = set()
        for fn in out.split('\n'):
            fn = os.path.realpath(fn)  # eliminate symbolic links
            if os.path.isfile(fn):
                files_set.add(fn)
        if not files_set:
            return None
        # Alphabetize the result because the order is non-deterministic otherwise
        files = list(sorted(files_set))
        if len(files) == 1:
            return files[0]
        result = '\n'.join(files)
        return 'Probably one of the following:\n{}'.format(result)
    def get_nvidia_smi():
        # Note: nvidia-smi is currently available only on Windows and Linux
        smi = 'nvidia-smi'
        if get_platform() == 'win32':
            system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
            program_files_root = os.environ.get('PROGRAMFILES', 'C:\\Program Files')
            legacy_path = os.path.join(program_files_root, 'NVIDIA Corporation', 'NVSMI', smi)
            new_path = os.path.join(system_root, 'System32', smi)
            smis = [new_path, legacy_path]
            for candidate_smi in smis:
                if os.path.exists(candidate_smi):
                    smi = '"{}"'.format(candidate_smi)
                    break
        return smi
    def get_platform():
        # Coarse platform tag: 'linux', 'win32', 'cygwin', 'darwin', or raw sys.platform
        if sys.platform.startswith('linux'):
            return 'linux'
        elif sys.platform.startswith('win32'):
            return 'win32'
        elif sys.platform.startswith('cygwin'):
            return 'cygwin'
        elif sys.platform.startswith('darwin'):
            return 'darwin'
        else:
            return sys.platform
    def get_mac_version(run_lambda):
        return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', r'(.*)')
    def get_windows_version(run_lambda):
        system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
        wmic_cmd = os.path.join(system_root, 'System32', 'Wbem', 'wmic')
        findstr_cmd = os.path.join(system_root, 'System32', 'findstr')
        return run_and_read_all(run_lambda, '{} os get Caption | {} /v Caption'.format(wmic_cmd, findstr_cmd))
    def get_lsb_version(run_lambda):
        return run_and_parse_first_match(run_lambda, 'lsb_release -a', r'Description:\t(.*)')
    def check_release_file(run_lambda):
        return run_and_parse_first_match(run_lambda, 'cat /etc/*-release',
                                         r'PRETTY_NAME="(.*)"')
    def get_os(run_lambda):
        # Human-readable OS description, e.g. 'Ubuntu 20.04.2 LTS (x86_64)'
        from platform import machine
        platform = get_platform()
        if platform == 'win32' or platform == 'cygwin':
            return get_windows_version(run_lambda)
        if platform == 'darwin':
            version = get_mac_version(run_lambda)
            if version is None:
                return None
            return 'macOS {} ({})'.format(version, machine())
        if platform == 'linux':
            # Ubuntu/Debian based
            desc = get_lsb_version(run_lambda)
            if desc is not None:
                return '{} ({})'.format(desc, machine())
            # Try reading /etc/*-release
            desc = check_release_file(run_lambda)
            if desc is not None:
                return '{} ({})'.format(desc, machine())
            return '{} ({})'.format(platform, machine())
        # Unknown platform
        return platform
    def squelch(function,run_lambda):
        # Run a probe, returning None instead of propagating any failure
        try:
            return function(run_lambda)
        except Exception:
            return None
    def get_env_info():
        run_lambda = run
        cuda_runtime_version =squelch(get_running_cuda_version ,run_lambda)
        nvidia_gpu_models    =squelch(get_gpu_info             ,run_lambda)
        nvidia_driver_version=squelch(get_nvidia_driver_version,run_lambda)
        os                   =squelch(get_os                   ,run_lambda)
        return SystemEnv(
            cuda_runtime_version =cuda_runtime_version ,
            nvidia_gpu_models    =nvidia_gpu_models    ,
            nvidia_driver_version=nvidia_driver_version,
            os                   =os                   ,
        )
    # NOTE(review): env_info_fmt is unused — get_env_info() is returned directly
    env_info_fmt = """
CUDA runtime version: {cuda_runtime_version}
GPU models and configuration: {nvidia_gpu_models}
Nvidia driver version: {nvidia_driver_version}
OS: {os}
"""
    return get_env_info()
def _view_image_via_textual_imageview(image):
    """
    View an image in a terminal via the textual-imageview TUI.

    Args:
        image: either a path to an image file, or an in-memory image (per is_image).
               In-memory images are saved to a temporary PNG and viewed via recursion.
    COLORTERM is temporarily forced to 'truecolor' so colors render properly.
    """
    assert isinstance(image, str) or is_image(image)
    pip_import('textual_imageview', 'textual-imageview')
    import textual_imageview.app
    if isinstance(image, str):
        original_colorterm = os.getenv('COLORTERM')
        try:
            os.environ['COLORTERM'] = 'truecolor'
            app = textual_imageview.app.ImageViewerApp(image)
            app.run()
        finally:
            #Restore the caller's COLORTERM exactly as we found it
            if original_colorterm is None:
                del os.environ['COLORTERM']
            else:
                os.environ['COLORTERM'] = original_colorterm
    else:
        assert is_image(image)
        #Fixed: allocate the temp path before the try block, so the finally clause
        #can't raise a NameError if temporary_file_path itself were to fail
        path = temporary_file_path('png')
        try:
            save_image(image, path)
            _view_image_via_textual_imageview(path)
        finally:
            delete_file(path)
def _ISM(ans,*,preview:str=None):
    """
    Input Select Multi
    TODO make it for things other than lists of strings, like lists of ints. To do this make it into a line-numbered string dict -> values then use those values to look up keys -> get answer. Better yet create a fzf wrapper for this task - to select non-string things!
    """
    try:
        ans=dict(ans)
    except Exception:
        pass
    if isinstance(ans,dict):
        return _filter_dict_via_fzf(ans,preview=preview)
    if isinstance(ans,str):
        #Strings: select among their lines (reversed so we see the string properly), then re-join the picks
        reversed_lines=line_split(ans)[::-1]
        return line_join(_ISM(reversed_lines,preview=preview))
    return _iterfzf(ans,multi=True,exact=True,preview=preview)
_which_cache={}
def _which(cmd):
#Faster than using the which command...
import shutil
util_which = shutil.which(cmd)
if util_which:
return util_which
def update(cmd):
output=shell_command('which '+cmd)
_which_cache[cmd]=output
if cmd in _which_cache:
run_as_new_thread(update,cmd)
return _which_cache[cmd]
else:
update(cmd)
return _which_cache[cmd]
_whiches_cache=None #Maps every system command name -> its `which` resolution; built lazily
def _whiches():
    """Return {command_name: which_path} for all system commands, caching the result (later calls refresh in the background)."""
    global _whiches_cache
    def refresh(show_progress=False):
        global _whiches_cache
        names = get_system_commands()
        paths = load_files(_which, names, show_progress=show_progress)
        _whiches_cache = dict(zip(names, paths))
    if _whiches_cache is None:
        refresh(show_progress='eta:r._whiches') #First call: block while building the cache
    else:
        run_as_new_thread(refresh)              #Subsequent calls: serve cached, refresh async
    return _whiches_cache
def _ism_whiches():
    """Multi-select among all resolved system commands. A real which hunt if I do say so myself!"""
    return _ISM(_whiches())
def _view_with_pyfx(data):
    """Interactively browse `data` with the bundled pyfx TUI. If `data` is a path to a .json/.yml/.yaml text file, it is parsed first."""
    from rp.libs.pyfx.app import PyfxApp
    from rp.libs.pyfx.model import DataSourceType
    looks_like_file = isinstance(data,str) and '\n' not in data and is_utf8_file(data)
    if looks_like_file:
        if data.endswith('.json'):
            data=load_json(data)
        elif data.endswith(('.yml','.yaml')):
            data=load_yaml_file(data)
    PyfxApp().run(DataSourceType.VARIABLE, data)
def _view_json_via_jtree(json):
    """
    View JSON interactively in the terminal with the jtree TUI.

    Args:
        json: either a path to an existing .json file, or a JSON-serializable python
              object (objects are round-tripped through a temporary file).
    """
    pip_import('jtree')
    if isinstance(json,str):
        assert file_exists(json)
        import jtree
        #Fixed: close the file handle when the viewer exits instead of leaking it
        with open(json) as file:
            jtree.JSONTreeApp(file).run()
    else:
        temp_json_path=temporary_file_path('json')
        try:
            save_json(json,temp_json_path)
            _view_json_via_jtree(temp_json_path)
        finally:
            delete_file(temp_json_path)
def _view_interactive_json(data):
    """Browse `data` interactively: prefer the jtree viewer when `data` is JSON-serializable (or a path string); fall back to pyfx otherwise."""
    try:
        import json
        if not isinstance(data, str):
            json.dumps(data) #Probe: raises when data is not JSON-serializable
        _view_json_via_jtree(data)
    except Exception:
        _view_with_pyfx(data)
def _get_processor_name():
import os, platform, subprocess, re
#https://stackoverflow.com/questions/4842448/getting-processor-information-in-python
if platform.system() == "Windows":
return platform.processor()
elif platform.system() == "Darwin":
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/sbin'
command ="sysctl -n machdep.cpu.brand_string"
return shell_command(command)
return subprocess.check_output(command).strip()
elif platform.system() == "Linux":
command = "cat /proc/cpuinfo"
all_info = subprocess.check_output(command, shell=True).strip().decode()
for line in all_info.split("\n"):
if "model name" in line:
return re.sub( ".*model name.*:", "", line,1)
return ""
def _display_columns(entries,title=None):
    """Print `entries` in auto-fitting columns using rich, with an optional title above them."""
    pip_import('rich')
    import rich
    from rich.columns import Columns
    layout = Columns(entries, equal=False, expand=False, title=title)
    rich.print(layout)
def _input_select_multiple_history_multiline(history_filename=history_filename,old_code=None):
    """
    Let the user multi-select whole paragraphs (blank-line-separated entries) from the
    history file via fzf, newest first, with a preview pane (syntax-highlighted when
    fansi is enabled). When old_code is given, the preview shows a diff against it.
    Returns the selected paragraphs joined by blank lines, or None if the user cancelled.
    """
    history=text_file_to_string(history_filename)
    paragraphs=history.split('\n\n')
    paragraphs=paragraphs[::-1] #Newest entries first
    def process_paragraph(paragraph):
        #Strip the single leading marker character from every line except the first
        lines=paragraph.splitlines()
        lines[1:]=[x[1:] for x in lines[1:]]
        return line_join(lines)
    paragraphs=[process_paragraph(x) for x in paragraphs]
    # if fansi_is_enabled():
    #     paragraphs=map(fansi_syntax_highlighting,paragraphs)
    import json
    # lines=map(repr,paragraphs)
    #JSON-escape each paragraph onto a single line (dropping the surrounding quotes) so fzf can display it
    lines=map(json.dumps,paragraphs)
    lines=(x[1:-1] for x in lines)
    preview_width=get_terminal_width()//2-2-2
    #Older versions:
    # lines=pip_import('iterfzf').iterfzf(lines,multi=True,exact=True,preview='echo {} | fold -w %i'%preview_width)
    # lines=pip_import('iterfzf').iterfzf(lines,multi=True,exact=True,preview='echo {} | nl -v 0 | fold -w %i'%preview_width) #Have line numbers Start from 0
    #Ideally we would have a program take a pure python string and syntax highlight it with numbers and wrapping. However, rp loads too slowly for this to be decently fast - much less instant like fzf is
    #NOTE this syntax highlighting is hampered by a problem: the ansi escape codes are counted towards the line wrapping (done by the fold command). Idk how to ignore them in fold,
    #You can disable highlighting with fansi_off()
    # lines=pip_import('iterfzf').iterfzf(lines,multi=True,exact=True,preview='echo {} | nl -b a -v 0 -s\\|\ \ | fold -w %i'%preview_width) #
    if old_code is None:
        if fansi_is_enabled():
            lines=_iterfzf(lines,multi=True,exact=True,preview='echo {} | %s %s %i '%(
                json.dumps(sys.executable),
                json.dumps(get_module_path("rp.experimental.stdin_python_highlighter")),
                preview_width),
            ) #
        else:
            lines=_iterfzf(lines,multi=True,exact=True,preview=r'echo {} | nl -b a -v 0 -s\\|\ \ | fold -w %i'%preview_width) #
        # lines=pip_import('iterfzf').iterfzf(lines,multi=True,exact=True,preview='echo {} | nl -v 0 | fold -w %i'%preview_width) #Have line numbers Start from 0
    else:
        assert old_code is not None
        #We're gonna do diffs and merge them
        #No support for fansi_disabled right now
        lines=_iterfzf(lines,multi=True,exact=True,preview='echo {} | %s %s %i %s %s'%(
            json.dumps(sys.executable),
            json.dumps(get_module_path("rp.experimental.stdin_python_highlighter")),
            preview_width,
            "diff_mode",
            json.dumps(json.dumps(old_code)),
        ),
        ) #
        if lines is None:
            #The user cancelled
            return None
        # return line_join(lines)
    #NOTE(review): this None-check duplicates the one inside the else-branch above (harmless redundancy)
    if lines is None:
        #The user cancelled
        return None
    # highlighter_code=text_file_to_string(get_module_path("rp.experimental.stdin_python_highlighter"))
    # highlighter_code=highlighter_code.replace("HARDCODED_WIDTH=None","HARDCODED_WIDTH=%i")%preview_width
    # lines=pip_import('iterfzf').iterfzf(lines,multi=True,exact=True,preview='echo {} | %s -i {} '%(
    #     # json.dumps(get_module_path("rp.experimental.stdin_python_highlighter")),
    #     json.dumps(__file__),
    #     json.dumps(sys.executable),
    #     preview_width),
    # ) #
    import ast #NOTE(review): unused — only the commented-out literal_eval line below needed it
    # selected_paragraphs=[ast.literal_eval(x) for x in lines]
    selected_paragraphs=[json.loads('"'+x+'"') for x in lines] #Un-escape each pick back into a real paragraph
    # out=line_join(lines)
    out='\n\n'.join(selected_paragraphs)
    # if fansi_is_enabled():
    #     out=strip_ansi_escapes(out)
    out = out.splitlines()
    if len(out)>1 and out[1].startswith('!'):
        #If selecting a ! command put the ! before the date
        if True:
            #DELETE THE DATE
            out=out[1:]
        else:
            #KEEP THE DATE
            out[0]='!'+out[0]
            out[1]=out[1][1:]
    out=line_join(out)
    return out
def _autocomplete_lss_name(lss_name,command_name=None):
    """
    If there's an autocomplete thing in prompt-toolkit autocompletions thats a path return it otherwise dont change the input
    That way, 'LSS co' --> 'LSS CogVideoX'
    """
    if command_name is not None:
        #Strip the leading "<command> " prefix off the query
        lss_name=lss_name[len(command_name)+1:]
    import rp.r_iterm_comm as ric
    candidates=ric.current_candidates
    should_complete=(
        bool(candidates)
        and fuzzy_string_match(lss_name,candidates[0],case_sensitive=False)
        and not path_exists(lss_name)
    )
    if should_complete:
        #Don't need tab to autocomplete these paths, which is why it's fast...
        best=candidates[0]
        best=best.strip('/').strip('\\')
        if path_exists(best):
            if best!=lss_name and command_name is not None:
                fansi_print(
                    command_name
                    + ": Completed "
                    + repr(lss_name)
                    + " to "
                    + repr(best),
                    "blue",
                )
            lss_name=best
    return lss_name
def _input_select_multiple_history(history_filename=history_filename):
    """Multi-select single lines from the history file via fzf; returns the chosen lines joined by newlines."""
    content=text_file_to_string(history_filename)
    width=get_terminal_width()//2-2
    picked=_iterfzf(content.splitlines(),multi=True,exact=True,preview='echo {} | fold -w %i'%width)
    #Drop each line's leading marker character, re-adding '#' for comment lines
    cleaned=[('#' if entry.startswith('#') else '')+entry[1:] for entry in picked]
    # if len(cleaned)>1 and cleaned[1].startswith('!'):
    #     #If selecting a ! command put the ! before the date
    #     cleaned[0]='!'+cleaned[0]
    #     cleaned[1]=cleaned[1][1:]
    return line_join(cleaned)
#Flag read by pterm: set whenever pip changes the installed-package set, so module caches can be refreshed
_need_module_refresh=False #Set to true if we do something with pip. Used by pterm
#def pudb_shell(_globals,_locals_):
#    #https://documen.tician.de/pudb/shells.html
#    pseudo_terminal(_globals,_locals)
def _pterm_fuzzy_cd(query_path, do_cd=False):
    """
    Resolve `query_path` component-by-component with fuzzy folder-name matching,
    so e.g. 'Doc/proj' can resolve to './Documents/projects'.

    Exact component names win over fuzzy matches; a unique case-sensitive fuzzy
    match wins over multiple case-insensitive ones. On zero or ambiguous matches
    a diagnostic is printed and the original `query_path` is returned unchanged.
    If `do_cd` is True the process cd's into the resolved folder via _pterm_cd.
    Returns the resolved path (or the untouched query on failure).
    """
    def is_a_match(query_path, real_path, case_sensitive):
        #Compare just the final path components, fuzzily
        query_name = get_path_name(query_path)
        real_name = get_path_name(real_path )
        if query_name in ['','.','..','/']:
            #Special names
            return True
        # print(query_name, real_name)
        return fuzzy_string_match(query_name, real_name, case_sensitive=case_sensitive)
    query_path=os.path.expanduser(query_path)
    #Pick the root the walk starts from
    if query_path.startswith('/'):
        #Doesn't work for windows. Who cares lol
        root='/'
    elif query_path.startswith('~/'):
        root=get_home_directory()
    else:
        root='.'
    new_pwd = root
    failed=False
    def joined_names(names):
        #Shell-quoted, sorted preview of up to 5 names (random sample if more)
        #NOTE(review): uses `shlex` via closure — it is bound by the `import shlex`
        #statements in the error branches below before this is ever called
        names=sorted(names)
        max_len=5
        if len(names)>max_len:
            return joined_names(
                random_batch(names, max_len, retain_order=True)
            ) + " ... %i more not shown ... " % (len(names) - max_len)
        return ' '.join(map(shlex.quote, names))
    subpaths = path_split(query_path)
    if subpaths and subpaths[0]=='/':
        del subpaths[0]
    for query_name in subpaths:
        subfolders = _get_all_paths_fast(new_pwd, include_files=False)
        query_pwd = path_join(new_pwd, query_name)
        # from icecream import ic
        # ic(query_name,query_pwd,new_pwd)
        #If there's a direct match, don't try fuzzy searching
        if query_name in get_path_names(subfolders):
            new_pwd = path_join(new_pwd, query_name)
            continue
        #Do fuzzy matching
        case_sensitive_matches = sorted(x for x in subfolders if is_a_match(query_pwd, x, True ))
        case_insensitive_matches = sorted(x for x in subfolders if is_a_match(query_pwd, x, False))
        #If we get multiple fuzzy matches with case-insensitive, try case sensitive
        if len(case_sensitive_matches)==1:
            matches = case_sensitive_matches
        else:
            matches = case_insensitive_matches
        #Handle each case
        if len(matches)==1:
            new_pwd = matches[0]
            continue
        elif len(matches)==0:
            import shlex
            print(
                fansi("Can't find any fuzzy matches for ", "red")
                + fansi(query_name, "cyan", "bold")
                + fansi(" in ", 'red')
                +_fansi_highlight_path(new_pwd)
                + "\n "
                + fansi("Subfolders: ", "red")
                + fansi(
                    joined_names(get_folder_names(subfolders)),
                    "yellow",
                )
            )
            failed = True
            break
        elif len(matches)>1:
            if len(subpaths)==1:
                #Break the ambiguity with the current completion candidates if available...
                #Currently for simplicity of implementation checking we're not going deep into paths aka len(subpaths)==1...
                can = _ric_current_candidate_fuzzy_matches(query_name)
                if can is not None:
                    new_pwd = can.strip('/')
                    if currently_running_windows():
                        new_pwd = new_pwd.strip("\\")
                    continue
            import shlex
            print(
                fansi("Multiple fuzzy matches for ", "red")
                + fansi(query_name, "cyan", "bold")
                + fansi(" in ", 'red')
                +_fansi_highlight_path(new_pwd)
                + "\n "
                + fansi("Matches: ", "red")
                + fansi(
                    joined_names(matches),
                    "yellow",
                )
            )
            failed = True
            break
        else:
            assert False,'impossible'
    #Return the path and maybe cd into it
    if failed:
        return query_path
    else:
        if do_cd:
            _pterm_cd(new_pwd)
        return new_pwd
def _ric_current_candidate_fuzzy_matches(query):
    """Return prompt-toolkit's top completion candidate iff it fuzzy-matches `query` (case-insensitive), else None."""
    import rp.r_iterm_comm as ric
    candidates = ric.current_candidates
    if not candidates:
        return None
    best = candidates[0]
    return best if fuzzy_string_match(query, best, case_sensitive=False) else None
def _pterm_cd(dir,repeat=1):
    """cd into `dir` (applying it `repeat` times, useful for relative hops like '..'), recording the previous pwd in _cd_history, appending the new pwd to sys.path, and echoing it."""
    dir=os.path.expanduser(dir)
    previous=get_current_directory()
    if _cd_history and _cd_history[-1]!=previous:
        _cd_history.append(previous)
    for _ in range(repeat):
        set_current_directory(dir)
    sys.path.append(get_absolute_path(get_current_directory()))
    print(_fansi_highlight_path(get_current_directory()))
def _profile_vim_startup_plugins():
    """Profile vim plugin startup times (delegates to rp.libs.profile_vim_plugins)."""
    from rp.libs.profile_vim_plugins import run as _run
    _run()
def _view_markdown_in_terminal(file_or_string):
    """
    Render markdown in the terminal with the frogmouth TUI.

    Args:
        file_or_string: a path to a markdown file, or a markdown string
            (strings are written to a temporary .md file that is removed afterwards).
    """
    pip_import("frogmouth")
    import shlex
    if file_exists(file_or_string.strip()):
        path=file_or_string
        temp_path=False
    else:
        path=temporary_file_path('md')
        path=string_to_text_file(path,file_or_string)
        temp_path=True
    try:
        #Fixed: shell-quote with shlex.quote — python repr() is not valid shell
        #quoting for paths containing quotes or backslashes
        os.system('frogmouth '+shlex.quote(path)) # Displays markdown
    finally:
        #Fixed: clean up the temp file even if frogmouth exits abnormally
        if temp_path:
            delete_file(path)
def _get_function_names(ans):
if isinstance(ans, str):
return ans
elif hasattr(ans,"__name__"):
return ans.__name__
elif is_iterable(ans):
return [_get_function_names(x) for x in ans]
else:
raise ValueError("Is not a function: type(ans)="+str(type(ans)))
def _convert_powerpoint_file(path,message=None):
    """Process a PowerPoint file via rp.experimental.process_powerpoint_file, optionally printing `message` first."""
    if message is not None:
        fansi_print(message,'green','bold')
    from rp.experimental import process_powerpoint_file as _process
    return _process(path)
def _write_default_gitignore():
    """Append rp's default ignore patterns to the current git repo's .gitignore, skipping the write if the block is already present. Returns the .gitignore path."""
    extensions='pyc bak swo swp swn swm swh swi swj swk swl swm swn swo swp un~ gstmp ipynb_checkpoints DS_Store'.split()
    patterns=['*.'+ext for ext in extensions]
    block_lines=(
        ["#<RP Default Gitignore Start>"]
        + patterns
        + ["#<RP Default Gitignore End>"]
    )
    block_text="\n"+line_join(block_lines)+"\n\n"
    repo_dir=get_parent_folder(get_git_repo_root())
    gitignore_path=path_join(repo_dir,'.gitignore')
    already_present=file_exists(gitignore_path) and block_text.strip() in load_text_file(gitignore_path)
    if already_present:
        fansi_print("Nothing written, "+gitignore_path+" already has rp's list "+gitignore_path,'green','bold')
    else:
        append_line_to_file(block_text,gitignore_path)
        fansi_print("Wrote lines to "+gitignore_path,'green','bold')
    return gitignore_path
def _add_pterm_prefix_shortcut(shortcut:str,replacement:str):
    """
    When using pterm, you can type commands like 'pi ' --> 'PIP install '
    This lets you add custom ones from your rprc file, like _add_prefix_shortcut('fu','!fileutil')
    """
    assert isinstance(shortcut,str), shortcut
    valid_replacement=isinstance(replacement,str) or isinstance(replacement, list) and len(replacement)==2
    assert valid_replacement, replacement
    import rp.r_iterm_comm as ric
    ric.kibble_shortcuts[shortcut]=replacement
def _add_pterm_command_shortcuts(shortcuts:str):
    """
    EXAMPLE:
    >>> _add_pterm_command_shortcuts('''
    >>> CLC $r._pterm_cd("~/CleanCode")
    >>> RZG $os.system(f"cd {$get_path_parent($get_module_path(rp)} ; lazygit")
    >>> ''')
    """
    import rp.r_iterm_comm as ric
    ric.additional_command_shortcuts += shortcuts.splitlines()
def _get_pterm_verbose():
    # Debug toggle: swap which `return` comes first to enable verbose pterm output
    return False
    return True  # intentionally unreachable — kept as the "on" position of the toggle
class _PtermLevelTitleContext:
    """
    Context manager that temporarily appends '[<level_title>]' to the terminal
    session title while a pseudo_terminal level runs, restoring the previous
    title on exit. A falsy level_title turns this into a no-op.
    """
    def __init__(self, level_title):
        import rp.r_iterm_comm as ric
        self.level = ric.pseudo_terminal_level
        self.should_do = bool(level_title)
        if self.should_do:
            self.level_title = level_title
            self.old_title = rp.r._get_session_title()
            self.end = ']'
            self.new_title = self.old_title + '[' + str(self.level_title) + self.end
    def __enter__(self):
        if self.should_do:
            rp.r._set_session_title(self.new_title)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.should_do:
            rp.r._set_session_title(self.old_title)
# Global state for no_gc(): a re-entrant lock plus a nesting counter, so nested
# no_gc() blocks only re-enable gc at the outermost exit.
_no_gc_gc_lock = threading.RLock()
_no_gc_disable_count = 0
_no_gc_was_enabled = True
@contextlib.contextmanager
def no_gc():
    """
    Simple context manager to temporarily disable garbage collection.
    Thread-safe and supports nested calls.
    Example:
        with no_gc():
            # Critical code without garbage collection
            process_large_dataset()
    """
    import gc
    #Fixed: removed redundant local `import threading` and `import contextlib` —
    #both were unused inside this body (threading and contextlib must already be
    #importable at module level for the lock and the decorator above to exist)
    global _no_gc_disable_count, _no_gc_was_enabled
    with _no_gc_gc_lock:
        if _no_gc_disable_count == 0:
            #Remember the caller's gc state at the outermost entry only
            _no_gc_was_enabled = gc.isenabled()
        gc.disable()
        _no_gc_disable_count += 1
    try:
        yield
    finally:
        with _no_gc_gc_lock:
            _no_gc_disable_count -= 1
            if _no_gc_disable_count == 0 and _no_gc_was_enabled:
                #Only re-enable gc if it was enabled before the outermost no_gc()
                gc.enable()
_user_created_var_names=set() #Names of variables created by the user during pseudo_terminal sessions
_cd_history=[] #Stack of previously-visited working directories, maintained by _pterm_cd
def pseudo_terminal(
*dicts,
get_user_input=python_input,
modifier=None,
style=pseudo_terminal_style(),
enable_ptpython=True,
eval=eval,
exec=exec,
rprc="",
level_title=""
):
"""An interactive terminal session, powered by RP """
with _PtermLevelTitleContext(level_title):
try:
import signal
signal.signal(signal.SIGABRT,lambda:"rpy: pseudo terminal: sigabrt avoided!")
except Exception as E:
fansi_print("Warning: This pseudo_terminal is being started in a separate thread",'yellow')
# print_stack_trace(E)
import re
import sys
pwd=get_current_directory()
if pwd not in sys.path:
sys.path.append(pwd)
# TODO: Make better error reports than are available by default in python! Let it debug things like nested parenthesis and show where error came from instead of just throwing a tantrum.
# @author: Ryan Burgert 2016,2017,2018
try:
import readline# Makes pseudo_terminal nicer to use if in a real terminal (AKA if using pseudo_terminal on the terminal app on a mac); aka you can use the up arrow key to go through history etc.
import rlcompleter
readline.parse_and_bind("tab: complete")#Enable autocompletion even with PT OFF https://docs.python.org/2/library/rlcompleter.html
except:
pass# Not important if it fails, especially on windows (which doesn't support readline)
# from r import fansi_print,fansi,space_split,is_literal,string_from_clipboard,mini_editor,merged_dicts,print_stack_trace# Necessary imports for this method to function properly.
import rp.r_iterm_comm# Used to talk to ptpython
def level_label(change=0):
return (("(Level "+str(rp.r_iterm_comm.pseudo_terminal_level+change)+")")if rp.r_iterm_comm.pseudo_terminal_level else "")
try:
fansi_print(style.message() +' '+ level_label(),'blue','bold')
rp.r_iterm_comm.pseudo_terminal_level+=1
from copy import deepcopy,copy
def dictify(d):# If it's an object and not a dict, use it's __dict__ attribute
if isinstance(d,dict):
return d
return d.__dict__
new_dicts=[get_scope(1)]
for d in dicts:
new_dicts[0].update(d)
new_dicts[0]['ans']=None
dicts=new_dicts
# dicts=[{"ans":None,'blarge':1234}]#,*map(dictify,dicts)]# Keeping the 'ans' variable separate. It has highest priority
def dupdate(d,key,default=None): # Make sure a key exists inside a dict without nessecarily overwriting it
if key not in d:
d[key]=default
try:
dupdate(dicts[0],'ans')
except Exception:pass
def scope():
return merged_dicts(*reversed(dicts))
def equal(a,b):
if a is b:
return True
try:
#Uses the Dill library...
if handy_hash(a)==handy_hash(b):
return True
else:
return id(a)==id(b)
except Exception as e:
pass
try:
try:
import numpy as np
if isinstance(a,np.ndarray) or isinstance(b,np.ndarray):
if isinstance(a,np.ndarray) != isinstance(b,np.ndarray):
return False
if isinstance(a,np.ndarray) and isinstance(b,np.ndarray):
if not a.shape==b.shape:
return False
return np.all(a==b)
except:
pass
if a==b:
return True
# else:
# exec(mini_terminal)
return a==b # Fails on numpy arrays
except Exception:pass
return a is b # Will always return SOMETHING at least
class UndoRedoStack():
    """A pair of stacks implementing undo/redo semantics.

    ``undo`` moves the newest recorded value onto the front of the redo
    queue; ``redo`` moves it back.  By default, recording a new value via
    ``do`` discards any pending redos (standard editor behavior); pass
    ``clear_redo_on_do=False`` to keep them.

    TODO: reusable for PREV, NEXT, CDB, UNDO, REDO, PREVMORE, NEXTMORE.
    """
    def __init__(self,clear_redo_on_do=True):
        self.undo_stack=[]
        self.redo_stack=[]
        self.clear_redo_on_do=clear_redo_on_do
    def can_undo(self):
        # Anything recorded and not yet undone?
        return bool(self.undo_stack)
    def can_redo(self):
        # Anything undone and not yet redone?
        return bool(self.redo_stack)
    def undo(self):
        # Pop the newest value and park it at the head of the redo queue.
        value=self.undo_stack.pop()
        self.redo_stack.insert(0,value)
        return value
    def redo(self):
        # Reverse of undo(): take the head of the redo queue back.
        value=self.redo_stack.pop(0)
        self.undo_stack.append(value)
        return value
    def do(self,value):
        # Recording a new value invalidates pending redos (unless configured otherwise).
        if self.clear_redo_on_do:
            self.redo_stack.clear()
        self.undo_stack.append(value)
    def do_if_new(self,value):
        # Skip consecutive duplicates to avoid no-op undo steps.
        if not (self.undo_stack and self.undo_stack[-1]==value):
            self.do(value)
error_stack=UndoRedoStack(clear_redo_on_do=False)
def deep_dark_dict_copy(d):
    """Copy a namespace dict as faithfully as possible for UNDO snapshots.

    For each value: try ``deepcopy`` first, fall back to shallow ``copy``,
    and finally keep the original reference if both fail.  A copy is only
    accepted if ``equal(original, copy)`` holds, so objects whose copies
    diverge from the original are stored by reference instead.
    Modules are stored by reference outright (copying them is very slow).
    """
    out={}
    import warnings
    with warnings.catch_warnings():
        # Some modules emit warnings when copied, e.g.:
        # /Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/copy.py:164: RuntimeWarning: use movie: No module named 'pygame.movie'
        warnings.simplefilter("ignore")
        for k in d:
            try:
                import types
                if isinstance(d[k],types.ModuleType):
                    # When copying xonsh, the copy process below was really slow;
                    # skipping modules is a worthwhile special-case optimization.
                    raise Exception
                try:
                    # Trial deepcopy: only keep it if it round-trips as equal.
                    q=deepcopy(d[k])
                    if equal(d[k],q):
                        out[k]=deepcopy(d[k])
                    else:
                        raise Exception
                except:
                    # Deepcopy failed or diverged; try a shallow copy instead.
                    q=copy(d[k])
                    if equal(d[k],q):
                        out[k]=copy(d[k])
                    else:
                        raise Exception
            except:
                # Both copies failed; store the live object by reference.
                out[k]=d[k]
    return out
global _need_module_refresh
def get_snapshot():
    """Return a snapshot (best-effort copies) of every namespace dict."""
    return [deep_dark_dict_copy(namespace) for namespace in dicts]
def set_snapshot(snapshot):
    """Restore the namespace dicts from *snapshot*, mutating them in place.

    snapshot is a list of dicts (parallel to ``dicts``).  For each pair,
    keys are removed, added, or overwritten as needed, printing a colored
    summary line per change.  Returns True iff anything changed.
    """
    for s,d in zip(snapshot,dicts):
        assert isinstance(d,dict)
        assert isinstance(s,dict)
        sk=set(s) # snapshot keys
        dk=set(d) # dict keys
        changed=False
        # Keys present now but absent from the snapshot: delete them.
        for k in dk-sk : # -{'__builtins__'}:# '__builtins__' seems to be put there as a consequence of using eval or exec, no matter what we do with it. It also is confusing and annoying to see it pop up when reading the results of UNDO
            # assert isinstance(k,str)
            print(fansi(" - Removed: ",'red')+k)
            changed=True
            del d[k]
        # Keys in the snapshot but missing now: restore them.
        for k in sk-dk : # -{'__builtins__'}:
            # assert isinstance(k,str)
            print(fansi(" - Added: ",'green')+k)
            changed=True
            d[k]=s[k]
        # Keys in both: overwrite only when the value actually differs.
        for k in dk&sk : # -{'__builtins__'}:
            assert k in dk
            assert k in sk
            assert isinstance(k,str)
            if not equal(s[k],d[k]):# Only report/assign real changes, to avoid spam
                print(fansi(" - Changed: ",'blue')+k)
                changed=True
                d[k]=s[k]
    return changed
gave_undo_warning=False
def take_snapshot():
nonlocal gave_undo_warning
import time
start=time.time()
if snapshots_enabled:
snapshot_history.append(get_snapshot())
if not gave_undo_warning and time.time()-start>.25:#.25 seconds is way too long to wait for a new prompt. We're delaying the prompt, and this can get annoying quickly...
fansi_print("NOTE: ",'blue','bold',new_line=False)
fansi_print("pseudo_terminal took "+str(start())[:5]+" seconds to save the UNDO snapshot, which might be because of a large namespace. If your prompts are lagging, this is probably why. You can fix this by using 'UNDO ALL', 'UNDO OFF'. This message will only show once.",'blue','bold')
gave_undo_warning=True
def get_ans():
    """Return the current 'ans' value from the primary namespace dict."""
    dupdate(dicts[0],'ans')  # Make sure the 'ans' key exists (defaults to None)
    return dicts[0]['ans']# This should exist now
should_print_ans=True
# A little python weridness demo: ⮤print(999 is 999)⟶True BUT ⮤a=999⮤print(a is 999)⟶False
use_ans_history=True
def set_ans(val,save_history=True,snapshot=True,force_green=False):
    """Assign *val* to the REPL's 'ans' variable and (optionally) print it.

    save_history: also append to ans_history (suppressed when use_ans_history is off)
    snapshot:     take an UNDO snapshot of the current state BEFORE assigning
    force_green:  print in green even when not saved to history
    """
    try:
        # Mirror ans into the inter-process comm module so prompt-toolkit sees it.
        import rp.r_iterm_comm as ric
        ric.ans=val
        save_history&=use_ans_history
        dupdate(dicts[0],'ans')
        if snapshot:# default: save changes in a snapshot BEFORE making modifications to save current state! snapshot_history is independent of ans_history
            take_snapshot()
        if save_history:
            ans_history.append(val)
        dicts[0]['ans']=val
    except Exception as e:
        print_verbose_stack_trace(e)
        print("HA HA CAUGHT YOU LA SNEAKY LITTLE BUG! (Idk if this ever errors...but when it might...it's rare")
    if should_print_ans!=False:
        try:
            # Some objects have broken __str__ ("__str__ returned non-string (type NoneType)")
            val_str=str(val)
        except TypeError as error:
            val_str='(Error when converting ans to string: %s)'%error
        try:
            import numpy as np
            set_numpy_print_options(linewidth=max(0,get_terminal_width()-len('ans = ')))#Make for prettier numpy printing, by dynamically adjusting the linewidth each time we enter a command
            if type(val).__name__ in 'ndarray DataFrame Series Tensor'.split() and len(line_split(val_str))>1:#Recognize pandas dataframes, series, numpy Arrays, pytorch Tensors
                # if isinstance(val,np.ndarray) and len(line_split(val_str))>1:
                #It will take more than one line to print this numpy array.
                #Example:
                #   ans = [[ 1 -3 -5  0  0  0]
                # [ 0  1  0  1  0  0]
                # [ 0  0  2  0  1  0]
                # [ 0  3  2  0  0  1]]
                #The above is ugly, because the top row isn't aligned with the others, because it takes up multiple lines.
                #There's a way to handle it, which prevents a line containing just 'ans=' from existing:
                # Pad every continuation line with the width of the 'ans = ' prefix.
                val_str=line_split(val_str)
                val_str=[val_str[0]]+[' '*len('ans = ')+line for line in val_str[1:]]
                val_str='\n'.join(val_str,)
                #The result:
                #   ans = [[ 1 -3 -5  0  0  0]
                #          [ 0  1  0  1  0  0]
                #          [ 0  0  2  0  1  0]
                #          [ 0  3  2  0  0  1]]
                #Which is much prettier.
        except Exception:pass#print("Failed to set numpy width")# AttributeError: readonly attribute '__module__'
        # green = saved to history; yellow = not saved; gray = ans history disabled
        fansi_print("ans = " + val_str,('green'if save_history or force_green else 'yellow')if use_ans_history else 'gray')
def print_history(return_as_string_instead_of_printing=False):
    """Render the HISTORY listing of all successful commands.

    Single-line commands are green; multi-line commands are yellow, with
    every other multi-line command bolded so adjacent ones stay visually
    distinct.  Returns the string when the flag is set, otherwise prints
    it (possibly through the pager).
    """
    output=''
    output+=fansi("HISTORY --> Here is a list of all valid python commands you have entered so far (green means it is a single-line command, whilst yellow means it is a multi-lined command):",'blue','underlined')+'\n'
    flipflop=False
    def fansify(string,*args):
        # Apply fansi per-line so ANSI codes don't bleed across newlines.
        return line_join([fansi(line,*args) for line in line_split(string)])
    for x in successful_command_history:
        multiline='\n' in x
        if x.strip():#And x.strip() because we don't want to alternate bolding if it's invisible cause then it would look like we have two bold in a row
            flipflop=not flipflop#Print every other yellow prompt in bold
        output+=fansify(x,'yellow' if multiline else'green','bold' if multiline and flipflop else None)+'\n'# Single line commands are green, and multi-line commands are yellow
    if return_as_string_instead_of_printing:
        return output
    else:
        print(end=output)
        _maybe_display_string_in_pager(output,with_line_numbers=False)
def show_error(E):
    """Record exception *E* (for MORE/PREVMORE navigation) and print its
    short stack trace, remembering which user command caused it."""
    try:
        # Push onto the error stack unless it's a duplicate of the last entry.
        error_stack.do_if_new(E)
    except AttributeError:
        # Some exception classes have broken __eq__, e.g. pynvml:
        #   File ".../pynvml/nvml.py", line 797, in __eq__
        #       return self.value == other.value
        #   AttributeError: 'AttributeError' object has no attribute 'value'
        pass
    nonlocal error,display_help_message_on_error,error_message_that_caused_exception
    if display_help_message_on_error:
        display_help_message_on_error=False
        if False: #Nah, don't need this anymore lol
            fansi_print("""Sorry, but that command caused an error that pseudo_terminal couldn't fix! Command aborted.
Type 'HELP' for instructions on how to use pseudo_terminal in general.
To see the full traceback of any error, type either 'MORE' or 'MMORE' (or alt+m as a shortcut).
NOTE: This will be the last time you see this message, unless you enter 'HELP' without quotes.""",'red','bold')
    error_message_that_caused_exception=user_message# so we can print it in magenta if asked to by 'MORE'
    # print_verbose_stack_trace(E)
    print_stack_trace(E,False,'ERROR: ')
    error=E  # Saved so the MORE family of commands can re-inspect it
error_message_that_caused_exception=None
display_help_message_on_error=True# A flag that will turn off the first time it displays "Sorry, but that command caused an error that pseudo_terminal couldn't fix! Command aborted. Type 'HELP' for instructions on pseudo_terminal. To see the full error traceback, type 'MORE'." so that we don't bombard the user with an unnessecary amount of stuff
pwd_history=[]
successful_command_history=[]
all_command_history=[]
snapshot_history=[]
ans_redo_history=[]
snapshots_enabled=False#Turning this on can break flann_dict. I haven't investigated why. Heres's some code that can break with it turned on:
# (Example code) f=FlannDict()
# (Example code) for _ in range(2000):
# (Example code) f[randint(100),randint(100)]=randint(100)
# (Example code) ans=f[34,23]
# (Example code) ans=f[34,23]
# (Example code) ans=f[34,23]
# (Example code) ans=f[34,23]
ans_history=[]
_tictoc=False
_profiler=False
_use_ipython_exeval=False
global _user_created_var_names
_user_created_var_names=set()
allow_keyboard_interrupt_return=False
use_modifier=True# Can be toggled with pseudo_terminal keyword commands, enumerated via 'HELP'
error=None# For MORE
last_assignable=last_assignable_candidate=None
assignable_history={}
warned_about_ans_print_on=False
do_garbage_collection_before_input=False#I'm going to see if this makes it faster when doing stuff with pytorch
_reload=False#If this is true, call _reload_modules right before each exeval is called
global _printed_a_big_annoying_pseudo_terminal_error
# garbage_collector_timer=tic()
def pterm_pretty_print(value,*args,**kwargs):
    """Display *value* in the most appropriate way for the terminal:

    - a string of valid Python code -> syntax-highlighted (and maybe paged)
    - a path or URL to an image     -> rendered inline
    - any other URL                 -> rendered as a website in the terminal
    - an image object               -> rendered inline
    - anything else                 -> falls through to pretty_print
    """
    def _display_pterm_image(value):
        # Accepts either an image object or a path/URL to load one from.
        if isinstance(value,str):
            value=load_image(value)
        if running_in_jupyter_notebook():
            display_image_in_notebook(value)
        else:
            display_image_in_terminal_color(value)
    if isinstance(value,str) and is_valid_python_syntax(value):
        highlighted_code=fansi_syntax_highlighting(value)
        print(highlighted_code)
        _maybe_display_string_in_pager(highlighted_code,False)
    elif file_exists(value) and is_image_file(value):
        _display_pterm_image(value)
    elif isinstance(value,str) and is_valid_url(value):
        if get_file_extension(value).lower() in 'jpg png jpeg tiff bmp gif'.split():
            _display_pterm_image(value)
        else:
            display_website_in_terminal(value)
    elif is_image(value):
        _display_pterm_image(value)
    else:
        pretty_print(value,*args,**kwargs)
    return
    # Dead code below (kept from an experiment capturing pretty_print output):
    #from contextlib import redirect_stdout
    #import io
    #f = io.StringIO()
    #with redirect_stdout(f):
    #    pretty_print(value,*args,**kwargs)
    #    help(pow)
    #s = f.getvalue()
    #print(s)
    #_maybe_display_string_in_pager(s)
    #return s
try:
#TODO: For some reason psuedo_terminal doesnt capture the scope it was called in. IDK why. Fix that. The next few lines are a patch and should eventually not be nesecay once bugs are fixed.
_pterm_exeval("None",*dicts,exec=exec,eval=eval)#I don't know why this is necessary (and haven't really tried to debug it) but without running something before importing all from rp nothihng works....
_,error=_pterm_exeval(rprc,*dicts,exec=exec,eval=eval)#Try to import RP
if error is not None:
fansi_print("ERROR in RPRC:",'red','bold')
print_verbose_stack_trace(error)
except BaseException as e:
print("PSEUDO TERMINAL ERROR: FAILED TO IMPORT RP...THIS SHOULD BE IMPOSSIBLE...WAT")
print_stack_trace(e)
SHOWN_PERMISSION_ERROR=False
def add_to_successful_command_history(x):
try:
_write_to_pterm_hist(x)
except PermissionError as e:
print_stack_trace(e)
print("PERMISSION ERROR SAVING PTERM HISTORY, FROM r._write_to_pterm_hist(...). COMMAND HISTORY NOT SAVED.")
print("THIS ERROR WILL ONLY BE SHOWN ONCE PER PSEUDO-TERMINAL SESSION TO AVOID SPAM")
successful_command_history.append(x)
import rp.r_iterm_comm
rp.r_iterm_comm.successful_commands=successful_command_history.copy()
help_commands_string="""
<Input Modifier>
MOD ON
MOD OFF
MOD SET
SMOD SET
<Stack Traces>
MORE
MMORE
DMORE
AMORE
GMORE
HMORE
RMORE
VIMORE
PIPMORE
IMPMORE
PREVMORE
NEXTMORE
<Command History>
HISTORY (HIST)
GHISTORY (GHIST)
AHISTORY (AHIST)
CHISTORY (CHIST)
DHISTORY (DHIST)
VHISTORY (VHIST)
ALLHISTORY (ALLHIST)
<Clipboards>
COPY
PASTE
EPASTE
WCOPY
WPASTE
TCOPY
TPASTE
LCOPY
LPASTE
VCOPY
VPASTE
FCOPY
FPASTE
MLPASTE
<'ans' History>
NEXT
PREV
PREV ON
PREV OFF
PREV CLEAR
PREV ALL
<Namespace History>
UNDO
UNDO ON
UNDO OFF
UNDO CLEAR
UNDO ALL
<Prompt Toolkit>
PT ON
PT OFF
PT
<RP Settings>
PT SAVE
PT RESET
SET TITLE
SET STYLE
<Shell Commands>
!
!!
SRUNA
SSRUNA
<Python>
PY
PYM
APY
APYM
PU
PIP
RUN
RUNA
<Simple Timer>
TICTOC
TICTOC ON
TICTOC OFF
<Profiler>
PROF
PROF ON
PROF OFF
PROF FLAME
PROF FLAME OPEN
PROF FLAME COPY
PROF FLAME PASTE
<Toggle Colors>
FANSI ON
FANSI OFF
<Module Reloading>
RELOAD ON
RELOAD OFF
<Documentation>
HELP
HHELP
SHORTCUTS
<Startup Files>
RPRC
VIMRC
TMUXRC
XONSHRC
RYAN RPRC
RYAN VIMRC
RYAN TMUXRC
RYAN XONSHRC
RYAN RANGERRC
<Inspection>
?
??
??? ?r
?.
?v
?s ?lj
?t ?j
?h (?/)
?e
?p
?c ?+c ?c+ ?cp
?i
?vd
<Others>
RETURN (RET)
SUSPEND (SUS)
CLEAR
WARN
GPU
TOP
TAB
TABA
VDA
MONITOR
UPDATE
ANS PRINT ON (APON)
ANS PRINT OFF (APOF)
ANS PRINT FAST (APFA)
SHELL (SH)
LEVEL
DITTO
EDIT
VARS
RANT
FORK
WANS
WANS+
ARG
VIM
VIMH
VIMA
AVIMA
GC OFF
GC ON
GC
<Unimportant>
NUM COM
PROF DEEP
CDH CLEAN
ALS
ALSD
ALSF
<File System>
RM
RN
MV
LS
LST
LSD
LSN
CD
CDP
CDA
CDB
CDU
CDH
CDH FAST
CDH GIT
CDZ
CDQ
CAT
NCAT
CCAT
ACAT
CATA
NCATA
CCATA
ACATA
PWD
CPWD
APWD
TAKE
MKDIR
OPEN
OPENH
OPENA
DISK
DISKH
TREE
TREE ALL
TREE DIR
TREE ALL DIR
FD
AFD (FDA)
FDT
FDTA
FD SEL (FDS)
LS SEL (LSS)
LS REL (LSR)
LS FZF (LSZ)
LS QUE (LSQ)
RANGER (RNG)
"""
# """
# <Broken>
# RYAN PUDBRC
# IPYTHON
# IPYTHON ON
# IPYTHON OFF
#
# <Truly Unimportant>
# IHISTORY (IHIST)
# RYAN RPRC YES #Theres a shortcut RRY for this, we don't really need to document it...
# RYAN VIMRC YES #Theres a shortcut RVY for this, we don't really need to document it...
# """
help_commands=[]#All commands, so we can search through them and turn uncapitablized ones into capitalized ones
for line in help_commands_string.splitlines():
if '#' in line or not line.strip() or not line.replace(' ','').replace('(','').replace(')','').isalpha():
#Skip <Documentation>, ???, blank lines etc
continue
line=line.strip()
if '(' in line:
#LS SEL (LSS) ---> LS SEL and LSS
first_help_command=line[:line.find('(')].strip()
second_help_command=line[line.find('('):].strip()[1:-1].strip()
help_commands.append(first_help_command)
help_commands.append(second_help_command)
else:
help_command=line.strip()
help_commands.append(help_command)
help_commands_no_spaces_to_spaces={x.replace(' ',''):x for x in help_commands}
# print(help_commands)#Should be like ['MOD ON', 'MOD OFF', 'MOD SET', 'SMOD SET.....
#TODO: Make APOF, APON etc implemented HERE, not elsewhere.
#TODO: Make these configurable in rprc
#There are duplicate shortcuts. This is a good thing! They don't interfere with variables.
#Example: H and HI. Maybe there's a variable called H. You can still use HI.
rp_import="__import__('rp')."
command_shortcuts_string='''
M MORE
MM MMORE
DM DMORE
GM GMORE
HM HMORE
AM AMORE
VM VIMORE
PM PIPMORE
IM IMPMORE
UM PREVMORE
NM NEXTMORE
RIM RMORE
HI HIST
DH DHIST
DHI DHIST
CH CHIST
CHI CHIST
GH GHIST
GHI GHIST
AH AHIST
AHI AHIST
VH VHIST
VHI VHIST
H HELP
HE HELP
HH HHELP
SC SHORTCUTS
CO COPY
WCO WCOPY
LC LCOPY
WC WCOPY
LCO LCOPY
TC TCOPY
TCO TCOPY
VCO VCOPY
VC VCOPY
EPA EPASTE
EP EPASTE
PA PASTE
WP WPASTE
WPA WPASTE
VP VPASTE
VPA VPASTE
LP LPASTE
LPA LPASTE
TP TPASTE
TPA TPASTE
FP FPASTE
FPA FPASTE
FC FCOPY
MLP MLPASTE
PSP $delist($shlex.split($string_from_clipboard()))
PAS $delist($shlex.split($string_from_clipboard()))
PASH $delist($shlex.split($string_from_clipboard()))
TPWC $web_copy($printed($tmux_paste()))
WCTP $web_copy($printed($tmux_paste()))
TPCO $string_to_clipboard($printed(str($tmux_paste())))
COTP $string_to_clipboard($printed(str($tmux_paste())))
TPLC $local_copy($printed($tmux_paste()))
LCTP $local_copy($printed($tmux_paste()))
TPVC $vim_copy($printed(str($tmux_paste())))
VCTP $vim_copy($printed(str($tmux_paste())))
WPTC $tmux_copy($printed(str($web_paste())))
TCWP $tmux_copy($printed(str($web_paste())))
WPCO $string_to_clipboard($printed(str($web_paste())))
COWP $string_to_clipboard($printed(str($web_paste())))
WPLC $local_copy($printed($web_paste()))
LCWP $local_copy($printed($web_paste()))
WPVC $vim_copy($printed(str($web_paste())))
VCWP $vim_copy($printed(str($web_paste())))
PATC $tmux_copy($printed(str($string_from_clipboard())))
TCPA $tmux_copy($printed(str($string_from_clipboard())))
PAWC $web_copy($printed($string_from_clipboard()))
WCPA $web_copy($printed($string_from_clipboard()))
PALC $local_copy($printed($string_from_clipboard()))
LCPA $local_copy($printed($string_from_clipboard()))
PAVC $vim_copy($printed(str($string_from_clipboard())))
VCPA $vim_copy($printed(str($string_from_clipboard())))
LPTC $tmux_copy($printed(str($local_paste())))
TCLP $tmux_copy($printed(str($local_paste())))
LPWC $web_copy($printed($local_paste()))
WCLP $web_copy($printed($local_paste()))
LPCO $string_to_clipboard($printed(str($local_paste())))
COLP $string_to_clipboard($printed(str($local_paste())))
LPVC $vim_copy($printed(str($local_paste())))
VCLP $vim_copy($printed(str($local_paste())))
VPTC $tmux_copy($printed(str($vim_paste())))
TCVP $tmux_copy($printed(str($vim_paste())))
VPWC $web_copy($printed($vim_paste()))
WCVP $web_copy($printed($vim_paste()))
VPCO $string_to_clipboard($printed(str($vim_paste())))
COVP $string_to_clipboard($printed(str($vim_paste())))
VPLC $local_copy($printed($vim_paste()))
LCVP $local_copy($printed($vim_paste()))
U CDU
UU $r._pterm_cd('../..')
UUU $r._pterm_cd('../../..')
UUUU $r._pterm_cd('../../../..')
UUUUU $r._pterm_cd('../../../../..')
UUUUUU $r._pterm_cd('../../../../../..')
UUUUUUU $r._pterm_cd('../../../../../../..')
UUUUUUUU $r._pterm_cd('../../../../../../../..')
UUUUUUUUU $r._pterm_cd('../../../../../../../../..')
UUUUUUUUUU $r._pterm_cd('../../../../../../../../../..')
UUUUUUUUUUU $r._pterm_cd('../../../../../../../../../../..')
UUUUUUUUUUUU $r._pterm_cd('../../../../../../../../../../../..')
UUUUUUUUUUUUU $r._pterm_cd('../../../../../../../../../../../../..')
UUUUUUUUUUUUUU $r._pterm_cd('../../../../../../../../../../../../../..')
UUUUUUUUUUUUUUU $r._pterm_cd('../../../../../../../../../../../../../../..')
UUUUUUUUUUUUUUUU $r._pterm_cd('../../../../../../../../../../../../../../../..')
UUUUUUUUUUUUUUUUU $r._pterm_cd('../../../../../../../../../../../../../../../../..')
UUUUUUUUUUUUUUUUUU $r._pterm_cd('../../../../../../../../../../../../../../../../../..')
UUUUUUUUUUUUUUUUUUU $r._pterm_cd('../../../../../../../../../../../../../../../../../../..')
1U CDU
2U $r._pterm_cd('../..')
3U $r._pterm_cd('../../..')
4U $r._pterm_cd('../../../..')
5U $r._pterm_cd('../../../../..')
6U $r._pterm_cd('../../../../../..')
7U $r._pterm_cd('../../../../../../..')
8U $r._pterm_cd('../../../../../../../..')
9U $r._pterm_cd('../../../../../../../../..')
10U $r._pterm_cd('../../../../../../../../../..')
11U $r._pterm_cd('../../../../../../../../../../..')
12U $r._pterm_cd('../../../../../../../../../../../..')
13U $r._pterm_cd('../../../../../../../../../../../../..')
14U $r._pterm_cd('../../../../../../../../../../../../../..')
15U $r._pterm_cd('../../../../../../../../../../../../../../..')
16U $r._pterm_cd('../../../../../../../../../../../../../../../..')
17U $r._pterm_cd('../../../../../../../../../../../../../../../../..')
18U $r._pterm_cd('../../../../../../../../../../../../../../../../../..')
19U $r._pterm_cd('../../../../../../../../../../../../../../../../../../..')
20U $r._pterm_cd('../../../../../../../../../../../../../../../../../../../..')
B CDB
BB CDBCDB
BBB CDBCDBCDB
BBBB CDBCDBCDBCDB
BBBBB CDBCDBCDBCDBCDB
BBBBBB CDBCDBCDBCDBCDBCDB
BBBBBBB CDBCDBCDBCDBCDBCDBCDB
BBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBBBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
BBBBBBBBBBBBBBBBBBBB CDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDBCDB
WCIJ1 web_copy(encode_image_to_bytes(ans,'jpeg',quality=10))
WCIJ2 web_copy(encode_image_to_bytes(ans,'jpeg',quality=20))
WCIJ3 web_copy(encode_image_to_bytes(ans,'jpeg',quality=30))
WCIJ4 web_copy(encode_image_to_bytes(ans,'jpeg',quality=40))
WCIJ5 web_copy(encode_image_to_bytes(ans,'jpeg',quality=50))
WCIJ6 web_copy(encode_image_to_bytes(ans,'jpeg',quality=60))
WCIJ7 web_copy(encode_image_to_bytes(ans,'jpeg',quality=70))
WCIJ8 web_copy(encode_image_to_bytes(ans,'jpeg',quality=80))
WCIJ9 web_copy(encode_image_to_bytes(ans,'jpeg',quality=90))
WCIJ95 web_copy(encode_image_to_bytes(ans,'jpeg',quality=95))
WCIJ web_copy(encode_image_to_bytes(ans,'jpeg',quality=100))
WCIP web_copy(encode_image_to_bytes(ans,'png'))
WPI decode_image_from_bytes(web_paste())
DI $display_image(ans) if $is_image(ans) else $display_video(ans)
DV $display_video(ans)
DVL $display_video(ans,loop=True)
A ACATA
AA ACATA
ACA ACATA
AC ACAT
CA CAT
CAA CATA
WN WARN
WR WARN
TT TICTOC
TA TAKE
TK TAKE
MK MKDIR
MA MKDIR
` CD ~
D` CD ~
CD` CD ~
+PA str(ans)+$string_from_clipboard()
PPA str(ans)+$string_from_clipboard()
+PAL str(ans)+'\\n'+$string_from_clipboard()
PPAL str(ans)+'\\n'+$string_from_clipboard()
PPLA str(ans)+'\\n'+$string_from_clipboard()
PAL str(ans)+'\\n'+$string_from_clipboard()
PLA str(ans)+'\\n'+$string_from_clipboard()
PF PROF
PO PROF
POD PROF DEEP
POF PROF FLAME
FLAME PROF FLAME
FLA PROF FLAME
FLAO PROF FLAME OPEN
FLAC PROF FLAME COPY
FLAP PROF FLAME PASTE
N NEXT
P PREV
NN NEXT
PP PREV
B CDB
U CDU
DU CDU
CDC cdhclean
CCL cdhclean
HC CDH
HD CDH
DG CDH GIT
HDG CDH GIT
HDF CDH FAST
CDHF CDH FAST
VCDH $vim($r._cd_history_path);ans=$r._cd_history_path
CDHV $vim($r._cd_history_path);ans=$r._cd_history_path
VHD $vim($r._cd_history_path);ans=$r._cd_history_path
HDV $vim($r._cd_history_path);ans=$r._cd_history_path
GMP $get_module_path(ans)
DA CDA
RU RUN
SSRA SSRUNA
SSA SSRUNA
SSR SSRUNA
SS SSRUNA
SRA SRUNA
SA SRUNA
SR SRUNA
RA RUNA
EA RUNA
CPR !$PY -m rp call check_pip_requirements
BAA $os.system('bash '+str(ans))
ZSHA $os.system('bash '+str(ans))
#PYA and PUA are similar to EA except they run in separate process
PYA $os.system($sys.executable+' '+$shlex.quote(ans));
PUA $os.system($sys.executable+' -m pudb '+$shlex.quote(ans));
PDA $os.system($sys.executable+' -m pudb '+$shlex.quote(ans));
V VIM
VI VIM
AV AVIMA
AVA AVIMA
VA VIMA
VHE VIMH
VIH VIMH
CM RMORE
GPP $get_path_parent(ans)
GFN $get_file_name(ans) if isinstance(ans,str) else $get_file_names(ans)
GPN $get_path_name(ans)
SFE $strip_file_extension(ans) if isinstance(ans,str) else $strip_file_extensions(ans)
GFE $get_file_extension(ans) if isinstance(ans,str) else $get_file_extensions(ans)
64P ans=$printed($string_from_clipboard()) ; $fansi_print($human_readable_file_size(len( ans )), 'bold cyan') ; ans=$base64_to_object(ans) #Copy object via Base64 String
64C _ans64=$printed($object_to_base64(ans)) ; $fansi_print($human_readable_file_size(len(_ans64)), 'bold cyan') ; $string_to_clipboard(_ans64) #Copy object via Base64 String
# GO GC
MON MONITOR
VD VDA
TRAD treealldir
TRD treedir
TR tree
TRA treeall
CVD $fansi_print('CUDA_VISIBLE_DEVICES: %s'%get_cuda_visible_devices(), 'bold yellow yellow on dark blue')
FON fansion
FOF fansioff
FOFF fansioff
UOF UNDO OFF
UON UNDO ON
RRC ryanrprc
RTC ryantmuxrc
RVC ryanvimrc
RXC ryanxonshrc
RR ryanrprc
RT ryantmuxrc
# RV ryanvimrc
RX ryanxonshrc
RRY RYAN RPRC YES
RVY RYAN VIMRC YES
RVN RYAN VIMRC NO
RRNG RYAN RANGERRC
strip ans.strip()
sp ans.strip()
RZG $r._load_ryan_lazygit_config()
VIMPROF $r._profile_vim_startup_plugins()
LSF LSQ
FDZ LSZ
FDQ LSQ
RG RNG
VS VARS
OP OPEN
OPH OPENH
OH OPENH
OPA OPENA
OA OPENA
LVL LEVEL
LV LEVEL
L LEVEL
SHOGA $pip_install_multiple(ans, shotgun=True) #Shotgun Ans - works well with PIF
PIMA $pip_install_multiple(ans, shotgun=False) #Pip Install Multiple Ans - works well with PIF
PIRA $pip_install('-r '+ans)
PIR PIP install -r requirements.txt
UP UPDATE
UPWA if $input_yes_no(ans+"\\n\\n"+$fansi("Set r.py to this?",'red','bold')): $string_to_text_file($get_module_path($r), ans)
UPYE PIP install rp --upgrade --no-cache
DK DISK
DD DITTO
DT DITTO
DO DITTO
PW PWD
PD PWD
WD PWD
APW APWD
AP APWD
AW APWD
WA WANS
WAP WANS+
VSS $repr_vars(*$r._iterfzf($r._user_created_var_names,multi=True)) #VS Select
VSM $repr_vars(*$r._iterfzf($r._user_created_var_names,multi=True)) #VS Select
VSR $repr_vars(*$r._user_created_var_names) #VS Repr
CVSR $string_to_clipboard($repr_vars(*$r._user_created_var_names)) #Copy VS Repr
COVSR $string_to_clipboard($repr_vars(*$r._user_created_var_names)) #Copy VS Repr
CVS $string_to_clipboard($repr_vars(*$r._user_created_var_names)) #Copy VS Repr
AFD FDA
LSA ALS
LSAD ALSD
LSAF ALSF
ATS $tmux_get_scrollback()
quit() RETURN
exit() RETURN
RETK $fansi_print("RETK: Killing this process forcefully!", 'cyan', 'bold'); $kill_process($get_process_id())
DKH DISKH
KH DISKH
DQ CDHQ FAST
PRP PYM rp
SURP $os.system('sudo '+sys.executable+' -m rp')
GOO $open_google_search_in_web_browser(str(ans))
GOOP $open_google_search_in_web_browser($string_from_clipboard())
SMI $os.system("nvidia-smi");
NVI $pip_import('nvitop');$pip_import('nvitop.__main__').main()
NVT $r._ensure_nvtop_installed();$os.system("nvtop");#sudo_apt_install_nvtop
ZSH $r._ensure_zsh_installed();$os.system("zsh");
BOP TOP
bashtop $r._run_bashtop() #Good where BOP doesn't work and MON is too basic
BA $os.system("bash");
S $os.system("sh");
Z $os.system("zsh");
JL PYM jupyter lab
UNCOMMIT !git reset --soft HEAD^
REATTACH_MASTER !git branch temp-recovery-branch ; git checkout temp-recovery-branch ; git checkout master ; git merge temp-recovery-branch ; git branch -d temp-recovery-branch #Reattach from the reflog to master
PULL !git pull
PUL !git pull
EMA $explore_torch_module(ans)
NB $extract_code_from_ipynb()
NBA $extract_code_from_ipynb(ans)
NBC $r.clear_jupyter_notebook_outputs()
NBCA $r._nbca(ans) # Clear a notebook
NBCH $r._nbca($get_all_files(file_extension_filter='ipynb')) #Clear all notebooks in the current directory
NBCHY $r._nbca($get_all_files(file_extension_filter='ipynb',sort_by='size')[::-1], auto_yes=True) #Clear all notebooks in the current directory without confirmation
NBCHYF $r._nbca($get_all_files(file_extension_filter='ipynb',sort_by='size')[::-1], auto_yes=True,parallel=True) #Clear all notebooks in the current directory without confirmation
NCA $r._nbca(ans)
JL PYM jupyter lab
IPYK $add_ipython_kernel()
INS $input_select("Select:", $line_split(ans) if isinstance(ans,str) else ans)
ISA $input_select("Select:", $line_split(ans) if isinstance(ans,str) else ans)
ISM $r._ISM(ans) #Input Select Multi
IMA $r._ISM(ans) #Input Select Multi
IMS $r._ISM(ans) #Input Select Multi
ISENV $r._ISM($os.environ) #Input Select (multiple) Environment (variables)
ENV $r._ISM($os.environ) #Input Select (multiple) Environment (variables)
ENP $r._ISM({x:y for x,y in $os.environ.items() if ':' in y},preview="""echo {} | cut -d'|' -f2- | cut -c2- | tr ':' '\\n'""") #Input Select (multiple) Environment (variables) with preview for when split by : like on PATH
ENVP $r._ISM({x:y for x,y in $os.environ.items() if ':' in y},preview="""echo {} | cut -d'|' -f2- | cut -c2- | tr ':' '\\n'""") #Input Select (multiple) Environment (variables) with preview for when split by : like on PATH
WHI $r._ism_whiches() #Investigate 'which x' for every system command x
WHICH $r._ism_whiches() #Investigate 'which x' for every system command x
GSC $r._ISM($get_system_commands())
SCO $r._ISM($get_system_commands())
VCL $delete_file($get_absolute_path('~/.viminfo'))#VimClear_use_when_VCOPY_doesnt_work_properly
ALSF $get_all_paths($get_current_directory(),include_files=True,include_folders=False,relative=True)
LSAG $get_all_paths (relative=False,sort_by='name') #LSA Global
LSAFG $get_all_files (relative=False,sort_by='name') #LSA Files Global
LSADG $get_all_folders(relative=False,sort_by='name') #LSA Directories Global
LSM $r._iterfzf($get_all_paths('.',relative=False,sort_by='name'),multi=True,exact=True)
LSAI $get_all_image_files()
IASM $import_all_submodules(ans,verbose=True);
SUH $sublime('.')
SUA $sublime(ans)
COH $vscode('.')
COA $vscode(ans)
SG $save_gist(ans)
LG $load_gist(input($fansi('URL:','blue','bold')))
LGA $load_gist(ans)
OG $load_gist($input_select(options=$line_split($text_file_to_string($path_join($get_parent_folder($get_module_path($rp)),'old_gists.txt')))))
# CAH $copy_path(ans,'.')
# CPAH $copy_path(ans,'.')
CAH $r._cpah(ans)
CPAH $r._cpah(ans)
HLAH $r._cpah(ans,method=make_hardlink)#HardLink Ans Here
# CPPH $copy_path($string_from_clipboard(),'.')
# CPH $copy_path($string_from_clipboard(),'.')
CPPH $r._cpah($string_from_clipboard())
CPH $r._cpah($string_from_clipboard())
# MAH $move_path(ans,'.')
# MVAH $move_path(ans,'.')
# MVPH $move_path($string_from_clipboard(),'.')
# MPH $move_path($string_from_clipboard(),'.')
MAH $r._cpah(ans,$move_path)
MVAH $r._cpah(ans,$move_path)
MVPH $r._cpah($string_from_clipboard(),$move_path)
MPH $r._cpah($string_from_clipboard(),$move_path)
GCLP $git_clone($string_from_clipboard())
GCLPS $git_clone($string_from_clipboard(),depth=1)
GCLA $git_clone(ans,show_progress=True)
GCLAS $git_clone(ans,show_progress=True,depth=1) #Git-Clone ans Shallow
GURL $get_git_remote_url()
SURL $shorten_url(ans)
REPO $get_path_parent($get_git_repo_root($get_absolute_path('.')))
UG $r._pterm_cd($get_path_parent($get_git_repo_root($get_absolute_path('.'))))
GU $r._pterm_cd($get_path_parent($get_git_repo_root($get_absolute_path('.'))))
WGA if $os.system('wget\\x20'+ans)==0: ans=$get_file_name(ans)
LNAH $os.symlink(ans,$get_file_name(ans));ans=$get_file_name(ans)#Created_Symlink
LN $os.symlink(ans,$get_file_name(ans));ans=$get_file_name(ans)#Created_Symlink
HL $make_hardlink(ans,$get_file_name(ans))
TMDA $os.system('tmux list-sessions -F "#{session_name}" | xargs -I % tmux detach -s %') #Detach all users from all tmux sessions
RF $random_element([x for x in $os.scandir() if not x.is_dir(follow_symlinks=False)]).name
RD $random_element([x for x in $os.scandir() if x.is_dir(follow_symlinks=False)]).name
RE ($random_element(ans.splitlines()) if isinstance(ans,str) else $random_element(ans))
RDA $r._pterm_cd($random_element([x for x in $os.scandir() if x.is_dir(follow_symlinks=False)])) # RD then DA
CDR $r._pterm_cd($random_element([x for x in $os.scandir() if x.is_dir(follow_symlinks=False)]))
LJ LINE JOIN ANS
AJ JSON ANS
JA JSON ANS
JEA JSON ANS
LJEA [$line_join(x) for x in ans] #Line Join Each Ans
CJ ans.split(",") if isinstance(ans,str) else ",".join(map(str,ans))
SJ ans.split(" ") if isinstance(ans,str) else " ".join(map(str,ans))
SPAJ ans.split(" ") if isinstance(ans,str) else " ".join(map(str,ans))
SGC $select_git_commit()
DUNKA $pip_import('dunk');$os.system(f"git diff {ans} | dunk")
DUNKP $dunk_string_diff(ans,$string_from_clipboard())
PDUNK $dunk_string_diff($string_from_clipboard(),ans)
FN $r._get_function_names(ans)
SHA $get_sha256_hash(ans,show_progress=True)
DCI $display_image_in_terminal_color(ans)
FCA $web_copy_path(ans,show_progress=True)
FCH print("FCH->FileCopyHere");$web_copy_path($get_absolute_path('.'),show_progress=True)
RMA $r._rma(ans)
RNA $rename_file(ans,$input_default($fansi('NewPathName:','blue'),$get_file_name(ans)))
APA $r._absolute_path_ans(ans)
RPA $r._relative_path_ans(ans)
UPA $r._user_path_ans(ans)
UZA $unzip_to_folder(ans)
ZIH $make_zip_file_from_folder($get_absolute_path('.'))
ZIA $make_zip_file_from_folder(ans)
RWC $web_copy($get_source_code($r))
CCA $r._run_claude_code(ans).code
CCH $r._run_claude_code('.')
RST __import__('os').system('reset')
RS __import__('os').system('reset')
BLA $r._autoformat_python_code_via_black(str(ans))
SIM $r.sort_imports_via_isort(ans)
CBP ans=$string_from_clipboard();ans=$r.autoformat_python_via_black_macchiato(ans);$string_to_clipboard(ans)
CSP ans=$string_from_clipboard();ans=$sort_imports_via_isort(ans);$string_to_clipboard(ans)
RMS $r._removestar(ans)
DAPI __import__('rp.pypi_inspection').pypi_inspection.display_all_pypi_info()
DISC $display_image_slideshow('.',display=$display_image_in_terminal_color)
DISI $display_image_slideshow('.',display=lambda image:$display_image_in_terminal_imgcat($with_alpha_checkerboard(image)))
FZM $r._iterfzf(ans,multi=True)
NLS $fansi_print(len($os.listdir()),"blue","bold")
DUSH !du -sh
PTS ptsave
ST settitle
STIT settitle
UR $unshorten_url(ans)
UUR $unshorten_url(ans)
UURL $unshorten_url(ans)
GP $print_gpu_summary()
VGP $r._ensure_viddy_installed() ; $r._run_sys_command('viddy '+sys.executable+' call print_gpu_summary')
NGP $print_notebook_gpu_summary()
LEA [eval(str(x)) for x in ans]
EVLA [eval(str(x)) for x in ans]
PAF ans=$string_from_clipboard(); ans=ans.splitlines() if '\\n' in ans else ans[1:-1].split("' '") if ans.startswith("'") and ans.endswith("'") else ans #Paste Files (for MacOS when you copy multiple files)
CLS CLEAR
VV !vim
RCLAHF $os.system($printed("rclone copy --progress --transfers 128 --metadata %s ."%('"'+ans+'"'))); #Quickly copy a network drive folder. Copies the contents, not the folder itself! The 'F' stands for fast, which is because this skips checksums - it wont overwrite any files ever!
RCLAH $os.system($printed("rclone copy --checksum --progress --transfers 128 --metadata %s ."%('"'+ans+'"'))); #Quickly copy a network drive folder. Copies the contents, not the folder itself!
WEV import rp.web_evaluator as wev
DR $r._display_columns(dir(),'dir():')
DUSHA $fansi_print($human_readable_file_size(sum($get_file_size(x,False)for x in $enlist(ans))),'cyan','bold')
INM __name__="__main__"
QPHP $r._input_select_multiple_history_multiline() #Query Prompt-Toolkit History Paragraphs (F3)
QPH $r._input_select_multiple_history() #Query Prompt-Toolkit History Lines (F3)
QVH $r._input_select_multiple_history($pterm_history_filename) #Query VHISTORY
GITIGNORE $r._write_default_gitignore()
GITIGN $r._write_default_gitignore()
IGN $r._write_default_gitignore()
IGNORE $r._write_default_gitignore()
GIG $r._write_default_gitignore()
GPL !git pull
PPTA $r._convert_powerpoint_file(ans)
PPT $r._convert_powerpoint_file($input_select_file(file_extension_filter='pptx'),message='Select a powerpoint file')
TMD !tmux d
TMA !tmux a
TM !tmux
TMUX !tmux
FB $r._run_filebrowser()
NL $fansi_print('Number of lines in ans: %i'%$number_of_lines(ans), 'yellow')
ZG $r._install_lazygit();$os.system('lazygit')
UNCOMMIT !git reset --soft HEAD^
# ZGA $os.system('cd '+ans'+' && lazygit') #NOT Ready yet - CDA's logic is more complex and can handle funcs and modules, this could only handle strings...
FART $r._fart(); #Find and replace text in current directory (recursively). Tip: best to use this with FDT
AFART $r._fart() #Find and replace text in current directory (recursively). Tip: best to use this with FDT
FARTA $r._fart(ans); #Find and replace text in paths specified by ans. Tip: best to use this with FDT
AFARTA $r._fart(ans) #Find and replace text in paths specified by ans. Tip: best to use this with FDT
HTTP $os.system($sys.executable+' -m http.server')
HTP $os.system($sys.executable+' -m http.server')
FMA $r._view_markdown_in_terminal(ans) # Displays markdown
MDA $r._view_markdown_in_terminal(ans) # Displays markdown
PIF PIP freeze
HOSTLAB !$PY -m rp call pip_import jupyter --auto_yes True ; $PY -m jupyter lab --ip 0.0.0.0 --port 5678 --NotebookApp.password='' --NotebookApp.token='' --allow-root
'''
# SA string_to_text_file(input("Filename:"),str(ans))#SaveAnsToFile
# BB set_current_directory(r._get_cd_history()[-2]);fansi_print('BB-->CDH1-->'+get_current_directory(),'blue','bold')#Use_BB_instead_of_CDH_<enter>_1_<enter>_to_save_time_when_starting_rp
#Note: \x20 is the space character
# Parse command_shortcuts_string (one "NAME expansion" per line) into a list of raw lines.
command_shortcuts=line_split(command_shortcuts_string)
import rp.r_iterm_comm as ric
# Embedders can register extra shortcuts at runtime via rp.r_iterm_comm.additional_command_shortcuts
if hasattr(ric,'additional_command_shortcuts'):
    command_shortcuts+=list(ric.additional_command_shortcuts)
# Substitutions: '$PY' -> current interpreter path, then bare '$' -> the rp import prefix.
# NOTE: '$PY' must be replaced first, since the '$' pass would otherwise clobber it.
command_shortcuts = [x.replace('$PY',sys.executable) for x in command_shortcuts]
command_shortcuts = [x.replace('$',rp_import) for x in command_shortcuts]
command_shortcuts=list(map(str.strip,command_shortcuts))
command_shortcuts=[x for x in command_shortcuts if not x.startswith('#')]# Drop commented-out shortcut lines
command_shortcuts=[x for x in command_shortcuts if x]# Drop blank lines
# command_shortcuts_pairs=list(map(str.split,command_shortcuts))
command_shortcuts_pairs=[str.split(x,maxsplit=1)for x in command_shortcuts]# Split each line into [name, rest-of-line]
def join_command(pair):
    """Normalize a split shortcut line into a [name, command] pair.

    The command half is everything after the shortcut name, re-joined with
    spaces so expansions containing spaces survive intact.
    """
    head = pair[0]
    tail = ' '.join(pair[1:])
    return [head, tail]
# Collapse each pair into [name, full-command-text] (the command may contain spaces)
command_shortcuts_pairs=list(map(join_command,command_shortcuts_pairs))
command_shortcuts={x:y for x,y in command_shortcuts_pairs}
# Register a lowercase alias for every shortcut so lookups work regardless of case
for key in list(command_shortcuts):
    command_shortcuts[key.lower()]=command_shortcuts[key]#Make it case-insensitive
try:
import rp.r_iterm_comm
rp.r_iterm_comm.globa=scope()#prime it and get it ready to go (before I had to enter some valid command like '1' etc to get autocomplete working at 100%)
while True:
rp.r_iterm_comm.rp_pt_user_created_var_names[:]=list(_user_created_var_names)
try:
# region Get user_message, xor exit with second keyboard interrupt
_update_cd_history()
try:
def evaluable_part(cmd:str) -> str:
    """Return the part of a (possibly multi-line) command that could be eval'd.

    Strips trailing whitespace and keeps only the last line of `cmd`,
    on the theory that the final line is the expression worth evaluating.

    Note: doesn't take the ';' statement separator into account.

    BUGFIX: the original computed the last line but never returned it
    (the function always returned None); the missing `return` is added here.
    """
    cmd = cmd.rstrip().split('\n')[-1]
    return cmd
# TODO Make everything evaluable like in ipython
def try_eval(x,true=False):# If true==True, then we return the actual value, not a formatted string
    """Evaluate expression string `x` for ptpython's live preview, muting stdout.

    Returns a formatted "eval(...) = <result>" string (or the raw value when
    `true` is True). The last result is memoized in rp.r_iterm_comm so an
    unchanged input text is not re-evaluated. On any error, the previous good
    result is returned with the error message appended.
    """
    # region Communicate with ptpython via r_iterm_comm
    if x==rp.r_iterm_comm.try_eval_mem_text:
        return rp.r_iterm_comm.rp_evaluator_mem# Text hasn't changed, so don't evaluate it again
    rp.r_iterm_comm.try_eval_mem_text=x
    temp=sys.stdout.write  # Save the real stdout writer so we can restore it in the finally block
    try:
        sys.stdout.write=_muted_stdout_write  # Suppress any printing the evaluated code might do
        s=scope()
        # true_value=eval(x,merged_dicts(s,globals(),locals()))
        # Mini-syntax: "statements RETURN expression" exec's the statements,
        # then evaluates only the expression part (only when exactly one 'RETURN' appears).
        if x.count('RETURN')==1:
            exec(x.split('RETURN')[0],rp.r_iterm_comm.globa)# If we have a RETURN in it,
            x=x.split('RETURN')[1].lstrip()# lstrip also removes newlines
        out="eval("+repr(x)+") = \n"
        true_value=eval(x,rp.r_iterm_comm.globa)
        if true:
            return true_value
        from pprint import pformat
        # Strings are shown verbatim; everything else via repr
        out=out+(str if isinstance(true_value,str) else repr)((true_value)) # + '\nans = '+str(dicts[0]['ans'])
        rp.r_iterm_comm.rp_evaluator_mem=out  # Cache the formatted result for the memoization check above
        return str(out)+"\n"
    except Exception as E:
        # Keep showing the last successful preview, annotated with the new error
        return str(rp.r_iterm_comm.rp_evaluator_mem)+"\nERROR: "+str(E)
    finally:
        sys.stdout.write=temp  # Always restore stdout, even on early return or error
rp.r_iterm_comm.rp_evaluator=try_eval
rp.r_iterm_comm.rp_VARS_display=str(' '.join(sorted(list(_user_created_var_names))))
# endregion
import gc as garbage_collector
if do_garbage_collection_before_input:
garbage_collector.collect()#Sometimes we run into memory issues, maybe this is what's making it slow when using pytorch and big tensors?
# print("GC!")
# garbage_collector_timer=tic()
if _need_module_refresh:
_refresh_autocomplete_module_list()
if get_current_directory()=='.':
fansi_print("WARNING: Current directory was deleted; moving to a new location",'yellow','bold')
set_current_directory('/')
fansi_print("PWD: "+_fansi_highlight_path(get_current_directory()),"blue",'bold')
user_message=get_user_input(lambda:scope(),header=_get_prompt_style(),enable_ptpython=enable_ptpython)
try:set_numpy_print_options(linewidth=max(0,get_terminal_width()-len('ans = ')))#Make for prettier numpy printing, by dynamically adjusting the linewidth each time we enter a command
except Exception:pass#print("Failed to set numpy width")
if not user_message:
continue# A bit of optimization for aesthetic value when we hold down the enter key
allow_keyboard_interrupt_return=False
except (KeyboardInterrupt,EOFError):
if allow_keyboard_interrupt_return:
fansi_print("Caught repeated KeyboardInterrupt or EOFError --> RETURN",'cyan','bold')
while True:
try:
if input_yes_no("Are you sure you want to RETURN?"):
user_message="RETURN"
break
else:
break
except:
print("<KeyboardInterrupt>\nCaught another KeyboardInterrupt or EOFError...if you'd like to RETURN, please enter 'yes'")
pass
else:
allow_keyboard_interrupt_return=True
raise
# endregion
_user_created_var_names&=set(scope())# Make sure that the only variables in this list actually exist. For example, if we use 'del' in pseudo_terminal, ∄ code to remove it from this list (apart from this line of course)
# region Non-exevaluable Terminal Commands (Ignore user_message)
_update_cd_history()
import re
if not '\n' in user_message and '/' in user_message and not ' ' in user_message:
#Avoid the shift key when doing r?v by letting you do r/v (assuming v doesn't exist)
#When applicable, let thing/v --> thing?v and /v --> ?v
#Likewise, let /s --> ?s etc
#not ' ' in user_message is just a good heuristic
split=user_message.split('/')
left=''.join(split[:-1])
right=split[-1]
if right in 'p e s v t h c r i j c+ +c cp lj vd'.split():
#/p --> ?p /e --> ?e /t --> ?t /s ---> ?s /v --> ?v /h --> ?h /c --> ?c /r --> ?r /i --> ?i /cp --> ?cp
if not right in scope():
user_message=left+'?'+right
fansi_print("Transformed input to "+repr(user_message)+' because variable '+repr(right)+' doesn\'t exist','magenta','bold')
# if 'PWD' in help_commands:
# print("JAJAJA")
# if 'vim' in scope():
# print("GLOO GLOO")
if user_message in command_shortcuts and user_message not in scope():
original_user_message=user_message
user_message=command_shortcuts[user_message]
if _get_pterm_verbose() or not user_message.isupper(): fansi_print("Transformed input to "+repr(user_message.replace(rp_import,''))+' because variable '+repr(original_user_message)+' doesn\'t exist but is a shortcut in SHORTCUTS','magenta','bold')
if user_message.strip().isalpha() and user_message.strip() and user_message.islower() and not user_message.strip() in scope() and user_message.upper().strip() in help_commands_no_spaces_to_spaces:
original_user_message=user_message
user_message=user_message.upper().strip()
user_message=help_commands_no_spaces_to_spaces[user_message]#Allow 'ptoff' --> 'PT OFF'
if _get_pterm_verbose(): fansi_print("Transformed input to "+repr(user_message)+' because variable '+repr(original_user_message)+' doesn\'t exist but '+user_message+' is a command','magenta','bold')
if user_message == 'RETURN' or user_message =='RET':
try:
if get_ans() is None:
fansi_print("rp.pseudo_terminal(): Exiting session. No value returned.",'blue','bold')
else:
# fansi_print("rp.pseudo_terminal(): Exiting session. Returning ans = " + str(get_ans()),'blue','bold')
fansi_print("rp.pseudo_terminal(): Exiting session. Returning ans",'blue','bold')
return get_ans()
except Exception as e:
print_verbose_stack_trace(e)
fansi_print("rp.pseudo_terminal(): Exiting session. Failed to call get_ans() (this is a strange, rare error). Returning ans = None",'blue','bold')
return None#Sometimes, calling get_ans() fails
elif user_message=='SHORTCUTS':
lines=[]
lines.append(fansi("Showing all pseudo-terminal command shortcuts:\n * NOTE: Shortcuts are not case sensitive!",'green','bold'))
for x,y in command_shortcuts_pairs:
if x.isupper():
lines.append(fansi(x.ljust(4),'cyan','bold')+' --> '+fansi(y.replace(rp_import,''),'blue','bold'))
print(line_join(lines))
_maybe_display_string_in_pager(line_join(lines),False)
elif user_message=='HHELP':
fansi_print("HHELP --> Displaying full documentation for rp:",'blue','bold')
import rp
ans=rp.__file__
ans=get_parent_directory(ans)
ans=path_join(ans,'documentation.py')
ans=text_file_to_string(ans)
ans=ans.replace('\t',' ')
try:
string_pager(ans)
except:
print(ans)
fansi_print("HHELP --> Finished printing documentation.",'blue','bold')
elif user_message == 'HELP':
def columnify_strings(strings_input):
    """Lay out blank-line-separated paragraphs as side-by-side columns.

    Paragraphs are packed top-to-bottom into columns no taller than a fixed
    height; full columns are then concatenated horizontally into one string.
    """
    max_column_height=55#Total height of output
    column_gap=' '*4#Spacing between columns
    #strings_input is a string separated by newlines and double-newlines
    assert isinstance(strings_input,str)
    text=strings_input.strip()
    # Collapse runs of blank lines down to a single blank line
    for _ in range(100):
        text=text.replace('\n\n\n','\n\n')
    paragraphs=text.split('\n\n')
    columns=[]
    current=[]
    for paragraph in paragraphs:
        block=horizontally_concatenated_strings(paragraph,column_gap,rectangularize=True)
        block=block.strip()
        # Start a new column once adding this paragraph would exceed the height limit
        if line_join(current+[block]).count('\n')<=max_column_height:
            current+=[block,'']
        else:
            columns+=[line_join(current)]
            current=[block,'']
    if current:
        columns+=[line_join(current)]
    return horizontally_concatenated_strings(columns,rectangularize=True)
strings_input=help_commands_string
strings_input=lrstrip_all_lines(strings_input)
command_list=columnify_strings(strings_input)
display_help_message_on_error=True# Seems appropriate if they're looking for help
fansi_print("HELP --> Here are the instructions (type HHELP for more info):",'blue','underlined')
fansi_print(""" For those of you unfamiliar, this will basically attempt to exec(input()) repeatedly.",'blue')
For more documentation, type 'HHELP'
NOTE: If you're using linux, please use 'sudo apt-get install xclip' to let rp access your system's clipboard
Note that you must import any modules you want to access; this terminal runs inside a def.
If the command you enter returns a value other than None, a variable called 'ans' will be assigned that value.
If the command you enter returns an error, pseudo_terminal will try to fix it, and if it can't it will display a summary of the error.
To set different prompt styles, use set_prompt_style(' >> ') or set_prompt_style(' ⮤ ') etc. This currently only works with PT ON. This setting will be saved between sessions.
To launch the debugger, type debug() on the top of your code. HINT: Typing the microcompletion '\\de' will toggleååååå this for you.
Enter 'HISTORY' without quotes to get a list of all valid python commands you have entered so far, so you can copy and paste them into your code.
NOTE:
Enter 'EPASTE' without quotes to run what is copied to your clipboard, allowing you to run multiple lines at the same time
Enter 'MORE' without quotes to see the full error traceback of the last error, assuming the last attempted command caused an error.
Enter 'RETURN' without quotes to end the session, and return ans as the output value of this function.
Games: Type 'import pychess', 'import snake', 'import py2048', 'import sudoku', 'import mario', 'import tetris', or 'import flappy' (Tetris has to be fixed, its currently a big buggy)
Enter 'CD directory/path/etc' to cd into a directory, adding it to the system path (so you can use local imports etc with RUN)
Enter 'RUN pythonfile.py -arg1 --args --blah' to run a python file with the given args
Enter 'PT OFF' to turn prompt-toolkit off. This saves battery life, and has less features. It's the default when using a non-tty command line
When PT OFF, use '\\' to delete previous line of input and '/' to enter a multiline input. Yes, you can use multi-line even if PT OFF.
Enter 'EDIT0' or 'EDIT1' etc to edit the n'th last entry in an external editor (for multiline input when PT OFF)
Enter 'import shotgun' to attempt to pip-install a bunch of useful optional dependencies
Note: rinsp is automatically imported into every pseudo_terminal instance; use it to debug your code really easily!
"rinsp ans 1" is parsed to "rinsp(ans,1)" for convenience (generalized to literals etc)
"+ 8" is parsed to "ans + 8" and ".shape" is parsed into
play_sound_from_samples([.1,.2,.3,.4,.5,.5,.6,.6,.6,.6,.6,.6,.6,.6]*238964,3000) ⟵ Play that sound or something like it to debug speed in rp
Sometimes, you don't have to type a command in all caps. For example, 'pwd' acts like 'PWD' if there's no variable called 'pwd'. This saves you from having to reach to the shift key. Other examples: 'tictocon'-->'TICTOC ON', 'gcon'-->'GC ON'
Sometimes, you can use "some_variable/v" in place of "some_variable?v" when variable v doesn't exist, to save you from having to reach for the shift key. This also works for "/s"-->"?s", "/p"-->"?e" etc.
The ?. command has some variations. r?.image will print a list of results. But, just r?. alone will enter FZF. r?.3 will enter FZF with a max search depth of 3.
ALL COMMANDS:\n"""*0+indentify(command_list,' '*4*0), "blue")
# Other commands: 'MOD ON', 'MOD OFF', 'SMOD SET', 'MOD SET', 'VARS', 'MORE', 'MMORE', 'RETURN NOW', 'EDIT', 'AHISTORY', GHISTORY', 'COPY', 'PASTE', 'CHISTORY', 'DITTO', 'LEVEL', 'PREV', 'NEXT', 'UNDO', 'PT ON', 'PT OFF', 'RANT', '!', '!!', '?/', '?.', '?', '??', '???', '????', '?????','SHELL', 'IPYTHON', 'UNDO ALL', 'PREV ALL', 'UNDO ON', 'UNDO OFF', 'PREV ON', 'PREV OFF', 'PREV CLEAR', 'UNDO CLEAR', 'GC ON', 'GC OFF', 'SUSPEND', 'TICTOC ON', 'TICTOC OFF', 'TICTOC', 'FANSI ON', 'FANSI OFF', 'RUN', 'CD', 'PROF ON', 'PROF OFF', 'PROF', 'IPYTHON ON', 'IPYTHON OFF', 'PROF DEEP', 'SET STYLE', 'PT SAVE', 'PT RESET', 'RELOAD ON', 'RELOAD OFF', 'PWD', 'CPWD', 'LS', 'FORK'
elif user_message =='SET TITLE':
_set_session_title()
elif user_message =='CLEAR':
import os
os.system('clear')
if running_in_jupyter_notebook():
from IPython.display import clear_output
clear_output()
elif user_message =='PT SAVE':
try:
fansi_print("Saving your Prompt-Toolkit-based GUIs settings, such as the UI and Code color themes, whether to use mouse mode, etc...", 'blue', 'underlined')
_save_pyin_settings_file()
fansi_print("...done!", 'blue', 'underlined')
except Exception as e:
fansi_print("...failed to PT SAVE!", 'red', 'underlined')
print_stack_trace(e)
elif user_message =='PT RESET':
try:
if input_yes_no("Are you sure you want to delete your settings file? This will reset all your settings to the defaults. This might sometimes be necessary if an invalid settings file prevents you from using PT ON. You can't undo this unless you've made a backup of "+repr(_pyin_settings_file_path)):
fansi_print("Deleting your settings file...", 'blue', 'underlined')
_delete_pyin_settings_file()
fansi_print("...done! When you restart rp, your changes should take effect. If you change your mind before you close this session and want to keep your settings, use PT SAVE before exiting.", 'blue', 'underlined')
else:
fansi_print("...very well then. We won't reset your PT (PromptToolkit) settings file.", 'blue', 'underlined')
except Exception as e:
fansi_print("...failed to PT RESET!", 'red', 'underlined')
print_stack_trace(e)
elif user_message == 'SET STYLE':
set_prompt_style()
elif user_message=='ANS PRINT FAST' or user_message=='APFA':
fansi_print("ANS PRINT FAST --> Will still print the value of 'ans', but it won't check if it's the same value as before (which can make it much faster). It will still print the answer, but it won't always be highlighted yellow if 'ans' is unchanged (normally it's green if there's a new value of 'ans', and yellow if 'ans' hasn't changed)", 'blue', 'bold')
# print("TODO: This might be made the default option, in which case ANS PRINT FAST will be removed") #It's not fullproof. [0] twice is green twice, instead of green than yelloq
should_print_ans=''
elif user_message=='ANS PRINT OFF' or user_message=='APOF':
fansi_print("ANS PRINT OFF --> Will no longer automatically print the value of 'ans'. This is often useful when str(ans) is so large that printing 'ans' spams the console too much.", 'blue', 'bold')
should_print_ans=False
elif user_message=='ANS PRINT ON' or user_message=='APON':
fansi_print("ANS PRINT ON --> Will automatically print the value of 'ans'. Will print it in green if it's a new value, and in yellow if it's the same value as it was before.", 'blue', 'bold')
should_print_ans=True
elif user_message == 'PROF DEEP':
global _PROF_DEEP
if not _PROF_DEEP:
if not _profiler:
fansi_print("Turned PROFILER on. This will profile each command you run. To turn if off use PROF OFF.", 'blue', 'underlined')
_profiler=True
_PROF_DEEP=True
fansi_print("Toggled _PROF_DEEP. We just the PROFILER to DEEP mode ON. This means we record all functions, even ones from external libraries. It's more verbose. Use PROF DEEP again to go back to shallow mode.", 'blue', 'underlined')
else:
fansi_print("Toggled _PROF_DEEP. We just the PROFILER to DEEP mode OFF. Use PROF DEEP again to go back to deep mode.", 'blue', 'underlined')
_PROF_DEEP=False
elif user_message in ['PROF FLAME']:
flamechart_html, flamechart_location = _display_pterm_flamechart(local=False)
elif user_message in ['PROF FLAME OPEN']:
flamechart_html, flamechart_location = _display_pterm_flamechart(local=True)
open_file_with_default_application(flamechart_location)
elif user_message in ['PROF FLAME COPY']:
flamechart_html, flamechart_location = _display_pterm_flamechart(local=True)
string_to_clipboard(flamechart_html)
compressed_html = object_to_base64(_truncate_string_floats(flamechart_html))
print(compressed_html)
fansi_print(
"Copied flamechart compressed HTML to clipboard! (or if not, copy the above base64 string). View it with FLAP (PROF FLAME PASTE) on a local rp",
"bold cyan",
)
elif user_message in ['PROF FLAME PASTE']:
compressed_html = string_from_clipboard()
html = base64_to_object(compressed_html)
assert isinstance(html, str), 'Failed to decode pasted flamechart!'
path = temporary_file_path('html')
save_text_file(html, path)
open_file_with_default_application(path)
fansi_print("Copied HTML from clipboard to "+path, 'bold cyan')
elif user_message == 'WARN':
if _warnings_are_off():
fansi_print("WARN --> Toggles warnings --> Turning warnings back on", 'blue', 'bold')
_warnings_on()
else:
fansi_print("WARN --> Toggles warnings --> Turning all warnings off", 'blue', 'bold')
_warnings_off()
elif user_message == 'PROF ON':
# fansi_print("Turned PROFILER on. This will profile each command you run. To get more detailed profiles, use 'PROF DEEP'. Note: Commands that take under a millisecond to run will not be profiled, to maintain both accuracy and your sanity.", 'blue', 'underlined')
fansi_print("Turned PROFILER on. This will profile each command you run. Note: Commands that take under a millisecond to run will not be profiled, to maintain both accuracy and your sanity.", 'blue', 'underlined')
_profiler=True
elif user_message == 'PROF OFF':
fansi_print("Turned PROFILER off.", 'blue', 'underlined')
_profiler=False
elif user_message == 'PROF':
_profiler=not _profiler
if _profiler:
# fansi_print("Turned PROFILER on. This will profile each command you run. To get more detailed profiles, use 'PROF DEEP'", 'blue', 'underlined')
fansi_print("Turned PROFILER on. This will profile each command you run.", 'blue', 'underlined')
else:
fansi_print("Turned PROFILER off.",'blue','underlined')
elif user_message=='MONITOR':
fansi_print("MONITOR -> Entering a system monitoring tool to show you cpu usage/memory etc of the current computer...", 'blue', 'bold',new_line=False)
pip_import('glances').main()
fansi_print('...done!','blue','bold')
elif user_message=='GPU':
try:
pip_import('gpustat').main()
except BaseException as e:
print_stack_trace(e)
pass
elif user_message == 'TICTOC ON':
fansi_print("Turned TICTOC on. This will display the running time of each command.", 'blue', 'underlined')
_tictoc=True
elif user_message == 'TICTOC OFF':
fansi_print("Turned TICTOC off.",'blue','underlined')
_tictoc=False
elif user_message == 'TICTOC':
_tictoc=not _tictoc
if _tictoc:
fansi_print("Turned TICTOC on. This will display the running time of each command.",'blue','underlined')
else:
fansi_print("Turned TICTOC off.",'blue','underlined')
elif user_message == 'RELOAD ON':
_reload_modules()
fansi_print("Turned RELOAD ON. This will re-import any modules that changed at the beginning of each of your commands.",'blue','underlined')
_reload=True
elif user_message == 'RELOAD OFF':
fansi_print("Turned RELOAD OFF",'blue','underlined')
_reload=False
elif user_message=='FORK':
#TODO: Make this work with PT ON
#TODO: Right now this is just a proof of concept, of how to set checkpoints. Might rename this CHECKPOINT, but that's a long name...fork is nicer...\
#Used in-case you wanna try something risky that even UNDO can't fix...like mutating tons of variables etc...
#But unlike UNDO, it won't use tons and tons of memory (in theory) because of copy-on-write
#TODO: Handle Ctrl+C events from being propogaetd to each process at once
#TODO: Properly handle stdout so we can support PT ON
import os, sys
fansi_print("FORK -> Attempting to fork...",'blue','underlined')
child_pid = os.fork()
if child_pid == 0:
if currently_running_mac():
#This only seems to be a problem on MacOS, PT ON in FORK runs fine in Ubuntu..
fansi_print("Note: PT ON is not currently supported while forking yet on MacOS." ,'blue','underlined')#PT ON gives OSError: [Errno 9] Bad file descriptor
enable_ptpython=False
else:
fansi_print("...spawning child process. Also, please don't use control+c yet, that's not supported either, and if you send a keyboard interrupt during FORK this program will act very glitchy. To exit, use RETURN (or RET, for short).",'blue','underlined')#PT ON gives OSError: [Errno 9] Bad file descriptor
# child process
# os.system('ping -c 20 www.google.com >/tmp/ping.out')
# sys.exit(0)
else:
pid, status = os.waitpid(child_pid, 0)
fansi_print("FORK: resuming parent process...",'blue','underlined')
elif user_message=='RANGER' or user_message=='RNG':
fansi_print('RANGER --> Launching ranger, a curses-based file manager with vim bindings...','blue',new_line=True)
_launch_ranger()
fansi_print('...done!','blue',new_line=True)
elif user_message=='TOP':
fansi_print("TOP --> running 'bpytop'",'blue','bold')
if sys.version_info>(3,6):
pip_import('bpytop')
import subprocess
subprocess.run([sys.executable, "-m",'bpytop'])
else:
fansi_print("Sorry, bpytop is not supported in python versions < 3.6",'red','bold')
elif user_message=='TREE ALL DIR':
display_file_tree(all=True,only_directories=True)
elif user_message=='TREE DIR':
display_file_tree(all=False,only_directories=True)
elif user_message=='TREE ALL':
display_file_tree(all=True)
elif user_message=='TREE':
display_file_tree(all=False)
elif user_message=='DISKH':
_display_filetype_size_histogram()
elif user_message=='DISK':
print(fansi("Showing disk usage tree for current directory: ",'blue','bold')+fansi(get_current_directory(),'yellow'))
pip_import('duviz').main()
elif user_message in {'HISTORY','HIST'}:print_history()
elif user_message in {'IHISTORY','IHIST'}:
#Because of the automatic _maybe_display_string_in_pager feature of HIST, this is no longer a nessecary command
#It's harmess though, so I'll leave it in anyway (maybe you don't want to spam the console for whatever reason)
fansi_print('IHISTORY --> Interactive History --> Displaying HISTORY interactively','blue','bold')
string_pager(print_history(True))
elif user_message in {'ALLHISTORY','ALLHIST'}:fansi_print("ALLHISTORY --> Displaying all history, including failures:",'blue','bold');display_list(all_command_history)
elif user_message == 'SUSPEND' or user_message=='SUS':
try:
psutil=pip_import('psutil')
fansi_print("Suspending this python session...",'blue','underlined')
import psutil,os
psutil.Process(os.getpid()).suspend()
fansi_print("...restored!",'blue','underlined')
except ImportError:
fansi_print("ERROR: psutil not installed. Try pip install psutil.",'red')
elif user_message in {'DHISTORY','DHIST'}:
fansi_print("DHISTORY --> DEF HISTORY --> Here is a list of all your most recent function definitions in your HISTORY:",'blue','underlined')
dhistory=_dhistory_helper('\n'.join(successful_command_history))
set_ans('\n'.join(dhistory))
#set_ans('\n'+'\n'.join(dhistory))
# bold=False
# for defcode in :
# fansi_print('\n'+defcode,'yellow','bold' if bold else None)
elif user_message in {'GHISTORY','GHIST'}:
fansi_print("GHISTORY --> GREEN HISTORY --> Here is a list of all valid single-lined python commands you have entered so far:",'blue','underlined')
for x in successful_command_history:
fansi_print(x if '\n' not in x else '','green') # x if '\\n' not in x else '' ≣ '\\n' not in x and x or ''
elif user_message in {'CHISTORY','CHIST'}:
fansi_print("CHISTORY --> COPY HISTORY --> Copied history to clipboard!",'blue','underlined')
string_to_clipboard('\n'.join(successful_command_history))
elif user_message == "MORE":
if _get_pterm_verbose(): fansi_print("The last command that caused an error is shown below in magenta:",'red','bold')
fansi_print(error_message_that_caused_exception,'magenta')
if error is None:# full_exception_with_traceback is None --> Last command did not cause an error
fansi_print( "(The last command did not cause an error)",'red')
else:
print_stack_trace(error,True,'')
elif user_message == "HMORE":
#HMORE is like MORE but with syntax highlighting. It's a tiny difference.
if _get_pterm_verbose(): fansi_print("The last command that caused an error is shown below in magenta:",'red','bold')
# fansi_print(error_message_that_caused_exception,'magenta')
if error is None:# full_exception_with_traceback is None --> Last command did not cause an error
fansi_print( "(The last command did not cause an error)",'red')
else:
try:
#By default, try to print a syntax-highlighted stack trace. Fall back to a regular one.
print_highlighted_stack_trace(error)
except:
print_stack_trace(error,True,'')
elif user_message == "MMORE":
if _get_pterm_verbose(): fansi_print("The last command that caused an error is shown below in magenta:",'red','bold')
fansi_print(error_message_that_caused_exception,'magenta')
fansi_print("A detailed stack trace is shown below:",'red','bold')
if error is None:# full_exception_with_traceback is None --> Last command did not cause an error
fansi_print( "(The last command did not cause an error)",'red')
else:
print_verbose_stack_trace(error)
elif user_message == "RMORE":
if error is None:# full_exception_with_traceback is None --> Last command did not cause an error
fansi_print( "(The last command did not cause an error)",'red')
else:
print_rich_stack_trace(error)
elif user_message == "AMORE":
if _get_pterm_verbose(): fansi_print("AMORE --> 'ans MORE' --> Setting 'ans' to the error",'red','bold')
set_ans(error)
# if error is None:# full_exception_with_traceback is None --> Last command did not cause an error
# fansi_print( "(The last command did not cause an error)",'red')
# else:
# print_verbose_stack_trace(error)
elif user_message == 'DMORE':
if _get_pterm_verbose(): fansi_print("DMORE --> Entering a post-mortem debugger","blue")
tb=error.__traceback__
if currently_in_a_tty() and not currently_running_windows():
try:
pip_import('pudb').post_mortem(tb)
except Exception:
import pdb
#In jupyter, this will somehow magically become ipdb. Idk how that works but it does.
pdb.post_mortem(tb)
else:
import pdb
pdb.post_mortem(tb)
# fansi_print("DMORE has not yet been implemented. It will be a post mortem debugger for your error using rp_ptpdb",'red','bold')
elif user_message.startswith('MOD SET'):
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment