
@dustinvtran
Created December 14, 2020 08:02
Python version of source code for https://mcspeedrun.com/dream.pdf. This implementation uses default precision (float64), so the decimal values are slightly off from the original Java implementation, which uses BigDecimal.
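A note on the precision caveat above: the tail probabilities involved are on the order of 1e-11, where computing 1 - cdf in float64 can lose digits to cancellation. As a rough sanity check (a sketch of my own, not part of the original gist or the paper's code), scipy's survival function computes the upper tail directly and can be compared against the 1 - cdf form used in the code below:

    import scipy.stats

    # P(X >= 42) for X ~ Binomial(262, 0.0473), the ender-pearl numbers used below.
    tail_sf = scipy.stats.binom.sf(k=42 - 1, n=262, p=0.0473)
    tail_cdf = 1. - scipy.stats.binom.cdf(k=42 - 1, n=262, p=0.0473)
    print(tail_sf, tail_cdf)  # expect agreement to several digits, not all

Any remaining difference from the Java BigDecimal results comes from float64 round-off, not from the algorithm.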
import numpy as np
import scipy.stats
from typing import List


def shifty_investigator(num_trials: int,
                        num_successes: int,
                        p_success: float):
    """Stopping-rule ("shifty investigator") correction for the probability of
    num_successes successes in num_trials trials at rate p_success; see
    https://mcspeedrun.com/dream.pdf."""
    p_fail = 1. - p_success
    # P(X >= num_successes) for X ~ Binomial(num_trials, p_success).
    target_p = 1. - scipy.stats.binom.cdf(n=num_trials,
                                          k=num_successes - 1,
                                          p=p_success)
    # For each possible number of observed trials, find the smallest success
    # count whose upper-tail probability is at most target_p.
    significant_cutoffs = [None] * (num_trials + 1)
    significant_cutoffs[0] = 0
    for observed_trials in range(1, num_trials + 1):
        suc_trials = significant_cutoffs[observed_trials - 1]
        found_cutoff = False
        while not found_cutoff and suc_trials <= observed_trials:
            current_p = 1. - scipy.stats.binom.cdf(n=observed_trials,
                                                   k=suc_trials - 1.,
                                                   p=p_success)
            found_cutoff = current_p - target_p <= 0
            if found_cutoff:
                significant_cutoffs[observed_trials] = suc_trials
            suc_trials += 1
        if not found_cutoff:
            significant_cutoffs[observed_trials] = observed_trials
    # Propagate the success-count distribution one trial at a time, zeroing
    # counts above the per-trial cutoff; one minus the surviving mass is the
    # corrected probability.
    last_row = [1., 0.]
    for n in range(1, num_trials):
        last_row = gen_nth_row_of_pascal_with_cutoffs(n,
                                                      last_row,
                                                      significant_cutoffs[n],
                                                      p_success,
                                                      p_fail)
    total = sum(last_row)
    return 1. - total


def gen_nth_row_of_pascal_with_cutoffs(n: int,
                                       last_row: List[float],
                                       significant_cutoff: int,
                                       p_success: float,
                                       p_fail: float):
    """Advance the success-count distribution by one Bernoulli trial (a
    Pascal's-triangle-style update), zeroing counts above significant_cutoff."""
    next_row = [None] * (n + 2)
    next_row[0] = last_row[0] * p_fail
    for i in range(1, n + 2):
        if i <= significant_cutoff:
            next_row[i] = last_row[i - 1] * p_success + last_row[i] * p_fail
        else:
            next_row[i] = 0.
    return next_row
print("Ender pearls")
pearl_stopping_rule = shifty_investigator(262, 42, 0.0473)
print(f"Stopping Rule: {pearl_stopping_rule}")
pearl_stream_bias = 1. - np.exp(np.log1p(-pearl_stopping_rule) * 66.)
pearl_runner_bias = 1. - np.exp(np.log1p(-pearl_stream_bias) * 1000.)
print(f"Stopping rule + stream selection bias + runner selection bias: {pearl_runner_bias}")
print("Blaze rods")
rod_stopping_rule = shifty_investigator(305, 211, 0.5)
print(f"Stopping Rule: {rod_stopping_rule}")
print(f"Final probability")
fisher_statistic = -2. * (np.log(pearl_runner_bias) + np.log(rod_stopping_rule))
p_value = 1. - scipy.stats.chi2(df=4).cdf(fisher_statistic)
p_hacking = p_value * 90
print(p_hacking)
## Ender pearls
## Stopping Rule: 1.2050027642374062e-11
## Stopping rule + stream selection bias + runner selection bias: 7.953015084272153e-07
## Blaze rods
## Stopping Rule: 4.656419694271108e-11
## Final probability
## 1.2989609388114332e-13
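
As a side check (my own sketch, not part of the original gist), the Fisher combination at the end can be reproduced with scipy.stats.combine_pvalues, which computes the same -2 * sum(log p_i) statistic and compares it against a chi-squared distribution with 2k = 4 degrees of freedom:

    # Hypothetical cross-check using the two p-values computed above.
    stat, combined_p = scipy.stats.combine_pvalues(
        [pearl_runner_bias, rod_stopping_rule], method="fisher")
    print(stat, combined_p)   # should match fisher_statistic and p_value up to round-off
    print(combined_p * 90)    # should match the final p_hacking figure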