@Azeirah
Last active April 13, 2025 23:46
Plugin culprit-finding simulation: binary search vs. linear search
import random
import numpy as np
import matplotlib.pyplot as plt

def simulate_linear_search(num_plugins, t_toggle, t_check, prob_index):
    """
    Simulate linear search for the problematic plugin.
    In linear search, we disable one plugin at a time, check if the problem is fixed,
    and re-enable it before trying the next one.
    """
    total_time = 0
    for i in range(num_plugins):
        # Disable the current plugin
        total_time += t_toggle
        # Check if the problem is fixed
        total_time += t_check
        if i == prob_index:
            # Found the problematic plugin, we're done
            return total_time
        # Re-enable the plugin before moving to the next one
        total_time += t_toggle
    return total_time
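
# Optional closed-form sanity check (illustrative helper; assumes the culprit
# index is uniformly distributed, as in the Monte Carlo runs below). When the
# culprit sits at 0-based index i, the linear strategy pays (i + 1) toggles and
# checks plus i re-enable toggles, so the expected cost has a simple closed form.
def expected_linear_time(num_plugins, t_toggle, t_check):
    avg_tried = (num_plugins + 1) / 2  # expected number of plugins tried, E[i + 1]
    return avg_tried * (t_toggle + t_check) + (avg_tried - 1) * t_toggle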

def simulate_binary_search(num_plugins, t_toggle, t_check, prob_index):
    """
    Simulate binary search for the problematic plugin.

    1. Disable all plugins initially.
    2. Check to confirm the problem is gone (this confirmation check is not
       charged in the timing model below).
    3. Iteratively narrow down the search space by enabling half of the
       remaining plugins at a time.
    """
    total_time = 0
    # Disable all plugins initially
    total_time += num_plugins * t_toggle
    # Define the search space
    left, right = 0, num_plugins - 1
    while left < right:
        mid = (left + right) // 2
        # Enable plugins from left to mid
        num_to_enable = mid - left + 1
        total_time += num_to_enable * t_toggle
        # Check if the problem occurs
        total_time += t_check
        if left <= prob_index <= mid:
            # Problem reappeared: the culprit is in the first half.
            # Disable the plugins we just enabled and keep searching there.
            total_time += num_to_enable * t_toggle
            right = mid
        else:
            # No problem: the culprit is in the second half.
            # Disable the plugins we just enabled before narrowing the range.
            total_time += num_to_enable * t_toggle
            left = mid + 1
    # At this point, left == right, which is the index of the problematic plugin
    return total_time
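
# Optional rough cost model (illustrative approximation, not an exact formula):
# the loop halves the search range on every pass, so there are roughly
# ceil(log2(num_plugins)) checks, but because each pass both enables and then
# disables the half it probes, total toggles stay proportional to num_plugins
# (about 3n) rather than shrinking to log2(n).
def rough_binary_time(num_plugins, t_toggle, t_check):
    checks = (num_plugins - 1).bit_length()  # == ceil(log2(n)) for n >= 1
    toggles = 3 * num_plugins  # disable-all pass plus ~2n enable/disable toggles
    return toggles * t_toggle + checks * t_check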

def run_monte_carlo_simulation(num_trials, num_plugins, t_toggle, t_check):
    """Run multiple trials and compute average search times."""
    linear_times = []
    binary_times = []
    for _ in range(num_trials):
        # Randomly select the problematic plugin index
        prob_index = random.randint(0, num_plugins - 1)
        # Run simulations
        linear_time = simulate_linear_search(num_plugins, t_toggle, t_check, prob_index)
        binary_time = simulate_binary_search(num_plugins, t_toggle, t_check, prob_index)
        linear_times.append(linear_time)
        binary_times.append(binary_time)
    # Calculate statistics
    linear_avg = np.mean(linear_times)
    binary_avg = np.mean(binary_times)
    return {
        "linear_search": {
            "avg_time_ms": linear_avg,
            "min_time_ms": min(linear_times),
            "max_time_ms": max(linear_times),
        },
        "binary_search": {
            "avg_time_ms": binary_avg,
            "min_time_ms": min(binary_times),
            "max_time_ms": max(binary_times),
        },
        "comparison": {
            "faster_method": "linear" if linear_avg < binary_avg else "binary",
            "speedup_factor": max(linear_avg, binary_avg) / min(linear_avg, binary_avg),
        },
    }
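
# Example of a single scenario run (parameter values here are arbitrary):
# run_monte_carlo_simulation(num_trials=1000, num_plugins=25, t_toggle=100, t_check=1200)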
# Define parameter sets to test
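# Each plugin count (25, 50, 150, 500) is paired with toggle times of 0, 100,
# 200 and 500 ms (suffixes a-d) against a fixed 1200 ms check time.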
parameter_sets = [
    {"name": "25a", "num_plugins": 25, "t_toggle": 0, "t_check": 1200},
    {"name": "25b", "num_plugins": 25, "t_toggle": 100, "t_check": 1200},
    {"name": "25c", "num_plugins": 25, "t_toggle": 200, "t_check": 1200},
    {"name": "25d", "num_plugins": 25, "t_toggle": 500, "t_check": 1200},
    {"name": "50a", "num_plugins": 50, "t_toggle": 0, "t_check": 1200},
    {"name": "50b", "num_plugins": 50, "t_toggle": 100, "t_check": 1200},
    {"name": "50c", "num_plugins": 50, "t_toggle": 200, "t_check": 1200},
    {"name": "50d", "num_plugins": 50, "t_toggle": 500, "t_check": 1200},
    {"name": "150a", "num_plugins": 150, "t_toggle": 0, "t_check": 1200},
    {"name": "150b", "num_plugins": 150, "t_toggle": 100, "t_check": 1200},
    {"name": "150c", "num_plugins": 150, "t_toggle": 200, "t_check": 1200},
    {"name": "150d", "num_plugins": 150, "t_toggle": 500, "t_check": 1200},
    {"name": "500a", "num_plugins": 500, "t_toggle": 0, "t_check": 1200},
    {"name": "500b", "num_plugins": 500, "t_toggle": 100, "t_check": 1200},
    {"name": "500c", "num_plugins": 500, "t_toggle": 200, "t_check": 1200},
    {"name": "500d", "num_plugins": 500, "t_toggle": 500, "t_check": 1200},
]
# Run simulations and display results
results_all = []
for params in parameter_sets:
    name = params.pop("name")
    print(f"\nScenario: {name}")
    print(f" Plugins: {params['num_plugins']}")
    print(f" Toggle Time: {params['t_toggle']}ms")
    print(f" Check Time: {params['t_check']}ms")
    results = run_monte_carlo_simulation(num_trials=1000, **params)
    results_all.append((name, results))
    print(f" Linear Search Avg Time: {results['linear_search']['avg_time_ms']:.2f}ms")
    print(f" Binary Search Avg Time: {results['binary_search']['avg_time_ms']:.2f}ms")
    print(f" Faster Method: {results['comparison']['faster_method']}")
    print(f" Speedup Factor: {results['comparison']['speedup_factor']:.2f}x")
# Create a bar chart comparing the methods
plt.figure(figsize=(12, 8))
scenario_names = [r[0] for r in results_all]
linear_times = [r[1]['linear_search']['avg_time_ms'] for r in results_all]
binary_times = [r[1]['binary_search']['avg_time_ms'] for r in results_all]
x = np.arange(len(scenario_names))
width = 0.35
plt.bar(x - width/2, (np.array(linear_times)/1000), width, label='Linear Search')
plt.bar(x + width/2, (np.array(binary_times)/1000), width, label='Binary Search')
plt.xlabel('Scenario')
plt.ylabel('Average Time (s)')
plt.title('Linear vs Binary Search Performance')
plt.xticks(x, scenario_names)
plt.legend()
# Label each bar with its average time in seconds, matching the y-axis units
for i, v in enumerate(linear_times):
    plt.text(i - width / 2, v / 1000 + 0.1, f"{v / 1000:.1f}", ha='center')
for i, v in enumerate(binary_times):
    plt.text(i + width / 2, v / 1000 + 0.1, f"{v / 1000:.1f}", ha='center')
plt.tight_layout()
plt.show()
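# In a headless environment, plt.savefig("comparison.png") could be used in place of plt.show().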