Created November 5, 2024 18:58
A script I made based on an exam. It takes a CSV file representing a state table, parses it, builds the implication table, reduces the states, collects the compatible pairs, and prints the reduced table.
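In an implication table, every pair of states is compared: pairs with different outputs are crossed out immediately, pairs with identical outputs and identical next states are equivalent outright, and the remaining pairs are tagged with the next-state pairs their equivalence depends on. In the sample table below, for example, F and J have the same outputs and the same next states, so they are marked equivalent right away, while A and C disagree on their outputs and are crossed out.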
import pandas as pd
from collections import defaultdict
import argparse

# Example input: a state table with the present state (PS), the next states
# under inputs 0 and 1, and the outputs z1 and z2.
"""
PS, 0, 1, z1, z2
A, F, D, 0, 0
B, J, C, 0, 0
C, A, F, 1, 1
D, I, B, 1, 1
E, I, G, 0, 0
F, G, A, 1, 0
G, J, B, 1, 1
H, E, I, 1, 1
I, D, E, 1, 0
J, G, A, 1, 0
"""
def process_table(file_path):
    # Load the CSV file into a pandas DataFrame. skipinitialspace strips the
    # blanks after the commas in tables like the example above, so cell
    # values compare cleanly.
    try:
        df1 = pd.read_csv(file_path, skipinitialspace=True)
        print("CSV loaded successfully!")
        print(df1)

        # Clean up column names
        df1.columns = df1.columns.str.strip()

        letters = df1["PS"].to_list()

        # The implication table is a lower triangle: one row per state except
        # the first, one column per state except the last.
        ps_subset_row = letters[1:]
        ps_subset_col = letters[:-1]
        triangle_df = pd.DataFrame("", index=ps_subset_row, columns=ps_subset_col)
        # Fill the lower triangle by comparing every pair of states.
        for i in range(len(df1)):
            for j in range(i + 1, len(df1)):
                ps_i = df1.iloc[i]["PS"]
                ps_j = df1.iloc[j]["PS"]
                z1_i = df1.iloc[i]["z1"]
                z1_j = df1.iloc[j]["z1"]
                z2_i = df1.iloc[i]["z2"]
                z2_j = df1.iloc[j]["z2"]
                n0_i = df1.iloc[i]["0"]
                n0_j = df1.iloc[j]["0"]
                n1_i = df1.iloc[i]["1"]
                n1_j = df1.iloc[j]["1"]
                if z1_i == z1_j and z2_i == z2_j:
                    if n0_j == n0_i and n1_j == n1_i:
                        # Same outputs and same next states: equivalent outright.
                        triangle_df.loc[ps_j, ps_i] = "@"
                    else:
                        # Same outputs; equivalence depends on the implied
                        # next-state pairs under inputs 0 and 1.
                        triangle_df.loc[ps_j, ps_i] = "{}{}-{}{}".format(
                            n0_j, n0_i, n1_j, n1_i
                        )
                else:
                    # Different outputs: never equivalent.
                    triangle_df.loc[ps_j, ps_i] = "X"
        # Collect every pair that survived (anything not crossed out with "X").
        pairs = []
        for i in range(len(ps_subset_col)):
            for j in range(i, len(ps_subset_row)):
                value = triangle_df.iloc[j, i]
                row_label = triangle_df.index[j]
                col_label = triangle_df.columns[i]
                if value != "X":
                    pairs.append("{}{}".format(row_label, col_label))

        # Build an undirected compatibility graph: one edge per surviving pair.
        graph = defaultdict(set)
        for pair in pairs:
            a, b = pair
            graph[a].add(b)
            graph[b].add(a)
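        # The merged states are taken as the connected components of this
        # graph, found with a depth-first search. For a completely specified
        # machine, state equivalence is transitive, so this is sound once
        # incompatible cells have been fully propagated above.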
        def dfs(node, visited, component):
            # Depth-first search that collects one connected component.
            visited.add(node)
            component.add(node)
            for neighbor in graph[node]:
                if neighbor not in visited:
                    dfs(neighbor, visited, component)

        visited = set()
        components = []
        for node in graph:
            if node not in visited:
                component = set()
                dfs(node, visited, component)
                components.append(component)

        # States compatible with no other state form singleton classes.
        for s in letters:
            if s not in graph:
                components.append({s})

        # Sort inside each class, then sort the classes by their first state,
        # so the summary below lines up with the original state order.
        components = [sorted(component) for component in components]
        components.sort(key=lambda c: c[0])

        formatted_components = [
            "(" + "".join(component) + ")" for component in components
        ]
        result = "".join(formatted_components)

        print("\n{}\n\n{}\n\n\n{}\n".format(triangle_df, pairs, result))
        print(components)
        # Print the reduced table: each class is shown once, at the row of its
        # first (representative) state; merged-away states show "-".
        v = 0
        for i in letters:
            if i == components[v][0]:
                print("{} | {}".format(i, ",".join(components[v])))
                v = v + 1 if v < len(components) - 1 else v
            else:
                print("{} | -".format(i))
    except Exception as e:
        print(f"Error processing CSV file: {e}")
def main():
    # Set up argument parser
    parser = argparse.ArgumentParser(description="Parse the path to a CSV file.")
    parser.add_argument("csv_file", type=str, help="Path to the CSV file")

    # Parse arguments
    args = parser.parse_args()

    # Load the CSV file using the provided path
    process_table(args.csv_file)


if __name__ == "__main__":
    main()
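To run it, save the sample table from the docstring as a CSV file and pass its path on the command line (both file names here are just examples):

python state_reduction.py state_table.csv

The script then prints the parsed table, the implication triangle, the surviving pairs, the grouped classes, and the reduced state listing.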