@rahuljantwal-8451
Last active October 3, 2024 21:17
Generate High Cardinality Data
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
from tqdm import tqdm
import os
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Generate a large categorical dataset')
    parser.add_argument('--col1_cardinality', type=int, default=50_000_000, help='Cardinality of column 1')
    parser.add_argument('--col2_cardinality', type=int, default=10, help='Cardinality of column 2')
    parser.add_argument('--col3_cardinality', type=int, default=4, help='Cardinality of column 3')
    parser.add_argument('--total_rows', type=int, default=100_000_000, help='Total number of rows to generate')
    parser.add_argument('--output_dir', type=str, default='./data/simulated', help='Output directory for dataset')
    return parser.parse_args()
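
# Example invocation (hypothetical script file name; the values shown are only
# illustrative, for a smaller test run):
#   python generate_high_cardinality_data.py --col1_cardinality 1000000 \
#       --total_rows 5000000 --output_dir ./data/simulated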

def generate_dataset_chunk(chunk_size, col1_start, col1_cardinality, col2_values, col3_values):
    col1 = np.arange(col1_start, col1_start + chunk_size) % col1_cardinality
    col2 = np.random.choice(col2_values, size=chunk_size)
    col3 = np.random.choice(col3_values, size=chunk_size)
    df = pd.DataFrame({
        'col1': col1,
        'col2': col2,
        'col3': col3
    })
    return df
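
# Example of the wrap-around above: generate_dataset_chunk(5, 8, 10, [1, 2], [1, 2])
# produces col1 == [8, 9, 0, 1, 2], with col2 and col3 drawn uniformly from {1, 2}.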

def write_parquet_file(df, output_dir, file_index):
    file_name = f'chunk_{file_index:05d}.parquet'
    file_path = os.path.join(output_dir, file_name)
    df.to_parquet(file_path, engine='pyarrow', index=False)
    return file_path

def generate_large_categorical_dataset(output_dir, total_rows, col1_cardinality, col2_cardinality, col3_cardinality, chunk_size=1_000_000):
    os.makedirs(output_dir, exist_ok=True)
    col2_values = list(range(1, col2_cardinality + 1))
    col3_values = list(range(1, col3_cardinality + 1))
    # Ceiling division: e.g. 100_000_000 rows at 1_000_000 rows per chunk -> 100 chunks.
    num_chunks = (total_rows + chunk_size - 1) // chunk_size
    file_paths = []
    for i in tqdm(range(num_chunks), desc="Generating chunks"):
        chunk_start = i * chunk_size
        chunk_end = min((i + 1) * chunk_size, total_rows)
        actual_chunk_size = chunk_end - chunk_start
        df_chunk = generate_dataset_chunk(
            actual_chunk_size,
            chunk_start,
            col1_cardinality,
            col2_values,
            col3_values
        )
        file_path = write_parquet_file(df_chunk, output_dir, i)
        file_paths.append(file_path)
    # Append one final chunk containing every (col2, col3) combination so all
    # categories are guaranteed to appear. Its col1 ids start at col1_cardinality,
    # so these rows add col2_cardinality * col3_cardinality extra unique col1
    # values on top of those generated above.
    df_final = pd.DataFrame({
        'col1': np.arange(col1_cardinality, col1_cardinality + len(col2_values) * len(col3_values)),
        'col2': np.repeat(col2_values, len(col3_values)),
        'col3': np.tile(col3_values, len(col2_values))
    })
    file_path = write_parquet_file(df_final, output_dir, num_chunks)
    file_paths.append(file_path)
    return file_paths

def read_dataset(file_paths):
    # Reads all chunk files into a single in-memory Arrow table; at the default
    # 100M rows this requires enough RAM to hold the full dataset.
    return pq.ParquetDataset(file_paths).read()

if __name__ == "__main__":
    args = parse_args()
    output_dir = os.path.join(args.output_dir, 'source_dataset')
    file_paths = generate_large_categorical_dataset(
        output_dir,
        args.total_rows,
        args.col1_cardinality,
        args.col2_cardinality,
        args.col3_cardinality
    )

    # Verify the dataset
    dataset = read_dataset(file_paths)
    df_verify = dataset.to_pandas()
    print(f"Total rows: {len(df_verify)}")
    print(f"Col1 unique values: {df_verify['col1'].nunique()}")
    print(f"Col2 unique values: {df_verify['col2'].nunique()}")
    print(f"Col3 unique values: {df_verify['col3'].nunique()}")