BigQuery to Google Cloud Storage
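This gist walks a BigQuery project, creates one Cloud Storage bucket per dataset, and exports every table into its dataset's bucket (newline-delimited JSON by default). It assumes a service-account key file named BigQuery.json sits next to the script and that a small helper module, gcs_util, is importable; that module is not part of the gist, and a sketch of plausible helpers is given after the script.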
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.

import os
import sys
import time

# Point the BigQuery and Cloud Storage clients at the service-account key file.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'BigQuery.json'

from google.cloud import bigquery
from google.cloud.bigquery.job import DestinationFormat, ExtractJobConfig, Compression

from gcs_util import create_bucket, list_bucket, list_blobs
def list_projects():
    """Returns the project id of the first project visible to these credentials."""
    bigquery_client = bigquery.Client()
    for project in bigquery_client.list_projects():
        # Return on the first iteration: only a single project is expected here.
        return project.project_id
def list_datasets(project=None):
    """Lists all datasets in a given project.

    If no project is specified, then the currently active project is used.
    """
    bigquery_client = bigquery.Client(project=project)
    all_datasets = list()
    for dataset in bigquery_client.list_datasets():
        all_datasets.append(dataset.dataset_id)
    return all_datasets
def list_tables(dataset_id, project=None):
    """Lists all of the tables in a given dataset.

    If no project is specified, then the currently active project is used.
    """
    bigquery_client = bigquery.Client(project=project)
    dataset_ref = bigquery_client.dataset(dataset_id)
    all_tables = list()
    for table in bigquery_client.list_tables(dataset_ref):
        all_tables.append(table.table_id)
    return all_tables
def list_rows(dataset_id, table_id, project=None):
    """Prints rows in the given table.

    Prints at most 25 rows for brevity, as tables can contain large numbers
    of rows.

    If no project is specified, then the currently active project is used.
    """
    bigquery_client = bigquery.Client(project=project)
    dataset_ref = bigquery_client.dataset(dataset_id)
    table_ref = dataset_ref.table(table_id)

    # Get the table from the API so that the schema is available.
    table = bigquery_client.get_table(table_ref)

    # Load at most 25 results.
    rows = bigquery_client.list_rows(table, max_results=25)

    # Use format to create a simple table.
    format_string = '{!s:<16} ' * len(table.schema)

    # Print schema field names.
    field_names = [field.name for field in table.schema]
    print(format_string.format(*field_names))

    for row in rows:
        print(format_string.format(*row))
def copy_table(dataset_id, table_id, new_table_id, project=None):
    """Copies a table.

    If no project is specified, then the currently active project is used.
    """
    bigquery_client = bigquery.Client(project=project)
    dataset_ref = bigquery_client.dataset(dataset_id)
    table_ref = dataset_ref.table(table_id)

    # This sample shows the destination table in the same dataset and project,
    # however, it's possible to copy across datasets and projects. You can
    # also copy multiple source tables into a single destination table by
    # providing additional arguments to `copy_table`.
    destination_table_ref = dataset_ref.table(new_table_id)

    # Create a job to copy the table to the destination table.
    # Start by creating a job configuration.
    job_config = bigquery.CopyJobConfig()

    # Configure the job to create the table if it doesn't exist.
    job_config.create_disposition = (
        bigquery.job.CreateDisposition.CREATE_IF_NEEDED)

    copy_job = bigquery_client.copy_table(
        table_ref, destination_table_ref, job_config=job_config)

    print('Waiting for job to finish...')
    copy_job.result()

    print('Table {} copied to {}.'.format(table_id, new_table_id))
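# copy_table is not used in the __main__ block below; a hypothetical invocation
# (dataset and table names are placeholders) would look like:
#
#     copy_table('example_dataset', 'example_table', 'example_table_copy')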
def export_table_to_gcs(dataset_id, table_id, destination, file_format='.json', compression=True):
    """Exports data from BigQuery to an object in Google Cloud Storage.

    For more information, see the README.rst.

    Example invocation:
        $ python export_data_to_gcs.py example_dataset example_table \\
            gs://example-bucket/example-data.csv

    The dataset and table should already exist.
    """
    bigquery_client = bigquery.Client()
    dataset_ref = bigquery_client.dataset(dataset_id)
    table_ref = dataset_ref.table(table_id)

    job_config = ExtractJobConfig()
    if file_format == '.json':
        job_config.destination_format = DestinationFormat.NEWLINE_DELIMITED_JSON
        if compression:
            job_config.compression = Compression.GZIP
    elif file_format == '.avro':
        job_config.destination_format = DestinationFormat.AVRO
    else:
        job_config.destination_format = DestinationFormat.CSV
        if compression:
            job_config.compression = Compression.GZIP

    job = bigquery_client.extract_table(table_ref, destination, job_config=job_config)
    job.result(timeout=300)  # Waits for the job to complete.

    print('Exported {}:{} to {}'.format(dataset_id, table_id, destination))
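# Hypothetical standalone invocation (dataset, table and bucket names are
# placeholders): export one table as gzipped newline-delimited JSON.
#
#     export_table_to_gcs('example_dataset', 'example_table',
#                         'gs://example-bucket/example_table.json',
#                         file_format='.json')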
if __name__ == '__main__':
    project_name = list_projects()
    print('The project name is {}'.format(project_name))

    all_datasets = list_datasets(project=project_name)
    if not all_datasets:
        print('No datasets in this project.')
        sys.exit(-1)

    print('There are {} datasets so far.'.format(len(all_datasets)))
    print('Following are the datasets in software advice,')

    buckets_list = list_bucket()

    if 'Adwords' in all_datasets:
        all_datasets.remove('Adwords')

    # Make sure every dataset has a matching GCS bucket.
    for index, dataset in enumerate(all_datasets):
        print(index + 1, dataset)
        new_bucket_name = '{}-{}'.format(project_name, dataset)
        if new_bucket_name not in buckets_list:
            print('Also, creating bucket named {}'.format(new_bucket_name))
            create_bucket(bucket_name=new_bucket_name)

    buckets_list = list_bucket()
    buckets_list.sort()
    print('New bucket list - {}'.format(', '.join(buckets_list)))

    print("Ok. Let's see what is inside each dataset :)")
    file_format = '.json'
    errors_dict = dict()

    # Export every table that is not already present in its dataset's bucket.
    for index, dataset in enumerate(all_datasets):
        gcs_bucket_name = '{}-{}'.format(project_name, dataset)
        all_tables = list_tables(dataset_id=dataset, project=project_name)
        print(index + 1, dataset, '-', '({})'.format(len(all_tables)), ','.join(all_tables))
        blobs_list = list_blobs(bucket_name=gcs_bucket_name)
        for table in all_tables:
            if table + file_format not in blobs_list:
                print('Moving {} data item {} to gs://{}/{}{}'.format(dataset, table, gcs_bucket_name,
                                                                      table, file_format))
                destination_file = 'gs://{}/{}{}'.format(gcs_bucket_name, table, file_format)
                try:
                    export_table_to_gcs(dataset_id=dataset, table_id=table, destination=destination_file,
                                        file_format=file_format)
                    time.sleep(1)
                except Exception as error:
                    print('Error - ', str(error))
                    if dataset not in errors_dict.keys():
                        errors_dict[dataset] = [{table: str(error)}]
                    else:
                        errors_dict[dataset].append({table: str(error)})
            else:
                print('Skipping {} data item {}; it already exists at gs://{}/{}{}'.format(
                    dataset, table, gcs_bucket_name, table, file_format))
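The gcs_util module imported above is not included in this gist. A minimal sketch of what its three helpers might look like, assuming the google-cloud-storage client library and that each helper returns plain name strings (matching how the script checks membership):

# gcs_util.py (sketch)
from google.cloud import storage


def create_bucket(bucket_name):
    """Creates a bucket with the given name and returns it."""
    storage_client = storage.Client()
    return storage_client.create_bucket(bucket_name)


def list_bucket():
    """Returns the names of all buckets in the current project."""
    storage_client = storage.Client()
    return [bucket.name for bucket in storage_client.list_buckets()]


def list_blobs(bucket_name):
    """Returns the names of all objects stored in the given bucket."""
    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    return [blob.name for blob in bucket.list_blobs()]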