|
#!/bin/python |
|
|
|
""" |
|
This script pulls all the S3 bucket names and their associated tags from AWS S3 into a CSV file programmatically.
|
|
|
Some highlights on this script: |
|
* One has to provide all the tags that have to be read and converted into the CSV header
|
* These tags are declared in the TagSets class; variables such as "OWNER" and "TEAM" correspond to tag keys available on the S3 buckets
|
* Define the format/sequence in which you want the CSV headers using the "ORDER" list in TagSets
|
* Execute this script as `python get_s3_tags.py` |
|
* The default CSV file created in the same directory is named as: `s3_details.csv` |
|
""" |
|
|
|
|
|
import boto3 |
|
import csv |
|
from botocore.exceptions import ClientError |
|
|
|
class TagSets:
    """
    Registry of the S3 tag keys to export and their CSV column order.

    Modify this class to capture the tags one is willing to download:
    add/remove the key constants below and adjust ORDER to control the
    column sequence written to the CSV file.
    """

    OWNER = "OWNER"

    TEAM = "TEAM"

    PRODUCT = "PRODUCT"

    ENVIRONMENT = "ENVIRONMENT"

    # Synthetic column: the bucket name itself, injected alongside real tags.
    BUCKET = "BUCKET"

    # CSV header row and the per-row column order.
    ORDER = [BUCKET, PRODUCT, OWNER, TEAM, ENVIRONMENT]

    @classmethod
    def convert_to_order(cls, tag_set_map):
        """
        Project a tag mapping onto the ORDER column sequence.

        :param tag_set_map: dict mapping tag key -> tag value.
        :return: list of values aligned with ORDER; missing keys become "".
        """
        return [tag_set_map.get(key, "") for key in cls.ORDER]
|
|
|
s3 = boto3.client('s3')

# Keep the bucket list and the per-bucket tagging responses in separate
# names; the original code rebound `response` inside the loop, shadowing
# the list being iterated.
buckets = s3.list_buckets()['Buckets']

file_name = "s3_details.csv"

# newline='' is required by the csv module to avoid blank rows on Windows.
with open(file_name, 'w', newline='') as csv_file:

    writer = csv.writer(csv_file)

    # Header row mirrors the configured column order.
    writer.writerow(TagSets.ORDER)

    for bucket in buckets:

        bucket_name = bucket['Name']

        try:

            tagging = s3.get_bucket_tagging(Bucket=bucket_name)

            # Flatten [{'Key': k, 'Value': v}, ...] into {k: v}.
            tag_set_map = {item['Key']: item['Value'] for item in tagging.get('TagSet', [])}

        except ClientError:

            # Bucket has no tag set (NoSuchTagSet) or access is denied:
            # still emit a full-width row so every CSV row matches the header.
            tag_set_map = {}

        tag_set_map[TagSets.BUCKET] = bucket_name

        writer.writerow(TagSets.convert_to_order(tag_set_map))

print("Completed writing S3 tags to a csv file: " + file_name)