An inefficient but effective way to scan logs on S3
from __future__ import print_function

import StringIO
import gzip

from boto.s3.connection import S3Connection

conn = S3Connection("<key_id>", "<secret>")
bucket = conn.get_bucket('<bucket>')


def get_matching_lines(prefix, match):
    """
    prefix - the prefix of files to match, useful for limiting date ranges
    match - the string present in relevant log entries
    """
    with open('logs.out', 'w') as f:
        count = 0
        # Note: get_all_keys returns a single page of results (at most 1000 keys);
        # bucket.list(prefix=prefix) would paginate over larger key sets.
        keys = bucket.get_all_keys(prefix=prefix)
        print("Searching {} log files".format(len(keys)))
        for key in keys:
            # Pull each gzipped log file into memory and decompress it
            fileobj = StringIO.StringIO(key.get_contents_as_string())
            gzip_file_handle = gzip.GzipFile(fileobj=fileobj)
            log = gzip_file_handle.read()
            logs = log.split('\n')
            # Keep only the lines containing the search string
            logs = [l for l in logs if match in l]
            for l in logs:
                print(l, file=f)
                count += 1
    print("Found {} matching lines".format(count))