Skip to content

Instantly share code, notes, and snippets.

View medvedev1088's full-sized avatar

Evgeny Medvedev medvedev1088

View GitHub Profile
#standardSQL
-- MIT License
-- Copyright (c) 2018 Evgeny Medvedev, [email protected]
with double_entry_book as (
-- debits
select to_address as address, value as value, block_timestamp, block_number
from `bigquery-public-data.ethereum_blockchain.traces`
where to_address is not null
and status = 1
and (call_type not in ('delegatecall', 'callcode', 'staticcall') or call_type is null)
#standardSQL
-- MIT License
-- Copyright (c) 2018 Evgeny Medvedev, [email protected]
with double_entry_book as (
-- debits
select to_address as address, value as value, block_timestamp
from `bigquery-public-data.crypto_ethereum.traces`
where to_address is not null
and status = 1
and (call_type not in ('delegatecall', 'callcode', 'staticcall') or call_type is null)
-- Total ether supply over time: ETH is only created by the genesis
-- allocation and by block/uncle mining rewards, both recorded as traces.
WITH ether_emitted_by_date AS (
    -- Wei issued per calendar day via genesis and reward traces.
    SELECT
        DATE(block_timestamp) AS date,
        SUM(value) AS value
    FROM `bigquery-public-data.crypto_ethereum.traces`
    WHERE trace_type IN ('genesis', 'reward')
    GROUP BY DATE(block_timestamp)
)
SELECT
    date,
    -- Running total of issuance, converted from wei to ether (10^18 wei = 1 ETH).
    SUM(value) OVER (ORDER BY date) / POWER(10, 18) AS supply
FROM ether_emitted_by_date
from_address to_address value trace_address error status
0xf1bc217a1e50a697d37caf568d5260033d53e1e9 0x18a672e11d637fffadccc99b152f4895da069601 1 12 Bad jump destination 0
0x0e95adb39a2dfe1c8f3969de4480a5d3ddfadebd 0xf1bc217a1e50a697d37caf568d5260033d53e1e9 5 Out of gas 0
0xf1bc217a1e50a697d37caf568d5260033d53e1e9 0x05f65ab88dc474a83a98994291c7a0abd13c9ae1 1 4 0
0xf1bc217a1e50a697d37caf568d5260033d53e1e9 0x4a574510c7014e4ae985403536074abe582adfc8 1 3 0
0xf1bc217a1e50a697d37caf568d5260033d53e1e9 0xa2d4035389aae620e36bd828144b2015564c2702 1 10 0
0xf1bc217a1e50a697d37caf568d5260033d53e1e9 0xcab97565438f594964c95eb0a1c6c4c7a3ca989c 1 11 0
0xf1bc217a1e50a697d37caf568d5260033d53e1e9 0xf8f31fc73f2a16002cc9c96af465e50efd74469e 1 5 0
0xf1bc217a1e50a697d37caf568d5260033d53e1e9 0x7803a643be2f08f7b4843fa241de822e239d3511 1 7 0
0xf1bc217a1e50a697d37caf568d5260033d53e1e9 0xbb9bc244d798123fde783fcc1c72d3bb8c189413 1 0 0
from_address to_address trace_address error status
0xc5b373618d4d01a38f822f56ca6d2ff5080cc4f2 0xc5f60fa4613493931b605b6da1e9febbdeb61e16 1
0xc5f60fa4613493931b605b6da1e9febbdeb61e16 0x06012c8cf97bead5deae237070f9587f8e7a266d 0 Reverted 0
address balance
0x742d35cc6634c0532925a3b844bc454e4438f44e 1661212597005168500000000
0x281055afc982d96fab65b3a49cac8b878184cb16 1538423106565967645420298
0x6f46cf5569aefa1acc1009290c8e043747172d89 1510065642130147102014178
0x90e63c3d53e0ea496845b7a03ec7548b70014a91 1507810438757735812302993
0x53d284357ec70ce289d6d64134dfac8e511c8a3d 1378754093068188911481302
0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2 1125093884258551166830589
0x61edcdf5bb737adffe5043706e7c5bb1f1a56eea 1009999000010000000000000
0xab7c74abc0c4d48d1bdad5dcb26153fc8780f83e 1000000011463125364129410
0xbe0eb53f46cd790cd13851d5eff43d12404d33e8 988888054768100000000000
WITH traces_with_status AS (
-- Find all nested traces of failed traces
WITH nested_failed_traces AS (
SELECT distinct child.transaction_hash, child.trace_address
FROM `bigquery-public-data.ethereum_blockchain.traces` parent
JOIN `bigquery-public-data.ethereum_blockchain.traces` child
ON (parent.trace_address IS NULL OR starts_with(child.trace_address, concat(parent.trace_address, ',')))
AND child.transaction_hash = parent.transaction_hash
where parent.trace_type IN ('call', 'create')
AND parent.error IS NOT NULL
#standardSQL
-- MIT License
-- Copyright (c) 2018 Evgeny Medvedev, [email protected]
with double_entry_book as (
-- debits
select to_address as address, value as value
from `bigquery-public-data.crypto_ethereum.traces`
where to_address is not null
and status = 1
and (call_type not in ('delegatecall', 'callcode', 'staticcall') or call_type is null)
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
## @params: [JOB_NAME]
# Resolve the Glue job arguments from the command line; 'JOB_NAME' is the
# only required parameter here and is typically passed to Job.init() below.
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
# The below partitioning tries to make each partition of equal size.
# The first million blocks are in a single partition.
# The next 3 million blocks are in 100k partitions.
# The next 1 million blocks are in 10k partitions.
# Note that there is a limit in Data Pipeline on the number of objects, which can be
# increased in the Support Center
# https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-limits.html
EXPORT_PARTITIONS = [(0, 999999)] + \
[(start, end) for start, end in split_to_batches(1000000, 1999999, 100000)] + \
[(start, end) for start, end in split_to_batches(2000000, 2999999, 100000)] + \