Table:
AccountCustomerQuotaCounters
Keys:
accountId : pk : S
ym_id : sk : S
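A minimal boto3 sketch that creates the table above (on-demand billing is an assumption; provisioned capacity works just as well):

import boto3

dynamodb = boto3.client('dynamodb')

# Partition key accountId (S), sort key ym_id (S), per the key definition above.
dynamodb.create_table(
    TableName='AccountCustomerQuotaCounters',
    AttributeDefinitions=[
        {'AttributeName': 'accountId', 'AttributeType': 'S'},
        {'AttributeName': 'ym_id', 'AttributeType': 'S'},
    ],
    KeySchema=[
        {'AttributeName': 'accountId', 'KeyType': 'HASH'},
        {'AttributeName': 'ym_id', 'KeyType': 'RANGE'},
    ],
    BillingMode='PAY_PER_REQUEST',  # assumption
)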
https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-create-rule-schedule.html
On each job-creation event, create a "one-time" EventBridge schedule.
Let the schedule's target be SQS, so that we can buffer and consume at a controlled rate.
The SQS payload can carry the job-id; see the boto3 sketch below.
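A minimal sketch with boto3, using the EventBridge Scheduler API (which supports one-time at(...) expressions) rather than the classic rules API from the link above; the schedule naming scheme, queue ARN, role ARN, and run time are all placeholders:

import json
import boto3

scheduler = boto3.client('scheduler')

def schedule_job(job_id, run_at):
    # run_at is a UTC datetime; an at(...) expression fires exactly once.
    scheduler.create_schedule(
        Name=f'job-{job_id}',  # placeholder naming scheme
        ScheduleExpression=f"at({run_at.strftime('%Y-%m-%dT%H:%M:%S')})",
        FlexibleTimeWindow={'Mode': 'OFF'},
        Target={
            'Arn': 'arn:aws:sqs:REGION:ACCOUNT:job-queue',          # placeholder queue ARN
            'RoleArn': 'arn:aws:iam::ACCOUNT:role/scheduler-role',  # placeholder role ARN
            'Input': json.dumps({'job-id': job_id}),
        },
    )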
import boto3
import json

s3_client = boto3.client('s3')

def lambda_handler(event, context):
    # Extract the S3 bucket and object key from the event
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = event['Records'][0]['s3']['object']['key']
    # Fetch the object that triggered this invocation
    obj = s3_client.get_object(Bucket=bucket, Key=key)
    body = obj['Body'].read()
    return {'statusCode': 200, 'body': json.dumps({'bucket': bucket, 'key': key})}
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class RequestHandler {
    // Pool used to run requests concurrently.
    private final ExecutorService threadPool = Executors.newFixedThreadPool(4);
}
import sys
import logging
import traceback
import json

logger = logging.getLogger()
logger.setLevel(logging.INFO)

def process(record):
    # Log the record; on failure, emit the full traceback and re-raise.
    try:
        logger.info(json.dumps(record))
    except Exception:
        traceback.print_exc(file=sys.stderr)
        raise
import org.apache.spark.sql.functions.expr
import spark.implicits._

val a_s = Seq.fill(9)("a" -> 1) :+ ("a" -> 10)
// a_s: Seq[(String, Int)] = List((a,1), (a,1), (a,1), (a,1), (a,1), (a,1), (a,1), (a,1), (a,1), (a,10))
val b_s = Seq.fill(9)("b" -> 2) :+ ("b" -> 10)
// b_s: Seq[(String, Int)] = List((b,2), (b,2), (b,2), (b,2), (b,2), (b,2), (b,2), (b,2), (b,2), (b,10))
val df = (a_s ++ b_s).toDF("kind", "value")
// df: org.apache.spark.sql.DataFrame = [kind: string, value: int]
df.groupBy("kind").agg(expr("approx_percentile(value, 0.90, 20)").as("x_percentile")).show
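The third argument to approx_percentile is the accuracy parameter (default 10000); a larger value gives a tighter approximation at the cost of memory, so 20 here is deliberately coarse.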
(In Ktor 1.6.2)

application.conf
...
jwt {
    issuer = "https://cognito-idp.ap-northeast-1.amazonaws.com/__SPECIFY_POOL_ID_HERE__"
    audience = "__SPECIFY_CLIENT_ID_HERE__"
    realm = "ktor sample app"
}
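Here issuer is the Cognito user pool's token issuer URL (region plus user pool ID), and audience is the app client ID; Cognito ID tokens carry these values in their iss and aud claims.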
import sys

# choose() is the same as computing the number of combinations. Normally this
# is equal to:
#
#   factorial(N) / (factorial(m) * factorial(N - m))
#
# but that is very slow to run and, with a recursive factorial, requires a
# deep stack (Python has no tail-call optimization).
def choose(n, m):
    # Multiplicative form: C(n, m) = product over i = 1..m of (n - m + i) / i.
    # The running product of i consecutive integers is divisible by i!,
    # so the floor division below is always exact.
    result = 1
    for i in range(1, m + 1):
        result = result * (n - m + i) // i
    return result
export AWS_ACCESS_KEY_ID='A???'
export AWS_SECRET_ACCESS_KEY='g???'

spark-shell --packages "org.apache.hadoop:hadoop-aws:3.3.4,com.amazonaws:aws-java-sdk-bundle:1.12.262"

val test = spark.read.parquet("s3a://bucket/prefix/part-000000.snappy.parquet")
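Note: the s3a connector's default credential provider chain picks up the AWS_* environment variables, so no extra Hadoop configuration should be needed here; the hadoop-aws version should match the Hadoop version your Spark build ships with.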
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS

def get_exif_data(image):
    """Return a dictionary of the EXIF data of a PIL Image item. Also converts the GPS tags."""
    exif_data = {}
    info = image._getexif()
    if info:
        for tag, value in info.items():
            decoded = TAGS.get(tag, tag)
            if decoded == "GPSInfo":
                # Decode the nested GPS IFD using its own tag table.
                gps_data = {}
                for gps_tag in value:
                    gps_data[GPSTAGS.get(gps_tag, gps_tag)] = value[gps_tag]
                exif_data[decoded] = gps_data
            else:
                exif_data[decoded] = value
    return exif_data