Python relative imports in AWS Lambda fail with "attempted relative import with no known parent package"
In AWS Lambda, if I attempt an explicit relative import in a project laid out like this:
.
├── lambda_file.py
└── example.py
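Presumably the failing import looks something like the sketch below (the handler and helper names are assumptions, not taken from the question):

# lambda_file.py -- sits at the root of the deployment package
from .example import helper  # explicit relative import of a hypothetical helper in example.py

def lambda_handler(event, context):
    return helper(event)

Lambda imports the handler file as a plain top-level module (for a handler setting like lambda_file.lambda_handler), not as part of a package, so __package__ is empty and Python raises ImportError: attempted relative import with no known parent package. With this flat layout an absolute import such as from example import helper works instead.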
# save to the Windows user directory as .lintr
linters: with_defaults(object_name_linter = NULL,
                       object_length_linter(50),
                       commented_code_linter = NULL,
                       object_usage_linter = NULL,
                       line_length_linter(120),
                       cyclocomp_linter = cyclocomp_linter(50))
needs(dplyr, readr, reshape2, tidyr, zoo)

read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv") %>%
  rename(country = `Country/Region`,
         province = `Province/State`) %>%
  select(-Lat, -Long) %>%
  pivot_longer(-c(country, province), names_to = "date", values_to = "confirmed") %>%
  mutate(date = as.Date(date, "%m/%d/%y")) %>%
  mutate(country = ifelse(country == "China" & province == "Hong Kong",
                          "Hong Kong", country)) %>%
# Bivariate Choropleth Map
## this is based on Timo Grossenbacher and Angelo Zehr's tutorial -
## https://timogrossenbacher.ch/2019/04/bivariate-maps-with-ggplot2-and-sf/

# dependencies
library(dplyr)    # data wrangling
library(ggplot2)  # plotting
library(purrr)    # iteration
library(tidyr)    # data wrangling
# -*- coding: utf-8 -*-
import csv
import os

import scrapy
from scrapy.http import FormRequest

from my_scraper.models import TableRow

END_DATE = '01/01/2019'
BASE_PATH = os.path.dirname(__file__)
ID_FILE = os.path.join(BASE_PATH, 'ids.csv')
Inspired by the following exchange on Twitter, in which someone captures and posts a valuable video onto Twitter but doesn't have the resources to easily transcribe it for the hearing-impaired, I thought it'd be fun to try out Amazon's AWS Transcribe service to help with this problem, and to see if I could do it all from the bash command line like a Unix dork.
The instructions and code below show how to use command-line tools/scripting and Amazon's Transcribe service to transcribe the audio from online video. tl;dr: AWS Transcribe is a pretty amazing service.
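The walkthrough itself drives everything from bash with the AWS CLI; as a rough sketch of the same flow in Python with boto3 (the bucket, object key, and job name below are placeholders, not values from the post), submitting and polling a job looks like this:

# Sketch: start an AWS Transcribe job for media already uploaded to S3,
# poll until it finishes, then print the URL of the JSON transcript.
# The S3 URI and job name are made-up placeholders.
import time
import boto3

transcribe = boto3.client("transcribe")
job_name = "twitter-video-transcript"

transcribe.start_transcription_job(
    TranscriptionJobName=job_name,
    Media={"MediaFileUri": "s3://my-example-bucket/twitter-video.mp4"},
    MediaFormat="mp4",
    LanguageCode="en-US",
)

while True:  # Transcribe jobs are asynchronous, so poll for completion
    job = transcribe.get_transcription_job(TranscriptionJobName=job_name)["TranscriptionJob"]
    if job["TranscriptionJobStatus"] in ("COMPLETED", "FAILED"):
        break
    time.sleep(10)

if job["TranscriptionJobStatus"] == "COMPLETED":
    print(job["Transcript"]["TranscriptFileUri"])  # JSON transcript with word-level timestamps

The equivalent CLI calls (aws transcribe start-transcription-job and aws transcribe get-transcription-job) follow the same shape from bash.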
library(tidycensus)
library(leaflet)
library(sf)
library(viridis)

options(tigris_use_cache = TRUE)

il1 <- get_acs(geography = "county",
               variables = c(hhincome = "B19013_001"),
               state = "IL",
               geometry = TRUE) %>%
library(httr)
library(magick)
library(hrbrthemes)
library(ggplot2)

theme_tweet_rc <- function(grid = "XY", style = c("stream", "card"), retina = TRUE) {

  style <- match.arg(tolower(style), c("stream", "card"))

  switch(
-- specify input and output directories
set infile_directory to "/Users/doug/Desktop/inputs/"
set outfile_directory to "/Users/doug/Desktop/outputs/"

-- get the basenames of each input file
tell application "System Events"
    set infile_list to files of folder infile_directory
end tell

-- process each input file
library(tidyverse)

# data from https://www.kaggle.com/bls/american-time-use-survey
df.resp <- read_csv('../data/atus/atusresp.csv')
df.act  <- read_csv('../data/atus/atusact.csv',
                    col_types = cols(tustarttim = col_character(), tustoptime = col_character()))
df.sum  <- read_csv('../data/atus/atussum.csv')

df.tmp <- df.act %>%
  mutate(activity = case_when(trtier2p == 1301 ~ 'Exercise',