Add yourself to the list below and give a short blurb about something interesting. Go wild!
;; mapcar@ -- a mapcar that can splice several results into the output list.
;; If FUN returns a list whose head is the symbol `@', the remaining elements
;; are spliced into the result instead of being inserted as a single element.
(require 'cl-lib)

(defun mapcar@ (fun seq)
  "Map FUN over SEQ, splicing any result of the form (@ ...) into the output."
  (let (result)
    (cl-loop for elem in (reverse seq)
             for newelem = (funcall fun elem)
             if (and (listp newelem)
                     (eq (car newelem) '@))
             ;; Walk the spliced elements in reverse so they keep their
             ;; original order when consed onto the front of RESULT.
             do (cl-loop for newelem2 in (reverse (cdr newelem))
                         do (setq result (cons newelem2 result)))
             else do (setq result (cons newelem result)))
    result))
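;; Usage sketch (my own example, not from the original author): splice a
;; duplicate of every even number, so (1 2 3 4) becomes (1 2 2 3 4 4):
;; (mapcar@ (lambda (x) (if (cl-evenp x) (list '@ x x) x)) '(1 2 3 4))
;;   => (1 2 2 3 4 4)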
# stubby - a local DNS Privacy stub resolver
#
# stubby acts as a local DNS Privacy stub resolver, using DNS-over-TLS.
# Stubby encrypts DNS queries sent from the local machine to a DNS Privacy
# resolver, increasing end-user privacy.
#
description "stubby server"
start on runlevel [2345]
stop on runlevel [!2345]
# The job needs an exec stanza to actually start the daemon; this path
# assumes the usual Debian/Ubuntu package location.
respawn
exec /usr/bin/stubby
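# A minimal sketch of the matching /etc/stubby/stubby.yml (my assumption:
# listen on loopback and forward to Cloudflare's resolver; swap in any
# DNS-over-TLS upstream you trust):
#
#   resolution_type: GETDNS_RESOLUTION_STUB
#   dns_transport_list:
#     - GETDNS_TRANSPORT_TLS
#   listen_addresses:
#     - 127.0.0.1
#   upstream_recursive_servers:
#     - address_data: 1.1.1.1
#       tls_auth_name: "cloudflare-dns.com"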
#!/usr/bin/awk -f
# Create variables containing counts of the number of different incident types
# within each area, where an area is defined as a unique longitude/latitude
# pair to the nearest 2 decimal places.
BEGIN {
    # Define csv fields (gawk's FPAT handles quoted fields containing commas)
    FPAT = "\"[^\"]*\"|[^\",]*";
    # Traverse arrays in ascending index order (gawk-specific)
    PROCINFO["sorted_in"] = "@ind_str_asc";
}
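# The BEGIN block above only sets up parsing; a counting rule might look like
# this sketch. The field positions are my assumptions (longitude in $1,
# latitude in $2, incident type in $3) -- adjust to the real column layout.
NR > 1 {
    area = sprintf("%.2f,%.2f", $1, $2);
    counts[area][$3]++;       # gawk 4+ arrays of arrays
}
END {
    for (area in counts)
        for (type in counts[area])
            print area, type, counts[area][type];
}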
;; This file contains an example of how to extract data from pdf files using `extract-text-from-files'.
;; It extracts state-by-state data on the total number of law enforcement employees from pdf files
;; downloaded from the FBI website.
;; There is a lot more data available in these files, but I only need total employees for now.
;; PDF files must first be downloaded from these URLs:
;; https://www.fbi.gov/about-us/cjis/ucr/crime-in-the-u.s/1995/95sec6.pdf
;; https://www.fbi.gov/about-us/cjis/ucr/crime-in-the-u.s/1996/96sec6.pdf
;; https://www.fbi.gov/about-us/cjis/ucr/crime-in-the-u.s/1997/97sec6.pdf
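;; Since the `extract-text-from-files' call itself isn't shown here, this is a
;; standalone sketch of the same idea: shell out to pdftotext and collect
;; "STATE NAME   12,345"-style total lines. The regexp and the -layout flag
;; are my assumptions about the report format, not the author's code.
(require 'subr-x)  ; for `string-trim'

(defun my/pdf-state-totals (file)
  "Return (STATE . TOTAL) pairs scraped from FILE, a PDF of UCR tables."
  (with-temp-buffer
    (call-process "pdftotext" nil t nil "-layout" file "-")
    (goto-char (point-min))
    (let (rows)
      (while (re-search-forward "^\\([A-Z][A-Z ]+\\)\\s-\\{2,\\}\\([0-9,]+\\)\\s-*$" nil t)
        (push (cons (string-trim (match-string 1)) (match-string 2)) rows))
      (nreverse rows))))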
## Analysis of voting patterns for 2016 Eurovision song contest
library(clusterfly)
library(igraph)
library(magrittr)
## load the voting data
votedata <- read.csv("eurovision-votes_2016.csv")
## remove total votes column
votedata$total_votes <- NULL
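## A sketch of one possible next step (my assumption: after dropping the
## totals column, the first column names the voting country and the rest
## hold the points given to each recipient), building a weighted directed
## graph for igraph:
votemat <- as.matrix(votedata[, -1])
rownames(votemat) <- votedata[[1]]
g <- graph_from_adjacency_matrix(votemat, mode = "directed", weighted = TRUE)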
# Some examples of how to use geocode data of different formats.
## load libraries
library("magrittr")
library("eurostat")
library("ggplot2")
library("countrycode")
library("rgdal")
library("RColorBrewer")  # the CRAN package is RColorBrewer, not colorbrewer
## plotting NUTS shape files
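## A sketch, assuming eurostat's get_eurostat_geospatial() is used to fetch
## the NUTS-2 boundaries (the returned class and column names vary between
## package versions):
nuts2 <- get_eurostat_geospatial(resolution = "60", nuts_level = 2)
ggplot(nuts2) +
  geom_sf() +
  theme_minimal()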
#!/usr/bin/awk -f
# Validate each field of a comma-separated file, reporting the line number
# of any field that fails its pattern.
BEGIN { FS = "," }
# Flag empty lines first and skip them, so they don't also trip every
# field check below.
/^$/ { print "Line " NR " is empty"; next }
$1 !~ /^[0-9]+$/ { print "Line " NR ": Field 1 invalid" }
$2 !~ /^"?[a-zA-Z][^,]+"?$/ { print "Line " NR ": Field 2 invalid" }
$3 !~ /^[0-9.]+$/ { print "Line " NR ": Field 3 invalid" }
$4 !~ /[0-9]+/ { print "Line " NR ": Field 4 invalid" }
$5 !~ /[0-9](\.[0-9])? - [0-9](\.[0-9])?/ { print "Line " NR ": Field 5 invalid" }
# convert wide-form Freedom House Good Governance data to long-form
# Read the data
wide <- read.csv("freedom_house_good_governance.csv")
# Get the columns corresponding to each wide-form variable that will be
# converted to long form: the PR/CL/Status columns repeat as consecutive
# triplets, so every third column (with the right offset) belongs to one variable.
PRvars <- names(wide)[(1:40)*3-1]
CLvars <- names(wide)[(1:40)*3]
Statusvars <- names(wide)[(1:40)*3+1]
# Get the times associated with the wide-form variables
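# The snippet stops before the reshape itself; one way to finish with base R,
# assuming column 1 identifies the country and that a year can be pulled from
# each PR column name (the exact name format is an assumption):
years <- as.numeric(sub("\\D*", "", PRvars))
long <- reshape(wide,
                direction = "long",
                varying = list(PRvars, CLvars, Statusvars),
                v.names = c("PR", "CL", "Status"),
                times = years,
                timevar = "year",
                idvar = names(wide)[1])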
#!/usr/bin/perl
use strict;
use warnings;
use DBI;
# example invocation:
#   csv_load_db.pl Changed.csv pat_regan_combined create_table.sql
my $host = "localhost";
my $port = 5433;
my $db   = "geopolitical";