Skip to content

Instantly share code, notes, and snippets.

@albert-decatur
Created November 24, 2010 19:15
Show Gist options
  • Select an option

  • Save albert-decatur/714209 to your computer and use it in GitHub Desktop.

Select an option

Save albert-decatur/714209 to your computer and use it in GitHub Desktop.
collection of BASH scripts for GIS
#!/bin/bash
# geocode a CSV file using both Google and Yahoo! APIs, send output to either KML or ESRI Shapefile
# arguments: 1) CSV file of addresses (first line is a header) 2) output format, "kml" or "shp"
# NOTE(review): $1 is used unquoted and embedded in /tmp file names throughout,
# so it should be a plain file name with no spaces or slashes — confirm before reuse
# example Yahoo geocode request: http://where.yahooapis.com/geocode?q=950+main+street,+Worcester,+ma&appid=[yourappidhere]
# example Google geocode request: http://maps.googleapis.com/maps/api/geocode/xml?address=950+main+st,+worcester,+MA&sensor=true
# remember where we started so generated outputs can be moved back at the end
WORKING_DIR=`pwd`
# work on a copy of the CSV so the original file is never modified
cp $1 /tmp/$1.copy
# save the header line before it is stripped from the copy
sed q /tmp/$1.copy > /tmp/$1.header
ORIGINAL_HEADER=`cat /tmp/$1.header`
# get rid of header so the geocoding loops see only data rows
sed -i '1d' /tmp/$1.copy
# define copy of CSV as variable
GEOCODE_CSV=/tmp/$1.copy
# define the endings of URLs needed to request lat/long
YAHOO_END="&appid=[yourappidhere]"
GOOGLE_END="&sensor=true"
### Yahoo! API ###
# define which API is in use
API="yahoo"
# set progress counter for progress report
POINT_NUM=1
# accumulator file for all Yahoo lat/long results, one line per input row
touch /tmp/$1.yahoo
# read the address rows one by one
# BUGFIX: use IFS= and -r (as the Google loop below already does) so leading
# whitespace is kept and backslashes in address rows are not interpreted
while IFS= read -r line
do
# show script progress
PROGRESS_PHRASE="retrieving "$API" coordinates for point $POINT_NUM"
# show progress bar
echo $PROGRESS_PHRASE
# make a shared string off of which to build when requesting lat/long:
# rejoin the CSV fields as "a, b, c" then replace spaces with + for the URL
COMMON_GEOCODING_STRING=`echo -n $line | egrep -o "[^,]+" | tr '\n' ',' | sed 's/\,$//g' | sed 's/\,/\, /g' | sed 's/[ ]/+/g'`
# request lat/long from Yahoo! and scrape the coordinates out of the text dump
links -dump "http://where.yahooapis.com/geocode?q=${COMMON_GEOCODING_STRING}${YAHOO_END}" | sed q | sed 's/^[ \ta-zA-Z0-9]*[_US]*.....//g' | egrep -o "[.0-9-]*" | egrep -o "^[0-9-]+[.][0-9-]+[.][0-9]{6}" > /tmp/$1.yahoolatlong_temp
# increment progress
POINT_NUM=$(($POINT_NUM+1))
# parse latitude out of HTML dump
YAHOO_LAT=`egrep -o "^[0-9]*[.][0-9]{6}" /tmp/$1.yahoolatlong_temp`
# separate the lat and the long based on the presence of 6 digits after the decimal point
YAHOO_LATLONG=`sed "s/^[0-9]*[.][0-9]\{6\}/$YAHOO_LAT,/g" /tmp/$1.yahoolatlong_temp`
# redirect Yahoo! lat/long to a file
echo $YAHOO_LATLONG > /tmp/$1.yahoolatlong
# redirect all Yahoo! lat/long to the same file
cat /tmp/$1.yahoolatlong >> /tmp/$1.yahoo
done < $GEOCODE_CSV
# join the CSV and yahoo lat/long (coordinates become the first two columns)
paste -d, /tmp/$1.yahoo /tmp/$1.copy > /tmp/"$1"."$API"_points.csv
INPUT_CSV="/tmp/"$1"."$API"_points.csv"
# remake the header for the CSV: prepend Y,X for the new coordinate columns
SPATIAL_HEADER="Y,X,"
NEW_HEADER="${SPATIAL_HEADER}${ORIGINAL_HEADER}"
# append new header to top of CSV (GNU sed "1i")
sed -i 1i"$NEW_HEADER" /tmp/"$1"."$API"_points.csv
# define input CSV layer name for OGR
INPUT_LAYER=$(basename $INPUT_CSV .csv)
# OGR name defined
OGR_NAME=""$1"."$API""
# make XML suitable as OGR VRT
XML_FILE="<OGRVRTDataSource>
<OGRVRTLayer name=\""$OGR_NAME"\">
<SrcDataSource>$INPUT_CSV</SrcDataSource>
<SrcLayer>"$INPUT_LAYER"</SrcLayer>
<GeometryType>wkbPoint</GeometryType>
<LayerSRS>WGS84</LayerSRS>
<GeometryField encoding=\"PointFromColumns\" x=\"x\" y=\"y\"/>
</OGRVRTLayer>
</OGRVRTDataSource>"
# save this XML as .vrt
echo $XML_FILE > /tmp/$1.$API.vrt
# use the .vrt information to create an OGR compatible vector
echo "building "$API" "$2""
# BUGFIX: quote "$2" so an empty/missing second argument cannot break the
# test, and exit non-zero on bad usage instead of reporting success
if [ "$2" = "kml" ]; then
ogr2ogr -f KML /tmp/$API.$2 /tmp/$1.$API.vrt
elif [ "$2" = "shp" ]; then
ogr2ogr -f "ESRI Shapefile" /tmp/$API.$2 /tmp/$1.$API.vrt
else
echo You have selected neither KML nor ESRI Shapefile as your output.
echo Please use either \"kml\" or \"shp\" as the second argument.
exit 1
fi
# move generated .$2 output to working directory
mv /tmp/$API* $WORKING_DIR/
### Google API ###
# define which API is in use
API="google"
# reset progress counter for the Google pass
POINT_NUM=1
# accumulator file for all Google lat/long results, one line per input row
touch /tmp/$1.google
# read the address rows one by one; IFS= and -r preserve whitespace/backslashes
while IFS= read -r line
do
# show script progress
PROGRESS_PHRASE="retrieving "$API" coordinates for point $POINT_NUM"
# show progress bar
echo $PROGRESS_PHRASE
# make a shared string off of which to build when requesting lat/long:
# rejoin the CSV fields as "a, b, c" then replace spaces with + for the URL
COMMON_GEOCODING_STRING=`echo -n $line | egrep -o "[^,]+" | tr '\n' ',' | sed 's/\,$//g' | sed 's/\,/\, /g' | sed 's/[ ]/+/g'`
# increment progress
POINT_NUM=$(($POINT_NUM+1))
# request lat/long from Google: take the first two decimal numbers in the
# XML response and join them with a comma
GOOGLE_LATLONG=`links -dump "http://maps.googleapis.com/maps/api/geocode/xml?address=${COMMON_GEOCODING_STRING}${GOOGLE_END}" | egrep -o "[-0-9]*[.][-0-9]*" | sed 2q | tr '\n' ',' | sed 's/,$//g'`
# redirect Google lat/long to a file
echo $GOOGLE_LATLONG > /tmp/$1.googlelatlong
# redirect all Google lat/long to the same file
cat /tmp/$1.googlelatlong >> /tmp/$1.google
done < $GEOCODE_CSV
# join the CSV and Google lat/long (coordinates become the first two columns)
paste -d, /tmp/$1.google /tmp/$1.copy > /tmp/"$1"."$API"_points.csv
INPUT_CSV="/tmp/"$1"."$API"_points.csv"
# remake the header for the CSV: prepend Y,X for the new coordinate columns
SPATIAL_HEADER="Y,X,"
NEW_HEADER="${SPATIAL_HEADER}${ORIGINAL_HEADER}"
# append new header to top of CSV (GNU sed "1i")
sed -i 1i"$NEW_HEADER" /tmp/"$1"."$API"_points.csv
# define input CSV layer name for OGR
INPUT_LAYER=$(basename $INPUT_CSV .csv)
# OGR name defined
OGR_NAME=""$1"."$API""
# make XML suitable as OGR VRT
XML_FILE="<OGRVRTDataSource>
<OGRVRTLayer name=\""$OGR_NAME"\">
<SrcDataSource>$INPUT_CSV</SrcDataSource>
<SrcLayer>"$INPUT_LAYER"</SrcLayer>
<GeometryType>wkbPoint</GeometryType>
<LayerSRS>WGS84</LayerSRS>
<GeometryField encoding=\"PointFromColumns\" x=\"x\" y=\"y\"/>
</OGRVRTLayer>
</OGRVRTDataSource>"
# save this XML as .vrt
echo $XML_FILE > /tmp/$1.$API.vrt
# use the .vrt information to create an OGR compatible vector
echo "building "$API" "$2""
# BUGFIX: quote "$2" so an empty/missing second argument cannot break the
# test, and exit non-zero on bad usage instead of reporting success
if [ "$2" = "kml" ]; then
ogr2ogr -f KML /tmp/$API.$2 /tmp/$1.$API.vrt
elif [ "$2" = "shp" ]; then
ogr2ogr -f "ESRI Shapefile" /tmp/$API.$2 /tmp/$1.$API.vrt
else
echo You have selected neither KML nor ESRI Shapefile as your output.
echo Please use either \"kml\" or \"shp\" as the second argument.
exit 1
fi
# move generated .$2 output to working directory
mv /tmp/$API* $WORKING_DIR/
### after API handling ###
# clean up temporary files
rm /tmp/$1.* 2>/dev/null
exit 0
#!/bin/bash
# clip one raster by many vectors, inspired by Tim Sutton, linfiniti.com
# arguments: 1) raster to clip 2) directory of vectors to clip by 3) output location
# NOTE(review): $1 and $3 are used after `cd` into $2, so they should be
# absolute paths (or valid relative to $2) — unchanged from the original
rext=`echo $1 | awk -F . '{print $NF}'`
# NB: basename strips only "$rext", so $rbase keeps its trailing dot
# (e.g. "dem.tif" -> "dem."); the output names below rely on that
rbase=`basename $1 $rext`
# BUGFIX: -p so re-runs don't error; previously errors were just hidden
mkdir -p $3
# BUGFIX: stop if the vector directory can't be entered, otherwise the
# loop would run against whatever the current directory happens to hold
cd $2 || exit 1
for i in *.shp
do
# skip the literal "*.shp" pattern when the directory holds no shapefiles
[ -e "$i" ] || continue
# extent of the vector, reordered to "ulx uly lrx lry" for -projwin
PROJWIN_STRING=`ogrinfo -al -so $i | grep Extent | egrep -o "[0-9.-]*" | sed '3d' | sed 's/[ \t]//g' | tr '\n' ',' | sed 's/,/ /g' | gawk '{ print $1,$4,$3,$2 }'`
vext=`echo $i | awk -F . '{print $NF}'`
vbase=`basename $i $vext`
# rectangular clip
gdal_translate -projwin $PROJWIN_STRING $1 $3/rect.$rbase$vbase$rext
# polygonal clip
gdalwarp -r lanczos -cutline $i $3/rect.$rbase$vbase$rext $3/$rbase$vbase$rext
# remove the intermediate rectangular clip
rm $3/rect.*
done
exit 0
#!/bin/bash
# convert CSV file to many vector lines (one LINESTRING per unique value of a chosen field)
# arguments: 1) input CSV  2) number of the field to group rows into lines by
#            3) number of the latitude field  4) number of the longitude field
#            5) output file, ending in .shp or .kml
# prepare a version of the input without the latitude and longitude fields
# (blank out fields $3 and $4, then squeeze the resulting empty columns)
gawk -F, "(OFS=\",\")("\$$3"=\"\")("\$$4"=\"\")1" $1 | sed 's/,\+/,/g' > /tmp/$1.no_spatial
# make variable out of header
ORIGINAL_HEADER=`sed q /tmp/$1.no_spatial`
# prepare variables to remake the header: the WKT geometry column comes first
SPATIAL_HEADER="wkt_geom,"
NEW_HEADER="${SPATIAL_HEADER}${ORIGINAL_HEADER}"
# remove header on file without lat/long
sed -i '1d' /tmp/$1.no_spatial
# build a gawk field selector (e.g. "$2") from the second argument
FIELD_SELECT="\$$2"
# print selected field
gawk -F, "{ print ${FIELD_SELECT} }" $1 > /tmp/$1.csv2line
# remove header on field
sed -i '1d' /tmp/$1.csv2line
# list only unique entries for selected field
# NOTE(review): uniq only collapses *adjacent* duplicates — this assumes the
# CSV is already sorted/grouped by that field; verify for unsorted input
uniq /tmp/$1.csv2line > /tmp/$1.csv2line_uniq
### make a new vector line file for each unique entry ###
# make a file to redirect linestrings
touch /tmp/$1.linestrings
# make a file to redirect attributes of first record relating to linestring
touch /tmp/$1.attributes
# make variable for progress bar
LINE_NUM=1
while read line
do
# print all records that correspond to a unique value
# NOTE(review): this is a regex/substring match over the whole row, not an
# exact field match — values occurring inside other fields also match
egrep "(${line})" $1 > /tmp/$1_records
# show script progress
PROGRESS_PHRASE="building line number $LINE_NUM"
# show progress bar
echo $PROGRESS_PHRASE
# turns lat/long from those records into OGR compatible linestring
# note that awk flips the column order to long/lat
gawk -F, "{print "\$$4","\$$3" }" /tmp/$1_records | sed 's/[,]//g' | sed 's/$/,/g' | tr '\n' ' ' | sed 's/,[ ]$//g' | sed 's/^[ ]//g' | sed 's/[,][ ]*/,/g' | sed 's/[ ]+/\n/g' > /tmp/$1.linelatlong
OGR_LINESTRING_LATLONG=`cat /tmp/$1.linelatlong`
# wrap the coordinate list as a quoted WKT LINESTRING for the CSV
OGR_LINESTRING="\"LINESTRING (${OGR_LINESTRING_LATLONG})\""
echo $OGR_LINESTRING > /tmp/$1.loop_string
# append each linestring to a file
cat /tmp/$1.loop_string >> /tmp/$1.linestrings
# increment variable for progress bar
LINE_NUM=$(($LINE_NUM+1))
# build a file with attributes based on first line with unique value
egrep "(${line})" /tmp/$1.no_spatial | sed q >> /tmp/$1.attributes
done < /tmp/$1.csv2line_uniq
# join the finished linestrings to the unique names
paste -d, /tmp/$1.linestrings /tmp/$1.attributes > /tmp/$1.join.csv
# append new header to top of the unique records
sed -i 1i"$NEW_HEADER" /tmp/$1.join.csv
# build a VRT file for OGR so it can read the wkt_geom column as geometry
OGR_DATA_SOURCE="/tmp/$1.join.csv"
OGR_LAYER=$(basename /tmp/$1.join.csv .csv)
OGR_VRT="
<OGRVRTDataSource>
<OGRVRTLayer name=\""$1"\">
<SrcDataSource>$OGR_DATA_SOURCE</SrcDataSource>
<SrcLayer>"$OGR_LAYER"</SrcLayer>
<GeometryType>wkbLineString</GeometryType>
<LayerSRS>WGS84</LayerSRS>
<GeometryField encoding=\"WKT\" field=\"wkt_geom\" />
</OGRVRTLayer>
</OGRVRTDataSource>"
# save this VRT
echo $OGR_VRT > /tmp/$1.csv2line.vrt
# take note of fifth argument, which is output file
OUT_FILE="$5"
# take note of the file extension used in the fifth argument
FILE_EXT=`echo $5 | awk -F . '{print $NF}'`
# use the .vrt information to create either an ESRI Shapefile or a KML
if [ $FILE_EXT = "kml" ]; then
ogr2ogr -f KML $OUT_FILE /tmp/$1.csv2line.vrt
else
if [ $FILE_EXT = "shp" ]; then
ogr2ogr -f "ESRI Shapefile" $OUT_FILE /tmp/$1.csv2line.vrt
else
echo You have selected neither KML nor ESRI Shapefile as your output.
echo Please use either \"kml\" or \"shp\" as the second argument.
exit 0
fi
fi
# clean up temporary files
rm /tmp/$1* 2>/dev/null
exit 0
#!/bin/bash
# BASH script to produce a dataset from a network linked kmz file:
# follow the network link, download each linked kmz, pull out the image
# tiles, and (below) georeference each tile as a GTiff
# arguments: 1) kmz file, 2) image file extension, e.g. png, 3) directory for output files
# set value for progress report
IMAGE_NUM=1
# make scratch directory (ignore "already exists" errors)
mkdir $3/scratch 2>/dev/null
# copy kmz to scratch directory
cp $1 $3/scratch
# change directory to scratch
cd $3/scratch
# get basename of argument 1
BASENAME_ARG1=`basename $1`
#echo $BASENAME_ARG1
# unzip kmz (a kmz is a zip archive containing a kml)
unzip $BASENAME_ARG1
# download network linked kml
KMZ_WO_EXT1=`basename $1 .kmz`
# pull the remote URL out of the <href> element of the top-level kml
KML_NETWORK_LINK=`egrep "<href>http" ${KMZ_WO_EXT1}.kml | sed 's/<href>//g' | sed 's/<\/href>//g'`
#echo $KML_NETWORK_LINK
wget -q $KML_NETWORK_LINK
# get kml name from the tail of the URL
KML_NAME1=`echo $KML_NETWORK_LINK | egrep -o "[a-zA-Z0-9-]*[.]kml$"`
#echo $KML_NAME1
# extract the kmz URLs from the NetworkLink entries and feed them to the loop
# NOTE(review): the while loop is the downstream end of this pipeline, so it
# runs in a subshell — variables set inside (e.g. IMAGE_NUM) do not survive
# past the final done
egrep "[.]kmz</href></Link></NetworkLink>$" $KML_NAME1 | sed 's/<NetworkLink><Link><href>//g' | sed 's/<\/href><\/Link><\/NetworkLink>//g' |
# download each KMZ, search for image tiles, download, manipulate
while read line
do
KMZ_URL=`echo $line`
#echo "KMZ_URL is" $KMZ_URL
# download kmz
wget -q $line
# get file name from the tail of the URL
KMZ_NAME1=`echo $line | egrep -o "[a-zA-Z0-9-]*[.]kmz$"`
#echo "KMZ_NAME1 is" $KMZ_NAME1
# unzip kmz
unzip $KMZ_NAME1
# get kmz name without extension
KMZ_WO_EXT2=`basename $KMZ_NAME1 .kmz`
#echo "KMZ_WO_EXT2 is" $KMZ_WO_EXT2
# remove any existing kmls/kmzs except the kml just extracted
ls | egrep "(kml|kmz)" | grep -v $KMZ_WO_EXT2.kml | xargs rm
# in new KML, parse out image URLs
for input in *.kml
do
# sanitize the input for POSIX systems in case of DOS issue
tr -d '\r' < $input > /tmp/$0.$input
# search for file URLs using supplied regex string -> TODO this is still tailored to files that match "[0-9]{4}[.]$2"
egrep "[0-9]{4}[.]"$2"</href>" /tmp/$0.$input | sed 's/<Icon><href>//g' | sed 's/<\/href><\/Icon>//g' |
while read line
do
#echo "line is" $line
#echo "input is" $input
# reformat to create actual url -> need to use part of kmz's url
IMG_URL_PART1=`echo $KMZ_URL | sed "s/"$KMZ_NAME1"//g"`
#echo "IMG_URL_PART1 is" $IMG_URL_PART1
# report progress
PROGRESS_PHRASE="retrieving image $IMAGE_NUM from "$input""
#echo $PROGRESS_PHRASE
#echo "complete url is" $IMG_URL_PART1$line
# download dependably with wget, be quiet, need to put together complete image url
wget -c -O $input.$IMAGE_NUM.$2 -q $IMG_URL_PART1$line
# to make bounding box, make new variable for image name
# NOTE(review): this regex is hard-coded to .png even though $2 is the
# configurable image extension — confirm behavior when $2 != png
IMG_NAME=`echo $line | egrep -o "[0-9]*[.]png$"`
# gather bounding box info from KML: blank out everything before this
# image's <name> element and everything after the next </drawOrder>,
# then keep only the numbers that remain
BOUNDING_BOX=`sed -e "1,/<name>$IMG_NAME<\/name>/ s/.*//" -e '/<\/drawOrder>/,$ s/.*//' $input | sed '/^$/d'| sed '1d' | egrep -o "[0-9.-]*"`
# gather string for gdal_translate based on bounding box
PREP_ULLR_STRING=`echo $BOUNDING_BOX | tr '\n' ',' | sed 's/,/ /g'`
# parse string for gdal_translate to reorder north, south, east, west
ULLR_STRING=`echo $PREP_ULLR_STRING | gawk '{ print $4,$1,$3,$2 }'`
# create georeferenced GTiff based on downloaded file and bounding box, project to WGS84
# BUGFIX: WGS84 is EPSG:4326 — the original said 4236, which is a different
# CRS entirely; 4326 also matches the ogr2ogr reprojection used below
gdal_translate -a_srs EPSG:4326 -a_ullr $ULLR_STRING $input.$IMAGE_NUM.$2 $input.$IMAGE_NUM.tif
# find images that contain transparency: minimum statistic of band 4 (alpha)
ALPHA_LEVEL_MIN=`gdalinfo -stats $input.$IMAGE_NUM.tif | sed -e '1,/Band 4/ s/.*//' -e '/STATISTICS_MAXIMUM/,$ s/.*//' | sed '/^$/d' | sed '$!d' | egrep -o "[0-9]*"`
if [ "$ALPHA_LEVEL_MIN" -eq 0 ]; then
# make band 4 of raster with alpha problem, with 0 as nodata
gdal_translate -b 4 -a_nodata 0 $input.$IMAGE_NUM.tif $input.$IMAGE_NUM.b4.tif
# make a vector of the non-transparent area
gdal_polygonize.py $input.$IMAGE_NUM.b4.tif $input.$IMAGE_NUM.b4.gml
# project vector to WGS84
ogr2ogr -a_srs EPSG:4326 $input.$IMAGE_NUM.b4-proj.gml $input.$IMAGE_NUM.b4.gml
# obtain extent of vector for gdal_translate -projwin (ulx uly lrx lry)
PROJWIN_STRING=`ogrinfo -al -so $input.$IMAGE_NUM.b4-proj.gml | grep Extent | egrep -o "[0-9.-]*" | sed '3d' | sed 's/[ \t]//g' | tr '\n' ',' | sed 's/,/ /g' | gawk '{ print $1,$4,$3,$2 }'`
# rectangular clip on downloaded raster -> removes areas where band 4 = 0
gdal_translate -projwin $PROJWIN_STRING $input.$IMAGE_NUM.tif $input.$IMAGE_NUM.clip.tif
# polygonal clip of area where alpha band is zero
gdalwarp -of GTiff -r lanczos -cutline $input.$IMAGE_NUM.b4-proj.gml $input.$IMAGE_NUM.clip.tif $input.$IMAGE_NUM.clip2.tif
# remove downloaded file with alpha level problem
rm $input.$IMAGE_NUM.tif
# name the new fixed file the same as the original
mv $input.$IMAGE_NUM.clip2.tif $input.$IMAGE_NUM.tif
# remove band 4 rasters and vectors
rm $input.$IMAGE_NUM.b4*
else
# no transparency problem: keep the tif as-is
false
fi
# one output directory per source kml (ignore "already exists")
mkdir `basename $input .kml` 2>/dev/null
mv $input.$IMAGE_NUM.tif `basename $input .kml`/$IMAGE_NUM.tif
# remove downloaded file
rm $input.$IMAGE_NUM.$2
# increment progress
IMAGE_NUM=$(($IMAGE_NUM+1))
done # inner while (image URLs)
done # for (kml files)
done # outer while (kmz URLs)
# remove downloaded png
#ls | egrep "*[.][0-9]+[.]"$2"$" | xargs rm
# mv tifs to output dir
#ls | egrep "*[.][0-9]+[.]tif$" | xargs -I '{}' mv {} $3
# make new dir for kml
#mkdir kml
# move kmls to new dir
#ls|egrep "*[.]kml$" | xargs -I '{}' mv {} kml
# clean up temporary files
rm /tmp/$0[.]* 2>/dev/null
exit 0
#!/bin/bash
# cut a large mosaic (mosaic.vrt in the current directory) into
# BLOCKSIZE x BLOCKSIZE GTiff tiles under ./tiles
# basically a copy of one of Tim Sutton's found at linfiniti.com
# here so I can remember it!
# -p: do not fail when the directory already exists (re-runs)
mkdir -p tiles
# raster dimensions in pixels
XDIM=79011
YDIM=88977
# tile edge length in pixels
BLOCKSIZE=1000
XPOS=0
YPOS=0
BLOCKNO=0
# walk the raster top-to-bottom, left-to-right, one tile per step
while [ $YPOS -le $YDIM ]
do
while [ $XPOS -le $XDIM ]
do
echo "$XPOS $YPOS : ${BLOCKNO}.tif"
gdal_translate -of GTiff -srcwin $XPOS $YPOS $BLOCKSIZE $BLOCKSIZE mosaic.vrt \
tiles/${BLOCKNO}.tif
# shell arithmetic instead of spawning bc for plain integer addition
BLOCKNO=$((BLOCKNO + 1))
XPOS=$((XPOS + BLOCKSIZE))
done
YPOS=$((YPOS + BLOCKSIZE))
XPOS=0
done
#!/bin/bash
# convert DBpedia's .nq of georeferenced wikipedia articles into shp or kml, reporting wikipedia abstract, url, wordcount, number of references, number of links within wikipedia, number of images, cumulative dimensions of images, date article was modified
# arguments are: 1) directory to store .nq file, 2) output CSV file 3) output shp or kml
# TODO: make VRT more general and make most fields integer. allow for use of all languages, not just english
# validate the output extension before doing any downloading
if [[ ${3: -4} == ".shp" || ${3: -4} == ".kml" ]]; then
false
else
# BUGFIX: fixed "Your must" typo and exit non-zero on bad usage
echo "You must provide an output filename with extension .shp or .kml"
exit 1
fi
mkdir $1 2>/dev/null
# BUGFIX: stop if the storage directory cannot be entered
cd $1 || exit 1
# try the parallel downloader first; wget -c resumes/completes the same
# file if axel is unavailable or was interrupted
axel -n 20 http://downloads.dbpedia.org/3.5.1/en/geo_coordinates_en.nq.bz2
wget -c http://downloads.dbpedia.org/3.5.1/en/geo_coordinates_en.nq.bz2
bunzip2 geo_coordinates_en.nq.bz2
# base name without extension, used for temp file names and the VRT layer
nq=`basename geo_coordinates_en.nq .nq`
# keep only the georss point triples
# BUGFIX: grep the real file "$nq.nq" — plain $nq lacks the extension and
# named a file that does not exist after bunzip2
egrep "<http://www.georss.org/georss/point>" $nq.nq > /tmp/$nq.copy
# start the output CSV fresh with a header row
touch $2
cat /dev/null > $2
header="article,lat,lng,url,abs,word"
echo $header | cat >> $2
incr=1
while read line
do
echo "processing point #${incr}"
echo $line > /tmp/$nq.line
# article title: last path segment of the subject URI
article=`awk '{print $1}' /tmp/$nq.line | egrep -o "[^/]*$" | sed 's/>//g'`
# the two coordinates from the georss point literal, joined with a comma
lnglat=`awk '{print $3,$4}' /tmp/$nq.line | egrep -o "[0-9.-]*" | \
tr '\n' ',' | sed 's/,$//g'`
# wikipedia URL from the 5th field, angle brackets stripped
wikiurl=`awk '{print $5}' /tmp/$nq.line | sed 's/^<//g'| sed 's/>$//g'`
# word count of the rendered article text
word=`links -dump $wikiurl | wc -w`
# dbpedia .atom data URL for the abstract
absurl=`awk '{print $1}' /tmp/$nq.line | sed 's/<//g' | sed 's/>//g' | sed 's/resource/data/g' | sed 's/$/.atom/'`
# english abstract, wrapped in double quotes for the CSV
abs=`links -source $absurl | egrep "<dbpedia-owl:abstract xml:lang=\"en\">"| \
sed 's/<dbpedia-owl:abstract xml:lang=\"en\">//g' | sed 's/<\/dbpedia-owl:abstract>//g' | sed 's/^/\"/' | sed 's/$/\"/'`
record="${article},${lnglat},${wikiurl},${abs},${word}"
echo $record | cat >> $2
incr=$(($incr+1))
done < /tmp/$nq.copy
# VRT describing the CSV so OGR can read lng/lat as point geometry
vrt="<OGRVRTDataSource>
<OGRVRTLayer name=\"wikipedia_pts\">
<SrcDataSource>$2</SrcDataSource>
<SrcLayer>$nq</SrcLayer>
<GeometryType>wkbPoint</GeometryType>
<LayerSRS>WGS84</LayerSRS>
<GeometryField encoding=\"PointFromColumns\" x=\"lng\" y=\"lat\"/>
</OGRVRTLayer>
</OGRVRTDataSource>"
echo $vrt > /tmp/$nq.vrt
ext=`echo $3 | awk -F . '{print $NF}'`
# BUGFIX: quote "$ext" so the test cannot break on an empty value
if [ "$ext" == kml ]; then
ogr2ogr -f KML $3 /tmp/$nq.vrt
else
ogr2ogr -f "ESRI Shapefile" $3 /tmp/$nq.vrt
fi
rm /tmp/$nq*
exit 0
#!/bin/bash
# Script to download US National Weather Service daily precipitation
# and display it on OpenLayers and Google Earth using a web server
# Please change the variables below as needed.
# This script definitely runs with: GDAL 1.7.2-2, GraphicsMagick 1.3.7, GNU tar 1.23, wget 1.12, lighttpd 1.4.26
#SERVER_DIR=$SERVER_DIR:/srv/http
#export SERVER_DIR
[[ `id -u` -ne 0 ]] && { echo "$0: You must be root user to run this script. Run it as 'sudo $0'"; exit 1; }
########## change these as needed #########
SERVER_DIR="/srv/http"
TEMPLATE_X_DIMENSION="500"
TEMPLATE_Y_DIMENSION="500"
# NOTE(review): the template generated below is named from the dimensions
# above, but this points at a pre-existing 1000x1000 file — confirm which
# template is actually intended
TEMPLATE_IN_USE="template-1000x1000.tif"
########## nothing to change below ##########
# BUGFIX: braces required — the original "$TEMPLATE_X_DIMENSIONx" expanded an
# undefined variable, producing "template-500.tif" instead of "template-500x500.tif"
TEMPLATE_FILE_NAME="template-${TEMPLATE_X_DIMENSION}x${TEMPLATE_Y_DIMENSION}.tif"
# make necessary directories
mkdir -p ~/.nws/precip/{keep,geo,archive} 2>/dev/null
sudo mkdir $SERVER_DIR/maptiles/precip 2>/dev/null
# make template tif to burn in vector attributes (first run only)
if [ -e ~/.nws/precip/keep/$TEMPLATE_IN_USE ]; then
false
else
cd ~/.nws/precip/keep
wget http://water.weather.gov/precip/p_download_new/nws_precip_allpoint.tar.gz
tar xvzf nws_precip_allpoint.tar.gz
gdal_grid -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -txe -124.898 -64.4481 -tye 17.5427 52.9737 -outsize $TEMPLATE_X_DIMENSION $TEMPLATE_Y_DIMENSION -l nws_precip_allpoint nws_precip_allpoint.shp $TEMPLATE_FILE_NAME
fi
# define variables for NWS downloads (e.g. 2010/11/24 and 20101124)
DIR_DATE=`date +%F | sed 's/[-]/\//g'`
FILE_DATE=`date +%F | sed 's/[-]//g'`
# Download today's precipitation ESRI shapefile from NWS
# Note that your system date must be correct. NTP is recommended
# BUGFIX: the original tested the literal path "SERVER_DIR/..." — the $ was missing,
# so the "already done today" check never succeeded
if [ -e "$SERVER_DIR/maptiles/precip/$FILE_DATE" ]; then
false
else
# remove old files
sudo rm -r ~/.nws/precip/archive/$FILE_DATE.tar.gz 2>/dev/null
sudo rm -r ~/.nws/precip/geo/$FILE_DATE 2>/dev/null
rm ~/.nws/precip/geo/$FILE_DATE.tif 2>/dev/null
# make new files
cd ~/.nws/precip/geo
wget http://water.weather.gov/precip/p_download_new/$DIR_DATE/nws_precip_1day_observed_shape_$FILE_DATE.tar.gz
tar xzvf nws_precip_1day_observed_shape_$FILE_DATE.tar.gz
rm *.tar.gz
# copy GeoTiff used to burn in point value
cd ~/.nws/precip/geo
cp ~/.nws/precip/keep/$TEMPLATE_IN_USE $FILE_DATE.tif
# burn point values from ESRI shapefile into the GeoTiff made by gdal_grid
cd ~/.nws/precip/geo
gdal_rasterize -a Globvalue -l nws_precip_1day_observed_$FILE_DATE nws_precip_1day_observed_$FILE_DATE.shp $FILE_DATE.tif
# change symbology for $FILE_DATE.tif
# BUGFIX: write to a temporary file then move into place — the original used
# the same file as gdaldem input and output, which corrupts the read
gdaldem color-relief $FILE_DATE.tif ~/.nws/precip/keep/scale.txt $FILE_DATE.color.tif
mv $FILE_DATE.color.tif $FILE_DATE.tif
# tile the color $FILE_DATE.tif for OpenLayers and Google Earth
cd ~/.nws/precip/geo
gdal2tiles.py -k -z 0-8 $FILE_DATE.tif
# remove unneeded files
rm $FILE_DATE.tif
rm nws_precip_1day_observed_$FILE_DATE.*
# use GraphicsMagick to make white transparent, move files to your server directory
find . -iname "*.png" -type f | xargs -I '{}' gm convert -transparent "rgb(255,255,255)" {} {}
tar czvf ~/.nws/precip/archive/precip_$FILE_DATE.tar.gz $FILE_DATE/
sudo rm -r $SERVER_DIR/maptiles/precip/$FILE_DATE
sudo mv ~/.nws/precip/geo/$FILE_DATE $SERVER_DIR/maptiles/precip
fi
exit 0
#!/bin/bash
# reprojects any number of ESRI shapefiles in the current directory
# according to a template projection file named "$1.WKT"
# make directory for output (-p: safe on re-runs)
mkdir -p out_shp
# use template projection for directory of shapefiles
for input in *.shp
do
# skip the literal "*.shp" pattern when no shapefiles are present
[ -e "$input" ] || continue
ogr2ogr -t_srs "$1".WKT out."$input" "$input"
done
# move and rename files
mv out.* out_shp
cd out_shp || exit 1
rename out. "" *
# remove WKT files
# BUGFIX: the original used the undefined variable $WKT_PATH, which expanded
# the command to "rm /*.shp.WKT"; only delete when the path is actually set
if [ -n "${WKT_PATH:-}" ]; then
rm -f "$WKT_PATH"/*.shp.WKT
fi
exit 0
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment