Skip to content

Instantly share code, notes, and snippets.

@jwkidd3
jwkidd3 / webapp_cli
Last active February 22, 2018 22:44
# Deploys an Azure App Service web app with a "staging" deployment slot.
# gitrepo is a placeholder: replace <...> with the URL of your slots project on GitHub.
gitrepo=<use your slots project on github>
# $RANDOM suffix keeps the (globally unique) web app name from colliding.
webappname=mywebapp$RANDOM
az group create --location westeurope --name myResourceGroup
# S1 (Standard) tier is the minimum SKU that supports deployment slots.
az appservice plan create --name "$webappname" --resource-group myResourceGroup --sku S1
az webapp create --name "$webappname" --resource-group myResourceGroup \
    --plan "$webappname"
# Create a second slot named "staging" alongside the default production slot.
az webapp deployment slot create --name "$webappname" --resource-group myResourceGroup \
    --slot staging
data.sql
-- Seed data for the PURCHASE table: three sample rows (customer, product, purchase timestamp).
insert into PURCHASE (CUSTOMERNAME, PRODUCT, PURCHASEDATE)
values('Bruce','Mountain Bike','2010-05-12 00:00:00.0');
insert into PURCHASE (CUSTOMERNAME, PRODUCT, PURCHASEDATE)
values('Paul','Football','2010-04-30 00:00:00.0');
insert into PURCHASE (CUSTOMERNAME, PRODUCT, PURCHASEDATE)
values('Rick','Kayak','2010-06-05 00:00:00.0');
schema.sql
using Microsoft.Azure.WebJobs;
using Microsoft.Azure.WebJobs.Host;
using Microsoft.ServiceBus.Messaging;
using Microsoft.Azure;
using Microsoft.WindowsAzure.Storage;
using Microsoft.WindowsAzure.Storage.Table;
using Newtonsoft.Json.Linq;
using System.IO;
using System.Runtime.Serialization.Json;
using System.Text;
// First we're going to import the classes we need
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.avro.generic.GenericRecord
import parquet.hadoop.ParquetInputFormat
import parquet.avro.AvroReadSupport
import org.apache.spark.rdd.RDD
// Then we create RDD's for 2 of the files we imported from MySQL with Sqoop
// RDD's are Spark's data structures for working with distributed datasets
def rddFromParquetHdfsFile(path: String): RDD[GenericRecord] = {
@jwkidd3
jwkidd3 / hadoop
Last active December 5, 2017 18:47
#Scripts for class
-- Top 10 product categories ranked by number of order items.
-- Inner joins are order-independent, so categories is listed first for readability.
select c.category_name,
       count(order_item_quantity) as count
from categories c
inner join products p
    on p.product_category_id = c.category_id
inner join order_items oi
    on oi.order_item_product_id = p.product_id
group by c.category_name
order by count desc
limit 10;
#in web.config
<!-- ClientId and ClientSecret refer to the web application registration with Azure Active Directory -->
<!-- NOTE(review): "clientid", "clientsecret", and "secreturi" below are placeholders;
     replace them with the real registration values before deploying. -->
<add key="ClientId" value="clientid" />
<add key="ClientSecret" value="clientsecret" />
<!-- SecretUri is the URI for the secret in Azure Key Vault -->
<add key="SecretUri" value="secreturi" />
using Microsoft.IdentityModel.Clients.ActiveDirectory;
using System.Threading.Tasks;
# Edit the cloud-init config before creating the Jenkins VM from it.
sensible-editor cloud-init-jenkins.txt
# Fix: --admin-username is the documented flag name (--admin-user relied on prefix matching).
az vm create --resource-group table0jkjenkinsRG --name table0jkjenkins --image UbuntuLTS \
    --admin-username azureuser --generate-ssh-keys --custom-data cloud-init-jenkins.txt
# Open the Jenkins web UI port.
az vm open-port --resource-group table0jkjenkinsRG --name table0jkjenkins --port 8080 --priority 1001
# Fix: "--o" is not a valid Azure CLI flag; the output-format flag is -o/--output.
# The JMESPath query is quoted so the shell does not glob-expand the brackets.
az vm show --resource-group table0jkjenkinsRG --name table0jkjenkins -d --query '[publicIps]' --output tsv
# NOTE(review): this targets a different resource group/VM (jenkinsrg/jkjenkins) than
# the commands above — confirm this is intentional.
az vm list-ip-addresses -g jenkinsrg -n jkjenkins
#cloud-config
# Upgrade all installed packages on first boot.
package_upgrade: true
write_files:
  # systemd drop-in: the empty ExecStart= clears the distro unit's command so the
  # plain "dockerd" invocation below replaces it instead of being appended.
  - path: /etc/systemd/system/docker.service.d/docker.conf
    content: |
      [Service]
      ExecStart=
      ExecStart=/usr/bin/dockerd
  # NOTE(review): the daemon.json payload was truncated in the source snippet —
  # fill in the intended Docker daemon configuration before using this file.
  - path: /etc/docker/daemon.json
    content: |
from flask import Flask
from redis import Redis
import os

app = Flask(__name__)
# Hostname "redis" resolves via the Docker Compose service name.
redis = Redis(host='redis', port=6379)


@app.route('/')
def hello():
    """Increment the Redis 'hits' counter and report the running total.

    Fix: INCR already returns the new counter value as an int; the original
    formatted redis.get('hits'), which returns bytes under Python 3 and would
    render as b'1' in the page text.
    """
    count = redis.incr('hits')
    return 'Hello World! I have been seen %s times.' % count
FROM ubuntu:16.04
# LABEL is the supported replacement for the deprecated MAINTAINER instruction.
LABEL maintainer="John Doe <[email protected]>"
ENV REFRESHED_AT 2017-06-01
# Install build deps and clean the apt cache in the same layer to keep the image small.
RUN apt-get update && apt-get -y install ruby ruby-dev build-essential redis-tools \
    && rm -rf /var/lib/apt/lists/*
# --no-document replaces the deprecated --no-rdoc/--no-ri pair (RubyGems >= 2.0).
RUN gem install --no-document sinatra json redis
RUN mkdir -p /opt/webapp
EXPOSE 4567
CMD [ "/opt/webapp/bin/webapp" ]