Edgewater 200 AW, 200A, 200AW default CLI access:
login: root
password: @#$%^&*!()
Also works for Edgemarc 4550.
WITH parcel_ar AS (
    SELECT cadastre.parcel_id,
           json_agg(json_build_object('ar_id', r.ar_id, 'year', r.year)) AS ar_ids
    FROM cadastre
    JOIN role_p r ON st_intersects(cadastre.wkb_geometry, r.geom)
    WHERE cadastre.parcel_id = '5618810'
    GROUP BY parcel_id
)
SELECT row_to_json(t)
FROM (
    SELECT
        parcel.parcel_id,
        parcel_ar.ar_ids,
        aq_lim_arrondissements.nomarr AS borough,
        aq_lim_municipales.nommun AS city,
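The note ends mid-query. Assuming the full SELECT is saved to parcel.sql (hypothetical filename), a minimal psycopg2 runner, with placeholder connection settings, could look like:

import psycopg2

conn = psycopg2.connect(dbname='gis')  # placeholder database name
with conn, conn.cursor() as cur:
    cur.execute(open('parcel.sql').read())  # the full query above
    print(cur.fetchone()[0])  # row_to_json returns a single JSON document
conn.close()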
#!/usr/bin/env python
# Dump transactions from all OFX files in the current directory as CSV.
from ofxtools.Parser import OFXTree
from csv import DictWriter
from glob import glob
import sys

HEADER = ('date', 'amount', 'description', 'reference')
parser = OFXTree()
writer = DictWriter(sys.stdout, fieldnames=HEADER)
writer.writeheader()
for path in glob('*.ofx'):
    parser.parse(path)
    for stmt in parser.convert().statements:  # attribute names per ofxtools' models
        for txn in stmt.transactions:
            writer.writerow({'date': txn.dtposted.date(), 'amount': txn.trnamt,
                             'description': txn.memo or txn.name, 'reference': txn.fitid})
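Saved as ofx2csv.py (hypothetical name), run it in a directory of OFX exports:
./ofx2csv.py > transactions.csv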
#!/usr/bin/env python
# Email every file matching a glob pattern as an attachment (Python 3).
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import sys
import glob
import os

msg = MIMEMultipart()
msg['Subject'] = 'Files'
msg['From'], msg['To'] = 'me@example.com', 'you@example.com'  # placeholder addresses
msg.attach(MIMEText('See attached files.'))
for path in glob.glob(sys.argv[1]):
    part = MIMEBase('application', 'octet-stream')
    part.set_payload(open(path, 'rb').read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition', 'attachment',
                    filename=os.path.basename(path))
    msg.attach(part)
smtplib.SMTP('localhost').send_message(msg)  # assumes a local MTA
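Saved as send_files.py (hypothetical name), pass a quoted glob so the shell doesn't expand it:
./send_files.py '*.pdf'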
PERC H700 RAID CLI (megacli64)
Add a disk to an existing virtual drive:
megacli -LDRecon -Start -r0 -Add -PhysDrv[32:5] -l<virtual drive group id> -a<adapter id>
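For example, assuming adapter 0 and virtual drive group 0 (hypothetical IDs), start the reconstruction and then poll its progress:
megacli -LDRecon -Start -r0 -Add -PhysDrv[32:5] -l0 -a0
megacli -LDRecon -ShowProg -l0 -a0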
psql postgres
CREATE ROLE new_user WITH LOGIN PASSWORD 'big secure pass';
psql new_db
GRANT CONNECT ON DATABASE new_db TO new_user;
GRANT USAGE ON SCHEMA public TO new_user;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO new_user;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO new_user;
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO new_user;
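The GRANT ... ON ALL statements only cover objects that exist at the time; re-run them (or use ALTER DEFAULT PRIVILEGES) after creating new tables. A quick login check:
psql -U new_user -d new_db -c 'SELECT 1'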
# lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 16.04.5 LTS
Release: 16.04
Codename: xenial
# apt-get update
# apt-get upgrade
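If upgrade reports held-back packages, dist-upgrade will also install or remove dependencies as needed:
# apt-get dist-upgrade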
import sys
import re
import csv
from unidecode import unidecode

TR = re.compile(r'(?P<transaction_day>\d{2})\s+(?P<transaction_month>\d{2})\s+(?P<inscription_day>\d{2})\s+(?P<inscription_month>\d{2})\s+(?P<sequence>\d{3})\s+(?P<reference>\d{23})?(?P<payee>.*?)\s*(?P<amount>[\s0-9,]+)(?P<transaction_type>CR)?$')
FIELDS = ['Date', 'Amount', 'Payee', 'Description', 'Reference']

def parse(year):
    # Assumed completion: statement lines arrive on stdin, CSV goes to stdout.
    writer = csv.DictWriter(sys.stdout, fieldnames=FIELDS)
    writer.writeheader()
    for line in sys.stdin:
        m = TR.match(unidecode(line).strip())
        if m:
            d = m.groupdict()
            amount = d['amount'].replace(' ', '').replace(',', '.')  # decimal comma
            writer.writerow({'Date': '%s-%s-%s' % (year, d['transaction_month'], d['transaction_day']),
                             'Amount': amount if d['transaction_type'] else '-' + amount,  # CR marks credits
                             'Payee': d['payee'], 'Description': d['payee'],
                             'Reference': d['reference'] or ''})

if __name__ == '__main__':
    parse(sys.argv[1])
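Assuming the statement text is piped in (filenames are hypothetical), the year argument supplies the century the statement lines omit:
python parse_statement.py 2018 < statement.txt > statement.csv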
from pyspark import SparkContext, SparkConf
from boto.s3.connection import S3Connection

def process(key):
    return key.name

if __name__ == '__main__':
    bucket_name = 'test-bucket'
    conn = S3Connection()  # credentials come from the environment / boto config
    bucket = conn.get_bucket(bucket_name)
    sc = SparkContext(conf=SparkConf().setAppName('s3-keys'))
    # Map keys to names on the driver; boto objects don't ship well to executors.
    rdd = sc.parallelize([process(key) for key in bucket.list()])
    print(rdd.count())
    sc.stop()
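Submit it with AWS credentials in the environment (script name is hypothetical):
spark-submit s3_keys.py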
On debian-etch-x86_64-5
Problem:
$ apt-get upgrade
Reading package lists... Done
Building dependency tree... Done
You might want to run `apt-get -f install' to correct these.
The following packages have unmet dependencies:
Segmentation fault