git clone [email protected]:YOUR-USERNAME/YOUR-FORKED-REPO.git
cd into/cloned/fork-repo
git remote add upstream git://github.com/ORIGINAL-DEV-USERNAME/REPO-YOU-FORKED-FROM.git
git fetch upstream
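With the upstream remote fetched, its changes can be merged into your local branch, for example (assuming the default branch is named master; substitute main where appropriate):
git checkout master
git merge upstream/master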
git clone [email protected]:YOUR-USERNAME/YOUR-FORKED-REPO.git
cd into/cloned/fork-repo
git remote add upstream git://github.com/ORIGINAL-DEV-USERNAME/REPO-YOU-FORKED-FROM.git
git fetch upstream
1. Convert our ".jks" file to ".p12" (PKCS12 keystore format):
keytool -importkeystore -srckeystore oldkeystore.jks -destkeystore newkeystore.p12 -deststoretype PKCS12
1.1. List the new keystore file's contents:
keytool -list -keystore newkeystore.p12 -storetype PKCS12
2. Extract the certificate as PEM from the ".p12" keystore file:
openssl pkcs12 -in newkeystore.p12 -nokeys -out cert.pem
# to generate your dhparam.pem file, run in the terminal
openssl dhparam -out /etc/nginx/ssl/dhparam.pem 2048
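To sanity-check both outputs (cert.pem being the file name chosen in step 2 above):
openssl x509 -in cert.pem -noout -subject -dates
openssl dhparam -in /etc/nginx/ssl/dhparam.pem -noout -text | head -n 3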
var
  // Local ip address that we're trying to calculate
  address
  // Provides a few basic operating-system related utility functions (built-in)
  ,os = require('os')
  // Network interfaces
  ,ifaces = os.networkInterfaces();
// Iterate over interfaces and pick the first external (non-internal) IPv4 address
for (var dev in ifaces) {
  ifaces[dev].forEach(function (details) {
    if (details.family === 'IPv4' && !details.internal && !address) {
      address = details.address;
    }
  });
}
console.log(address);
import org.apache.spark.mllib.linalg.distributed.{CoordinateMatrix, MatrixEntry, RowMatrix}
import org.apache.spark.mllib.linalg._
import org.apache.spark.{SparkConf, SparkContext}
// To use the latest sparse SVD implementation, please build your spark-assembly after this
// change: https://github.com/apache/spark/pull/1378
// Input tsv with 3 fields: rowIndex(Long), columnIndex(Long), weight(Double), indices start with 0
// Assume the number of rows is larger than the number of columns, and the number of columns is
// smaller than Int.MaxValue
// Minimal sketch of the computation described above ("input.tsv" and k = 20 are placeholders):
val sc = new SparkContext(new SparkConf().setAppName("SparseSVD"))
val entries = sc.textFile("input.tsv").map(_.split('\t'))
  .map(p => MatrixEntry(p(0).toLong, p(1).toLong, p(2).toDouble))
val mat = new CoordinateMatrix(entries).toRowMatrix()
val svd = mat.computeSVD(20, computeU = true)
rabbitmqctl add_user test test
rabbitmqctl set_user_tags test administrator
rabbitmqctl set_permissions -p / test ".*" ".*" ".*"
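To verify that the new user and its permissions were applied:
rabbitmqctl list_users
rabbitmqctl list_permissions -p /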
^([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+[0-9A-Za-z-]+)?$
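A quick way to try the pattern (GNU grep's -P flag enables the Perl-style groups used here; note that the final build-metadata group does not permit dots):
SEMVER='^([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+[0-9A-Za-z-]+)?$'
echo "1.0.0-alpha.1" | grep -P "$SEMVER"   # match: the line is printed
echo "1.2" | grep -P "$SEMVER"             # no match: patch number missing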
""" | |
Copies all keys from the source Redis host to the destination Redis host. | |
Useful to migrate Redis instances where commands like SLAVEOF and MIGRATE are | |
restricted (e.g. on Amazon ElastiCache). | |
The script scans through the keyspace of the given database number and uses | |
a pipeline of DUMP and RESTORE commands to migrate the keys. | |
Requires Redis 2.8.0 or higher. |
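If the sketch above is saved as migrate_redis.py (a hypothetical file name), it can be run after installing the client library:
pip install redis
python migrate_redis.py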
import ec2 = require('@aws-cdk/aws-ec2');
import ecs = require('@aws-cdk/aws-ecs');
import elbv2 = require('@aws-cdk/aws-elasticloadbalancingv2');
import cdk = require('@aws-cdk/core');
class PublicFargateService extends cdk.Stack {
  constructor(scope: cdk.App, id: string) {
    super(scope, id);
    // Create VPC and Fargate Cluster (a minimal sketch: names and sizing are
    // placeholders; the original presumably continues with an elbv2 load balancer)
    const vpc = new ec2.Vpc(this, 'MyVpc', { maxAzs: 2 });
    const cluster = new ecs.Cluster(this, 'Cluster', { vpc });
  }
}
const app = new cdk.App();
new PublicFargateService(app, 'PublicFargateService');
app.synth();
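From there, the usual CDK CLI workflow applies (assuming a standard CDK TypeScript project with a build script):
npm run build
cdk synth
cdk deploy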
WSL2 uses Hyper-V for networking. The WSL2 network settings are ephemeral and configured on demand when any WSL2 instance is first started in a Windows session. The configuration is reset on each Windows restart and the IP addresses change each time. The Windows host creates a hidden switch named "WSL" and a network adapter named "WSL" (appears as "vEthernet (WSL)" in the "Network Connections" panel). The Ubuntu instance creates a corresponding network interface named "eth0".
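For example, the addresses assigned in the current session can be inspected from both sides (the interface names below are the usual defaults described above):
# Inside the WSL2 Ubuntu instance
ip -4 addr show eth0
# On the Windows host, in PowerShell
Get-NetIPAddress -InterfaceAlias "vEthernet (WSL)" -AddressFamily IPv4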
Assigning static IP addresses to the network interfaces on the Windows host or the WSL2 Ubuntu instance enables support for the following scenarios: