// dependency for TabLayout (Design Support Library); newer Gradle versions use `implementation` instead of `compile`
compile 'com.android.support:design:24.1.1'

// xml
<android.support.design.widget.AppBarLayout
    android:layout_width="match_parent"
    android:layout_height="wrap_content"
    android:theme="@style/ThemeOverlay.AppCompat.Dark.ActionBar">
    <include
// create a new directive with the Angular CLI:
ng g d directive-name

// directives are applied as attributes on existing HTML elements, e.g.:
<div directive-name> ... </div>

// example of a directive (the class name below is illustrative):
import { Directive } from '@angular/core';
@Directive({
  selector: '[myDirectives]'
})
export class MyDirectivesDirective { }
# Data preprocessing template (scikit-learn)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.impute import SimpleImputer  # Imputer was removed from sklearn.preprocessing in scikit-learn 0.22+
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.linear_model import LinearRegression

# Importing the dataset
dataset = pd.read_csv('train.csv')
df = pd.DataFrame(dataset)  # read_csv already returns a DataFrame; kept for clarity
# HOG detection on screen captures (OpenCV)
import numpy as np
from PIL import ImageGrab
import cv2

def draw_detections(img, rects, thickness=1):
    # Shrink each detection box slightly so the rectangle hugs the detected object
    for x, y, w, h in rects:
        pad_w, pad_h = int(0.15 * w), int(0.05 * h)
        cv2.rectangle(img, (x + pad_w, y + pad_h), (x + w - pad_w, y + h - pad_h), (0, 255, 0), thickness)

hog = cv2.HOGDescriptor()
// styles.scss
@import '~@angular/material/prebuilt-themes/indigo-pink.css';

// HTML
<mat-table class="lessons-table mat-elevation-z8" [dataSource]="dataSource">
  <ng-container matColumnDef="id">
    <mat-header-cell *matHeaderCellDef>#</mat-header-cell>
    <mat-cell *matCellDef="let customer">{{customer.id}}</mat-cell>
// Cucumber DataTable to Spark DataFrame
import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable`
import io.cucumber.datatable.DataTable
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.col

// Assumes a `sparkSession` value is in scope
def dataTableToDataframe(table: DataTable): DataFrame = {
  import sparkSession.implicits._
  // First row of the table is the header, the remaining rows are the data
  val columns: Seq[String] = table.cells().head.toSeq
  val data = table.cells().drop(1).toSeq.map(r => r.toList)
  // toDF() yields a single array column "value"; split it back into named columns
  data.toDF().select(columns.indices.map(i => col("value")(i).alias(columns(i))): _*)
}
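// Quick way to exercise the helper from a test (assumption: DataTable.create accepts the raw cell rows, header first; the values below are made up)
import java.util.Arrays.asList

val table = DataTable.create(asList(
  asList("id", "name"),
  asList("1", "alice"),
  asList("2", "bob")
))
val df = dataTableToDataframe(table)
df.show()  // two rows, columns `id` and `name`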
// Salted join: when the join key is skewed, salting spreads the hot keys over `salt` buckets
import org.apache.spark.sql.{Column, DataFrame}

def saltedJoin(df: DataFrame, buildDf: DataFrame, joinExpression: Column, joinType: String, salt: Int): DataFrame = {
  import org.apache.spark.sql.functions._
  // Replicate every build-side row once per salt value
  val tmpDf = buildDf.withColumn("slt_range", array(Range(0, salt).toList.map(lit): _*))
  val tableDf = tmpDf.withColumn("slt_ratio_s", explode(tmpDf("slt_range"))).drop("slt_range")
  // Tag each stream-side row with a salt bucket derived from its row id
  val streamDf = df.withColumn("slt_ratio", monotonically_increasing_id() % salt)
  val saltedExpr = streamDf("slt_ratio") === tableDf("slt_ratio_s") && joinExpression
  streamDf.join(tableDf, saltedExpr, joinType).drop("slt_ratio_s").drop("slt_ratio")
}
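// Hypothetical usage: the DataFrame and column names below are made up for illustration
val joined = saltedJoin(
  ordersDf,                                               // large, skewed side
  customersDf,                                            // smaller side that gets replicated `salt` times
  ordersDf("customer_id") === customersDf("customer_id"), // the original join condition
  "inner",
  salt = 16
)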
// In case of FetchFailedException or MetadataFetchFailedException (often tied to a BroadcastNestedLoopJoin), increase the memory overhead:
- spark.executor.memoryOverhead=1g
- spark.kubernetes.memoryOverheadFactor=0.2   (only when running on Kubernetes)
// Mitigate skewed joins automatically on Spark >= 3.0 (adaptive query execution):
- spark.sql.adaptive.enabled=true
- spark.sql.adaptive.skewJoin.enabled=true
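// Sketch of setting the AQE flags when building the session (the memory overhead settings are normally passed via spark-submit / the pod spec instead)
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder()
  .appName("skew-mitigation")
  .config("spark.sql.adaptive.enabled", "true")
  .config("spark.sql.adaptive.skewJoin.enabled", "true")
  .getOrCreate()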
// Hadoop on Windows
Download winutils.exe and hadoop.dll: https://github.com/kontext-tech/winutils
Put them inside the folder C:\hadoop\bin
Set the environment variable HADOOP_HOME (and, if needed, the JVM system property hadoop.home.dir) to C:\hadoop
Add %HADOOP_HOME%\bin to the PATH
Copy hadoop.dll to C:\Windows\System32
Make sure your JVM is 64-bit.

// Spark
Download Spark from https://spark.apache.org/downloads.html
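// Minimal smoke test for the setup above (assumes the C:\hadoop folder from the steps; object and app names are illustrative)
import org.apache.spark.sql.SparkSession

object WindowsSmokeTest {
  def main(args: Array[String]): Unit = {
    System.setProperty("hadoop.home.dir", "C:\\hadoop")  // matches HADOOP_HOME set above

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("windows-smoke-test")
      .getOrCreate()

    spark.range(10).show()  // prints 0..9 if winutils.exe / hadoop.dll are picked up
    spark.stop()
  }
}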