Gists by SUMIT VOHRA (saisumit)

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
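# The gist preview is truncated here; the lines below are the standard completion of this
# ClusterRoleBinding (as in the dashboard project's sample-user instructions), binding
# admin-user to the built-in cluster-admin ClusterRole.
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

Once both objects are applied, a login token for admin-user can be generated with
kubectl -n kubernetes-dashboard create token admin-user (kubectl v1.24 or newer; older
clusters read it from the service account's token secret), and the UI is reachable through
kubectl proxy at http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/.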
from __future__ import print_function
import wave
import numpy as np
import utils
import librosa
# from IPython import embed
import os
from sklearn import preprocessing
# prediction=clf.predict(test_features)
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
#include <bits/stdc++.h>
using namespace std ;
#define REP(i, a, b) for (int i = a; i <= b; i++)
#define FOR(i, n) for (int i = 0; i < n; i++)
#define foreach(it, ar) for ( typeof(ar.begin()) it = ar.begin(); it != ar.end(); it++ )
#define fill(ar, val) memset(ar, val, sizeof(ar))
#define PI 3.1415926535897932385
#define uint64 unsigned long long
#define Int long long
#define int64 long long
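
// A small usage sketch (not in the original gist) showing what the helper macros above expand to.
int main() {
    int a[5];
    fill(a, 0);                        // memset(a, 0, sizeof(a)): zero the whole array
    FOR(i, 5) a[i] = i * i;            // for (int i = 0; i < 5; i++): 0 1 4 9 16
    REP(i, 1, 4) a[i] += a[i - 1];     // for (int i = 1; i <= 4; i++): running prefix sums
    vector<int> v(a, a + 5);
    foreach(it, v) printf("%d ", *it); // typeof-based iterator loop (GCC extension)
    return 0;
}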
# import libraries
from __future__ import print_function
from __future__ import division
import numpy as np
import pandas as pd
import os
import re
import tensorflow as tf
%{
#include <stdio.h>
#include "y.tab.h" /* assumed companion yacc header defining the id and err tokens and yylval */
%}
op "+"|"-"|"*"|"/"
%%
[a-z] { yylval=*yytext; return id; }
{op} { return (int) yytext[0]; }
\n { return(0); }
. { return err; }
%%
#include <bits/stdc++.h>
using namespace std;
#define REP(i, a, b) for (int i = a; i <= b; i++)
#define FOR(i, n) for (int i = 0; i < n; i++)
#define foreach(it, ar) for ( typeof(ar.begin()) it = ar.begin(); it != ar.end(); it++ )
#define fill(ar, val) memset(ar, val, sizeof(ar))
#define PI 3.1415926535897932385
#define uint64 unsigned long long
#define Int long long
import math
w = [2,-3,-3] # assume some random weights and data
x = [-1, -2]
# forward pass
dot = w[0]*x[0] + w[1]*x[1] + w[2]
f = 1.0 / (1 + math.exp(-dot)) # sigmoid function
# backward pass through the neuron (backpropagation)
ddot = (1 - f) * f # gradient on dot variable, using the sigmoid gradient derivation
dx = [w[0] * ddot, w[1] * ddot] # backprop into x
dw = [x[0] * ddot, x[1] * ddot, 1.0 * ddot] # backprop into w (the bias term gets ddot directly)
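A quick numerical sanity check of these gradients (a sketch, not part of the original gist): compare the analytic dx[0] above against a centered finite-difference estimate of the same sigmoid neuron.

def neuron(w, x):
    # forward pass of the same 2-input sigmoid neuron, packaged as a function
    dot = w[0] * x[0] + w[1] * x[1] + w[2]
    return 1.0 / (1 + math.exp(-dot))

h = 1e-5
num_dx0 = (neuron(w, [x[0] + h, x[1]]) - neuron(w, [x[0] - h, x[1]])) / (2 * h)
print(dx[0], num_dx0)  # the two values should agree to several decimal places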
const int MAXN = 3e5 + 50 ;
int A[ MAXN ] ;
int S[ MAXN ] ;
int T[ 4*MAXN ] ;
int N ;
int LB = -1 ;
int UB = INT_MAX;
int Case1( int idx )
{