#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>  // GNU policy-based data structures (order-statistics trees etc.)
typedef long long ll;
using namespace std;
using namespace __gnu_pbds;
// Segment-tree child indices: left child = 2*cnt, right child = 2*cnt + 1
#define lcnt (cnt<<1)
#define rcnt (cnt<<1|1)
// Short aliases common in competitive-programming templates
#define vt vector
#define pb push_back
#define all(c) (c).begin(), (c).end()
// Debug-print helpers
#define debug(x) cout << '>' << #x << ':' << (x) << endl;
#define debug2(y,z) cout << '>' << #y << ':' << (y) << " " << #z << ":" << (z) << endl;
@bougui505
bougui505 / icp.py
Created October 1, 2020 07:53
Iterative Closest Point (ICP) implementation with least squares fit (lstsq) in Pytorch
#!/usr/bin/env python
# -*- coding: UTF8 -*-
# Author: Guillaume Bouvier -- [email protected]
# https://research.pasteur.fr/en/member/guillaume-bouvier/
# 2020-10-01 09:51:45 (UTC+0200)
import sys
import torch
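The icp.py preview cuts off after the imports. As a rough orientation only, here is a minimal sketch of the kind of loop the title describes: nearest-neighbour matching followed by a least-squares fit via torch.linalg.lstsq, assuming hypothetical (N, 3) source and target point clouds. Every name below is illustrative, and the gist's actual implementation may differ (for instance by fitting a rigid rather than affine transform).

import torch

def icp_affine(src, tgt, n_iter=20):
    # src: (N, 3) source points, tgt: (M, 3) target points (illustrative names).
    src = src.clone()
    for _ in range(n_iter):
        # Match each source point to its nearest neighbour in the target cloud.
        nn = torch.cdist(src, tgt).argmin(dim=1)
        matched = tgt[nn]                                                   # (N, 3)
        # Least-squares affine fit: [src | 1] @ X ~= matched.
        A = torch.cat([src, torch.ones(src.shape[0], 1, dtype=src.dtype)], dim=1)
        X = torch.linalg.lstsq(A, matched).solution                         # (4, 3)
        # Apply the fitted transform and iterate.
        src = A @ X
    return src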
@ShichengChen
ShichengChen / main.cpp
Last active August 31, 2019 08:36
ac
#include <iostream>
#include <algorithm>
#include <set>
#include <vector>
#include <cstring>
#include <queue>
#include <bits/stdc++.h>
#include <cstdio>
#include <stdio.h>
using namespace std;
@kittinan
kittinan / client.py
Last active November 15, 2024 15:35
Python OpenCV webcam send image frame over socket
import cv2
import io
import socket
import struct
import time
import pickle
import zlib
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('192.168.1.124', 8485))
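The preview stops right after the connect call. Below is a plausible continuation (not the gist's exact code) that captures webcam frames, JPEG-encodes them, and sends each one as a length-prefixed pickled message over the client_socket opened above; the capture index 0 and JPEG quality 90 are arbitrary choices, and the imported zlib suggests the payload may additionally be compressed, which this sketch skips.

cap = cv2.VideoCapture(0)                           # open the default webcam
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # JPEG-encode the frame, then pickle the encoded buffer for transport.
    ok, encoded = cv2.imencode('.jpg', frame, encode_param)
    data = pickle.dumps(encoded, 0)
    # Length-prefixed message: 4-byte big-endian size, then the payload.
    client_socket.sendall(struct.pack(">L", len(data)) + data)

cap.release()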
@odashi
odashi / cudnn_convolution_forward.cu
Created January 8, 2018 15:40
Example usage of cuDNN convolution forward functions.
#include <iomanip>
#include <iostream>
#include <cstdlib>
#include <vector>
#include <cuda.h>
#include <cudnn.h>
// The macro body after the status capture is a typical completion (assumption); the preview truncates here.
#define CUDA_CALL(f) { \
  cudaError_t err = (f); \
  if (err != cudaSuccess) { \
    std::cerr << "CUDA error: " << cudaGetErrorString(err) << std::endl; \
    std::exit(1); \
  } \
}
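The CUDA preview above only reaches the error-check macro. Purely as a reference for what a cuDNN forward convolution computes (this is not part of the gist), the same operation can be written in PyTorch, which typically dispatches to cuDNN when the tensors live on a CUDA device; the shapes below are arbitrary examples.

import torch
import torch.nn.functional as F

# Batch of 1, 3 input channels, 32x32 image; 5 output channels with 3x3 kernels.
x = torch.randn(1, 3, 32, 32)
w = torch.randn(5, 3, 3, 3)

# Forward convolution; on CUDA tensors this call is usually backed by cuDNN's convolution forward.
y = F.conv2d(x, w, stride=1, padding=1)
print(y.shape)  # torch.Size([1, 5, 32, 32])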
@karpathy
karpathy / pg-pong.py
Created May 30, 2016 22:50
Training a Neural Network ATARI Pong agent with Policy Gradients from raw pixels
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """
import numpy as np
import cPickle as pickle
import gym
# hyperparameters
H = 200 # number of hidden layer neurons
batch_size = 10 # every how many episodes to do a param update?
learning_rate = 1e-4
gamma = 0.99 # discount factor for reward
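The preview shows only the hyperparameters. As a condensed sketch (a paraphrase of the well-known approach, not the gist verbatim), two routines such a script relies on are the discounted-return computation using the gamma above and a two-layer policy forward pass that outputs the probability of moving UP; model['W1'] and model['W2'] are assumed weight arrays for a hidden layer of H units.

import numpy as np

def discount_rewards(r, gamma=0.99):
    # Discounted returns, resetting at game boundaries (in Pong a nonzero reward ends a point).
    discounted = np.zeros_like(r, dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(r))):
        if r[t] != 0:
            running = 0.0  # Pong-specific: a point was just scored, so reset the running return.
        running = running * gamma + r[t]
        discounted[t] = running
    return discounted

def policy_forward(x, model):
    # Two-layer net: hidden ReLU layer, then a sigmoid giving P(action = UP).
    h = np.dot(model['W1'], x)       # model['W1']: (H, D), x: flattened preprocessed frame (D,)
    h[h < 0] = 0                     # ReLU nonlinearity
    logp = np.dot(model['W2'], h)    # model['W2']: (H,)
    p = 1.0 / (1.0 + np.exp(-logp))
    return p, h                      # hidden state is returned for use in the backward pass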