Edgar Riba (edgarriba)

@edgarriba
edgarriba / theia_opencv
Created February 22, 2016 15:10
External feature extraction
/* Reimplementation of `AddImagesToReconstructionBuilder`
   https://github.com/sweeneychris/TheiaSfM/blob/master/applications/build_reconstruction.cc#L334
   using an external local feature extractor, in this case AKAZE from OpenCV. */
void AddImagesToReconstructionBuilder(
    ReconstructionBuilder* reconstruction_builder) {
  std::vector<std::string> image_files;
  CHECK(theia::GetFilepathsFromWildcard(FLAGS_images, &image_files));
  // Create the feature matcher.
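
The preview cuts off before the extraction step itself. Below is a minimal sketch of the AKAZE part in Python, using OpenCV's cv2 bindings rather than the Theia/C++ API of the gist; the image path is a placeholder.

import cv2

def extract_akaze_features(image_path: str):
    """Detect AKAZE keypoints and compute their binary (MLDB) descriptors."""
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    akaze = cv2.AKAZE_create()
    keypoints, descriptors = akaze.detectAndCompute(img, None)
    return keypoints, descriptors

# Placeholder path; in the gist the images come from the FLAGS_images wildcard.
kps, descs = extract_akaze_features("view_000.jpg")
print(len(kps), descs.shape)  # N keypoints and (N, 61) uint8 descriptors by default

In the gist, these externally computed features take the place of Theia's built-in extraction inside AddImagesToReconstructionBuilder.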
@edgarriba
edgarriba / relative_pose.cpp
Last active October 27, 2021 03:02
relative pose with opengv
// Returns the 3x3 camera intrinsics matrix given the image and the focal length.
// img: the reference image (only its dimensions are used)
// f:   the focal length in pixels
cv::Mat getCameraMatrix(const cv::Mat& img, const float f) {
  const int cx = img.cols / 2;
  const int cy = img.rows / 2;
  return (cv::Mat_<float>(3, 3) << f, 0, cx, 0, f, cy, 0, 0, 1);
}
// Convert a set of image points to bearing vectors
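
The last comment above hints at the points-to-bearing-vectors step that OpenGV's central relative pose solvers expect. A minimal numpy sketch of that conversion, assuming Nx2 pixel coordinates; the image size and focal length below are illustrative, not from the gist.

import numpy as np

def get_camera_matrix(width: int, height: int, f: float) -> np.ndarray:
    """Pinhole intrinsics with the principal point at the image center."""
    return np.array([[f,   0.0, width / 2.0],
                     [0.0, f,   height / 2.0],
                     [0.0, 0.0, 1.0]])

def points_to_bearings(points: np.ndarray, K: np.ndarray) -> np.ndarray:
    """Back-project Nx2 pixel coordinates to unit-norm bearing vectors (Nx3)."""
    pts_h = np.hstack([points, np.ones((points.shape[0], 1))])  # homogeneous pixels
    rays = (np.linalg.inv(K) @ pts_h.T).T                        # normalized camera rays
    return rays / np.linalg.norm(rays, axis=1, keepdims=True)

K = get_camera_matrix(640, 480, f=500.0)
bearings = points_to_bearings(np.array([[320.0, 240.0], [100.0, 50.0]]), K)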
@startuml
package layers #DDDDDD {
  enum Engine {
    CUSTOM
    NNPACK
    AVX
    AVX2
    SSE
    OPENCL
@startuml
enum DeviceType {
  CPU
  GPU
}
class Layer {
}
class Context {
@startuml
class Tensor {
  + Tensor(cnn_size_t batch, cnn_size_t width, cnn_size_t height, cnn_size_t depth)
  + shape(): int[]
  + operator [](cnn_size_t index) : float_t&
  + operator [](cnn_size_t index) const : const float_t&
  + at(cnn_size_t batch, cnn_size_t width, cnn_size_t height, cnn_size_t depth) : float_t&
  + at(cnn_size_t batch, cnn_size_t width, cnn_size_t height, cnn_size_t depth) const : const float_t&
  + ptr(cnn_size_t batch, cnn_size_t width, cnn_size_t height, cnn_size_t depth) : float_t*
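
For illustration, a hedged Python analogue of the Tensor interface sketched in the diagram above; the actual design targets C++ (cnn_size_t/float_t), so numpy stands in for the storage here and the method names simply mirror the UML.

import numpy as np

class Tensor:
    def __init__(self, batch: int, width: int, height: int, depth: int):
        # Dense BxWxHxD storage, single precision as in the diagram's float_t.
        self._data = np.zeros((batch, width, height, depth), dtype=np.float32)

    def shape(self):
        return self._data.shape

    def at(self, batch: int, width: int, height: int, depth: int) -> float:
        # Element access mirroring at(batch, width, height, depth) in the UML.
        return self._data[batch, width, height, depth]

    def __getitem__(self, index: int) -> float:
        # Flat indexing mirroring operator[](index).
        return self._data.flat[index]

t = Tensor(batch=2, width=4, height=4, depth=3)
print(t.shape(), t[0], t.at(0, 1, 2, 0))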
import torch
import numpy as np
import pickle
f = open('/home/eriba/software/pytorch/examples-edgarriba/triplet/nan_test.pkl', 'rb')
data = pickle.load(f)
a = torch.from_numpy(data['a']).cuda()
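
The preview stops right after moving the anchor batch to the GPU; presumably the rest of the snippet inspects it for bad values. A small self-contained sketch of such a check (not from the original gist):

import torch

def has_bad_values(t: torch.Tensor) -> bool:
    """Return True if the tensor contains any NaN or Inf."""
    return not torch.isfinite(t).all().item()

# Continuing the snippet above, one would call e.g. has_bad_values(a).
x = torch.tensor([1.0, float("nan")])
print(has_bad_values(x))  # True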
@edgarriba
edgarriba / triplet_loss_grad_check.py
Last active February 17, 2017 15:02
triplet_loss gradient check
import torch
from torch.autograd import Variable
from torch.autograd import Function
import numpy as np
anchor_str = 'p0\x0f?>yK\xbf\xcayY\xbf\x80l3?\x12\x13j?\xf3\xe3x?\x06\xbf+\xbf\x94\x10\t?D\xed\x8b=\xb9\xb9r>\'\xd9%?\xd1\x1fe?\'\x7f~?\xdc\xef\xa9\xbep\xe8\xbb>\xf4\xcc\x1b?\xd4\xda\xd5\xbe}\x12i\xbf\\L\x0e?\xe5:\xfe>{\xe1\xd3\xbe\x13\xbfD?-\xdb2\xbd2\xb2(\xbf\xfa<}\xbe-\x9fN?\xe1\xcfE?\xb6\x1cq\xbf\xb2C3?<\x0f%\xbf\xed\x80K\xbcE3i\xbe\xa2\xa55?E&\x96>\xe9\xb3f>N8}\xbf\xdf}c\xbfZa1>\xa8\x9c[\xbf\xc4x\x7f\xbf\x9e)\x83=\xcd%o>\\BC\xbf\xd7\xa0|?Mn\x8a\xbe%uw?\x80\xeeR?\x16\x19\xf2>\x85\x8f<>\x9aN\xf5>\x8d\xc3j\xbf\xc8E\x94\xbe\xa0/{?\xefq\xfc>\xaa\xf5S\xbfb\x1b}?J\x11U\xbf`\xadm?\x85\xa6\xc1=|\x0bv;\xfc\xbbp\xbf:p?\xbfV\x93\xc6=T\xfa\xb1>T\xe0z?j\xe2,?`\xe7x\xbf(\x95\x06\xbf\x00\x08\x7f?\x83\xcaJ\xbf\xd6$\x86\xbe,\x910?/\x08\x13\xbe\xa2J/\xbf\xfb\x90\xb3\xbdZ\n\xf2>UF\x88>))\xfc>J\x95;?\x91\x1b<?\x15\xf4\xa3\xbeV\x91x?\x01\xa8\\\xbf\xf2=\xa6\xbeG\xa2w\xbf\xf8\x13|?\xd8\xee5?\x8e\xbe|?\xc5\xae)>J\xe7c?>\xd2^
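
For reference, a minimal gradient check in current PyTorch (the gist above predates this API style and used Variable); the embedding size, batch size, and margin below are illustrative.

import torch
import torch.nn.functional as F

# gradcheck requires double precision and inputs with requires_grad=True.
anchor = torch.randn(4, 128, dtype=torch.double, requires_grad=True)
positive = torch.randn(4, 128, dtype=torch.double, requires_grad=True)
negative = torch.randn(4, 128, dtype=torch.double, requires_grad=True)

def loss_fn(anc, pos, neg):
    return F.triplet_margin_loss(anc, pos, neg, margin=1.0, p=2)

# Compares analytical gradients against finite differences; prints True on success.
print(torch.autograd.gradcheck(loss_fn, (anchor, positive, negative), eps=1e-6, atol=1e-4))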
@edgarriba
edgarriba / pytorch_train_template.py
Last active October 9, 2019 02:44
PyTorch training template
/*
* Copyright (c) 2019 Edgar Riba.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 3.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
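
The preview above only shows the license header. As a rough outline of what a PyTorch training template contains (device handling, model, optimizer, train loop), here is a minimal sketch with a dummy dataset and model standing in for whatever the gist actually trains.

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 2)).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
criterion = nn.CrossEntropyLoss()

# Random stand-in data so the sketch runs without any external files.
data = TensorDataset(torch.randn(256, 16), torch.randint(0, 2, (256,)))
loader = DataLoader(data, batch_size=32, shuffle=True)

for epoch in range(5):
    model.train()
    running_loss = 0.0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print(f"epoch {epoch}: loss {running_loss / len(loader):.4f}")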
@edgarriba
edgarriba / pytorch_lightning_optuna.py
Created June 10, 2021 10:44
lightning_optuna_multigpu example
# https://github.com/optuna/optuna-examples/blob/main/pytorch/pytorch_lightning_simple.py
"""
Optuna example that optimizes multi-layer perceptrons using PyTorch Lightning.
In this example, we optimize the validation accuracy of image classification on FashionMNIST
using PyTorch Lightning. We optimize the neural network architecture. As it is too
time-consuming to train on the whole FashionMNIST dataset, we use a small subset of it.
You can run this example as follows; pruning can be turned on and off with the `--pruning`
argument.
$ python pytorch_lightning_simple.py [--pruning]
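
A condensed sketch of the same idea, assuming optuna and pytorch_lightning are installed; random tensors stand in for FashionMNIST, and the multi-GPU and pruning-callback wiring of the gist is omitted for brevity.

import optuna
import pytorch_lightning as pl
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

class Net(pl.LightningModule):
    def __init__(self, hidden: int, lr: float):
        super().__init__()
        self.model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, hidden),
                                   nn.ReLU(), nn.Linear(hidden, 10))
        self.lr = lr

    def training_step(self, batch, batch_idx):
        x, y = batch
        return nn.functional.cross_entropy(self.model(x), y)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        acc = (self.model(x).argmax(dim=1) == y).float().mean()
        self.log("val_acc", acc, prog_bar=True)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.lr)

def loaders():
    # Random stand-in data so the sketch runs without downloading FashionMNIST.
    x, y = torch.randn(512, 1, 28, 28), torch.randint(0, 10, (512,))
    ds = TensorDataset(x, y)
    return DataLoader(ds, batch_size=64), DataLoader(ds, batch_size=64)

def objective(trial: optuna.Trial) -> float:
    hidden = trial.suggest_int("hidden", 16, 256, log=True)
    lr = trial.suggest_float("lr", 1e-4, 1e-1, log=True)
    train_dl, val_dl = loaders()
    trainer = pl.Trainer(max_epochs=2, enable_checkpointing=False, logger=False)
    trainer.fit(Net(hidden, lr), train_dl, val_dl)
    return trainer.callback_metrics["val_acc"].item()

if __name__ == "__main__":
    study = optuna.create_study(direction="maximize", pruner=optuna.pruners.MedianPruner())
    study.optimize(objective, n_trials=10)
    print(study.best_params)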
@edgarriba
edgarriba / hosq.rs
Created October 24, 2024 18:22
data pipelines in rust with zenoh
use std::{
    sync::{Arc, Mutex},
    time::Duration,
};
use zenoh::Wait;

#[derive(thiserror::Error, Debug)]
pub enum NodeError {
    #[error("Node error: {0}")]