This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
-- Torch dependencies; cutorch/cunn require a CUDA-capable device.
require 'torch';
require 'math'
require 'io'
require 'cutorch';
require 'cunn';

-- Tensor precision used by the rest of the script.
opt = 'DOUBLE' -- FLOAT / DOUBLE / CUDA
-----------------------------------------------------------
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// System header: use angle brackets so the compiler searches the system
// include path rather than the current directory first.
#include <stdio.h>

// Enable CUDA error checking throughout this translation unit.
#define CUDA_ERROR_CHECK

// Check the error code 'err' returned by a synchronous CUDA API call,
// recording the call site via __FILE__/__LINE__.
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )

// Check whether any error occurred during asynchronous execution of a
// previously launched CUDA __global__ kernel.
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// In-kernel malloc heap limit: 1 GiB. Parenthesized so the macro expands
// safely inside larger arithmetic expressions.
#define MALLOC_LIMIT (1024 * 1024 * 1024)

// Guard so cudaDeviceSetLimit is only invoked once per process.
bool IsMallocSet = false;

/**
 * API to set the malloc limit of GPU
 */
static void setMallocLimit() { |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/** | |
* API to report the memory usage of the GPU | |
*/ | |
static void reportMemStatus() { | |
// show memory usage of GPU | |
size_t free_byte; | |
size_t total_byte; | |
size_t malloc_byte; |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#include <stdio.h> | |
/** | |
* A dummy cuda asynchronous function | |
*/ | |
__global__ void fillContents(int N, int* output) | |
{ | |
int correctIndex = threadIdx.x * N; | |
for(int i = correctIndex; i < N; i++) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
if(opt.traintype == 'finetuning' and epoch == 1) then | |
-- find the number of elements for which the learning rates to be reduced | |
totalParams = 0; | |
isAfterCrossNeighbor = 0; | |
dontFreeze = 0; | |
for index, node in ipairs(model.modules) do | |
--currParams = node:getParameters() | |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# @Author: arul
# @Date: 2017-11-13T21:45:01+05:30
# @Last modified by: arul
# @Last modified time: 2017-11-13T22:56:47+05:30
# Your init script
#
# Atom will evaluate this file each time a new window is opened. It is run
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
running install
running build_deps
+ USE_CUDA=0
+ USE_ROCM=0
+ USE_NNPACK=0
+ USE_MKLDNN=0
+ USE_GLOO_IBVERBS=0
+ USE_DISTRIBUTED_MW=0
+ FULL_CAFFE2=0
+ [[ 10 -gt 0 ]]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch | |
import torch.nn as nn | |
import torch.nn.functional as F | |
class model(nn.Module): | |
def __init__(self): | |
super(model, self).__init__() | |
self.conv1 = nn.Conv1d(9, 18, kernel_size=3) #9 input channels, 18 output channels | |
self.conv2 = nn.Conv1d(18, 36, kernel_size=3) #18 input channels from previous Conv. layer, 36 out | |
self.conv2_drop = nn.Dropout2d() #dropout |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch, torch.nn as nn, torch.nn.functional as F | |
import numpy as np | |
import torch.optim as optim | |
# tied autoencoder using off the shelf nn modules | |
class TiedAutoEncoderOffTheShelf(nn.Module): | |
def __init__(self, inp, out, weight): | |
super().__init__() | |
self.encoder = nn.Linear(inp, out, bias=False) | |
self.decoder = nn.Linear(out, inp, bias=False) |
OlderNewer