Updated 4/11/2018
Here's my experience installing the NVIDIA CUDA Toolkit 9.0 on a fresh install of Ubuntu Desktop 16.04.4 LTS.
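On 16.04 the usual route is NVIDIA's apt repository for CUDA 9.0. A rough sketch of those steps is below; the repo package filename and key URL may have changed, so treat them as assumptions and verify against NVIDIA's CUDA 9.0 download page before running anything.

# Sketch: CUDA 9.0 via NVIDIA's network deb repo on Ubuntu 16.04 x86_64
# (filename/URLs are assumptions -- confirm on NVIDIA's download page)
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_9.0.176-1_amd64.deb
sudo dpkg -i cuda-repo-ubuntu1604_9.0.176-1_amd64.deb
sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub
sudo apt-get update
sudo apt-get install cuda-9-0
# expose nvcc and the CUDA libraries
echo 'export PATH=/usr/local/cuda-9.0/bin:$PATH' >> ~/.bashrc
echo 'export LD_LIBRARY_PATH=/usr/local/cuda-9.0/lib64:$LD_LIBRARY_PATH' >> ~/.bashrc
source ~/.bashrc
nvcc --version   # should report "release 9.0"

The apt route keeps the driver and toolkit upgradable through the package manager, which tends to be less fragile on a desktop install than the standalone .run installer.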
import logging
import multiprocessing
import time
import mplog

FORMAT = '%(asctime)s - %(processName)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
existing_logger = logging.getLogger('x')
library(shiny)
library(datasets)

Logged = FALSE;
PASSWORD <- data.frame(Brukernavn = "withr", Passord = "25d55ad283aa400af464c76d713c07ad")

# Define server logic required to summarize and view the selected dataset
shinyServer(function(input, output) {
  source("www/Login.R", local = TRUE)
  observe({
    if (USER$Logged == TRUE) {
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import math
import numpy as np

sys.path.append('xgboost/wrapper/')
import xgboost as xgb
## install Catalyst proprietary driver
sudo ntfsfix /dev/sda2
## back up the current X server configuration
sudo cp /etc/X11/xorg.conf /etc/X11/xorg.conf.BAK
## purge any existing fglrx packages, then reinstall the proprietary stack
sudo apt-get remove --purge fglrx*
sudo apt-get install linux-headers-generic
sudo apt-get install fglrx xvba-va-driver libva-glx1 libva-egl1 vainfo
## generate an initial xorg.conf for the new driver
sudo amdconfig --initial
## install build essentials
sudo apt-get install cmake
// -------------
// in gstCamera.cpp, gstCamera::buildLaunchStr():
ss << "v4l2src device=\"/dev/video1\" ! video/x-raw, width=(int)" << mWidth << ", height=(int)" << mHeight << ", format=(string)YUY2 ! appsink name=mysink";
// -------------
// in gstCamera.cpp, gstCamera::ConvertRGBA():
if(CUDA_FAILED(cudaYUYVToRGBAf((uchar2*)input, (float4*)mRGBA, mWidth, mHeight)))
	return false;
#!/bin/bash
#
# EDIT: this script is outdated, please see https://forums.developer.nvidia.com/t/pytorch-for-jetson-nano-version-1-6-0-now-available
#
sudo apt-get install python-pip
# upgrade pip
pip install -U pip
pip --version
# pip 9.0.1 from /home/ubuntu/.local/lib/python2.7/site-packages (python 2.7)
import os
import sys
import numpy as np
from data_util import *
import datetime

ply_filelist = 'scripts/modelnet40_ply_filelist_shuffled.txt'
H5_BATCH_SIZE = 2000
| """ | |
| Use in PyTorch. | |
| """ | |
| def accuracy(output, target): | |
| """Computes the accuracy for multiple binary predictions""" | |
| pred = output >= 0.5 | |
| truth = target >= 0.5 | |
| acc = pred.eq(truth).sum() / target.numel() | |
| return acc |