Rory Mitchell (RAMitchell)

  • Nvidia
  • Cologne, Germany
import numpy as np
import xgboost as xgb
import time
print("Xgboost version: {}".format(xgb.__version__))
# Dataset dimensions: n rows split evenly across query groups (learning-to-rank style setup).
n = 10000000
query_groups = 100000
group_size = n // query_groups  # integer division so the group size is a whole number
m = 10
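The gist preview is cut off after these constants. As a rough sketch of how such grouped data is typically fed to an XGBoost ranker (the objective, labels, and timing loop below are assumptions, not the original gist's code):

# Sketch only: assumed continuation, not the original gist code.
X = np.random.randn(n, m)
y = np.random.randint(0, 5, size=n)  # assumed integer relevance labels
dtrain = xgb.DMatrix(X, label=y)
dtrain.set_group([group_size] * query_groups)  # one size entry per query group
params = {"objective": "rank:pairwise", "tree_method": "hist"}  # assumed parameters
start = time.time()
bst = xgb.train(params, dtrain, num_boost_round=10)
print("Train time: {:.2f}s".format(time.time() - start))
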
import cupy as cp
import GPUtil
import xgboost as xgb
import time
print("Xgboost version: {}".format(xgb.__version__))
n_train = 10000
n_test = 1000
iterations = 20
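This snippet is also truncated. A sketch of the GPU memory and timing measurement it appears to set up follows; the feature count, training parameters, and the report_gpu_memory helper are assumptions:

# Sketch only: assumed continuation, not the original gist code.
def report_gpu_memory(label):
    # GPUtil reports used device memory in MiB.
    print("{}: {} MiB".format(label, GPUtil.getGPUs()[0].memoryUsed))

X_train = cp.random.randn(n_train, 100)
y_train = cp.random.randn(n_train)
report_gpu_memory("After data creation")

dtrain = xgb.DMatrix(X_train, label=y_train)
start = time.time()
bst = xgb.train({"tree_method": "gpu_hist"}, dtrain, num_boost_round=iterations)
print("Train time: {:.2f}s".format(time.time() - start))
report_gpu_memory("After training")

preds = bst.predict(xgb.DMatrix(cp.random.randn(n_test, 100)))
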
import numpy as np
import GPUtil
import xgboost as xgb
print("Xgboost version: {}".format(xgb.__version__))
n = 10000
m = 100
X = np.random.randn(n, m)
y = np.random.randn(n)
exp_models = []
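The empty exp_models list suggests models from repeated runs are collected for comparison. A sketch of such a check, where the run count, parameters, and the byte-for-byte comparison are assumptions rather than the gist's actual code:

# Sketch only: assumed continuation, not the original gist code.
dtrain = xgb.DMatrix(X, label=y)
for run in range(5):  # assumed number of repeated runs
    bst = xgb.train({"tree_method": "gpu_hist"}, dtrain, num_boost_round=10)
    exp_models.append(bst.save_raw())  # serialised model bytes

# Identical serialised models would indicate the training runs are deterministic.
print(all(model == exp_models[0] for model in exp_models))
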
import unittest
import numpy as np
from sklearn.metrics import log_loss
import xgboost as xgb
print("Xgboost version: {}".format(xgb.__version__))
np.random.seed(1994)
kRows = 1000
kCols = 64
kClasses = 4
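Only the fixtures survive in this preview. Given the unittest and log_loss imports and the multiclass constants, here is a sketch of the kind of test such a file might contain; the class name, training parameters, and loss threshold below are assumptions:

# Sketch only: assumed continuation, not the original gist code.
X = np.random.randn(kRows, kCols)
y = np.random.randint(0, kClasses, size=kRows)

class TestMulticlass(unittest.TestCase):
    def test_softprob_logloss(self):
        dtrain = xgb.DMatrix(X, label=y)
        bst = xgb.train({"objective": "multi:softprob", "num_class": kClasses},
                        dtrain, num_boost_round=10)
        probs = bst.predict(dtrain)  # shape (kRows, kClasses)
        self.assertLess(log_loss(y, probs), 1.5)  # assumed loose threshold

if __name__ == "__main__":
    unittest.main()
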
import cupy
import torch
import GPUtil
import xgboost as xgb
from torch.utils.dlpack import to_dlpack
from torch.utils.dlpack import from_dlpack
mem_before = GPUtil.getGPUs()[0].memoryUsed
# Create a PyTorch tensor.
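# (Sketch only: the preview is truncated here. The lines below are an assumed
# continuation showing a typical DLPack handoff from PyTorch to CuPy to XGBoost;
# the tensor shape and labels are illustrative, not the original gist's values.)
tensor = torch.randn(10000, 100, device="cuda")
print("Tensor allocation: {} MiB".format(GPUtil.getGPUs()[0].memoryUsed - mem_before))

# Zero-copy handoff: PyTorch tensor -> DLPack capsule -> CuPy array.
cupy_array = cupy.fromDlpack(to_dlpack(tensor))

# XGBoost consumes the CuPy array directly on the device.
dtrain = xgb.DMatrix(cupy_array, label=cupy.random.randn(10000))
print("After DMatrix: {} MiB".format(GPUtil.getGPUs()[0].memoryUsed - mem_before))

# And back again: CuPy array -> DLPack capsule -> PyTorch tensor.
round_trip = from_dlpack(cupy_array.toDlpack())
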
RAMitchell / device_dmatrix_memory.py (created May 19, 2020 02:31)
Demonstration of memory usage for XGBoost DeviceQuantileDMatrix
import xgboost as xgb
import cupy as cp
import GPUtil
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("seaborn")
dmatrix_type = [xgb.DMatrix, xgb.DeviceQuantileDMatrix]
test_sizes = [(300000, 1000), (30000, 10000)]
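The measurement and plotting part of the gist is not shown. A sketch of the loop the setup implies, assuming each test size is a (rows, columns) pair and using GPUtil for before/after readings (the helper name and the loop structure are assumptions):

# Sketch only: assumed continuation, not the original gist code.
def used_device_memory():
    return GPUtil.getGPUs()[0].memoryUsed  # MiB

results = {}
for rows, cols in test_sizes:
    X = cp.random.randn(rows, cols).astype(cp.float32)
    y = cp.random.randn(rows).astype(cp.float32)
    for constructor in dmatrix_type:
        baseline = used_device_memory()
        dmat = constructor(X, label=y)
        results[(constructor.__name__, rows, cols)] = used_device_memory() - baseline
        del dmat

for key, mib in results.items():
    print(key, mib, "MiB")
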
RAMitchell / avx.R (last active November 15, 2018 12:02; forked from khotilov/avx.R)
AVX sigmoid test
library(Rcpp)
# set the compiler flags in ~/.R/Makevars (or ~/Documents/.R/Makevars on Windows)
# by adding the following line to it:
# CXXFLAGS=-O3 -Wall -mtune=native -funroll-loops -mavx -mfma
sourceCpp("avx_test.cc")
curve(approximate_sigmoid(x) - 1/(1 + exp(-x)), -12, 12, n = 1000); grid()
# exp4096 mostly underestimates exp()
curve(log(exp4096(x)) - x, -9, 9, n = 1000); grid()