Skip to content

Instantly share code, notes, and snippets.

@StevenMMortimer
Last active March 8, 2024 22:06
Show Gist options
  • Save StevenMMortimer/1b4b626d3d91240a77f969ae04b37114 to your computer and use it in GitHub Desktop.
Most Starred R Packages
# load packages & custom functions ---------------------------------------------
# Analysis window: CRAN download counts are summed over this fixed five-year
# span; today_date is only used for plot captions.
today_date <- Sys.Date()
from_date <- as.Date("2015-06-01")
to_date <- as.Date("2020-05-31")
library(tidyverse)
library(httr)
library(cranlogs)
library(ggrepel)
library(scales)
library(lubridate)
library(knitr)
library(stringr)
# Format a Date as m/d/yy without zero padding (e.g. 2020-05-31 -> "5/31/20").
# Used for the plot captions.
date_without_zeros <- function(x) {
  padded <- format(x, "%m/%d/%y")
  # drop each zero that directly precedes another digit (the padding zeros)
  gsub("0(\\d)", "\\1", padded)
}
# Normalize a CRAN URL/BugReports field down to a canonical GitHub repo URL.
# Handles common noise in DESCRIPTION files: http vs https, a www. prefix,
# trailing "/issues", ".git" suffixes, prose around the link, comma- or
# space-separated URL lists (first GitHub link wins), "<...>" wrappers, and
# trailing "(role)" annotations after owner/repo.
# NOTE: restored "github.com" literals that a link-rewriting proxy had
# mangled into "github.com" (the API base on other lines was intact).
gh_from_url <- function(x){
  x <- gsub("http://", "https://", tolower(x))
  x <- gsub("www\\.github\\.com", "github.com", x)
  # bare "github.com/..." with no scheme
  x <- gsub("^github.com", "https://github.com", x)
  x <- gsub("/issues", "", x)
  x <- gsub("\\.git", "", x)
  x <- gsub("For source code, development versions and issue tracker see", "", x, ignore.case=TRUE)
  x <- trimws(x)
  x <- gsub("development versions and issue tracker see ", "", x, ignore.case=TRUE)
  x <- trimws(x)
  # strip an angle-bracket wrapper: "<url>" -> "url"
  x <- gsub("^<(.*)>$", "\\1", x)
  gh_pattern <- 'http://github.com|https://github.com|http://www.github.com'
  if(grepl(',', x)){
    # several URLs listed; keep the first one that points at GitHub
    x <- strsplit(x, ",")[[1]]
    x <- trimws(x[min(which(grepl(gh_pattern, x)))])
  }
  if(grepl(' ', x)){
    x <- strsplit(x, " ")[[1]]
    x <- trimws(x[min(which(grepl(gh_pattern, x)))])
  }
  # drop trailing "(role)" annotations after the repo name
  # (fixed: character class was [a-zA-z], which also matches [ \ ] ^ _ `)
  x <- gsub("^(.*)/(.*)#\\([a-zA-Z]+\\)\\b", "\\1/\\2", x)
  x <- gsub("^(.*)/(.*)[[:space:]]+\\([a-zA-Z]+\\)\\b", "\\1/\\2", x)
  # if prose precedes the URL, keep only the URL
  x <- gsub("^(.*) http(.*)$", "http\\2", x)
  x <- trimws(x)
  x <- gsub("/$", "", x)
  return(trimws(x))
}
# Extract the primary author/maintainer name from a CRAN "Author" field.
# Prefers the person tagged [aut, cre] (or [cre]); strips role tags,
# trailing parenthetical notes, and e-mail addresses.
aut_maintainer_from_details <- function(x){
  # remove stray quoting first
  cleaned <- gsub("'|\"", "", x)
  if (grepl(",", cleaned)) {
    # split on "]," so each chunk is one person plus their role tags
    people <- strsplit(cleaned, "\\],")[[1]]
    is_maintainer <- grepl(pattern = '\\[aut, cre|\\[cre, aut|\\[cre',
                           people, ignore.case = TRUE)
    if (any(is_maintainer)) {
      cleaned <- people[min(which(is_maintainer))]
      cleaned <- gsub("\\[aut, cre|\\[cre, aut|\\[cre", "", cleaned)
    } else {
      cleaned <- people
    }
    # keep only the name ahead of any remaining comma / bracket
    cleaned <- strsplit(cleaned, ",")[[1]][1]
    cleaned <- trimws(gsub("\\]", "", cleaned))
    cleaned <- trimws(gsub(" \\[aut", "", cleaned))
  }
  cleaned <- trimws(gsub(" \\(.*\\)$", "", cleaned))
  trimws(gsub(" <.*>$", "", cleaned))
}
# Return the stargazer count for a GitHub repo URL, or NA_integer_ on any
# failure (bad URL, 404, rate limit). Relies on the global `gtoken` httr
# auth config; sleeps briefly per call to stay under the API rate limit.
gh_star_count <- function(url){
  Sys.sleep(0.5)
  stars <- tryCatch({
    # fixed: the repo-URL prefix had been mangled by a link rewriter; it
    # must map https://github.com/owner/repo onto the REST API repos path
    this_url <- gsub("https://github.com/", "https://api.github.com/repos/", url)
    req <- GET(this_url, gtoken)
    stop_for_status(req)
    cont <- content(req)
    cont$stargazers_count
  }, error = function(e){
    return(NA_integer_)
  })
  return(stars)
}
# Return the ISO-8601 date of the most recent commit on a GitHub repo,
# or NA_character_ on any failure. Relies on the global `gtoken` config.
gh_last_commit_date <- function(url){
  last_commit <- tryCatch({
    # fixed: restore the github.com prefix so repo URLs map to the API path
    this_url <- gsub("https://github.com/", "https://api.github.com/repos/", url)
    # only the newest commit is needed, so ask for a single-item page
    req <- GET(paste0(this_url, "/commits?page=1&per_page=1"), gtoken)
    stop_for_status(req)
    cont <- content(req)
    cont[[1]]$commit$committer$date
  }, error = function(e){
    return(NA_character_)
  })
  return(last_commit)
}
# authenticate to github -------------------------------------------------------
# use Hadley's key and secret
# NOTE(review): these are the publicly published demo OAuth credentials from
# the httr documentation. Hard-coding secrets in source is normally a security
# problem -- for your own runs, register an app and supply key/secret via
# environment variables instead.
myapp <- oauth_app("github",
key = "56b637a5baffac62cad9",
secret = "8e107541ae1791259e9987d544ca568633da2ebf")
# opens a browser for the OAuth dance, then caches the token locally
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# gtoken is used as a global by gh_star_count() / gh_last_commit_date()
gtoken <- config(token = github_token)
# pull list of packages --------------------------------------------------------
# get list of currently available packages on CRAN
pkgs <- tools::CRAN_package_db()
# remove duplicate MD5sum column since tibbles can't handle duplicate column names
pkgs <- pkgs[,unique(names(pkgs))]
# drop any duplicated package entries, keeping the first occurrence
pkgs <- pkgs %>%
rename(Name = Package) %>%
distinct(Name, .keep_all = TRUE)
# get details for each package -------------------------------------------------
# For every CRAN package: locate its GitHub repo (URL field first, BugReports
# as fallback), fetch stars + last commit from the GitHub API, and sum CRAN
# downloads over the analysis window.
# WARNING: This takes awhile to complete (one API call set per package).
all_pkg_details <- NULL
# Rows are collected in a preallocated list and bound once at the end;
# rbind-ing inside the loop is quadratic in the number of packages.
pkg_rows <- vector("list", nrow(pkgs))
# fixed: restore github.com literals mangled by a link rewriter
gh_url_pattern <- 'http://github.com|https://github.com|http://www.github.com'
for(i in seq_len(nrow(pkgs))){
  if(i %% 5 == 0){
    Sys.sleep(1)  # be polite to the APIs
  }
  if(i %% 100 == 0){
    message(sprintf("Processing package #%s out of %s", i, nrow(pkgs)))
  }
  this_url <- pkgs[i,]$URL
  on_github <- FALSE
  this_github_url <- NA_character_
  gh_stars <- NA_integer_
  gh_last_commit <- NA_character_
  # URL may be NA; grepl(pattern, NA) returns NA, which would crash if(),
  # so guard for NA as well as NULL
  if(!is.null(this_url) && !is.na(this_url)){
    on_github <- grepl(gh_url_pattern, this_url)
    if(on_github){
      this_github_url <- gh_from_url(this_url)
      gh_stars <- gh_star_count(this_github_url)
      gh_last_commit <- gh_last_commit_date(this_github_url)
    } else {
      # check the BugReports URL as a backup (e.g. shiny package references GitHub this way)
      issues_on_github <- grepl(gh_url_pattern, pkgs[i,]$BugReports)
      if(length(issues_on_github) == 0 || is.na(issues_on_github) || !issues_on_github){
        this_github_url <- NA_character_
      } else {
        this_github_url <- gh_from_url(pkgs[i,]$BugReports)
        gh_stars <- gh_star_count(this_github_url)
        gh_last_commit <- gh_last_commit_date(this_github_url)
        on_github <- TRUE
      }
    }
  } else {
    this_url <- NA_character_
  }
  downloads <- cran_downloads(pkgs[i,]$Name, from=from_date, to=to_date)
  pkg_rows[[i]] <- tibble(name = pkgs[i,]$Name,
                          description = pkgs[i,]$Description,
                          published = pkgs[i,]$Published,
                          author = aut_maintainer_from_details(pkgs[i,]$Author),
                          url = this_url,
                          github_ind = on_github,
                          github_url = this_github_url,
                          downloads = sum(downloads$count),
                          stars = gh_stars,
                          last_commit = gh_last_commit)
}
all_pkg_details <- bind_rows(pkg_rows)
# save dataset for Twitter bot -------------------------------------------------
# remove observations where the GitHub URL refers to a repository that
# is not specific to R and therefore might have an inflated star count
all_pkg_details_clean <- all_pkg_details %>%
filter(!(name %in% c('xgboost', 'h2o', 'feather', 'prophet', 'mlflow', 'xtensor', 'arrow', 'interpret', 'mlr'))) %>%
filter(as_datetime(last_commit) >= today() - years(1)) %>% # MUST BE RECENTLY BEING WORKED ON IN LAST YEAR!!!
mutate(downloads_per_star = downloads / stars,
downloads_per_star = ifelse(!is.finite(downloads_per_star), NA_real_, downloads_per_star)) %>% # zero stars would give Inf; store NA instead
write_csv(all_pkg_details_clean, "r-package-star-download-data.csv")
# basic summary stats ----------------------------------------------------------
# proportion of all packages listing github
sum(all_pkg_details$github_ind)
mean(all_pkg_details$github_ind)
# proportion of packages with stars
mean(!is.na(all_pkg_details$stars))
# typical number of stars per package
mean(all_pkg_details_clean$stars, na.rm=TRUE)
median(all_pkg_details_clean$stars, na.rm=TRUE)
max(all_pkg_details_clean$stars, na.rm=TRUE)
# typical number of downloads per package
mean(all_pkg_details_clean$downloads, na.rm=TRUE)
median(all_pkg_details_clean$downloads, na.rm=TRUE)
# percent of packages over 10 stars
mean(all_pkg_details_clean$stars > 10, na.rm=TRUE)
# typical downloads-per-star ratio
mean(all_pkg_details_clean$downloads_per_star, na.rm=TRUE)
median(all_pkg_details_clean$downloads_per_star, na.rm=TRUE)
# stars histogram --------------------------------------------------------------
# log1p x-scale keeps the zero-star packages visible while spreading out the tail
# NOTE(review): ..count.. is the legacy ggplot2 spelling; newer versions prefer
# after_stat(count) -- confirm the installed ggplot2 version before changing.
ggplot(data=all_pkg_details_clean, mapping=aes(stars)) +
geom_histogram(aes(fill=..count..), bins=60) +
scale_x_continuous(trans = "log1p", breaks=c(0,1,2,3,10,100,1000,3000)) +
labs(x = "Stars",
y = "Count",
fill = "Count",
caption = sprintf("Sources: api.github.com as of %s",
date_without_zeros(today_date))) +
ggtitle("Distribution of GitHub Stars on R Packages") +
theme_bw() +
theme(panel.grid.minor = element_blank(),
plot.caption=element_text(hjust = 0))
# stars to downloads scatterplot -----------------------------------------------
plot_dat <- all_pkg_details_clean
# label only the outliers (>10M downloads or >1000 stars); all other points
# get an empty-string name so geom_text_repel leaves them unlabeled
idx_label <- which(with(plot_dat, downloads > 10000000 | stars > 1000))
plot_dat$name2 <- plot_dat$name
plot_dat$name <- ""
plot_dat$name[idx_label] <- plot_dat$name2[idx_label]
# labeled (outlier) points are drawn red, the rest grey
ggplot(data=plot_dat, aes(stars, downloads, label = name)) +
geom_point(color = ifelse(plot_dat$name == "", "grey50", "red")) +
geom_text_repel(box.padding = .5) +
scale_y_continuous(labels = comma) +
scale_x_continuous(labels = comma) +
labs(x = "GitHub Stars",
y = "CRAN Downloads",
caption = sprintf("Sources:\napi.github.com as of %s\ncranlogs as of %s - %s",
date_without_zeros(today_date),
date_without_zeros(from_date),
date_without_zeros(to_date))) +
ggtitle("Relationship Between CRAN Downloads and GitHub Stars") +
theme_bw() +
theme(plot.caption=element_text(hjust = 0))
# author stats -----------------------------------------------------------------
# summary by author: total downloads and stars across each author's packages
authors_detail <- all_pkg_details_clean %>%
group_by(author) %>%
summarize(downloads = sum(downloads, na.rm=TRUE),
stars = sum(stars, na.rm=TRUE)) %>%
mutate(downloads_per_star = downloads / stars,
downloads_per_star = ifelse(!is.finite(downloads_per_star), NA_real_, downloads_per_star)) %>% # zero stars -> NA, not Inf
arrange(desc(downloads))
# popular authors: a hand-curated list with example packages for the write-up
pop_authors <- tibble(author = c('Hadley Wickham',
'Dirk Eddelbuettel',
'Yihui Xie',
'Winston Chang',
'Jennifer Bryan',
'JJ Allaire',
'Jeroen Ooms',
'Scott Chamberlain',
'Jim Hester',
'Kirill Müller'),
notable_packages = c('ggplot2, dplyr, httr',
'Rcpp, BH',
'knitr, rmarkdown, bookdown',
'R6, shiny',
'readxl, gapminder, googlesheets',
'rstudioapi, reticulate, tensorflow',
'jsonlite, curl, openssl',
'geojsonio, taxize',
'devtools, memoise, readr',
'tibble, DBI')
)
# join curated list to computed totals; snake_case headers become Title Case
author_stats <- pop_authors %>%
inner_join(., authors_detail, by='author') %>%
select(author, notable_packages, downloads, stars, downloads_per_star) %>%
mutate(downloads_per_star = round(downloads_per_star, 1)) %>%
rename_all(. %>% gsub("_", " ", .) %>% str_to_title)
# single author
#all_pkg_details_clean %>% filter(author == 'Dirk Eddelbuettel') %>% arrange(desc(downloads))
# top 10 lists -----------------------------------------------------------------
# Shared column selection, optional rounding, and Title Case header formatting
# for the four "top 10" tables below (was copy-pasted four times).
fmt_top_table <- function(df, digits = NA){
  df <- df %>% select(name, author, downloads, stars, downloads_per_star)
  if(!is.na(digits)){
    df <- df %>% mutate(downloads_per_star = round(downloads_per_star, digits))
  }
  df %>% rename_all(. %>% gsub("_", " ", .) %>% str_to_title)
}
# Top 10 Most Starred Packages
top_starred <- all_pkg_details_clean %>%
  arrange(desc(stars)) %>%
  slice(1:10) %>%
  fmt_top_table(digits = 1)
# Top 10 Most Downloaded Packages with stars
top_downloaded <- all_pkg_details_clean %>%
  filter(!is.na(stars)) %>%
  arrange(desc(downloads)) %>%
  slice(1:10) %>%
  fmt_top_table(digits = 1)
# Bottom 10 Packages by Downloads per Star (frequently starred)
# downloads > 100 filters out brand-new packages with tiny denominators
frequently_starred <- all_pkg_details_clean %>%
  filter(downloads > 100) %>%
  arrange(downloads_per_star) %>%
  slice(1:10) %>%
  fmt_top_table(digits = 2)
# Top 10 Packages by Downloads per Star (infrequently starred; unrounded)
infrequently_starred <- all_pkg_details_clean %>%
  arrange(desc(downloads_per_star)) %>%
  slice(1:10) %>%
  fmt_top_table()
We can't make this file beautiful and searchable because it's too large.
name,description,published,author,url,github_ind,github_url,downloads,stars,last_commit,downloads_per_star
abbyyR,"Get text from images of text using Abbyy Cloud Optical Character
Recognition (OCR) API. Easily OCR images, barcodes, forms, documents with
machine readable zones, e.g. passports. Get the results in a variety of formats
including plain text and XML. To learn more about the Abbyy OCR API, see
<http://ocrsdk.com/>.",2019-06-25,Gaurav Sood,http://github.com/soodoku/abbyyR,TRUE,https://github.com/soodoku/abbyyr,38425,38,2019-06-30T01:53:13Z,1011.1842105263158
ABCoptim,"An implementation of Karaboga (2005) Artificial Bee Colony
Optimization algorithm <http://mf.erciyes.edu.tr/abc/pub/tr06_2005.pdf>.
This (working) version is a Work-in-progress, which is
why it has been implemented using pure R code. This was developed upon the basic
version programmed in C and distributed at the algorithm's official website.",2017-11-06,George Vega Yon,"http://github.com/gvegayon/ABCoptim, http://mf.erciyes.edu.tr/abc/",TRUE,https://github.com/gvegayon/abcoptim,32889,22,2020-05-31T18:09:24Z,1494.9545454545455
abdiv,"A collection of measures for measuring ecological diversity.
Ecological diversity comes in two flavors: alpha diversity measures the
diversity within a single site or sample, and beta diversity measures the
diversity across two sites or samples. This package overlaps considerably
with other R packages such as 'vegan', 'gUniFrac', 'betapart', and 'fossil'.
We also include a wide range of functions that are implemented in software
outside the R ecosystem, such as 'scipy', 'Mothur', and 'scikit-bio'. The
implementations here are designed to be basic and clear to the reader.",2020-01-20,Kyle Bittinger,https://github.com/kylebittinger/abdiv,TRUE,https://github.com/kylebittinger/abdiv,2831,0,2020-01-26T20:25:01Z,NA
abjutils,"The Brazilian Jurimetrics Association (ABJ in
Portuguese, see <http://www.abjur.org.br/en/> for more information) is
a non-profit organization which aims to investigate and promote the
use of statistics and probability in the study of Law and its
institutions. This package implements general purpose tools used by
ABJ, such as functions for sampling and basic manipulation of
Brazilian lawsuits identification number. It also implements functions
for text cleaning, such as accentuation removal.",2019-02-07,Caio Lente,https://github.com/abjur/abjutils,TRUE,https://github.com/abjur/abjutils,40051,19,2019-09-28T20:34:27Z,2107.9473684210525
ace2fastq,"The ACE file format is used in genomics to store contigs from sequencing machines.
This tools converts it into FASTQ format. Both formats contain the
sequence characters and their
corresponding quality information. Unlike the FASTQ file, the ace file stores the
quality values numerically.
The conversion algorithm uses the standard Sanger formula. The package facilitates insertion
into pipelines, and content inspection.",2019-06-20,Reinhard Simon,https://github.com/c5sire/ace2fastq,TRUE,https://github.com/c5sire/ace2fastq,5850,0,2020-02-24T10:48:08Z,NA
ActFrag,"Recent studies haven shown that, on top of total daily active/sedentary volumes, the time
accumulation strategies provide more sensitive information. This package provides functions to extract
commonly used fragmentation metrics to quantify such time accumulation strategies based on minute level
actigraphy-measured activity counts data. ",2020-02-11,Junrui Di,https://github.com/junruidi/ActFrag,TRUE,https://github.com/junruidi/actfrag,5735,0,2020-02-28T02:22:27Z,NA
activityCounts,"ActiLife software generates activity counts from data collected by Actigraph accelerometers <https://s3.amazonaws.com/actigraphcorp.com/wp-content/uploads/2017/11/26205758/ActiGraph-White-Paper_What-is-a-Count_.pdf>.
Actigraph is one of the most common research-grade accelerometers. There is considerable research
validating and developing algorithms for human activity using ActiLife counts. Unfortunately,
ActiLife counts are proprietary and difficult to implement if researchers use different accelerometer brands.
The code creates ActiLife counts from raw acceleration data for different accelerometer brands and it is developed
based on the study done by Brond and others (2017) <doi:10.1249/MSS.0000000000001344>.",2019-07-31,SeyedJavad KhataeiPour,"https://github.com/walkabillylab/activityCounts,
https://github.com/jbrond/ActigraphCounts",TRUE,https://github.com/walkabillylab/activitycounts,5069,2,2019-11-20T17:12:22Z,2534.5
adapr,"Tracks reading and writing within R scripts that are organized into
a directed acyclic graph. Contains an interactive shiny application adaprApp().
Uses git2r package, Git and file hashes to track version histories of input
and output. See package vignette for how to get started. V1.02 adds parallel
execution of project scripts and function map in vignette. Makes project
specification argument last in order. V2.0 adds project specific libraries, packrat option, and adaprSheet().",2017-11-30,Jon Gelfond,NA,TRUE,https://github.com/gelfondjal/adapr,19374,13,2020-01-28T22:56:18Z,1490.3076923076924
AdaptGauss,"Multimodal distributions can be modelled as a mixture of components. The model is derived using the Pareto Density Estimation (PDE) for an estimation of the pdf. PDE has been designed in particular to identify groups/classes in a dataset. Precise limits for the classes can be calculated using the theorem of Bayes. Verification of the model is possible by QQ plot, Chi-squared test and Kolmogorov-Smirnov test. The package is based on the publication of Ultsch, A., Thrun, M.C., Hansen-Goos, O., Lotsch, J. (2015) <DOI:10.3390/ijms161025897>.",2020-02-03,Michael Thrun,https://www.uni-marburg.de/fb12/datenbionik/software-en,TRUE,https://github.com/mthrun/adaptgauss,29418,0,2020-02-03T17:16:37Z,NA
adaptMT,"Implementation of adaptive p-value thresholding (AdaPT), including both a framework that allows the user to specify any
algorithm to learn local false discovery rate and a pool of convenient functions that implement specific
algorithms. See Lei, Lihua and Fithian, William (2016) <arXiv:1609.06035>.",2018-07-31,Lihua Lei,"https://arxiv.org/abs/1609.06035,
https://github.com/lihualei71/adaptMT",TRUE,https://github.com/lihualei71/adaptmt,9397,6,2020-02-29T23:25:30Z,1566.1666666666667
add2ggplot,Create 'ggplot2' themes and color palettes.,2020-02-07,Jiaxiang Li,https://github.com/JiaxiangBU/add2ggplot,TRUE,https://github.com/jiaxiangbu/add2ggplot,2492,2,2020-02-08T12:07:44Z,1246
addinslist,"Browse through a continuously updated list of existing RStudio
addins and install/uninstall their corresponding packages.",2019-08-30,Dean Attali,https://github.com/daattali/addinslist,TRUE,https://github.com/daattali/addinslist,42488,567,2020-05-11T17:07:38Z,74.93474426807761
addinsOutline,"'RStudio' allows to show and navigate for the outline of a
R Markdown file, but not for R Markdown projects with multiple
files. For this reason, I have developed several 'RStudio' addins capable
of show project outline. Each addin is specialized in showing projects
of different types: R Markdown project, 'bookdown' package project
and 'LaTeX' project. There is a configuration file that allows you
to customize additional searches.",2019-12-02,Pedro L. Luque-Calvo,https://github.com/calote/addinsOutline,TRUE,https://github.com/calote/addinsoutline,3302,0,2019-11-29T09:25:38Z,NA
ade4,"Tools for multivariate data analysis. Several methods are provided for the analysis (i.e., ordination) of one-table (e.g., principal component analysis, correspondence analysis), two-table (e.g., coinertia analysis, redundancy analysis), three-table (e.g., RLQ analysis) and K-table (e.g., STATIS, multiple coinertia analysis). The philosophy of the package is described in Dray and Dufour (2007) <doi:10.18637/jss.v022.i04>.",2020-02-13,Stéphane Dray,http://pbil.univ-lyon1.fr/ADE-4,TRUE,https://github.com/sdray/ade4,1469321,13,2020-04-23T15:23:48Z,113024.69230769231
ade4TkGUI,A Tcl/Tk GUI for some basic functions in the 'ade4' package.,2019-09-17,Jean Thioulouse,"http://pbil.univ-lyon1.fr/ade4TkGUI, Mailing list:
http://listes.univ-lyon1.fr/wws/info/adelist",TRUE,https://github.com/aursiber/ade4tkgui,78145,0,2019-09-13T09:21:07Z,NA
adegenet,"Toolset for the exploration of genetic and genomic
data. Adegenet provides formal (S4) classes for storing and handling
various genetic data, including genetic markers with varying ploidy
and hierarchical population structure ('genind' class), alleles counts
by populations ('genpop'), and genome-wide SNP data ('genlight'). It
also implements original multivariate methods (DAPC, sPCA), graphics,
statistical tests, simulation tools, distance and similarity measures,
and several spatial methods. A range of both empirical and simulated
datasets is also provided to illustrate various methods.",2020-05-10,Thibaut Jombart,https://github.com/thibautjombart/adegenet,TRUE,https://github.com/thibautjombart/adegenet,300883,101,2020-05-20T00:21:46Z,2979.039603960396
adegraphics,Graphical functionalities for the representation of multivariate data. It is a complete re-implementation of the functions available in the 'ade4' package.,2018-12-18,Stéphane Dray,"http://pbil.univ-lyon1.fr/ADE-4, Mailing list:
http://listes.univ-lyon1.fr/wws/info/adelist",TRUE,https://github.com/sdray/adegraphics,132944,6,2020-06-03T14:04:53Z,22157.333333333332
adept,"Designed for optimal use in performing fast,
accurate walking strides segmentation from high-density
data collected from a wearable accelerometer worn
during continuous walking activity.",2019-06-18,Marta Karas,https://github.com/martakarass/adept,TRUE,https://github.com/martakarass/adept,5402,3,2019-06-18T06:17:42Z,1800.6666666666667
AdhereR,"Computation of adherence to medications from Electronic Health care
Data and visualization of individual medication histories and adherence
patterns. The package implements a set of S3 classes and
functions consistent with current adherence guidelines and definitions.
It allows the computation of different measures of
adherence (as defined in the literature, but also several original ones),
their publication-quality plotting,
the estimation of event duration and time to initiation,
the interactive exploration of patient medication history and
the real-time estimation of adherence given various parameter settings.
It scales from very small datasets stored in flat CSV files to very large
databases and from single-thread processing on mid-range consumer
laptops to parallel processing on large heterogeneous computing clusters.
It exposes a standardized interface allowing it to be used from other
programming languages and platforms, such as Python.",2020-05-12,Dan Dediu,https://github.com/ddediu/AdhereR,TRUE,https://github.com/ddediu/adherer,19940,14,2019-06-14T13:16:52Z,1424.2857142857142
AdhereRViz,"Interactive graphical user interface (GUI) for the package
'AdhereR', allowing the user to access different data sources, to explore
the patterns of medication use therein, and the computation of various
measures of adherence. It is implemented using Shiny and HTML/CSS/JavaScript. ",2020-05-16,Dan Dediu,https://github.com/ddediu/AdhereR,TRUE,https://github.com/ddediu/adherer,389,14,2019-06-14T13:16:52Z,27.785714285714285
adjclust,"Implements a constrained version of hierarchical agglomerative
clustering, in which each observation is associated to a position, and
only adjacent clusters can be merged. Typical application fields in
bioinformatics include Genome-Wide Association Studies or Hi-C data
analysis, where the similarity between items is a decreasing function of
their genomic distance. Taking advantage of this feature, the implemented
algorithm is time and memory efficient. This algorithm is described in
Chapter 4 of Alia Dehman (2015)
<https://hal.archives-ouvertes.fr/tel-01288568v1>.",2019-12-10,Pierre Neuvial,https://github.com/pneuvial/adjclust,TRUE,https://github.com/pneuvial/adjclust,13790,13,2020-06-08T14:59:19Z,1060.7692307692307
adjustedcranlogs,Adjusts output of 'cranlogs' package to account for 'CRAN'-wide daily automated downloads and re-downloads caused by package updates.,2017-11-23,Tyler Morgan-Wall,https://github.com/tylermorganwall/adjustedcranlogs,TRUE,https://github.com/tylermorganwall/adjustedcranlogs,11927,24,2020-02-24T00:56:11Z,496.9583333333333
AdMit,"Provides functions to perform the fitting of an adaptive mixture
of Student-t distributions to a target density through its kernel function as described in
Ardia et al. (2009) <doi:10.18637/jss.v029.i03>. The
mixture approximation can then be used as the importance density in importance
sampling or as the candidate density in the Metropolis-Hastings algorithm to
obtain quantities of interest for the target density itself. ",2020-04-20,David Ardia,https://github.com/ArdiaD/AdMit,TRUE,https://github.com/ardiad/admit,41881,2,2020-04-19T20:50:45Z,20940.5
adoptr,"Optimize one or two-arm, two-stage designs for clinical trials with
respect to several pre-implemented objective criteria or implement custom
objectives.
Optimization under uncertainty and conditional (given stage-one outcome)
constraints are supported.
See Pilz M, Kunzmann K, Herrmann C, Rauch G, Kieser M. A variational
approach to optimal two-stage designs. Statistics in Medicine. 2019;38(21):4159–4171.
<doi:10.1002/sim.8291> for details.",2020-01-09,Kevin Kunzmann,https://github.com/kkmann/adoptr,TRUE,https://github.com/kkmann/adoptr,8701,3,2020-02-10T15:02:50Z,2900.3333333333335
adpss,"Provides the functions for planning and conducting a
clinical trial with adaptive sample size determination. Maximal statistical
efficiency will be exploited even when dramatic or multiple adaptations
are made. Such a trial consists of adaptive determination of sample size
at an interim analysis and implementation of frequentist statistical test at the
interim and final analysis with a prefixed significance level. The required
assumptions for the stage-wise test statistics are independent and stationary
increments and normality. Predetermination of adaptation rule is not required.",2018-09-20,Kosuke Kashiwabara,https://github.com/ca4wa/R-adpss,TRUE,https://github.com/ca4wa/r-adpss,8842,0,2020-01-07T02:52:34Z,NA
afex,"Convenience functions for analyzing factorial experiments using ANOVA or
mixed models. aov_ez(), aov_car(), and aov_4() allow specification of
between, within (i.e., repeated-measures), or mixed (i.e., split-plot)
ANOVAs for data in long format (i.e., one observation per row),
automatically aggregating multiple observations per individual and cell
of the design. mixed() fits mixed models using lme4::lmer() and computes
p-values for all fixed effects using either Kenward-Roger or Satterthwaite
approximation for degrees of freedom (LMM only), parametric bootstrap
(LMMs and GLMMs), or likelihood ratio tests (LMMs and GLMMs).
afex_plot() provides a high-level interface for interaction or one-way
plots using ggplot2, combining raw data and model estimates. afex uses
type 3 sums of squares as default (imitating commercial statistical software).",2020-03-28,Henrik Singmann,"http://afex.singmann.science/, https://github.com/singmann/afex",TRUE,https://github.com/singmann/afex,224665,80,2020-06-09T20:32:59Z,2808.3125
afpt,"Allows estimation and modelling of flight costs in animal (vertebrate) flight,
implementing the aerodynamic power model described in Klein Heerenbrink et al.
(2015) <doi:10.1098/rspa.2014.0952>. Taking inspiration from the program
'Flight', developed by Colin Pennycuick (Pennycuick (2008) ""Modelling the flying
bird"". Amsterdam: Elsevier. ISBN 0-19-857721-4), flight performance is estimated
based on basic morphological measurements such as body mass, wingspan and wing
area. 'afpt' can be used to make predictions on how animals should adjust their
flight behaviour and wingbeat kinematics to varying flight conditions.",2020-03-19,Marco KleinHeerenbrink,https://github.com/MarcoKlH/afpt-r/,TRUE,https://github.com/marcoklh/afpt-r,12870,1,2020-03-18T15:32:16Z,12870
aftgee,"A collection of methods for both the rank-based estimates and least-square estimates
to the Accelerated Failure Time (AFT) model.
For rank-based estimation, it provides approaches that include the computationally
efficient Gehan's weight and the general's weight such as the logrank weight.
Details of the rank-based estimation can be found in
Chiou et al. (2014) <doi:10.1007/s11222-013-9388-2> and
Chiou et al. (2015) <doi:10.1002/sim.6415>.
For the least-square estimation, the estimating equation is solved with
generalized estimating equations (GEE).
Moreover, in multivariate cases, the dependence working correlation structure
can be specified in GEE's setting.
Details on the least-squares estimation can be found in
Chiou et al. (2014) <doi:10.1007/s10985-014-9292-x>.",2018-07-24,Sy Han Chiou,http://github.com/stc04003/aftgee,TRUE,https://github.com/stc04003/aftgee,32629,0,2019-12-19T16:33:21Z,NA
AGD,"Tools for the analysis of growth data: to extract an
LMS table from a gamlss object, to calculate the standard
deviation scores and its inverse, and to superpose two wormplots
from different models. The package contains a some varieties of
reference tables, especially for The Netherlands.",2018-05-29,Stef van Buuren,https://github.com/stefvanbuuren/AGD,TRUE,https://github.com/stefvanbuuren/agd,104241,1,2020-05-05T19:48:54Z,104241
AGHmatrix,"Computation of A (pedigree), G (genomic-base), and H (A corrected
by G) relationship matrices for diploid and autopolyploid species. Several methods
are implemented considering additive and non-additive models.",2019-07-30,Rodrigo Amadeu,http://github.com/prmunoz/AGHmatrix,TRUE,https://github.com/prmunoz/aghmatrix,11668,5,2020-01-14T14:20:10Z,2333.6
agop,"Tools supporting multi-criteria and group decision making,
including variable number of criteria, by means of
aggregation operators, spread measures,
fuzzy logic connectives, fusion functions,
and preordered sets. Possible applications include,
but are not limited to, quality management, scientometrics,
software engineering, etc.",2020-01-08,Marek Gagolewski,http://www.gagolewski.com/software/,TRUE,https://github.com/gagolews/agop,25078,3,2020-01-10T05:51:35Z,8359.333333333334
AGread,"Standardize the process of bringing various modes of output files
into R. Additionally, processes are provided to read and minimally pre-
process raw data from primary accelerometer and inertial measurement unit files,
as well as binary .gt3x files. ActiGraph monitors are used to estimate physical
activity outcomes via body-worn sensors that measure (e.g.) acceleration or
rotational velocity.",2020-02-26,Paul R. Hibbing,https://github.com/paulhibbing/AGread,TRUE,https://github.com/paulhibbing/agread,13402,7,2020-06-07T02:37:18Z,1914.5714285714287
agridat,"Datasets from books, papers, and websites related to agriculture.
Example graphics and analyses are included. Data come from small-plot trials,
multi-environment trials, uniformity trials, yield monitors, and more.",2018-07-06,Kevin Wright,https://github.com/kwstat/agridat,TRUE,https://github.com/kwstat/agridat,54968,64,2020-01-20T15:28:35Z,858.875
agriwater,"Spatial modeling of energy balance and actual
evapotranspiration using satellite images and meteorological data.
Options of satellite are: Landsat-8 (with and without thermal bands),
Sentinel-2 and MODIS. Respectively spatial resolutions are 30, 100,
10 and 250 meters. User can use data from a single meteorological
station or a grid of meteorological stations (using any spatial
interpolation method). Teixeira (2010) <doi:10.3390/rs0251287>.
Teixeira et al. (2015) <doi:10.3390/rs71114597>.
Silva, Manzione, and Albuquerque Filho (2018) <doi:10.3390/horticulturae4040044>.",2019-01-30,Cesar de Oliveira Ferreira Silva,NA,TRUE,https://github.com/cesarofs/agriwater,7271,3,2020-03-03T20:44:34Z,2423.6666666666665
AHMbook,"Provides functions and data sets to accompany the two volume publication ""Applied Hierarchical Modeling in Ecology: Analysis of distribution, abundance and species richness in R and BUGS"" by Marc Kéry and Andy Royle: volume 1 (2016, ISBN: 978-0-12-801378-6) and volume 2 (2020, ISBN: 978-0-12-809585-0), <https://www.mbr-pwrc.usgs.gov/pubanalysis/keryroylebook>.",2020-06-09,Mike Meredith,"https://www.mbr-pwrc.usgs.gov/pubanalysis/keryroylebook/,
https://sites.google.com/site/appliedhierarchicalmodeling/home",TRUE,https://github.com/mikemeredith/ahmbook,18100,9,2020-06-08T13:14:36Z,2011.111111111111
aimsir17,"Named after the Irish name for weather, this package contains
tidied data from the Irish Meteorological Service's hourly observations for 2017.
In all, the data sets include observations from 25 weather stations, and also
latitude and longitude coordinates for each weather station.",2019-12-02,Jim Duggan,"https://github.com/JimDuggan/aimsir17, https://www.met.ie",TRUE,https://github.com/jimduggan/aimsir17,3065,0,2019-12-04T14:14:22Z,NA
aire.zmvm,"Tools for downloading hourly averages, daily maximums and minimums from each of the
pollution, wind, and temperature measuring stations or geographic zones in the Mexico City
metro area. The package also includes the locations of each of the stations and zones. See
<http://aire.cdmx.gob.mx/> for more information.",2019-03-30,Diego Valle-Jones,"https://hoyodesmog.diegovalle.net/aire.zmvm/,
https://github.com/diegovalle/aire.zmvm",TRUE,https://github.com/diegovalle/aire.zmvm,16966,9,2020-05-05T02:50:57Z,1885.111111111111
aiRly,Get information about air quality using 'Airly' <https://airly.eu/> API through R.,2020-03-19,Piotr Janus,https://github.com/piotrekjanus/aiRly,TRUE,https://github.com/piotrekjanus/airly,1544,0,2020-03-19T22:31:01Z,NA
airportr,"Retrieves open source airport data and provides tools to look up information, translate names into codes and vice versa, as well as some basic calculation functions for measuring distances. Data is licensed under the Open Database License. ",2019-10-09,Dmitry Shkolnik,https://github.com/dshkol/airportr,TRUE,https://github.com/dshkol/airportr,9904,4,2020-05-24T06:22:26Z,2476
airqualityES,"These datasets contain daily air quality measurements in
Spain over a period of 18 years (from 2001 to 2018). The measurements refer to
several pollutants. These data are openly published by the Government of Spain.
The datasets were originally spread over a number of files and formats. Here,
the same information is contained in a simple dataframe for the convenience of
researchers, journalists or the general public. See the Spanish Government website
<http://www.miteco.gob.es/> for more information.",2020-02-29,Jose V. Die,https://github.com/jdieramon/airqualityES,TRUE,https://github.com/jdieramon/airqualityes,2098,0,2020-03-03T18:02:08Z,NA
airr,"Schema definitions and read, write and validation tools for data
formatted in accordance with the AIRR Data Representation schemas defined
by the AIRR Community <http://docs.airr-community.org>.",2020-05-27,Jason Vander Heiden,http://docs.airr-community.org,TRUE,https://github.com/airr-community/airr-standards,11243,18,2020-06-01T21:21:28Z,624.6111111111111
akc,"A tidy framework for automatic knowledge classification and visualization. Currently, the core functionality of the framework is mainly supported by modularity-based clustering (community detection) in keyword co-occurrence network, and focuses on co-word analysis of bibliometric research. However, the designed functions in 'akc' are general, and could be extended to solve other tasks in text mining as well. ",2020-01-30,Tian-Yuan Huang,https://github.com/hope-data-science/akc,TRUE,https://github.com/hope-data-science/akc,3382,8,2020-02-17T01:01:31Z,422.75
ALA4R,"The Atlas of Living Australia (ALA) provides tools to enable users
of biodiversity information to find, access, combine and visualise data on
Australian plants and animals; these have been made available from
<https://ala.org.au/>. ALA4R provides a subset of the tools to be
directly used within R. It enables the R community to directly access data
and resources hosted by the ALA.",2020-04-04,Peggy Newman,https://github.com/AtlasOfLivingAustralia/ALA4R,TRUE,https://github.com/atlasoflivingaustralia/ala4r,21946,32,2020-04-03T05:17:58Z,685.8125
albopictus,Implements discrete time deterministic and stochastic age-structured population dynamics models described in Erguler and others (2016) <doi:10.1371/journal.pone.0149282> and Erguler and others (2017) <doi:10.1371/journal.pone.0174293>.,2018-11-29,Kamil Erguler,https://github.com/kerguler/albopictusR,TRUE,https://github.com/kerguler/albopictusr,15595,0,2020-03-20T15:34:46Z,NA
alfr,"Allows you to connect to an 'Alfresco' content management repository and interact
with its contents using simple and intuitive functions. You will be able to establish a connection session to the 'Alfresco' repository,
read and upload content and manage folder hierarchies. For more details on the 'Alfresco' content management repository
see <https://www.alfresco.com/ecm-software/document-management>.",2019-07-19,Roy Wetherall,"https://github.com/rwetherall/alfr,
https://rwetherall.github.io/alfr/",TRUE,https://github.com/rwetherall/alfr,5086,0,2019-07-19T02:21:15Z,NA
AlgDesign,"Algorithmic experimental designs. Calculates exact and
approximate theory experimental designs for D,A, and I
criteria. Very large designs may be created. Experimental
designs may be blocked or blocked designs created from a
candidate list, using several criteria. The blocking can be
done when whole and within plot factors interact.",2019-11-29,Bob Wheeler,https://github.com/jvbraun/AlgDesign,TRUE,https://github.com/jvbraun/algdesign,815651,6,2019-11-29T02:10:41Z,135941.83333333334
algorithmia,"The company, Algorithmia, houses the largest marketplace of online
algorithms. This package essentially holds a bunch of REST wrappers that
make it very easy to call algorithms in the Algorithmia platform and access
files and directories in the Algorithmia data API. To learn more about the
services they offer and the algorithms in the platform visit
<http://algorithmia.com>. More information for developers can be found at
<http://developers.algorithmia.com>.",2019-08-01,James Sutton,NA,TRUE,https://github.com/algorithmiaio/algorithmia-r,17860,10,2019-08-02T19:03:08Z,1786
aliases2entrez,"Queries multiple resources authors HGNC (2019) <https://www.genenames.org>, authors limma (2015) <doi:10.1093/nar/gkv007>
to find the correspondence between evolving nomenclature of human gene symbols, aliases, previous symbols or synonyms with
stable, curated gene entrezID from NCBI database. This allows fast, accurate and up-to-date correspondence
between human gene expression datasets from various date and platform (e.g: gene symbol: BRCA1 - ID: 672).",2020-05-19,Raphael Bonnet,NA,TRUE,https://github.com/peyronlab/aliases2entrez,3859,1,2019-10-08T08:14:36Z,3859
almanac,"Provides tools for defining recurrence rules and
recurrence bundles. Recurrence rules are a programmatic way to define
a recurring event, like the first Monday of December. Multiple
recurrence rules can be combined into larger recurrence bundles.
Together, these provide a system for adjusting and generating
sequences of dates while simultaneously skipping over dates in a
recurrence bundle's event set.",2020-05-28,Davis Vaughan,https://github.com/DavisVaughan/almanac,TRUE,https://github.com/davisvaughan/almanac,138,52,2020-05-28T17:39:14Z,2.6538461538461537
alookr,"A collection of tools that support data splitting, predictive modeling, and model evaluation.
A typical function is to split a dataset into a training dataset and a test dataset.
Then compare the data distribution of the two datasets.
Another feature is to support the development of predictive models and to compare the performance of several predictive models,
helping to select the best model. ",2020-06-07,Choonghyun Ryu,NA,TRUE,https://github.com/choonghyunryu/alookr,1660,6,2020-06-07T15:02:02Z,276.6666666666667
alpaca,"Provides a routine to concentrate out factors with many levels during the
optimization of the log-likelihood function of the corresponding generalized linear model (glm).
The package is based on the algorithm proposed by Stammann (2018) <arXiv:1707.01815> and is
restricted to glm's that are based on maximum likelihood estimation and non-linear. It also offers
an efficient algorithm to recover estimates of the fixed effects in a post-estimation routine and
includes robust and multi-way clustered standard errors. Further the package provides analytical
bias corrections for binary choice models (logit and probit) derived by Fernandez-Val
and Weidner (2016) <doi:10.1016/j.jeconom.2015.12.014> and Hinz, Stammann, and Wanner (2019).",2020-01-12,Amrei Stammann,https://github.com/amrei-stammann/alpaca,TRUE,https://github.com/amrei-stammann/alpaca,27931,23,2020-01-19T12:39:26Z,1214.391304347826
alphavantager,"
Alpha Vantage has free historical financial information.
All you need to do is get a free API key at <https://www.alphavantage.co>.
Then you can use the R interface to retrieve free equity information.
Refer to the Alpha Vantage website for more information.",2020-03-01,Matt Dancho,https://github.com/business-science/alphavantager,TRUE,https://github.com/business-science/alphavantager,84585,44,2020-03-01T14:14:43Z,1922.3863636363637
altair,"Interface to 'Altair' <https://altair-viz.github.io>, which itself
is a 'Python' interface to 'Vega-Lite' <https://vega.github.io/vega-lite>.
This package uses the 'Reticulate' framework
<https://rstudio.github.io/reticulate> to manage the interface between R
and 'Python'.",2020-01-23,Ian Lyttle,https://github.com/vegawidget/altair,TRUE,https://github.com/vegawidget/altair,5680,68,2020-01-23T20:38:11Z,83.52941176470588
alterryx,"A tool to access each of the 'Alteryx' Gallery 'API' endpoints.
Users can queue jobs, poll job status, and retrieve application output as
a data frame. You will need an 'Alteryx' Server license and have 'Alteryx'
Gallery running to utilize this package. The 'API' is accessed through the
'URL' that you setup for the server running 'Alteryx' Gallery and more
information on the endpoints can be found at
<https://gallery.alteryx.com/api-docs/>.",2019-06-06,Michael Treadwell,"https://github.com/mtreadwell/alterryx,
https://gallery.alteryx.com/api-docs/",TRUE,https://github.com/mtreadwell/alterryx,17370,3,2019-09-03T17:15:38Z,5790
altR2,"Provides alternatives to the normal adjusted R-squared estimator for the estimation of the multiple squared correlation in regression models,
as fitted by the lm() function. The alternative estimators are described in Karch (2016) <DOI:10.31234/osf.io/v8dz5>.",2019-09-23,Julian Karch,https://github.com/karchjd/altR2,TRUE,https://github.com/karchjd/altr2,4061,0,2019-09-26T10:24:21Z,NA
ambient,"Generation of natural looking noise has many application within
simulation, procedural generation, and art, to name a few. The 'ambient'
package provides an interface to the 'FastNoise' C++ library and allows for
efficient generation of perlin, simplex, worley, cubic, value, and white
noise with optional perturbation in either 2, 3, or 4 (in case of simplex and
white noise) dimensions.",2020-03-21,Thomas Lin Pedersen,"https://ambient.data-imaginist.com,
https://github.com/thomasp85/ambient",TRUE,https://github.com/thomasp85/ambient,10102,59,2020-03-19T20:43:12Z,171.22033898305085
ameco,Annual macro-economic database provided by the European Commission.,2018-05-04,Eric Persson,http://github.com/expersso/ameco,TRUE,https://github.com/expersso/ameco,28191,6,2019-09-10T08:50:00Z,4698.5
amerika,"A color palette generator inspired by American politics, with colors ranging from blue on the
left to gray in the middle and red on the right. A variety of palettes allow for a range of applications
from brief discrete scales (e.g., three colors for Democrats, Independents, and Republicans) to
continuous interpolated arrays including dozens of shades graded from blue (left) to red (right). This
package greatly benefitted from building on the source code (with permission) from Ram and Wickham (2015).",2019-05-03,Philip Waggoner,NA,TRUE,https://github.com/pdwaggoner/amerika,7873,0,2019-11-14T20:08:36Z,NA
AmpGram,"Predicts antimicrobial peptides using random forests trained on the
n-gram encoded peptides. The implemented algorithm can be accessed from
both the command line and shiny-based GUI. The AmpGram model is too large
for CRAN and it has to be downloaded separately from the repository:
<https://github.com/michbur/AmpGramModel>.",2020-05-31,Michal Burdukiewicz,https://github.com/michbur/AmpGram,TRUE,https://github.com/michbur/ampgram,10,1,2020-05-22T09:21:48Z,10
ampir,"A toolkit to predict antimicrobial peptides from protein sequences on a genome-wide scale.
It incorporates two support vector machine models (""precursor"" and ""mature"") trained on publicly available antimicrobial peptide data using calculated
physico-chemical and compositional sequence properties described in Meher et al. (2017) <doi:10.1038/srep42362>.
In order to support genome-wide analyses, these models are designed to accept any type of protein as input
and calculation of compositional properties has been optimised for high-throughput use. For details see Fingerhut et al. 2020 <doi:10.1101/2020.05.07.082412>.",2020-05-11,Legana Fingerhut,https://github.com/Legana/ampir,TRUE,https://github.com/legana/ampir,3243,5,2020-05-11T12:00:38Z,648.6
amt,"Manage and analyze animal movement data. The functionality of 'amt' includes methods to calculate track statistics (e.g. step lengths, speed, or turning angles), prepare data for fitting habitat selection analyses (resource selection functions and step-selection functions <doi:10.1890/04-0953> and integrated step-selection functions <doi:10.1111/2041-210X.12528>), and simulation of space-use from fitted step-selection functions <doi:10.1002/ecs2.1771>.",2020-04-28,Johannes Signer,https://github.com/jmsigner/amt,TRUE,https://github.com/jmsigner/amt,19886,9,2020-05-22T11:29:08Z,2209.5555555555557
AmyloGram,"Predicts amyloid proteins using random forests trained on the
n-gram encoded peptides. The implemented algorithm can be accessed from
both the command line and shiny-based GUI.",2017-10-11,Michal Burdukiewicz,https://github.com/michbur/AmyloGram,TRUE,https://github.com/michbur/amylogram,15714,7,2020-05-21T19:41:56Z,2244.8571428571427
AnaCoDa,"Is a collection of models to analyze genome scale codon
data using a Bayesian framework. Provides visualization
routines and checkpointing for model fittings. Currently
published models to analyze gene data for selection on codon
usage based on Ribosome Overhead Cost (ROC) are: ROC (Gilchrist
et al. (2015) <doi:10.1093/gbe/evv087>), and ROC with phi
(Wallace & Drummond (2013) <doi:10.1093/molbev/mst051>). In
addition 'AnaCoDa' contains three currently unpublished models.
The FONSE (First order approximation On NonSense Error) model
analyzes gene data for selection on codon usage against
nonsense error rates. The PA (PAusing time) and PANSE (PAusing
time + NonSense Error) models use ribosome footprinting data to
estimate ribosome pausing times with and without
nonsense error rate from ribosome footprinting data.",2019-05-11,Cedric Landerer,https://github.com/clandere/AnaCoDa,TRUE,https://github.com/clandere/anacoda,13327,1,2019-06-12T11:13:15Z,13327
analogsea,"Provides a set of functions for interacting with the 'Digital
Ocean' API at <https://developers.digitalocean.com/documentation/v2>, including
creating images, destroying them, rebooting, getting details on regions, and
available images.",2020-01-30,Scott Chamberlain,https://github.com/sckott/analogsea,TRUE,https://github.com/sckott/analogsea,68685,108,2020-04-15T00:43:56Z,635.9722222222222
analogue,"Fits Modern Analogue Technique and Weighted Averaging transfer
function models for prediction of environmental data from species
data, and related methods used in palaeoecology.",2020-02-06,Gavin L. Simpson,https://github.com/gavinsimpson/analogue,TRUE,https://github.com/gavinsimpson/analogue,58809,11,2020-02-04T04:26:31Z,5346.272727272727
analogueExtra,"Provides additional functionality for the analogue package
that is not required by all users of the main package.",2016-04-10,Gavin L. Simpson,https://github.com/gavinsimpson/analogueExtra,TRUE,https://github.com/gavinsimpson/analogueextra,20394,1,2019-08-26T23:20:36Z,20394
analysisPipelines,"Enables data scientists to compose pipelines of analysis which consist of data manipulation, exploratory analysis & reporting, as well as modeling steps. Data scientists can use tools of their choice through an R interface, and compose interoperable pipelines between R, Spark, and Python.
Credits to Mu Sigma for supporting the development of the package.
Note - To enable pipelines involving Spark tasks, the package uses the 'SparkR' package.
The SparkR package needs to be installed to use Spark as an engine within a pipeline. SparkR is distributed natively with Apache Spark and is not distributed on CRAN. The SparkR version needs to directly map to the Spark version (hence the native distribution), and care needs to be taken to ensure that this is configured properly.
To install SparkR from Github, run the following command if you know the Spark version: 'devtools::install_github('apache/[email protected]', subdir='R/pkg')'.
The other option is to install SparkR by running the following terminal commands if Spark has already been installed: '$ export SPARK_HOME=/path/to/spark/directory && cd $SPARK_HOME/R/lib/SparkR/ && R -e ""devtools::install('.')""'.",2020-05-05,Mu Sigma,https://github.com/Mu-Sigma/analysis-pipelines,TRUE,https://github.com/mu-sigma/analysis-pipelines,8539,18,2020-05-05T14:06:35Z,474.3888888888889
Andromeda,"Storing very large data objects on a local drive, while still making it possible to manipulate the data in an efficient manner.",2020-06-03,Martijn Schuemie,"https://ohdsi.github.io/Andromeda/,
https://github.com/OHDSI/Andromeda",TRUE,https://github.com/ohdsi/andromeda,588,0,2020-06-03T05:11:30Z,NA
anglr,"Gives direct access to generic 3D tools and provides a full suite
of mesh-creation and 3D plotting functions. By extending the 'rgl' package
conversion and visualization functions for the 'mesh3d' class a wide variety of
complex spatial data can be brought into 3D scenes. These tools allow for
spatial raster, polygons, and lines that are common in 'GIS' contexts to be
converted into mesh forms with high flexibility and the ability to integrate
disparate data types. Vector and raster data can be seamlessly combined as
meshes, and surfaces can be set to have material properties based on data
values or with image textures. Textures and other data combinations use
projection transformations to map between coordinate systems, and objects can
be easily visualized in an interactive scene at any stage. This package relies
on the 'RTriangle' package for high-quality triangular meshing which is
licensed restrictively under 'CC BY-NC-SA 4.0'. ",2020-05-13,Michael D. Sumner,https://github.com/hypertidy/anglr,TRUE,https://github.com/hypertidy/anglr,1113,45,2020-05-21T10:39:40Z,24.733333333333334
angstroms,"Helper functions for working with Regional Ocean Modeling System 'ROMS' output. See
<https://www.myroms.org/> for more information about 'ROMS'. ",2017-05-01,Michael D. Sumner,https://github.com/mdsumner/angstroms,TRUE,https://github.com/mdsumner/angstroms,12475,2,2020-04-12T14:20:27Z,6237.5
animation,"Provides functions for animations in statistics, covering topics
in probability theory, mathematical statistics, multivariate statistics,
non-parametric statistics, sampling survey, linear models, time series,
computational statistics, data mining and machine learning. These functions
may be helpful in teaching statistics and data analysis. Also provided in this
package are a series of functions to save animations to various formats, e.g.
Flash, 'GIF', HTML pages, 'PDF' and videos. 'PDF' animations can be inserted
into 'Sweave' / 'knitr' easily.",2018-12-11,Yihui Xie,https://yihui.name/animation,TRUE,https://github.com/yihui/animation,632661,162,2020-05-20T04:36:17Z,3905.314814814815
aniview,Animate Shiny and R Markdown content when it comes into view using 'animate-css' effects thanks to 'jQuery AniView'.,2020-03-31,Félix Luginbuhl,"https://felixluginbuhl.com/aniview,
https://github.com/lgnbhl/aniview",TRUE,https://github.com/lgnbhl/aniview,1240,1,2020-04-11T14:36:36Z,1240
ANN2,"Training of neural networks for classification and regression tasks
using mini-batch gradient descent. Special features include a function for
training autoencoders, which can be used to detect anomalies, and some
related plotting functions. Multiple activation functions are supported,
including tanh, relu, step and ramp. For the use of the step and ramp
activation functions in detecting anomalies using autoencoders, see
Hawkins et al. (2002) <doi:10.1007/3-540-46145-0_17>. Furthermore,
several loss functions are supported, including robust ones such as Huber
and pseudo-Huber loss, as well as L1 and L2 regularization. The possible
options for optimization algorithms are RMSprop, Adam and SGD with momentum.
The package contains a vectorized C++ implementation that facilitates
fast training through mini-batch learning.",2020-03-14,Bart Lammers,https://github.com/bflammers/ANN2,TRUE,https://github.com/bflammers/ann2,29442,6,2020-03-14T21:49:29Z,4907
AnnotationBustR,Extraction of subsequences into FASTA files from GenBank annotations where gene names may vary among accessions.,2018-04-09,Samuel R. Borstein,"https://github.com/sborstein/AnnotationBustR,
https://www.ncbi.nlm.nih.gov/nuccore,
https://en.wikipedia.org/wiki/FASTA_format",TRUE,https://github.com/sborstein/annotationbustr,15667,0,2019-11-12T18:51:33Z,NA
anomalize,"
The 'anomalize' package enables a ""tidy"" workflow for detecting anomalies in data.
The main functions are time_decompose(), anomalize(), and time_recompose().
When combined, it's quite simple to decompose time series, detect anomalies,
and create bands separating the ""normal"" data from the anomalous data at scale (i.e. for multiple time series).
Time series decomposition is used to remove trend and seasonal components via the time_decompose() function
and methods include seasonal decomposition of time series by Loess (""stl"") and
seasonal decomposition by piecewise medians (""twitter""). The anomalize() function implements
two methods for anomaly detection of residuals including using an inner quartile range (""iqr"")
and generalized extreme studentized deviation (""gesd""). These methods are based on
those used in the 'forecast' package and the Twitter 'AnomalyDetection' package.
Refer to the associated functions for specific references for these methods. ",2019-09-21,Matt Dancho,https://github.com/business-science/anomalize,TRUE,https://github.com/business-science/anomalize,58802,222,2020-04-24T20:06:48Z,264.8738738738739
antaresProcessing,"
Process results generated by 'Antares', a powerful open source software developed by
RTE (Réseau de Transport d’Électricité) to simulate and study electric power systems (more information about
'Antares' here: <https://github.com/AntaresSimulatorTeam/Antares_Simulator>).
This package provides functions to create new columns like net load, load factors, upward and
downward margins or to compute aggregated statistics like economic surpluses
of consumers, producers and sectors.",2020-02-26,Veronique Bachelier,https://github.com/rte-antares-rpackage/antaresProcessing,TRUE,https://github.com/rte-antares-rpackage/antaresprocessing,29552,8,2020-02-28T14:38:34Z,3694
antaresRead,"Import, manipulate and explore results generated by 'Antares', a
powerful open source software developed by RTE (Réseau de Transport d’Électricité) to simulate and study electric power systems
(more information about 'Antares' here : <https://antares-simulator.org/>).",2020-03-18,Veronique Bachelier,https://github.com/rte-antares-rpackage/antaresRead,TRUE,https://github.com/rte-antares-rpackage/antaresread,38639,9,2020-03-04T08:58:03Z,4293.222222222223
antaresViz,"Visualize results generated by Antares, a powerful open source software
developed by RTE to simulate and study electric power systems
(more information about Antares here: <https://github.com/AntaresSimulatorTeam/Antares_Simulator>).
This package provides functions that create interactive charts to help
Antares users visually explore the results of their simulations.",2020-05-26,Veronique Bachelier,https://github.com/rte-antares-rpackage/antaresViz,TRUE,https://github.com/rte-antares-rpackage/antaresviz,21816,14,2020-05-26T08:56:49Z,1558.2857142857142
anthro,"Provides WHO Child Growth Standards (z-scores) with
confidence intervals and standard errors around the
prevalence estimates, taking into account complex sample designs.
More information on the methods is
available online:
<http://www.who.int/childgrowth/standards/en/>.",2020-05-21,Dirk Schumacher,https://github.com/dirkschumacher/anthro,TRUE,https://github.com/dirkschumacher/anthro,9660,11,2020-05-21T11:01:43Z,878.1818181818181
AntWeb,"A complete programmatic interface to the AntWeb database from the
California Academy of Sciences.",2014-08-14,Karthik Ram,https://github.com/ropensci/AntWeb,TRUE,https://github.com/ropensci/antweb,26568,8,2019-12-09T12:00:33Z,3321
anyflights,"Supplies a set of functions to query air travel data for user-
specified years and airports. Datasets include on-time flights, airlines,
airports, planes, and weather.",2020-04-27,Simon P. Couch,http://github.com/simonpcouch/anyflights,TRUE,https://github.com/simonpcouch/anyflights,3021,4,2020-05-01T18:11:34Z,755.25
anytime,"Convert input in any one of character, integer, numeric, factor,
or ordered type into 'POSIXct' (or 'Date') objects, using one of a number of
predefined formats, and relying on Boost facilities for date and time parsing.",2020-01-20,Dirk Eddelbuettel,http://dirk.eddelbuettel.com/code/anytime.html,TRUE,https://github.com/eddelbuettel/anytime,445286,124,2020-04-14T21:29:59Z,3591.016129032258
aof,"A breakpoint-based method to detect ontogenetic shifts in
univariate time-activity budget series of central-place foraging insects.
The method finds a single breakpoint according to the likelihood function.
The method was developed with honey bees in order to detect the Age at
Onset of Foraging (AOF), but can be used for the detection of other
ontogenetic shifts in other central-place foraging insects. ",2020-03-09,Fabrice Requier,https://github.com/frareb/aof/,TRUE,https://github.com/frareb/aof,1643,1,2020-05-07T09:38:35Z,1643
aos,"Trigger animation effects on scroll on any HTML element
of 'shiny' and 'rmarkdown', such as any text or plot, thanks to
the 'AOS' Animate On Scroll jQuery library.",2020-04-29,Félix Luginbuhl,"https://felixluginbuhl.com/aos, https://github.com/lgnbhl/aos",TRUE,https://github.com/lgnbhl/aos,708,0,2020-04-25T16:12:17Z,NA
apa,"Formatter functions in the 'apa' package take the return value of a
statistical test function, e.g. a call to chisq.test() and return a string
formatted according to the guidelines of the APA (American Psychological
Association).",2020-04-21,Daniel Gromer,https://github.com/dgromer/apa,TRUE,https://github.com/dgromer/apa,25777,23,2020-04-21T12:43:47Z,1120.7391304347825
apaTables,"A common task faced by researchers is the creation of APA style
(i.e., American Psychological Association style) tables from statistical
output. In R a large number of function calls are often needed to obtain all of
the desired information for a single APA style table. As well, the process of
manually creating APA style tables in a word processor is prone to transcription
errors. This package creates Word files (.doc files) containing APA style tables
for several types of analyses. Using this package minimizes transcription errors
and reduces the number of commands needed by the user.",2018-08-29,David Stanley,https://github.com/dstanley4/apaTables,TRUE,https://github.com/dstanley4/apatables,83807,32,2020-04-27T14:44:46Z,2618.96875
apcf,"The adapted pair correlation function transfers the concept of the
pair correlation function from point patterns to patterns of objects of
finite size and irregular shape (e.g. lakes within a country). This is a
reimplementation of the method suggested by Nuske et al. (2009)
<doi:10.1016/j.foreco.2009.09.050> using the libraries 'GEOS' and 'GDAL'
directly instead of through 'PostGIS'. ",2020-02-04,Robert Nuske,https://github.com/rnuske/apcf,TRUE,https://github.com/rnuske/apcf,8446,6,2020-04-14T07:22:41Z,1407.6666666666667
apex,"Toolkit for the analysis of multiple gene data (Jombart et al. 2017) <doi:10.1111/1755-0998.12567>.
Apex implements the new S4 classes 'multidna', 'multiphyDat' and associated methods to handle aligned DNA sequences from multiple genes.",2020-04-11,Klaus Schliep,https://github.com/thibautjombart/apex,TRUE,https://github.com/thibautjombart/apex,36653,4,2020-05-06T05:52:57Z,9163.25
apexcharter,"Provides an 'htmlwidgets' interface to 'apexcharts.js'.
'Apexcharts' is a modern JavaScript charting library to build interactive charts and visualizations with simple API.
'Apexcharts' examples and documentation are available here: <https://apexcharts.com/>.",2020-03-31,Victor Perrier,"https://github.com/dreamRs/apexcharter,
https://dreamrs.github.io/apexcharter",TRUE,https://github.com/dreamrs/apexcharter,7488,68,2020-06-09T15:03:49Z,110.11764705882354
aplot,"For many times, we are not just aligning plots as what 'cowplot' and 'patchwork' did. Users would like to align associated information that requires axes to be exactly matched in subplots, e.g. hierarchical clustering with a heatmap. This package provides utilities to align associated subplots to a main plot at different sides (left, right, top and bottom) with axes exactly matched. ",2020-04-07,Guangchuang Yu,https://github.com/YuLab-SMU/aplot,TRUE,https://github.com/yulab-smu/aplot,2976,32,2020-04-15T15:21:04Z,93
applicable,"A modeling package compiling applicability domain methods in R.
It combines different methods to measure the amount of extrapolation new
samples can have from the training set. See Netzeva et al (2005)
<doi:10.1177/026119290503300209> for an overview of applicability domains. ",2020-05-25,Marly Gotti,https://github.com/tidymodels/applicable,TRUE,https://github.com/tidymodels/applicable,172,23,2020-05-26T07:18:05Z,7.478260869565218
aprof,"Assists the evaluation of whether and
where to focus code optimization, using Amdahl's law and visual aids
based on line profiling. Amdahl's profiler organizes profiling output
files (including memory profiling) in a visually appealing way.
It is meant to help to balance development
vs. execution time by helping to identify the most promising sections
of code to optimize and projecting potential gains. The package is
an addition to R's standard profiling tools and is not a wrapper for them.",2018-05-22,Marco D. Visser,http://github.com/MarcoDVisser/aprof,TRUE,https://github.com/marcodvisser/aprof,29453,22,2020-01-18T16:45:45Z,1338.7727272727273
apyramid,"Provides a quick method for visualizing non-aggregated line-list
or aggregated census data stratified by age and one or two categorical
variables (e.g. gender and health status) with any number of values. It
returns a 'ggplot' object, allowing the user to further customize the
output. This package is part of the 'R4Epis' project
<https://r4epis.netlify.com>.",2020-05-08,Zhian N. Kamvar,"https://github.com/R4EPI/apyramid, https://r4epis.netlify.com",TRUE,https://github.com/r4epi/apyramid,3199,4,2020-05-08T14:44:23Z,799.75
aqp,"The Algorithms for Quantitative Pedology (AQP) project was started in 2009 to organize a loosely-related set of concepts and source code on the topic of soil profile visualization, aggregation, and classification into this package (aqp). Over the past 8 years, the project has grown into a suite of related R packages that enhance and simplify the quantitative analysis of soil profile data. Central to the AQP project is a new vocabulary of specialized functions and data structures that can accommodate the inherent complexity of soil profile information; freeing the scientist to focus on ideas rather than boilerplate data processing tasks <doi:10.1016/j.cageo.2012.10.020>. These functions and data structures have been extensively tested and documented, applied to projects involving hundreds of thousands of soil profiles, and deeply integrated into widely used tools such as SoilWeb <https://casoilresource.lawr.ucdavis.edu/soilweb-apps/>. Components of the AQP project (aqp, soilDB, sharpshootR, soilReports packages) serve an important role in routine data analysis within the USDA-NRCS Soil Science Division. The AQP suite of R packages offer a convenient platform for bridging the gap between pedometric theory and practice.",2020-01-24,Dylan Beaudette,https://github.com/ncss-tech/aqp,TRUE,https://github.com/ncss-tech/aqp,146911,18,2020-06-09T19:28:39Z,8161.722222222223
ArchaeoPhases,"Provides a list of functions for the statistical analysis of archaeological dates and groups of dates (see <doi:10.18637/jss.v093.c01> for a description). It is based on the post-processing of the Markov Chains whose stationary distribution is the posterior distribution of a series of dates. Such output can be simulated by different applications as for instance 'ChronoModel' (see <http://www.chronomodel.fr>), 'Oxcal' (see <https://c14.arch.ox.ac.uk/oxcal.html>) or 'BCal' (see <http://bcal.shef.ac.uk/>). The only requirement is to have a csv file containing a sample from the posterior distribution.",2020-05-29,Anne Philippe,NA,TRUE,https://github.com/archaeostat/archaeophases,19960,2,2020-06-09T13:41:13Z,9980
archivist,"Data exploration and modelling is a process in which a lot of data
artifacts are produced. Artifacts like: subsets, data aggregates, plots,
statistical models, different versions of data sets and different versions
of results. The more projects we work with the more artifacts are produced
and the harder it is to manage these artifacts. Archivist helps to store
and manage artifacts created in R. Archivist allows you to store selected
artifacts as binary files together with their metadata and relations.
Archivist allows to share artifacts with others, either through shared
folder or github. Archivist allows to look for already created artifacts by
using its class, name, date of creation or other properties. Makes it
easy to restore such artifacts. Archivist allows to check if new artifact
is the exact copy that was produced some time ago. That might be useful
either for testing or caching.",2019-08-31,Przemyslaw Biecek,https://pbiecek.github.io/archivist/,TRUE,https://github.com/pbiecek/archivist,101647,73,2019-08-26T21:27:49Z,1392.4246575342465
arcos,"A wrapper for the 'ARCOS API' <https://arcos-api.ext.nile.works/__swagger__/>
that returns raw and summarized data frames from the
Drug Enforcement Administration’s Automation of Reports and Consolidated Orders System,
a database that monitors controlled substances transactions between manufacturers and
distributors which was made public by The Washington Post and The Charleston Gazette-Mail.",2020-05-18,Andrew Ba Tran,https://github.com/wpinvestigative/arcos,TRUE,https://github.com/wpinvestigative/arcos,4654,14,2020-04-20T03:44:32Z,332.42857142857144
ARDL,"Creates complex autoregressive distributed lag (ARDL) models
providing just the order and automatically constructs the underlying
unrestricted and restricted error correction model (ECM). It also performs
the bounds-test for cointegration as described in Pesaran et al. (2001) <doi:10.1002/jae.616> and provides the multipliers and the cointegrating
equation.",2020-04-10,Kleanthis Natsiopoulos,https://github.com/Natsiopoulos/ARDL,TRUE,https://github.com/natsiopoulos/ardl,1279,2,2020-04-08T23:44:43Z,639.5
areal,"A pipeable, transparent implementation of areal weighted interpolation
with support for interpolating multiple variables in a single function call.
These tools provide a full-featured workflow for validation and estimation
that fits into both modern data management (e.g. tidyverse) and spatial
data (e.g. sf) frameworks.",2020-05-12,Christopher Prener,https://github.com/slu-openGIS/areal,TRUE,https://github.com/slu-opengis/areal,10582,64,2020-05-12T11:48:12Z,165.34375
argonDash,"Create awesome 'Bootstrap 4' dashboards powered by 'Argon'.
See more here <https://rinterface.github.io/argonDash/>.",2019-11-27,David Granjon,https://github.com/RinteRface/argonDash,TRUE,https://github.com/rinterface/argondash,42173,84,2019-11-27T08:13:48Z,502.0595238095238
argonR,"R wrapper around the argon HTML library.
More at <https://demos.creative-tim.com/argon-design-system/>.",2019-11-27,David Granjon,https://github.com/RinteRface/argonR,TRUE,https://github.com/rinterface/argonr,43416,38,2019-11-27T08:01:44Z,1142.5263157894738
argparse,"A command line parser to
be used with Rscript to write ""#!"" shebang scripts that gracefully
accept positional and optional arguments and automatically generate usage.",2019-03-08,Trevor L Davis,https://github.com/trevorld/r-argparse,TRUE,https://github.com/trevorld/r-argparse,582771,47,2020-02-01T09:24:08Z,12399.382978723404
ari,"Create videos from 'R Markdown' documents, or images and audio
files. These images can come from image files or HTML slides, and the audio
files can be provided by the user or computer voice narration can be created
using 'Amazon Polly'. The purpose of this package is to allow users to create
accessible, translatable, and reproducible lecture videos. See
<https://aws.amazon.com/polly/> for more information.",2020-02-08,Sean Kross,http://github.com/seankross/ari,TRUE,https://github.com/seankross/ari,14529,83,2020-05-29T16:53:30Z,175.04819277108433
aricode,"Implements an efficient O(n) algorithm based on bucket-sorting for
fast computation of standard clustering comparison measures. Available measures
include adjusted Rand index (ARI), normalized information distance (NID),
normalized mutual information (NMI), adjusted mutual information (AMI),
normalized variation information (NVI) and entropy, as described in Vinh et al (2009)
<doi:10.1145/1553374.1553511>.",2019-06-29,Julien Chiquet,https://github.com/jchiquet/aricode (dev version),TRUE,https://github.com/jchiquet/aricode,11730,8,2019-06-29T06:50:55Z,1466.25
arkdb,"Flat text files provide a robust, compressible, and portable
way to store tables from databases. This package provides convenient
functions for exporting tables from relational database connections
into compressed text files and streaming those text files back into
a database without requiring the whole table to fit in working memory.",2018-10-31,Carl Boettiger,https://github.com/ropensci/arkdb,TRUE,https://github.com/ropensci/arkdb,15086,54,2020-03-11T22:18:24Z,279.3703703703704
arkhe,"A collection of classes that represent
archaeological data. This package provides a set of S4 classes that
extend the basic matrix data type (absolute/relative frequency,
presence/absence data, co-occurrence matrix, etc.) upon which package
developers can build subclasses. It also provides a set of generic
methods (mutators and coercion mechanisms) and functions (e.g.
predicates). In addition, a few classes of general interest (e.g. that
represent stratigraphic relationships) are implemented.",2020-03-23,Nicolas Frerebeau,"http://arkhe.archaeo.science, https://github.com/nfrerebeau/arkhe,
https://cran.r-project.org/package=arkhe",TRUE,https://github.com/nfrerebeau/arkhe,4176,0,2020-05-20T16:51:21Z,NA
arm,"Functions to accompany A. Gelman and J. Hill, Data Analysis Using Regression and Multilevel/Hierarchical Models, Cambridge University Press, 2007.",2020-04-27,Yu-Sung Su,https://CRAN.R-project.org/package=arm,TRUE,https://github.com/suyusung/arm,1577259,16,2020-04-27T02:34:39Z,98578.6875
aroma.affymetrix,A cross-platform R framework that facilitates processing of any number of Affymetrix microarray samples regardless of computer system. The only parameter that limits the number of chips that can be processed is the amount of available disk space. The Aroma Framework has successfully been used in studies to process tens of thousands of arrays. This package has actively been used since 2006.,2019-06-23,Henrik Bengtsson,"https://www.aroma-project.org/,
https://github.com/HenrikBengtsson/aroma.affymetrix",TRUE,https://github.com/henrikbengtsson/aroma.affymetrix,69721,5,2019-12-16T05:47:10Z,13944.2
aroma.cn,"Methods for analyzing DNA copy-number data. Specifically,
this package implements the multi-source copy-number normalization (MSCN)
method for normalizing copy-number data obtained on various platforms and
technologies. It also implements the TumorBoost method for normalizing
paired tumor-normal SNP data.",2015-10-28,Henrik Bengtsson,"http://www.aroma-project.org/,
https://github.com/HenrikBengtsson/aroma.cn",TRUE,https://github.com/henrikbengtsson/aroma.cn,24651,1,2019-12-15T01:58:27Z,24651
aroma.core,"Core methods and classes used by higher-level 'aroma.*' packages
part of the Aroma Project, e.g. 'aroma.affymetrix' and 'aroma.cn'.",2020-02-04,Henrik Bengtsson,"https://github.com/HenrikBengtsson/aroma.core,
https://www.aroma-project.org/",TRUE,https://github.com/henrikbengtsson/aroma.core,79930,1,2020-02-04T18:11:00Z,79930
arsenal,"An Arsenal of 'R' functions for large-scale statistical summaries,
which are streamlined to work within the latest reporting tools in 'R' and
'RStudio' and which use formulas and versatile summary statistics for summary
tables and models. The primary functions include tableby(), a Table-1-like
summary of multiple variable types 'by' the levels of one or more categorical
variables; paired(), a Table-1-like summary of multiple variable types paired across
two time points; modelsum(), which performs simple model fits on one or more endpoints
for many variables (univariate or adjusted for covariates);
freqlist(), a powerful frequency table across many categorical variables;
comparedf(), a function for comparing data.frames; and
write2(), a function to output tables to a document.",2020-02-15,Ethan Heinzen,"https://github.com/eheinzen/arsenal,
https://cran.r-project.org/package=arsenal,
https://eheinzen.github.io/arsenal/",TRUE,https://github.com/eheinzen/arsenal,71898,144,2020-05-28T22:29:58Z,499.2916666666667
ARTool,"The Aligned Rank Transform for nonparametric
factorial ANOVAs as described by J. O. Wobbrock,
L. Findlater, D. Gergle, & J. J. Higgins, ""The Aligned
Rank Transform for nonparametric factorial analyses
using only ANOVA procedures"", CHI 2011 <DOI:10.1145/1978942.1978963>.",2020-03-20,Matthew Kay,https://github.com/mjskay/ARTool,TRUE,https://github.com/mjskay/artool,36708,22,2020-03-11T17:59:09Z,1668.5454545454545
ARTP2,Pathway and gene level association test using raw data or summary statistics.,2018-11-30,Han Zhang,https://github.com/zhangh12/ARTP2,TRUE,https://github.com/zhangh12/artp2,19596,4,2019-08-15T21:39:12Z,4899
arules,"Provides the infrastructure for representing,
manipulating and analyzing transaction data and patterns (frequent
itemsets and association rules). Also provides
C implementations of the association mining algorithms Apriori and Eclat.
Hahsler, Gruen and Hornik (2005) <doi:10.18637/jss.v014.i15>.",2020-05-15,Michael Hahsler,https://github.com/mhahsler/arules,TRUE,https://github.com/mhahsler/arules,1539641,119,2020-06-08T14:57:43Z,12938.159663865546
arulesCBA,Provides the infrastructure for association rule-based classification including algorithms like Classification Based on Associations (CBA).,2020-04-20,Michael Hahsler,https://github.com/ianjjohnson/arulesCBA,TRUE,https://github.com/ianjjohnson/arulescba,41233,27,2020-05-09T03:48:08Z,1527.148148148148
arulesNBMiner,NBMiner is an implementation of the model-based mining algorithm for mining NB-frequent itemsets and NB-precise rules. Michael Hahsler (2006) <doi:10.1007/s10618-005-0026-2>. ,2020-04-26,Michael Hahsler,https://github.com/mhahsler/arulesNBMiner,TRUE,https://github.com/mhahsler/arulesnbminer,31586,3,2020-04-26T20:04:43Z,10528.666666666666
arulesViz,Extends package 'arules' with various visualization techniques for association rules and itemsets. The package also includes several interactive visualizations for rule exploration.,2019-05-20,Michael Hahsler,https://github.com/mhahsler/arulesViz,TRUE,https://github.com/mhahsler/arulesviz,733638,33,2020-04-27T16:58:20Z,22231.454545454544
aRxiv,"An interface to the API for 'arXiv'
(<https://arxiv.org>), a repository of electronic preprints for
computer science, mathematics, physics, quantitative biology,
quantitative finance, and statistics.",2019-08-08,Karthik Ram,https://github.com/ropensci/aRxiv,TRUE,https://github.com/ropensci/arxiv,38466,40,2019-12-09T12:01:20Z,961.65
asciiSetupReader,"Lets you open a fixed-width ASCII file (.txt or
.dat) that has an accompanying setup file (.sps or .sas). These file
combinations are sometimes referred to as .txt+.sps, .txt+.sas,
.dat+.sps, or .dat+.sas. This will only run in a txt-sps or txt-sas
pair in which the setup file contains instructions to open that text
file. It will NOT open other text files, .sav, .sas, or .por data
files. Fixed-width ASCII files with setup files are common in older
(pre-2000) government data.",2020-03-21,Jacob Kaplan,https://github.com/jacobkap/asciiSetupReader,TRUE,https://github.com/jacobkap/asciisetupreader,16489,3,2020-03-20T18:33:32Z,5496.333333333333
ashr,"The R package 'ashr' implements an Empirical Bayes
approach for large-scale hypothesis testing and false discovery
rate (FDR) estimation based on the methods proposed in
M. Stephens, 2016, ""False discovery rates: a new deal"",
<DOI:10.1093/biostatistics/kxw041>. These methods can be applied
whenever two sets of summary statistics---estimated effects and
standard errors---are available, just as 'qvalue' can be applied
to previously computed p-values. Two main interfaces are
provided: ash(), which is more user-friendly; and ash.workhorse(),
which has more options and is geared toward advanced users. The
ash() and ash.workhorse() also provides a flexible modeling
interface that can accommodate a variety of likelihoods (e.g.,
normal, Poisson) and mixture priors (e.g., uniform, normal).",2020-02-20,Peter Carbonetto,https://github.com/stephens999/ashr,TRUE,https://github.com/stephens999/ashr,35356,62,2020-04-08T14:01:08Z,570.258064516129
AsioHeaders,"'Asio' is a cross-platform C++ library for network and low-level
I/O programming that provides developers with a consistent asynchronous model
using a modern C++ approach. It is also included in Boost but requires linking
when used with Boost. Standalone it can be used header-only (provided a recent
compiler). 'Asio' is written and maintained by Christopher M. Kohlhoff, and
released under the 'Boost Software License', Version 1.0.",2020-03-11,Dirk Eddelbuettel,NA,TRUE,https://github.com/eddelbuettel/asioheaders,56810,9,2020-05-12T23:09:01Z,6312.222222222223
aslib,"Provides an interface to the algorithm selection benchmark library
at <http://www.aslib.net> and the 'LLAMA' package
(<https://cran.r-project.org/package=llama>) for building
algorithm selection models; see Bischl et al. (2016)
<doi:10.1016/j.artint.2016.04.003>.",2020-05-24,Bernd Bischl,https://github.com/coseal/aslib-r/,TRUE,https://github.com/coseal/aslib-r,12741,6,2020-05-22T19:56:08Z,2123.5
aSPU,"R codes for the (adaptive) Sum of Powered Score ('SPU' and 'aSPU')
tests, inverse variance weighted Sum of Powered score ('SPUw' and 'aSPUw') tests
and gene-based and some pathway based association tests (Pathway based Sum of
Powered Score tests ('SPUpath'), adaptive 'SPUpath' ('aSPUpath') test, 'GEEaSPU'
test for multiple traits - single 'SNP' (single nucleotide polymorphism)
association in generalized estimation equations, 'MTaSPUs' test for multiple
traits - single 'SNP' association with Genome Wide Association Studies ('GWAS')
summary statistics, Gene-based Association Test that uses an extended 'Simes'
procedure ('GATES'), Hybrid Set-based Test ('HYST') and extended version
of 'GATES' test for pathway-based association testing ('GATES-Simes'). ).
The tests can be used with genetic and other data sets with covariates. The
response variable is binary or quantitative. Summary; (1) Single trait-'SNP' set
association with individual-level data ('aSPU', 'aSPUw', 'aSPUr'), (2) Single trait-'SNP'
set association with summary statistics ('aSPUs'), (3) Single trait-pathway
association with individual-level data ('aSPUpath'), (4) Single trait-pathway
association with summary statistics ('aSPUsPath'), (5) Multiple traits-single
'SNP' association with individual-level data ('GEEaSPU'), (6) Multiple traits-
single 'SNP' association with summary statistics ('MTaSPUs'), (7) Multiple traits-'SNP' set association with summary statistics('MTaSPUsSet'), (8) Multiple traits-pathway association with summary statistics('MTaSPUsSetPath').",2020-05-13,Il-Youp Kwak and others,https://github.com/ikwak2/aSPU,TRUE,https://github.com/ikwak2/aspu,29550,5,2020-05-13T04:58:29Z,5910
assignPOP,"Use Monte-Carlo and K-fold cross-validation coupled with machine-
learning classification algorithms to perform population assignment, with
functionalities of evaluating discriminatory power of independent training
samples, identifying informative loci, reducing data dimensionality for genomic
data, integrating genetic and non-genetic data, and visualizing results.",2020-03-16,Kuan-Yu (Alex) Chen,https://github.com/alexkychen/assignPOP,TRUE,https://github.com/alexkychen/assignpop,19011,10,2020-03-16T13:36:36Z,1901.1
ASSISTant,"Clinical trial design for subgroup selection in three-stage group
sequential trial. Includes facilities for design, exploration and analysis of
such trials. An implementation of the initial DEFUSE-3 trial is also provided
as a vignette.",2019-05-03,Balasubramanian Narasimhan,https://github.com/bnaras/ASSISTant,TRUE,https://github.com/bnaras/assistant,16799,0,2019-11-22T03:30:19Z,NA
astsa,"Data sets and scripts to accompany Time Series Analysis and Its Applications: With R Examples (4th ed), by R.H. Shumway and D.S. Stoffer. Springer Texts in Statistics, 2017, <DOI:10.1007/978-3-319-52452-8>, and Time Series: A Data Analysis Approach Using R. Chapman-Hall, 2019, <ISBN: 978-0367221096>. ",2020-05-01,David Stoffer,"https://github.com/nickpoison/astsa,
http://www.stat.pitt.edu/stoffer/tsa4/,
http://www.stat.pitt.edu/stoffer/tsda/",TRUE,https://github.com/nickpoison/astsa,300866,44,2020-06-09T19:51:46Z,6837.863636363636
atable,"Create Tables for Reporting Clinical Trials.
Calculates descriptive statistics and hypothesis tests,
arranges the results in a table ready for reporting with LaTeX, HTML or Word.",2020-04-13,Armin Ströbel,https://github.com/arminstroebel/atable,TRUE,https://github.com/arminstroebel/atable,10065,2,2020-04-13T12:09:08Z,5032.5
attachment,"Tools to help manage dependencies during package
development. This can retrieve all dependencies that are used in R
files in the ""R"" directory, in Rmd files in ""vignettes"" directory and
in 'roxygen2' documentation of functions. There is a function to
update the Description file of your package and a function to create a
file with the R commands to install all dependencies of your package.
All functions to retrieve dependencies of R scripts and Rmd files can
be used independently of a package development.",2020-03-15,Vincent Guyader,https://github.com/Thinkr-open/attachment,TRUE,https://github.com/thinkr-open/attachment,15143,54,2020-06-03T10:01:45Z,280.4259259259259
attempt,"Tools for defensive programming, inspired by 'purrr' mappers and
based on 'rlang'.'attempt' extends and facilitates defensive programming by
providing a consistent grammar, and provides a set of easy to use functions
for common tests and conditions. 'attempt' only depends on 'rlang', and
focuses on speed, so it can be easily integrated in other functions and
used in data analysis. ",2020-05-03,Colin Fay,https://github.com/ColinFay/attempt,TRUE,https://github.com/colinfay/attempt,46918,85,2020-04-17T10:38:35Z,551.9764705882353
attenuation,"Confidence curves, confidence intervals and p-values for
correlation coefficients corrected for attenuation due to measurement error.
Implements the methods described in Moss (2019, <arxiv:1911.01576>).",2019-11-08,Jonas Moss,https://github.com/JonasMoss/attenuation/,TRUE,https://github.com/jonasmoss/attenuation,3183,0,2019-11-08T14:26:00Z,NA
auditor,"Provides an easy to use unified interface for creating validation plots for any model.
The 'auditor' helps to avoid repetitive work consisting of writing code needed to create residual plots.
These visualizations allow one to assess and compare the goodness of fit, performance, and similarity of models. ",2020-05-28,Alicja Gosiewska,https://github.com/ModelOriented/auditor,TRUE,https://github.com/modeloriented/auditor,18470,47,2020-05-28T13:26:48Z,392.97872340425533
auk,"Extract and process bird sightings records from
eBird (<http://ebird.org>), an online tool for recording bird
observations. Public access to the full eBird database is via the
eBird Basic Dataset (EBD; see <http://ebird.org/ebird/data/download>
for access), a downloadable text file. This package is an interface to
AWK for extracting data from the EBD based on taxonomic, spatial, or
temporal filters, to produce a manageable file size that can be
imported into R.",2020-04-03,Matthew Strimas-Mackey,"https://github.com/CornellLabofOrnithology/auk,
http://CornellLabofOrnithology.github.io/auk/",TRUE,https://github.com/cornelllabofornithology/auk,24565,68,2020-06-09T17:24:17Z,361.25
auth0,"Uses Auth0 API (see <https://auth0.com> for more
information) to use a simple and secure authentication system. It provides
tools to log in and out a shiny application using social networks or a list
of e-mails.",2019-09-26,Julio Trecenti,NA,TRUE,https://github.com/curso-r/auth0,10817,78,2020-04-21T21:52:36Z,138.67948717948718
autocogs,Automatically calculates cognostic groups for plot objects and list column plot objects. Results are returned in a nested data frame.,2020-04-03,Barret Schloerke,https://github.com/schloerke/autocogs,TRUE,https://github.com/schloerke/autocogs,17603,3,2020-04-03T01:11:27Z,5867.666666666667
AutoDeskR,"An interface to the 'AutoDesk' 'API' Platform including the Authentication
'API' for obtaining authentication to the 'AutoDesk' Forge Platform, Data Management
'API' for managing data across the platform's cloud services, Design Automation 'API'
for performing automated tasks on design files in the cloud, Model
Derivative 'API' for translating design files into different formats, sending
them to the viewer app, and extracting design data, and Viewer for rendering
2D and 3D models (see <https://developer.autodesk.com> for more information).",2017-07-10,Paul Govan,https://github.com/paulgovan/autodeskr,TRUE,https://github.com/paulgovan/autodeskr,15800,5,2020-04-01T00:47:32Z,3160
autoimage,"Functions for displaying multiple images or scatterplots with a color
scale, i.e., heat maps, possibly with projected coordinates. The
package relies on the base graphics system, so graphics are
rendered rapidly.",2020-05-27,Joshua French,NA,TRUE,https://github.com/jpfrench81/autoimage,24036,5,2020-05-26T20:13:21Z,4807.2
autokeras,"R Interface to 'AutoKeras' <https://autokeras.com/>.
'AutoKeras' is an open source software library for Automated Machine
Learning (AutoML). The ultimate goal of AutoML is to provide easily
accessible deep learning tools to domain experts with limited data science
or machine learning background. 'AutoKeras' provides functions to
automatically search for architecture and hyperparameters of deep
learning models.",2020-02-20,Juan Cruz Rodriguez,https://github.com/r-tensorflow/autokeras,TRUE,https://github.com/r-tensorflow/autokeras,2239,62,2020-02-11T00:05:25Z,36.11290322580645
automultinomial,"Fits the autologistic model described in Besag's famous 1974 paper on auto- models <http://www.jstor.org/stable/2984812>. Fits a multicategory generalization of the autologistic model when there are more than 2 response categories. Provides support for both asymptotic and bootstrap confidence intervals. For full model descriptions and a guide to the use of this package, please see the vignette.",2018-10-31,Stephen Berg,NA,TRUE,https://github.com/stephenberg/automultinomial,14891,4,2019-10-23T21:29:38Z,3722.75
autoplotly,"Functionalities to automatically generate interactive visualizations for
statistical results supported by 'ggfortify', such as time series, PCA,
clustering and survival analysis, with 'plotly.js' <https://plot.ly/> and
'ggplot2' style. The generated visualizations can also be easily extended
using 'ggplot2' and 'plotly' syntax while staying interactive.",2018-04-21,Yuan Tang,https://github.com/terrytangyuan/autoplotly,TRUE,https://github.com/terrytangyuan/autoplotly,15715,55,2020-01-23T16:04:32Z,285.72727272727275
autoTS,"Offers a set of functions to easily make predictions for univariate time series.
'autoTS' is a wrapper of existing functions of the 'forecast' and 'prophet' packages,
harmonising their outputs in tidy dataframes and using default values for each.
The core function getBestModel() allows the user to effortlessly benchmark seven
algorithms along with a bagged estimator to identify which one performs the best
for a given time series.",2020-06-05,Vivien Roussez,https://github.com/vivienroussez/autoTS,TRUE,https://github.com/vivienroussez/autots,0,6,2020-06-05T12:31:14Z,0
av,"Bindings to 'FFmpeg' <http://www.ffmpeg.org/> AV library for working with
audio and video in R. Generates high quality video from images or R graphics with
custom audio. Also offers high performance tools for reading raw audio, creating
'spectrograms', and converting between countless audio / video formats. This package
interfaces directly to the C API and does not require any command line utilities.",2020-01-29,Jeroen Ooms,"https://docs.ropensci.org/av (website),
https://github.com/ropensci/av (devel)",TRUE,https://github.com/ropensci/av,208420,66,2020-05-16T09:48:52Z,3157.878787878788
available,"Check if a given package name is available to use. It checks the
name's validity. Checks if it is used on 'GitHub', 'CRAN' and 'Bioconductor'. Checks
for unintended meanings by querying Urban Dictionary, 'Wiktionary' and Wikipedia.",2019-07-19,Jim Hester,https://github.com/ropenscilabs/available,TRUE,https://github.com/ropenscilabs/available,20287,112,2020-05-15T12:01:09Z,181.13392857142858
avar,"Implements the allan variance and allan variance linear regression estimator for latent time series models. More details about the method can be found, for example, in Guerrier, S., Molinari, R., & Stebler, Y. (2016) <doi:10.1109/LSP.2016.2541867>. ",2020-01-15,Stéphane Guerrier,https://github.com/SMAC-Group/avar,TRUE,https://github.com/smac-group/avar,4585,0,2020-01-26T20:39:26Z,NA
AWAPer,"NetCDF files of the Bureau of Meteorology Australian Water Availability Project daily national climate grids are built and used for the efficient extraction of daily point and catchment area weighted precipitation, daily minimum temperature, daily maximum temperature, vapour pressure deficit, solar radiation and various measures of evapotranspiration. For details on the source climate data see <http://www.bom.gov.au/jsp/awap/>.",2020-02-01,Tim Peterson,https://github.com/peterson-tim-j/AWAPer,TRUE,https://github.com/peterson-tim-j/awaper,2364,4,2020-06-02T05:18:06Z,591
aweek,"Which day a week starts depends heavily on the either the local or
professional context. This package is designed to be a lightweight solution
to easily switching between week-based date definitions. ",2020-04-29,Zhian N. Kamvar,https://www.repidemicsconsortium.org/aweek,TRUE,https://github.com/reconhub/aweek,30433,10,2019-06-21T14:31:22Z,3043.3
aws.comprehend,"Client for 'AWS Comprehend' <https://aws.amazon.com/comprehend>, a cloud natural language processing service that can perform a number of quantitative text analyses, including language detection, sentiment analysis, and feature extraction.",2020-03-18,Thomas J. Leeper,https://github.com/cloudyr/aws.comprehend,TRUE,https://github.com/cloudyr/aws.comprehend,9276,11,2020-03-18T14:58:34Z,843.2727272727273
aws.ec2metadata,Retrieve Amazon EC2 instance metadata from within the running instance.,2019-07-15,Thomas J. Leeper,https://github.com/cloudyr/aws.ec2metadata,TRUE,https://github.com/cloudyr/aws.ec2metadata,37038411,9,2019-07-15T14:25:30Z,4115379
aws.iam,"A simple client for the Amazon Web Services ('AWS') Identity
and Access Management ('IAM') 'API' <https://aws.amazon.com/iam/>.",2020-04-07,Thomas J. Leeper,https://github.com/cloudyr/aws.iam,TRUE,https://github.com/cloudyr/aws.iam,22115,10,2020-05-11T04:54:42Z,2211.5
aws.kms,"Client package for the 'AWS Key Management Service' <https://aws.amazon.com/kms/>, a cloud service for managing encryption keys.",2020-04-14,Thomas J. Leeper,https://github.com/cloudyr/aws.kms,TRUE,https://github.com/cloudyr/aws.kms,7226,0,2020-04-13T23:22:47Z,NA
aws.lambda,"A simple client package for the Amazon Web Services ('AWS') Lambda
API <https://aws.amazon.com/lambda/>.",2020-04-15,Thomas J. Leeper,https://github.com/cloudyr/aws.lambda,TRUE,https://github.com/cloudyr/aws.lambda,17596,21,2020-04-29T15:53:42Z,837.9047619047619
aws.polly,"A client for AWS Polly <http://aws.amazon.com/documentation/polly>, a speech synthesis service.",2020-03-11,Thomas J. Leeper,https://github.com/cloudyr/aws.polly,TRUE,https://github.com/cloudyr/aws.polly,23628,19,2020-03-18T11:17:18Z,1243.578947368421
aws.s3,"A simple client package for the Amazon Web Services ('AWS') Simple
Storage Service ('S3') 'REST' 'API' <https://aws.amazon.com/s3/>.",2020-04-07,Simon Urbanek,https://github.com/cloudyr/aws.s3,TRUE,https://github.com/cloudyr/aws.s3,32914965,274,2020-05-27T21:58:33Z,120127.6094890511
aws.signature,"Generates version 2 and version 4 request signatures for Amazon Web Services ('AWS') <https://aws.amazon.com/> Application Programming Interfaces ('APIs') and provides a mechanism for retrieving credentials from environment variables, 'AWS' credentials files, and 'EC2' instance metadata. For use on 'EC2' instances, users will need to install the suggested package 'aws.ec2metadata' <https://cran.r-project.org/package=aws.ec2metadata>.",2020-06-01,Thomas J. Leeper,https://github.com/cloudyr/aws.signature,TRUE,https://github.com/cloudyr/aws.signature,1759157,21,2020-06-01T09:50:45Z,83769.38095238095
aws.transcribe,"Client for 'AWS Transcribe' <https://aws.amazon.com/documentation/transcribe>, a cloud transcription service that can convert an audio media file in English and other languages into a text transcript.",2020-03-11,Thomas J. Leeper,https://github.com/cloudyr/aws.transcribe,TRUE,https://github.com/cloudyr/aws.transcribe,8686,4,2020-03-18T13:11:10Z,2171.5
aws.translate,"A client for 'AWS Translate' <https://aws.amazon.com/documentation/translate>, a machine translation service that will convert a text input in one language into a text output in another language.",2020-03-11,Thomas J. Leeper,https://github.com/cloudyr/aws.translate,TRUE,https://github.com/cloudyr/aws.translate,8735,3,2020-03-18T12:58:06Z,2911.6666666666665
AzureAuth,"Provides Azure Active Directory (AAD) authentication functionality for R users of Microsoft's 'Azure' cloud <https://azure.microsoft.com/>. Use this package to obtain 'OAuth' 2.0 tokens for services including Azure Resource Manager, Azure Storage and others. It supports both AAD v1.0 and v2.0, as well as multiple authentication methods, including device code and resource owner grant. Tokens are cached in a user-specific directory obtained using the 'rappdirs' package. The interface is based on the 'OAuth' framework in the 'httr' package, but customised and streamlined for Azure. Part of the 'AzureR' family of packages.",2020-05-23,Hong Ooi,https://github.com/Azure/AzureAuth https://github.com/Azure/AzureR,TRUE,https://github.com/azure/azureauth,108428,19,2020-05-23T01:25:11Z,5706.736842105263
azuremlsdk,"Interface to the 'Azure Machine Learning' Software Development Kit
('SDK'). Data scientists can use the 'SDK' to train, deploy, automate, and
manage machine learning models on the 'Azure Machine Learning' service. To
learn more about 'Azure Machine Learning' visit the website:
<https://docs.microsoft.com/en-us/azure/machine-learning/service/overview-what-is-azure-ml>.",2020-02-05,Heemanshu Suri,https://github.com/azure/azureml-sdk-for-r,TRUE,https://github.com/azure/azureml-sdk-for-r,11969,43,2020-05-20T23:27:58Z,278.3488372093023
AzureRMR,"A lightweight but powerful R interface to the 'Azure Resource Manager' REST API. The package exposes a comprehensive class framework and related tools for creating, updating and deleting 'Azure' resource groups, resources and templates. While 'AzureRMR' can be used to manage any 'Azure' service, it can also be extended by other packages to provide extra functionality for specific services. Part of the 'AzureR' family of packages.",2020-05-15,Hong Ooi,https://github.com/Azure/AzureRMR https://github.com/Azure/AzureR,TRUE,https://github.com/azure/azurermr,111172,7,2020-06-09T11:58:54Z,15881.714285714286
AzureStor,"Manage storage in Microsoft's 'Azure' cloud: <https://azure.microsoft.com/services/storage>. On the admin side, 'AzureStor' includes features to create, modify and delete storage accounts. On the client side, it includes an interface to blob storage, file storage, and 'Azure Data Lake Storage Gen2': upload and download files and blobs; list containers and files/blobs; create containers; and so on. Authenticated access to storage is supported, via either a shared access key or a shared access signature (SAS). Part of the 'AzureR' family of packages.",2020-06-05,Hong Ooi,https://github.com/Azure/AzureStor https://github.com/Azure/AzureR,TRUE,https://github.com/azure/azurestor,109014,18,2020-06-09T13:58:24Z,6056.333333333333
AzureVM,"Functionality for working with virtual machines (VMs) in Microsoft's 'Azure' cloud: <https://azure.microsoft.com/en-us/services/virtual-machines/>. Includes facilities to deploy, startup, shutdown, and cleanly delete VMs and VM clusters. Deployment configurations can be highly customised, and can make use of existing resources as well as creating new ones. A selection of predefined configurations is provided to allow easy deployment of commonly used Linux and Windows images, including Data Science Virtual Machines. With a running VM, execute scripts and install optional extensions. Part of the 'AzureR' family of packages.",2020-02-06,Hong Ooi,https://github.com/Azure/AzureVM https://github.com/Azure/AzureR,TRUE,https://github.com/azure/azurevm,11425,7,2020-02-06T18:11:01Z,1632.142857142857
babelwhale,"Provides a unified interface to interact with 'docker' and 'singularity' containers.
You can execute a command inside a container, mount a volume or copy a file.",2019-10-03,Robrecht Cannoodt (<https://orcid.org/0000-0003-3641-729X>,https://github.com/dynverse/babelwhale,TRUE,https://github.com/dynverse/babelwhale,7312,10,2019-10-03T13:01:36Z,731.2
BacArena,"Can be used for simulation of organisms living in
communities (Bauer and Zimmermann (2017) <doi:10.1371/journal.pcbi.1005544>).
Each organism is represented individually and genome scale
metabolic models determine the uptake and release of compounds. Biological
processes such as movement, diffusion, chemotaxis and kinetics are available
along with data analysis techniques.",2020-05-20,Johannes Zimmermann,https://BacArena.github.io/,TRUE,https://github.com/euba/bacarena,21783,14,2020-05-20T15:49:49Z,1555.9285714285713
backbone,"Provides methods for extracting from a weighted graph
a binary or signed backbone that retains only the significant edges.
The user may input a weighted graph, or a bipartite graph
from which a weighted graph is first constructed via projection.
Backbone extraction methods include the stochastic degree sequence model (Neal, Z. P. (2014). <doi:10.1016/j.socnet.2014.06.001>),
hypergeometric model (Neal, Z. (2013). <doi:10.1007/s13278-013-0107-y>),
the fixed degree sequence model (Zweig, K. A., and Kaufmann, M. (2011). <doi:10.1007/s13278-011-0021-0>),
as well as a universal threshold method. ",2020-05-15,Rachel Domagalski,"https://github.com/domagal9/backbone,
https://www.zacharyneal.com/backbone",TRUE,https://github.com/domagal9/backbone,4293,11,2020-05-19T15:18:54Z,390.27272727272725
backports,"
Functions introduced or changed since R v3.0.0 are re-implemented in this
package. The backports are conditionally exported in order to let R resolve
the function name to either the implemented backport, or the respective base
version, if available. Package developers can make use of new functions or
arguments by selectively importing specific backports to
support older installations.",2020-05-13,Michel Lang,https://github.com/r-lib/backports,TRUE,https://github.com/r-lib/backports,16755091,45,2020-06-06T20:20:07Z,372335.35555555555
badger,Query information and generate badge for using in README and GitHub Pages.,2019-11-15,Guangchuang Yu,https://github.com/GuangchuangYu/badger,TRUE,https://github.com/guangchuangyu/badger,15235,89,2020-06-05T03:43:33Z,171.17977528089887
baggr,"Running and comparing meta-analyses of data with hierarchical
Bayesian models in Stan, including convenience functions for formatting
data, plotting and pooling measures specific to meta-analysis.",2020-02-28,Witold Wiecek,https://github.com/wwiecek/baggr,TRUE,https://github.com/wwiecek/baggr,5302,9,2020-02-27T14:09:39Z,589.1111111111111
baguette,"Tree- and rule-based models can be bagged using
this package and their predictions equations are stored
in an efficient format to reduce the model objects size
and speed. ",2020-04-14,Max Kuhn,https://github.com/tidymodels/baguette,TRUE,https://github.com/tidymodels/baguette,1097,8,2020-04-24T19:44:45Z,137.125
balance,"Balances have become a cornerstone of compositional data analysis. However,
conceptualizing balances is difficult, especially for high-dimensional data. Most often,
investigators visualize balances with ""balance dendrograms"". However, this visualization
tool does not scale well for large data. This package provides an alternative scheme for
visualizing balances, described in [Quinn (2018) <DOI:10.12688/f1000research.15858.1>].
This package also provides a method for principal balance analysis.",2019-07-10,Thomas Quinn,http://github.com/tpq/balance,TRUE,https://github.com/tpq/balance,9967,2,2019-07-10T04:47:48Z,4983.5
Ball,"Hypothesis tests and sure independence screening (SIS) procedure based on ball statistics, including ball divergence <doi:10.1214/17-AOS1579>, ball covariance <doi:10.1080/01621459.2018.1543600>, and ball correlation <doi:10.1080/01621459.2018.1462709>, are developed to analyze complex data in metric spaces, e.g, shape, directional, compositional and symmetric positive definite matrix data. The ball divergence and ball covariance based distribution-free tests are implemented to detecting distribution difference and association in metric spaces <arXiv:1811.03750>. Furthermore, several generic non-parametric feature selection procedures based on ball correlation, BCor-SIS and all of its variants, are implemented to tackle the challenge in the context of ultra high dimensional data.",2019-12-17,Xueqin Wang,https://github.com/Mamba413/Ball,TRUE,https://github.com/mamba413/ball,22081,12,2020-05-16T03:00:12Z,1840.0833333333333
bama,"Perform mediation analysis in the presence of high-dimensional
mediators based on the potential outcome framework. Bayesian Mediation
Analysis (BAMA), developed by Song et al (2019) <doi:10.1111/biom.13189>,
relies on two Bayesian sparse linear mixed models to simultaneously analyze
a relatively large number of mediators for a continuous exposure and outcome
assuming a small number of mediators are truly active. This sparsity
assumption also allows the extension of univariate mediator analysis by
casting the identification of active mediators as a variable selection
problem and applying Bayesian methods with continuous shrinkage priors on
the effects.",2020-05-02,Alexander Rix,https://github.com/umich-cphds/bama,TRUE,https://github.com/umich-cphds/bama,3970,0,2020-05-01T14:45:33Z,NA
BAMBI,Fit (using Bayesian methods) and simulate mixtures of univariate and bivariate angular distributions. Chakraborty and Wong (2017) <arXiv:1708.07804> .,2019-12-18,Saptarshi Chakraborty,https://arxiv.org/abs/1708.07804,TRUE,https://github.com/c7rishi/bambi,22297,1,2020-04-23T06:32:07Z,22297
bamboo,"Implementation of the Bamboo methods described in Li, Dahl, Vannucci, Joo, and Tsai (2014) <DOI:10.1371/journal.pone.0109832>.",2020-04-02,David B. Dahl,https://github.com/dbdahl/bamboo,TRUE,https://github.com/dbdahl/bamboo,19345,3,2020-04-02T21:40:17Z,6448.333333333333
bamp,"Bayesian Age-Period-Cohort Modeling and Prediction using efficient Markov Chain Monte Carlo Methods. This is the R version of the previous BAMP software as described in Volker Schmid and Leonhard Held (2007) <DOI:10.18637/jss.v021.i08> Bayesian Age-Period-Cohort Modeling and Prediction - BAMP, Journal of Statistical Software 21:8. This package includes checks of convergence using Gelman's R.",2020-01-23,Volker Schmid,https://volkerschmid.github.io/bamp/,TRUE,https://github.com/volkerschmid/bamp,8608,3,2020-01-21T21:50:12Z,2869.3333333333335
bang,"Provides functions for the Bayesian analysis of some simple
commonly-used models, without using Markov Chain Monte Carlo (MCMC)
methods such as Gibbs sampling. The 'rust' package
<https://cran.r-project.org/package=rust> is used to simulate a random
sample from the required posterior distribution, using the generalized
ratio-of-uniforms method. See Wakefield, Gelfand and Smith (1991)
<DOI:10.1007/BF01889987> for details. At the moment three conjugate
hierarchical models are available: beta-binomial, gamma-Poisson and a 1-way
analysis of variance (ANOVA).",2020-02-24,Paul J. Northrop,"https://paulnorthrop.github.io/bang/,
http://github.com/paulnorthrop/bang",TRUE,https://github.com/paulnorthrop/bang,14158,3,2020-02-25T10:20:04Z,4719.333333333333
banR,"A client for the ""Base Adresses Nationale"" (BAN) API, which allows to (batch)
geocode and reverse-geocode French addresses. For more information about the BAN and its API, please see <https://adresse.data.gouv.fr/api>. ",2020-05-11,Joel Gombin,"http://joelgombin.github.io/banR/,
http://github.com/joelgombin/banR/",TRUE,https://github.com/joelgombin/banr,13837,18,2020-05-11T08:54:10Z,768.7222222222222
BARIS,"Allows the user to access and import data from the rich French open data portal through the provided free API <https://doc.data.gouv.fr/api/reference/>.
The portal is free, and no credential is required for extracting datasets. ",2020-05-25,Mohamed El Fodil Ihaddaden,https://github.com/feddelegrand7/BARIS,TRUE,https://github.com/feddelegrand7/baris,1953,13,2020-06-03T12:46:38Z,150.23076923076923
bartCause,Contains a variety of methods to generate typical causal inference estimates using Bayesian Additive Regression Trees (BART) as the underlying regression model (Hill (2012) <doi:10.1198/jcgs.2010.08162>).,2020-04-02,Vincent Dorie,https://github.com/vdorie/bartCause,TRUE,https://github.com/vdorie/bartcause,1163,28,2020-03-31T21:00:54Z,41.535714285714285
baRulho,"Intended to facilitate acoustic analysis of (animal) sound transmission experiments, which typically aim to quantify changes in signal structure when transmitted in a given habitat by broadcasting and re-recording animal sounds at increasing distances. The package offers a workflow with functions to prepare the data set for analysis as well as to calculate and visualize several degradation metrics, including blur ratio, signal-to-noise ratio, excess attenuation and envelope correlation among others (Dabelsteen et al 1993 <doi:10.1121/1.406682>).",2020-06-07,Marcelo Araya-Salas,https://github.com/maRce10/baRulho,TRUE,https://github.com/marce10/barulho,2117,0,2020-05-11T21:35:16Z,NA
BAS,"Package for Bayesian Variable Selection and Model Averaging
in linear models and generalized linear models using stochastic or
deterministic sampling without replacement from posterior
distributions. Prior distributions on coefficients are
from Zellner's g-prior or mixtures of g-priors
corresponding to the Zellner-Siow Cauchy Priors or the
mixture of g-priors from Liang et al (2008)
<DOI:10.1198/016214507000001337>
for linear models or mixtures of g-priors from Li and Clyde
(2019) <DOI:10.1080/01621459.2018.1469992> in generalized linear models.
Other model selection criteria include AIC, BIC and Empirical Bayes
estimates of g. Sampling probabilities may be updated based on the sampled
models using sampling w/out replacement or an efficient MCMC algorithm which
samples models using a tree structure of the model space
as an efficient hash table. See Clyde, Ghosh and Littman (2010)
<DOI:10.1198/jcgs.2010.09049> for details on the sampling algorithms.
Uniform priors over all models or beta-binomial prior distributions on
model size are allowed, and for large p truncated priors on the model
space may be used to enforce sampling models that are full rank.
The user may force variables to always be included in addition to imposing
constraints that higher order interactions are included only if their
parents are included in the model.
This material is based upon work supported by the National Science
Foundation under Division of Mathematical Sciences grant 1106891.
Any opinions, findings, and
conclusions or recommendations expressed in this material are those of
the author(s) and do not necessarily reflect the views of the
National Science Foundation.",2020-01-24,Merlise Clyde,"https://www.r-project.org, https://github.com/merliseclyde/BAS",TRUE,https://github.com/merliseclyde/bas,90339,26,2020-03-09T00:48:42Z,3474.576923076923
base64url,"In contrast to RFC3548, the 62nd character (""+"") is replaced with
""-"", the 63rd character (""/"") is replaced with ""_"". Furthermore, the encoder
does not fill the string with trailing ""="". The resulting encoded strings
comply to the regular expression pattern ""[A-Za-z0-9_-]"" and thus are
safe to use in URLs or for file names.
The package also comes with a simple base32 encoder/decoder suited for
case insensitive file systems.",2018-05-14,Michel Lang,https://github.com/mllg/base64url,TRUE,https://github.com/mllg/base64url,79794,10,2020-01-11T00:16:27Z,7979.4
basetheme,Functions to create and select graphical themes for the base plotting system. Contains: 1) several custom pre-made themes 2) mechanism for creating new themes by making persistent changes to the graphical parameters of base plots.,2019-10-17,Karolis Koncevičius,https://github.com/KKPMW/basetheme,TRUE,https://github.com/kkpmw/basetheme,7445,84,2019-10-17T23:35:30Z,88.63095238095238
basf,"Resurrects the standard plot for shapes established by the
'base' and 'graphics' packages. This is suited to workflows that require
plotting using the established and traditional idioms of plotting spatially
coincident data where it belongs. This package depends on 'sf' and only replaces
the plot method. ",2020-04-15,Michael Sumner,https://github.com/mdsumner/basf,TRUE,https://github.com/mdsumner/basf,1007,0,2020-04-12T13:57:11Z,NA
basictabler,"Easily create tables from data
frames/matrices. Create/manipulate tables
row-by-row, column-by-column or cell-by-cell.
Use common formatting/styling to output
rich tables as 'HTML', 'HTML widgets' or to
'Excel'. ",2020-03-07,Christopher Bailiss,https://github.com/cbailiss/basictabler,TRUE,https://github.com/cbailiss/basictabler,16490,23,2020-03-07T10:04:51Z,716.9565217391304
basket,"Implementation of multisource exchangeability models for Bayesian analyses of prespecified subgroups arising in the context of basket trial design and monitoring. The R 'basket' package facilitates implementation of the binary, symmetric multi-source exchangeability model (MEM) with posterior inference arising from both exact computation and Markov chain Monte Carlo sampling. Analysis output includes full posterior samples as well as posterior probabilities, highest posterior density (HPD) interval boundaries, effective sample sizes (ESS), mean and median estimations, posterior exchangeability probability matrices, and maximum a posteriori MEMs. In addition to providing ""basketwise"" analyses, the package includes similar calculations for ""clusterwise"" analyses for which subgroups are combined into meta-baskets, or clusters, using graphical clustering algorithms that treat the posterior exchangeability probabilities as edge weights. In addition plotting tools are provided to visualize basket and cluster densities as well as their exchangeability. References include Hyman, D.M., Puzanov, I., Subbiah, V., Faris, J.E., Chau, I., Blay, J.Y., Wolf, J., Raje, N.S., Diamond, E.L., Hollebecque, A. and Gervais, R (2015) <doi:10.1056/NEJMoa1502309>; Hobbs, B.P. and Landin, R. (2018) <doi:10.1002/sim.7893>; Hobbs, B.P., Kane, M.J., Hong, D.S. and Landin, R. (2018) <doi:10.1093/annonc/mdy457>; and Kaizer, A.M., Koopmeiners, J.S. and Hobbs, B.P. (2017) <doi:10.1093/biostatistics/kxx031>.",2020-04-07,Michael J. Kane,https://github.com/kaneplusplus/basket,TRUE,https://github.com/kaneplusplus/basket,5769,1,2020-04-22T22:40:59Z,5769
batata,"
Allows the user to manage easily R packages removal. It offers many functions to display installed packages according to
specific dates and removes them if needed. The user is always prompted when running the removal functions in order to confirm
the required action. It offers also a function that removes all the installed packages in case one wants to switch from one R version
to another and start fresh. ",2020-06-09,Mohamed El Fodil Ihaddaden,https://github.com/feddelegrand7/batata,TRUE,https://github.com/feddelegrand7/batata,0,16,2020-06-09T14:07:19Z,0
BatchExperiments,"Extends the BatchJobs package to run statistical experiments on
batch computing clusters. For further details see the project web page.",2017-11-30,Bernd Bischl,https://github.com/tudo-r/BatchExperiments,TRUE,https://github.com/tudo-r/batchexperiments,33444,15,2020-05-19T20:00:48Z,2229.6
BatchJobs,"Provides Map, Reduce and Filter variants to generate jobs on batch
computing systems like PBS/Torque, LSF, SLURM and Sun Grid Engine.
Multicore and SSH systems are also supported. For further details see the
project web page.",2019-05-14,Bernd Bischl,https://github.com/tudo-r/BatchJobs,TRUE,https://github.com/tudo-r/batchjobs,139938,74,2020-05-19T19:59:59Z,1891.054054054054
batchtools,"As a successor of the packages 'BatchJobs' and 'BatchExperiments',
this package provides a parallel implementation of the Map function for high
performance computing systems managed by schedulers 'IBM Spectrum LSF'
(<https://www.ibm.com/us-en/marketplace/hpc-workload-management>),
'OpenLava' (<http://www.openlava.org/>), 'Univa Grid Engine'/'Oracle Grid
Engine' (<http://www.univa.com/>), 'Slurm' (<http://slurm.schedmd.com/>),
'TORQUE/PBS'
(<https://adaptivecomputing.com/cherry-services/torque-resource-manager/>),
or 'Docker Swarm' (<https://docs.docker.com/swarm/>).
A multicore and socket mode allow the parallelization on a local machines,
and multiple machines can be hooked up via SSH to create a makeshift
cluster. Moreover, the package provides an abstraction mechanism to define
large-scale computer experiments in a well-organized and reproducible way.",2020-03-19,Michel Lang,https://github.com/mllg/batchtools,TRUE,https://github.com/mllg/batchtools,93065,113,2020-05-03T19:35:23Z,823.5840707964602
bayes4psy,Contains several Bayesian models for data analysis of psychological tests. A user friendly interface for these models should enable students and researchers to perform professional level Bayesian data analysis without advanced knowledge in programming and Bayesian statistics. This package is based on the Stan platform (Carpenter et el. 2017 <doi:10.18637/jss.v076.i01>).,2020-02-20,Jure Demšar,https://github.com/bstatcomp/bayes4psy,TRUE,https://github.com/bstatcomp/bayes4psy,5754,3,2020-02-21T10:15:20Z,1918
bayesAB,"A suite of functions that allow the user to analyze A/B test
data in a Bayesian framework. Intended to be a drop-in replacement for
common frequentist hypothesis test such as the t-test and chi-sq test.",2019-07-02,Frank Portman,https://github.com/FrankPortman/bayesAB,TRUE,https://github.com/frankportman/bayesab,36276,253,2019-11-13T21:25:11Z,143.38339920948616
bayescopulareg,"Tools for Bayesian copula generalized linear models (GLMs).
The sampling scheme is based on Pitt, Chan, and Kohn (2006) <doi:10.1093/biomet/93.3.537>.
Regression parameters (including coefficients and dispersion parameters) are
estimated via the adaptive random walk Metropolis approach developed by
Haario, Saksman, and Tamminen (1999) <doi:10.1007/s001800050022>.
The prior for the correlation matrix is based on Hoff (2007) <doi:10.1214/07-AOAS107>.",2020-05-28,Ethan Alt,https://github.com/ethan-alt/bayescopulareg,TRUE,https://github.com/ethan-alt/bayescopulareg,658,0,2020-05-28T00:42:48Z,NA
BayesCTDesign,"A set of functions to help clinical trial researchers calculate power and sample size for two-arm Bayesian randomized clinical trials that do or do not incorporate historical control data. At some point during the design process, a clinical trial researcher who is designing a basic two-arm Bayesian randomized clinical trial needs to make decisions about power and sample size within the context of hypothesized treatment effects. Through simulation, the simple_sim() function will estimate power and other user specified clinical trial characteristics at user specified sample sizes given user defined scenarios about treatment effect,control group characteristics, and outcome. If the clinical trial researcher has access to historical control data, then the researcher can design a two-arm Bayesian randomized clinical trial that incorporates the historical data. In such a case, the researcher needs to work through the potential consequences of historical and randomized control differences on trial characteristics, in addition to working through issues regarding power in the context of sample size, treatment effect size, and outcome. If a researcher designs a clinical trial that will incorporate historical control data, the researcher needs the randomized controls to be from the same population as the historical controls. What if this is not the case when the designed trial is implemented? During the design phase, the researcher needs to investigate the negative effects of possible historic/randomized control differences on power, type one error, and other trial characteristics. Using this information, the researcher should design the trial to mitigate these negative effects. Through simulation, the historic_sim() function will estimate power and other user specified clinical trial characteristics at user specified sample sizes given user defined scenarios about historical and randomized control differences as well as treatment effects and outcomes. 
The results from historic_sim() and simple_sim() can be printed with print_table() and graphed with plot_table() methods. Outcomes considered are Gaussian, Poisson, Bernoulli, Lognormal, Weibull, and Piecewise Exponential. ",2019-08-02,Barry Eggleston,http://github.com/begglest/BayesCTDesign,TRUE,https://github.com/begglest/bayesctdesign,7694,0,2020-04-21T15:16:58Z,NA
bayesdfa,"Implements Bayesian dynamic factor analysis with 'Stan'. Dynamic
factor analysis is a dimension reduction tool for multivariate time series.
'bayesdfa' extends conventional dynamic factor models in several ways.
First, extreme events may be estimated in the latent trend by modeling
process error with a student-t distribution. Second, autoregressive and
moving average components can be optionally included. Third, the estimated
dynamic factors can be analyzed with hidden Markov models to evaluate
support for latent regimes.",2019-05-22,Eric J. Ward,https://github.com/fate-ewi/bayesdfa,TRUE,https://github.com/fate-ewi/bayesdfa,10562,17,2020-06-07T20:42:05Z,621.2941176470588
bayesDP,"Functions for data augmentation using the
Bayesian discount prior function for 1 arm and 2 arm clinical trials.",2020-02-03,Hickey Graeme L.,https://github.com/graemeleehickey/bayesDP,TRUE,https://github.com/graemeleehickey/bayesdp,23200,0,2020-03-16T16:38:09Z,NA
bayesGARCH,"Provides the bayesGARCH() function which performs the
Bayesian estimation of the GARCH(1,1) model with Student's t innovations as described in Ardia (2008) <doi:10.1007/978-3-540-78657-3>.",2020-04-20,David Ardia,https://github.com/ArdiaD/bayesGARCH,TRUE,https://github.com/ardiad/bayesgarch,40792,4,2020-04-19T20:50:32Z,10198
BayesianNetwork,"A 'Shiny' web application for creating interactive Bayesian Network models,
learning the structure and parameters of Bayesian networks, and utilities for classic
network analysis.",2018-12-02,Paul Govan,https://github.com/paulgovan/bayesiannetwork,TRUE,https://github.com/paulgovan/bayesiannetwork,20088,77,2020-03-09T15:33:11Z,260.8831168831169
BayesianTools,"General-purpose MCMC and SMC samplers, as well as plot and
diagnostic functions for Bayesian statistics, with a particular focus on
calibrating complex system models. Implemented samplers include various
Metropolis MCMC variants (including adaptive and/or delayed rejection MH), the
T-walk, two differential evolution MCMCs, two DREAM MCMCs, and a sequential
Monte Carlo (SMC) particle filter.",2019-12-09,Florian Hartig,https://github.com/florianhartig/BayesianTools,TRUE,https://github.com/florianhartig/bayesiantools,31878,58,2020-06-03T19:07:47Z,549.6206896551724
BayesLogit,"Tools for sampling from the PolyaGamma distribution based on Polson, Scott, and Windle (2013) <doi:10.1080/01621459.2013.829001>. Useful for logistic regression.",2019-09-26,Jesse Windle,https://github.com/jwindle/BayesLogit,TRUE,https://github.com/jwindle/bayeslogit,25943,3,2019-09-25T15:20:06Z,8647.666666666666
BayesMallows,"An implementation of the Bayesian version of the Mallows rank model (Vitelli et al., Journal of Machine Learning Research, 2018 <http://jmlr.org/papers/v18/15-481.html>; Crispino et al., to appear in Annals of Applied Statistics). Both Cayley, footrule, Hamming, Kendall, Spearman, and Ulam distances are supported in the models. The rank data to be analyzed can be in the form of complete rankings, top-k rankings, partially missing rankings, as well as consistent and inconsistent pairwise preferences. Several functions for plotting and studying the posterior distributions of parameters are provided. The package also provides functions for estimating the partition function (normalizing constant) of the Mallows rank model, both with the importance sampling algorithm of Vitelli et al. and asymptotic approximation with the IPFP algorithm (Mukherjee, Annals of Statistics, 2016 <doi:10.1214/15-AOS1389>).",2020-03-23,Oystein Sorensen,https://github.com/ocbe-uio/BayesMallows,TRUE,https://github.com/ocbe-uio/bayesmallows,11213,5,2020-05-11T08:38:09Z,2242.6
BayesNetBP,"Belief propagation methods in Bayesian Networks to propagate evidence through the network. The implementation of these methods are based on the article: Cowell, RG (2005). Local Propagation in Conditional Gaussian Bayesian Networks <http://www.jmlr.org/papers/volume6/cowell05a/>. The optional 'cyjShiny' package for running the Shiny app is available at <https://github.com/cytoscape/cyjShiny>. Please see the example in the documentation of 'runBayesNetApp' function for installing 'cyjShiny' package from GitHub.",2020-04-14,Han Yu,NA,TRUE,https://github.com/hyu-ub/bayesnetbp,15588,6,2020-04-10T14:31:37Z,2598
bayesplot,"Plotting functions for posterior analysis, MCMC diagnostics,
prior and posterior predictive checks, and other visualizations
to support the applied Bayesian workflow advocated in
Gabry, Simpson, Vehtari, Betancourt, and Gelman (2019) <doi:10.1111/rssa.12378>.
The package is designed not only to provide convenient functionality
for users, but also a common set of functions that can be easily used by
developers working on a variety of R packages for Bayesian modeling,
particularly (but not exclusively) packages interfacing with 'Stan'.",2020-05-28,Jonah Gabry,https://mc-stan.org/bayesplot,TRUE,https://github.com/stan-dev/bayesplot,570039,225,2020-06-03T22:25:37Z,2533.5066666666667
BayesPostEst,"An implementation of functions to generate and plot postestimation quantities after estimating Bayesian regression models using Markov chain Monte Carlo (MCMC). Functionality includes the estimation of the Precision-Recall curves (see Beger, 2016 <doi:10.2139/ssrn.2765419>), the implementation of the observed values method of calculating predicted probabilities by Hanmer and Kalkan (2013) <doi:10.1111/j.1540-5907.2012.00602.x>, the implementation of the average value method of calculating predicted probabilities (see King, Tomz, and Wittenberg, 2000 <doi:10.2307/2669316>), and the generation and plotting of first differences to summarize typical effects across covariates (see Long 1997, ISBN:9780803973749; King, Tomz, and Wittenberg, 2000 <doi:10.2307/2669316>). This package can be used with MCMC output generated by any Bayesian estimation tool including 'JAGS', 'BUGS', 'MCMCpack', and 'Stan'.",2020-05-28,Johannes Karreth,https://github.com/ShanaScogin/BayesPostEst,TRUE,https://github.com/shanascogin/bayespostest,4379,6,2020-06-09T23:26:05Z,729.8333333333334
BayesSampling,"Allows the user to apply the Bayes Linear approach to finite population with the Simple Random Sampling - BLE_SRS() - and
the Stratified Simple Random Sampling design - BLE_SSRS() - (both without replacement) and to the Ratio estimator (using auxiliary
information) - BLE_Ratio().
The Bayes linear estimation approach is applied to a general linear regression model for finite population prediction in BLE_Reg()
and it is also possible to achieve the design based estimators using vague prior distributions.
Based on Gonçalves, K.C.M, Moura, F.A.S and Migon, H.S.(2014) <https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886>.",2020-04-24,Pedro Soares Figueiredo,"https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886,
https://github.com/pedrosfig/BayesSampling",TRUE,https://github.com/pedrosfig/bayessampling,684,1,2020-04-20T13:25:01Z,684
bayestestR,"Provides utilities to describe posterior distributions and Bayesian models. It includes point-estimates such as Maximum A Posteriori (MAP), measures of dispersion (Highest Density Interval - HDI; Kruschke, 2015 <doi:10.1016/C2012-0-00477-2>) and indices used for null-hypothesis testing (such as ROPE percentage, pd and Bayes factors).",2020-04-20,Dominique Makowski,https://easystats.github.io/bayestestR/,TRUE,https://github.com/easystats/bayestestr,264586,219,2020-05-29T08:35:47Z,1208.1552511415525
BayesVarSel,"Conceived to calculate Bayes factors in Linear models and then to provide a formal Bayesian answer to testing and variable selection problems. From a theoretical side, the emphasis in this package is placed on the prior distributions and it allows a wide range of them: Jeffreys (1961); Zellner and Siow(1980)<DOI:10.1007/bf02888369>; Zellner and Siow(1984); Zellner (1986)<DOI:10.2307/2233941>; Fernandez et al. (2001)<DOI:10.1016/s0304-4076(00)00076-2>; Liang et al. (2008)<DOI:10.1198/016214507000001337> and Bayarri et al. (2012)<DOI:10.1214/12-aos1013>. The interaction with the package is through a friendly interface that syntactically mimics the well-known lm() command of R. The resulting objects can be easily explored providing the user very valuable information (like marginal, joint and conditional inclusion probabilities of potential variables; the highest posterior probability model, HPM; the median probability model, MPM) about the structure of the true -data generating- model. Additionally, this package incorporates abilities to handle problems with a large number of potential explanatory variables through parallel and heuristic versions of the main commands, Garcia-Donato and Martinez-Beneito (2013)<DOI:10.1080/01621459.2012.742443>. It also allows problems with p>n and p>>n and also incorporates routines to handle problems with variable selection with factors.",2020-02-18,Anabel Forte,https://github.com/comodin19/BayesVarSel,TRUE,https://github.com/comodin19/bayesvarsel,32299,5,2020-05-19T09:12:54Z,6459.8
bayesvl,"Provides users with its associated functions for pedagogical purposes in visually learning Bayesian networks and Markov chain Monte Carlo (MCMC) computations. It enables users to: a) Create and examine the (starting) graphical structure of Bayesian networks; b) Create random Bayesian networks using a dataset with customized constraints; c) Generate 'Stan' code for structures of Bayesian networks for sampling the data and learning parameters; d) Plot the network graphs; e) Perform Markov chain Monte Carlo computations and produce graphs for posteriors checks. The package refers to one reference item, which describes the methods and algorithms: Vuong, Quan-Hoang and La, Viet-Phuong (2019) <doi:10.31219/osf.io/w5dx6> The 'bayesvl' R package. Open Science Framework (May 18).",2019-05-24,Viet-Phuong La,https://github.com/sshpa/bayesvl,TRUE,https://github.com/sshpa/bayesvl,4929,4,2020-05-09T11:01:32Z,1232.25
BayLum,"Bayesian analysis of luminescence data and C-14 age estimates. Bayesian models are based on the following publications: Combes, B. & Philippe, A. (2017) <doi:10.1016/j.quageo.2017.02.003> and Combes et al (2015) <doi:10.1016/j.quageo.2015.04.001>. This includes, amongst others, data import, export, application of age models and palaeodose model.",2018-09-19,Anne Philippe,NA,TRUE,https://github.com/r-lum/baylum,13738,3,2020-05-27T20:59:26Z,4579.333333333333
baymedr,"BAYesian inference for MEDical designs in R. Convenience functions
for the computation of Bayes factors for common biomedical research
designs. Implemented are functions to test the equivalence (equiv_bf),
non-inferiority (infer_bf), and superiority (super_bf) of an experimental
group compared to a control group. Bayes factors for these three tests can
be computed based on raw data (x, y) or summary statistics (n_x, n_y,
mean_x, mean_y, sd_x, sd_y [or ci_margin and ci_level]), making it possible
to reanalyse findings (e.g., from publications) without the need to obtain
the raw data.",2019-10-21,Maximilian Linde,https://github.com/maxlinde/baymedr,TRUE,https://github.com/maxlinde/baymedr,3322,0,2019-10-17T12:04:48Z,NA
baytrends,"Enable users to evaluate long-term trends using a Generalized
Additive Modeling (GAM) approach. The model development includes selecting a
GAM structure to describe nonlinear seasonally-varying changes over time,
incorporation of hydrologic variability via either a river flow or salinity,
the use of an intervention to deal with method or laboratory changes
suspected to impact data values, and representation of left- and
interval-censored data. The approach has been applied to water quality data
in the Chesapeake Bay, a major estuary on the east coast of the United
States to provide insights to a range of management- and research-focused
questions.",2020-03-31,Rebecca Murphy,https://github.com/tetratech/baytrends,TRUE,https://github.com/tetratech/baytrends,9346,3,2020-03-31T16:03:19Z,3115.3333333333335
bazar,"A collection of miscellaneous functions for
copying objects to the clipboard ('Copy');
manipulating strings ('concat', 'mgsub', 'trim', 'verlan');
loading or showing packages ('library_with_dep', 'require_with_dep',
'sessionPackages');
creating or testing for named lists ('nlist', 'as.nlist', 'is.nlist'),
formulas ('is.formula'), empty objects ('as.empty', 'is.empty'),
whole numbers ('as.wholenumber', 'is.wholenumber');
testing for equality ('almost.equal', 'almost.zero') and computing
uniqueness ('almost.unique');
getting modified versions of usual functions ('rle2', 'sumNA');
making a pause or a stop ('pause', 'stopif');
converting into a function ('as.fun');
providing a C like ternary operator ('condition %?% true %:% false');
finding packages and functions ('get_all_pkgs', 'get_all_funs');
and others ('erase', '%nin%', 'unwhich', 'top', 'bot', 'normalize'). ",2019-03-15,Paul Poncet,https://github.com/paulponcet/bazar,TRUE,https://github.com/paulponcet/bazar,78202,0,2019-07-13T23:51:42Z,NA
BBmisc,"Miscellaneous helper functions for and from B. Bischl and
some other guys, mainly for package development.",2017-03-10,Bernd Bischl,https://github.com/berndbischl/BBmisc,TRUE,https://github.com/berndbischl/bbmisc,561546,13,2020-05-25T08:07:27Z,43195.846153846156
bbmle,Methods and functions for fitting maximum likelihood models in R.,2020-02-03,Ben Bolker,https://github.com/bbolker/bbmle,TRUE,https://github.com/bbolker/bbmle,366916,14,2020-04-29T00:21:36Z,26208.285714285714
bbricks,"A set of frequently used Bayesian parametric and nonparametric model structures, as well as a set of tools for common analytical tasks. Structures include linear Gaussian systems, Gaussian and Normal-Inverse-Wishart conjugate structure, Gaussian and Normal-Inverse-Gamma conjugate structure, Categorical and Dirichlet conjugate structure, Dirichlet Process on positive integers, Dirichlet Process in general, Hierarchical Dirichlet Process ... Tasks include updating posteriors, sampling from posteriors, calculating marginal likelihood, calculating posterior predictive densities, sampling from posterior predictive distributions, calculating ""Maximum A Posteriori"" (MAP) estimates ... See <https://chenhaotian.github.io/Bayesian-Bricks/> to get started.",2020-05-07,Haotian Chen,https://github.com/chenhaotian/Bayesian-Bricks,TRUE,https://github.com/chenhaotian/bayesian-bricks,1790,3,2020-05-07T19:04:49Z,596.6666666666666
bbsBayes,"The North American Breeding Bird Survey (BBS) is a long-running
program that seeks to monitor the status and trends of the breeding birds in
North America. Since its start in 1966, the BBS has accumulated over 50 years
of data for over 300 species of North American Birds. Given the temporal and
spatial structure of the data, hierarchical Bayesian models are used to assess
the status and trends of these 300+ species of birds. 'bbsBayes' allows you to perform
hierarchical Bayesian analysis of BBS data. You can run a full
model analysis for one or more species that you choose, or you can take
more control and specify how the data should be stratified, prepared
for 'JAGS', or modelled. The functions provided here allow you to replicate
analyses performed by the United State Geological Survey (USGS, see Link
and Sauer (2011) <doi:10.1525/auk.2010.09220>) and Canadian Wildlife Service
(CWS, see Smith and Edwards (2020) <doi:10.1101/2020.03.26.010215>).",2020-05-31,Brandon P.M. Edwards,https://github.com/BrandonEdwards/bbsBayes,TRUE,https://github.com/brandonedwards/bbsbayes,136,12,2020-05-30T20:04:14Z,11.333333333333334
bbw,"The blocked weighted bootstrap (BBW) is an estimation technique
for use with data from two-stage cluster sampled surveys in which either
prior weighting (e.g. population-proportional sampling or PPS as used in
Standardized Monitoring and Assessment of Relief and Transitions or SMART
surveys) or posterior weighting (e.g. as used in rapid assessment method or
RAM and simple spatial sampling method or S3M surveys). The method was
developed by Accion Contra la Faim, Brixton Health, Concern Worldwide,
Global Alliance for Improved Nutrition, UNICEF Sierra Leone, UNICEF Sudan
and Valid International. It has been tested by the Centers for Disease
Control (CDC) using infant and young child feeding (IYCF) data. See Cameron
et al (2008) <doi:10.1162/rest.90.3.414> for application of bootstrap
to cluster samples. See Aaron et al (2016) <doi:10.1371/journal.pone.0163176>
and Aaron et al (2016) <doi:10.1371/journal.pone.0162462> for application
of the blocked weighted bootstrap to estimate indicators from two-stage
cluster sampled surveys.",2018-01-17,Mark Myatt,https://github.com/validmeasures/bbw,TRUE,https://github.com/validmeasures/bbw,10257,2,2020-01-05T22:36:22Z,5128.5
bcdata,"Search, query, and download tabular and
'geospatial' data from the British Columbia Data Catalogue
(<https://catalogue.data.gov.bc.ca/>). Search catalogue data records
based on keywords, data licence, sector, data format, and B.C.
government organization. View metadata directly in R, download many
data formats, and query 'geospatial' data available via the B.C.
government Web Feature Service ('WFS') using 'dplyr' syntax.",2019-12-17,Andy Teucher,"https://bcgov.github.io/bcdata/,
https://catalogue.data.gov.bc.ca/,
https://github.com/bcgov/bcdata",TRUE,https://github.com/bcgov/bcdata,3949,45,2020-06-04T23:32:36Z,87.75555555555556
Bchron,"Enables quick calibration of radiocarbon dates under various
calibration curves (including user generated ones); age-depth modelling
as per the algorithm of Haslett and Parnell (2008) <DOI:10.1111/j.1467-9876.2008.00623.x>; Relative sea level
rate estimation incorporating time uncertainty in polynomial regression
models (Parnell and Gehrels 2015) <DOI:10.1002/9781118452547.ch32>; non-parametric phase modelling via
Gaussian mixtures as a means to determine the activity of a site
(and as an alternative to the Oxcal function SUM; currently
unpublished), and reverse calibration of dates from calibrated into
un-calibrated years (also unpublished).",2020-04-13,Andrew Parnell,http://andrewcparnell.github.io/Bchron/,TRUE,https://github.com/andrewcparnell/bchron,48248,17,2020-04-13T12:00:31Z,2838.1176470588234
bcmaps,"Provides access to various spatial layers for B.C., such as
administrative boundaries, natural resource management boundaries, etc.
All layers are imported from the 'bcmapsdata' package as 'sf' or 'Spatial' objects
through function calls in this package. All layers are in B.C. 'Albers' equal-area projection
<http://spatialreference.org/ref/epsg/nad83-bc-albers/>, which is the B.C.
government standard.",2020-04-29,Andy Teucher,https://github.com/bcgov/bcmaps,TRUE,https://github.com/bcgov/bcmaps,13558,42,2020-04-28T16:41:00Z,322.8095238095238
bcrm,"Implements a wide variety of one- and two-parameter Bayesian CRM
designs. The program can run interactively, allowing the user to enter outcomes
after each cohort has been recruited, or via simulation to assess operating
characteristics. See Sweeting et al. (2013): <doi:10.18637/jss.v054.i13>.",2019-08-23,Graham Wheeler,https://github.com/mikesweeting/bcrm,TRUE,https://github.com/mikesweeting/bcrm,30575,0,2019-08-19T09:40:49Z,NA
bcTSNE,"Implements the projected t-SNE method for batch correction of
high-dimensional data. Please see Aliverti et al. (2020)
<doi:10.1093/bioinformatics/btaa189> for more information.",2020-04-28,Dayne L Filer,https://github.com/emanuelealiverti/BC_tSNE,TRUE,https://github.com/emanuelealiverti/bc_tsne,660,4,2020-04-15T14:38:19Z,165
bdchecks,Supplies a Shiny app and a set of functions to perform and managing data checks for biodiversity data. ,2019-02-18,Povilas Gibas,https://github.com/bd-R/bdchecks,TRUE,https://github.com/bd-r/bdchecks,6698,1,2020-03-28T04:33:26Z,6698
bdclean,"Provides features to manage the complete workflow for biodiversity data cleaning. Uploading data, gathering input from users (in order to adjust cleaning procedures), cleaning data and finally, generating various reports and several versions of the data. Facilitates user-level data cleaning, designed for the inexperienced R user. T Gueta et al (2018) <doi:10.3897/biss.2.25564>. T Gueta et al (2017) <doi:10.3897/tdwgproceedings.1.20311>.",2019-04-11,Thiloshon Nagarajah,"https://github.com/bd-R/bdclean,
https://bd-r.github.io/The-bdverse/index.html",TRUE,https://github.com/bd-r/bdclean,5786,5,2020-05-16T10:51:58Z,1157.2
bdl,"Interface to Local Data Bank ('Bank Danych Lokalnych' - 'bdl') API
<https://api.stat.gov.pl/Home/BdlApi?lang=en> with set of useful tools like
quick plotting and map generating using data from bank. ",2020-04-01,Artur Sławomirski,https://github.com/statisticspoland/R_Package_to_API_BDL,TRUE,https://github.com/statisticspoland/r_package_to_api_bdl,3656,10,2020-03-29T21:54:24Z,365.6
bdpar,"
Provide a tool to easily build customized data flows to pre-process large volumes
of information from different sources. To this end, 'bdpar' allows to (i) easily use and
create new functionalities and (ii) develop new data source extractors according to the
user needs. Additionally, the package provides by default a predefined data flow
to extract and pre-process the most relevant information (tokens, dates, ... ) from some textual
sources (SMS, Email, tweets, YouTube comments).",2020-02-20,Miguel Ferreiro-Díaz,https://github.com/miferreiro/bdpar,TRUE,https://github.com/miferreiro/bdpar,4928,4,2020-02-20T09:47:40Z,1232
beakr,"A minimalist web framework for developing application programming
interfaces in R that provides a flexible framework for handling common
HTTP-requests, errors, logging, and an ability to integrate any R code as
server middle-ware.",2020-02-10,Jonathan Callahan,https://github.com/MazamaScience/beakr,TRUE,https://github.com/mazamascience/beakr,2069,55,2020-02-21T22:35:38Z,37.61818181818182
beam,"Fast Bayesian inference of marginal and conditional independence structures from high-dimensional data. Leday and Richardson (2019), Biometrics, <doi:10.1111/biom.13064>.",2020-05-28,Gwenael G.R. Leday,https://github.com/gleday/beam,TRUE,https://github.com/gleday/beam,11032,0,2020-05-28T17:52:32Z,NA
beats,"Import and process electrocardiogram (ECG) data.
Reads binary data files from UFI devices (.ube files) and provides a
Shiny app for finding and exporting heart beats.",2020-02-28,Max Czapanskiy,https://github.com/FlukeAndFeather/beats,TRUE,https://github.com/flukeandfeather/beats,1812,1,2020-02-20T15:59:05Z,1812
beautier,"'BEAST2' (<https://www.beast2.org>) is a widely used
Bayesian phylogenetic tool, that uses DNA/RNA/protein data
and many model priors to create a posterior of jointly estimated
phylogenies and parameters.
'BEAUti 2' (which is part of 'BEAST2') is a GUI tool
that allows users to specify the many possible setups
and generates the XML file 'BEAST2' needs to run.
This package provides a way to create 'BEAST2' input
files without active user input, but using
R function calls instead.",2020-05-06,Richèl J.C. Bilderbeek,"https://docs.ropensci.org/beautier,
https://github.com/ropensci/beautier",TRUE,https://github.com/ropensci/beautier,9420,6,2020-04-22T11:56:07Z,1570
BEDMatrix,"A matrix-like data structure that allows for efficient,
convenient, and scalable subsetting of binary genotype/phenotype files
generated by PLINK (<https://www.cog-genomics.org/plink2>), the whole
genome association analysis toolset, without loading the entire file into
memory.",2020-03-11,Alexander Grueneberg,https://github.com/QuantGen/BEDMatrix,TRUE,https://github.com/quantgen/bedmatrix,26565,10,2020-03-11T17:54:51Z,2656.5
beezdemand,"Facilitates many of the analyses performed in studies of
behavioral economic demand. The package supports commonly-used options for
modeling operant demand including (1) data screening proposed by Stein,
Koffarnus, Snider, Quisenberry, & Bickel (2015; <doi:10.1037/pha0000020>),
(2) fitting models of demand such as linear (Hursh, Raslear, Bauman,
& Black, 1989, <doi:10.1007/978-94-009-2470-3_22>), exponential (Hursh & Silberberg, 2008,
<doi:10.1037/0033-295X.115.1.186>) and modified exponential (Koffarnus,
Franck, Stein, & Bickel, 2015, <doi:10.1037/pha0000045>), and (3) calculating
numerous measures relevant to applied behavioral economists (Intensity,
Pmax, Omax). Also supports plotting and comparing data.",2018-07-31,Brent Kaplan,https://github.com/brentkaplan/beezdemand,TRUE,https://github.com/brentkaplan/beezdemand,8349,8,2020-06-08T15:57:20Z,1043.625
beginr,"Useful functions for R beginners, including hints for the arguments of the 'plot()' function, self-defined functions for error bars, user-customized pair plots and hist plots, enhanced linear regression figures, etc.. This package could be helpful to R experts as well.",2019-05-02,Peng Zhao,https://github.com/pzhaonet/beginr,TRUE,https://github.com/pzhaonet/beginr,16187,12,2020-02-10T08:52:24Z,1348.9166666666667
behavr,Implements an S3 class based on 'data.table' to store and process efficiently ethomics (high-throughput behavioural) data.,2019-01-03,Quentin Geissmann,https://github.com/rethomics/behavr,TRUE,https://github.com/rethomics/behavr,12218,4,2020-06-09T01:45:46Z,3054.5
belg,"Calculates the Boltzmann entropy of a landscape gradient.
This package uses the analytical method created by Gao, P., Zhang, H.
and Li, Z., 2018 (<doi:10.1111/tgis.12315>) and by Gao, P. and Li, Z., 2019
(<doi:10.1007/s10980-019-00854-3>). It also extend the original ideas by
allowing calculations on data with missing values.",2020-04-01,Jakub Nowosad,https://r-spatialecology.github.io/belg/,TRUE,https://github.com/r-spatialecology/belg,12220,9,2020-04-29T12:16:21Z,1357.7777777777778
bench,Tools to accurately benchmark and analyze execution times for R expressions.,2020-01-13,Jim Hester,https://github.com/r-lib/bench,TRUE,https://github.com/r-lib/bench,72927,176,2020-05-20T12:43:06Z,414.35795454545456
benchmarkme,"Benchmark your CPU and compare against other CPUs.
Also provides functions for obtaining system specifications, such as
RAM, CPU type, and R version.",2020-05-09,Colin Gillespie,https://github.com/csgillespie/benchmarkme,TRUE,https://github.com/csgillespie/benchmarkme,52838,27,2020-05-09T20:59:15Z,1956.962962962963
benchmarkmeData,"Crowd sourced benchmarks from running the
'benchmarkme' package.",2020-04-23,Colin Gillespie,https://github.com/csgillespie/benchmarkme-data,TRUE,https://github.com/csgillespie/benchmarkme-data,52627,1,2020-04-23T14:31:48Z,52627
benford.analysis,Provides tools that make it easier to validate data using Benford's Law.,2018-12-21,Carlos Cinelli,http://github.com/carloscinelli/benford.analysis,TRUE,https://github.com/carloscinelli/benford.analysis,36968,28,2019-08-24T05:04:03Z,1320.2857142857142
berryFunctions,"Draw horizontal histograms, color scattered points by 3rd dimension,
enhance date- and log-axis plots, zoom in X11 graphics, trace errors and warnings,
use the unit hydrograph in a linear storage cascade, convert lists to data.frames and arrays,
fit multiple functions.",2020-06-06,Berry Boessenkool,https://github.com/brry/berryFunctions,TRUE,https://github.com/brry/berryfunctions,61949,8,2020-06-09T12:04:45Z,7743.625
BEST,"An alternative to t-tests, producing posterior estimates
for group means and standard deviations and their differences and
effect sizes.",2020-05-18,John K. Kruschke and Mike Meredith,NA,TRUE,https://github.com/mikemeredith/best,50153,15,2020-05-22T06:35:25Z,3343.5333333333333
bestNormalize,"Estimate a suite of normalizing transformations, including
a new adaptation of a technique based on ranks which can guarantee
normally distributed transformed data if there are no ties: ordered
quantile normalization (ORQ). ORQ normalization combines a rank-mapping
approach with a shifted logit approximation that allows
the transformation to work on data outside the original domain. It is
also able to handle new data within the original domain via linear
interpolation. The package is built to estimate the best normalizing
transformation for a vector consistently and accurately. It implements
the Box-Cox transformation, the Yeo-Johnson transformation, three types
of Lambert WxF transformations, and the ordered quantile normalization
transformation. It also estimates the normalization efficacy of other
commonly used transformations, and finally it allows users to specify
custom transformations or normalization statistics.",2020-06-08,Ryan Andrew Peterson,https://github.com/petersonR/bestNormalize,TRUE,https://github.com/petersonr/bestnormalize,95532,17,2020-06-09T14:14:52Z,5619.529411764706
bets.covid19,"Implements likelihood inference for early epidemic analysis. BETS is short for the four key epidemiological events being modeled: Begin of exposure, End of exposure, time of Transmission, and time of Symptom onset. The package contains a dataset of the trajectory of confirmed cases during the coronavirus disease (COVID-19) early outbreak. More detail of the statistical methods can be found in Zhao et al. (2020) <arXiv:2004.07743>.",2020-05-12,Qingyuan Zhao,https://github.com/qingyuanzhao/bets.covid19,TRUE,https://github.com/qingyuanzhao/bets.covid19,463,26,2020-06-01T18:24:53Z,17.807692307692307
BFpack,"Implementation of various default Bayes factors
for testing statistical hypotheses. The package is
intended for applied quantitative researchers in the
social and behavioral sciences, medical research,
and related fields. The Bayes factor tests can be
executed for statistical models such as
univariate and multivariate normal linear models,
generalized linear models, special cases of
linear mixed models, survival models, relational
event models. Parameters that can be tested are
location parameters (e.g., regression coefficients),
variances (e.g., group variances), and measures of
association (e.g,. bivariate correlations).
The statistical underpinnings are
described in
Mulder, Hoijtink, and Xin (2019) <arXiv:1904.00679>,
Mulder and Gelissen (2019) <arXiv:1807.05819>,
Mulder (2016) <DOI:10.1016/j.jmp.2014.09.004>,
Mulder and Fox (2019) <DOI:10.1214/18-BA1115>,
Mulder and Fox (2013) <DOI:10.1007/s11222-011-9295-3>,
Boeing-Messing, van Assen, Hofman, Hoijtink, and Mulder <DOI:10.1037/met0000116>,
Hoijtink, Mulder, van Lissa, and Gu, (2018) <DOI:10.31234/osf.io/v3shc>,
Gu, Mulder, and Hoijtink, (2018) <DOI:10.1111/bmsp.12110>,
Hoijtink, Gu, and Mulder, (2018) <DOI:10.1111/bmsp.12145>, and
Hoijtink, Gu, Mulder, and Rosseel, (2018) <DOI:10.1037/met0000187>.",2020-05-11,Joris Mulder,https://github.com/jomulder/BFpack,TRUE,https://github.com/jomulder/bfpack,4090,6,2020-05-22T15:03:18Z,681.6666666666666
BFS,Search and download data from the Swiss Federal Statistical Office <https://www.bfs.admin.ch/>.,2020-03-25,Félix Luginbuhl,"https://felixluginbuhl.com/BFS, https://github.com/lgnbhl/BFS",TRUE,https://github.com/lgnbhl/bfs,3996,4,2020-03-30T16:58:08Z,999
bfsl,"Provides the solution from York (1968) <doi:10.1016/S0012-821X(68)80059-7>
for fitting a straight line to bivariate data with errors in both coordinates.
It gives unbiased estimates of the intercept, slope and standard errors of the
best-fit straight line to independent points with (possibly correlated)
normally distributed errors in both x and y. Other commonly used
errors-in-variables methods, such as orthogonal distance regression, geometric
mean regression or Deming regression are special cases of York’s solution.",2018-12-16,Patrick Sturm,https://github.com/pasturm/bfsl,TRUE,https://github.com/pasturm/bfsl,6165,0,2020-04-17T10:26:03Z,NA
bfsMaps,"At the Swiss Federal Statistical Office (SFSO), spatial maps of Switzerland are available free of charge as 'Cartographic bases for small-scale thematic mapping'. This package contains convenience functions to import ESRI (Environmental Systems Research Institute) shape files using the package 'rgdal' and to plot them easily and quickly without having to worry too much about the technical details.
It contains utilities to combine multiple areas to one single polygon and to find neighbours for single regions. For any point on a map, a special locator can be used to determine to which municipality, district or canton it belongs.",2020-04-17,Andri Signorell,https://github.com/AndriSignorell/bfsMaps/,TRUE,https://github.com/andrisignorell/bfsmaps,1275,0,2020-04-28T15:45:34Z,NA
bfw,"Derived from the work of Kruschke (2015, <ISBN:9780124058880>),
the present package aims to provide a framework for conducting Bayesian
analysis using Markov chain Monte Carlo (MCMC) sampling utilizing the
Just Another Gibbs Sampler ('JAGS', Plummer, 2003, <http://mcmc-jags.sourceforge.net/>).
The initial version includes several modules for conducting Bayesian
equivalents of chi-squared tests, analysis of variance (ANOVA),
multiple (hierarchical) regression, softmax regression, and for fitting data
(e.g., structural equation modeling).",2019-11-25,Øystein Olav Skaar,https://github.com/oeysan/bfw/,TRUE,https://github.com/oeysan/bfw,11096,9,2019-11-25T08:02:17Z,1232.888888888889
BGData,"An umbrella package providing a phenotype/genotype data structure
and scalable and efficient computational methods for large genomic datasets
in combination with several other packages: 'BEDMatrix', 'LinkedMatrix',
and 'symDMatrix'.",2019-01-25,Alexander Grueneberg,https://github.com/QuantGen/BGData,TRUE,https://github.com/quantgen/bgdata,14333,17,2020-05-12T21:18:25Z,843.1176470588235
BGGM,"Fit Bayesian Gaussian graphical models. The methods are separated into
two Bayesian approaches for inference: hypothesis testing and estimation. There are
extensions for confirmatory hypothesis testing, comparing Gaussian graphical models,
and node wise predictability. These methods were recently introduced in the Gaussian
graphical model literature, including
Williams (2019) <doi:10.31234/osf.io/x8dpr>,
Williams and Mulder (2019) <doi:10.31234/osf.io/ypxd8>,
Williams, Rast, Pericchi, and Mulder (2019) <doi:10.31234/osf.io/yt386>.",2020-05-31,Donald Williams,NA,TRUE,https://github.com/donaldrwilliams/bggm,2572,20,2020-06-08T22:17:02Z,128.6
bggum,"Provides a Metropolis-coupled Markov chain Monte Carlo sampler,
post-processing and parameter estimation functions, and plotting utilities
for the generalized graded unfolding model of Roberts, Donoghue, and
Laughlin (2000) <doi:10.1177/01466216000241001>.",2020-01-19,JBrandon Duck-Mayr,https://github.com/duckmayr/bggum,TRUE,https://github.com/duckmayr/bggum,2731,2,2020-01-19T13:37:45Z,1365.5
BH,"Boost provides free peer-reviewed portable C++ source
libraries. A large part of Boost is provided as C++ template code
which is resolved entirely at compile-time without linking. This
package aims to provide the most useful subset of Boost libraries
for template use among CRAN packages. By placing these libraries in
this package, we offer a more efficient distribution system for CRAN
as replication of this code in the sources of other packages is
avoided. As of release 1.72.0-3, the following Boost libraries are
included: 'accumulators' 'algorithm' 'align' 'any' 'atomic' 'bimap'
'bind' 'circular_buffer' 'compute' 'concept' 'config' 'container'
'date_time' 'detail' 'dynamic_bitset' 'exception' 'flyweight'
'foreach' 'functional' 'fusion' 'geometry' 'graph' 'heap' 'icl'
'integer' 'interprocess' 'intrusive' 'io' 'iostreams' 'iterator'
'math' 'move' 'mp11' 'mpl' 'multiprcecision' 'numeric' 'pending'
'phoenix' 'polygon' 'preprocessor' 'propery_tree' 'random' 'range'
'scope_exit' 'smart_ptr' 'sort' 'spirit' 'tuple' 'type_traits'
'typeof' 'unordered' 'utility' 'uuid'.",2020-01-08,Dirk Eddelbuettel,https://github.com/eddelbuettel/bh,TRUE,https://github.com/eddelbuettel/bh,19932259,66,2020-05-03T19:22:25Z,302003.92424242425
bib2df,Parse a BibTeX file to a data.frame to make it accessible for further analysis and visualization.,2019-05-22,Philipp Ottolinger,https://github.com/ropensci/bib2df,TRUE,https://github.com/ropensci/bib2df,16578,81,2019-12-09T12:07:40Z,204.66666666666666
bibliometrix,"Tool for quantitative research in scientometrics and bibliometrics.
It provides various routines for importing bibliographic data from 'SCOPUS' (<http://scopus.com>),
'Clarivate Analytics Web of Science' (<http://www.webofknowledge.com/>), 'Digital Science Dimensions'
(<https://www.dimensions.ai/>), 'Cochrane Library' (<http://www.cochranelibrary.com/>)
and 'PubMed' (<https://www.ncbi.nlm.nih.gov/pubmed/>) databases, performing bibliometric analysis
and building networks for co-citation, coupling, scientific collaboration and co-word analysis.",2020-05-25,Massimo Aria,"https://www.bibliometrix.org,
https://github.com/massimoaria/bibliometrix",TRUE,https://github.com/massimoaria/bibliometrix,181549,148,2020-06-01T19:01:45Z,1226.6824324324325
biclique,"A tool for enumerating maximal complete bipartite graphs. The input should be a edge list file or a binary matrix file.
The output are maximal complete bipartite graphs. Algorithms used can be found in this paper Y. Lu et al. BMC Res Notes 13, 88 (2020) <doi:10.1186/s13104-020-04955-0>.",2020-03-03,Yuping Lu,https://github.com/YupingLu/biclique,TRUE,https://github.com/yupinglu/biclique,10895,14,2020-03-03T21:51:21Z,778.2142857142857
biclustermd,"Biclustering is a statistical learning technique that simultaneously
partitions and clusters rows and columns of a data matrix. Since the solution
space of biclustering is in infeasible to completely search with current
computational mechanisms, this package uses a greedy heuristic. The algorithm
featured in this package is, to the best our knowledge, the first biclustering
algorithm to work on data with missing values. Li, J., Reisner, J., Pham, H.,
Olafsson, S., and Vardeman, S. (2020) Biclustering with Missing Data. Information
Sciences, 510, 304–316.",2020-04-15,John Reisner,http://github.com/jreisner/biclustermd,TRUE,https://github.com/jreisner/biclustermd,4669,3,2020-04-15T01:03:16Z,1556.3333333333333
BifactorIndicesCalculator,"The calculator computes bifactor indices such as explained common variance (ECV), hierarchical Omega (OmegaH), percentage of uncontaminated correlations (PUC), item explained common variance (I-ECV), and more. This package is an R version of the 'Excel' based 'Bifactor Indices Calculator' (Dueber, 2017) <doi:10.13023/edp.tool.01> with added convenience features for directly utilizing output from several programs that can fit confirmatory factor analysis or item response models.",2020-04-11,David Dueber,https://github.com/ddueber/BifactorIndicesCalculator,TRUE,https://github.com/ddueber/bifactorindicescalculator,3501,2,2020-04-10T01:44:49Z,1750.5
bife,"Estimates fixed effects binary choice models (logit and probit) with potentially many
individual fixed effects and computes average partial effects. Incidental parameter bias can be
reduced with an asymptotic bias-correction proposed by Fernandez-Val (2009)
<doi:10.1016/j.jeconom.2009.02.007>.",2020-01-12,Amrei Stammann,https://github.com/amrei-stammann/bife,TRUE,https://github.com/amrei-stammann/bife,38053,1,2020-01-19T12:42:15Z,38053
BIFIEsurvey,"
Contains tools for survey statistics (especially in educational
assessment) for datasets with replication designs (jackknife,
bootstrap, replicate weights; see Kolenikov, 2010;
Pfefferman & Rao, 2009a, 2009b, <doi:10.1016/S0169-7161(09)70003-3>,
<doi:10.1016/S0169-7161(09)70037-9>); Shao, 1996,
<doi:10.1080/02331889708802523>).
Descriptive statistics, linear and logistic regression,
path models for manifest variables with measurement error
correction and two-level hierarchical regressions for weighted
samples are included. Statistical inference can be conducted for
multiply imputed datasets and nested multiply imputed datasets
and is in particularly suited for the analysis of plausible values
(for details see George, Oberwimmer & Itzlinger-Bruneforth, 2016;
Bruneforth, Oberwimmer & Robitzsch, 2016; Robitzsch, Pham &
Yanagida, 2016; <doi:10.17888/fdb-demo:bistE813I-16a>).
The package development was supported by BIFIE (Federal Institute for
Educational Research, Innovation and Development of the Austrian
School System; Salzburg, Austria).",2019-06-12,Alexander Robitzsch,"http://www.bifie.at,
https://www.bifie.at/bildungsforschung/forschungsdatenbibliothek,
https://www.bifie.at/large-scale-assessment-mit-r-methodische-grundlagen-der-oesterreichischen-bildungsstandardueberpruefung,
https://github.com/alexanderrobitzsch/BIFIEsurvey,
https://sites.google.com/site/alexanderrobitzsch2/software",TRUE,https://github.com/alexanderrobitzsch/bifiesurvey,125753,1,2019-06-12T15:24:00Z,125753
bigassertr,"
Enhanced message functions (cat() / message() / warning() / error())
using wrappers around sprintf(). Also, multiple assertion functions
(e.g. to check class, length, values, files, arguments, etc.).",2020-04-01,Florian Privé,https://github.com/privefl/bigassertr,TRUE,https://github.com/privefl/bigassertr,16977,1,2020-04-01T07:55:39Z,16977
BIGDAWG,"Data sets and functions for chi-squared Hardy-Weinberg and case-control association tests of highly polymorphic genetic data [e.g., human leukocyte antigen (HLA) data]. Performs association tests at multiple levels of polymorphism (haplotype, locus and HLA amino-acids) as described in Pappas DJ, Marin W, Hollenbach JA, Mack SJ (2016) <doi:10.1016/j.humimm.2015.12.006>. Combines rare variants to a common class to account for sparse cells in tables as described by Hollenbach JA, Mack SJ, Thomson G, Gourraud PA (2012) <doi:10.1007/978-1-61779-842-9_14>.",2019-11-12,Derek Pappas,"http://tools.immunogenomics.org/,
https://github.com/IgDAWG/BIGDAWG",TRUE,https://github.com/igdawg/bigdawg,30306,2,2019-10-18T01:52:44Z,15153
BIGL,"Response surface methods for drug synergy analysis. Available
methods include generalized and classical Loewe formulations as well as Highest
Single Agent methodology. Response surfaces can be plotted in an interactive
3-D plot and formal statistical tests for presence of synergistic effects are
available. Implemented methods and tests are described in the article
""BIGL: Biochemically Intuitive Generalized Loewe null model for prediction
of the expected combined effect compatible with partial agonism and antagonism""
by Koen Van der Borght, Annelies Tourny, Rytis Bagdziunas, Olivier Thas,
Maxim Nazarov, Heather Turner, Bie Verbist & Hugo Ceulemans (2017)
<doi:10.1038/s41598-017-18068-5>.",2020-02-20,Heather Turner,https://github.com/openanalytics/BIGL,TRUE,https://github.com/openanalytics/bigl,22107,4,2020-02-04T14:29:06Z,5526.75
biglasso,"Extend lasso and elastic-net model fitting for ultrahigh-dimensional,
multi-gigabyte data sets that cannot be loaded into memory. It's much more
memory- and computation-efficient as compared to existing lasso-fitting packages
like 'glmnet' and 'ncvreg', thus allowing for very powerful big data analysis
even with an ordinary laptop.",2019-09-09,Yaohui Zeng,"https://github.com/YaohuiZeng/biglasso,
https://arxiv.org/abs/1701.05936",TRUE,https://github.com/yaohuizeng/biglasso,43694,70,2020-02-13T17:56:46Z,624.2
bigmemory,"Create, store, access, and manipulate massive matrices.
Matrices are allocated to shared memory and may use memory-mapped
files. Packages 'biganalytics', 'bigtabulate', 'synchronicity', and
'bigalgebra' provide advanced functionality.",2019-12-23,Michael J. Kane,https://github.com/kaneplusplus/bigmemory,TRUE,https://github.com/kaneplusplus/bigmemory,431107,87,2019-12-23T00:36:01Z,4955.252873563219
bigparallelr,"Utility functions for easy parallelism in R. Include some reexports
from other packages, utility functions for splitting and parallelizing over
blocks, and choosing and setting the number of cores used.",2020-01-09,Florian Privé,https://github.com/privefl/bigparallelr,TRUE,https://github.com/privefl/bigparallelr,12175,1,2020-02-29T17:23:47Z,12175
bigQueryR,"Interface with 'Google BigQuery',
see <https://cloud.google.com/bigquery/> for more information.
This package uses 'googleAuthR' so is compatible with similar packages,
including 'Google Cloud Storage' (<https://cloud.google.com/storage/>) for result extracts. ",2019-10-09,Mark Edmondson,http://code.markedmondson.me/bigQueryR/,TRUE,https://github.com/cloudyr/bigqueryr,47510,31,2020-03-12T11:53:12Z,1532.5806451612902
bigreadr,"Read large text files by splitting them in smaller files.
Package 'bigreadr' also provides some convenient wrappers around fread()
and fwrite() from package 'data.table'. ",2019-10-18,Florian Privé,https://github.com/privefl/bigreadr,TRUE,https://github.com/privefl/bigreadr,23452,27,2019-11-02T08:25:29Z,868.5925925925926
bigrquery,Easily talk to Google's 'BigQuery' database from R.,2020-05-15,Hadley Wickham,https://github.com/rstats-db/bigrquery,TRUE,https://github.com/rstats-db/bigrquery,431039,393,2020-05-15T16:09:57Z,1096.7913486005089
bigsnpr,"Easy-to-use, efficient, flexible and scalable tools
for analyzing massive SNP arrays <doi:10.1093/bioinformatics/bty185>.",2020-03-09,Florian Privé,https://privefl.github.io/bigsnpr,TRUE,https://github.com/privefl/bigsnpr,3693,55,2020-06-01T05:40:05Z,67.14545454545454
bigsparser,"Provides a sparse matrix format with data stored on disk, to be
used in both R and C++. This is intended for more efficient use of sparse
data in C++ and also when parallelizing, since data on disk does not need
copying. Only a limited number of features will be implemented. For now,
conversion can be performed from a 'dgCMatrix' of R package 'Matrix'.",2020-05-25,Florian Privé,https://github.com/privefl/bigsparser,TRUE,https://github.com/privefl/bigsparser,1281,2,2020-05-15T14:54:49Z,640.5
bigstatsr,"Easy-to-use, efficient, flexible and scalable statistical tools.
Package bigstatsr provides and uses Filebacked Big Matrices via memory-mapping.
It provides for instance matrix operations, Principal Component Analysis,
sparse linear supervised models, utility functions and more
<doi:10.1093/bioinformatics/bty185>.",2020-03-12,Florian Privé,https://privefl.github.io/bigstatsr,TRUE,https://github.com/privefl/bigstatsr,23776,101,2020-03-11T17:15:11Z,235.40594059405942
bigstep,"Selecting linear and generalized linear models for large data sets
using modified stepwise procedure and modern selection criteria (like
modifications of Bayesian Information Criterion). Selection can be
performed on data which exceed RAM capacity.",2019-07-25,Piotr Szulc,http://github.com/pmszulc/bigstep,TRUE,https://github.com/pmszulc/bigstep,16186,1,2019-07-23T06:55:23Z,16186
bigutilsr,"Utility functions for large-scale data. For now, package 'bigutilsr'
mainly includes functions for outlier detection and PCA projection.",2020-05-15,Florian Privé,https://github.com/privefl/bigutilsr,TRUE,https://github.com/privefl/bigutilsr,9604,6,2020-03-30T16:06:58Z,1600.6666666666667
BigVAR,Estimates VAR and VARX models with structured Lasso Penalties.,2019-12-02,Will Nicholson,http://www.github.com/wbnicholson/BigVAR,TRUE,https://github.com/wbnicholson/bigvar,25067,31,2020-03-07T17:53:16Z,808.6129032258065
billboarder,"Provides an 'htmlwidgets' interface to 'billboard.js',
a re-usable easy interface JavaScript chart library, based on D3 v4+.
Chart types include line charts, scatterplots, bar/lollipop charts, histogram/density plots, pie/donut charts and gauge charts.
All charts are interactive, and a proxy method is implemented to smoothly update a chart without rendering it again in 'shiny' apps. ",2020-01-09,Victor Perrier,https://github.com/dreamRs/billboarder,TRUE,https://github.com/dreamrs/billboarder,43810,145,2020-05-18T10:32:01Z,302.13793103448273
binb,"A collection of 'LaTeX' styles using 'Beamer' customization for
pdf-based presentation slides in 'RMarkdown'. At present it contains
'RMarkdown' adaptations of the LaTeX themes 'Metropolis' (formerly 'mtheme')
theme by Matthias Vogelgesang and others (now included in 'TeXLive'), the
'IQSS' by Ista Zahn (which is included here), and the 'Monash' theme by
Rob J Hyndman. Additional (free) fonts may be needed: 'Metropolis' prefers
'Fira', and 'IQSS' requires 'Libertinus'.",2019-11-02,Dirk Eddelbuettel,https://github.com/eddelbuettel/binb,TRUE,https://github.com/eddelbuettel/binb,13082,136,2020-06-09T03:03:27Z,96.19117647058823
binman,"Tools and functions for managing the download of binary files.
Binary repositories are defined in 'YAML' format. Defining new
pre-download, download and post-download templates allow additional
repositories to be added.",2018-07-18,John Harrison,https://github.com/ropensci/binman,TRUE,https://github.com/ropensci/binman,135582,12,2019-12-09T12:08:28Z,11298.5
binmapr,"The raw NGS (Next Generation Sequencing) variants called
from GBS (Genotyping by Sequencing) / WES (Whole Exon Sequencing)/
WGS (Whole Genome Sequencing) may include many error sites. The
'binmapr' could fix the potential error sites and generate highly
confident markers for downstream analysis, such as QTL (quantitative
trait locus) mapping, genetic map construction.
Davey, J.W. (2011) <doi:10.1038/nrg3012>.",2019-10-20,Zhougeng Xu,https://github.com/xuzhougeng/binmapr,TRUE,https://github.com/xuzhougeng/binmapr,3130,8,2019-10-15T07:51:47Z,391.25
bioacoustics,"Contains all the necessary tools to process audio recordings of
various formats (e.g., WAV, WAC, MP3, ZC), filter noisy files,
display audio signals, detect and extract automatically acoustic
features for further analysis such as classification.",2020-05-24,Jean Marchal,https://github.com/wavx/bioacoustics/,TRUE,https://github.com/wavx/bioacoustics,22254,23,2020-05-23T18:34:05Z,967.5652173913044
bioC.logs,Download stats reports from the BioConductor.org stats website.,2020-02-13,Marcelo Ponce,https://github.com/mponce0/bioC.logs,TRUE,https://github.com/mponce0/bioc.logs,2155,0,2020-02-26T18:41:15Z,NA
BiocManager,A convenient tool to install and update Bioconductor packages.,2019-11-16,Martin Morgan,NA,TRUE,https://github.com/bioconductor/biocmanager,1444786,28,2020-05-23T20:05:55Z,51599.5
biocompute,"Tools to create, validate, and export BioCompute Objects
described in King et al. (2019) <doi:10.17605/osf.io/h59uh>.
Users can encode information in data frames, and compose
BioCompute Objects from the domains defined by the standard.
A checksum validator and a JSON schema validator are provided.
This package also supports exporting BioCompute Objects as JSON,
PDF, HTML, or 'Word' documents, and exporting to cloud-based platforms.",2019-11-28,Nan Xiao,"https://sbg.github.io/biocompute/,
https://github.com/sbg/biocompute",TRUE,https://github.com/sbg/biocompute,3071,1,2020-04-23T16:11:09Z,3071
biogram,"Tools for extraction and analysis of various
n-grams (k-mers) derived from biological sequences (proteins
or nucleic acids). Contains QuiPT (quick permutation test) for fast
feature-filtering of the n-gram data.",2020-03-31,Michal Burdukiewicz,https://github.com/michbur/biogram,TRUE,https://github.com/michbur/biogram,25558,6,2020-04-04T19:59:18Z,4259.666666666667
bioimagetools,"Tools for 3D imaging, mostly for biology/microscopy.
Read and write TIFF stacks. Functions for segmentation, filtering and analyzing 3D point patterns.",2020-05-29,Volker Schmid,https://bioimaginggroup.github.io/bioimagetools,TRUE,https://github.com/bioimaginggroup/bioimagetools,19344,3,2020-05-29T11:12:05Z,6448
BioInstaller,"
Can be used to integrate massive bioinformatics resources, such as tool/script and database. It provides the R functions and Shiny web application. Hundreds of bioinformatics tool/script and database have been included.",2018-11-20,Jianfeng Li,https://github.com/JhuangLab/BioInstaller,TRUE,https://github.com/jhuanglab/bioinstaller,38832,34,2019-11-28T07:59:31Z,1142.1176470588234
biomartr,"Perform large scale genomic data retrieval and functional annotation retrieval. This package aims to provide users with a standardized
way to automate genome, proteome, 'RNA', coding sequence ('CDS'), 'GFF', and metagenome
retrieval from 'NCBI RefSeq', 'NCBI Genbank', 'ENSEMBL', 'ENSEMBLGENOMES',
and 'UniProt' databases. Furthermore, an interface to the 'BioMart' database
(Smedley et al. (2009) <doi:10.1186/1471-2164-10-22>) allows users to retrieve
functional annotation for genomic loci. In addition, users can download entire databases such
as 'NCBI RefSeq' (Pruitt et al. (2007) <doi:10.1093/nar/gkl842>), 'NCBI nr',
'NCBI nt', 'NCBI Genbank' (Benson et al. (2013) <doi:10.1093/nar/gks1195>), etc. as
well as 'ENSEMBL' and 'ENSEMBLGENOMES' with only one command.",2020-01-10,Hajk-Georg Drost,"https://docs.ropensci.org/biomartr,
https://github.com/ropensci/biomartr",TRUE,https://github.com/ropensci/biomartr,58748,127,2020-06-04T07:21:25Z,462.5826771653543
BIOMASS,"Contains functions to estimate aboveground biomass/carbon and its uncertainty in tropical forests.
These functions allow to (1) retrieve and to correct taxonomy, (2) estimate wood density and its uncertainty,
(3) construct height-diameter models, (4) manage tree and plot coordinates,
(5) estimate the aboveground biomass/carbon at the stand level with associated uncertainty.
To cite BIOMASS, please use citation(""BIOMASS"").
See more in the article of Réjou-Méchain et al. (2017) <doi:10.1111/2041-210X.12753>.",2019-05-03,Maxime Réjou-Méchain,https://github.com/AMAP-dev/BIOMASS,TRUE,https://github.com/amap-dev/biomass,21519,5,2020-04-02T13:00:27Z,4303.8
BioMedR,"Calculating 293 chemical descriptors and 14 kinds of chemical fingerprints, 9920 protein descriptors based on protein sequences, more than 6000 DNA/RNA descriptors from nucleotide sequences, and six types of interaction descriptors using three different combining strategies. ",2019-07-05,Min-feng Zhu,https://github.com/wind22zhu/BioMedR,TRUE,https://github.com/wind22zhu/biomedr,6654,4,2019-10-19T11:02:19Z,1663.5
bioRad,"Extract, visualize and summarize aerial movements of birds and
insects from weather radar data. See <doi:10.1111/ecog.04028>
for a software paper describing package and methodologies.",2020-05-11,Adriaan M. Dokter,"https://github.com/adokter/bioRad,
https://adokter.github.io/bioRad",TRUE,https://github.com/adokter/biorad,8126,11,2020-05-27T18:27:10Z,738.7272727272727
bipartite,"Functions to visualise webs and calculate a series of indices commonly used to describe pattern in (ecological) webs. It focuses on webs consisting of only two levels (bipartite), e.g. pollination webs or predator-prey-webs. Visualisation is important to get an idea of what we are actually looking at, while the indices summarise different aspects of the web's topology. ",2020-04-03,Carsten F. Dormann,https://github.com/biometry/bipartite,TRUE,https://github.com/biometry/bipartite,119467,15,2020-05-29T12:57:01Z,7964.466666666666
BIRDS,"It helps making the evaluation and preparation of biodiversity data
easy, systematic and reproducible. It also helps the users to overlay the
point observations into a custom grid that is useful for further analysis.
The review summarise statistics that helps evaluate whether a set of species
observations is fit-for-use and take decisions upon its use of on further
analyses. It does so by quantifying the sampling effort (amount of effort
expended during an event) and data completeness (data gaps) to help judge
whether the data is representative, valid and fit for any intended purpose.
The 'BIRDS' package is most useful when working with heterogeneous data sets
with variation in the sampling process, i.e. where data have been collected
and reported in various ways and therefore varying in sampling effort
and data completeness (i.e. how well the reported observations describe the
true state). Primary biodiversity data (PBD) combining data from different
data sets, like e.g. Global Biodiversity Information Facility (GBIF) mediated
data, commonly vary in the ways data has been generated - containing
opportunistically collected presence-only data together with and data from
systematic monitoring programs. The set of tools provided is aimed at
understanding the process that generated the data (i.e. observing, recording
and reporting species into databases). There is a non-vital function on this
package (makeDggrid()) that depends the package 'dggridR' that is no longer on CRAN.
You can find it here <https://github.com/r-barnes/dggridR>. References:
Ruete (2015) <doi:10.3897/BDJ.3.e5361>; Szabo, Vesk, Baxter & Possingham (2010)
<doi:10.1890/09-0877.1>; Telfer, Preston 6 Rothery (2002) <doi:10.1016/S0006-3207(02)00050-2>.",2020-03-20,Debora Arlt,https://github.com/greensway/BIRDS,TRUE,https://github.com/greensway/birds,1614,3,2020-06-04T21:19:56Z,538
biscale,"Provides a 'ggplot2' centric approach to bivariate mapping. This is a
technique that maps two quantities simultaneously rather than the single value
that most thematic maps display. The package provides a suite of tools
for calculating breaks using multiple different approaches, a selection of
palettes appropriate for bivariate mapping and a scale function for 'ggplot2'
calls that adds those palettes to maps. A tool for creating bivariate legends
is also included.",2020-05-06,Christopher Prener,https://github.com/slu-openGIS/biscale,TRUE,https://github.com/slu-opengis/biscale,5627,58,2020-05-06T17:02:54Z,97.01724137931035
BisqueRNA,"Provides tools to accurately estimate cell type abundances
from heterogeneous bulk expression. A reference-based method utilizes
single-cell information to generate a signature matrix and transformation
of bulk expression for accurate regression based estimates. A marker-based
method utilizes known cell-specific marker genes to measure relative
abundances across samples.
For more details, see Jew and Alvarez et al (2019) <doi:10.1101/669911>.",2020-05-04,Brandon Jew,https://www.biorxiv.org/content/10.1101/669911v1,TRUE,https://github.com/cozygene/bisque,5749,23,2020-05-04T04:24:22Z,249.95652173913044
bitmexr,"A client for cryptocurrency exchange BitMEX
<https://www.bitmex.com/> including the ability to obtain historic
trade data and place, edit and cancel orders. BitMEX's Testnet and
live API are both supported.",2020-05-25,Harry Fisher,"https://github.com/hfshr/bitmexr, https://hfshr.github.io/bitmexr",TRUE,https://github.com/hfshr/bitmexr,718,2,2020-06-02T17:53:55Z,359
bitsqueezr,"Provides a implementation of floating-point quantization algorithms for use in precision-preserving
compression, similar to the approach taken in the 'netCDF operators' (NCO) software package and
described in Zender (2016) <doi:10.5194/gmd-2016-63>.",2020-01-17,Daniel Baston,https://github.com/dbaston/bitsqueezr,TRUE,https://github.com/dbaston/bitsqueezr,6571,0,2019-09-30T17:35:23Z,NA
BivRec,"A collection of models for bivariate alternating recurrent event data analysis.
Includes non-parametric and semi-parametric methods.",2020-01-15,Sandra Castro-Pearson,https://github.com/SandraCastroPearson/BivRec,TRUE,https://github.com/sandracastropearson/bivrec,7594,1,2020-01-19T18:18:52Z,7594
bjscrapeR,"Drawing heavy influence from 'blscrapeR', this package scrapes crime data from <https://www.bjs.gov/>. Specifically, it scrapes data from the National Crime Victimization Survey which tracks personal and household crime in the USA. The idea is to utilize the 'tidyverse' methodology to create an efficient work flow when dealing with crime statistics.",2018-06-06,Dylan McDowell,https://github.com/dylanjm/bjscrapeR,TRUE,https://github.com/dylanjm/bjscraper,8124,4,2019-06-28T04:46:29Z,2031
bkmr,"Implementation of a statistical approach
for estimating the joint health effects of multiple
concurrent exposures.",2017-03-24,Jennifer F. Bobb,https://github.com/jenfb/bkmr,TRUE,https://github.com/jenfb/bkmr,16825,12,2020-05-25T20:57:54Z,1402.0833333333333
blandr,"Carries out Bland Altman analyses (also known as a Tukey
mean-difference plot) as described by JM Bland and DG Altman in
1986 <doi:10.1016/S0140-6736(86)90837-8>. This package was created in
2015 as existing Bland-Altman analysis functions did not calculate
confidence intervals. This package was created to rectify this,
and create reproducible plots. This package is also available as a module
for the 'jamovi' statistical spreadsheet (see <https://www.jamovi.org>
for more information).",2018-05-10,Deepankar Datta,https://github.com/deepankardatta/blandr/,TRUE,https://github.com/deepankardatta/blandr,15914,10,2020-03-28T07:15:04Z,1591.4
blastula,"Compose and send out responsive HTML email messages that render
perfectly across a range of email clients and device sizes. Helper functions
let the user insert embedded images, web link buttons, and 'ggplot2' plot
objects into the message body. Messages can be sent through an 'SMTP'
server, through the 'RStudio Connect' service, or through the 'Mailgun' API
service <http://mailgun.com/>.",2020-05-19,Richard Iannone,https://github.com/rich-iannone/blastula,TRUE,https://github.com/rich-iannone/blastula,37627,297,2020-05-19T16:43:28Z,126.6902356902357
blindrecalc,"Computation of key characteristics and plots for blinded sample size recalculation.
Continuous as well as binary endpoints are supported in superiority and non-inferiority trials.
The implemented methods include the approaches by
Lu, K. (2019) <doi:10.1002/pst.1737>,
Kieser, M. and Friede, T. (2000) <doi:10.1002/(SICI)1097-0258(20000415)19:7%3C901::AID-SIM405%3E3.0.CO;2-L>,
Friede, T. and Kieser, M. (2004) <doi:10.1002/pst.140>,
Friede, T., Mitchell, C., Mueller-Veltern, G. (2007) <doi:10.1002/bimj.200610373>, and
Friede, T. and Kieser, M. (2011) <doi:10.3414/ME09-01-0063>.",2020-05-11,Maximilian Pilz,https://github.com/imbi-heidelberg/blindrecalc,TRUE,https://github.com/imbi-heidelberg/blindrecalc,389,2,2020-05-11T14:35:39Z,194.5
blme,"Maximum a posteriori estimation for linear and generalized
linear mixed-effects models in a Bayesian setting. Extends
'lme4' by Douglas Bates, Martin Maechler, Ben Bolker, and Steve Walker.",2015-06-14,Vincent Dorie,https://github.com/vdorie/blme,TRUE,https://github.com/vdorie/blme,169550,25,2020-02-26T19:20:44Z,6782
blob,"R's raw vector is useful for storing a single
binary object. What if you want to put a vector of them in a data
frame? The 'blob' package provides the blob object, a list of raw
vectors, suitable for use as a column in data frame.",2020-01-20,Kirill Müller,https://github.com/tidyverse/blob,TRUE,https://github.com/tidyverse/blob,3104956,28,2020-01-23T12:25:21Z,110891.28571428571
blockCV,"Creating spatially or environmentally separated folds for cross-validation to provide a robust error estimation in spatially structured environments; Investigating and visualising the effective range of spatial autocorrelation in continuous raster covariates to find an initial realistic distance band to separate training and testing datasets spatially described in Valavi, R. et al. (2019) <doi:10.1111/2041-210X.13107>.",2020-02-23,Roozbeh Valavi,https://github.com/rvalavi/blockCV,TRUE,https://github.com/rvalavi/blockcv,2912,58,2020-04-22T13:30:58Z,50.206896551724135
blockForest,"A random forest variant 'block forest' ('BlockForest') tailored to the
prediction of binary, survival and continuous outcomes using block-structured
covariate data, for example, clinical covariates plus measurements of a certain
omics data type or multi-omics data, that is, data for which measurements of
different types of omics data and/or clinical data for each patient exist. Examples
of different omics data types include gene expression measurements, mutation data
and copy number variation measurements.
Block forest are presented in Hornung & Wright (2019). The package includes four
other random forest variants for multi-omics data: 'RandomBlock', 'BlockVarSel',
'VarProb', and 'SplitWeights'. These were also considered in Hornung & Wright (2019),
but performed worse than block forest in their comparison study based on 20 real
multi-omics data sets. Therefore, we recommend to use block forest ('BlockForest')
in applications. The other random forest variants can, however, be consulted for
academic purposes, for example, in the context of further methodological
developments.
Reference: Hornung, R. & Wright, M. N. (2019) Block Forests: random forests for blocks of clinical and omics covariate data. BMC Bioinformatics 20:358. <doi:10.1186/s12859-019-2942-y>.",2019-12-06,Roman Hornung,https://github.com/bips-hb/blockForest,TRUE,https://github.com/bips-hb/blockforest,8250,3,2019-12-06T08:01:03Z,2750
blockRAR,"Computes power for response-adaptive randomization with a block design that captures both the time and treatment effect. T. Chandereng, R. Chappell (2019) <arXiv:1904.07758>.",2020-01-21,Thevaa Chandereng,https://github.com/thevaachandereng/blockRAR/,TRUE,https://github.com/thevaachandereng/blockrar,5652,2,2020-06-07T17:51:49Z,2826
blogdown,"Write blog posts and web pages in R Markdown. This package supports
the static site generator 'Hugo' (<https://gohugo.io>) best, and it also
supports 'Jekyll' (<http://jekyllrb.com>) and 'Hexo' (<https://hexo.io>).",2020-05-22,Yihui Xie,https://github.com/rstudio/blogdown,TRUE,https://github.com/rstudio/blogdown,144961,1156,2020-05-28T15:33:27Z,125.39878892733564
blorr,"Tools designed to make it easier for beginner and intermediate users to build and validate
binary logistic regression models. Includes bivariate analysis, comprehensive regression output,
model fit statistics, variable selection procedures, model validation techniques and a 'shiny'
app for interactive model building.",2020-05-28,Aravind Hebbali,"URL: https://blorr.rsquaredacademy.com/,
https://github.com/rsquaredacademy/blorr",TRUE,https://github.com/rsquaredacademy/blorr,15594,12,2020-05-28T13:25:52Z,1299.5
blscrapeR,"Scrapes various data from <https://www.bls.gov/>. The U.S. Bureau of Labor Statistics is the statistical branch of the United States Department of Labor. The package has additional functions to help parse, analyze and visualize the data.",2019-12-17,Kris Eberwein,https://github.com/keberwein/blscrapeR,TRUE,https://github.com/keberwein/blscraper,34028,70,2019-12-17T16:56:17Z,486.1142857142857
bltm,"Fits latent threshold model for simulated data
and describes how to adjust model using real data. Implements algorithm
proposed by Nakajima and West (2013) <doi:10.1080/07350015.2012.747847>.
This package has a function to generate data, a function to configure
priors and a function to fit the model. Examples may be checked inside
the demonstration files.",2019-07-18,Julio Trecenti,https://github.com/curso-r/bltm,TRUE,https://github.com/curso-r/bltm,4238,1,2019-07-13T18:39:36Z,4238
BMA,"Package for Bayesian model averaging and variable selection for linear models,
generalized linear models and survival models (cox
regression).",2020-03-11,Adrian Raftery,"http://stats.research.att.com/volinsky/bma.html,
https://github.com/hanase/BMA",TRUE,https://github.com/hanase/bma,195487,6,2020-03-10T23:13:48Z,32581.166666666668
bmass,"Multivariate tool for analyzing genome-wide association
study results in the form of univariate summary statistics. The
goal of 'bmass' is to comprehensively test all possible multivariate
models given the phenotypes and datasets provided. Multivariate
models are determined by assigning each phenotype to being either
Unassociated (U), Directly associated (D) or Indirectly associated
(I) with the genetic variant of interest. Test results for each model
are presented in the form of Bayes factors, thereby allowing direct
comparisons between models. The underlying framework implemented
here is based on the modeling developed in ""A Unified Framework
for Association Analysis with Multiple Related Phenotypes"",
M. Stephens (2013) <doi:10.1371/journal.pone.0065245>.",2019-05-17,Michael Turchin,https://github.com/mturchin20/bmass,TRUE,https://github.com/mturchin20/bmass,4654,8,2020-05-17T00:57:35Z,581.75
BMTME,"Genomic selection and prediction models with the capacity to use multiple traits and environments, through ready-to-use Bayesian models. It consists a group of functions
that help to create regression models for some genomic models proposed by Montesinos-López, et al. (2016) <doi:10.1534/g3.116.032359>
also in Montesinos-López et al. (2018) <doi:10.1534/g3.118.200728> and Montesinos-López et al. (2018) <doi:10.2134/agronj2018.06.0362>.",2020-05-26,Francisco Javier Luna-Vazquez,https://github.com/frahik/BMTME,TRUE,https://github.com/frahik/bmtme,7983,6,2019-10-17T21:30:09Z,1330.5
bnclassify,"State-of-the art algorithms for learning discrete Bayesian network classifiers from data, including a number of those described in Bielza & Larranaga (2014) <doi:10.1145/2576868>, with functions for prediction, model evaluation and inspection.",2020-03-12,Mihaljevic Bojan,http://github.com/bmihaljevic/bnclassify,TRUE,https://github.com/bmihaljevic/bnclassify,29535,15,2020-04-02T14:17:15Z,1969
bnpsd,"The Pritchard-Stephens-Donnelly (PSD) admixture model has k intermediate subpopulations from which n individuals draw their alleles dictated by their individual-specific admixture proportions. The BN-PSD model additionally imposes the Balding-Nichols (BN) allele frequency model to the intermediate populations, which therefore evolved independently from a common ancestral population T with subpopulation-specific FST (Wright's fixation index) parameters. The BN-PSD model can be used to yield complex population structures. Method described in Ochoa and Storey (2016) <doi:10.1101/083923>.",2020-01-10,Alejandro Ochoa,https://github.com/StoreyLab/bnpsd/,TRUE,https://github.com/storeylab/bnpsd,10854,6,2020-05-28T20:34:05Z,1809
BNrich,"Maleknia et al. (2020) <doi:10.1101/2020.01.13.905448>. A novel pathway enrichment analysis package based on Bayesian network to investigate the topology features of the pathways. firstly, 187 kyoto encyclopedia of genes and genomes (KEGG) human non-metabolic pathways which their cycles were eliminated by biological approach, enter in analysis as Bayesian network structures. The constructed Bayesian network were optimized by the Least Absolute Shrinkage Selector Operator (lasso) and the parameters were learned based on gene expression data. Finally, the impacted pathways were enriched by Fisher’s Exact Test on significant parameters.",2020-04-04,Samaneh Maleknia,https://github.com/Samaneh-Bioinformatics/BNrich,TRUE,https://github.com/samaneh-bioinformatics/bnrich,1581,0,2020-04-04T07:37:58Z,NA
bnspatial,"Allows spatial implementation of Bayesian networks and mapping in geographical space. It makes maps of expected value (or most likely state) given known and unknown conditions, maps of uncertainty measured as coefficient of variation or Shannon index (entropy), maps of probability associated to any states of any node of the network. Some additional features are provided as well: parallel processing options, data discretization routines and function wrappers designed for users with minimal knowledge of the R language. Outputs can be exported to any common GIS format. ",2020-01-17,Dario Masante,http://github.com/dariomasante/bnspatial,TRUE,https://github.com/dariomasante/bnspatial,21968,13,2020-01-30T12:10:29Z,1689.8461538461538
bold,"A programmatic interface to the Web Service methods provided by
Bold Systems (<http://www.boldsystems.org/>) for genetic 'barcode' data.
Functions include methods for searching by sequences by taxonomic names,
ids, collectors, and institutions; as well as a function for searching
for specimens, and downloading trace files.",2020-05-01,Scott Chamberlain,"https://docs.ropensci.org/bold, https://github.com/ropensci/bold",TRUE,https://github.com/ropensci/bold,148265,12,2020-05-01T21:15:40Z,12355.416666666666
bomrang,"Provides functions to interface with Australian Government Bureau
of Meteorology ('BOM') data, fetching data and returning a tidy data frame
of precis forecasts, historical and current weather data from stations,
agriculture bulletin data, 'BOM' 0900 or 1500 weather bulletins and
downloading and importing radar and satellite imagery files. Data (c)
Australian Government Bureau of Meteorology Creative Commons (CC)
Attribution 3.0 licence or Public Access Licence (PAL) as appropriate. See
<http://www.bom.gov.au/other/copyright.shtml> for further details.",2020-01-20,Adam H. Sparks,"https://github.com/ropensci/bomrang,
https://docs.ropensci.org/bomrang/",TRUE,https://github.com/ropensci/bomrang,30733,66,2020-01-20T22:31:56Z,465.6515151515151
bookdown,Output formats and utilities for authoring books and technical documents with R Markdown.,2020-05-15,Yihui Xie,https://github.com/rstudio/bookdown,TRUE,https://github.com/rstudio/bookdown,620701,1955,2020-05-23T19:10:02Z,317.49411764705883
bookdownplus,"A collection and selector of R 'bookdown' templates. 'bookdownplus' helps you write academic journal articles, guitar books, chemical equations, mails, calendars, and diaries. R 'bookdownplus' extends the features of 'bookdown', and simplifies the procedure. Users only have to choose a template, clarify the book title and author name, and then focus on writing the text. No need to struggle in 'YAML' and 'LaTeX'.",2020-02-26,Peng Zhao,https://github.com/pzhaonet/bookdownplus,TRUE,https://github.com/pzhaonet/bookdownplus,31933,173,2020-03-17T21:21:46Z,184.58381502890174
boot.heterogeneity,"Implements a bootstrap-based heterogeneity test for standardized mean differences (d), Fisher-transformed Pearson's correlations (r), and natural-logarithm-transformed odds ratio (or) in meta-analysis studies. Depending on the presence of moderators, this Monte Carlo based test can be implemented in the random- or mixed-effects model. This package uses rma() function from the R package 'metafor' to obtain parameter estimates and likelihoods, so installation of R package 'metafor' is required. This approach refers to the studies of Anscombe (1956) <doi:10.2307/2332926>, Haldane (1940) <doi:10.2307/2332614>, Hedges (1981) <doi:10.3102/10769986006002107>, Hedges & Olkin (1985, ISBN:978-0123363800), Silagy, Lancaster, Stead, Mant, & Fowler (2004) <doi:10.1002/14651858.CD000146.pub2>, Viechtbauer (2010) <doi:10.18637/jss.v036.i03>, and Zuckerman (1994, ISBN:978-0521432009). ",2020-05-08,Ge Jiang,https://github.com/gabriellajg/boot.heterogeneity/,TRUE,https://github.com/gabriellajg/boot.heterogeneity,429,0,2020-05-07T06:31:49Z,NA
bootstrapFP,"Finite Population bootstrap algorithms to estimate the variance
of the Horvitz-Thompson estimator for single-stage sampling.
For a survey of bootstrap methods for finite populations, see Mashreghi et Al. (2016) <doi:10.1214/16-SS113>.",2019-02-24,Roberto Sichera,NA,TRUE,https://github.com/rhobis/bootstrapfp,6378,0,2019-12-04T11:38:48Z,NA
BOSSreg,"Best orthogonalized subset selection (BOSS) is a least-squares (LS) based subset selection method, that performs best subset selection upon an orthogonalized basis of ordered predictors, with the computational effort of a single ordinary LS fit. This package provides a highly optimized implementation of BOSS and estimates a heuristic degrees of freedom for BOSS, which can be plugged into an information criterion (IC) such as AICc in order to select the subset from candidates. It provides various choices of IC, including AIC, BIC, AICc, Cp and GCV. It also implements the forward stepwise selection (FS) with no additional computational cost, where the subset of FS is selected via cross-validation (CV). CV is also an option for BOSS. For details see: Tian, Hurvich and Simonoff (2019), ""On the Use of Information Criteria for Subset Selection in Least Squares Regression"", <arXiv:1911.10191>.",2019-12-06,Sen Tian,https://github.com/sentian/BOSSreg,TRUE,https://github.com/sentian/bossreg,2688,1,2020-01-15T04:16:54Z,2688
botor,"Fork-safe, raw access to the 'Amazon Web Services' ('AWS') 'SDK' via the 'boto3' 'Python' module, and convenient helper functions to query the 'Simple Storage Service' ('S3') and 'Key Management Service' ('KMS'), partial support for 'IAM', the 'Systems Manager Parameter Store' and 'Secrets Manager'.",2020-02-16,Gergely Daróczi,https://daroczig.github.io/botor,TRUE,https://github.com/daroczig/botor,4658,22,2020-05-22T19:07:41Z,211.72727272727272
boundingbox,"Generate ground truth cases for object localization algorithms.
Cycle through a list of images, select points around which to generate bounding
boxes and assign classifiers. Output the coordinates, and images annotated with
boxes and labels. For an example study that uses bounding boxes for image
localization and classification see Ibrahim, Badr, Abdallah, and Eissa (2012)
""Bounding Box Object Localization Based on Image Superpixelization""
<doi:10.1016/j.procs.2012.09.119>.",2020-06-09,David Stomski,<https://github.com/stomperusa/boundingbox>,TRUE,https://github.com/stomperusa/boundingbox,0,1,2020-06-06T00:15:06Z,0
boxr,"An R interface for the remote file hosting service 'Box'
(<https://www.box.com/>). In addition to uploading and downloading files,
this package includes functions which mirror base R operations for local
files, (e.g. box_load(), box_save(), box_read(), box_setwd(), etc.), as well
as 'git' style functions for entire directories (e.g. box_fetch(),
box_push()).",2019-11-19,Ian Lyttle,https://github.com/r-box/boxr/,TRUE,https://github.com/r-box/boxr,32003,43,2020-04-27T16:32:38Z,744.2558139534884
bpbounds,"Implementation of the nonparametric bounds for the average causal
effect under an instrumental variable model by Balke and Pearl (Bounds on
Treatment Effects from Studies with Imperfect Compliance, JASA, 1997, 92,
439, 1171-1176). The package can calculate bounds for a binary outcome, a
binary treatment/phenotype, and an instrument with either 2 or 3
categories. The package implements bounds for situations where these 3
variables are measured in the same dataset (trivariate data) or where the
outcome and instrument are measured in one study and the
treatment/phenotype and instrument are measured in another study
(bivariate data).",2020-01-21,Tom Palmer,https://github.com/remlapmot/bpbounds,TRUE,https://github.com/remlapmot/bpbounds,7782,0,2020-06-07T09:13:36Z,NA
bpnreg,"Fitting Bayesian multiple and mixed-effect regression models for
circular data based on the projected normal distribution. Both continuous
and categorical predictors can be included. Sampling from the posterior is
performed via an MCMC algorithm. Posterior descriptives of all parameters,
model fit statistics and Bayes factors for hypothesis tests for inequality
constrained hypotheses are provided. See Cremers, Mulder & Klugkist (2018)
<doi:10.1111/bmsp.12108> and Nuñez-Antonio & Guttiérez-Peña (2014)
<doi:10.1016/j.csda.2012.07.025>.",2020-02-04,Jolien Cremers,https://github.com/joliencremers/bpnreg,TRUE,https://github.com/joliencremers/bpnreg,9477,2,2020-02-05T08:11:37Z,4738.5
bracer,"Performs brace expansions on strings. Made popular by Unix shells, brace expansion allows users to concisely generate certain character vectors by taking a single string and (recursively) expanding the comma-separated lists and double-period-separated integer and character sequences enclosed within braces in that string. The double-period-separated numeric integer expansion also supports padding the resulting numbers with zeros.",2019-09-03,Trevor Davis,https://github.com/trevorld/bracer,TRUE,https://github.com/trevorld/bracer,4153,1,2019-11-24T19:09:12Z,4153
brainGraph,"A set of tools for performing graph theory analysis of brain MRI
data. It works with data from a Freesurfer analysis (cortical thickness,
volumes, local gyrification index, surface area), diffusion tensor
tractography data (e.g., from FSL) and resting-state fMRI data (e.g., from
DPABI). It contains a graphical user interface for graph visualization and
data exploration, along with several functions for generating useful
figures.",2019-11-07,Christopher G. Watson,https://github.com/cwatson/brainGraph,TRUE,https://github.com/cwatson/braingraph,27726,79,2019-11-06T05:31:18Z,350.9620253164557
BRDT,"This is an implementation of design methods for binomial reliability demonstration tests (BRDTs) with failure count data.
The acceptance decision uncertainty of BRDT has been quantified and the impacts of the uncertainty on related reliability assurance activities such as reliability growth (RG) and warranty services (WS) are evaluated.
This package is associated with the work from the published paper ""Optimal Binomial Reliability Demonstration Tests Design under Acceptance Decision Uncertainty"" by Suiyao Chen et al. (2020) <doi:10.1080/08982112.2020.1757703>.",2020-06-09,Suiyao Chen,https://github.com/ericchen12377/BRDT,TRUE,https://github.com/ericchen12377/brdt,0,2,2020-06-09T19:08:00Z,0
breakDown,"Model agnostic tool for decomposition of predictions from black boxes.
Break Down Table shows contributions of every variable to a final prediction.
Break Down Plot presents variable contributions in a concise graphical way.
This package work for binary classifiers and general regression models. ",2020-04-05,Przemyslaw Biecek,https://pbiecek.github.io/breakDown/,TRUE,https://github.com/pbiecek/breakdown,29388,87,2020-04-04T23:57:50Z,337.7931034482759
breathtestcore,"Reads several formats of 13C data (IRIS/Wagner,
BreathID) and CSV. Creates artificial sample data for testing. Fits
Maes/Ghoos, Bluck-Coward self-correcting formula using 'nls', 'nlme'.
Methods to fit breath test curves with Bayesian Stan methods are
refactored to package 'breathteststan'. For a Shiny GUI, see package
'dmenne/breathtestshiny' on github.",2020-03-22,Dieter Menne,https://github.com/dmenne/breathtestcore,TRUE,https://github.com/dmenne/breathtestcore,14985,1,2020-06-08T08:03:44Z,14985
breathteststan,"Stan-based curve-fitting function
for use with package 'breathtestcore' by the same author.
Stan functions are refactored here for easier testing.",2020-03-22,Dieter Menne,https://github.com/dmenne/breathteststan,TRUE,https://github.com/dmenne/breathteststan,17500,3,2020-04-13T07:36:34Z,5833.333333333333
brglm2,"Estimation and inference from generalized linear models based on various methods for bias reduction and maximum penalized likelihood with powers of the Jeffreys prior as penalty. The 'brglmFit' fitting method can achieve reduction of estimation bias by solving either the mean bias-reducing adjusted score equations in Firth (1993) <doi:10.1093/biomet/80.1.27> and Kosmidis and Firth (2009) <doi:10.1093/biomet/asp055>, or the median bias-reduction adjusted score equations in Kenne et al. (2016) <arXiv:1604.04768>, or through the direct subtraction of an estimate of the bias of the maximum likelihood estimator from the maximum likelihood estimates as in Cordeiro and McCullagh (1991) <http://www.jstor.org/stable/2345592>. See Kosmidis et al (2019) <doi:10.1007/s11222-019-09860-6> for more details. Estimation in all cases takes place via a quasi Fisher scoring algorithm, and S3 methods for the construction of of confidence intervals for the reduced-bias estimates are provided. In the special case of generalized linear models for binomial and multinomial responses (both ordinal and nominal), the adjusted score approaches return estimates with improved frequentist properties, that are also always finite, even in cases where the maximum likelihood estimates are infinite (e.g. complete and quasi-complete separation). 'brglm2' also provides pre-fit and post-fit methods for detecting separation and infinite maximum likelihood estimates in binomial response generalized linear models.",2020-03-19,Ioannis Kosmidis,https://github.com/ikosmidis/brglm2,TRUE,https://github.com/ikosmidis/brglm2,27917,5,2020-03-19T15:35:26Z,5583.4
brickr,"
Generate digital LEGO models using 'tidyverse' functions.
Convert image files into 2D and 3D LEGO mosaics, complete with piece counts and instructions.
Render 3D models using simple data frame instructions.
Developed under the LEGO Group's Fair Play policy <https://www.lego.com/en-us/legal/notices-and-policies/fair-play/>.",2020-05-09,Ryan Timpe,https://github.com/ryantimpe/brickr,TRUE,https://github.com/ryantimpe/brickr,1930,312,2020-05-09T20:02:55Z,6.185897435897436
bridgesampling,"Provides functions for estimating marginal likelihoods, Bayes
factors, posterior model probabilities, and normalizing constants in general,
via different versions of bridge sampling (Meng & Wong, 1996,
<http://www3.stat.sinica.edu.tw/statistica/j6n4/j6n43/j6n43.htm>).
Gronau, Singmann, & Wagenmakers (2020) <doi:10.18637/jss.v092.i10>.",2020-02-26,Quentin F. Gronau,https://github.com/quentingronau/bridgesampling,TRUE,https://github.com/quentingronau/bridgesampling,208871,19,2020-02-24T23:15:25Z,10993.21052631579
brio,"Functions to handle basic input output, these functions always
read and write UTF-8 (8-bit Unicode Transformation Format) files and provide
more explicit control over line endings.",2020-03-26,Jim Hester,https://github.com/r-lib/brio,TRUE,https://github.com/r-lib/brio,1670,20,2020-04-20T13:08:53Z,83.5
BRISC,Fits Bootstrap with univariate spatial regression models using Bootstrap for Rapid Inference on Spatial Covariances (BRISC) for large datasets using Nearest Neighbor Gaussian Processes detailed in Saha and Datta (2018) <doi:10.1002/sta4.184>.,2019-08-19,Arkajyoti Saha,https://github.com/ArkajyotiSaha/BRISC,TRUE,https://github.com/arkajyotisaha/brisc,8546,1,2019-08-22T18:41:21Z,8546
BRL,"Implementation of the record linkage methodology proposed by Sadinle (2017) <doi:10.1080/01621459.2016.1148612>. It handles the bipartite record linkage problem, where two duplicate-free datafiles are to be merged.",2020-01-13,Mauricio Sadinle,https://github.com/msadinle/BRL,TRUE,https://github.com/msadinle/brl,3256,3,2020-01-11T01:25:32Z,1085.3333333333333
brms,"Fit Bayesian generalized (non-)linear multivariate multilevel models
using 'Stan' for full Bayesian inference. A wide range of distributions
and link functions are supported, allowing users to fit -- among others --
linear, robust linear, count data, survival, response times, ordinal,
zero-inflated, hurdle, and even self-defined mixture models all in a
multilevel context. Further modeling options include non-linear and
smooth terms, auto-correlation structures, censored data, meta-analytic
standard errors, and quite a few more. In addition, all parameters of the
response distribution can be predicted in order to perform distributional
regression. Prior specifications are flexible and explicitly encourage
users to apply prior distributions that actually reflect their beliefs.
Model fit can easily be assessed and compared with posterior predictive
checks and leave-one-out cross-validation. References: Bürkner (2017)
<doi:10.18637/jss.v080.i01>; Bürkner (2018) <doi:10.32614/RJ-2018-017>;
Carpenter et al. (2017) <doi:10.18637/jss.v076.i01>.",2020-05-27,Paul-Christian Bürkner,"https://github.com/paul-buerkner/brms,
http://discourse.mc-stan.org",TRUE,https://github.com/paul-buerkner/brms,411359,714,2020-06-09T16:24:28Z,576.1330532212885
Brobdingnag,"Handles very large numbers in R. Real numbers are held
using their natural logarithms, plus a logical flag indicating
sign. The package includes a vignette that gives a
step-by-step introduction to using S4 methods.",2018-08-13,Robin K. S. Hankin,https://github.com/RobinHankin/Brobdingnag.git,TRUE,https://github.com/robinhankin/brobdingnag,211351,1,2020-04-30T08:43:11Z,211351
broman,"Miscellaneous R functions, including functions related to
graphics (mostly for base graphics), permutation tests, running
mean/median, and general utilities.",2020-05-22,Karl W Broman,https://github.com/kbroman/broman,TRUE,https://github.com/kbroman/broman,43235,157,2020-05-21T15:29:32Z,275.38216560509557
broom,"Summarizes key information about statistical
objects in tidy tibbles. This makes it easy to report results, create
plots and consistently work with large numbers of models at once.
Broom provides three verbs that each provide different types of
information about a model. tidy() summarizes information about model
components such as coefficients of a regression. glance() reports
information about an entire model, such as goodness of fit measures
like AIC and BIC. augment() adds information about individual
observations to a dataset, such as fitted values or influence
measures.",2020-04-20,Alex Hayes,http://github.com/tidyverse/broom,TRUE,https://github.com/tidyverse/broom,10361715,953,2020-06-09T16:03:05Z,10872.733473242393
broom.mixed,"Convert fitted objects from various R mixed-model packages
into tidy data frames along the lines of the 'broom' package.
The package provides three
S3 generics for each model: tidy(), which summarizes a model's statistical findings such as
coefficients of a regression; augment(), which adds columns to the original
data such as predictions, residuals and cluster assignments; and glance(), which
provides a one-row summary of model-level statistics.",2020-05-17,Ben Bolker,http://github.com/bbolker/broom.mixed,TRUE,https://github.com/bbolker/broom.mixed,73266,158,2020-05-29T00:44:08Z,463.7088607594937
broomExtra,"Provides helper functions that assist in data
analysis workflows involving regression analyses. The goal is to
combine the functionality offered by different set of packages
('broom', 'broom.mixed', 'parameters', and 'performance') through a
common syntax to return tidy dataframes containing model parameters
and performance measure summaries. The 'grouped_' variants of the
generics provides a convenient way to execute functions across a
combination of grouping variable(s) in a dataframe.",2020-05-11,Indrajeet Patil,"https://indrajeetpatil.github.io/broomExtra/,
https://github.com/IndrajeetPatil/broomExtra",TRUE,https://github.com/indrajeetpatil/broomextra,52301,30,2020-05-30T21:16:09Z,1743.3666666666666
brranching,"Includes methods for fetching 'phylogenies' from a variety
of sources, including the 'Phylomatic' web service
(<http://phylodiversity.net/phylomatic>), and 'Phylocom'
(<https://github.com/phylocom/phylocom/>).",2019-07-27,Scott Chamberlain,https://github.com/ropensci/brranching,TRUE,https://github.com/ropensci/brranching,27516,13,2020-06-03T23:30:36Z,2116.6153846153848
brunnermunzel,"Provides the functions for Brunner-Munzel test and
permuted Brunner-Munzel test,
which enable to use formula, matrix, and table as argument.
These functions are based on Brunner and Munzel (2000)
<doi:10.1002/(SICI)1521-4036(200001)42:1%3C17::AID-BIMJ17%3E3.0.CO;2-U>
and Neubert and Brunner (2007) <doi:10.1016/j.csda.2006.05.024>,
and are written with FORTRAN.",2020-01-08,Toshiaki Ara,https://github.com/toshi-ara/brunnermunzel,TRUE,https://github.com/toshi-ara/brunnermunzel,8462,3,2020-01-07T13:07:33Z,2820.6666666666665
bs4Dash,"Make 'Bootstrap 4' dashboards. Use the full power
of 'AdminLTE3', a dashboard template built on top of 'Bootstrap 4'
<https://github.com/ColorlibHQ/AdminLTE>.",2019-11-27,David Granjon,"https://rinterface.github.io/bs4Dash/index.html,
https://github.com/RinteRface/bs4Dash",TRUE,https://github.com/rinterface/bs4dash,52042,189,2020-05-27T23:08:18Z,275.35449735449737
bsam,"Tools to fit Bayesian state-space models to animal tracking data. Models are provided for location
filtering, location filtering and behavioural state estimation, and their hierarchical versions.
The models are primarily intended for fitting to ARGOS satellite tracking data but options exist to fit
to other tracking data types. For Global Positioning System data, consider the 'moveHMM' package.
Simplified Markov Chain Monte Carlo convergence diagnostic plotting is provided but users are encouraged
to explore tools available in packages such as 'coda' and 'boa'.",2017-07-01,Ian Jonsen,https://github.com/ianjonsen/bsam,TRUE,https://github.com/ianjonsen/bsam,17634,13,2020-01-24T13:07:27Z,1356.4615384615386
bSims,"A highly scientific and utterly addictive
bird point count simulator
to test statistical assumptions, aid survey design,
and have fun while doing it.
The simulations follow time-removal and distance sampling models
based on Matsuoka et al. (2012) <doi:10.1525/auk.2012.11190>,
Solymos et al. (2013) <doi:10.1111/2041-210X.12106>,
and Solymos et al. (2018) <doi:10.1650/CONDOR-18-32.1>,
and sound attenuation experiments by
Yip et al. (2017) <doi:10.1650/CONDOR-16-93.1>.",2019-12-20,Peter Solymos,https://github.com/psolymos/bSims,TRUE,https://github.com/psolymos/bsims,2686,1,2020-05-28T21:54:21Z,2686
bsplus,"The Bootstrap framework lets you add some JavaScript functionality to your web site by
adding attributes to your HTML tags - Bootstrap takes care of the JavaScript
<https://getbootstrap.com/javascript>. If you are using R Markdown or Shiny, you can
use these functions to create collapsible sections, accordion panels, modals, tooltips,
popovers, and an accordion sidebar framework (not described at Bootstrap site).",2018-04-05,Ian Lyttle,https://github.com/ijlyttle/bsplus,TRUE,https://github.com/ijlyttle/bsplus,26670,118,2020-05-16T18:38:47Z,226.01694915254237
bssm,"Efficient methods for Bayesian inference of state space models
via particle Markov chain Monte Carlo (MCMC) and MCMC based on parallel
importance sampling type weighted estimators
(Vihola, Helske, and Franks, 2020, <arXiv:1609.02541>).
Gaussian, Poisson, binomial, negative binomial, and Gamma
observation densities and basic stochastic volatility models with Gaussian state
dynamics, as well as general non-linear Gaussian models and discretised
diffusion models are supported.",2020-06-09,Jouni Helske,NA,TRUE,https://github.com/helske/bssm,28538,15,2020-06-09T13:49:25Z,1902.5333333333333
BSW,Implements a modified Newton-type algorithm (BSW algorithm) for solving the maximum likelihood estimation problem in fitting a log-binomial model under linear inequality constraints.,2020-03-25,Adam Bekhit,https://github.com/adam-bec/BSW,TRUE,https://github.com/adam-bec/bsw,1157,0,2020-03-24T14:24:27Z,NA
btergm,"Temporal Exponential Random Graph Models (TERGM) estimated by maximum pseudolikelihood with bootstrapped confidence intervals or Markov Chain Monte Carlo maximum likelihood. Goodness of fit assessment for ERGMs, TERGMs, and SAOMs. Micro-level interpretation of ERGMs and TERGMs.",2020-04-07,Philip Leifeld,http://github.com/leifeld/btergm,TRUE,https://github.com/leifeld/btergm,128371,6,2020-04-06T19:19:02Z,21395.166666666668
BTM,"Biterm Topic Models find topics in collections of short texts.
It is a word co-occurrence based topic model that learns topics by modeling word-word co-occurrences patterns which are called biterms.
This in contrast to traditional topic models like Latent Dirichlet Allocation and Probabilistic Latent Semantic Analysis
which are word-document co-occurrence topic models.
A biterm consists of two words co-occurring in the same short text window.
This context window can for example be a twitter message, a short answer on a survey, a sentence of a text or a document identifier.
The techniques are explained in detail in the paper 'A Biterm Topic Model For Short Text' by Xiaohui Yan, Jiafeng Guo, Yanyan Lan, Xueqi Cheng (2013) <https://github.com/xiaohuiyan/xiaohuiyan.github.io/blob/master/paper/BTM-WWW13.pdf>.",2020-05-02,Jan Wijffels,https://github.com/bnosac/BTM,TRUE,https://github.com/bnosac/btm,10324,47,2020-05-27T18:01:45Z,219.6595744680851
bucky,"Provides functions for various statistical techniques commonly used in the social sciences, including functions to compute clustered robust standard errors, combine results across multiply-imputed data sets, and simplify the addition of robust and clustered robust standard errors.",2019-12-17,Alexander Tahk,http://github.com/atahk/bucky,TRUE,https://github.com/atahk/bucky,13011,6,2019-12-17T19:00:36Z,2168.5
buildmer,"Finds the largest possible regression model that will still converge
for various types of regression analyses (including mixed models and generalized
additive models) and then optionally performs stepwise elimination similar to the
forward and backward effect-selection methods in SAS, based on the change in
log-likelihood or its significance, Akaike's Information Criterion, the Bayesian
Information Criterion, or the explained deviance.",2020-05-27,Cesko C. Voeten,NA,TRUE,https://github.com/cvoeten/buildmer,10016,1,2020-06-07T18:18:51Z,10016
buildr,Working with reproducible reports or any other similar projects often requires to run the script that builds the output file in a specified way. One can become tired from repeatedly switching to the build script and sourcing it. The 'buildr' package does this one simple thing via 'RStudio' addin – user can set up the keyboard shortcut and run the build script with one keystroke anywhere anytime. The second way is to pass buildr() command to console which does the same thing. Both ways source the build.R (case insensitive) file present in the current working directory.,2020-05-12,Jan Netik,https://github.com/netique/buildr,TRUE,https://github.com/netique/buildr,669,1,2020-05-12T10:53:05Z,669
bunching,"Implementation of the bunching estimator for kinks and notches.
Allows for flexible estimation of counterfactual (e.g. controlling for round number bunching, accounting for other bunching masses within bunching window, fixing bunching point to be minimum, maximum or median value in its bin, etc.).
It produces publication-ready plots in the style followed since Chetty et al. (2011) <DOI:10.1093/qje/qjr013>, with lots of functionality to set plot options.",2019-09-23,Panos Mavrokonstantis,http://github.com/mavpanos/bunching,TRUE,https://github.com/mavpanos/bunching,3713,1,2020-05-19T09:48:45Z,3713
bupaR,"Comprehensive Business Process Analysis toolkit. Creates S3-class for event log objects, and related handler functions. Imports related packages for filtering event data, computation of descriptive statistics, handling of 'Petri Net' objects and visualization of process maps. See also packages 'edeaR','processmapR', 'eventdataR' and 'processmonitR'.",2020-01-22,Gert Janssenswillen,"https://www.bupar.net, https://github.com/bupaverse/bupaR",TRUE,https://github.com/bupaverse/bupar,42098,14,2020-04-30T06:52:58Z,3007
burnr,"Tools to read, write, parse, and analyze forest fire history data (e.g. FHX). Described in Malevich et al. (2018) <doi:10.1016/j.dendro.2018.02.005>.",2019-08-21,Steven Malevich,https://github.com/ltrr-arizona-edu/burnr/,TRUE,https://github.com/ltrr-arizona-edu/burnr,23656,8,2020-03-30T18:25:04Z,2957
butcher,Provides a set of five S3 generics to axe components of fitted model objects and help reduce the size of model objects saved to disk.,2020-01-23,Joyce Cahoon,"https://tidymodels.github.io/butcher,
https://github.com/tidymodels/butcher",TRUE,https://github.com/tidymodels/butcher,6314,62,2020-05-14T17:40:15Z,101.83870967741936
BuyseTest,"Implementation of the Generalized Pairwise Comparisons (GPC)
as defined in Buyse (2010) <doi:10.1002/sim.3923> for complete observations,
and extended in Peron (2018) <doi:10.1177/0962280216658320> to deal with right-censoring.
GPC compare two groups of observations (intervention vs. control group)
regarding several prioritized endpoints to estimate the probability that a random observation drawn from
one group performs better than a random observation drawn from the other group (Mann-Whitney parameter).
The net benefit and win ratio statistics,
i.e. the difference and ratio between the probabilities relative to the intervention and control groups,
can then also be estimated. Confidence intervals and p-values are obtained using permutations, a non-parametric bootstrap, or the asymptotic theory.
The software enables the use of thresholds of minimal importance difference,
stratification, non-prioritized endpoints (O'Brien test), and can handle right-censoring and competing-risks.",2020-05-07,Brice Ozenne,https://github.com/bozenne/BuyseTest,TRUE,https://github.com/bozenne/buysetest,20311,1,2020-05-27T13:27:15Z,20311
BVAR,"Estimation of hierarchical Bayesian vector autoregressive models.
Implements hierarchical prior selection for conjugate priors in the fashion
of Giannone, Lenza & Primiceri (2015) <doi:10.1162/REST_a_00483>. Functions
to compute and identify impulse responses, calculate forecasts,
forecast error variance decompositions and scenarios are available.
Several methods to print, plot and summarise results facilitate analysis.",2020-05-05,Nikolas Kuschnig,https://github.com/nk027/bvar,TRUE,https://github.com/nk027/bvar,8466,9,2020-05-11T10:50:51Z,940.6666666666666
bvartools,"Assists in the set-up of algorithms for Bayesian inference of vector autoregressive (VAR) models. Functions for posterior simulation, forecasting, impulse response analysis and forecast error variance decomposition are largely based on the introductory texts of Koop and Korobilis (2010) <doi:10.1561/0800000013> and Luetkepohl (2007, ISBN: 9783540262398). ",2019-08-20,Franz X. Mohr,https://github.com/franzmohr/bvartools,TRUE,https://github.com/franzmohr/bvartools,6318,6,2020-06-03T20:57:04Z,1053
BWStest,"Performs the 'Baumgartner-Weiss-Schindler' two-sample test of equal
probability distributions, <doi:10.2307/2533862>. Also performs
similar rank-based tests for equal probability distributions due to
Neuhauser <doi:10.1080/10485250108832874> and
Murakami <doi:10.1080/00949655.2010.551516>.",2018-10-18,Steven E. Pav,https://github.com/shabbychef/BWStest,TRUE,https://github.com/shabbychef/bwstest,41480,0,2019-09-02T16:25:04Z,NA
bwsTools,"Tools to design best-worst scaling designs (i.e., balanced incomplete block designs) and
to analyze data from these designs, using aggregate and individual methods such as: difference
scores, Louviere, Lings, Islam, Gudergan, & Flynn (2013) <doi:10.1016/j.ijresmar.2012.10.002>;
analytical estimation, Lipovetsky & Conklin (2014) <doi:10.1016/j.jocm.2014.02.001>; empirical
Bayes, Lipovetsky & Conklin (2015) <doi:10.1142/S1793536915500028>; Elo, Hollis (2018)
<doi:10.3758/s13428-017-0898-2>; and network-based measures.",2020-03-19,Mark White,https://github.com/markhwhiteii/bwsTools,TRUE,https://github.com/markhwhiteii/bwstools,2598,3,2020-06-09T01:35:57Z,866
bysykkel,"Functions to get and download city bike data from
the website and API service of each city bike service in Norway. The
package aims to reduce time spent on getting Norwegian city bike data,
and lower barriers to start analyzing it. The data is retrieved from
Oslo City Bike, Bergen City Bike, and Trondheim City Bike. The data is
made available under NLOD 2.0 <https://data.norge.no/nlod/en/2.0>.",2020-04-19,Iman Ghayoornia,http://github.com/imangR/bysykkel,TRUE,https://github.com/imangr/bysykkel,6294,0,2020-04-19T14:02:25Z,NA
c14bazAAR,"Query different C14 date databases and apply basic data cleaning, merging and calibration steps.",2020-01-12,Clemens Schmid,"https://docs.ropensci.org/c14bazAAR,
https://github.com/ropensci/c14bazAAR",TRUE,https://github.com/ropensci/c14bazaar,8334,19,2020-04-23T13:07:10Z,438.63157894736844
c3,"Create interactive charts with the 'C3.js' <http://c3js.org/> charting library. All plot
types in 'C3.js' are available and include line, bar, scatter, and mixed geometry plots. Plot
annotations, labels and axis are highly adjustable. Interactive web based charts can be embedded
in R Markdown documents or Shiny web applications. ",2020-03-16,Matt Johnson,https://github.com/mrjoh3/c3,TRUE,https://github.com/mrjoh3/c3,10613,36,2020-03-16T13:02:49Z,294.80555555555554
C50,"C5.0 decision trees and rule-based models for pattern recognition that extend the work of Quinlan (1993, ISBN:1-55860-238-0).",2020-05-26,Max Kuhn,https://topepo.github.io/C5.0,TRUE,https://github.com/topepo/c5.0,525329,40,2020-01-09T20:20:51Z,13133.225
CAISEr,"Functions for performing experimental comparisons of algorithms
using adequate sample sizes for power and accuracy. Implements the
methodology originally presented in Campelo and Takahashi (2019)
<doi:10.1007/s10732-018-9396-7>
for the comparison of two algorithms, and later generalised in
Campelo and Wanner (Submitted, 2019) <arxiv:1908.01720>.",2020-02-04,Felipe Campelo,https://fcampelo.github.io/CAISEr/,TRUE,https://github.com/fcampelo/caiser,12204,1,2020-02-04T10:08:04Z,12204
calculus,"Efficient C++ optimized functions for numerical and symbolic calculus. It includes basic symbolic arithmetic, tensor calculus, Einstein summing convention, fast computation of the Levi-Civita symbol and generalized Kronecker delta, Taylor series expansion, multivariate Hermite polynomials, accurate high-order derivatives, differential operators (Gradient, Jacobian, Hessian, Divergence, Curl, Laplacian) and numerical integration in arbitrary orthogonal coordinate systems: cartesian, polar, spherical, cylindrical, parabolic or user defined by custom scale factors. ",2020-03-23,Emanuele Guidotti,https://github.com/emanuele-guidotti/calculus,TRUE,https://github.com/emanuele-guidotti/calculus,4391,24,2020-05-20T23:42:33Z,182.95833333333334
calcUnique,"This is a one-function package that will pass only unique values to a computationally-expensive function that returns an output of the same length as the input.
In importing and working with tidy data, it is common to have index columns, often including time stamps that are far from unique. Some functions to work with these such as text conversion to other variable types (e.g. as.POSIXct()), various grep()-based functions, and often the cut() function are relatively slow when working with tens of millions of rows or more.",2020-05-04,Stephen Froehlich,https://github.com/stephenbfroehlich/calcUnique,TRUE,https://github.com/stephenbfroehlich/calcunique,691,0,2020-05-04T18:45:11Z,NA
calibrar,"Automated parameter estimation for complex (ecological) models in R.
This package allows the parameter estimation or calibration of complex models,
including stochastic ones. It is a generic tool that can be used for fitting
any type of models, especially those with non-differentiable objective functions.
It supports multiple phases and constrained optimization.
It implements maximum likelihood estimation methods and automated construction
of the objective function from simulated model outputs.
See <http://roliveros-ramos.github.io/calibrar> for more details.",2016-02-17,Ricardo Oliveros-Ramos,http://roliveros-ramos.github.io/calibrar,TRUE,https://github.com/roliveros-ramos/calibrar,14720,4,2020-02-04T01:50:10Z,3680
calibrator,"Performs Bayesian calibration of computer models as per
Kennedy and O'Hagan 2001. The package includes routines to find the
hyperparameters and parameters; see the help page for stage1() for a
worked example using the toy dataset. A tutorial is provided in the
calex.Rnw vignette; and a suite of especially simple one dimensional
examples appears in inst/doc/one.dim/.",2019-03-07,Robin K. S. Hankin,https://github.com/RobinHankin/calibrator.git,TRUE,https://github.com/robinhankin/calibrator,35397,1,2020-05-05T21:26:27Z,35397
calmate,A multi-array post-processing method of allele-specific copy-number estimates (ASCNs).,2015-10-27,Henrik Bengtsson,https://github.com/HenrikBengtsson/calmate/,TRUE,https://github.com/henrikbengtsson/calmate,20755,0,2019-12-09T00:29:10Z,NA
camsRad,"Copernicus Atmosphere Monitoring Service (CAMS) radiations service
provides time series of global, direct, and diffuse irradiations on horizontal
surface, and direct irradiation on normal plane for the actual weather
conditions as well as for clear-sky conditions.
The geographical coverage is the field-of-view of the Meteosat satellite,
roughly speaking Europe, Africa, Atlantic Ocean, Middle East. The time coverage
of data is from 2004-02-01 up to 2 days ago. Data are available with a time step
ranging from 15 min to 1 month. For license terms and to create an account,
please see <http://www.soda-pro.com/web-services/radiation/cams-radiation-service>. ",2016-11-30,Lukas Lundstrom,https://github.com/ropenscilabs/camsRad,TRUE,https://github.com/ropenscilabs/camsrad,12475,8,2019-12-09T12:16:03Z,1559.375
camtrapR,"Management of and data extraction from camera trap data in wildlife studies. The package provides a workflow for storing and sorting camera trap photos (and videos), tabulates records of species and individuals, and creates detection/non-detection matrices for occupancy and spatial capture-recapture analyses with great flexibility. In addition, it can visualise species activity data and provides simple mapping functions with GIS export.",2020-04-23,Juergen Niedballa,"https://github.com/jniedballa/camtrapR,
https://jniedballa.github.io/camtrapR,
https://groups.google.com/forum/#!forum/camtrapr",TRUE,https://github.com/jniedballa/camtrapr,47112,2,2020-05-25T19:10:24Z,23556
cancensus,"Integrated, convenient, and uniform access to Canadian
Census data and geography retrieved using the 'CensusMapper' API. This package produces analysis-ready
tidy data frames and spatial data in multiple formats, as well as convenience functions
for working with Census variables, variable hierarchies, and region selection. API
keys are freely available with free registration at <https://censusmapper.ca/api>.
Census data and boundary geometries are reproduced and distributed on an ""as
is"" basis with the permission of Statistics Canada (Statistics Canada 2001; 2006;
2011; 2016).",2020-05-12,Jens von Bergmann,"https://github.com/mountainMath/cancensus,
https://mountainmath.github.io/cancensus/,
https://censusmapper.ca/api",TRUE,https://github.com/mountainmath/cancensus,15416,45,2020-05-30T09:12:48Z,342.5777777777778
candisc,"Functions for computing and visualizing
generalized canonical discriminant analyses and canonical correlation analysis
for a multivariate linear model.
Traditional canonical discriminant analysis is restricted to a one-way 'MANOVA'
design and is equivalent to canonical correlation analysis between a set of quantitative
response variables and a set of dummy variables coded from the factor variable.
The 'candisc' package generalizes this to higher-way 'MANOVA' designs
for all factors in a multivariate linear model,
computing canonical scores and vectors for each term. The graphic functions provide low-rank (1D, 2D, 3D)
visualizations of terms in an 'mlm' via the 'plot.candisc' and 'heplot.candisc' methods. Related plots are
now provided for canonical correlation analysis when all predictors are quantitative.",2020-04-22,Michael Friendly,NA,TRUE,https://github.com/friendly/candisc,159436,2,2020-05-17T17:06:12Z,79718
Canopy,"A statistical framework and computational procedure for identifying
the sub-populations within a tumor, determining the mutation profiles of each
subpopulation, and inferring the tumor's phylogenetic history. The input are
variant allele frequencies (VAFs) of somatic single nucleotide alterations
(SNAs) along with allele-specific coverage ratios between the tumor and matched
normal sample for somatic copy number alterations (CNAs). These quantities can
be directly taken from the output of existing software. Canopy provides a
general mathematical framework for pooling data across samples and sites to
infer the underlying parameters. For SNAs that fall within CNA regions, Canopy
infers their temporal ordering and resolves their phase. When there are
multiple evolutionary configurations consistent with the data, Canopy outputs
all configurations along with their confidence assessment.",2017-12-18,Yuchao Jiang,https://github.com/yuchaojiang/Canopy,TRUE,https://github.com/yuchaojiang/canopy,17935,42,2019-06-19T14:45:10Z,427.0238095238095
canprot,"Compositional analysis of differentially expressed proteins in
cancer and cell culture proteomics experiments. The data include lists of up-
and down-regulated proteins in different cancer types (breast, colorectal,
liver, lung, pancreatic, prostate) and laboratory conditions (hypoxia,
hyperosmotic stress, high glucose, 3D cell culture, and proteins secreted in
hypoxia), together with amino acid compositions computed for protein sequences
obtained from UniProt. Functions are provided to calculate compositional metrics
including protein length, carbon oxidation state, and stoichiometric hydration
state. In addition, phylostrata (evolutionary ages) of protein-coding genes are
compiled using data from Liebeskind et al. (2016) <doi:10.1093/gbe/evw113> or
Trigos et al. (2017) <doi:10.1073/pnas.1617743114>. The vignettes contain
plots of compositional differences, phylostrata for human proteins, and
references for all datasets.",2020-05-11,Jeffrey Dick,http://github.com/jedick/canprot,TRUE,https://github.com/jedick/canprot,12453,2,2020-06-08T01:02:46Z,6226.5
cansim,"Searches for, accesses, and retrieves new-format and old-format Statistics Canada data
tables, as well as individual vectors, as tidy data frames. This package deals with encoding issues, allows for
bilingual English or French language data retrieval, and bundles convenience functions
to make it easier to work with retrieved table data. Optional caching features are provided.",2020-03-13,Jens von Bergmann,"https://github.com/mountainMath/cansim,
https://mountainmath.github.io/cansim/",TRUE,https://github.com/mountainmath/cansim,10903,19,2020-05-13T01:51:44Z,573.8421052631579
canvasXpress,"Enables creation of visualizations using the CanvasXpress framework
in R. CanvasXpress is a standalone JavaScript library for reproducible research
with complete tracking of data and end-user modifications stored in a single
PNG image that can be played back. See <https://www.canvasxpress.org> for more
information.",2020-04-11,Connie Brett,https://github.com/neuhausi/canvasXpress.git,TRUE,https://github.com/neuhausi/canvasxpress,47364,233,2020-06-02T19:58:25Z,203.27896995708156
canvasXpress.data,"Contains the prepared data that is needed for the 'shiny' application examples in the
'canvasXpress' package. This package also includes datasets used for automated 'testthat' tests.
Scotto L, Narayan G, Nandula SV, Arias-Pulido H et al. (2008) <doi:10.1002/gcc.20577>.
Davis S, Meltzer PS (2007) <doi:10.1093/bioinformatics/btm254>.",2020-05-19,Connie Brett,https://github.com/neuhausi/canvasXpress.data.git,TRUE,https://github.com/neuhausi/canvasxpress.data,1725,0,2020-05-19T21:40:42Z,NA
captioner,"Provides a method for automatically numbering figures,
tables, or other objects. Captions can be displayed in full, or as citations.
This is especially useful for adding figures and tables to R markdown
documents without having to numbering them manually.",2015-07-16,Letaw Alathea,https://github.com/adletaw/captioner,TRUE,https://github.com/adletaw/captioner,35044,101,2020-02-13T19:26:46Z,346.970297029703
caracas,"Computer algebra via the 'SymPy' library (<https://www.sympy.org/>).
This makes it possible to solve equations symbolically,
find symbolic integrals, symbolic sums and other important quantities. ",2020-05-21,Mikkel Meyer Andersen,https://github.com/r-cas/caracas,TRUE,https://github.com/r-cas/caracas,2095,6,2020-06-08T13:03:28Z,349.1666666666667
caRamel,"Multi-objective optimizer initially developed for the calibration of hydrological models.
The algorithm is a hybrid of the MEAS algorithm (Efstratiadis and Koutsoyiannis (2005) <doi:10.13140/RG.2.2.32963.81446>) by using the directional search method based on the simplexes of the objective space
and the epsilon-NGSA-II algorithm with the method of classification of the parameter vectors archiving management by epsilon-dominance (Reed and Devireddy <doi:10.1142/9789812567796_0004>).",2019-05-28,Fabrice Zaoui,https://github.com/fzao/caRamel,TRUE,https://github.com/fzao/caramel,10586,1,2019-09-30T13:10:04Z,10586
CARBayes,"Implements a class of univariate and multivariate spatial generalised linear mixed models for areal unit data, with inference in a Bayesian setting using Markov chain Monte Carlo (MCMC) simulation. The response variable can be binomial, Gaussian, multinomial, Poisson or zero-inflated Poisson (ZIP), and spatial autocorrelation is modelled by a set of random effects that are assigned a conditional autoregressive (CAR) prior distribution. A number of different models are available for univariate spatial data, including models with no random effects as well as random effects modelled by different types of CAR prior, including the BYM model (Besag et al. (1991) <doi:10.1007/BF00116466>), the Leroux model (Leroux et al. (2000) <doi:10.1007/978-1-4612-1284-3_4>) and the localised model (Lee et al. (2015) <doi:10.1002/env.2348>). Additionally, a multivariate CAR (MCAR) model for multivariate spatial data is available, as is a two-level hierarchical model for modelling data relating to individuals within areas. Full details are given in the vignette accompanying this package. The initial creation of this package was supported by the Economic and Social Research Council (ESRC) grant RES-000-22-4256, and on-going development has been supported by the Engineering and Physical Science Research Council (EPSRC) grant EP/J017442/1, ESRC grant ES/K006460/1, Innovate UK / Natural Environment Research Council (NERC) grant NE/N007352/1 and the TB Alliance. ",2020-03-13,Duncan Lee,http://github.com/duncanplee/CARBayes,TRUE,https://github.com/duncanplee/carbayes,103977,3,2020-03-13T08:29:50Z,34659
CARBayesST,"Implements a class of spatio-temporal generalised linear mixed models for areal unit data, with inference in a Bayesian setting using Markov chain Monte Carlo (MCMC) simulation. The response variable can be binomial, Gaussian, or Poisson, but for some models only the binomial and Poisson data likelihoods are available. The spatio-temporal autocorrelation is modelled by random effects, which are assigned conditional autoregressive (CAR) style prior distributions. A number of different random effects structures are available, including models similar to Bernardinelli et al. (1995) <doi:10.1002/sim.4780142112>, Rushworth et al. (2014) <doi:10.1016/j.sste.2014.05.001> and Lee et al. (2016) <doi:10.1214/16-AOAS941>. Full details are given in the vignette accompanying this package. The creation of this package was supported by the Engineering and Physical Sciences Research Council (EPSRC) grant EP/J017442/1 and the Medical Research Council (MRC) grant MR/L022184/1.",2020-03-09,Duncan Lee,http://github.com/duncanplee/CARBayesST,TRUE,https://github.com/duncanplee/carbayesst,36116,4,2020-03-06T12:50:52Z,9029
carbonate,"Create beautiful images of source code using
'carbon.js'<https://carbon.now.sh/about>.",2020-02-07,Jonathan Sidi,https://github.com/yonicd/carbonate,TRUE,https://github.com/yonicd/carbonate,9488,143,2020-05-30T04:56:55Z,66.34965034965035
caret,"Misc functions for training and plotting classification and
regression models.",2020-03-20,Max Kuhn,https://github.com/topepo/caret/,TRUE,https://github.com/topepo/caret,6106514,1212,2020-03-20T03:07:25Z,5038.377887788779
caretEnsemble,"Functions for creating ensembles of caret models: caretList()
and caretStack(). caretList() is a convenience function for fitting multiple
caret::train() models to the same dataset. caretStack() will make linear or
non-linear combinations of these models, using a caret::train() model as a
meta-model, and caretEnsemble() will make a robust linear combination of
models using a GLM.",2019-12-12,Zachary A. Deane-Mayer,https://github.com/zachmayer/caretEnsemble,TRUE,https://github.com/zachmayer/caretensemble,131548,210,2020-05-01T11:15:12Z,626.4190476190477
Carlson,"Evaluation of the Carlson elliptic integrals and the incomplete elliptic integrals with complex arguments. The implementations use Carlson's algorithms <doi.org/10.1007/BF02198293>. Applications of elliptic integrals include probability distributions, geometry, physics, mechanics, electrodynamics, statistical mechanics, astronomy, geodesy, geodesics on conics, and magnetic field calculations.",2020-03-04,Stéphane Laurent,https://github.com/stla/Carlson,TRUE,https://github.com/stla/carlson,1681,0,2020-02-26T09:40:27Z,NA
cartograflow,"Functions to prepare and filter an origin-destination matrix for thematic flow mapping purposes.
This comes after Bahoken, Francoise (2016), Mapping flow matrix a contribution, PhD in Geography - Territorial sciences. See Bahoken (2017) <doi:10.4000/netcom.2565>.",2020-06-03,Sylvain Blondeau,https://github.com/fbahoken/cartogRaflow,TRUE,https://github.com/fbahoken/cartograflow,5109,6,2020-06-05T20:59:34Z,851.5
cartogram,Construct continuous and non-contiguous area cartograms.,2019-12-07,Sebastian Jeworutzki,https://github.com/sjewo/cartogram,TRUE,https://github.com/sjewo/cartogram,118226,91,2020-02-18T19:58:56Z,1299.1868131868132
cartography,"Create and integrate maps in your R workflow. This package helps
to design cartographic representations such as proportional symbols,
choropleth, typology, flows or discontinuities maps. It also offers several
features that improve the graphic presentation of maps, for instance, map
palettes, layout elements (scale, north arrow, title...), labels or legends.
See Giraud and Lambert (2017) <doi:10.1007/978-3-319-57336-6_13>.",2020-04-20,Timothée Giraud,https://github.com/riatelab/cartography/,TRUE,https://github.com/riatelab/cartography,103149,329,2020-06-09T11:07:13Z,313.5227963525836
Cascade,"A modeling tool allowing gene selection, reverse engineering, and prediction in cascade networks. Jung, N., Bertrand, F., Bahram, S., Vallat, L., and Maumy-Bertrand, M. (2014) <doi:10.1093/bioinformatics/btt705>.",2019-08-24,Frederic Bertrand,"http://www-irma.u-strasbg.fr/~fbertran/,
https://github.com/fbertran/Cascade",TRUE,https://github.com/fbertran/cascade,7323,1,2019-10-01T10:30:34Z,7323
CascadeData,"These experimental expression data (5 leukemic 'CLL' B-lymphocyte of aggressive form from 'GSE39411', <doi:10.1073/pnas.1211130110>), after B-cell receptor stimulation, are used as examples by packages such as the 'Cascade' one, a modeling tool allowing gene selection, reverse engineering, and prediction in cascade networks. Jung, N., Bertrand, F., Bahram, S., Vallat, L., and Maumy-Bertrand, M. (2014) <doi:10.1093/bioinformatics/btt705>.",2019-02-07,Frederic Bertrand,"http://www-irma.u-strasbg.fr/~fbertran/,
https://github.com/fbertran/CascadeData",TRUE,https://github.com/fbertran/cascadedata,7343,1,2019-10-01T10:35:07Z,7343
casebase,"Implements the case-base sampling approach of Hanley and Miettinen (2009) <DOI:10.2202/1557-4679.1125>,
Saarela and Arjas (2015) <DOI:10.1111/sjos.12125>, and Saarela (2015) <DOI:10.1007/s10985-015-9352-x>, for fitting flexible hazard
regression models to survival data with single event type or multiple competing causes via logistic and multinomial regression.
From the fitted hazard function, cumulative incidence, risk functions of time, treatment and profile
can be derived. This approach accommodates any log-linear hazard function of prognostic time, treatment,
and covariates, and readily allows for non-proportionality. We also provide a plot method for visualizing
incidence density via population time plots.",2017-04-28,Sahir Bhatnagar,http://sahirbhatnagar.com/casebase/,TRUE,https://github.com/sahirbhatnagar/casebase,11289,4,2020-05-28T15:59:42Z,2822.25
casen,"Funciones para realizar estadistica descriptiva e inferencia con el
disenio complejo de la Encuesta CASEN (Encuesta de Caracterizacion
Socio-Economica). Incluye datasets que permiten armonizar los codigos de
comunas que cambian entre anios y permite convertir a los codigos oficiales de
SUBDERE.
(Functions to compute descriptive and inferential statistics with CASEN
Survey [Socio-Economic Characterization Survey] complex design. Includes
datasets to harmonize commune codes that change across years and allows to
convert to official SUBDERE codes.)",2020-04-08,Mauricio Vargas,https://pachamaltese.github.io/casen/,TRUE,https://github.com/pachamaltese/casen,1755,3,2020-04-08T04:39:39Z,585
CAST,"Supporting functionality to run 'caret' with spatial or spatial-temporal data. 'caret' is a frequently used package for model training and prediction using machine learning. This package includes functions to improve spatial-temporal modelling tasks using 'caret'. It prepares data for Leave-Location-Out and Leave-Time-Out cross-validation which are target-oriented validation strategies for spatial-temporal models. To decrease overfitting and improve model performances, the package implements a forward feature selection that selects suitable predictor variables in view to their contribution to the target-oriented performance.",2020-05-19,Hanna Meyer,https://github.com/HannaMeyer/CAST,TRUE,https://github.com/hannameyer/cast,17071,29,2020-06-05T12:29:45Z,588.6551724137931
cat.dt,"Implements the Merged Tree-CAT method (Javier Rodriguez-Cuadrado et al., 2020, <doi:10.1016/j.eswa.2019.113066>) to generate Computerized Adaptive Tests (CATs) based on a decision tree. The tree growth is controlled by merging branches with similar trait distributions and estimations. This package has the necessary tools for creating CATs and estimate the subject's ability level. ",2020-04-23,Javier Rodriguez-Cuadrado,https://github.com/jlaria/cat.dt,TRUE,https://github.com/jlaria/cat.dt,4353,1,2020-05-02T16:20:13Z,4353
categoryEncodings,"Simple, fast, and automatic encodings for category data using
a data.table backend. Most of the methods are an implementation
of ""Sufficient Representation for Categorical Variables"" by
Johannemann, Hadad, Athey, Wager (2019) <arXiv:1908.09874>,
particularly their mean, sparse principal component analysis,
low rank representation, and multinomial logit encodings.",2020-03-02,Juraj Szitas,https://github.com/JSzitas/categoryEncodings,TRUE,https://github.com/jszitas/categoryencodings,1714,0,2020-01-30T17:08:53Z,NA
cati,"Detect and quantify community assembly processes using trait values of individuals or populations, the T-statistics (Violle et al. (2012) <doi:10.1016/j.tree.2011.11.014>) and other metrics, and dedicated null models described in Taudiere & Violle (2016) <doi:10.1111/ecog.01433>.",2020-03-02,Adrien Taudiere,https://github.com/adrientaudiere/cati,TRUE,https://github.com/adrientaudiere/cati,23166,7,2019-12-16T14:34:36Z,3309.4285714285716
catsim,"Computes a structural similarity metric (after the style of
MS-SSIM for images) for binary and categorical 2D and 3D images. Can be
based on accuracy (simple matching), Cohen's kappa, Rand index, adjusted
Rand index, Jaccard index, Dice index, normalized mutual information, or
adjusted mutual information. In addition, has fast computation
of Cohen's kappa, the Rand indices, and the two mutual informations.
Implements the methods of Thompson and Maitra (2020) <arXiv:2004.09073>.",2020-05-06,Geoffrey Thompson,"http://github.com/gzt/catsim, https://gzt.github.io/catsim",TRUE,https://github.com/gzt/catsim,793,2,2020-05-06T03:46:17Z,396.5
catSurv,"Provides methods of computerized adaptive testing for survey researchers. See Montgomery and Rossiter (2019) <doi:10.1093/jssam/smz027>. Includes functionality for data fit with the classic item response methods including the latent trait model, Birnbaum`s three parameter model, the graded response, and the generalized partial credit model. Additionally, includes several ability parameter estimation and item selection routines. During item selection, all calculations are done in compiled C++ code.",2019-12-09,Erin Rossiter,NA,TRUE,https://github.com/erossiter/catsurv,11590,6,2019-12-09T20:29:30Z,1931.6666666666667
cattonum,"Functions for aggregate encoding, dummy encoding,
frequency encoding, label encoding, leave-one-out encoding,
mean encoding, median encoding, and one-hot encoding.",2020-02-09,Bernie Gray,https://github.com/bfgray3/cattonum,TRUE,https://github.com/bfgray3/cattonum,11674,29,2020-06-07T00:30:51Z,402.55172413793105
causaloptim,"When causal quantities are not identifiable from the observed data, it still may be possible
to bound these quantities using the observed data. We outline a class of problems for which the
derivation of tight bounds is always a linear programming problem and can therefore, at least
theoretically, be solved using a symbolic linear optimizer. We extend and generalize the
approach of Balke and Pearl (1994) <doi:10.1016/B978-1-55860-332-5.50011-0> and we provide
a user friendly graphical interface for setting up such problems via directed acyclic
graphs (DAG), which only allow for problems within this class to be depicted. The user can
then define linear constraints to further refine their assumptions to meet their specific
problem, and then specify a causal query using a text interface. The program converts this
user defined DAG, query, and constraints, and returns tight bounds. The bounds can be
converted to R functions to evaluate them for specific datasets, and to latex code for
publication. The methods and proofs of tightness and validity of the bounds are described in
a preprint by Sachs, Gabriel, and Sjölander (2020)
<https://sachsmc.github.io/causaloptim/articles/CausalBoundsMethods.pdf>.",2020-05-07,Michael C Sachs,https://github.com/sachsmc/causaloptim,TRUE,https://github.com/sachsmc/causaloptim,1746,7,2020-05-07T14:36:26Z,249.42857142857142
CAWaR,"Tools to process ground-truth data on crop types and perform a phenology based crop type classification. These tools were developed in the scope of the CAWa project and extend on the work of Conrad et al. (2011) <doi:10.1080/01431161.2010.550647>. Moreover, they introduce an innovative classification and validation scheme that utilizes spatially independent samples as proposed by Remelgado et al. (2017) <doi:10.1002/rse2.70>.",2020-06-04,Ruben Remelgado,https://github.com/RRemelgado/fieldRS/,TRUE,https://github.com/rremelgado/fieldrs,2724,10,2020-06-02T13:27:19Z,272.4
CBDA,"Classification performed on Big Data. It uses concepts from compressive sensing, and implements ensemble predictor (i.e., 'SuperLearner') and knockoff filtering as the main machine learning and feature mining engines.",2018-04-16,Simeone Marino,https://github.com/SOCR/CBDA,TRUE,https://github.com/socr/cbda,8576,12,2020-01-23T00:51:37Z,714.6666666666666
cbsodataR,"The data and meta data from Statistics
Netherlands (<https://www.cbs.nl>) can be browsed and downloaded. The client uses
the open data API of Statistics Netherlands.",2020-02-20,Edwin de Jonge,https://github.com/edwindj/cbsodataR,TRUE,https://github.com/edwindj/cbsodatar,26311,15,2020-05-27T21:51:48Z,1754.0666666666666
ccafs,"Client for Climate Change, Agriculture, and Food Security ('CCAFS')
General Circulation Models ('GCM') data. Data is stored in Amazon 'S3', from
which we provide functions to fetch data.",2017-02-24,Scott Chamberlain,https://github.com/ropensci/ccafs,TRUE,https://github.com/ropensci/ccafs,12556,10,2019-12-09T12:18:13Z,1255.6
CCAMLRGIS,"Loads and creates spatial data, including layers and tools that are relevant
to the activities of the Commission for the Conservation of Antarctic Marine Living
Resources. Provides two categories of functions: load functions and create functions.
Load functions are used to import existing spatial layers from the online CCAMLR GIS
such as the ASD boundaries. Create functions are used to create layers from user data
such as polygons and grids.",2020-06-07,Stephane Thanassekos,https://github.com/ccamlr/CCAMLRGIS,TRUE,https://github.com/ccamlr/ccamlrgis,2600,3,2020-06-06T11:11:19Z,866.6666666666666
cchsflow,"Supporting the use of the Canadian Community Health Survey
(CCHS) by transforming variables from each cycle into harmonized,
consistent versions that span survey cycles (currently, 2001 to
2014). CCHS data used in this library is accessed and adapted in
accordance to the Statistics Canada Open Licence Agreement. This
package uses rec_with_table(), which was developed from 'sjmisc'
rec(). Lüdecke D (2018). ""sjmisc: Data and Variable Transformation
Functions"". Journal of Open Source Software, 3(26), 754.
<doi:10.21105/joss.00754>.",2020-03-30,Doug Manuel,https://github.com/Big-Life-Lab/cchsflow,TRUE,https://github.com/big-life-lab/cchsflow,2334,8,2020-03-30T16:10:11Z,291.75
cdata,"Supplies higher-order coordinatized data specification and fluid transform operators that include pivot and anti-pivot as special cases.
The methodology is describe in 'Zumel', 2018, ""Fluid data reshaping with 'cdata'"", <http://winvector.github.io/FluidData/FluidDataReshapingWithCdata.html> , doi:10.5281/zenodo.1173299 .
This package introduces the idea of explicit control table specification of data transforms.
Works on in-memory data or on remote data using 'rquery' and 'SQL' database interfaces.",2020-02-01,John Mount,"https://github.com/WinVector/cdata/,
https://winvector.github.io/cdata/",TRUE,https://github.com/winvector/cdata,69573,40,2020-02-15T17:51:40Z,1739.325
cdcfluview,"The 'U.S.' Centers for Disease Control and Prevention (CDC) maintain
a portal <https://gis.cdc.gov/grasp/fluview/fluportaldashboard.html> for
accessing state, regional and national influenza statistics as well as
mortality surveillance data. The web interface makes it difficult and
time-consuming to select and retrieve influenza data. Tools are provided
to access the data provided by the portal's underlying 'API'.",2020-04-02,Bob Rudis,https://github.com/hrbrmstr/cdcfluview,TRUE,https://github.com/hrbrmstr/cdcfluview,23424,44,2020-04-01T19:57:14Z,532.3636363636364
cdcsis,"Conditional distance correlation <doi:10.1080/01621459.2014.993081> is a novel conditional dependence measurement of two multivariate random variables given a confounding variable. This package provides conditional distance correlation, performs the conditional distance correlation sure independence screening procedure for ultrahigh dimensional data <http://www3.stat.sinica.edu.tw/statistica/J28N1/J28N114/J28N114.html>, and conducts conditional distance covariance test for conditional independence assumption of two multivariate variable.",2019-07-10,Wenhao Hu,https://github.com/Mamba413/cdcsis,TRUE,https://github.com/mamba413/cdcsis,20105,1,2019-07-11T02:07:51Z,20105
cde,"Facilitates searching, download and plotting of Water Framework
Directive (WFD) reporting data for all waterbodies within the UK Environment
Agency area. The types of data that can be downloaded are: WFD status
classification data, Reasons for Not Achieving Good (RNAG) status,
objectives set for waterbodies, measures put in place to improve water
quality and details of associated protected areas. The site accessed is
<https://environment.data.gov.uk/catchment-planning/>. The data are made
available under the Open Government Licence v3.0
<https://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/>.
This package has been peer-reviewed by rOpenSci (v. 0.4.0).",2019-09-04,Rob Briers,https://github.com/ropensci/cde,TRUE,https://github.com/ropensci/cde,3845,4,2020-02-06T12:57:53Z,961.25
cder,"Connect to the California Data Exchange Center (CDEC)
Web Service <http://cdec.water.ca.gov/>. 'CDEC' provides a centralized
database to store, process, and exchange real-time hydrologic information
gathered by various cooperators throughout California. The 'CDEC' Web Service
<http://cdec.water.ca.gov/dynamicapp/wsSensorData> provides a data download
service for accessing historical records. ",2020-01-24,Michael Koohafkan,https://github.com/mkoohafkan/cder,TRUE,https://github.com/mkoohafkan/cder,4643,1,2020-01-24T17:59:37Z,4643
CDM,"
Functions for cognitive diagnosis modeling and multidimensional item response modeling
for dichotomous and polytomous item responses. This package enables the estimation of
the DINA and DINO model (Junker & Sijtsma, 2001, <doi:10.1177/01466210122032064>),
the multiple group (polytomous) GDINA model (de la Torre, 2011,
<doi:10.1007/s11336-011-9207-7>), the multiple choice DINA model (de la Torre, 2009,
<doi:10.1177/0146621608320523>), the general diagnostic model (GDM; von Davier, 2008,
<doi:10.1348/000711007X193957>), the structured latent class model (SLCA; Formann, 1992,
<doi:10.1080/01621459.1992.10475229>) and regularized latent class analysis
(Chen, Li, Liu, & Ying, 2017, <doi:10.1007/s11336-016-9545-6>).
See George, Robitzsch, Kiefer, Gross, and Uenlue (2017) <doi:10.18637/jss.v074.i02>
or Robitzsch and George (2019, <doi:10.1007/978-3-030-05584-4_26>)
for further details on estimation and the package structure.
For tutorials on how to use the CDM package see
George and Robitzsch (2015, <doi:10.20982/tqmp.11.3.p189>) as well as
Ravand and Robitzsch (2015).",2020-03-10,Alexander Robitzsch,"https://github.com/alexanderrobitzsch/CDM,
https://sites.google.com/site/alexanderrobitzsch2/software",TRUE,https://github.com/alexanderrobitzsch/cdm,231327,9,2020-03-11T14:58:36Z,25703
cdom,Wrapper functions to model and extract various quantitative information from absorption spectra of chromophoric dissolved organic matter (CDOM).,2016-03-04,Philippe Massicotte,https://github.com/PMassicotte/cdom,TRUE,https://github.com/pmassicotte/cdom,15408,4,2020-04-13T23:21:58Z,3852
censusapi,"A wrapper for the U.S. Census Bureau APIs that returns data frames of
Census data and metadata. Available datasets include the
Decennial Census, American Community Survey, Small Area Health Insurance Estimates,
Small Area Income and Poverty Estimates, Population Estimates and Projections, and more.",2019-04-13,Hannah Recht,https://github.com/hrecht/censusapi,TRUE,https://github.com/hrecht/censusapi,54265,100,2020-03-28T14:37:14Z,542.65
censusxy,"Provides access to the U.S. Census Bureau's A.P.I for matching American
street addresses with their longitude and latitude. This includes both single address matching
as well as batch functionality for multiple addresses. Census geographies can be appended to
addresses if desired, and reverse geocoding of point locations to census geographies is also
supported. ",2020-05-28,Christopher Prener,https://github.com/slu-openGIS/censusxy,TRUE,https://github.com/slu-opengis/censusxy,4609,6,2020-05-28T18:46:28Z,768.1666666666666
CePa,Use pathway topology information to assign weight to pathway nodes.,2020-02-25,Zuguang Gu,https://github.com/jokergoo/CePa,TRUE,https://github.com/jokergoo/cepa,25818,0,2020-05-23T20:01:40Z,NA
cepR,"
Retorna detalhes de dados de CEPs brasileiros, bairros, logradouros
e tal. (Returns info of Brazilian postal codes, city names, addresses
and so on.)",2020-06-02,Robert Myles McDonnell,https://github.com/RobertMyles/cepR,TRUE,https://github.com/robertmyles/cepr,10812,13,2020-06-02T15:29:11Z,831.6923076923077
cepreader,"Read Condensed Cornell Ecology Program ('CEP') and legacy
'CANOCO' files into R data frames.",2019-05-08,Jari Oksanen,"https://cran.r-project.org/,
https://github.com/vegandevs/cepreader/",TRUE,https://github.com/vegandevs/cepreader,9737,0,2020-02-06T12:46:34Z,NA
ceramic,"Download imagery tiles to a standard cache and load the data into raster objects.
Facilities for 'AWS' terrain <https://aws.amazon.com/public-datasets/terrain/> terrain and 'Mapbox'
<https://www.mapbox.com/> servers are provided. ",2019-07-20,Michael Sumner,https://github.com/hypertidy/ceramic,TRUE,https://github.com/hypertidy/ceramic,4260,64,2020-06-07T08:05:54Z,66.5625
cetcolor,"Collection of perceptually uniform colour maps made by Peter Kovesi
(2015) ""Good Colour Maps: How to Design Them"" <arXiv:1509.03700>
at the Centre for Exploration Targeting (CET).",2018-07-10,James Balamuta,"https://github.com/coatless/cetcolor,
http://thecoatlessprofessor.com/projects/cetcolor/,
http://peterkovesi.com/projects/colourmaps/",TRUE,https://github.com/coatless/cetcolor,11226,22,2020-01-07T19:51:51Z,510.27272727272725
ceterisParibus,"Ceteris Paribus Profiles (What-If Plots) are designed to present model
responses around selected points in a feature space.
For example around a single prediction for an interesting observation.
Plots are designed to work in a model-agnostic fashion, they are working
for any predictive Machine Learning model and allow for model comparisons.
Ceteris Paribus Plots supplement the Break Down Plots from 'breakDown' package.",2020-03-28,Przemyslaw Biecek,https://pbiecek.github.io/ceterisParibus/,TRUE,https://github.com/pbiecek/ceterisparibus,11941,38,2020-03-26T10:31:57Z,314.2368421052632
cgdsr,"Provides a basic set of R functions for querying the Cancer
Genomics Data Server (CGDS), hosted by the Computational Biology Center at
Memorial-Sloan-Kettering Cancer Center (MSKCC) at <www.cbioportal.org>.",2019-06-26,Anders Jacobsen,https://github.com/cBioPortal/cgdsr,TRUE,https://github.com/cbioportal/cgdsr,61715,17,2019-06-25T20:17:02Z,3630.294117647059
CGE,"Developing general equilibrium models, computing general equilibrium and simulating economic dynamics with structural dynamic models in LI (2019, ISBN: 9787521804225) ""General Equilibrium and Structural Dynamics: Perspectives of New Structural Economics. Beijing: Economic Science Press"". When developing complex general equilibrium models, GE package should be used in addition to this package.",2020-05-24,LI Wu,NA,TRUE,https://github.com/liwur/cge,13465,0,2020-01-31T02:42:46Z,NA
CGGP,"Run computer experiments using the adaptive composite grid
algorithm with a Gaussian process model.
The algorithm works best when running an experiment that can evaluate thousands
of points from a deterministic computer simulation.
This package is an implementation of a forthcoming paper by Plumlee,
Erickson, Ankenman, et al. For a preprint of the paper,
contact the maintainer of this package.",2020-03-29,Collin Erickson,https://github.com/CollinErickson/CGGP,TRUE,https://github.com/collinerickson/cggp,4674,1,2020-03-31T00:06:03Z,4674
cghRA,"Provides functions to import data from Agilent CGH arrays and process them according to the cghRA workflow. Implements several algorithms such as WACA, STEPS and cnvScore and an interactive graphical interface.",2017-03-03,Sylvain Mareschal,http://www.ovsa.fr/cghRA,TRUE,https://github.com/maressyl/r.cghra,13310,0,2020-05-03T10:33:49Z,NA
CGPfunctions,Miscellaneous functions useful for teaching statistics as well as actually practicing the art. They typically are not new methods but rather wrappers around either base R or other packages.,2020-05-27,Chuck Powell,https://github.com/ibecav/CGPfunctions,TRUE,https://github.com/ibecav/cgpfunctions,18777,10,2020-05-27T17:58:24Z,1877.7
cgraph,"Allows to create, evaluate, and differentiate computational graphs in R. A computational graph is a graph representation of a multivariate function decomposed by its (elementary) operations. Nodes in the graph represent arrays while edges represent dependencies among the arrays. An advantage of expressing a function as a computational graph is that this enables to differentiate the function by automatic differentiation. The 'cgraph' package supports various operations including basic arithmetic, trigonometry operations, and linear algebra operations. It differentiates computational graphs by reverse automatic differentiation. The flexible architecture of the package makes it applicable to solve a variety of problems including local sensitivity analysis, gradient-based optimization, and machine learning.",2020-02-09,Ron Triepels,https://cgraph.org/,TRUE,https://github.com/triepels/cgraph,18971,11,2020-04-16T12:19:50Z,1724.6363636363637
chandwich,"Performs adjustments of a user-supplied independence loglikelihood
function using a robust sandwich estimator of the parameter covariance
matrix, based on the methodology in Chandler and Bate (2007)
<doi:10.1093/biomet/asm015>. This can be used for cluster correlated data
when interest lies in the parameters of the marginal distributions or for
performing inferences that are robust to certain types of model
misspecification. Functions for profiling the adjusted loglikelihoods are
also provided, as are functions for calculating and plotting confidence
intervals, for single model parameters, and confidence regions, for pairs
of model parameters. Nested models can be compared using an adjusted
likelihood ratio test.",2019-07-11,Paul J. Northrop,http://github.com/paulnorthrop/chandwich,TRUE,https://github.com/paulnorthrop/chandwich,10354,1,2019-11-26T22:58:02Z,10354
changepoint,"Implements various mainstream and specialised changepoint methods for finding single and multiple changepoints within data. Many popular non-parametric and frequentist methods are included. The cpt.mean(), cpt.var(), cpt.meanvar() functions should be your first point of call.",2016-10-04,Rebecca Killick,https://github.com/rkillick/changepoint/,TRUE,https://github.com/rkillick/changepoint,167781,79,2019-07-19T09:31:01Z,2123.8101265822784
changepoint.geo,Implements the high-dimensional changepoint detection method GeomCP and the related mappings used for changepoint detection. These methods view the changepoint problem from a geometrical viewpoint and aim to extract relevant geometrical features in order to detect changepoints. The geomcp() function should be your first point of call. References: Grundy et al. (2020) <doi:10.1007/s11222-020-09940-y>. ,2020-03-31,Thomas Grundy,https://github.com/grundy95/changepoint.geo/,TRUE,https://github.com/grundy95/changepoint.geo,1026,2,2020-03-31T13:12:13Z,513
changer,Changing the name of an existing R package is annoying but common task especially in the early stages of package development. This package (mostly) automates this task.,2018-10-21,Jouni Helske,https://github.com/helske/changer,TRUE,https://github.com/helske/changer,7866,13,2020-02-17T16:03:08Z,605.0769230769231
cheatR,"A set of functions to compare texts for similarity, and plot a graph of similarities among the compared texts. These functions were originally developed for detection of overlap in course hand-in.",2020-05-06,Mattan S. Ben-Shachar,https://mattansb.github.io/cheatR,TRUE,https://github.com/mattansb/cheatr,525,16,2020-05-06T19:48:15Z,32.8125
chebpol,"Contains methods for creating multivariate/multidimensional
interpolations of functions on a hypercube. If available through fftw3, the DCT-II/FFT
is used to compute coefficients for a Chebyshev interpolation.
Other interpolation methods for arbitrary Cartesian grids are also provided, a piecewise multilinear,
and the Floater-Hormann barycenter method. For scattered data polyharmonic splines with a linear term
is provided. The time-critical parts are written in C for speed. All interpolants are parallelized if
used to evaluate more than one point.",2019-12-09,Simen Gaure,https://github.com/sgaure/chebpol,TRUE,https://github.com/sgaure/chebpol,41808,5,2019-12-09T11:45:16Z,8361.6
checkdown,"Creates auto checking check-fields and check-boxes for 'rmarkdown' html. It could be used in class, when teacher share materials and tasks, so student can solve some problems and check themselves. In contrast with the 'learnr' package the 'checkdown' package works without 'shiny'.",2020-05-17,George Moroz,https://agricolamz.github.io/checkdown/,TRUE,https://github.com/agricolamz/checkdown,2193,15,2020-05-20T13:01:06Z,146.2
checkLuhn,"Confirms if the number is Luhn compliant.
Can check if credit card, IMEI number or any other Luhn based number is correct.
For more info see: <https://en.wikipedia.org/wiki/Luhn_algorithm>.",2018-09-24,Adam Deacon,https://github.com/adamjdeacon/checkLuhn,TRUE,https://github.com/adamjdeacon/checkluhn,9871,2,2020-05-19T14:21:59Z,4935.5
checkmate,"Tests and assertions to perform frequent argument checks. A
substantial part of the package was written in C to minimize any worries
about execution time overhead.",2020-02-06,Michel Lang,https://github.com/mllg/checkmate,TRUE,https://github.com/mllg/checkmate,6056273,151,2020-06-06T20:14:30Z,40107.76821192053
checkpoint,"The goal of checkpoint is to solve the problem of package
reproducibility in R. Specifically, checkpoint allows you to install packages
as they existed on CRAN on a specific snapshot date as if you had a CRAN time
machine. To achieve reproducibility, the checkpoint() function installs the
packages required or called by your project and scripts to a local library
exactly as they existed at the specified point in time. Only those packages
are available to your project, thereby avoiding any package updates that came
later and may have altered your results. In this way, anyone using checkpoint's
checkpoint() can ensure the reproducibility of your scripts or projects at any
time. To create the snapshot archives, once a day (at midnight UTC) Microsoft
refreshes the Austria CRAN mirror on the ""Microsoft R Archived Network""
server (<https://mran.microsoft.com/>). Immediately after completion
of the rsync mirror process, the process takes a snapshot, thus creating the
archive. Snapshot archives exist starting from 2014-09-17.",2020-02-23,Hong Ooi,https://github.com/RevolutionAnalytics/checkpoint,TRUE,https://github.com/revolutionanalytics/checkpoint,103366,136,2020-04-24T09:03:37Z,760.0441176470588
checkr,"Expressive, assertive, pipe-friendly functions
to check the properties of common R objects.
In the case of failure the functions issue informative error messages.",2019-04-25,Joe Thorley,https://github.com/poissonconsulting/checkr,TRUE,https://github.com/poissonconsulting/checkr,21015,9,2020-05-12T19:54:40Z,2335
cheddar,"Provides a flexible, extendable representation of an ecological community and a range of functions for analysis and visualisation, focusing on food web, body mass and numerical abundance data. Allows inter-web comparisons such as examining changes in community structure over environmental, temporal or spatial gradients.",2020-02-13,Lawrence Hudson with contributions from Dan Reuman and Rob Emerson,https://github.com/quicklizard99/cheddar/,TRUE,https://github.com/quicklizard99/cheddar,30470,12,2020-02-12T20:37:21Z,2539.1666666666665
cheese,"Contains tools for working with data during statistical analysis, promoting flexible, intuitive, and reproducible workflows. There are functions designated for specific statistical tasks such building a custom univariate descriptive table, computing pairwise association statistics, etc. These are built on a collection of data manipulation tools designed for general use that are motivated by the functional programming concept.",2020-04-30,Alex Zajichek,"https://zajichek.github.io/cheese,
https://github.com/zajichek/cheese",TRUE,https://github.com/zajichek/cheese,6629,0,2020-04-30T13:07:00Z,NA
chemCal,"Simple functions for plotting linear
calibration functions and estimating standard errors for measurements
according to the Handbook of Chemometrics and Qualimetrics: Part A
by Massart et al. There are also functions estimating the limit
of detection (LOD) and limit of quantification (LOQ).
The functions work on model objects from - optionally weighted - linear
regression (lm) or robust linear regression ('rlm' from the 'MASS' package).",2018-07-17,Johannes Ranke,"https://pkgdown.jrwb.de/chemCal,
https://cgit.jrwb.de/chemCal/about",TRUE,https://github.com/jranke/chemcal,29528,2,2020-05-20T06:44:47Z,14764
ChemometricsWithR,"Functions and scripts used in the book ""Chemometrics with R - Multivariate Data Analysis in the Natural Sciences and Life Sciences"" by Ron Wehrens, Springer (2011). Data used in the package are available from github.",2019-01-07,Ron Wehrens,https://github.com/rwehrens/CWR,TRUE,https://github.com/rwehrens/cwr,42082,4,2019-12-18T11:24:57Z,10520.5
ChemoSpec,"A collection of functions for top-down exploratory data analysis
of spectral data including nuclear magnetic resonance (NMR), infrared (IR),
Raman, X-ray fluorescence (XRF) and other similar types of spectroscopy.
Includes functions for plotting and inspecting spectra, peak alignment,
hierarchical cluster analysis (HCA), principal components analysis (PCA) and
model-based clustering. Robust methods appropriate for this type of
high-dimensional data are available. ChemoSpec is designed for structured
experiments, such as metabolomics investigations, where the samples fall into
treatment and control groups. Graphical output is formatted consistently for
publication quality plots. ChemoSpec is intended to be very user friendly and
to help you get usable results quickly. A vignette covering typical operations
is available.",2020-01-24,Bryan A. Hanson,https://bryanhanson.github.io/ChemoSpec/,TRUE,https://github.com/bryanhanson/chemospec,58648,32,2020-01-24T20:03:09Z,1832.75
ChemoSpec2D,"A collection of functions for exploratory chemometrics of 2D spectroscopic data sets such as COSY (correlated spectroscopy) and HSQC (heteronuclear single quantum coherence) 2D NMR (nuclear magnetic resonance) spectra. 'ChemoSpec2D' deploys methods aimed primarily at classification of samples and the identification of spectral features which are important in distinguishing samples from each other. Each 2D spectrum (a matrix) is treated as the unit of observation, and thus the physical sample in the spectrometer corresponds to the sample from a statistical perspective. In addition to chemometric tools, a few tools are provided for plotting 2D spectra, but these are not intended to replace the functionality typically available on the spectrometer. 'ChemoSpec2D' takes many of its cues from 'ChemoSpec' and tries to create consistent graphical output and to be very user friendly.",2020-02-19,Bryan A. Hanson,https://github.com/bryanhanson/ChemoSpec2D,TRUE,https://github.com/bryanhanson/chemospec2d,8109,1,2020-02-19T16:50:01Z,8109
ChemoSpecUtils,Functions supporting the common needs of packages 'ChemoSpec' and 'ChemoSpec2D'.,2020-04-20,Bryan A. Hanson,https://github.com/bryanhanson/ChemoSpecUtils,TRUE,https://github.com/bryanhanson/chemospecutils,18241,0,2020-04-20T13:12:01Z,NA
childesr,"Tools for connecting to 'CHILDES', an open repository for
transcripts of parent-child interaction. For more information on the
underlying data, see <http://childes-db.stanford.edu>.",2019-10-17,Mika Braginsky,https://github.com/langcog/childesr,TRUE,https://github.com/langcog/childesr,9761,7,2019-10-16T23:04:44Z,1394.4285714285713
chilemapas,"Mapas terrestres con topologias simplificadas. Estos mapas no
tienen precision geodesica, por lo que aplica el DFL-83 de 1979 de la Republica
de Chile y se consideran referenciales sin validez legal.
No se incluyen los territorios antarticos y bajo ningun evento estos mapas
significan que exista una cesion u ocupacion de territorios soberanos en
contra del Derecho Internacional por parte de Chile. Esta paquete esta
documentado intencionalmente en castellano asciificado para que funcione sin
problema en diferentes plataformas.
(Terrestrial maps with simplified toplogies. These maps lack geodesic
precision, therefore DFL-83 1979 of the Republic of Chile applies and are
considered to have no legal validity.
Antartic territories are excluded and under no event these maps mean
there is a cession or occupation of sovereign territories against International
Laws from Chile. This package was intentionally documented in asciified
spanish to make it work without problem on different platforms.)",2020-03-28,Mauricio Vargas,https://pachamaltese.github.io/chilemapas/,TRUE,https://github.com/pachamaltese/chilemapas,2876,11,2020-04-23T17:32:41Z,261.45454545454544
chisq.posthoc.test,Perform post hoc analysis based on residuals of Pearson's Chi-squared Test for Count Data based on T. Mark Beasley & Randall E. Schumacker (1995) <doi: 10.1080/00220973.1995.9943797>.,2019-10-25,Daniel Ebbert,http://chisq-posthoc-test.ebbert.nrw/,TRUE,https://github.com/ebbertd/chisq.posthoc.test,4588,0,2019-11-06T11:01:56Z,NA
chk,"For developers to check user-supplied function
arguments. It is designed to be simple, fast and customizable. Error
messages follow the tidyverse style guide.",2020-05-29,Joe Thorley,https://github.com/poissonconsulting/chk,TRUE,https://github.com/poissonconsulting/chk,11567,23,2020-05-29T17:36:36Z,502.9130434782609
chlorpromazineR,"As different antipsychotic medications have different potencies,
the doses of different medications cannot be directly compared. Various
strategies are used to convert doses into a common reference so that
comparison is meaningful. Chlorpromazine (CPZ) has historically been used
as a reference medication into which other antipsychotic doses can be
converted, as ""chlorpromazine-equivalent doses"". Using conversion keys
generated from widely-cited scientific papers (Gardner et al. 2010
<doi:10.1176/appi.ajp.2009.09060802>, Leucht et al. 2016
<doi:10.1093/schbul/sbv167>), antipsychotic doses are converted
to CPZ (or any specified antipsychotic) equivalents. The use of the package
is described in the included vignette. Not for clinical use.",2019-10-11,Eric Brown,https://github.com/ropensci/chlorpromazineR,TRUE,https://github.com/ropensci/chlorpromaziner,3354,5,2020-02-12T01:47:27Z,670.8
cholera,"Amends errors, augments data and aids analysis of John Snow's map
of the 1854 London cholera outbreak.",2019-08-28,Peter Li,https://github.com/lindbrook/cholera,TRUE,https://github.com/lindbrook/cholera,15617,111,2020-06-05T21:37:29Z,140.6936936936937
chorrrds,"Extracts music chords from the 'CifraClub' website <https://www.cifraclub.com.br/>.
The package also has functions for cleaning the extracted data and
feature extraction.",2020-03-16,Bruna Wundervald,https://github.com/r-music/chorrrds,TRUE,https://github.com/r-music/chorrrds,14576,78,2020-03-16T12:24:23Z,186.87179487179486
chromer,"A programmatic interface to the Chromosome Counts Database
(http://ccdb.tau.ac.il/). This package is part of the rOpenSci suite
(http://ropensci.org)",2015-01-13,Matthew Pennell,http://www.github.com/ropensci/chromer,TRUE,https://github.com/ropensci/chromer,18400,6,2019-11-29T04:55:48Z,3066.6666666666665
chromseq,"Chromosome files in the 'Fasta' format usually contain large sequences like human genome.
Sometimes users have to split these chromosomes into different files according to their
chromosome number. The 'chromseq' can help to handle this. So the selected chromosome sequence can be
used for downstream analysis like motif finding. Howard Y. Chang(2019)
<doi:10.1038/s41587-019-0206-z>.",2020-05-11,Shaoqian Ma,https://github.com/MSQ-123/chromseq,TRUE,https://github.com/msq-123/chromseq,439,0,2020-05-19T08:34:02Z,NA
chunked,"Data stored in text file can be processed chunkwise using 'dplyr' commands. These
are recorded and executed per data chunk, so large files can be processed with
limited memory using the 'LaF' package.",2020-03-24,Edwin de Jonge,https://github.com/edwindj/chunked,TRUE,https://github.com/edwindj/chunked,20865,145,2020-05-14T13:58:52Z,143.89655172413794
cicerone,Provide step by step guided tours of 'Shiny' applications.,2020-02-29,John Coene,https://cicerone.john-coene.com/,TRUE,https://github.com/johncoene/cicerone,1758,69,2020-04-03T16:57:53Z,25.47826086956522
cimir,"Connect to the California Irrigation Management
Information System (CIMIS) Web API. See the CIMIS main page
<https://cimis.water.ca.gov> and web API documentation
<https://et.water.ca.gov> for more information.",2020-01-22,Michael Koohafkan,https://github.com/mkoohafkan/cimir,TRUE,https://github.com/mkoohafkan/cimir,7188,3,2020-01-24T18:00:33Z,2396
circlize,"Circular layout is an efficient way for the visualization of huge
amounts of information. Here this package provides an implementation
of circular layout generation in R as well as an enhancement of available
software. The flexibility of the package is based on the usage of low-level
graphics functions such that self-defined high-level graphics can be easily
implemented by users for specific purposes. Together with the seamless
connection between the powerful computational and visual environment in R,
it gives users more convenience and freedom to design figures for
better understanding complex patterns behind multiple dimensional data.
The package is described in Gu et al. 2014 <doi:10.1093/bioinformatics/btu393>.",2020-04-30,Zuguang Gu,"https://github.com/jokergoo/circlize,
http://jokergoo.github.io/circlize_book/book/",TRUE,https://github.com/jokergoo/circlize,562765,545,2020-06-09T12:21:15Z,1032.5963302752293
circumplex,"Tools for analyzing and visualizing circular data,
including scoring functions for relevant instruments and a
generalization of the bootstrapped structural summary method from
Zimmermann & Wright (2017) <doi:10.1177/1073191115621795> and
functions for creating publication-ready tables and figures from the
results. Future versions will include tools for circular fit and
reliability analyses, as well as visualization enhancements.",2020-04-29,Jeffrey Girard,https://github.com/jmgirard/circumplex,TRUE,https://github.com/jmgirard/circumplex,15600,6,2020-06-03T02:40:49Z,2600
cIRT,"Jointly model the accuracy of cognitive responses and item choices
within a Bayesian hierarchical framework as described by Culpepper and
Balamuta (2015) <doi:10.1007/s11336-015-9484-7>. In addition, the package
contains the datasets used within the analysis of the paper.",2020-03-23,Steven Andrew Culpepper,"https://tmsalab.github.io/cIRT, https://github.com/tmsalab/cIRT",TRUE,https://github.com/tmsalab/cirt,19292,3,2020-03-22T20:03:15Z,6430.666666666667
citecorp,"Client for the Open Citations Corpus (<http://opencitations.net/>).
Includes a set of functions for getting one identifier type from another,
as well as getting references and citations for a given identifier.",2020-04-16,Scott Chamberlain,"https://github.com/ropenscilabs/citecorp (devel),
https://docs.ropensci.org/citecorp/ (docs)",TRUE,https://github.com/ropenscilabs/citecorp,4740,10,2020-04-15T16:48:07Z,474
ciTools,"Functions to append confidence intervals, prediction intervals,
and other quantities of interest to data frames. All appended quantities
are for the response variable, after conditioning on the model and covariates.
This package has a data frame first syntax that allows for easy piping.
Currently supported models include (log-) linear, (log-) linear mixed,
generalized linear models, generalized linear mixed models, and
accelerated failure time models.",2019-01-08,John Haman,https://github.com/jthaman/ciTools,TRUE,https://github.com/jthaman/citools,30413,93,2019-07-10T22:42:16Z,327.02150537634407
citr,"Functions and an 'RStudio' add-in that search 'Bib(La)TeX'-files or
'Zotero' libraries (via the 'Better BibTeX' plugin) to insert formatted Markdown
citations into the current document.",2019-08-19,Frederik Aust,https://github.com/crsh/citr,TRUE,https://github.com/crsh/citr,42918,286,2020-06-04T08:42:08Z,150.06293706293707
civis,"A convenient interface for making
requests directly to the 'Civis Platform API' <https://www.civisanalytics.com/platform/>.
Full documentation available 'here' <https://civisanalytics.github.io/civis-r/>.",2020-02-24,Patrick Miller,https://github.com/civisanalytics/civis-r,TRUE,https://github.com/civisanalytics/civis-r,96642,13,2020-05-20T20:18:26Z,7434
classInt,Selected commonly used methods for choosing univariate class intervals for mapping or other graphics purposes.,2020-04-07,Roger Bivand,"https://r-spatial.github.io/classInt/,
https://github.com/r-spatial/classInt/",TRUE,https://github.com/r-spatial/classint,2930670,20,2020-03-27T19:03:29Z,146533.5
classyfireR,Access to the ClassyFire RESTful API <http://classyfire.wishartlab.com>. Retrieve existing entity classifications and submit new entities for classification. ,2020-02-18,Tom Wilson,https://github.com/aberHRML/classyfireR,TRUE,https://github.com/aberhrml/classyfirer,11564,2,2020-02-18T10:42:33Z,5782
cld2,"Bindings to Google's C++ library Compact Language Detector 2
(see <https://github.com/cld2owners/cld2#readme> for more information). Probabilistically
detects over 80 languages in plain text or HTML. For mixed-language input it returns the
top three detected languages and their approximate proportion of the total classified
text bytes (e.g. 80% English and 20% French out of 1000 bytes). There is also a 'cld3'
package on CRAN which uses a neural network model instead.",2018-05-11,Jeroen Ooms,"https://github.com/ropensci/cld2 (devel)
https://github.com/cld2owners/cld2 (upstream)",TRUE,https://github.com/ropensci/cld2,25117,31,2019-12-08T22:40:50Z,810.2258064516129
cld3,"Google's Compact Language Detector 3 is a neural network model for language
identification and the successor of 'cld2' (available from CRAN). The algorithm is still
experimental and takes a novel approach to language detection with different properties
and outcomes. It can be useful to combine this with the Bayesian classifier results
from 'cld2'. See <https://github.com/google/cld3#readme> for more information.",2020-01-31,Jeroen Ooms,"https://docs.ropensci.org/cld3, https://github.com/ropensci/cld3
(devel) https://github.com/google/cld3 (upstream)",TRUE,https://github.com/ropensci/cld3,20096,23,2020-01-31T11:29:27Z,873.7391304347826
clean,"A wrapper around the new 'cleaner' package, that allows
data cleaning functions for classes 'logical', 'factor', 'numeric',
'character', 'currency' and 'Date' to make data cleaning fast and
easy. Relying on very few dependencies, it provides smart guessing,
but with user options to override anything if needed.",2020-06-01,Matthijs S. Berends,https://github.com/msberends/cleaner,TRUE,https://github.com/msberends/cleaner,6775,9,2020-06-01T13:43:34Z,752.7777777777778
cleaner,"Data cleaning functions for classes logical,
factor, numeric, character, currency and Date to make
data cleaning fast and easy. Relying on very few dependencies, it
provides smart guessing, but with user options to override
anything if needed.",2020-06-01,Matthijs S. Berends,https://github.com/msberends/cleaner,TRUE,https://github.com/msberends/cleaner,7095,9,2020-06-01T13:43:34Z,788.3333333333334
cleangeo,"
Provides a set of utility tools to inspect spatial objects, facilitate
handling and reporting of topology errors and geometry validity issues.
Finally, it provides a geometry cleaner that will fix all geometry problems,
and eliminate (at least reduce) the likelihood of having issues when doing
spatial data processing.",2019-12-04,Emmanuel Blondel,https://github.com/eblondel/cleangeo,TRUE,https://github.com/eblondel/cleangeo,35268,39,2019-12-04T15:27:07Z,904.3076923076923
cleanNLP,"Provides a set of fast tools for converting a textual corpus into
a set of normalized tables. Users may make use of the 'udpipe' back end with
no external dependencies, or two Python back ends with 'spaCy'
<https://spacy.io> or 'CoreNLP' <http://stanfordnlp.github.io/CoreNLP/>.
Exposed annotation tasks include tokenization, part of speech tagging, named
entity recognition, and dependency parsing.",2020-03-08,Taylor B. Arnold,https://statsmaths.github.io/cleanNLP/,TRUE,https://github.com/statsmaths/cleannlp,27935,166,2020-03-30T08:37:41Z,168.28313253012047
clere,"Implements an empirical Bayes approach for
simultaneous variable clustering and regression. This version also
(re)implements in C++ an R script proposed by Howard Bondell that fits
the Pairwise Absolute Clustering and Sparsity (PACS) methodology (see
Sharma et al (2013) <DOI:10.1080/15533174.2012.707849>).",2020-02-06,Loic Yengo,https://github.com/mcanouil/clere,TRUE,https://github.com/mcanouil/clere,19466,0,2020-02-07T09:35:43Z,NA
clhs,"Conditioned Latin hypercube sampling, as published by Minasny and McBratney (2006) <DOI:10.1016/j.cageo.2005.12.009>. This method proposes to stratify sampling in presence of ancillary data. An extension of this method, which propose to associate a cost to each individual and take it into account during the optimisation process, is also proposed (Roudier et al., 2012, <DOI:10.1201/b12728>).",2020-04-15,Pierre Roudier,https://github.com/pierreroudier/clhs/,TRUE,https://github.com/pierreroudier/clhs,29382,7,2020-04-15T23:25:21Z,4197.428571428572
clifford,"A suite of routines for Clifford algebras, using the
'Map' class of the Standard Template Library. Canonical
reference: Hestenes (1987, ISBN 90-277-1673-0, ""Clifford algebra
to geometric calculus""). Special cases including Lorentz transforms,
quaternion multiplication, and Grassman algebra, are discussed.
Conformal geometric algebra theory is implemented.",2020-03-08,Robin K. S. Hankin,https://github.com/RobinHankin/clifford.git,TRUE,https://github.com/robinhankin/clifford,2406,0,2020-06-03T22:17:52Z,NA
clifro,"CliFlo is a web portal to the New Zealand National Climate
Database and provides public access (via subscription) to around 6,500
various climate stations (see <https://cliflo.niwa.co.nz/> for more
information). Collating and manipulating data from CliFlo
(hence clifro) and importing into R for further analysis, exploration and
visualisation is now straightforward and coherent. The user is required to
have an internet connection, and a current CliFlo subscription (free) if
data from stations, other than the public Reefton electronic weather
station, is sought.",2019-03-20,Blake Seers,https://github.com/ropensci/clifro,TRUE,https://github.com/ropensci/clifro,32944,19,2019-12-09T12:19:14Z,1733.8947368421052
climate,"Automatize downloading of meteorological and hydrological data from publicly available repositories:
OGIMET (<http://ogimet.com/index.phtml.en>),
University of Wyoming - atmospheric vertical profiling data (<http://weather.uwyo.edu/upperair>),
Polish Institute of Meteorology and Water Management - National Research Institute (<https://dane.imgw.pl>),
and National Oceanic & Atmospheric Administration (NOAA).
This package also allows for adding geographical coordinates for each observation.",2020-06-03,Bartosz Czernecki,https://github.com/bczernecki/climate,TRUE,https://github.com/bczernecki/climate,5128,26,2020-05-27T10:40:27Z,197.23076923076923
climateStability,"Climate stability measures are not formalized in the literature and
tools for generating stability metrics from existing data are nascent.
This package provides tools for calculating climate stability from raster data
encapsulating climate change as a series of time slices. The methods follow
Owens and Guralnick <doi:10.17161/bi.v14i0.9786> Biodiversity Informatics.",2019-11-21,Hannah Owens,https://github.com/hannahlowens/climateStability,TRUE,https://github.com/hannahlowens/climatestability,4904,3,2020-02-18T16:02:12Z,1634.6666666666667
climatrends,"Supports analysis of trends in climate change, ecological and crop modelling.",2020-05-22,Kauê de Sousa,https://agrobioinfoservices.github.io/climatrends,TRUE,https://github.com/agrobioinfoservices/climatrends,2172,1,2020-05-26T06:55:24Z,2172
climdex.pcic,"PCIC's implementation of Climdex routines for computation of
extreme climate indices. Further details on the extreme climate indices
can be found at <http://etccdi.pacificclimate.org/list_27_indices.shtml>
and in the package manual.",2020-01-22,"David Bronaugh <[email protected]> for the Pacific Climate Impacts
Consortium",https://www.r-project.org,TRUE,https://github.com/pacificclimate/climdex.pcic,45447,9,2020-01-21T18:56:49Z,5049.666666666667
ClimMobTools,"API client for 'ClimMob', an open source software for crowdsourcing
citizen science in agriculture under the 'tricot' method <https://climmob.net/>.
Developed by van Etten et al. (2019) <doi:10.1017/S0014479716000739>, it turns the
research paradigm on its head; instead of a few researchers designing complicated
trials to compare several technologies in search of the best solutions,
it enables many farmers to carry out reasonably simple experiments that
taken together can offer even more information. 'ClimMobTools' enables project
managers to deep explore and analyse their 'ClimMob' data in R.",2020-05-08,Kaue de Sousa,https://agrobioinfoservices.github.io/ClimMobTools/,TRUE,https://github.com/agrobioinfoservices/climmobtools,4691,2,2020-05-08T14:14:07Z,2345.5
climwin,"Contains functions to detect and visualise periods of climate
sensitivity (climate windows) for a given biological response.
Please see van de Pol et al. (2016) <doi:10.1111/2041-210X.12590>
and Bailey and van de Pol (2016) <doi:10.1371/journal.pone.0167980> for details.",2020-05-26,Liam D. Bailey and Martijn van de Pol,https://github.com/LiamDBailey/climwin,TRUE,https://github.com/liamdbailey/climwin,25025,5,2020-05-20T16:16:34Z,5005
clipr,"Simple utility functions to read from and write to
the Windows, OS X, and X11 clipboards.",2019-07-23,Matthew Lincoln,https://github.com/mdlincoln/clipr,TRUE,https://github.com/mdlincoln/clipr,8582919,102,2019-09-14T12:47:43Z,84146.26470588235
CLME,"Estimation and inference for linear models where some or all of the
fixed-effects coefficients are subject to order restrictions. This package uses
the robust residual bootstrap methodology for inference, and can handle some
structure in the residual variance matrix.",2020-06-07,Casey M. Jelsema,NA,TRUE,https://github.com/jelsema/clme,24946,1,2020-06-07T17:55:32Z,24946
clubSandwich,"Provides several cluster-robust variance estimators (i.e.,
sandwich estimators) for ordinary and weighted least squares linear regression
models, including the bias-reduced linearization estimator introduced by Bell
and McCaffrey (2002)
<http://www.statcan.gc.ca/pub/12-001-x/2002002/article/9058-eng.pdf> and
developed further by Pustejovsky and Tipton (2017)
<DOI:10.1080/07350015.2016.1247004>. The package includes functions for estimating
the variance- covariance matrix and for testing single- and multiple-
contrast hypotheses based on Wald test statistics. Tests of single regression
coefficients use Satterthwaite or saddle-point corrections. Tests of multiple-
contrast hypotheses use an approximation to Hotelling's T-squared distribution.
Methods are provided for a variety of fitted models, including lm() and mlm
objects, glm(), ivreg() (from package 'AER'), plm() (from package 'plm'), gls()
and lme() (from 'nlme'), lmer() (from `lme4`), robu() (from 'robumeta'), and
rma.uni() and rma.mv() (from 'metafor').",2020-04-17,James Pustejovsky,https://github.com/jepusto/clubSandwich,TRUE,https://github.com/jepusto/clubsandwich,98753,30,2020-05-31T01:55:51Z,3291.766666666667
clustcurv,"A method for determining groups in multiple
curves with an automatic selection of their number based on k-means or
k-medians algorithms. The selection of the optimal number is provided by
bootstrap methods. The methodology can be applied both in regression and survival framework.
Implemented methods are:
Grouping multiple survival curves described by Villanueva et al. (2018) <doi:10.1002/sim.8016>.",2020-03-21,Nora M. Villanueva,https://github.com/noramvillanueva/clustcurv,TRUE,https://github.com/noramvillanueva/clustcurv,5586,0,2020-03-21T12:24:57Z,NA
Cluster.OBeu,"Estimate and return the needed parameters for visualisations designed for 'OpenBudgets' <http://openbudgets.eu/> data. Calculate cluster analysis measures in Budget data of municipalities across Europe, according to the 'OpenBudgets' data model. It involves a set of techniques and algorithms used to find and divide the data into groups of similar observations. Also, can be used generally to extract visualisation parameters convert them to 'JSON' format and use them as input in a different graphical interface.",2019-12-17,Kleanthis Koupidis,https://github.com/okgreece/Cluster.OBeu,TRUE,https://github.com/okgreece/cluster.obeu,10368,1,2019-12-17T12:37:55Z,10368
ClusterBootstrap,Provides functionality for the analysis of clustered data using the cluster bootstrap. ,2020-02-24,Mathijs Deen,https://github.com/mathijsdeen/ClusterBootstrap,TRUE,https://github.com/mathijsdeen/clusterbootstrap,14570,1,2020-02-24T14:05:33Z,14570
clusteredinterference,"Estimating causal effects from observational studies assuming
clustered (or partial) interference. These inverse probability-weighted
estimators target new estimands arising from population-level treatment
policies. The estimands and estimators are introduced in Barkley et al.
(2017) <arXiv:1711.04834>.",2019-03-18,Brian G. Barkley,http://github.com/BarkleyBG/clusteredinterference,TRUE,https://github.com/barkleybg/clusteredinterference,9225,3,2019-07-17T23:39:10Z,3075
clustermole,Assignment of cell type labels to single-cell RNA sequencing (scRNA-seq) clusters is often a time-consuming process that involves manual inspection of the cluster marker genes complemented with a detailed literature search. This is especially challenging when unexpected or poorly described populations are present. The clustermole R package provides methods to query thousands of human and mouse cell identity markers sourced from a variety of databases.,2020-01-27,Igor Dolgalev,https://github.com/igordot/clustermole,TRUE,https://github.com/igordot/clustermole,2709,1,2020-01-27T19:58:35Z,2709
clustermq,"Evaluate arbitrary function calls using workers on HPC schedulers
in single line of code. All processing is done on the network without
accessing the file system. Remote schedulers are supported via SSH.",2020-02-29,Michael Schubert,https://github.com/mschubert/clustermq,TRUE,https://github.com/mschubert/clustermq,37706,90,2020-05-10T10:10:10Z,418.9555555555556
ClusterR,"Gaussian mixture models, k-means, mini-batch-kmeans, k-medoids and affinity propagation clustering with the option to plot, validate, predict (new data) and estimate the optimal number of clusters. The package takes advantage of 'RcppArmadillo' to speed up the computationally intensive parts of the functions. For more information, see (i) ""Clustering in an Object-Oriented Environment"" by Anja Struyf, Mia Hubert, Peter Rousseeuw (1997), Journal of Statistical Software, <doi:10.18637/jss.v001.i04>; (ii) ""Web-scale k-means clustering"" by D. Sculley (2010), ACM Digital Library, <doi:10.1145/1772690.1772862>; (iii) ""Armadillo: a template-based C++ library for linear algebra"" by Sanderson et al (2016), The Journal of Open Source Software, <doi:10.21105/joss.00026>; (iv) ""Clustering by Passing Messages Between Data Points"" by Brendan J. Frey and Delbert Dueck, Science 16 Feb 2007: Vol. 315, Issue 5814, pp. 972-976, <doi:10.1126/science.1136800>.",2020-05-12,Lampros Mouselimis,https://github.com/mlampros/ClusterR,TRUE,https://github.com/mlampros/clusterr,122679,54,2020-05-25T08:37:54Z,2271.8333333333335
clustree,"Deciding what resolution to use can be a difficult question when
approaching a clustering analysis. One way to approach this problem is to
look at how samples move as the number of clusters increases. This package
allows you to produce clustering trees, a visualisation for interrogating
clusterings as resolution increases.",2020-01-29,Luke Zappia,https://github.com/lazappi/clustree,TRUE,https://github.com/lazappi/clustree,81037,88,2020-01-29T11:23:32Z,920.875
CLVTools,"Probabilistic latent customer attrition models (also known as ""buy-'til-you-die models"") are used to
predict future purchase behavior of customers. This package includes fast and accurate implementations of various
probabilistic latent customer attrition models for non-contractual settings (e.g., retail business) with and
without time-invariant and time-varying covariates. Currently, the package includes the Pareto/NBD model
(Pareto/Negative-Binomial-Distribution) for the purchase and the attrition processes as well as the Gamma/Gamma model
for the spending process. For reference to the Pareto/NBD model, see Schmittlein DC, Morrison DG, Colombo R (1987) <doi:10.1287/mnsc.33.1.1>.
For reference to the Gamma/Gamma model, see Fader PS, Hardie BG, Lee K (2005) <doi:10.1509/jmkr.2005.42.4.415>.",2020-05-08,Patrick Bachmann,https://github.com/bachmannpatrick/CLVTools,TRUE,https://github.com/bachmannpatrick/clvtools,424,5,2020-05-08T23:14:39Z,84.8
CMapViz,"Automatically displays graphical visualization for exported data table (permutated results) from Connectivity Map (CMap) (2006) <doi:10.1126/science.1132939>.
It allows the representation of the statistics (p-value and enrichment) according to each cell lines in the form of a bubble plot. ",2019-11-07,Raphaël Bonnet,NA,TRUE,https://github.com/peyronlab/cmapviz,3106,0,2019-11-07T10:10:04Z,NA
cmmr,"CEU (CEU San Pablo University) Mass Mediator is an on-line tool for aiding researchers in
performing metabolite annotation. 'cmmr' (CEU Mass Mediator RESTful API) allows
for programmatic access in R: batch search, batch advanced search, MS/MS (tandem mass spectrometry) search, etc.
For more information about the API Endpoint please go to <https://github.com/lzyacht/cmmr>.",2019-04-16,Yaoxiang Li,https://github.com/lzyacht/cmmr,TRUE,https://github.com/lzyacht/cmmr,4986,8,2019-10-24T19:16:31Z,623.25
cmna,"Provides the source and examples for James P. Howard, II,
""Computational Methods for Numerical Analysis with R,""
<http://howardjp.github.io/cmna/>, a book on numerical
methods in R.",2019-07-23,James Howard,https://howardjp.github.io/cmna/,TRUE,https://github.com/howardjp/cmna,19456,8,2019-07-23T16:42:58Z,2432
cmocean,"Perceptually uniform palettes for commonly used
variables in oceanography as functions taking an integer
and producing character vectors of colours.
See Thyng, K.M., Greene, C.A., Hetland, R.D., Zimmerle, H.M.
and S.F. DiMarco (2016) <doi:10.5670/oceanog.2016.66> for
the guidelines adhered to when creating the palettes.",2019-05-06,Ivan Krylov,https://matplotlib.org/cmocean/,TRUE,https://github.com/aitap/cmocean,6062,1,2020-02-11T18:02:02Z,6062
CMplot,"Manhattan plot, a type of scatter plot, was widely used to display the association results. However, it is usually time-consuming and laborious for a non-specialist user to write scripts and adjust parameters of an elaborate plot. Moreover, the ever-growing traits measured have necessitated the integration of results from different Genome-wide association study researches. Circle Manhattan Plot is the first open R package that can lay out. Genome-wide association study P-value results in both traditional rectangular patterns, QQ-plot and novel circular ones. United in only one bull's eye style plot, association results from multiple traits can be compared interactively, thereby to reveal both similarities and differences between signals. Additional functions include: highlight signals, a group of SNPs, chromosome visualization and candidate genes around SNPs.",2020-04-04,LiLin-Yin,https://github.com/YinLiLin/R-CMplot,TRUE,https://github.com/yinlilin/r-cmplot,41577,178,2020-06-08T05:46:47Z,233.57865168539325
cmvnorm,Various utilities for the complex multivariate Gaussian distribution.,2019-05-20,Robin K. S. Hankin,https://github.com/RobinHankin/cmvnorm.git,TRUE,https://github.com/robinhankin/cmvnorm,23153,2,2019-07-08T09:50:09Z,11576.5
cNORM,"Conventional methods for producing standard scores in psychometrics or biometrics
are often plagued with ""jumps"" or ""gaps"" (i.e., discontinuities) in norm tables and low
confidence for assessing extreme scores. The continuous norming method introduced by A.
Lenhard et al. (2016, <doi:10.1177/1073191116656437>; 2019, <doi:10.1371/journal.pone.0222279>) and generates continuous test norm
scores on the basis of the raw data from standardization samples, without requiring
assumptions about the distribution of the raw data: Norm scores are directly established
from raw data by modeling the latter ones as a function of both percentile scores and an
explanatory variable (e.g., age). The method minimizes bias arising from sampling and
measurement error, while handling marked deviations from normality, addressing bottom
or ceiling effects and capturing almost all of the variance in the original norm data
sample.",2019-09-19,Wolfgang Lenhard,"https://www.psychometrica.de/cNorm_en.html,
https://github.com/WLenhard/cNORM",TRUE,https://github.com/wlenhard/cnorm,11260,0,2020-05-15T07:44:30Z,NA
cnum,"Chinese numerals processing in R, such as conversion between
Chinese numerals and Arabic numerals as well as detection and extraction of
Chinese numerals in character objects and string. This package supports
the casual scale naming system and the respective SI prefix systems used
in mainland China and Taiwan:
""China Statutory Measurement Units""
State Administration for Market Regulation (2019) <http://gkml.samr.gov.cn/nsjg/jls/201902/t20190225_291134.html>
""Names, Definitions and Symbols of the Legal Units of Measurement and the Decimal Multiples and Submultiples""
Ministry of Economic Affairs (2019) <https://gazette.nat.gov.tw/egFront/detail.do?metaid=108965>.",2020-05-02,Elgar Teo,https://github.com/elgarteo/cnum/,TRUE,https://github.com/elgarteo/cnum,1905,4,2020-05-02T14:41:05Z,476.25
CNVScope,"Provides the ability to create interaction maps, discover CNV map domains (edges), gene annotate interactions, and create interactive visualizations of these CNV interaction maps.",2020-04-29,James Dalgeish,https://github.com/jamesdalg/CNVScope/,TRUE,https://github.com/jamesdalg/cnvscope,8603,4,2020-04-29T01:06:15Z,2150.75
coala,"Coalescent simulators can rapidly simulate biological sequences
evolving according to a given model of evolution.
You can use this package to specify such models, to conduct the simulations
and to calculate additional statistics from the results.
It relies on existing simulators for doing the simulation, and currently
supports the programs 'ms', 'msms' and 'scrm'. It also supports finite-sites
mutation models by combining the simulators with the program 'seq-gen'.",2020-01-19,Paul Staab,https://github.com/statgenlmu/coala,TRUE,https://github.com/statgenlmu/coala,24743,14,2020-01-19T13:10:30Z,1767.357142857143
coalitions,"An implementation of a Bayesian framework for the opinion poll
based estimation of event probabilities in multi-party electoral systems
(Bender and Bauer (2018) <doi:10.21105/joss.00606>).",2020-02-06,Andreas Bender,http://adibender.github.io/coalitions/,TRUE,https://github.com/adibender/coalitions,12723,11,2020-02-05T22:01:12Z,1156.6363636363637
cobalt,"Generate balance tables and plots for covariates of groups preprocessed through matching, weighting or subclassification, for example, using propensity scores. Includes integration with 'MatchIt', 'twang', 'Matching', 'optmatch', 'CBPS', 'ebal', 'WeightIt', 'cem', 'sbw', and 'designmatch' for assessing balance on the output of their preprocessing functions. Users can also specify data for balance assessment not generated through the above packages. Also included are methods for assessing balance in clustered or multiply imputed data sets or data sets with longitudinal treatments.",2020-06-04,Noah Greifer,https://github.com/ngreifer/cobalt,TRUE,https://github.com/ngreifer/cobalt,75258,20,2020-06-09T03:52:31Z,3762.9
coca,Contains the R functions needed to perform Cluster-Of-Clusters Analysis (COCA) and Consensus Clustering (CC). For further details please see Cabassi and Kirk (2019) <arXiv:1904.07701>.,2020-03-26,Alessandra Cabassi,http://github.com/acabassi/coca,TRUE,https://github.com/acabassi/coca,1157,2,2020-05-20T10:39:28Z,578.5
cocktailApp,"A 'shiny' app to discover cocktails. The
app allows one to search for cocktails by ingredient,
filter on rating, and number of ingredients. The
package also contains data with the ingredients of
nearly 26 thousand cocktails scraped from the web.",2019-07-02,Steven E. Pav,https://github.com/shabbychef/cocktailApp,TRUE,https://github.com/shabbychef/cocktailapp,9304,32,2019-07-02T05:42:53Z,290.75
cocorresp,"Fits predictive and symmetric co-correspondence analysis (CoCA) models to relate one data matrix to another data matrix. More specifically, CoCA maximises the weighted covariance between the weighted averaged species scores of one community and the weighted averaged species scores of another community. CoCA attempts to find patterns that are common to both communities.",2019-12-19,"Original Matlab routines by C.J.F. ter Braak and A.P. Schaffers. R port by Gavin L. Simpson.
Function simpls based on simpls.fit (package pls) by Ron Wehrens and Bjorn-Helge Mevik.",https://github.com/gavinsimpson/cocorresp,TRUE,https://github.com/gavinsimpson/cocorresp,33365,2,2020-03-25T21:36:55Z,16682.5
coda.base,"A minimum set of functions to perform compositional data analysis
using the log-ratio approach introduced by John Aitchison (1982) <http://www.jstor.org/stable/2345821>. Main functions
have been implemented in c++ for better performance.",2020-05-14,Marc Comas-Cufí,"https://mcomas.github.io/coda.base,
https://github.com/mcomas/coda.base",TRUE,https://github.com/mcomas/coda.base,21849,1,2020-06-03T08:22:50Z,21849
codebook,"Easily automate the following tasks to describe data frames:
Summarise the distributions, and labelled missings of variables graphically
and using descriptive statistics.
For surveys, compute and summarise reliabilities (internal consistencies,
retest, multilevel) for psychological scales.
Combine this information with metadata (such as item labels and labelled
values) that is derived from R attributes.
To do so, the package relies on 'rmarkdown' partials, so you can generate
HTML, PDF, and Word documents.
Codebooks are also available as tables (CSV, Excel, etc.) and in JSON-LD, so
that search engines can find your data and index the metadata.
The metadata are also available at your fingertips via RStudio Addins.",2020-06-06,Ruben Arslan,https://github.com/rubenarslan/codebook,TRUE,https://github.com/rubenarslan/codebook,20233,84,2020-06-08T14:29:30Z,240.86904761904762
CodeDepends,"Tools for analyzing R expressions
or blocks of code and determining the dependencies between them.
It focuses on R scripts, but can be used on the bodies of functions.
There are many facilities including the ability to summarize or get a high-level
view of code, determining dependencies between variables, code improvement
suggestions.",2018-07-17,Duncan Temple Lang,https://github.com/duncantl/CodeDepends,TRUE,https://github.com/duncantl/codedepends,25082,60,2020-01-06T23:44:55Z,418.03333333333336
codemetar,"The 'Codemeta' Project defines a 'JSON-LD' format for describing
software metadata, as detailed at <https://codemeta.github.io>. This package
provides utilities to generate, parse, and modify 'codemeta.json' files
automatically for R packages, as well as tools and examples for working with
'codemeta.json' 'JSON-LD' more generally.",2019-04-22,Carl Boettiger,"https://github.com/ropensci/codemetar,
https://ropensci.github.io/codemetar",TRUE,https://github.com/ropensci/codemetar,13814,43,2020-05-08T13:06:26Z,321.25581395348837
codyn,"Univariate and multivariate temporal and spatial diversity indices,
rank abundance curves, and community stability measures. The functions
implement measures that are either explicitly temporal and include the
option to calculate them over multiple replicates, or spatial and include
the option to calculate them over multiple time points. Functions fall into
five categories: static diversity indices, temporal diversity indices,
spatial diversity indices, rank abundance curves, and community stability
measures. The diversity indices are temporal and spatial analogs to
traditional diversity indices. Specifically, the package includes functions
to calculate community richness, evenness and diversity at a given point in
space and time. In addition, it contains functions to calculate species
turnover, mean rank shifts, and lags in community similarity between two
time points.",2020-05-06,Matthew B. Jones,https://github.com/NCEAS/codyn/,TRUE,https://github.com/nceas/codyn,22197,20,2020-05-06T06:29:49Z,1109.85
coga,"Evaluation for density and distribution function of convolution of gamma
distributions in R. Two related exact methods and one approximate method are
implemented with efficient algorithm and C++ code. A quick guide for choosing
correct method and usage of this package is given in package vignette. For the
detail of methods used in this package, we refer the user to
Mathai(1982)<doi:10.1007/BF02481056>,
Moschopoulos(1984)<doi:10.1007/BF02481123>,
Hu et al.(2019)<doi:10.1007/s00180-019-00924-9>.",2019-10-08,Chaoran Hu,https://github.com/ChaoranHu/coga,TRUE,https://github.com/chaoranhu/coga,16973,3,2019-10-22T03:50:31Z,5657.666666666667
cognitoR,Provides authentication for Shiny applications using 'Amazon Cognito' ( <https://aws.amazon.com/es/cognito/>).,2020-04-15,Pablo Pagnone,NA,TRUE,https://github.com/chi2labs/cognitor,1416,4,2020-04-14T16:28:32Z,354
coindeskr,Extract real-time Bitcoin price details by accessing 'CoinDesk' Bitcoin price Index API <https://www.coindesk.com/api/>. ,2018-01-05,AbdulMajedRaja RS,https://github.com/amrrs/coindeskr,TRUE,https://github.com/amrrs/coindeskr,13014,3,2020-04-18T20:54:29Z,4338
coinmarketcapr,Extract and monitor price and market cap of 'Cryptocurrencies' from 'Coin Market Cap' <https://coinmarketcap.com/api/>. ,2020-03-25,AbdulMajedRaja RS,http://github.com/amrrs/coinmarketcapr,TRUE,https://github.com/amrrs/coinmarketcapr,15661,34,2020-03-25T18:03:03Z,460.61764705882354
Coinprofile,"Builds the
coincident profile proposed by Martinez, W and Nieto, Fabio H and Poncela, P (2016)
<doi:10.1016/j.spl.2015.11.008>.
This methodology studies the relationship between a couple of
time series based on the set of turning points of each
time series. The coincident profile establishes if two time
series are coincident, or one of them leads the second.",2019-08-25,Wilmer Martinez,https://github.com/WilmerMartinezR/Coinprofile,TRUE,https://github.com/wilmermartinezr/coinprofile,4041,0,2019-08-25T17:19:13Z,NA
collapse,"A C/C++ based package for advanced data transformation in R that is
extremely fast, flexible and parsimonious to code with and programmer
friendly. It is well integrated with 'dplyr', 'plm' and 'data.table'.
--- Key Features: ---
(1) Advanced data programming: A full set of fast statistical functions
supporting grouped and weighted computations on vectors, matrices and
data frames. Fast (ordered) and programmable grouping, factor
generation, manipulation of data frames and data object conversions.
(2) Advanced aggregation: Fast and easy multi-data-type, multi-function,
weighted, parallelized and fully customized data aggregation.
(3) Advanced transformations: Fast (grouped, weighted) replacing and
sweeping out of statistics, scaling / standardizing, centering (i.e.
between and within transformations), higher-dimensional centering
(i.e. multiple fixed effects transformations), linear
prediction and partialling-out.
(4) Advanced time-computations: Fast (sequences of) lags / leads, and
(lagged / leaded, iterated, quasi-, log-) differences and growth
rates on (unordered) time-series and panel data. Multivariate auto,
partial and cross-correlation functions for panel data.
Panel data to (ts-)array conversions.
(5) List processing: (Recursive) list search / identification, extraction /
subsetting, data-apply, and generalized row-binding / unlisting in 2D.
(6) Advanced data exploration: Fast (grouped, weighted, panel-decomposed)
summary statistics for complex multilevel / panel data.",2020-05-26,Sebastian Krantz,NA,TRUE,https://github.com/sebkrantz/collapse,2703,39,2020-06-08T23:57:19Z,69.3076923076923
collections,"Provides high performance container data types such
as queues, stacks, deques, dicts and ordered dicts. Benchmarks
<https://randy3k.github.io/collections/articles/benchmark.html> have
shown that these containers are asymptotically more efficient than
those offered by other packages.",2020-06-01,Randy Lai,https://github.com/randy3k/collections,TRUE,https://github.com/randy3k/collections,55387,59,2020-06-01T07:46:42Z,938.7627118644068
collector,"An open source process for collecting quantified data inputs from
subject matter experts. Intended for feeding into an OpenFAIR analysis
<https://www2.opengroup.org/ogsys/catalog/C13K> using
a tool such as 'evaluator' <https://evaluator.tidyrisk.org>.",2020-02-18,David Severski,https://collector.tidyrisk.org,TRUE,https://github.com/davidski/collector,3953,13,2020-02-17T20:29:00Z,304.0769230769231
collidr,Check for namespace collisions between a string input (your function or package name) and a quarter of a million packages and functions on CRAN.,2019-09-08,Steve Condylios,https://github.com/collidrpackage/collidr,TRUE,https://github.com/collidrpackage/collidr,4952,0,2019-07-16T16:11:25Z,NA
coloc,"Performs the colocalisation tests described in
Plagnol et al (2009) <doi:10.1093/biostatistics/kxn039>,
Wallace et al (2013) <doi:10.1002/gepi.21765> and
Giambartolomei et al (2013) <doi:10.1371/journal.pgen.1004383>.",2019-05-17,Chris Wallace,https://github.com/chr1swallace/coloc,TRUE,https://github.com/chr1swallace/coloc,25637,21,2020-05-27T05:57:59Z,1220.8095238095239
colocr,"Automate the co-localization analysis of fluorescence microscopy
images. Selecting regions of interest, extract pixel intensities from
the image channels and calculate different co-localization statistics. The
methods implemented in this package are based on Dunn et al. (2011)
<doi:10.1152/ajpcell.00462.2010>.",2020-05-08,Mahmoud Ahmed,"https://docs.ropensci.org/colocr,
https://github.com/ropensci/colocr",TRUE,https://github.com/ropensci/colocr,2608,17,2020-01-04T04:10:25Z,153.41176470588235
colorednoise,"Temporally autocorrelated populations are correlated in their vital rates (growth, death, etc.) from year to year. It is very common for populations, whether they be bacteria, plants, or humans, to be temporally autocorrelated. This poses a challenge for stochastic population modeling, because a temporally correlated population will behave differently from an uncorrelated one.
This package provides tools for simulating populations with white noise (no temporal autocorrelation), red noise (positive temporal autocorrelation), and blue noise (negative temporal autocorrelation). The algebraic formulation for autocorrelated noise comes from Ruokolainen et al. (2009) <doi:10.1016/j.tree.2009.04.009>. Models for unstructured populations and for structured populations (matrix models) are available.",2019-09-27,Julia Pilowsky,NA,TRUE,https://github.com/japilo/colorednoise,13869,3,2019-09-27T11:31:25Z,4623
colorist,"Color and visualize wildlife distributions in
space-time using raster data. In addition to enabling display of
sequential change in distributions through the use of small multiples,
'colorist' provides functions for extracting several features of
interest from a sequence of distributions and for visualizing those
features using HCL (hue-chroma-luminance) color palettes. Resulting
maps allow for ""fair"" visual comparison of intensity values (e.g.,
occurrence, abundance, or density) across space and time and can be
used to address questions about where, when, and how consistently a
species, group, or individual is likely to be found.",2020-03-26,Justin Schuetz,https://github.com/mstrimas/colorist,TRUE,https://github.com/mstrimas/colorist,1170,3,2020-03-31T15:52:16Z,390
colourpicker,"A colour picker that can be used as an input in Shiny apps
or Rmarkdown documents. The colour picker supports alpha opacity, custom
colour palettes, and many more options. A Plot Colour Helper tool is
available as an RStudio Addin, which helps you pick colours to use in your
plots. A more generic Colour Picker RStudio Addin is also provided to let
you select colours to use in your R code.",2017-09-27,Dean Attali,https://github.com/daattali/colourpicker,TRUE,https://github.com/daattali/colourpicker,658708,124,2020-06-09T02:27:52Z,5312.1612903225805
colourvalues,"Maps one of the viridis colour palettes, or a user-specified palette to values.
Viridis colour maps are created by Stéfan van der Walt and Nathaniel Smith,
and were set as the default palette for the 'Python' 'Matplotlib' library <https://matplotlib.org/>.
Other palettes available in this library have been derived from
'RColorBrewer' <https://CRAN.R-project.org/package=RColorBrewer> and
'colorspace' <https://CRAN.R-project.org/package=colorspace> packages.",2020-04-29,David Cooley,https://symbolixau.github.io/colourvalues/,TRUE,https://github.com/symbolixau/colourvalues,71919,33,2020-04-28T22:35:14Z,2179.3636363636365
comat,"Builds co-occurrence matrices based on spatial raster data.
It includes creation of weighted co-occurrence matrices (wecoma) and
integrated co-occurrence matrices
(incoma; Vadivel et al. (2007) <doi:10.1016/j.patrec.2007.01.004>).",2020-03-17,Jakub Nowosad,https://nowosad.github.io/comat/,TRUE,https://github.com/nowosad/comat,4651,3,2020-06-02T16:33:09Z,1550.3333333333333
cometr,"A convenient 'R' wrapper to the 'Comet' API, which is a cloud
platform allowing you to track, compare, explain and optimize machine
learning experiments and models. Experiments can be viewed on the 'Comet'
online dashboard at <https://www.comet.ml>.",2020-05-08,Doug Blank,https://github.com/comet-ml/cometr,TRUE,https://github.com/comet-ml/cometr,1059,5,2020-06-07T15:11:04Z,211.8
commonmark,"The CommonMark specification defines a rationalized version of markdown
syntax. This package uses the 'cmark' reference implementation for converting
markdown text into various formats including html, latex and groff man. In
addition it exposes the markdown parse tree in xml format. Also includes opt-in
support for GFM extensions including tables, autolinks, and strikethrough text.",2018-12-01,Jeroen Ooms,"http://github.com/jeroen/commonmark (devel)
https://github.github.com/gfm/ (spec)",TRUE,https://github.com/jeroen/commonmark,2896641,65,2019-10-14T18:14:58Z,44563.70769230769
commonsMath,Java JAR files for the Apache Commons Mathematics Library for use by users and other packages.,2020-02-10,David B. Dahl,https://github.com/dbdahl/commonsMath,TRUE,https://github.com/dbdahl/commonsmath,17447,3,2020-02-10T15:38:44Z,5815.666666666667
comorbidity,"Computing comorbidity scores such as the weighted Charlson score
(Charlson, 1987 <doi:10.1016/0021-9681(87)90171-8>) and the Elixhauser
comorbidity score (Elixhauser, 1998 <doi:10.1097/00005650-199801000-00004>)
using ICD-9-CM or ICD-10 codes (Quan, 2005 <doi:10.1097/01.mlr.0000182534.19832.83>).",2020-01-09,Alessandro Gasparini,https://ellessenne.github.io/comorbidity,TRUE,https://github.com/ellessenne/comorbidity,17787,27,2020-05-31T08:49:57Z,658.7777777777778
CompareCausalNetworks,"Unified interface for the estimation of causal networks, including
the methods 'backShift' (from package 'backShift'), 'bivariateANM' (bivariate
additive noise model), 'bivariateCAM' (bivariate causal additive model),
'CAM' (causal additive model) (from package 'CAM'; the package is
temporarily unavailable on the CRAN repository; formerly available versions
can be obtained from the archive), 'hiddenICP' (invariant
causal prediction with hidden variables), 'ICP' (invariant causal prediction)
(from package 'InvariantCausalPrediction'), 'GES' (greedy equivalence
search), 'GIES' (greedy interventional equivalence search), 'LINGAM', 'PC' (PC
Algorithm), 'FCI' (fast causal inference),
'RFCI' (really fast causal inference) (all from package 'pcalg') and
regression.",2020-02-17,Christina Heinze-Deml,https://github.com/christinaheinze/CompareCausalNetworks,TRUE,https://github.com/christinaheinze/comparecausalnetworks,24473,14,2020-02-17T16:24:33Z,1748.0714285714287
comparer,"Quickly run experiments to compare the run time and output of
code blocks. The function mbc() can make fast comparisons of code,
and will calculate statistics comparing the resulting outputs.
It can be used to compare model fits to the same data or
see which function runs faster.
The function ffexp() runs a function using all possible combinations
of selected inputs. This is useful for comparing the effect of
different parameter values. It can also run in parallel and
automatically save intermediate results, which is very useful
for long computations.",2020-03-25,Collin Erickson,https://github.com/CollinErickson/comparer,TRUE,https://github.com/collinerickson/comparer,9935,2,2020-06-06T01:50:26Z,4967.5
comperank,"Compute ranking and rating based on competition
results. Methods of different nature are implemented: with fixed
Head-to-Head structure, with variable Head-to-Head structure and with
iterative nature. All algorithms are taken from the book 'Who’s #1?:
The science of rating and ranking' by Amy N. Langville and Carl D.
Meyer (2012, ISBN:978-0-691-15422-0).",2020-03-03,Evgeni Chasnovski,https://github.com/echasnovski/comperank,TRUE,https://github.com/echasnovski/comperank,8464,10,2020-05-31T19:04:21Z,846.4
comperes,"Tools for storing and managing competition results.
Competition is understood as a set of games in which players gain some
abstract scores. There are two ways for storing results: in long (one
row per game-player) and wide (one row per game with fixed amount of
players) formats. This package provides functions for creation and
conversion between them. Also there are functions for computing their
summary and Head-to-Head values for players. They leverage grammar of
data manipulation from 'dplyr'.",2020-05-09,Evgeni Chasnovski,https://github.com/echasnovski/comperes,TRUE,https://github.com/echasnovski/comperes,10781,5,2020-06-08T12:52:57Z,2156.2
completejourney,"Retail shopping transactions for 2,469 households over one year.
Originates from the 84.51° Complete Journey 2.0 source files
<https://www.8451.com/area51> which also includes useful metadata on
products, coupons, campaigns, and promotions.",2019-09-28,Brad Boehmke,https://github.com/bradleyboehmke/completejourney,TRUE,https://github.com/bradleyboehmke/completejourney,4124,15,2020-01-16T14:19:39Z,274.93333333333334
complmrob,"Robust regression methods for compositional data.
The distribution of the estimates can be approximated with various bootstrap
methods. These bootstrap methods are available for the compositional as well
as for standard robust regression estimates. This allows for direct
comparison between them.",2019-09-17,David Kepplinger,https://github.com/dakep/complmrob,TRUE,https://github.com/dakep/complmrob,20997,0,2019-09-17T18:25:06Z,NA
COMPoissonReg,"Fit Conway-Maxwell Poisson (COM-Poisson or CMP) regression models
to count data (Sellers & Shmueli, 2010) <doi:10.1214/09-AOAS306>. The
package provides functions for model estimation, dispersion testing, and
diagnostics. Zero-inflated CMP regression (Sellers & Raim, 2016)
<doi:10.1016/j.csda.2016.01.007> is also supported.",2019-11-30,Kimberly Sellers,https://github.com/lotze/COMPoissonReg,TRUE,https://github.com/lotze/compoissonreg,30613,1,2020-04-10T19:40:10Z,30613
comprehenr,"Provides 'Python'-style list comprehensions.
List comprehension expressions use usual loops (for(), while() and repeat()) and
usual if() as list producers. In many cases it gives more concise notation than
standard ""*apply + filter"" strategy.",2019-06-17,Gregory Demin,https://github.com/gdemin/comprehenr,TRUE,https://github.com/gdemin/comprehenr,10801,9,2019-06-29T18:05:26Z,1200.111111111111
compstatr,"Provides a set of tools for creating yearly data sets of St. Louis
Metropolitan Police Department (SLMPD) crime data, which are available from
January 2008 onward as monthly CSV releases on their website
(<http://www.slmpd.org/Crimereports.shtml>). Once data are validated and created
(monthly data releases have varying numbers of columns
as well as different column names and formats), 'compstatr' also provides
functions for categorizing and mapping crimes in St. Louis. The categorization
tools that are provided will also work with any police department that uses 5
and 6 digit numeric codes to identify specific crimes. These data provide researchers
and policy makers detailed data for St. Louis, which in the last several years
has had some of the highest or the highest violent crime rates in the United States.",2020-05-14,Christopher Prener,https://github.com/slu-openGIS/compstatr,TRUE,https://github.com/slu-opengis/compstatr,5012,6,2020-05-13T14:39:47Z,835.3333333333334
comtradr,"Interface with and extract data from the United Nations Comtrade
API <https://comtrade.un.org/data/>. Comtrade provides country level shipping
data for a variety of commodities, these functions allow for easy API query
and data returned as a tidy data frame.",2018-10-05,Chris Muir,https://github.com/ropensci/comtradr,TRUE,https://github.com/ropensci/comtradr,17947,25,2020-05-16T04:43:41Z,717.88
concaveman,The concaveman function ports the 'concaveman' (<https://github.com/mapbox/concaveman>) library from 'mapbox'. It computes the concave polygon(s) for one or several set of points.,2020-05-11,Joël Gombin,"https://joelgombin.github.io/concaveman/,
http://www.github.com/joelgombin/concaveman/",TRUE,https://github.com/joelgombin/concaveman,66172,47,2020-05-10T21:56:49Z,1407.9148936170213
concordance,"A set of utilities for matching products in different
classification codes used in international trade
research. It supports concordance between the Harmonized
System (HS0, HS1, HS2, HS3, HS4, HS5, HS combined), the Standard
International Trade Classification (SITC1, SITC2, SITC3, SITC4),
the North American Industry Classification System (NAICS combined),
as well as the Broad Economic Categories (BEC), the International
Standard of Industrial Classification (ISIC), and the Standard Industrial
Classification (SIC). It also provides code nomenclature/descriptions
look-up, Rauch classification look-up (via concordance to SITC2), and
trade elasticity look-up (via concordance to HS0 or SITC3
codes).",2020-04-24,Steven Liao,NA,TRUE,https://github.com/insongkim/concordance,17491,3,2020-06-09T00:19:54Z,5830.333333333333
concorR,"Contains the CONCOR (CONvergence of iterated CORrelations)
algorithm and a series of supplemental functions for easy running,
plotting, and blockmodeling. The CONCOR algorithm is used on social network
data to identify network positions based off a definition of structural
equivalence; see Breiger, Boorman, and Arabie (1975)
<doi:10.1016/0022-2496(75)90028-0> and Wasserman and Faust's book Social
Network Analysis: Methods and Applications (1994). This version allows
multiple relationships for the same set of nodes and uses both incoming and
outgoing ties to find positions.",2020-06-03,Adrienne Traxler,https://github.com/ATraxLab/concorR,TRUE,https://github.com/atraxlab/concorr,0,0,2020-06-03T17:16:06Z,NA
concurve,"Allows one to compute compatibility (confidence)
intervals for various statistical tests along with their corresponding
P-values, S-values, and likelihoods. The intervals can be plotted to
create consonance, surprisal, and likelihood functions allowing one to
see what effect sizes are compatible with the test model at various
compatibility levels rather than being limited to one interval estimate
such as 95%. Functions can also be compared to one another to see how much
they overlap with one another and differ. Results can also be exported for
Word, Powerpoint, and TeX documents. The package currently supports bootstrapping,
linear models, generalized linear models, linear mixed-effects models,
survival analysis, and meta-analysis. These methods are discussed by
Poole C. (1987) <doi:10.2105/AJPH.77.2.195>, Schweder T, Hjort NL. (2002)
<doi:10.1111/1467-9469.00285>, Singh K, Xie M, Strawderman WE. (2007)
<arXiv:0708.0976>, Rothman KJ, Greenland S, Lash TL. (2008,
ISBN:9781451190052), Greenland S. (2019)
<doi:10.1080/00031305.2018.1529625>, Chow ZR, Greenland S. (2019)
<arXiv:1909.08579>, and Greenland S, Chow ZR. (2019)
<arXiv:1909.08583>.",2020-04-20,Zad Rafi,"https://data.lesslikely.com/concurve/,
https://github.com/zadrafi/concurve, https://lesslikely.com/",TRUE,https://github.com/zadrafi/concurve,8676,12,2020-06-07T00:56:24Z,723
condformat,"Apply and visualize conditional formatting to data frames in R.
It renders a data frame with cells formatted according to
criteria defined by rules, using a tidy evaluation syntax. The table is
printed either opening a web browser or within the 'RStudio' viewer if
available. The conditional formatting rules allow to highlight cells
matching a condition or add a gradient background to a given column. This
package supports both 'HTML' and 'LaTeX' outputs in 'knitr' reports, and
exporting to an 'xlsx' file.",2020-05-14,Sergio Oller Moreno,http://github.com/zeehio/condformat,TRUE,https://github.com/zeehio/condformat,27766,14,2020-05-15T15:32:58Z,1983.2857142857142
CondIndTests,"Code for a variety of nonlinear conditional independence tests:
Kernel conditional independence test (Zhang et al., UAI 2011, <arXiv:1202.3775>),
Residual Prediction test (based on Shah and Buehlmann, <arXiv:1511.03334>),
Invariant environment prediction,
Invariant target prediction,
Invariant residual distribution test,
Invariant conditional quantile prediction (all from Heinze-Deml et al., <arXiv:1706.08576>).",2019-11-12,Christina Heinze-Deml,https://github.com/christinaheinze/nonlinearICP-and-CondIndTests,TRUE,https://github.com/christinaheinze/nonlinearicp-and-condindtests,13802,10,2019-11-12T14:45:32Z,1380.2
condir,Set of functions for the easy analyses of conditioning data.,2020-03-06,Angelos-Miltiadis Krypotos,https://github.com/AngelosPsy/condir,TRUE,https://github.com/angelospsy/condir,10844,1,2020-03-06T12:28:50Z,10844
conditionz,"Provides ability to control how many times in function
calls conditions are thrown (shown to the user). Includes control of
warnings and messages.",2019-04-24,Scott Chamberlain,https://github.com/ropenscilabs/conditionz,TRUE,https://github.com/ropenscilabs/conditionz,7448,1,2020-03-13T16:37:52Z,7448
condusco,"Runs a function iteratively over each row of either a dataframe
or the results of a query. Use the 'BigQuery' and 'DBI' wrappers to
iteratively pass each row of query results to a function. If a field
contains a 'JSON' string, it will be converted to an object. This is
helpful for queries that return 'JSON' strings that represent objects.
These fields can then be treated as objects by the pipeline.",2017-11-08,Roland Stevenson,https://github.com/ras44/condusco,TRUE,https://github.com/ras44/condusco,9610,10,2019-06-13T17:44:24Z,961
condvis2,"Constructs a shiny app function with interactive displays for conditional visualization of models,
data and density functions. An extended version of package 'condvis'.
Mark O'Connell, Catherine B. Hurley, Katarina Domijan (2017) <doi:10.18637/jss.v081.i05>.",2019-06-28,Catherine Hurley,https://github.com/cbhurley/condvis2,TRUE,https://github.com/cbhurley/condvis2,4438,5,2019-09-03T15:04:54Z,887.6
configural,"R functions for criterion profile analysis, Davison and Davenport (2002) <doi:10.1037/1082-989X.7.4.468> and meta-analytic criterion profile analysis, Wiernik, Wilmot, Davison, and Ones (2019). Sensitivity analyses to aid in interpreting criterion profile analysis results are also included.",2019-02-19,Brenton M. Wiernik,NA,TRUE,https://github.com/bwiernik/configural,5681,0,2020-05-31T01:00:43Z,NA
confintr,"Calculates classic and/or bootstrap confidence
intervals for many parameters such as the population mean, variance,
interquartile range (IQR), median absolute deviation (MAD), skewness,
kurtosis, Cramer's V, R-squared, quantiles (incl. median),
proportions, different types of correlation measures, difference in
means, quantiles and medians. Many of the classic confidence intervals
are described in Smithson, M. (2003, ISBN: 978-0761924999). Bootstrap
confidence intervals are calculated with the R package 'boot'. Both
one- and two-sided intervals are supported.",2020-06-04,Michael Mayer,https://github.com/mayer79/confintr,TRUE,https://github.com/mayer79/confintr,0,1,2020-06-05T13:50:11Z,0
conflicted,"R's default conflict management system gives the most recently
loaded package precedence. This can make it hard to detect conflicts,
particularly when they arise because a package update creates ambiguity
that did not previously exist. 'conflicted' takes a different approach,
making every conflict an error and forcing you to choose which function
to use.",2019-06-21,Hadley Wickham,https://github.com/r-lib/conflicted,TRUE,https://github.com/r-lib/conflicted,64175,172,2019-06-22T14:38:07Z,373.11046511627904
conflr,"Provides utilities for working with various 'Confluence' API
<https://docs.atlassian.com/ConfluenceServer/rest/latest/>, including a
functionality to convert an R Markdown document to 'Confluence' format and
upload it to 'Confluence' automatically.",2020-04-08,Hiroaki Yutani,"https://line.github.io/conflr/, https://github.com/line/conflr",TRUE,https://github.com/line/conflr,1804,79,2020-06-06T06:33:21Z,22.835443037974684
confoundr,"Implements three covariate-balance diagnostics for time-varying confounding and selection-bias in complex longitudinal data, as described in Jackson (2016) <doi:10.1097/EDE.0000000000000547> and Jackson (2019) <doi:10.1093/aje/kwz136>. Diagnostic 1 assesses measured confounding/selection-bias, diagnostic 2 assesses exposure-covariate feedback, and diagnostic 3 assesses residual confounding/selection-bias after inverse probability weighting or propensity score stratification. All diagnostics appropriately account for exposure history, can be adapted to assess a particular depth of covariate history, and can be implemented in right-censored data. Balance assessments can be obtained for all times, selected-times, or averaged across person-time. The balance measures are reported as tables or plots. These diagnostics can be applied to the study of multivariate exposures including time-varying exposures, direct effects, interaction, and censoring.",2019-09-20,John W. Jackson,NA,TRUE,https://github.com/jwjackson/confoundr,3523,8,2020-01-09T01:36:48Z,440.375
CongreveLamsdell2016,"Includes the 100 datasets simulated by Congreve and Lamsdell (2016)
<doi:10.1111/pala.12236>, and analyses of the partition and quartet distance of
reconstructed trees from the generative tree, as analysed by Smith (2019)
<doi:10.1098/rsbl.2018.0632>.",2020-01-07,Martin R. Smith,https://github.com/ms609/CongreveLamsdell2016,TRUE,https://github.com/ms609/congrevelamsdell2016,6844,0,2020-04-16T12:33:08Z,NA
conjurer,Builds synthetic data applicable across multiple domains. This package also provides flexibility to control data distribution to make it relevant to many industry examples.,2020-03-22,Sidharth Macherla,https://github.com/SidharthMacherla/conjurer,TRUE,https://github.com/sidharthmacherla/conjurer,2885,3,2020-04-20T01:33:13Z,961.6666666666666
connections,"Enables 'DBI' compliant packages to integrate with the 'RStudio' connections
pane, and the 'pins' package. It automates the display of schemata, tables, views, as well
as the preview of the table's top 1000 records. ",2020-02-07,Javier Luraschi,https://github.com/edgararuiz/connections,TRUE,https://github.com/edgararuiz/connections,2982,34,2020-02-07T14:43:20Z,87.70588235294117
ConnMatTools,"Collects several different methods for analyzing and
working with connectivity data in R. Though primarily oriented towards
marine larval dispersal, many of the methods are general and useful for
terrestrial systems as well.",2020-02-03,David M. Kaplan,https://github.com/dmkaplan2000/ConnMatTools.git,TRUE,https://github.com/dmkaplan2000/connmattools,19004,0,2020-02-03T09:21:10Z,NA
conquer,Fast and accurate convolution-type smoothed quantile regression. Implemented using Barzilai-Borwein gradient descent with a Huber regression warm start. Construct confidence intervals for regression coefficients using multiplier bootstrap.,2020-05-06,Xiaoou Pan,https://github.com/XiaoouPan/conquer,TRUE,https://github.com/xiaooupan/conquer,951,2,2020-05-06T05:04:38Z,475.5
ConR,"Multi-species estimation of geographical range parameters
for preliminary assessment of conservation status following Criterion B of the
International Union for Conservation of Nature (IUCN,
see <http://www.iucnredlist.org>).",2020-05-18,Gilles Dauby,https://gdauby.github.io/ConR/,TRUE,https://github.com/gdauby/conr,16938,3,2020-05-18T15:13:23Z,5646
ConsReg,"Fits generalized linear models or a regression with Autoregressive moving-average (ARMA) errors for time series data.
The package makes it easy to incorporate constraints into the model's coefficients.
The model is specified by an objective function (Gaussian, Binomial or Poisson) or an ARMA order (p,q),
a vector of bound constraints
for the coefficients (i.e beta1 > 0) and the possibility to incorporate restrictions
among coefficients (i.e beta1 > beta2).
The references of this packages are the same as 'stats' package for glm() and arima() functions.
See Brockwell, P. J. and Davis, R. A. (1996, ISBN-10: 9783319298528).
For the different optimizers implemented, it is recommended to consult the documentation of the corresponding packages. ",2020-04-05,Josep Puig Sallés,https://github.com/puigjos/ConsReg,TRUE,https://github.com/puigjos/consreg,1050,0,2020-04-03T13:29:22Z,NA
contact,"Process spatially- and temporally-discrete data into contact and
social networks, and facilitate network analysis by randomizing
individuals' movement paths and/or related categorical variables. To use
this package, users need only have a dataset containing spatial data
(i.e., latitude/longitude, or planar x & y coordinates), individual IDs
relating spatial data to specific individuals, and date/time information
relating spatial locations to temporal locations. The functionality of this
package ranges from data ""cleaning"" via multiple filtration functions, to
spatial and temporal data interpolation, and network creation and analysis.
Functions within this package are not limited to describing interpersonal
contacts. Package functions can also identify and quantify ""contacts""
between individuals and fixed areas (e.g., home ranges, water bodies,
buildings, etc.). As such, this package is an incredibly useful resource
for facilitating epidemiological, ecological, ethological and sociological
research.",2020-06-02,Trevor Farthing,NA,TRUE,https://github.com/lanzaslab/contact,2727,1,2020-06-02T10:17:01Z,2727
contextual,"Facilitates the simulation and evaluation of context-free
and contextual multi-Armed Bandit policies or algorithms to ease the
implementation, evaluation, and dissemination of both existing and
new bandit algorithms and policies.",2020-03-04,Robin van Emden,https://github.com/Nth-iteration-labs/contextual,TRUE,https://github.com/nth-iteration-labs/contextual,10321,44,2020-05-20T14:20:40Z,234.5681818181818
contfrac,Various utilities for evaluating continued fractions.,2018-05-17,Robin K. S. Hankin,https://github.com/RobinHankin/contfrac.git,TRUE,https://github.com/robinhankin/contfrac,478915,0,2020-05-01T21:32:19Z,NA
ContourFunctions,"Provides functions for making contour plots.
The contour plot can be created from grid data, a function,
or a data set. If non-grid data is given, then a Gaussian
process is fit to the data and used to create the contour plot.",2019-05-20,Collin Erickson,https://github.com/CollinErickson/contour,TRUE,https://github.com/collinerickson/contour,13646,6,2019-08-11T14:28:13Z,2274.3333333333335
contrast,"One degree of freedom contrasts for 'lm', 'glm', 'gls', and 'geese' objects.",2020-03-19,Alan OCallaghan,https://github.com/topepo/contrast,TRUE,https://github.com/topepo/contrast,54815,1,2020-03-03T00:12:26Z,54815
contribution,"Contribution table for credit assignment based on 'ggplot2'.
This can improve the author contribution information in academic journals and personal CV. ",2019-07-18,Shixiang Wang,https://github.com/ShixiangWang/contribution,TRUE,https://github.com/shixiangwang/contribution,5094,3,2019-10-22T02:25:37Z,1698
control,"Solves control systems problems relating to time/frequency response, LTI systems design and analysis, transfer function manipulations, and system conversion.",2017-12-12,Ben C. Ubah,NA,TRUE,https://github.com/benubah/control,10336,12,2020-04-30T20:32:06Z,861.3333333333334
ConvergenceClubs,"Functions for clustering regions that form convergence clubs, according to the definition of Phillips and Sul (2009) <doi:10.1002/jae.1080>. A package description is available in Sichera and Pizzuto (2019).",2019-11-21,Roberto Sichera,https://CRAN.R-project.org/package=ConvergenceClubs,TRUE,https://github.com/rhobis/convergenceclubs,12367,1,2020-02-02T17:19:17Z,12367
convey,"Variance estimation on indicators of income concentration and
poverty using complex sample survey designs. Wrapper around the
'survey' package.",2020-05-22,Djalma Pessoa,https://guilhermejacob.github.io/context/,TRUE,https://github.com/djalmapessoa/convey,23962,9,2020-05-23T15:11:43Z,2662.4444444444443
CoordinateCleaner,"Automated flagging of common spatial and temporal errors in biological and paleontological collection data, for the use in conservation, ecology and paleontology. Includes automated tests to easily flag (and exclude) records assigned to country or province centroid, the open ocean, the headquarters of the Global Biodiversity Information Facility, urban areas or the location of biodiversity institutions (museums, zoos, botanical gardens, universities). Furthermore identifies per species outlier coordinates, zero coordinates, identical latitude/longitude and invalid coordinates. Also implements an algorithm to identify data sets with a significant proportion of rounded coordinates. Especially suited for large data sets. The reference for the methodology is: Zizka et al. (2019) doi:10.1111/2041-210X.13152.",2020-05-11,Alexander Zizka,https://ropensci.github.io/CoordinateCleaner/,TRUE,https://github.com/ropensci/coordinatecleaner,22998,44,2020-05-11T16:51:36Z,522.6818181818181
copent,"The nonparametric method for estimating copula entropy is implemented. The method composes of two simple steps: estimating empirical copula by rank statistic and estimating copula entropy with k-Nearest-Neighbour method. Copula Entropy is a mathematical concept for multivariate statistical independence measuring and testing, and proved to be equivalent to mutual information. Estimating copula entropy can be applied to many cases, including but not limited to variable selection and causal discovery (by estimating transfer entropy). Please refer to Ma and Sun (2011) <doi: 10.1016/S1007-0214(11)70008-6> for more information.",2020-04-16,MA Jian,https://github.com/majianthu/copent,TRUE,https://github.com/majianthu/copent,928,0,2020-05-24T00:51:48Z,NA
coppeCosenzaR,"The program implements the COPPE-Cosenza Fuzzy Hierarchy Model.
The model was based on the evaluation of local alternatives, representing
regional potentialities, so as to fulfill demands of economic projects.
After defining demand profiles in terms of their technological coefficients,
the degree of importance of factors is defined so as to represent
the productive activity. The method can detect a surplus of supply without
the restriction of the distance of classical algebra, defining a hierarchy
of location alternatives. In COPPE-Cosenza Model, the distance between
factors is measured in terms of the difference between grades of memberships
of the same factors belonging to two or more sets under comparison. The
required factors are classified under the following linguistic variables:
Critical (CR); Conditioning (C); Little Conditioning (LC); and Irrelevant
(I). And the alternatives can assume the following linguistic variables:
Excellent (Ex), Good (G), Regular (R), Weak (W), Empty (Em), Zero (Z) and
Inexistent (In). The model also provides flexibility, allowing different
aggregation rules to be performed and defined by the Decision Maker. Such
feature is considered in this package, allowing the user to define other
aggregation matrices, since it considers the same linguistic variables
mentioned. ",2017-10-28,Pier Taranti,https://github.com/ptaranti/coppeCosenzaR,TRUE,https://github.com/ptaranti/coppecosenzar,11992,0,2020-03-17T11:04:46Z,NA
coRanking,"Calculates the co-ranking matrix to assess the
quality of a dimensionality reduction.",2020-02-12,Guido Kraemer,https://github.com/gdkrmr/coRanking,TRUE,https://github.com/gdkrmr/coranking,35114,5,2020-02-12T13:14:27Z,7022.8
Corbi,"Provides a bundle of basic and fundamental bioinformatics tools,
such as network querying and alignment, subnetwork extraction and search,
network biomarker identification.",2019-11-22,Ling-Yun Wu,https://github.com/wulingyun/Corbi,TRUE,https://github.com/wulingyun/corbi,18969,4,2019-11-22T03:20:44Z,4742.25
coreCT,"Computed tomography (CT) imaging is a powerful tool for understanding the composition of sediment cores. This package streamlines and accelerates the analysis of CT data generated in the context of environmental science. Included are tools for processing raw DICOM images to characterize sediment composition (sand, peat, etc.). Root analyses are also enabled, including measures of external surface area and volumes for user-defined root size classes. For a detailed description of the application of computed tomography imaging for sediment characterization, see: Davey, E., C. Wigand, R. Johnson, K. Sundberg, J. Morris, and C. Roman. (2011) <DOI: 10.1890/10-2037.1>.",2019-11-28,Troy D. Hill,https://github.com/troyhill/coreCT,TRUE,https://github.com/troyhill/corect,12549,1,2019-11-27T22:10:22Z,12549
cornet,Implements lasso and ridge regression for dichotomised outcomes (Rauschenberger et al. 2019). Such outcomes are not naturally but artificially binary. They indicate whether an underlying measurement is greater than a threshold.,2020-03-18,Armin Rauschenberger,https://github.com/rauschenberger/cornet,TRUE,https://github.com/rauschenberger/cornet,6429,1,2020-03-18T07:51:37Z,6429
coroICA,"Contains an implementation of a confounding robust independent component analysis (ICA) for noisy and grouped data. The main function coroICA() performs a blind source separation, by maximizing an independence across sources and allows to adjust for varying confounding based on user-specified groups. Additionally, the package contains the function uwedge() which can be used to approximately jointly diagonalize a list of matrices. For more details see the project website <https://sweichwald.de/coroICA/>.",2020-05-15,Niklas Pfister and Sebastian Weichwald,https://github.com/sweichwald/coroICA-R,TRUE,https://github.com/sweichwald/coroica-r,6287,1,2020-05-15T08:05:37Z,6287
coronavirus,Provides a daily summary of the Coronavirus (COVID-19) cases by state/province. Data source: Johns Hopkins University Center for Systems Science and Engineering (JHU CCSE) Coronavirus <https://systems.jhu.edu/research/public-health/ncov/>.,2020-05-13,Rami Krispin,https://github.com/RamiKrispin/coronavirus,TRUE,https://github.com/ramikrispin/coronavirus,17006,314,2020-06-09T08:08:23Z,54.15923566878981
corporaexplorer,"Facilitates dynamic exploration of text collections through an
intuitive graphical user interface and the power of regular expressions.
The package contains 1) a helper function to convert a data frame to a
'corporaexplorerobject', 2) a 'Shiny' app for fast and flexible exploration
of a 'corporaexplorerobject', and 3) a 'Shiny' app for simple
retrieval/extraction of documents from a 'corporaexplorerobject' in a
reading-friendly format. The package also includes demo apps with which
one can explore Jane Austen's novels and the State of the Union Addresses
(data from the 'janeaustenr' and 'sotu' packages respectively).",2020-03-07,Kristian Lundby Gjerde,"https://kgjerde.github.io/corporaexplorer,
https://github.com/kgjerde/corporaexplorer",TRUE,https://github.com/kgjerde/corporaexplorer,4959,39,2020-06-04T09:00:53Z,127.15384615384616
CoRpower,"Calculates power for assessment of intermediate biomarker responses as correlates of risk in the active treatment group in clinical efficacy trials, as described in Gilbert, Janes, and Huang, Power/Sample Size Calculations for Assessing Correlates of Risk in Clinical Efficacy Trials (2016, Statistics in Medicine). The methods differ from past approaches by accounting for the level of clinical treatment efficacy overall and in biomarker response subgroups, which enables the correlates of risk results to be interpreted in terms of potential correlates of efficacy/protection. The methods also account for inter-individual variability of the observed biomarker response that is not biologically relevant (e.g., due to technical measurement error of the laboratory assay used to measure the biomarker response), which is important because power to detect a specified correlate of risk effect size is heavily affected by the biomarker's measurement error. The methods can be used for a general binary clinical endpoint model with a univariate dichotomous, trichotomous, or continuous biomarker response measured in active treatment recipients at a fixed timepoint after randomization, with either case-cohort Bernoulli sampling or case-control without-replacement sampling of the biomarker (a baseline biomarker is handled as a trivial special case). In a specified two-group trial design, the computeN() function can initially be used for calculating additional requisite design parameters pertaining to the target population of active treatment recipients observed to be at risk at the biomarker sampling timepoint. Subsequently, the power calculation employs an inverse probability weighted logistic regression model fitted by the tps() function in the 'osDesign' package. Power results as well as the relationship between the correlate of risk effect size and treatment efficacy can be visualized using various plotting functions. 
To link power calculations for detecting a correlate of risk and a correlate of treatment efficacy, a baseline immunogenicity predictor (BIP) can be simulated according to a specified classification rule (for dichotomous or trichotomous BIPs) or correlation with the biomarker response (for continuous BIPs), then outputted along with biomarker response data under assignment to treatment, and clinical endpoint data for both treatment and placebo groups.",2019-09-27,Michal Juraska,https://github.com/mjuraska/CoRpower,TRUE,https://github.com/mjuraska/corpower,7661,0,2019-09-27T21:39:07Z,NA
corpus,"Text corpus data analysis, with full support for international text (Unicode). Functions for reading data from newline-delimited 'JSON' files, for normalizing and tokenizing text, for searching for term occurrences, and for computing term occurrence frequencies, including n-grams.",2020-04-16,Leslie Huang,"https://leslie-huang.github.io/r-corpus/,
https://github.com/leslie-huang/r-corpus",TRUE,https://github.com/leslie-huang/r-corpus,64797,0,2020-04-15T18:53:36Z,NA
corpustools,"Provides text analysis in R, focusing on the use of a tokenized text format. In this format, the positions of tokens are maintained, and each token can be annotated (e.g., part-of-speech tags, dependency relations).
Prominent features include advanced Lucene-like querying for specific tokens or contexts (e.g., documents, sentences),
similarity statistics for words and documents, exporting to DTM for compatibility with many text analysis packages,
and the possibility to reconstruct original text from tokens to facilitate interpretation.",2020-01-23,Kasper Welbers and Wouter van Atteveldt,http://github.com/kasperwelbers/corpustools,TRUE,https://github.com/kasperwelbers/corpustools,17130,19,2020-03-24T13:58:22Z,901.578947368421
corrcoverage,"Using a computationally efficient method, the package can
be used to find the corrected coverage estimate of a credible set
of putative causal variants from Bayesian genetic fine-mapping.
The package can also be used to obtain a corrected credible set
if required; that is, the smallest set of variants required such
that the corrected coverage estimate of the resultant credible set is
within some user defined accuracy of the desired coverage.
Maller et al. (2012) <doi:10.1038/ng.2435>,
Wakefield (2009) <doi:10.1002/gepi.20359>,
Fortune and Wallace (2018) <doi:10.1093/bioinformatics/bty898>.",2019-12-06,Anna Hutchinson,https://annahutch.github.io/corrcoverage,TRUE,https://github.com/annahutch/corrcoverage,3137,2,2020-06-08T11:56:33Z,1568.5
correlation,"Lightweight package for computing different kinds of correlations, such as partial correlations, Bayesian correlations, multilevel correlations, polychoric correlations, biweight correlations, distance correlations and more. Relies on the easystats ecosystem (Lüdecke, Waggoner & Makowski (2019) <doi:10.21105/joss.01412>).",2020-05-05,Dominique Makowski,https://easystats.github.io/correlation/,TRUE,https://github.com/easystats/correlation,10606,88,2020-05-25T08:39:22Z,120.52272727272727
correlationfunnel,"
Speeds up exploratory data analysis (EDA)
by providing a succinct workflow and interactive visualization tools for understanding
which features have relationships to target (response). Uses binary correlation analysis
to determine relationship. Default correlation method is the Pearson method.
Lian Duan, W Nick Street, Yanchi Liu, Songhua Xu, and Brook Wu (2014) <doi:10.1145/2637484>.",2020-06-09,Matt Dancho,https://github.com/business-science/correlationfunnel,TRUE,https://github.com/business-science/correlationfunnel,7070,59,2020-06-09T00:32:54Z,119.83050847457628
corrgram,"Calculates correlation of variables and displays the results
graphically. Included panel functions can display points, shading, ellipses, and
correlation values with confidence intervals. See Friendly (2002) <doi:10.1198/000313002533>.",2018-07-09,Kevin Wright,https://github.com/kwstat/corrgram,TRUE,https://github.com/kwstat/corrgram,438270,11,2020-01-20T15:29:15Z,39842.72727272727
corrgrapher,"When exploring data or models we often examine variables one by one.
This analysis is incomplete if the relationship between these variables is
not taken into account. The 'corrgrapher' package facilitates simultaneous
exploration of the Partial Dependence Profiles and the correlation between
variables in the model.
The package 'corrgrapher' is a part of the 'DrWhy.AI' universe.",2020-06-04,Pawel Morgen,"https://modeloriented.github.io/corrgrapher/,
https://github.com/ModelOriented/corrgrapher",TRUE,https://github.com/modeloriented/corrgrapher,0,9,2020-05-05T07:32:39Z,0
corrplot,"A graphical display of a correlation matrix or general matrix.
It also contains some algorithms to do matrix reordering. In addition,
corrplot is good at details, including choosing color, text labels,
color labels, layout, etc.",2017-10-16,Taiyun Wei,https://github.com/taiyun/corrplot,TRUE,https://github.com/taiyun/corrplot,2792860,196,2020-06-06T17:47:39Z,14249.285714285714
corrr,"A tool for exploring correlations.
It makes it possible to easily perform routine tasks when
exploring correlation matrices such as ignoring the diagonal,
focusing on the correlations of certain variables against others,
or rearranging and visualizing the matrix in terms of the
strength of the correlations.",2020-03-22,Max Kuhn,https://github.com/tidymodels/corrr,TRUE,https://github.com/tidymodels/corrr,100628,375,2020-05-13T14:21:37Z,268.34133333333335
cort,"Provides S4 classes and methods to fit several copula models: The classic empirical checkerboard copula and the empirical checkerboard copula with known margins, see Cuberos, Masiello and Maume-Deschamps (2019) <doi:10.1080/03610926.2019.1586936> are proposed. These two models allow to fit copulas in high dimension with a small number of observations, and they are always proper copulas. Some flexibility is added via a possibility to differentiate the checkerboard parameter by dimension. The last model consist of the implementation of the Copula Recursive Tree algorithm proposed by Laverny, Maume-Deschamps, Masiello and Rullière (2020) <arXiv:2005.02912>, including the localised dimension reduction, which fits a copula by recursive splitting of the copula domain. We also provide an efficient way of mixing copulas, allowing to bag the algorithm into a forest, and a generic way of measuring d-dimensional boxes with a copula.",2020-05-13,Oskar Laverny,https://github.com/lrnv/cort,TRUE,https://github.com/lrnv/cort,1332,0,2020-05-14T07:28:48Z,NA
cosinor,"cosinor is a set of simple functions that transforms longitudinal
data to estimate the cosinor linear model as described in Tong (1976).
Methods are given to summarize the mean, amplitude and acrophase, to
predict the mean annual outcome value, and to test the coefficients.",2014-07-28,Michael Sachs,http://github.com/sachsmc/cosinor,TRUE,https://github.com/sachsmc/cosinor,20030,3,2020-05-09T10:50:14Z,6676.666666666667
costsensitive,"Reduction-based techniques for cost-sensitive multi-class classification, in which each observation has a different cost for classifying it into one class, and the goal is to predict the class with the minimum expected cost for each new observation.
Implements Weighted All-Pairs (Beygelzimer, A., Langford, J., & Zadrozny, B., 2008, <doi:10.1007/978-0-387-79361-0_1>), Weighted One-Vs-Rest (Beygelzimer, A., Dani, V., Hayes, T., Langford, J., & Zadrozny, B., 2005, <https://dl.acm.org/citation.cfm?id=1102358>) and Regression One-Vs-Rest.
Works with arbitrary classifiers taking observation weights, or with regressors. Also implements cost-proportionate rejection sampling for working with classifiers
that don't accept observation weights.",2019-07-28,David Cortes,https://github.com/david-cortes/costsensitive,TRUE,https://github.com/david-cortes/costsensitive,8199,24,2020-04-04T18:19:31Z,341.625
countfitteR,"A large number of measurements generate count data. This is a statistical data type that only assumes non-negative integer values and is generated by counting. Typically, counting data can be found in biomedical applications, such as the analysis of DNA double-strand breaks. The number of DNA double-strand breaks can be counted in individual cells using various bioanalytical methods. For diagnostic applications, it is relevant to record the distribution of the number data in order to determine their biomedical significance (Roediger, S. et al., 2018. Journal of Laboratory and Precision Medicine. <doi:10.21037/jlpm.2018.04.10>). The software offers functions for a comprehensive automated evaluation of distribution models of count data. In addition to programmatic interaction, a graphical user interface (web server) is included, which enables fast and interactive data-scientific analyses. The user is supported in selecting the most suitable counting distribution for his own data set.",2019-02-03,Jaroslaw Chilimoniuk,https://github.com/jarochi/countfitteR,TRUE,https://github.com/jarochi/countfitter,5836,2,2020-05-08T09:19:42Z,2918
countrycode,"Standardize country names, convert them into one of
40 different coding schemes, convert between coding schemes, and
assign region descriptors.",2020-05-22,Vincent Arel-Bundock,https://github.com/vincentarelbundock/countrycode,TRUE,https://github.com/vincentarelbundock/countrycode,230891,202,2020-05-26T01:52:03Z,1143.0247524752476
countToFPKM,"Implements the algorithm described in Trapnell,C. et al. (2010) <doi: 10.1038/nbt.1621>. This function takes read counts matrix of RNA-Seq data, feature lengths which can be retrieved using 'biomaRt' package, and the mean fragment lengths which can be calculated using the 'CollectInsertSizeMetrics(Picard)' tool. It then returns a matrix of FPKM normalised data by library size and feature effective length. It also provides the user with a quick and reliable function to generate FPKM heatmap plot of the highly variable features in RNA-Seq dataset.",2019-04-07,Ahmed Alhendi,https://github.com/AAlhendi1707/countToFPKM,TRUE,https://github.com/aalhendi1707/counttofpkm,13731,14,2019-08-07T17:09:31Z,980.7857142857143
covafillr,"Facilitates local polynomial regression for state dependent covariates in state-space models. The functionality can also be used from 'C++' based model builder tools such as 'Rcpp'/'inline', 'TMB', or 'JAGS'.",2019-11-22,Christoffer Moesgaard Albertsen,https://github.com/calbertsen/covafillr,TRUE,https://github.com/calbertsen/covafillr,17810,1,2019-11-22T10:44:46Z,17810
coveffectsplot,"Produce forest plots to visualize covariate effects using either
the command line or an interactive 'Shiny' application.",2020-04-03,Samer Mouksassi,https://github.com/smouksassi/interactiveforestplot,TRUE,https://github.com/smouksassi/interactiveforestplot,9011,9,2020-06-03T09:33:45Z,1001.2222222222222
COVID19,"Unified datasets for a better understanding of COVID-19.
The package collects COVID-19 data across governmental sources,
includes policy measures from 'Oxford COVID-19 Government Response Tracker' <https://www.bsg.ox.ac.uk/covidtracker>,
and extends the dataset via an interface to 'World Bank Open Data' <https://data.worldbank.org/>, 'Google Mobility Reports' <https://www.google.com/covid19/mobility/>, 'Apple Mobility Reports' <https://www.apple.com/covid19/mobility>.",2020-05-18,Emanuele Guidotti,https://covid19datahub.io,TRUE,https://github.com/covid19datahub/r,5220,2,2020-06-05T21:14:05Z,2610
covid19.analytics,"Load and analyze updated time series worldwide data of reported cases for the Novel CoronaVirus Disease (CoViD-19) from the Johns Hopkins University Center for Systems Science and Engineering (JHU CSSE) data repository <https://github.com/CSSEGISandData/COVID-19>. The datasets are available in two main modalities, as a time series sequences and aggregated for the last day with greater spatial resolution. Several analysis, visualization and modelling functions are available in the package that will allow the user to compute and visualize total number of cases, total number of changes and growth rate globally or for an specific geographical location, while at the same time generating models using these trends; generate interactive visualizations and generate Susceptible-Infected-Recovered (SIR) model for the disease spread.",2020-05-03,Marcelo Ponce,https://mponce0.github.io/covid19.analytics/,TRUE,https://github.com/mponce0/covid19.analytics,4573,13,2020-06-09T17:02:57Z,351.7692307692308
covid19italy,"Provides a daily summary of the Coronavirus (COVID-19) cases in Italy by country, region and province level. Data source: Presidenza del Consiglio dei Ministri - Dipartimento della Protezione Civile <http://www.protezionecivile.it/>.",2020-04-26,Rami Krispin,https://github.com/Covid19R/covid19italy,TRUE,https://github.com/covid19r/covid19italy,2312,3,2020-05-16T19:17:38Z,770.6666666666666
covid19nytimes,"Accesses the NY Times Covid-19 county-level data
for the US, described in
<https://www.nytimes.com/article/coronavirus-county-data-us.html> and
available at <https://github.com/nytimes/covid-19-data>. It then returns
the data in a tidy data format according to the Covid19R Project data
specification. If you plan to use the data or publicly display the data
or results, please make sure cite the original NY Times source. Please read and
follow the terms laid out in the data license at
<https://github.com/nytimes/covid-19-data/blob/master/LICENSE>.",2020-05-08,Jarrett Byrnes,https://github.com/Covid19R/covid19nytimes,TRUE,https://github.com/covid19r/covid19nytimes,513,14,2020-05-14T19:10:53Z,36.642857142857146
covr,"Track and report code coverage for your package and (optionally)
upload the results to a coverage service like 'Codecov' <http://codecov.io> or
'Coveralls' <http://coveralls.io>. Code coverage is a measure of the amount of
code being exercised by a set of tests. It is an indirect measure of test
quality and completeness. This package is compatible with any testing
methodology or framework and tracks coverage of both R code and compiled
C/C++/FORTRAN code.",2020-03-06,Jim Hester,"https://covr.r-lib.org, https://github.com/r-lib/covr",TRUE,https://github.com/r-lib/covr,5015537,267,2020-06-02T14:48:08Z,18784.78277153558
covTestR,"Testing functions for Covariance Matrices. These tests include high-dimension homogeneity of covariance
matrix testing described by Schott (2007) <doi:10.1016/j.csda.2007.03.004> and high-dimensional one-sample tests of
covariance matrix structure described by Fisher, et al. (2010) <doi:10.1016/j.jmva.2010.07.004>. Covariance matrix
tests use C++ to speed performance and allow larger data sets.",2018-08-17,Ben Barnard,https://covtestr.bearstatistics.com,TRUE,https://github.com/benbarnard/covtestr,16086,0,2019-11-30T17:37:18Z,NA
cowplot,"
Provides various features that help with creating publication-quality figures
with 'ggplot2', such as a set of themes, functions to align plots and arrange
them into complex compound figures, and functions that make it easy to annotate
plots and or mix plots with images. The package was originally written for
internal use in the Wilke lab, hence the name (Claus O. Wilke's plot package).
It has also been used extensively in the book Fundamentals of Data
Visualization.",2019-07-11,Claus O. Wilke,https://wilkelab.org/cowplot,TRUE,https://github.com/wilkelab/cowplot,3212371,480,2019-12-03T01:06:46Z,6692.439583333334
cowsay,"Allows printing of character strings as messages/warnings/etc.
with ASCII animals, including cats, cows, frogs, chickens, ghosts,
and more.",2020-02-06,Scott Chamberlain,https://github.com/sckott/cowsay,TRUE,https://github.com/sckott/cowsay,45988,205,2020-02-07T19:07:46Z,224.33170731707318
coxed,"Functions for generating, simulating, and visualizing expected durations and marginal changes in duration from the Cox proportional hazards model as described in Kropko and Harden (2017) <doi:10.1017/S000712341700045X> and Harden and Kropko (2018) <doi:10.1017/psrm.2018.19>.",2020-01-10,Kropko,https://github.com/jkropko/coxed,TRUE,https://github.com/jkropko/coxed,12501,7,2020-01-10T15:01:46Z,1785.857142857143
coxrt,Fits Cox regression based on retrospectively ascertained times-to-event. The method uses Inverse-Probability-Weighting estimating equations. ,2020-01-07,Bella Vakulenko-Lagun,https://github.com/Bella2001/coxrt,TRUE,https://github.com/bella2001/coxrt,9198,0,2019-07-31T19:52:32Z,NA
cppRouting,"Calculation of distances, shortest paths and isochrones on weighted graphs using several variants of Dijkstra algorithm.
Proposed algorithms are unidirectional Dijkstra (Dijkstra, E. W. (1959) <doi:10.1007/BF01386390>),
bidirectional Dijkstra (Goldberg, Andrew & Fonseca F. Werneck, Renato (2005) <https://pdfs.semanticscholar.org/0761/18dfbe1d5a220f6ac59b4de4ad07b50283ac.pdf>),
A* search (P. E. Hart, N. J. Nilsson et B. Raphael (1968) <doi:10.1109/TSSC.1968.300136>),
new bidirectional A* (Pijls & Post (2009) <http://repub.eur.nl/pub/16100/ei2009-10.pdf>),
Contraction hierarchies (R. Geisberger, P. Sanders, D. Schultes and D. Delling (2008) <doi:10.1007/978-3-540-68552-4_24>),
PHAST (D. Delling, A.Goldberg, A. Nowatzyk, R. Werneck (2011) <doi:10.1016/j.jpdc.2012.02.007>).",2020-01-07,Vincent Larmet,https://github.com/vlarmet/cppRouting,TRUE,https://github.com/vlarmet/cpprouting,5943,46,2020-01-03T10:58:44Z,129.19565217391303
cpr,"Implementation of the Control Polygon Reduction and Control Net
Reduction methods for finding parsimonious B-spline regression models.",2017-03-07,Peter DeWitt,https://github.com/dewittpe/cpr/,TRUE,https://github.com/dewittpe/cpr,11388,2,2019-09-06T15:47:15Z,5694
cptcity,Incorporates colour gradients from the 'cpt-city' web archive available at <http://soliton.vm.bytemark.co.uk/pub/cpt-city/>. ,2019-03-07,Sergio Ibarra-Espinosa,https://github.com/ibarraespinosa/cptcity,TRUE,https://github.com/ibarraespinosa/cptcity,11559,7,2019-11-07T04:56:40Z,1651.2857142857142
cqcr,"Access data from the 'Care Quality Commission', the health
and adult social care regulator for England. The 'Care Quality Commission'
operates an API
<https://www.cqc.org.uk/about-us/transparency/using-cqc-data#api>, with data
available under the Open Government License. Data includes information on
service providers, locations such as hospitals, care homes and
medical clinics, and ratings and inspection reports.",2019-10-07,Evan Odell,"https://github.com/evanodell/cqcr, https://docs.evanodell.com/cqcr",TRUE,https://github.com/evanodell/cqcr,3466,0,2020-04-22T12:51:11Z,NA
cranlike,"A set of functions to manage 'CRAN'-like repositories
efficiently.",2018-11-26,Gábor Csárdi,https://github.com/r-hub/cranlike,TRUE,https://github.com/r-hub/cranlike,14106,22,2020-03-07T10:03:26Z,641.1818181818181
cranlogs,"'API' to the database of 'CRAN' package downloads from the
'RStudio' 'CRAN mirror'. The database itself is at <http://cranlogs.r-pkg.org>,
see <https://github.com/r-hub/cranlogs.app> for the raw 'API'.",2019-04-29,Gábor Csárdi,"https://github.com/r-hub/cranlogs,
https://r-hub.github.io/cranlogs",TRUE,https://github.com/r-hub/cranlogs,41093,61,2019-12-03T09:04:10Z,673.655737704918
cranly,"Core visualizations and summaries for the CRAN package database. The package provides comprehensive methods for cleaning up and organizing the information in the CRAN package database, for building package directives networks (depends, imports, suggests, enhances, linking to) and collaboration networks, producing package dependence trees, and for computing useful summaries and producing interactive visualizations from the resulting networks and summaries. The resulting networks can be coerced to 'igraph' <https://CRAN.R-project.org/package=igraph> objects for further analyses and modelling.",2019-10-08,Ioannis Kosmidis,https://github.com/ikosmidis/cranly,TRUE,https://github.com/ikosmidis/cranly,11123,40,2019-10-08T16:41:24Z,278.075
CREAM,"Provides a new method for identification of clusters of genomic
regions within chromosomes. Primarily, it is used for calling clusters of
cis-regulatory elements (COREs). 'CREAM' uses genome-wide maps of genomic regions
in the tissue or cell type of interest, such as those generated from chromatin-based
assays including DNaseI, ATAC or ChIP-Seq. 'CREAM' considers proximity of the elements
within chromosomes of a given sample to identify COREs in the following steps:
1) It identifies window size or the maximum allowed distance between the elements
within each CORE, 2) It identifies number of elements which should be clustered
as a CORE, 3) It calls COREs, 4) It filters the COREs with lowest order which
does not pass the threshold considered in the approach.",2018-06-06,Benjamin Haibe-Kains,https://github.com/bhklab/CREAM,TRUE,https://github.com/bhklab/cream,10722,7,2020-01-31T20:59:21Z,1531.7142857142858
cregg,"Simple tidying, analysis, and visualization of conjoint (factorial) experiments, including estimation and visualization of average marginal component effects ('AMCEs') and marginal means ('MMs') for weighted and un-weighted survey data, along with useful reference category diagnostics and statistical tests. Estimation of 'AMCEs' is based upon methods described by Hainmueller, Hopkins, and Yamamoto (2014) <doi:10.1093/pan/mpt024>.",2018-07-30,Thomas J. Leeper,https://github.com/leeper/cregg,TRUE,https://github.com/leeper/cregg,7206,38,2020-05-23T12:26:41Z,189.6315789473684
cRegulome,"Builds a 'SQLite' database file of pre-calculated transcription
factor/microRNA-gene correlations (co-expression) in cancer from the
Cistrome Cancer Liu et al. (2011) <doi:10.1186/gb-2011-12-8-r83> and
'miRCancerdb' databases (in press). Provides custom classes and functions
to query, tidy and plot the correlation data.",2020-05-08,Mahmoud Ahmed,"https://docs.ropensci.org/cRegulome,
https://github.com/ropensci/cRegulome",TRUE,https://github.com/ropensci/cregulome,12431,2,2020-04-04T08:17:42Z,6215.5
CRF,"Implements modeling and computational tools for conditional
random fields (CRF) model as well as other probabilistic undirected
graphical models of discrete data with pairwise and unary potentials.",2019-12-01,Ling-Yun Wu,https://github.com/wulingyun/CRF,TRUE,https://github.com/wulingyun/crf,31768,12,2019-11-30T02:14:41Z,2647.3333333333335
crfsuite,"Wraps the 'CRFsuite' library <https://github.com/chokkan/crfsuite> allowing users
to fit a Conditional Random Field model and to apply it on existing data.
The focus of the implementation is in the area of Natural Language Processing where this R package allows you to easily build and apply models
for named entity recognition, text chunking, part of speech tagging, intent recognition or classification of any category you have in mind. Next to training, a small web application
is included in the package to allow you to easily construct training data.",2020-05-18,Jan Wijffels,https://github.com/bnosac/crfsuite,TRUE,https://github.com/bnosac/crfsuite,11037,44,2020-05-12T08:55:48Z,250.8409090909091
cricketr,"Tools for analyzing performances of cricketers based on stats in
ESPN Cricinfo Statsguru. The toolset can be used for analysis of Tests,ODIs
and Twenty20 matches of both batsmen and bowlers. The package can also be used to
analyze team performances.",2020-03-28,Tinniam V Ganesh,https://github.com/tvganesh/cricketr,TRUE,https://github.com/tvganesh/cricketr,23937,45,2020-03-28T14:12:21Z,531.9333333333333
crispRdesignR,"Designs guide sequences for CRISPR/Cas9 genome editing and
provides information on sequence features pertinent to guide
efficiency. Sequence features include annotated off-target
predictions in a user-selected genome and a predicted efficiency
score based on the model described in Doench et al. (2016)
<doi:10.1038/nbt.3437>. Users are able to import additional genomes
and genome annotation files to use when searching and annotating
off-target hits. All guide sequences and off-target data can be
generated through the 'R' console with sgRNA_Design() or through
'crispRdesignR's' user interface with crispRdesignRUI(). CRISPR
(Clustered Regularly Interspaced Short Palindromic Repeats) and the
associated protein Cas9 refer to a technique used in genome editing.",2020-05-26,Dylan Beeber,<https://github.com/dylanbeeber/crispRdesignR>,TRUE,https://github.com/dylanbeeber/crisprdesignr,121,5,2020-05-29T03:05:10Z,24.2
crminer,"Text mining client for 'Crossref' (<https://crossref.org>). Includes
functions for getting getting links to full text of articles, fetching full
text articles from those links or Digital Object Identifiers ('DOIs'),
and text extraction from 'PDFs'.",2020-04-07,Scott Chamberlain,"https://github.com/ropensci/crminer (devel)
https://docs.ropensci.org/crminer (docs)",TRUE,https://github.com/ropensci/crminer,22428,17,2020-06-01T17:56:50Z,1319.2941176470588
crmn,"Implements the Cross-contribution Compensating Multiple
standard Normalization (CCMN) method described in Redestig et
al. (2009) Analytical Chemistry <doi:10.1021/ac901143w>
and other normalization algorithms.",2020-02-10,Henning Redestig,https://github.com/hredestig/crmn,TRUE,https://github.com/hredestig/crmn,26422,0,2020-02-10T07:37:47Z,NA
crmPack,"Implements a wide range of model-based dose
escalation designs, ranging from classical and modern continual
reassessment methods (CRMs) based on dose-limiting toxicity endpoints to
dual-endpoint designs taking into account a biomarker/efficacy outcome. The
focus is on Bayesian inference, making it very easy to setup a new design
with its own JAGS code. However, it is also possible to implement 3+3
designs for comparison or models with non-Bayesian estimation. The whole
package is written in a modular form in the S4 class system, making it very
flexible for adaptation to new models, escalation or stopping rules.",2019-06-13,Giuseppe Palermo,https://github.com/roche/crmPack,TRUE,https://github.com/roche/crmpack,22851,3,2019-10-26T18:23:28Z,7617
crochet,"Functions to help implement the extraction / subsetting / indexing
function '[' and replacement function '[<-' of custom matrix-like types
(based on S3, S4, etc.), modeled as closely to the base matrix class as
possible (with tests to prove it).",2020-05-20,Alexander Grueneberg,https://github.com/agrueneberg/crochet,TRUE,https://github.com/agrueneberg/crochet,17466,4,2020-05-20T19:55:14Z,4366.5
CropDetectR,"A helpful tool for the identification of crop rows. Methods of this package include: Excess Green color scale <https://www.researchgate.net/publication/270613992_Color_Indices_for_Weed_Identification_Under_Various_Soil_Residue_and_Lighting_Conditions>, Otsu Thresholding <https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=4310076>, and Morphology <https://en.wikipedia.org/wiki/Mathematical_morphology>.",2019-09-20,Nicolaas VanSteenbergen,NA,TRUE,https://github.com/niconaut/cropdetectr,3823,0,2019-12-31T20:35:43Z,NA
crossrun,"Joint distribution of number of crossings and the
longest run in a series of independent Bernoulli trials. The
computations uses an iterative procedure where computations
are based on results from shorter series. The procedure
conditions on the start value and partitions by further
conditioning on the position of the first crossing (or none).",2018-10-08,Tore Wentzel-Larsen,https://github.com/ToreWentzel-Larsen/crossrun,TRUE,https://github.com/torewentzel-larsen/crossrun,6192,0,2020-06-07T07:02:07Z,NA
crosstalk,"Provides building blocks for allowing HTML widgets to communicate
with each other, with Shiny or without (i.e. static .html files). Currently
supports linked brushing and filtering.",2020-03-13,Joe Cheng,https://rstudio.github.io/crosstalk/,TRUE,https://github.com/rstudio/crosstalk,6323249,202,2020-05-19T16:16:16Z,31303.212871287127
crosswalkr,"A pair of functions for renaming and encoding data frames
using external crosswalk files. It is especially useful when
constructing master data sets from multiple smaller data
sets that do not name or encode variables consistently
across files. Based on similar commands in 'Stata'.",2020-01-08,Benjamin Skinner,https://github.com/btskinner/crosswalkr,TRUE,https://github.com/btskinner/crosswalkr,13059,6,2019-12-18T20:29:16Z,2176.5
crplyr,"In order to facilitate analysis of datasets hosted on the Crunch
data platform <http://crunch.io/>, the 'crplyr' package implements 'dplyr'
methods on top of the Crunch backend. The usual methods 'select', 'filter',
'group_by', 'summarize', and 'collect' are implemented in such a way as to
perform as much computation on the server and pull as little data locally
as possible.",2020-04-24,Greg Freedman Ellis,"https://crunch.io/r/crplyr/, https://github.com/Crunch-io/crplyr",TRUE,https://github.com/crunch-io/crplyr,13359,4,2020-04-27T14:14:37Z,3339.75
crs,"Regression splines that handle a mix of continuous and categorical (discrete) data often encountered in applied settings. I would like to gratefully acknowledge support from the Natural Sciences and Engineering Research Council of Canada (NSERC, <http://www.nserc-crsng.gc.ca>), the Social Sciences and Humanities Research Council of Canada (SSHRC, <http://www.sshrc-crsh.gc.ca>), and the Shared Hierarchical Academic Research Computing Network (SHARCNET, <https://www.sharcnet.ca>).",2019-11-25,Jeffrey S. Racine,https://github.com/JeffreyRacine/R-Package-crs,TRUE,https://github.com/jeffreyracine/r-package-crs,90612,10,2019-11-19T14:44:07Z,9061.2
crseEventStudy,"Based on Dutta et al. (2018) <doi:10.1016/j.jempfin.2018.02.004>, this package provides their standardized test for abnormal returns in long-horizon event studies. The methods used improve the major weaknesses of size, power, and robustness of long-run statistical tests described in Kothari/Warner (2007) <doi:10.1016/B978-0-444-53265-7.50015-9>. Abnormal returns are weighted by their statistical precision (i.e., standard deviation), resulting in abnormal standardized returns. This procedure efficiently captures the heteroskedasticity problem. Clustering techniques following Cameron et al. (2011) <10.1198/jbes.2010.07136> are adopted for computing cross-sectional correlation robust standard errors. The statistical tests in this package therefore accounts for potential biases arising from returns' cross-sectional correlation, autocorrelation, and volatility clustering without power loss.",2019-08-20,Siegfried Köstlmeier,https://github.com/skoestlmeier/crseEventStudy,TRUE,https://github.com/skoestlmeier/crseeventstudy,8486,1,2019-08-20T08:21:55Z,8486
crsmeta,"Obtain coordinate system metadata from various data formats. There
are functions to extract a 'CRS' (coordinate reference system,
<https://en.wikipedia.org/wiki/Spatial_reference_system>) in 'EPSG' (European
Petroleum Survey Group, <http://www.epsg.org/>), 'PROJ4' <https://proj.org/>,
or 'WKT2' (Well-Known Text 2,
<http://docs.opengeospatial.org/is/12-063r5/12-063r5.html>) forms. This is
purely for getting simple metadata from in-memory formats, please use other
tools for out of memory data sources. ",2020-03-29,Michael Sumner,https://github.com/hypertidy/crsmeta,TRUE,https://github.com/hypertidy/crsmeta,4213,5,2020-03-29T09:57:42Z,842.6
crsra,"Tidies and performs preliminary analysis of 'Coursera' research
export data. These export data can be downloaded by anyone who has classes
on Coursera and wants to analyze the data. Coursera is one of the leading
providers of MOOCs and was launched in January 2012. With over 25 million
learners, Coursera is the most popular provider in the world being followed
by EdX, the MOOC provider that was a result of a collaboration between
Harvard University and MIT, with over 10 million users. Coursera has over
150 university partners from 29 countries and offers a total of 2000+
courses from computer science to philosophy. Besides, Coursera offers 180+
specialization, Coursera's credential system, and four fully online Masters
degrees. For more information about Coursera check Coursera's
About page on <https://blog.coursera.org/about/>.",2018-05-05,Aboozar Hadavand,NA,TRUE,https://github.com/jhudsl/crsra,7671,1,2020-04-30T20:28:01Z,7671
CRUF,"Miscellaneous functions for clinical research data analysis. Format table of descriptive statistics, regression models, pvalues according to medical journals standards.",2020-03-05,Yves Gallien,https://github.com/Ygall/CRUF,TRUE,https://github.com/ygall/cruf,1608,0,2020-03-06T09:56:12Z,NA
crunch,"The Crunch.io service <http://crunch.io/> provides a cloud-based
data store and analytic engine, as well as an intuitive web interface.
Using this package, analysts can interact with and manipulate Crunch
datasets from within R. Importantly, this allows technical researchers to
collaborate naturally with team members, managers, and clients who prefer a
point-and-click interface.",2020-03-12,Greg Freedman Ellis,"https://crunch.io/r/crunch/, https://github.com/Crunch-io/rcrunch",TRUE,https://github.com/crunch-io/rcrunch,63831,7,2020-06-09T12:37:52Z,9118.714285714286
csa,"Integration of Earth system data from various sources is a challenging task. Except for their qualitative heterogeneity, different data records exist for describing similar Earth system process at different spatio-temporal scales. Data inter-comparison and validation are usually performed at a single spatial or temporal scale, which could hamper the identification of potential discrepancies in other scales. 'csa' package offers a simple, yet efficient, graphical method for synthesizing and comparing observed and modelled data across a range of spatio-temporal scales. Instead of focusing at specific scales, such as annual means or original grid resolution, we examine how their statistical properties change across spatio-temporal continuum. ",2020-05-16,Yannis Markonis,http://github.com/imarkonis/csa,TRUE,https://github.com/imarkonis/csa,345,0,2020-05-12T20:10:20Z,NA
cSEM,"Estimate, assess, test, and study linear, nonlinear, hierarchical
and multigroup structural equation models using composite-based approaches
and procedures, including estimation techniques such as partial least squares
path modeling (PLS-PM) and its derivatives (PLSc, ordPLSc, robustPLSc),
generalized structured component analysis (GSCA), generalized structured
component analysis with uniqueness terms (GSCAm), generalized canonical
correlation analysis (GCCA), principal component analysis (PCA),
factor score regression (FSR) using sum score, regression or
bartlett scores (including bias correction using Croon’s approach),
as well as several tests and typical postestimation procedures
(e.g., verify admissibility of the estimates, assess the model fit,
test the model fit etc.).",2020-03-29,Manuel E. Rademaker,"https://github.com/M-E-Rademaker/cSEM,
https://m-e-rademaker.github.io/cSEM/",TRUE,https://github.com/m-e-rademaker/csem,2760,8,2020-05-19T10:57:59Z,345
cstab,"Selection of the number of clusters in cluster analysis using
stability methods.",2018-06-19,Jonas M. B. Haslbeck,NA,TRUE,https://github.com/jmbh/cstab,16090,2,2019-07-27T21:36:17Z,8045
ctDNAtools,"Contains tools to analyze minimal residual disease and cell-free DNA fragmentation from aligned sequencing data.
More details on the methods can be found in:
Amjad Alkodsi, Leo Meriranta, Annika Pasanen, Sirpa Leppä (2020) <doi:10.1101/2020.01.27.912790>.",2020-03-04,Amjad Alkodsi,https://github.com/alkodsi/ctDNAtools,TRUE,https://github.com/alkodsi/ctdnatools,994,10,2020-03-05T18:34:49Z,99.4
ctmm,"Functions for identifying, fitting, and applying continuous-space, continuous-time stochastic movement models to animal tracking data.
The package is described in Calabrese et al (2016) <doi:10.1111/2041-210X.12559>, with models and methods based on those introduced in
Fleming & Calabrese et al (2014) <doi:10.1086/675504>,
Fleming et al (2014) <doi:10.1111/2041-210X.12176>,
Fleming et al (2015) <doi:10.1103/PhysRevE.91.032107>,
Fleming et al (2015) <doi:10.1890/14-2010.1>,
Fleming et al (2016) <doi:10.1890/15-1607>,
Péron & Fleming et al (2016) <doi:10.1186/s40462-016-0084-7>,
Fleming & Calabrese (2017) <doi:10.1111/2041-210X.12673>,
Péron et al (2017) <doi:10.1002/ecm.1260>,
Fleming et al (2017) <doi:10.1016/j.ecoinf.2017.04.008>,
Fleming et al (2018) <doi:10.1002/eap.1704>,
Winner & Noonan et al (2018) <doi:10.1111/2041-210X.13027>,
Fleming et al (2019) <doi:10.1111/2041-210X.13270>,
and
Noonan & Fleming et al (2019) <doi:10.1186/s40462-019-0177-1>.",2020-05-07,Christen H. Fleming,"https://github.com/ctmm-initiative/ctmm,
http://biology.umd.edu/movement.html",TRUE,https://github.com/ctmm-initiative/ctmm,48688,9,2020-05-15T21:51:44Z,5409.777777777777
ctrdata,"Provides functions for querying, retrieving and analysing
protocol- and results-related information on clinical trials from
two public registers, the European Union Clinical Trials Register
(EUCTR, <https://www.clinicaltrialsregister.eu/>) and
ClinicalTrials.gov (CTGOV, <https://clinicaltrials.gov/>). The
information is transformed and then stored in a database (nodbi).
Functions are provided for accessing and analysing the locally
stored information on the clinical trials, as well as for
identifying duplicate records. The package is motivated by the need
for aggregating and trend-analysing the design, conduct and outcomes
across clinical trials.",2020-05-18,Ralf Herold,https://github.com/rfhb/ctrdata,TRUE,https://github.com/rfhb/ctrdata,7703,13,2020-05-19T06:16:55Z,592.5384615384615
cubature,"R wrappers around the cubature C library of Steven
G. Johnson for adaptive multivariate integration over hypercubes
and the Cuba C library of Thomas Hahn for deterministic and
Monte Carlo integration. Scalar and vector interfaces for
cubature and Cuba routines are provided; the vector interfaces
are highly recommended as demonstrated in the package
vignette.",2019-12-04,Balasubramanian Narasimhan,https://bnaras.github.io/cubature,TRUE,https://github.com/bnaras/cubature,729949,5,2019-12-03T19:23:48Z,145989.8
cubelyr,"An implementation of a data cube extracted out of
'dplyr' for backward compatibility.",2020-02-29,Hadley Wickham,https://github.com/hadley/cubelyr,TRUE,https://github.com/hadley/cubelyr,9425,20,2020-03-02T23:37:33Z,471.25
Cubist,Regression modeling using rules with added instance-based corrections.,2020-01-10,Max Kuhn,https://topepo.github.io/Cubist,TRUE,https://github.com/topepo/cubist,980201,24,2020-01-09T19:48:50Z,40841.708333333336
CUFF,"Utility functions that provides wrapper to descriptive base functions
like cor, mean and table. It makes use of the formula interface to pass
variables to functions. It also provides operators to concatenate (%+%), to
repeat (%n%) and manage character vectors for nice display.",2019-01-22,Charles-Édouard Giguère,https://github.com/giguerch/CUFF,TRUE,https://github.com/giguerch/cuff,17751,0,2020-03-13T03:34:05Z,NA
cumulocityr,"Access the 'Cumulocity' API and retrieve data on devices, measurements, and events. Documentation for the API can be found at <https://www.cumulocity.com/guides/reference/rest-implementation/>.",2019-10-20,Dmitriy Bolotov,"https://softwareag.github.io/cumulocityr/,
https://github.com/SoftwareAG/cumulocityr",TRUE,https://github.com/softwareag/cumulocityr,2991,3,2020-04-29T20:37:46Z,997
cuRe,"Contains functions for estimating generalized parametric mixture and non-mixture cure models, loss of lifetime, mean residual lifetime, and crude event probabilities.",2020-04-23,Lasse Hjort Jakobsen,http://github.com/LasseHjort/cuRe,TRUE,https://github.com/lassehjort/cure,2408,0,2020-05-11T19:27:03Z,NA
customLayout,"Create complicated drawing areas for multiple elements by combining much simpler layouts. It is an extended version of layout function from the 'graphics' package, but it also works with 'grid' graphics. It also supports arranging elements inside 'PowerPoint' slides created using the 'officer' package.",2020-01-17,Zygmunt Zawadzki,"https://www.customlayout.zstat.pl/,
https://github.com/zzawadz/customLayout",TRUE,https://github.com/zzawadz/customlayout,12486,47,2020-01-17T13:05:20Z,265.6595744680851
cutoff,"Seek the significant cutoff value for a continuous variable, which will
be transformed into a classification, for linear regression,
logistic regression, logrank analysis and cox regression. First of all,
all combinations will be gotten by combn() function. Then n.per argument,
abbreviated of total number percentage, will be used to remove the combination
of smaller data group. In logistic, Cox regression and logrank analysis,
we will also use p.per argument, patient percentage, to filter the lower
proportion of patients in each group. Finally, p value in regression
results will be used to get the significant combinations and output
relevant parameters. In this package, there is no limit to the number of
cutoff points, which can be 1, 2, 3 or more. Still, we provide 2 methods,
typical Bonferroni and Duglas G (1994) <doi: 10.1093/jnci/86.11.829>, to
adjust the p value, Missing values will be deleted by na.omit() function
before analysis.",2019-12-20,Jing Zhang,https://github.com/yikeshu0611/cutoff,TRUE,https://github.com/yikeshu0611/cutoff,2955,1,2019-12-01T01:22:29Z,2955
cutpointr,"Estimate cutpoints that optimize a specified metric in binary classification tasks
and validate performance using bootstrapping. Some methods for more robust cutpoint
estimation and various plotting functions are included.",2020-04-14,Christian Thiele,https://github.com/thie1e/cutpointr,TRUE,https://github.com/thie1e/cutpointr,18228,51,2020-06-03T14:22:02Z,357.4117647058824
cvar,"Compute expected shortfall (ES) and Value at Risk (VaR) from a
quantile function, distribution function, random number generator or
probability density function. ES is also known as Conditional Value at
Risk (CVaR). Virtually any continuous distribution can be specified.
The functions are vectorized over the arguments. The computations are
done directly from the definitions, see e.g. Acerbi and Tasche (2002)
<doi:10.1111/1468-0300.00091>. Some support for GARCH models is provided,
as well.",2019-03-15,Georgi N. Boshnakov,https://github.com/GeoBosh/cvar https://geobosh.github.io/cvar/,TRUE,https://github.com/geobosh/cvar,17053,1,2020-03-07T20:58:37Z,17053
cvcqv,"Provides some easy-to-use functions and classes to calculate
variability measures such as coefficient of variation with confidence
intervals provided with all available methods. References are
Panichkitkosolkul (2013) <doi:10.1155/2013/324940> ,
Altunkaynak & Gamgam (2018) <doi:10.1080/03610918.2018.1435800> ,
Albatineh, Kibria, Wilcox & Zogheib (2014) <doi:10.1080/02664763.2013.847405> .",2019-08-06,Maani Beigy,https://github.com/MaaniBeigy/cvcqv,TRUE,https://github.com/maanibeigy/cvcqv,4286,6,2019-08-20T23:01:55Z,714.3333333333334
cvequality,"Contains functions for testing for significant differences between multiple coefficients of variation. Includes Feltz and Miller's (1996) <DOI:10.1002/(SICI)1097-0258(19960330)15:6%3C647::AID-SIM184%3E3.0.CO;2-P> asymptotic test and Krishnamoorthy and Lee's (2014) <DOI:10.1007/s00180-013-0445-2> modified signed-likelihood ratio test. See the vignette for more, including full details of citations.",2019-01-07,Ben Marwick,https://github.com/benmarwick/cvequality,TRUE,https://github.com/benmarwick/cvequality,15716,7,2019-11-06T04:50:41Z,2245.1428571428573
cvGEE,"Calculates predictions from generalized estimating equations and internally cross-validates them using the logarithmic, quadratic and spherical proper scoring rules; Kung-Yee Liang and Scott L. Zeger (1986) <doi:10.1093/biomet/73.1.13>.",2019-07-23,Dimitris Rizopoulos,"https://drizopoulos.github.io/cvGEE/,
https://github.com/drizopoulos/cvGEE",TRUE,https://github.com/drizopoulos/cvgee,3908,3,2019-07-30T00:55:30Z,1302.6666666666667
cvms,"Cross-validate one or multiple regression and classification models
and get relevant evaluation metrics in a tidy format. Validate the
best model on a test set and compare it to a baseline evaluation.
Alternatively, evaluate predictions from an external model. Currently
supports regression and classification (binary and multiclass).
Described in chp. 5 of Jeyaraman, B. P., Olsen, L. R.,
& Wambugu M. (2019, ISBN: 9781838550134).",2020-05-29,Ludvig Renbo Olsen,https://github.com/ludvigolsen/cvms,TRUE,https://github.com/ludvigolsen/cvms,6936,19,2020-05-29T12:47:56Z,365.05263157894734
CVXR,"An object-oriented modeling language for disciplined convex
programming (DCP). It allows the user to formulate convex optimization problems
in a natural way following mathematical convention and DCP rules. The system
analyzes the problem, verifies its convexity, converts it into a canonical form,
and hands it off to an appropriate solver to obtain the solution.",2020-04-02,Anqi Fu,"https://github.com/cvxgrp/CVXR, https://cvxr.rbind.io,
https://www.cvxgrp.org/CVXR/",TRUE,https://github.com/cvxgrp/cvxr,41932,112,2020-04-09T18:11:57Z,374.39285714285717
cwbtools,"The 'Corpus Workbench' ('CWB', <http://cwb.sourceforge.net/>) offers a classic and mature
approach for working with large, linguistically and structurally annotated corpora. The 'CWB'
is memory efficient and its design makes running queries fast (Evert and Hardie 2011,
<http://www.stefan-evert.de/PUB/EvertHardie2011.pdf>). The 'cwbtools' package offers
pure R tools to create indexed corpus files as well as high-level wrappers for the original C
implementation of CWB as exposed by the 'RcppCWB' package
<https://CRAN.R-project.org/package=RcppCWB>. Additional functionality to add and
modify annotations of corpora from within R makes working with CWB indexed corpora
much more flexible and convenient. The 'cwbtools' package in combination with the R packages
'RcppCWB' (<https://CRAN.R-project.org/package=RcppCWB>) and 'polmineR'
(<https://CRAN.R-project.org/package=polmineR>) offers a lightweight infrastructure
to support the combination of quantitative and qualitative approaches for working
with textual data.",2020-04-14,Andreas Blaette,https://www.github.com/PolMine/cwbtools,TRUE,https://github.com/polmine/cwbtools,4029,0,2019-12-10T09:01:43Z,NA
cyanoFilter,"An approach to filter out and/or identify synechococcus type cyanobacteria cells from all particles measured via flow cytometry.
It combines known characteristics of these cyanobacteria strains alongside gating techniques developed by Mehrnoush, M. et al. (2015) <doi:10.1093/bioinformatics/btu677>
in the 'flowDensity' package to identify and separate these cyanobacteria cells from other cell types.
Aside the gating techniques in the 'flowDensity' package, an EM style clustering technique
is also developed to identify these cyanobacteria cell populations.",2020-01-09,Oluwafemi Olusoji,https://github.com/fomotis/cyanoFilter,TRUE,https://github.com/fomotis/cyanofilter,3513,1,2020-03-10T21:12:27Z,3513
Cyclops,"This model fitting tool incorporates cyclic coordinate descent and
majorization-minimization approaches to fit a variety of regression models
found in large-scale observational healthcare data. Implementations focus
on computational optimization and fine-scale parallelization to yield
efficient inference in massive datasets. Please see:
Suchard, Simpson, Zorych, Ryan and Madigan (2013) <doi:10.1145/2414416.2414791>.",2020-06-05,Marc A. Suchard,https://github.com/ohdsi/cyclops,TRUE,https://github.com/ohdsi/cyclops,24043,25,2020-06-08T06:18:06Z,961.72
cyphr,"Encryption wrappers, using low-level support from
'sodium' and 'openssl'. 'cyphr' tries to smooth over some pain
points when using encryption within applications and data analysis
by wrapping around differences in function names and arguments in
different encryption providing packages. It also provides
high-level wrappers for input/output functions for seamlessly
adding encryption to existing analyses.",2020-03-09,Rich FitzJohn,"https://github.com/ropensci/cyphr, https://docs.ropensci.org/cyphr",TRUE,https://github.com/ropensci/cyphr,9010,81,2020-03-09T15:52:40Z,111.23456790123457
cystiSim,"The cystiSim package provides an agent-based model for Taenia solium transmission and control. cystiSim was developed within the framework of CYSTINET, the European Network on taeniosis/cysticercosis, COST ACTION TD1302.",2016-05-15,Brecht Devleesschauwer,https://github.com/brechtdv/cystiSim,TRUE,https://github.com/brechtdv/cystisim,11417,0,2020-02-26T14:38:11Z,NA
cytofan,"An implementation of Fan plots for cytometry data in 'ggplot2'.
For reference see Britton, E.; Fisher, P. & J. Whitley (1998) The Inflation Report Projections: Understanding the Fan Chart
<https://www.bankofengland.co.uk/quarterly-bulletin/1998/q1/the-inflation-report-projections-understanding-the-fan-chart>).",2018-07-30,Yann Abraham,https://github.com/yannabraham/cytofan,TRUE,https://github.com/yannabraham/cytofan,7632,2,2019-11-29T11:24:32Z,3816
cytometree,"Given the hypothesis of a bi-modal distribution of cells for
each marker, the algorithm constructs a binary tree, the nodes of which are
subpopulations of cells. At each node, observed cells and markers are modeled
by both a family of normal distributions and a family of bi-modal normal mixture
distributions. Splitting is done according to a normalized difference of AIC
between the two families. Method is detailed in: Commenges, Alkhassim, Gottardo,
Hejblum & Thiebaut (2018) <doi: 10.1002/cyto.a.23601>. ",2019-12-04,Chariff Alkhassim,NA,TRUE,https://github.com/sistm/cytometree,13953,4,2020-04-06T20:04:28Z,3488.25
cytominer,"Typical morphological profiling datasets have millions of cells
and hundreds of features per cell. When working with this data, you must
clean the data, normalize the features to make them comparable across
experiments, transform the features, select features based on their
quality, and aggregate the single-cell data, if needed. 'cytominer' makes
these steps fast and easy. Methods used in practice in the field are
discussed in Caicedo (2017) <doi:10.1038/nmeth.4397>. An overview of the
field is presented in Caicedo (2016) <doi:10.1016/j.copbio.2016.04.003>.",2020-05-09,Shantanu Singh,https://github.com/cytomining/cytominer,TRUE,https://github.com/cytomining/cytominer,5911,24,2020-05-09T12:00:49Z,246.29166666666666
czechrates,"
Interface to interest rates as published by the Czech National Bank. Currently supported are the PRIBOR rates (PRague InterBank Offered Rate - the CZK member of the IBOR family of rates) and two-week repo rate - a key policy rate of CNB.",2020-06-03,Jindra Lacko,https://github.com/jla-data/czechrates,TRUE,https://github.com/jla-data/czechrates,0,0,2020-06-09T17:27:11Z,NA
czso,"Get programmatic access to the open data provided by the
Czech Statistical Office (CZSO, <https://czso.cz>).",2020-04-07,Petr Bouchal,https://github.com/petrbouchal/czso,TRUE,https://github.com/petrbouchal/czso,1001,6,2020-06-06T11:09:24Z,166.83333333333334
d3r,"Provides a suite of functions to help ease the use of 'd3.js' in R.
These helpers include 'htmltools::htmlDependency' functions, hierarchy
builders, and conversion tools for 'partykit', 'igraph,' 'table',
and 'data.frame' R objects into the 'JSON' that 'd3.js' expects.",2020-05-25,Mike Bostock,https://github.com/timelyportfolio/d3r,TRUE,https://github.com/timelyportfolio/d3r,291339,125,2020-05-25T15:14:51Z,2330.712
d3Tree,"Create and customize interactive collapsible 'D3' trees using the 'D3'
JavaScript library and the 'htmlwidgets' package. These trees can be used
directly from the R console, from 'RStudio', in Shiny apps and R Markdown documents.
When in Shiny the tree layout is observed by the server and can be used as a reactive filter
of structured data.",2017-06-13,Jonathan Sidi,https://github.com/metrumresearchgroup/d3Tree,TRUE,https://github.com/metrumresearchgroup/d3tree,18250,68,2019-10-14T23:59:03Z,268.38235294117646
dabestr,"Data Analysis using Bootstrap-Coupled ESTimation.
Estimation statistics is a simple framework that avoids the pitfalls of
significance testing. It uses familiar statistical concepts: means,
mean differences, and error bars. More importantly, it focuses on the
effect size of one's experiment/intervention, as opposed to a false
dichotomy engendered by P values.
An estimation plot has two key features:
1. It presents all datapoints as a swarmplot, which orders each point to
display the underlying distribution.
2. It presents the effect size as a bootstrap 95% confidence interval on a
separate but aligned axes.
Estimation plots are introduced in Ho et al., Nature Methods 2019, 1548-7105.
<doi:10.1038/s41592-019-0470-3>.
The free-to-view PDF is located at <https://rdcu.be/bHhJ4>.",2020-04-20,Joses W. Ho,https://github.com/ACCLAB/dabestr,TRUE,https://github.com/acclab/dabestr,11654,151,2020-04-21T07:58:02Z,77.17880794701986
dagitty,"A port of the web-based software 'DAGitty', available at
<http://dagitty.net>, for analyzing structural causal models
(also known as directed acyclic graphs or DAGs).
This package computes covariate adjustment sets for estimating causal
effects, enumerates instrumental variables, derives testable
implications (d-separation and vanishing tetrads), generates equivalent
models, and includes a simple facility for data simulation. ",2016-08-26,Johannes Textor,"http://www.dagitty.net, https://github.com/jtextor/dagitty",TRUE,https://github.com/jtextor/dagitty,61540,115,2020-04-16T14:29:09Z,535.1304347826087
DALEX,"Unverified black box model is the path to the failure. Opaqueness leads to distrust.
Distrust leads to ignoration. Ignoration leads to rejection.
DALEX package xrays any model and helps to explore and explain its behaviour.
Machine Learning (ML) models are widely used and have various applications in classification
or regression. Models created with boosting, bagging, stacking or similar techniques are often
used due to their high performance. But such black-box models usually lack of direct interpretability.
DALEX package contains various methods that help to understand the link between input variables
and model output. Implemented methods help to explore model on the level of a single instance
as well as a level of the whole dataset.
All model explainers are model agnostic and can be compared across different models.
DALEX package is the cornerstone for 'DrWhy.AI' universe of packages for visual model exploration.
Find more details in (Biecek 2018) <arXiv:1806.08915>.",2020-04-25,Przemyslaw Biecek,"https://ModelOriented.github.io/DALEX/,
https://github.com/ModelOriented/DALEX",TRUE,https://github.com/modeloriented/dalex,58629,571,2020-06-05T09:54:48Z,102.67775831873905
DALEXtra,"Provides wrapper of various machine learning models.
In applied machine learning, there
is a strong belief that we need to strike a balance
between interpretability and accuracy.
However, in field of the interpretable machine learning,
there are more and more new ideas for explaining black-box models,
that are implemented in 'R'.
'DALEXtra' creates 'DALEX' Biecek (2018) <arXiv:1806.08915> explainer for many type of models
including those created using 'python' 'scikit-learn' and 'keras' libraries, 'java' 'h2o' library and
'mljar' API. Important part of the package is Champion-Challenger analysis and innovative approach
to model performance across subsets of test data presented in Funnel Plot.
Third branch of 'DALEXtra' package is aspect importance analysis
that provides instance-level explanations for the groups of explanatory variables.",2020-03-29,Szymon Maksymiuk,"https://ModelOriented.github.io/DALEXtra/,
https://github.com/ModelOriented/DALEXtra",TRUE,https://github.com/modeloriented/dalextra,4800,23,2020-05-29T17:17:52Z,208.69565217391303
damr,"Loads behavioural data from the widely used Drosophila Activity Monitor System (DAMS, TriKinetics <https://trikinetics.com/>) into the rethomics framework.",2019-07-15,Quentin Geissmann,https://github.com/rethomics/damr,TRUE,https://github.com/rethomics/damr,9533,2,2020-06-09T01:43:49Z,4766.5
dams,"The single largest source of dams in the United States is the
National Inventory of Dams (NID) <http://nid.usace.army.mil> from the US
Army Corps of Engineers. Entire data from the NID cannot be obtained all at
once and NID's website limits extraction of more than a couple of thousand
records at a time. Moreover, selected data from the NID's user interface
cannot not be saved to a file. In order to make the analysis of this data
easier, all the data from NID was extracted manually. Subsequently, the raw
data was checked for potential errors and cleaned. This package provides
sample cleaned data from the NID and provides functionality to access the
entire cleaned NID data.",2020-05-20,Joseph Stachelek,https://github.com/jsta/dams,TRUE,https://github.com/jsta/dams,17452,5,2020-05-20T17:43:35Z,3490.4
DAP,An implementation of Discriminant Analysis via Projections (DAP) method for high-dimensional binary classification in the case of unequal covariance matrices. See Irina Gaynanova and Tianying Wang (2018) <arXiv:1711.04817v2>.,2018-03-05,Tianying Wang and Irina Gaynanova,http://github.com/irinagain/DAP,TRUE,https://github.com/irinagain/dap,8185,1,2019-08-28T21:36:06Z,8185
dapr,"An easy-to-use, dependency-free set of functions for iterating over
elements of various input objects. Functions are wrappers around base
apply()/lapply()/vapply() functions but designed to have similar
functionality to the mapping functions in the 'purrr' package
<https://purrr.tidyverse.org/>. Specifically, function names more explicitly
communicate the expected class of the output and functions also allow for
the convenient shortcut of '~ .x' instead of the more verbose
'function(.x) .x'.",2019-05-06,Michael W. Kearney,https://github.com/mkearney/dapr,TRUE,https://github.com/mkearney/dapr,11772,53,2019-06-28T18:49:27Z,222.11320754716982
darksky,"Provides programmatic access to the 'Dark Sky' 'API'
<https://darksky.net/dev/docs>, which provides current or historical global
weather conditions.",2017-09-20,Bob Rudis,https://github.com/hrbrmstr/darksky,TRUE,https://github.com/hrbrmstr/darksky,17841,78,2020-04-04T11:37:15Z,228.73076923076923
dash,"A framework for building analytical web applications, 'dash' offers a pleasant and productive development experience. No JavaScript required.",2020-06-04,Ryan Patrick Kyle,https://github.com/plotly/dashR,TRUE,https://github.com/plotly/dashr,0,298,2020-06-04T15:04:43Z,0
dashCoreComponents,"'Dash' ships with supercharged components for interactive user interfaces. A core set of components, written and maintained by the 'Dash' team, is available in the 'dashCoreComponents' package. The source for this package is on GitHub: plotly/dash-core-components.",2020-05-06,Ryan Patrick Kyle,https://github.com/plotly/dash-core-components,TRUE,https://github.com/plotly/dash-core-components,823,201,2020-06-02T21:16:00Z,4.0945273631840795
dashHtmlComponents,"'Dash' is a web application framework that provides pure Python and R abstraction around HTML, CSS, and JavaScript. Instead of writing HTML or using an HTML templating engine, you compose your layout using R functions within the 'dashHtmlComponents' package. The source for this package is on GitHub: plotly/dash-html-components.",2020-05-06,Ryan Patrick Kyle,https://github.com/plotly/dash-html-components,TRUE,https://github.com/plotly/dash-html-components,813,112,2020-06-02T19:02:43Z,7.258928571428571
dashTable,"An interactive table component designed for editing and exploring large datasets, 'dashDataTable' is rendered with standard, semantic HTML <table/> markup, which makes it accessible, responsive, and easy to style. This component was written from scratch in 'React.js' specifically for the 'dash' community. Its API was designed to be ergonomic and its behaviour is completely customizable through its properties.",2020-05-14,Ryan Patrick Kyle,https://github.com/plotly/dash-table,TRUE,https://github.com/plotly/dash-table,504,322,2020-06-02T21:20:20Z,1.565217391304348
dat,"An implementation of common higher order functions with syntactic
sugar for anonymous function. Provides also a link to 'dplyr' and
'data.table' for common transformations on data frames to work around non
standard evaluation by default.",2020-05-15,Sebastian Warnholz,NA,TRUE,https://github.com/wahani/dat,19217,13,2020-05-16T06:33:48Z,1478.2307692307693
data.table,"Fast aggregation of large data (e.g. 100GB in RAM), fast ordered joins, fast add/modify/delete of columns by group using no copies at all, list columns, friendly and fast character-separated-value read/write. Offers a natural and flexible syntax, for faster development.",2019-12-09,Matt Dowle,"http://r-datatable.com, https://Rdatatable.gitlab.io/data.table,
https://github.com/Rdatatable/data.table",TRUE,https://github.com/rdatatable/data.table,17954368,2375,2020-06-09T06:46:06Z,7559.733894736843
data.tree,"Create tree structures from hierarchical data, and traverse the
tree in various orders. Aggregate, cumulate, print, plot, convert to and from
data.frame and more. Useful for decision trees, machine learning, finance,
conversion from and to JSON, and many other applications.",2019-11-09,Russ Hyde [ctb,http://github.com/gluc/data.tree,TRUE,https://github.com/gluc/data.tree,570369,152,2019-11-09T08:11:39Z,3752.4276315789475
data360r,"Makes it easy to engage with the Application Program Interface (API)
of the 'TCdata360' and 'Govdata360' platforms at <https://tcdata360.worldbank.org/>
and <https://govdata360.worldbank.org/>, respectively.
These application program interfaces provide access to over 5000 trade, competitiveness, and governance
indicator data, metadata, and related information from sources
both inside and outside the World Bank Group.
Package functions include easier download of data sets, metadata, and
related information, as well as searching based on user-inputted query.",2020-04-30,Ramin Aliyev,https://github.com/mrpsonglao/data360r,TRUE,https://github.com/mrpsonglao/data360r,5531,21,2020-04-29T13:15:51Z,263.3809523809524
DatabaseConnector,"An R 'DataBase Interface' ('DBI') compatible interface to various database platforms ('PostgreSQL', 'Oracle', 'Microsoft SQL Server',
'Amazon Redshift', 'Microsoft Parallel Database Warehouse', 'IBM Netezza', 'Apache Impala', 'Google BigQuery', and 'SQLite'). Also includes support for
fetching data as 'Andromeda' objects. Uses 'Java Database Connectivity' ('JDBC') to connect to databases (except SQLite).",2020-06-06,Martijn Schuemie,"https://ohdsi.github.io/DatabaseConnector,
https://github.com/OHDSI/DatabaseConnector",TRUE,https://github.com/ohdsi/databaseconnector,21654,25,2020-06-05T08:48:02Z,866.16
DatabionicSwarm,"Algorithms implementing populations of agents that interact with one another and sense their environment may exhibit emergent behavior such as self-organization and swarm intelligence. Here, a swarm system called Databionic swarm (DBS) is introduced which was published in Thrun, M.C., Ultsch A.: ""Swarm Intelligence for Self-Organized Clustering"" (2020), Artificial Intelligence, <DOI:10.1016/j.artint.2020.103237>. DBS is able to adapt itself to structures of high-dimensional data such as natural clusters characterized by distance and/or density based structures in the data space. The first module is the parameter-free projection method called Pswarm (Pswarm()), which exploits the concepts of self-organization and emergence, game theory, swarm intelligence and symmetry considerations. The second module is the parameter-free high-dimensional data visualization technique, which generates projected points on the topographic map with hypsometric tints defined by the generalized U-matrix (GeneratePswarmVisualization()). The third module is the clustering method itself with non-critical parameters (DBSclustering()). Clustering can be verified by the visualization and vice versa. The term DBS refers to the method as a whole. It enables even a non-professional in the field of data mining to apply its algorithms for visualization and/or clustering to data sets with completely different structures drawn from diverse research fields. The comparison to common projection methods can be found in the book of Thrun, M.C.: ""Projection Based Clustering through Self-Organization and Swarm Intelligence"" (2018) <DOI:10.1007/978-3-658-20540-9>. A comparison to 26 common clustering algorithms on 15 datasets is presented on the website.",2020-02-03,Michael Thrun,http://www.deepbionics.org,TRUE,https://github.com/mthrun/databionicswarm,17396,5,2020-05-05T07:33:59Z,3479.2
dataCompareR,"Easy comparison of two tabular data
objects in R. Specifically designed to show differences between two sets of
data in a useful way that should make it easier to understand the differences,
and if necessary, help you work out how to remedy them. Aims
to offer a more useful output than all.equal() when your two data sets do not
match, but isn't intended to replace all.equal() as a way to test for equality.",2020-04-30,Sarah Johnston,https://github.com/capitalone/dataCompareR,TRUE,https://github.com/capitalone/datacomparer,16923,54,2020-04-28T08:25:28Z,313.3888888888889
DataExplorer,"Automated data exploration process for analytic tasks and predictive modeling, so
that users could focus on understanding data and extracting insights. The package scans and
analyzes each variable, and visualizes them with typical graphical techniques. Common
data processing methods are also available to treat and format data.",2020-01-07,Boxuan Cui,http://boxuancui.github.io/DataExplorer/,TRUE,https://github.com/boxuancui/dataexplorer,174372,304,2020-01-10T15:22:14Z,573.5921052631579
datafsm,"Automatic generation of finite state machine models of dynamic
decision-making that both have strong predictive power and are
interpretable in human terms. We use an efficient model representation and
a genetic algorithm-based estimation process to generate simple
deterministic approximations that explain most of the structure of complex
stochastic processes. We have applied the software to empirical data, and
demonstrated it's ability to recover known data-generating processes by
simulating data with agent-based models and correctly deriving the
underlying decision models for multiple agent models and degrees of
stochasticity.",2019-11-28,Gilligan Jonathan M.,https://github.com/jonathan-g/datafsm,TRUE,https://github.com/jonathan-g/datafsm,17648,8,2019-12-05T06:40:52Z,2206
dataMaid,"Data screening is an important first step of any statistical
analysis. dataMaid auto generates a customizable data report with a thorough
summary of the checks and the results that a human can use to identify possible
errors. It provides an extendable suite of test for common potential
errors in a dataset. ",2019-12-10,Claus Thorn Ekstrøm,"https://github.com/ekstroem/dataMaid,
https://doi.org/10.18637/jss.v090.i06",TRUE,https://github.com/ekstroem/datamaid,41306,101,2020-03-06T00:47:32Z,408.970297029703
dataMeta,Designed to create a basic data dictionary and append to the original dataset's attributes list. The package makes use of a tidy dataset and creates a data frame that will serve as a linker that will aid in building the dictionary. The dictionary is then appended to the list of the original dataset's attributes. The user will have the option of entering variable and item descriptions by writing code or use alternate functions that will prompt the user to add these.,2017-08-12,Dania M. Rodriguez,https://github.com/dmrodz/dataMeta,TRUE,https://github.com/dmrodz/datameta,12634,14,2019-06-16T23:24:03Z,902.4285714285714
dataone,"Provides read and write access to data and metadata from
the DataONE network <https://www.dataone.org> of data repositories.
Each DataONE repository implements a consistent repository application
programming interface. Users call methods in R to access these remote
repository functions, such as methods to query the metadata catalog, get
access to metadata for particular data packages, and read the data objects
from the data repository. Users can also insert and update data objects on
repositories that support these methods.",2020-02-16,Matthew B. Jones,https://github.com/DataONEorg/rdataone,TRUE,https://github.com/dataoneorg/rdataone,25794,25,2020-06-09T22:15:35Z,1031.76
datapack,"Provides a flexible container to transport and manipulate complex
sets of data. These data may consist of multiple data files and associated
meta data and ancillary files. Individual data objects have associated system
level meta data, and data files are linked together using the OAI-ORE standard
resource map which describes the relationships between the files. The OAI-
ORE standard is described at <https://www.openarchives.org/ore>. Data packages
can be serialized and transported as structured files that have been created
following the BagIt specification. The BagIt specification is described at
<https://tools.ietf.org/html/draft-kunze-bagit-08>.",2019-10-15,Matthew B. Jones,NA,TRUE,https://github.com/ropensci/datapack,25343,36,2019-12-09T09:29:38Z,703.9722222222222
datapackage.r,"Work with 'Frictionless Data Packages' (<https://frictionlessdata.io/specs/data-package/>). Allows to load and validate any descriptor for a data package profile, create and modify descriptors and provides expose methods for reading and streaming data in the package. When a descriptor is a 'Tabular Data Package', it uses the 'Table Schema' package (<https://CRAN.R-project.org/package=tableschema.r>) and exposes its functionality, for each resource object in the resources field.",2020-05-06,Kleanthis Koupidis,https://github.com/frictionlessdata/datapackage-r,TRUE,https://github.com/frictionlessdata/datapackage-r,5906,23,2020-05-06T17:07:04Z,256.7826086956522
datapasta,RStudio addins and R functions that make copy-pasting vectors and tables to text painless.,2020-01-17,Miles McBain,https://github.com/milesmcbain/datapasta,TRUE,https://github.com/milesmcbain/datapasta,45927,611,2020-01-17T12:03:17Z,75.16693944353518
dataPreparation,Do most of the painful data preparation for a data science project with a minimum amount of code; Take advantages of data.table efficiency and use some algorithmic trick in order to perform data preparation in a time and RAM efficient way.,2020-02-12,Emmanuel-Lin Toulemonde,NA,TRUE,https://github.com/eltoulemonde/datapreparation,42851,23,2020-02-12T13:56:55Z,1863.0869565217392
dataRetrieval,"Collection of functions to help retrieve U.S. Geological Survey
(USGS) and U.S. Environmental Protection Agency (EPA) water quality and
hydrology data from web services. USGS web services are discovered from
National Water Information System (NWIS) <https://waterservices.usgs.gov/> and <https://waterdata.usgs.gov/nwis>.
Both EPA and USGS water quality data are obtained from the Water Quality Portal <https://www.waterqualitydata.us/>.",2020-03-11,Laura DeCicco,https://pubs.usgs.gov/tm/04/a10/,TRUE,https://github.com/usgs-r/dataretrieval,71924,148,2020-05-11T13:06:36Z,485.97297297297297
datarium,"Contains data organized by topics: categorical data, regression model,
means comparisons, independent and repeated measures ANOVA, mixed ANOVA and ANCOVA.",2019-05-21,Alboukadel Kassambara,NA,TRUE,https://github.com/kassambara/datarium,20108,5,2020-04-28T11:15:06Z,4021.6
datasauRus,"The Datasaurus Dozen is a set of datasets with the same summary statistics. They
retain the same summary statistics despite having radically different distributions.
The datasets represent a larger and quirkier object lesson that is typically taught
via Anscombe's Quartet (available in the 'datasets' package). Anscombe's Quartet
contains four very different distributions with the same summary statistics and as
such highlights the value of visualisation in understanding data, over and above
summary statistics. As well as being an engaging variant on the Quartet, the data
is generated in a novel way. The simulated annealing process used to derive datasets
from the original Datasaurus is detailed in ""Same Stats, Different Graphs: Generating
Datasets with Varied Appearance and Identical Statistics through Simulated Annealing""
<doi:10.1145/3025453.3025912>.",2018-09-20,Steph Locke,"https://github.com/lockedata/datasauRus,
https://itsalocke.com/datasaurus/",TRUE,https://github.com/lockedata/datasaurus,27533,177,2020-01-27T15:39:44Z,155.5536723163842
DataSpaceR,"Provides a convenient API interface to access immunological data
within 'the CAVD DataSpace'(<https://dataspace.cavd.org>), a data sharing
and discovery tool that facilitates exploration of HIV immunological data
from pre-clinical and clinical HIV vaccine studies.",2020-01-08,Ju Yeong Kim,"https://docs.ropensci.org/DataSpaceR,
https://github.com/ropensci/DataSpaceR",TRUE,https://github.com/ropensci/dataspacer,5783,5,2020-05-29T21:16:26Z,1156.6
dataverse,"Provides access to Dataverse version 4 APIs <https://dataverse.org/>,
enabling data search, retrieval, and deposit. For Dataverse versions <= 4.0,
use the deprecated 'dvn' package <https://cran.r-project.org/package=dvn>.",2017-06-15,Thomas J. Leeper,https://github.com/iqss/dataverse-client-r,TRUE,https://github.com/iqss/dataverse-client-r,12236,29,2020-05-29T18:16:09Z,421.9310344827586
DataVisualizations,"Gives access to data visualisation methods that are relevant from the data scientist's point of view. The flagship idea of 'DataVisualizations' is the mirrored density plot (MD-plot) for either classified or non-classified multivariate data presented in Thrun et al. (2019) <arXiv:1908.06081>. The MD-plot outperforms the box-and-whisker diagram (box plot), violin plot and bean plot. Furthermore, a collection of various visualization methods for univariate data is provided. In the case of exploratory data analysis, 'DataVisualizations' makes it possible to inspect the distribution of each feature of a dataset visually through a combination of four methods. One of these methods is the Pareto density estimation (PDE) of the probability density function (pdf). Additionally, visualizations of the distribution of distances using PDE, the scatter-density plot using PDE for two variables as well as the Shepard density plot and the Bland-Altman plot are presented here. Pertaining to classified high-dimensional data, a number of visualizations are described, such as f.ex. the heat map and silhouette plot. A political map of the world or Germany can be visualized with the additional information defined by a classification of countries or regions. By extending the political map further, an uncomplicated function for a Choropleth map can be used which is useful for measurements across a geographic area. For categorical features, the Pie charts, slope charts and fan plots, improved by the ABC analysis, become usable. More detailed explanations are found in the book by Thrun, M.C.: ""Projection-Based Clustering through Self-Organization and Swarm Intelligence"" (2018) <doi:10.1007/978-3-658-20540-9>.",2020-05-17,Michael Thrun,http://www.deepbionics.org,TRUE,https://github.com/mthrun/datavisualizations,18307,3,2020-06-07T14:44:49Z,6102.333333333333
daterangepicker,"A Shiny Input for date-ranges, which pops up two calendars for selecting dates, times, or predefined ranges like ""Last 30 Days"". It wraps the JavaScript library 'daterangepicker' which is available at <https://www.daterangepicker.com>.",2020-03-20,Sebastian Gatscha,"https://github.com/trafficonese/daterangepicker/,
https://www.daterangepicker.com",TRUE,https://github.com/trafficonese/daterangepicker,1395,7,2020-03-27T14:25:39Z,199.28571428571428
datetimeutils,"Utilities for handling dates and times, such
as selecting particular days of the week or month,
formatting timestamps as required by RSS feeds, or
converting timestamp representations of other software
(such as 'MATLAB' and 'Excel') to R. The package is
lightweight (no dependencies, pure R implementations) and
relies only on R's standard classes to represent dates
and times ('Date' and 'POSIXt'); it aims to provide
efficient implementations, through vectorisation and the
use of R's native numeric representations of timestamps
where possible.",2020-03-25,Enrico Schumann,"http://enricoschumann.net/R/packages/datetimeutils/,
https://github.com/enricoschumann/datetimeutils",TRUE,https://github.com/enricoschumann/datetimeutils,15502,0,2020-03-25T19:21:16Z,NA
datoramar,"A thin wrapper around the 'Datorama' API.
Ideal for analyzing marketing data from <https://datorama.com>.",2017-12-20,Kade Killary,https://github.com/beigebrucewayne/datoramar,TRUE,https://github.com/beigebrucewayne/datoramar,9074,5,2019-06-28T09:03:53Z,1814.8
datr,"Interface with the 'Dat' p2p network protocol <https://datproject.org>. Clone archives from the network, share your own files, and install packages from the network.",2018-03-26,Chris Hartgerink,https://github.com/libscie/datr,TRUE,https://github.com/libscie/datr,8105,53,2019-12-09T12:49:35Z,152.9245283018868
datrProfile,"Profiles datasets (collecting statistics and informative summaries
about that data) on data frames and 'ODBC' tables: maximum, minimum, mean, standard deviation, nulls,
distinct values, data patterns, data/format frequencies.",2019-08-02,Arnaldo Vitaliano,https://github.com/avitaliano/datrProfile,TRUE,https://github.com/avitaliano/datrprofile,3781,0,2019-07-31T12:48:39Z,NA
daymetr,"Programmatic interface to the 'Daymet' web services
(<http://daymet.ornl.gov>). Allows for easy downloads of
'Daymet' climate data directly to your R workspace or your computer.
Routines for both single pixel data downloads and
gridded (netCDF) data are provided.",2019-02-07,Hufkens Koen,https://github.com/khufkens/daymetr,TRUE,https://github.com/khufkens/daymetr,13012,8,2020-06-02T18:10:43Z,1626.5
dbarts,"Fits Bayesian additive regression trees (BART; Chipman, George, and McCulloch (2010) <doi:10.1214/09-AOAS285>) while allowing the updating of predictors or response so that BART can be incorporated as a conditional model in a Gibbs/Metropolis-Hastings sampler. Also serves as a drop-in replacement for package 'BayesTree'.",2020-03-20,Vincent Dorie,https://github.com/vdorie/dbarts,TRUE,https://github.com/vdorie/dbarts,37937,33,2020-03-27T21:59:22Z,1149.6060606060605
dbflobr,"Reads and writes files to SQLite databases <https://www.sqlite.org/index.html> as flobs
(a flob is a blob that preserves the file extension).",2020-05-13,Sebastian Dalgarno,https://github.com/poissonconsulting/dbflobr,TRUE,https://github.com/poissonconsulting/dbflobr,4252,4,2020-05-27T16:30:28Z,1063
DBI,"A database interface definition for communication
between R and relational database management systems. All classes in
this package are virtual and need to be extended by the various R/DBMS
implementations.",2019-12-15,Kirill Müller,"http://r-dbi.github.io/DBI, https://github.com/r-dbi/DBI",TRUE,https://github.com/r-dbi/dbi,12689394,192,2020-02-23T21:43:51Z,66090.59375
DBItest,"A helper that tests 'DBI' back ends for conformity
to the interface.",2019-12-16,Kirill Müller,"https://dbitest.r-dbi.org, https://github.com/r-dbi/DBItest",TRUE,https://github.com/r-dbi/dbitest,201694,14,2019-12-30T12:11:55Z,14406.714285714286
dbmss,"Simple computation of spatial statistic functions of distance to characterize the spatial structures of mapped objects, following Marcon, Traissac, Puech, and Lang (2015) <doi:10.18637/jss.v067.c03>.
Includes classical functions (Ripley's K and others) and more recent ones used by spatial economists (Duranton and Overman's Kd, Marcon and Puech's M).
Relies on 'spatstat' for some core calculation.",2020-01-08,Eric Marcon,https://github.com/EricMarcon/dbmss,TRUE,https://github.com/ericmarcon/dbmss,38844,2,2020-04-18T21:13:38Z,19422
dbnR,"Learning and inference over dynamic Bayesian networks of arbitrary
Markovian order. Extends some of the functionality offered by the 'bnlearn'
package to learn the networks from data and perform exact inference.
It offers a modification of Trabelsi (2013) <doi:10.1007/978-3-642-41398-8_34>
dynamic max-min hill climbing algorithm for structure learning and
the possibility to perform forecasts of arbitrary length. A tool for
visualizing the structure of the net is also provided via the 'visNetwork' package.",2020-03-25,David Quesada,https://github.com/dkesada/dbnR,TRUE,https://github.com/dkesada/dbnr,1298,0,2020-06-04T13:31:50Z,NA
dbparser,"This tool is for parsing the 'DrugBank' XML database <http://drugbank.ca/>. The parsed
data are then returned in a proper 'R' dataframe with the ability to save
them in a given database.",2020-06-08,Mohammed Ali,"https://docs.ropensci.org/dbparser,
https://github.com/ropensci/dbparser",TRUE,https://github.com/ropensci/dbparser,9591,19,2020-06-09T04:58:35Z,504.7894736842105
dbplot,"Leverages 'dplyr' to process the calculations of a plot inside a database.
This package provides helper functions that abstract the work at three levels:
outputs a 'ggplot', outputs the calculations, outputs the formula
needed to calculate bins.",2020-02-07,Edgar Ruiz,https://github.com/edgararuiz/dbplot,TRUE,https://github.com/edgararuiz/dbplot,40463,116,2020-02-06T21:31:14Z,348.8189655172414
dbplyr,"A 'dplyr' back end for databases that allows you to
work with remote database tables as if they are in-memory data frames.
Basic features works with any database that has a 'DBI' back end; more
advanced features require 'SQL' translation to be provided by the
package author.",2020-05-27,Hadley Wickham,"https://dbplyr.tidyverse.org/, https://github.com/tidyverse/dbplyr",TRUE,https://github.com/tidyverse/dbplyr,7558796,235,2020-05-27T12:10:57Z,32165.08936170213
dbscan,"A fast reimplementation of several density-based algorithms of
the DBSCAN family for spatial data. Includes the DBSCAN (density-based spatial
clustering of applications with noise) and OPTICS (ordering points to identify
the clustering structure) clustering algorithms HDBSCAN (hierarchical DBSCAN) and the LOF (local outlier
factor) algorithm. The implementations use the kd-tree data structure (from
library ANN) for faster k-nearest neighbor search. An R interface to fast kNN
and fixed-radius NN search is also provided.
See Hahsler M, Piekenbrock M and Doran D (2019) <doi:10.18637/jss.v091.i01>.",2019-10-23,Michael Hahsler,https://github.com/mhahsler/dbscan,TRUE,https://github.com/mhahsler/dbscan,450785,141,2020-05-18T16:22:56Z,3197.0567375886526
dbx,"Provides select, insert, update, upsert, and delete database operations. Supports 'PostgreSQL', 'MySQL', 'SQLite', and more, and plays nicely with the 'DBI' package.",2019-04-24,Andrew Kane,https://github.com/ankane/dbx,TRUE,https://github.com/ankane/dbx,15313,139,2020-02-17T03:02:29Z,110.16546762589928
dccvalidator,"Performs checks for common metadata quality issues. Used by the
data coordinating centers for the 'AMP-AD' consortium
(<https://adknowledgeportal.synapse.org>), 'PsychENCODE' consortium
(<http://www.psychencode.org>), and others to validate metadata prior to
data releases.",2020-03-13,Nicole Kauer,"https://sage-bionetworks.github.io/dccvalidator,
https://github.com/Sage-Bionetworks/dccvalidator",TRUE,https://github.com/sage-bionetworks/dccvalidator,2601,3,2020-06-02T22:31:08Z,867
dclust,"Contains a single function dclust() for divisive hierarchical clustering based on
recursive k-means partitioning (k = 2). Useful for clustering large datasets
where computation of a n x n distance matrix is not feasible (e.g. n > 10,000 records).
For further information see Steinbach, Karypis and Kumar (2000) <http://glaros.dtc.umn.edu/gkhome/fetch/papers/docclusterKDDTMW00.pdf>.",2019-09-05,Shaun Wilkinson,http://github.com/shaunpwilkinson/dclust,TRUE,https://github.com/shaunpwilkinson/dclust,3825,1,2019-08-28T23:46:59Z,3825
dde,"Solves ordinary and delay differential equations, where
the objective function is written in either R or C. Suitable only
for non-stiff equations, the solver uses a 'Dormand-Prince' method
that allows interpolation of the solution at any point. This
approach is as described by Hairer, Norsett and Wanner (1993)
<ISBN:3540604529>. Support is also included for iterating
difference equations.",2020-01-16,Rich FitzJohn,https://github.com/mrc-ide/dde,TRUE,https://github.com/mrc-ide/dde,6257,13,2020-03-27T13:14:36Z,481.3076923076923
ddi,"Implements Meng's data defect index (ddi), which represents
the degree of sample bias relative to an iid sample. The data defect
correlation (ddc) represents the correlation between the outcome of interest
and the selection into the sample; when the sample selection is independent
across the population, the ddc is zero. Details are in Meng (2018)
<doi:10.1214/18-AOAS1161SF>, ""Statistical Paradises and Paradoxes in Big Data (I):
Law of Large Populations, Big Data Paradox, and the 2016 US Presidential
Election."" Survey estimates from the Cooperative Congressional Election Study
(CCES) is included to replicate the article's results. ",2020-01-26,Shiro Kuriwaki,https://github.com/kuriwaki/ddi,TRUE,https://github.com/kuriwaki/ddi,2088,2,2020-05-09T01:04:05Z,1044
ddpcr,"An interface to explore, analyze, and visualize droplet digital PCR
(ddPCR) data in R. This is the first non-proprietary software for analyzing
two-channel ddPCR data. An interactive tool was also created and is available
online to facilitate this analysis for anyone who is not comfortable with
using R.",2020-06-02,Dean Attali,https://github.com/daattali/ddpcr,TRUE,https://github.com/daattali/ddpcr,22722,42,2020-06-09T04:10:18Z,541
deBInfer,"A Bayesian framework for parameter inference in differential equations.
This approach offers a rigorous methodology for parameter inference as well as
modeling the link between unobservable model states and parameters, and
observable quantities. Provides templates for the DE model, the
observation model and data likelihood, and the model parameters and their prior
distributions. A Markov chain Monte Carlo (MCMC) procedure processes these inputs
to estimate the posterior distributions of the parameters and any derived
quantities, including the model trajectories. Further functionality is provided
to facilitate MCMC diagnostics and the visualisation of the posterior distributions
of model parameters and trajectories.",2018-04-18,Philipp H Boersch-Supan,https://github.com/pboesu/debinfer,TRUE,https://github.com/pboesu/debinfer,14780,8,2020-03-02T14:48:00Z,1847.5
DeCAFS,"Detect abrupt changes in time series with local fluctuations as a random walk process and autocorrelated noise as an AR(1) process. See Romano, G., Rigaill, G., Runge, V., Fearnhead, P. (2020) <arXiv:2005.01379>.",2020-05-18,Gaetano Romano,NA,TRUE,https://github.com/gtromano/decafs,552,0,2020-05-12T19:55:03Z,NA
decido,"Provides constrained triangulation of polygons. Ear cutting (or
ear clipping) applies constrained triangulation by successively 'cutting'
triangles from a polygon defined by path/s. Holes are supported by introducing
a bridge segment between polygon paths. This package wraps the 'header-only'
library 'earcut.hpp' <https://github.com/mapbox/earcut.hpp.git> which includes
a reference to the method used by Held, M. (2001) <doi:10.1007/s00453-001-0028-4>. ",2020-05-19,Michael Sumner,https://hypertidy.github.io/decido,TRUE,https://github.com/hypertidy/decido,13155,13,2020-05-21T21:59:12Z,1011.9230769230769
deckgl,"Makes 'deck.gl' <https://deck.gl/>, a WebGL-powered open-source JavaScript framework
for visual exploratory data analysis of large datasets, available within R via the 'htmlwidgets' package.
Furthermore, it supports basemaps from 'mapbox' <https://www.mapbox.com/> via
'mapbox-gl-js' <https://github.com/mapbox/mapbox-gl-js>.",2020-05-06,Stefan Kuethe,"https://github.com/crazycapivara/deckgl/,
https://crazycapivara.github.io/deckgl/",TRUE,https://github.com/crazycapivara/deckgl,7145,53,2020-05-27T16:06:08Z,134.81132075471697
DeclareDesign,"Researchers can characterize and learn about the properties of
research designs before implementation using `DeclareDesign`. Ex ante
declaration and diagnosis of designs can help researchers clarify the
strengths and limitations of their designs and to improve their
properties, and can help readers evaluate a research strategy prior
to implementation and without access to results. It can also make it
easier for designs to be shared, replicated, and critiqued.",2020-03-24,Graeme Blair,"https://declaredesign.org,
https://github.com/DeclareDesign/DeclareDesign",TRUE,https://github.com/declaredesign/declaredesign,15599,75,2020-04-30T17:26:46Z,207.98666666666668
decompr,"Two global-value-chain decompositions are implemented. Firstly, the
Wang-Wei-Zhu (Wang, Wei, and Zhu, 2013) algorithm splits bilateral gross exports
into 16 value-added components. Secondly, the Leontief decomposition (default)
derives the value added origin of exports by country and industry, which is also
based on Wang, Wei, and Zhu (Wang, Z., S.-J. Wei, and K. Zhu. 2013. ""Quantifying
International Production Sharing at the Bilateral and Sector Levels."").",2016-08-17,Bastiaan Quast,"http://qua.st/decompr, https://github.com/bquast/decompr",TRUE,https://github.com/bquast/decompr,28017,6,2020-04-26T07:21:35Z,4669.5
deconstructSigs,"Takes sample information in the form of the fraction of mutations
in each of 96 trinucleotide contexts and identifies the weighted combination
of published signatures that, when summed, most closely reconstructs the
mutational profile.",2016-07-29,Rachel Rosenthal,https://github.com/raerose01/deconstructSigs,TRUE,https://github.com/raerose01/deconstructsigs,20999,83,2020-03-12T12:06:01Z,253
deductive,"Attempt to repair inconsistencies and missing values in data
records by using information from valid values and validation rules
restricting the data.",2019-04-10,Mark van der Loo,https://github.com/data-cleaning/deductive,TRUE,https://github.com/data-cleaning/deductive,16054,8,2019-06-25T13:11:26Z,2006.75
deepdep,"Provides tools for exploration of R package dependencies.
The main deepdep() function allows to acquire deep dependencies of any package and plot them in an elegant way.
It also adds some popularity measures for the packages e.g. in the form of download count through the 'cranlogs' package.
Uses the CRAN metadata database <http://crandb.r-pkg.org> and Bioconductor metadata <http://bioconductor.org>.
Other data acquire functions are: get_dependencies(), get_downloads() and get_description().
The deepdep_shiny() function runs shiny application that helps to produce a nice 'deepdep' plot. ",2020-05-06,Dominik Rafacz,"https://dominikrafacz.github.io/deepdep/,
https://github.com/DominikRafacz/deepdep",TRUE,https://github.com/dominikrafacz/deepdep,1922,20,2020-05-06T13:19:48Z,96.1
Delaporte,"Provides probability mass, distribution, quantile, random-variate generation, and method-of-moments parameter-estimation functions for the Delaporte distribution. The Delaporte is a discrete probability distribution which can be considered the convolution of a negative binomial distribution with a Poisson distribution. Alternatively, it can be considered a counting distribution with both Poisson and negative binomial components. It has been studied in actuarial science as a frequency distribution which has more variability than the Poisson, but less than the negative binomial.",2020-06-01,Avraham Adler,https://github.com/aadler/Delaporte,TRUE,https://github.com/aadler/delaporte,54141,0,2020-06-01T07:19:10Z,NA
delayed,"Mechanisms to parallelize dependent tasks in a manner that
optimizes the compute resources available. It provides access to ""delayed""
computations, which may be parallelized using futures. It is, to an extent,
a facsimile of the 'Dask' library (<https://dask.org/>), for the 'Python'
language.",2020-02-28,Jeremy Coyle,https://tlverse.org/delayed,TRUE,https://github.com/tlverse/delayed,1989,11,2020-02-29T20:24:59Z,180.8181818181818
DemoDecomp,"Two general demographic decomposition methods are offered: Pseudo-continuous decomposition proposed by Horiuchi, Wilmoth, and Pletcher (2008) <doi:10.1353/dem.0.0033> and stepwise replacement decomposition proposed by Andreev, Shkolnikov and Begun (2002) <doi:10.4054/DemRes.2002.7.14>.",2018-08-14,Tim Riffe,NA,TRUE,https://github.com/timriffe/demodecomp,7435,0,2020-01-02T10:39:42Z,NA
DemografixeR,"Connects to the <https://genderize.io/>, <https://agify.io/> and <https://nationalize.io/> APIs to estimate gender, age and nationality of a first name.",2020-05-06,Matthias Brenninkmeijer,"https://matbmeijer.github.io/DemografixeR,
https://github.com/matbmeijer/DemografixeR",TRUE,https://github.com/matbmeijer/demografixer,723,2,2020-05-14T11:11:15Z,361.5
dendextend,"Offers a set of functions for extending
'dendrogram' objects in R, letting you visualize and compare trees of
'hierarchical clusterings'. You can (1) Adjust a tree's graphical parameters
- the color, size, type, etc of its branches, nodes and labels. (2)
Visually and statistically compare different 'dendrograms' to one another.",2020-02-28,Tal Galili,"http://talgalili.github.io/dendextend/,
https://github.com/talgalili/dendextend/,
https://cran.r-project.org/package=dendextend,
https://www.r-statistics.com/tag/dendextend/,
https://academic.oup.com/bioinformatics/article/31/22/3718/240978/dendextend-an-R-package-for-visualizing-adjusting",TRUE,https://github.com/talgalili/dendextend,2369441,119,2020-05-31T21:11:40Z,19911.268907563026
dendroTools,"Provides novel dendroclimatological methods, primarily used by the
Tree-ring research community. There are four core functions. The first one is
daily_response(), which finds the optimal sequence of days that are related
to one or more tree-ring proxy records. Similar function is daily_response_seascorr(),
which implements partial correlations in the analysis of daily response functions.
For the enthusiast of monthly data, there is monthly_response() function.
The last core function is compare_methods(), which effectively compares several
linear and nonlinear regression algorithms on the task of climate reconstruction. ",2020-01-07,Jernej Jevsenak,http://github.com/jernejjevsenak/dendroTools,TRUE,https://github.com/jernejjevsenak/dendrotools,25016,2,2019-12-21T22:14:33Z,12508
densitr,"Provides various tools for analysing density profiles
obtained by resistance drilling. It can load individual or
multiple files and trim the starting and ending part of each
density profile. Tools are also provided to trim profiles
manually, to remove the trend from measurements using several
methods, to plot the profiles and to detect tree rings
automatically. Written with a focus on forestry use of resistance
drilling in standing trees.",2020-04-07,Luka Krajnc,https://github.com/krajnc/densitr,TRUE,https://github.com/krajnc/densitr,949,0,2020-04-06T09:14:09Z,NA
densratio,"Density ratio estimation.
The estimated density ratio function can be used in many applications such as
anomaly detection, change-point detection, covariate shift adaptation.
The implemented methods are uLSIF (Hido et al. (2011) <doi:10.1007/s10115-010-0283-2>),
RuLSIF (Yamada et al. (2011) <doi:10.1162/NECO_a_00442>),
and KLIEP (Sugiyama et al. (2007) <doi:10.1007/s10463-008-0197-x>).",2019-06-30,Koji Makiyama,https://github.com/hoxo-m/densratio,TRUE,https://github.com/hoxo-m/densratio,16712,11,2019-06-30T10:55:08Z,1519.2727272727273
DEploid,"Traditional phasing programs are limited to diploid organisms.
Our method modifies Li and Stephens algorithm with Markov chain Monte Carlo
(MCMC) approaches, and builds a generic framework that allows haplotype searches
in a multiple infection setting. This package is primarily developed as part of
the Pf3k project, which is a global collaboration using the latest
sequencing technologies to provide a high-resolution view of natural variation
in the malaria parasite Plasmodium falciparum. Parasite DNA are extracted from
patient blood sample, which often contains more than one parasite strain, with
unknown proportions. This package is used for deconvoluting mixed haplotypes,
and reporting the mixture proportions from each sample.",2020-04-21,Joe Zhu,https://github.com/DEploid-dev/DEploid-r,TRUE,https://github.com/deploid-dev/deploid-r,18477,1,2020-04-21T19:53:06Z,18477
DepthProc,"Data depth concept offers a variety of powerful and user friendly
tools for robust exploration and inference for multivariate data. The offered
techniques may be successfully used in cases of lack of our knowledge on
parametric models generating data due to their nature. The
package consist of among others implementations of several data depth techniques
involving multivariate quantile-quantile plots, multivariate scatter estimators,
multivariate Wilcoxon tests and robust regressions.",2020-02-19,Zygmunt Zawadzki,"https://www.depthproc.zstat.pl/,
https://github.com/zzawadz/DepthProc",TRUE,https://github.com/zzawadz/depthproc,32422,3,2020-02-19T19:36:32Z,10807.333333333334
Deriv,"R-based solution for symbolic differentiation. It admits
user-defined function as well as function substitution
in arguments of functions to be differentiated. Some symbolic
simplification is part of the work.",2019-12-10,Serguei Sokol,NA,TRUE,https://github.com/sgsokol/deriv,471508,18,2019-12-10T14:51:17Z,26194.88888888889
DescriptiveStats.OBeu,"Estimate and return the needed parameters for visualizations designed for 'OpenBudgets.eu' <http://openbudgets.eu/> datasets. Calculate descriptive statistical measures in budget data of municipalities across Europe, according to the 'OpenBudgets.eu' data model. There are functions for measuring central tendency and dispersion of amount variables along with their distributions and correlations and the frequencies of categorical variables for a given dataset. Also, can be used generally to other datasets, to extract visualization parameters, convert them to 'JSON' format and use them as input in a different graphical interface. ",2020-05-04,Kleanthis Koupidis,https://github.com/okgreece/DescriptiveStats.OBeu,TRUE,https://github.com/okgreece/descriptivestats.obeu,14724,1,2020-05-05T19:17:23Z,14724
descriptr,"Generate descriptive statistics such as measures of location,
dispersion, frequency tables, cross tables, group summaries and multiple
one/two way tables. ",2020-02-01,Aravind Hebbali,"https://descriptr.rsquaredacademy.com/,
https://github.com/rsquaredacademy/descriptr",TRUE,https://github.com/rsquaredacademy/descriptr,31314,29,2020-02-01T10:20:44Z,1079.7931034482758
desctable,"Easily create descriptive and comparative tables.
It makes use and integrates directly with the tidyverse family of packages, and pipes.
Tables are produced as data frames/lists of data frames for easy manipulation after creation,
and ready to be saved as csv, or piped to DT::datatable() or pander::pander() to integrate into reports.",2020-02-03,Maxime Wack,https://github.com/maximewack/desctable,TRUE,https://github.com/maximewack/desctable,18435,42,2020-02-03T15:53:32Z,438.92857142857144
DescTools,"A collection of miscellaneous basic statistic functions and convenience wrappers for efficiently describing data. The author's intention was to create a toolbox, which facilitates the (notoriously time consuming) first descriptive tasks in data analysis, consisting of calculating descriptive statistics, drawing graphical summaries and reporting the results. The package contains furthermore functions to produce documents using MS Word (or PowerPoint) and functions to import data from Excel. Many of the included functions can be found scattered in other packages and other sources written partly by Titans of R. The reason for collecting them here, was primarily to have them consolidated in ONE instead of dozens of packages (which themselves might depend on other packages which are not needed at all), and to provide a common and consistent interface as far as function and arguments naming, NA handling, recycling rules etc. are concerned. Google style guides were used as naming rules (in absence of convincing alternatives). The 'BigCamelCase' style was consequently applied to functions borrowed from contributed R packages as well.",2020-05-23,Andri Signorell,"https://andrisignorell.github.io/DescTools/,
https://github.com/AndriSignorell/DescTools/",TRUE,https://github.com/andrisignorell/desctools,974289,19,2020-06-09T23:24:35Z,51278.36842105263
DescToolsAddIns,"'RStudio' as of recently offers the option to define addins and assign shortcuts to them. This package contains addins for a few most frequently used functions in a data scientist's (at least mine) daily work (like str(), example(), plot(), head(), view(), Desc()). Most of these functions will use the current selection in the editor window and send the specific command to the console while instantly executing it. Assigning shortcuts to these addins will save you quite a few keystrokes.",2020-03-08,Andri Signorell,https://github.com/AndriSignorell/DescToolsAddIns/,TRUE,https://github.com/andrisignorell/desctoolsaddins,27318,1,2020-03-14T09:49:59Z,27318
DesignLibrary,"
A simple interface to build designs using the package 'DeclareDesign'.
In one line of code, users can specify the parameters of individual
designs and diagnose their properties. The designers can also be used
to compare performance of a given design across a range of combinations
of parameters, such as effect size, sample size, and assignment probabilities.",2019-06-17,Jasper Cooper,"https://declaredesign.org/library/,
https://github.com/DeclareDesign/DesignLibrary",TRUE,https://github.com/declaredesign/designlibrary,17397,23,2020-04-21T19:47:06Z,756.3913043478261
designr,Generate balanced factorial designs with crossed and nested random and fixed effects <https://github.com/mmrabe/designr>.,2020-05-25,Maximilian Rabe,https://maxrabe.com/designr,TRUE,https://github.com/mmrabe/designr,3261,0,2020-05-25T10:37:30Z,NA
desplot,"A function for plotting maps of agricultural field experiments that
are laid out in grids.",2019-09-13,Kevin Wright,https://github.com/kwstat/desplot,TRUE,https://github.com/kwstat/desplot,25810,11,2020-01-20T15:29:45Z,2346.3636363636365
details,"Create a details HTML tag around R objects to place
in a Markdown, 'Rmarkdown' and 'roxygen2' documentation.",2020-01-12,Jonathan Sidi,https://github.com/yonicd/details,TRUE,https://github.com/yonicd/details,8037,70,2020-04-21T13:52:25Z,114.81428571428572
detectseparation,"Provides pre-fit and post-fit methods for detecting separation and infinite maximum likelihood estimates in generalized linear models with categorical responses. The pre-fit methods apply on binomial-response generalized liner models such as logit, probit and cloglog regression, and can be directly supplied as fitting methods to the glm() function. They solve the linear programming problems for the detection of separation developed in Konis (2007, <https://ora.ox.ac.uk/objects/uuid:8f9ee0d0-d78e-4101-9ab4-f9cbceed2a2a>) using 'ROI' <https://cran.r-project.org/package=ROI> or 'lpSolveAPI' <https://cran.r-project.org/package=lpSolveAPI>. The post-fit methods apply to models with categorical responses, including binomial-response generalized linear models and multinomial-response models, such as baseline category logits and adjacent category logits models; for example, the models implemented in the 'brglm2' <https://cran.r-project.org/package=brglm2> package. The post-fit methods successively refit the model with increasing number of iteratively reweighted least squares iterations, and monitor the ratio of the estimated standard error for each parameter to what it has been in the first iteration. According to the results in Lesaffre & Albert (1989, <https://www.jstor.org/stable/2345845>), divergence of those ratios indicates data separation.",2020-03-25,Ioannis Kosmidis,https://github.com/ikosmidis/detectseparation,TRUE,https://github.com/ikosmidis/detectseparation,1306,1,2020-03-26T11:06:43Z,1306
detzrcr,"Compare detrital zircon suites by uploading univariate,
U-Pb age, or bivariate, U-Pb age and Lu-Hf data, in a 'shiny'-based
user-interface. Outputs publication quality figures using 'ggplot2', and
tables of statistics currently in use in the detrital zircon geochronology
community.",2020-01-09,Magnus Kristoffersen,https://github.com/magnuskristoffersen/detzrcr,TRUE,https://github.com/magnuskristoffersen/detzrcr,24072,6,2020-06-08T11:18:59Z,4012
devRate,"A set of functions to quantify the relationship between development
rate and temperature and to build phenological models. The package comprises
a set of models and estimated parameters borrowed from a literature review
in ectotherms. The methods and literature review are described in Rebaudo
et al. (2018) <doi:10.1111/2041-210X.12935> and Rebaudo and Rabhi (2018)
<doi:10.1111/eea.12693>. An example can be found in Rebaudo et al. (2017)
<doi:10.1007/s13355-017-0480-5>.",2019-05-27,Francois Rebaudo,https://github.com/frareb/devRate/,TRUE,https://github.com/frareb/devrate,22175,2,2020-06-09T08:39:58Z,11087.5
devtools,Collection of package development tools.,2020-04-10,Jim Hester,"https://devtools.r-lib.org/, https://github.com/r-lib/devtools",TRUE,https://github.com/r-lib/devtools,17883816,1956,2020-05-05T13:33:59Z,9143.055214723927
dexter,"A system for the management, assessment, and psychometric analysis of data from educational and psychological tests. ",2020-04-02,Gunter Maris,http://dexterities.netlify.com,TRUE,https://github.com/jessekps/dexter,34257,4,2020-04-06T11:14:00Z,8564.25
dextergui,"Classical Test and Item analysis,
Item Response analysis and data management for educational and psychological tests.",2020-02-20,jesse koops,NA,TRUE,https://github.com/jessekps/dexter,12765,4,2020-04-06T11:14:00Z,3191.25
dexterMST,"Conditional Maximum Likelihood Calibration and data management of multistage tests.
Functions for calibration of the Extended Nominal Response and the Interaction models, DIF and profile analysis.
See Robert J. Zwitser and Gunter Maris (2015)<doi:10.1007/s11336-013-9369-6>.",2019-08-20,Timo Bechger,http://dexterities.netlify.com,TRUE,https://github.com/jessekps/dexter,10243,4,2020-04-06T11:14:00Z,2560.75
dfadjust,"Computes small-sample degrees of freedom adjustment for
heteroskedasticity robust standard errors, and for clustered standard errors
in linear regression. See Imbens and Kolesár (2016)
<doi:10.1162/REST_a_00552> for a discussion of these adjustments.",2019-12-16,Michal Kolesár,https://github.com/kolesarm/Robust-Small-Sample-Standard-Errors,TRUE,https://github.com/kolesarm/robust-small-sample-standard-errors,4030,15,2019-12-16T16:13:42Z,268.6666666666667
dfoliatR,"Tools to identify, quantify, analyze, and visualize growth
suppression events in tree rings that are often produced by insect
defoliation.",2020-03-04,Chris Guiterman,https://chguiterman.github.io/dfoliatR/,TRUE,https://github.com/chguiterman/dfoliatr,1465,2,2020-05-21T14:33:38Z,732.5
dformula,"A tool for manipulating data using the generic formula. A single formula allows to easily add, replace and remove variables before running the analysis. ",2020-06-04,Alessio Serafini,https://github.com/dataallaround/dformula,TRUE,https://github.com/dataallaround/dformula,0,0,2020-06-04T19:55:06Z,NA
dfvad,"Decomposing value added growth into explanatory factors.
A cost constrained value added function is defined to specify the
production frontier. Industry estimates can also be aggregated
using a weighted average approach.
Details about the methodology and data can be found in Diewert and Fox (2018)
<doi:10.1093/oxfordhb/9780190226718.013.19>
and Zeng, Parsons, Diewert and Fox (2018)
<https://www.business.unsw.edu.au/research-site/centreforappliedeconomicresearch-site/Documents/emg2018-6_SZeng_EMG-Slides.pdf>.",2020-03-05,Shipei Zeng,https://github.com/shipei-zeng/dfvad,TRUE,https://github.com/shipei-zeng/dfvad,1531,0,2020-05-31T15:15:59Z,NA
dggridR,"Spatial analyses involving binning require that every bin have the same area, but this is impossible using a rectangular grid laid over the Earth or over any projection of the Earth. Discrete global grids use hexagons, triangles, and diamonds to overcome this issue, overlaying the Earth with equally-sized bins. This package provides utilities for working with discrete global grids, along with utilities to aid in plotting such data.",2020-04-29,Richard Barnes,https://github.com/r-barnes/dggridR/,TRUE,https://github.com/r-barnes/dggridr,19086,94,2020-04-29T17:15:36Z,203.04255319148936
dgumbel,"Gumbel distribution functions (De Haan L. (2007)
<doi:10.1007/0-387-34471-3>) implemented with the techniques of automatic
differentiation (Griewank A. (2008) <isbn:978-0-89871-659-7>).
With this tool, a user should be able to quickly model extreme
events for which the Gumbel distribution is the domain of attraction.
The package makes available the density function, the distribution
function the quantile function and a random generating function. In
addition, it supports gradient functions. The package combines 'Adept'
(C++ templated automatic differentiation) (Hogan R. (2017)
<doi:10.5281/zenodo.1004730>) and 'Eigen' (templated matrix-vector
library) for fast computations of both objective functions and exact
gradients. It relies on 'RcppEigen' for easy access to 'Eigen' and
bindings to R.",2020-04-16,Berent Ånund Strømnes Lunde,https://github.com/blunde1/dgumbel,TRUE,https://github.com/blunde1/dgumbel,1147,1,2020-04-13T18:57:50Z,1147
DHARMa,"The 'DHARMa' package uses a simulation-based approach to create
readily interpretable scaled (quantile) residuals for fitted (generalized) linear mixed
models. Currently supported are linear and generalized linear (mixed) models from 'lme4'
(classes 'lmerMod', 'glmerMod'), 'glmmTMB' and 'spaMM', generalized additive models ('gam' from
'mgcv'), 'glm' (including 'negbin' from 'MASS', but excluding quasi-distributions) and 'lm' model
classes. Moreover, externally created simulations, e.g. posterior predictive simulations
from Bayesian software such as 'JAGS', 'STAN', or 'BUGS' can be processed as well.
The resulting residuals are standardized to values between 0 and 1 and can be interpreted
as intuitively as residuals from a linear regression. The package also provides a number of
plot and test functions for typical model misspecification problems, such as
over/underdispersion, zero-inflation, and residual spatial and temporal autocorrelation.",2020-05-12,Florian Hartig,http://florianhartig.github.io/DHARMa/,TRUE,https://github.com/florianhartig/dharma,95760,104,2020-06-07T11:43:08Z,920.7692307692307
diagis,"Fast functions for effective sample size, weighted multivariate mean and variance computation,
and weight diagnostic plot for generic importance sampling type results.",2020-06-04,Jouni Helske,NA,TRUE,https://github.com/helske/diagis,17127,0,2020-06-04T12:23:51Z,NA
diagmeta,Provides methods by Steinhauser et al. (2016) <DOI:10.1186/s12874-016-0196-1> for meta-analysis of diagnostic accuracy studies with several cutpoints.,2020-04-02,Guido Schwarzer,https://github.com/guido-s/diagmeta,TRUE,https://github.com/guido-s/diagmeta,10701,2,2020-04-07T19:39:59Z,5350.5
diagonals,"Several tools for handling block-matrix diagonals and similar
constructs are implemented. Block-diagonal matrices can be extracted or removed
using two small functions implemented here. In addition, non-square matrices
are supported. Block diagonal matrices occur when two dimensions of a data set
are combined along one edge of a matrix. For example, trade-flow data in the
'decompr' and 'gvc' packages have each country-industry combination occur along
both edges of the matrix.",2020-04-28,Bastiaan Quast,"https://qua.st/diagonals, https://github.com/bquast/diagonals",TRUE,https://github.com/bquast/diagonals,24094,1,2020-04-28T06:08:51Z,24094
DiagrammeR,"
Build graph/network structures using functions for stepwise addition and
deletion of nodes and edges. Work with data available in tables for bulk
addition of nodes, edges, and associated metadata. Use graph selections
and traversals to apply changes to specific nodes or edges. A wide
selection of graph algorithms allow for the analysis of graphs. Visualize
the graphs and take advantage of any aesthetic properties assigned to
nodes and edges.",2020-05-08,Richard Iannone,https://github.com/rich-iannone/DiagrammeR,TRUE,https://github.com/rich-iannone/diagrammer,958256,1327,2020-05-15T01:17:22Z,722.1220798794272
DiagrammeRsvg,Allows for export of DiagrammeR Graphviz objects to SVG.,2016-02-04,Richard Iannone,https://github.com/rich-iannone/DiagrammeRsvg,TRUE,https://github.com/rich-iannone/diagrammersvg,108579,23,2019-12-10T14:02:56Z,4720.826086956522
dialr,"Parse, format, and validate international phone
numbers using Google's 'libphonenumber' java library,
<https://github.com/google/libphonenumber>.",2020-04-04,Danny Smith,"https://socialresearchcentre.github.io/dialr,
https://github.com/socialresearchcentre/dialr,
https://github.com/socialresearchcentre/dialrjars,
https://github.com/google/libphonenumber",TRUE,https://github.com/socialresearchcentre/dialr,6946,1,2020-05-16T09:28:07Z,6946
dialrjars,"Collects 'libphonenumber' jars required for the
'dialr' package.",2020-03-23,Danny Smith,"https://github.com/socialresearchcentre/dialrjars,
https://github.com/googlei18n/libphonenumber",TRUE,https://github.com/socialresearchcentre/dialrjars,8103,1,2020-05-16T08:40:01Z,8103
dials,"Many models contain tuning parameters (i.e. parameters that cannot be directly estimated from the data). These tools can be used to define objects for creating, simulating, or validating values for such parameters. ",2020-04-03,Max Kuhn,"https://tidymodels.github.io/dials,
https://github.com/tidymodels/dials",TRUE,https://github.com/tidymodels/dials,59050,71,2020-06-09T23:03:27Z,831.6901408450705
diceR,"Performs cluster analysis using an ensemble
clustering framework, Chiu & Talhouk (2018)
<doi:10.1186/s12859-017-1996-y>. Results from a diverse set of
algorithms are pooled together using methods such as majority voting,
K-Modes, LinkCluE, and CSPA. There are options to compare cluster
assignments across algorithms using internal and external indices,
visualizations such as heatmaps, and significance testing for the
existence of clusters.",2019-07-25,Derek Chiu,"https://github.com/AlineTalhouk/diceR,
https://alinetalhouk.github.io/diceR",TRUE,https://github.com/alinetalhouk/dicer,19245,21,2019-12-08T22:25:43Z,916.4285714285714
Dict,A key-value dictionary data structure based on R6 class which is designed to be similar usages with other languages dictionary (e.g. 'Python') with reference semantics and extendabilities by R6.,2020-06-02,Shun Asai,https://github.com/five-dots/Dict,TRUE,https://github.com/five-dots/dict,0,1,2020-06-03T05:50:03Z,0
dief,"An implementation of the metrics dief@t and dief@k to measure the diefficiency (or continuous efficiency) of incremental approaches, see Acosta, M., Vidal, M. E., & Sure-Vetter, Y. (2017) <doi:10.1007/978-3-319-68204-4_1>. The metrics dief@t and dief@k allow for measuring the diefficiency during an elapsed time period t or while k answers are produced, respectively. dief@t and dief@k rely on the computation of the area under the curve of answer traces, and thus capturing the answer rate concentration over a time interval. ",2019-02-28,Maribel Acosta,https://github.com/maribelacosta/dief,TRUE,https://github.com/maribelacosta/dief,10125,5,2019-10-03T10:56:44Z,2025
dietr,"Estimates fractional trophic level from quantitative and qualitative diet data and calculates electivity indices in R. Froese & Pauly (2000, ISBN: 9718709991).",2019-11-13,Samuel R. Borstein,https://github.com/sborstein/dietr,TRUE,https://github.com/sborstein/dietr,2726,0,2019-12-11T21:13:39Z,NA
diffdf,"Functions for comparing two data.frames against
each other. The core functionality is to provide a detailed breakdown of any differences
between two data.frames as well as providing utility functions to help narrow down the
source of problems and differences.",2020-03-17,Craig Gower-Page,https://github.com/gowerc/diffdf,TRUE,https://github.com/gowerc/diffdf,11564,9,2020-03-16T19:10:11Z,1284.888888888889
diffee,"This is an R implementation of Fast and Scalable Learning of Sparse Changes in High-Dimensional Gaussian Graphical Model Structure (DIFFEE). The DIFFEE algorithm can be used to fast estimate the differential network between two related datasets. For instance, it can identify differential gene network from datasets of case and control. By performing data-driven network inference from two high-dimensional data sets, this tool can help users effectively translate two aggregated data blocks into knowledge of the changes among entities between two Gaussian Graphical Model. Please run demo(diffeeDemo) to learn the basic functions provided by this package. For further details, please read the original paper: Beilun Wang, Arshdeep Sekhon, Yanjun Qi (2018) <arXiv:1710.11223>.",2018-07-03,Beilun Wang,https://github.com/QData/DIFFEE,TRUE,https://github.com/qdata/diffee,7995,0,2019-08-28T16:29:38Z,NA
diffEnrich,"Compare functional enrichment between two experimentally-derived groups of genes or proteins (Peterson, DR., et al.(2018)) <doi: 10.1371/journal.pone.0198139>. Given a list of gene symbols, 'diffEnrich' will
perform differential enrichment analysis using the Kyoto Encyclopedia of Genes
and Genomes (KEGG) REST API. This package provides a number of functions that are
intended to be used in a pipeline. Briefly, the user provides a KEGG formatted species id for either human, mouse or rat, and the package will
download and clean species specific ENTREZ gene IDs and map them to their respective
KEGG pathways by accessing KEGG's REST API. KEGG's API is used to guarantee the most up-to-date pathway data from KEGG. Next, the user will identify significantly
enriched pathways from two gene sets, and finally, the user will identify
pathways that are differentially enriched between the two gene sets. In addition to
the analysis pipeline, this package also provides a plotting function. ",2019-11-21,Harry Smith,https://github.com/SabaLab/diffEnrich,TRUE,https://github.com/sabalab/diffenrich,3060,0,2020-04-13T17:30:32Z,NA
diffeqr,"An interface to 'DifferentialEquations.jl' <http://docs.juliadiffeq.org/latest/> from the R programming language.
It has unique high performance methods for solving ordinary differential equations (ODE), stochastic differential equations (SDE),
delay differential equations (DDE), differential-algebraic equations (DAE), and more. Much of the functionality,
including features like adaptive time stepping in SDEs, are unique and allow for multiple orders of magnitude speedup over more common methods.
'diffeqr' attaches an R interface onto the package, allowing seamless use of this tooling by R users.",2019-09-22,Christopher Rackauckas,https://github.com/JuliaDiffEq/diffeqr,TRUE,https://github.com/juliadiffeq/diffeqr,10368,51,2020-04-22T13:39:59Z,203.2941176470588
diffeR,Metrics of difference for comparing pairs of variables or pairs of maps representing real or categorical variables at original and multiple resolutions.,2019-01-22,Robert Gilmore Pontius Jr.,"http://amsantac.co/software.html,
https://github.com/amsantac/diffeR",TRUE,https://github.com/amsantac/differ,20659,0,2019-12-11T17:01:28Z,NA
diffobj,"Generate a colorized diff of two R objects for an intuitive
visualization of their differences.",2020-05-11,Brodie Gaslam,https://github.com/brodieG/diffobj,TRUE,https://github.com/brodieg/diffobj,326416,168,2020-05-11T18:28:46Z,1942.952380952381
diffusion,"Various diffusion models to forecast new product growth. Currently
the package contains Bass, Gompertz and Gamma/Shifted Gompertz curves. See
Meade and Islam (2006) <doi:10.1016/j.ijforecast.2006.01.005>.",2018-01-05,Oliver Schaer,https://github.com/mamut86/diffusion,TRUE,https://github.com/mamut86/diffusion,10355,6,2020-04-30T19:52:45Z,1725.8333333333333
diffusionMap,"Implements diffusion map method of data
parametrization, including creation and visualization of
diffusion map, clustering with diffusion K-means and
regression using adaptive regression model.
Richards (2009) <doi:10.1088/0004-637X/691/1/32>.",2019-09-10,Joseph Richards,https://github.com/rcannood/diffusionMap,TRUE,https://github.com/rcannood/diffusionmap,76475,0,2019-09-10T11:16:27Z,NA
diffusr,"Implementation of network diffusion algorithms such as
heat diffusion or Markov random walks. Network diffusion algorithms generally
spread information in the form of node weights along the edges of a graph to other nodes.
These weights can for example be interpreted as temperature, an initial amount
of water, the activation of neurons in the brain, or the location of a random
surfer in the internet. The information (node weights) is iteratively propagated
to other nodes until a equilibrium state or stop criterion occurs.",2018-05-17,Simon Dirmeier,https://github.com/dirmeier/diffusr,TRUE,https://github.com/dirmeier/diffusr,14930,16,2019-09-22T19:17:06Z,933.125
difNLR,"Detection of differential item functioning (DIF) among dichotomously scored items and differential distractor functioning (DDF) among unscored items with non-linear regression procedures based on generalized logistic regression models (Drabinova and Martinkova, 2017, doi:10.1111/jedm.12158).",2020-05-04,Adela Hladka,NA,TRUE,https://github.com/adelahladka/difnlr,32686,2,2020-05-04T14:31:56Z,16343
digest,"Implementation of a function 'digest()' for the creation
of hash digests of arbitrary R objects (using the 'md5', 'sha-1', 'sha-256',
'crc32', 'xxhash', 'murmurhash' and 'spookyhash' algorithms) permitting easy
comparison of R language objects, as well as functions such as'hmac()' to
create hash-based message authentication code. Please note that this package
is not meant to be deployed for cryptographic purposes for which more
comprehensive (and widely tested) libraries such as 'OpenSSL' should be
used.",2020-02-23,"Dirk Eddelbuettel <[email protected]> with contributions
by Antoine Lucas",http://dirk.eddelbuettel.com/code/digest.html,TRUE,https://github.com/eddelbuettel/digest,26769060,71,2020-05-21T12:04:39Z,377029.0140845071
dimensionsR,A set of tools to extract bibliographic content from 'Digital Science Dimensions' using 'DSL' API <https://www.dimensions.ai/dimensions-apis/>.,2020-03-20,Massimo Aria,https://github.com/massimoaria/dimensionsR,TRUE,https://github.com/massimoaria/dimensionsr,5892,4,2020-05-17T06:15:02Z,1473
dimRed,"A collection of dimensionality reduction
techniques from R packages and a common
interface for calling the methods.",2019-05-08,Guido Kraemer,https://github.com/gdkrmr/dimRed,TRUE,https://github.com/gdkrmr/dimred,938457,59,2019-11-11T12:43:52Z,15906.050847457627
dina,"Estimate the Deterministic Input, Noisy ""And"" Gate (DINA)
cognitive diagnostic model parameters using the Gibbs sampler described
by Culpepper (2015) <doi:10.3102/1076998615595403>.",2019-02-01,Steven Andrew Culpepper,https://github.com/tmsalab/dina,TRUE,https://github.com/tmsalab/dina,17125,6,2020-03-22T17:01:22Z,2854.1666666666665
dint,"S3 classes and methods to create and work
with year-quarter, year-month and year-isoweek vectors. Basic
arithmetic operations (such as adding and subtracting) are supported,
as well as formatting and converting to and from standard R date
types.",2020-02-06,Stefan Fleck,https://github.com/s-fleck/dint,TRUE,https://github.com/s-fleck/dint,13833,9,2020-03-23T13:04:45Z,1537
dipsaus,"Works as an ""add-on"" to packages like 'shiny', 'future', as well as
'rlang', and provides utility functions. Just like dipping sauce adding
flavors to potato chips or pita bread, 'dipsaus' for data analysis and
visualizations adds handy functions and enhancements to popular packages.
The goal is to provide simple solutions that are frequently asked for
online, such as how to synchronize 'shiny' inputs without freezing the app,
or how to get memory size on 'Linux' or 'MacOS' system. The enhancements
roughly fall into these four categories: 1. 'shiny' input widgets; 2.
high-performance computing using 'RcppParallel' and 'future' package; 3.
modify R calls and convert among numbers, strings, and other objects. 4.
utility functions to get system information such like CPU chip-set, memory
limit, etc.",2020-05-12,Zhengjia Wang,https://github.com/dipterix/dipsaus,TRUE,https://github.com/dipterix/dipsaus,3818,5,2020-05-19T16:09:26Z,763.6
directlabels,"An extensible framework
for automatically placing direct labels onto multicolor 'lattice' or
'ggplot2' plots.
Label positions are described using Positioning Methods
which can be re-used across several different plots.
There are heuristics for examining ""trellis"" and ""ggplot"" objects
and inferring an appropriate Positioning Method.",2020-02-01,Toby Dylan Hocking,https://github.com/tdhock/directlabels,TRUE,https://github.com/tdhock/directlabels,184902,32,2020-01-31T17:00:59Z,5778.1875
dirichletprocess,"Perform nonparametric Bayesian analysis using Dirichlet
processes without the need to program the inference algorithms.
Utilise included pre-built models or specify custom
models and allow the 'dirichletprocess' package to handle the
Markov chain Monte Carlo sampling.
Our Dirichlet process objects can act as building blocks for a variety
of statistical models including and not limited to: density estimation,
clustering and prior distributions in hierarchical models.
See Teh, Y. W. (2011)
<https://www.stats.ox.ac.uk/~teh/research/npbayes/Teh2010a.pdf>,
among many other sources.",2020-04-03,Dean Markwick,https://github.com/dm13450/dirichletprocess,TRUE,https://github.com/dm13450/dirichletprocess,12670,27,2020-06-07T18:29:53Z,469.25925925925924
discgolf,"Client for the Discourse API. Discourse is a open source
discussion forum platform (<https://www.discourse.org/>). It comes with 'RESTful'
API access to an installation. This client requires that you are authorized
to access a Discourse installation, either yours or another.",2018-01-03,Scott Chamberlain,https://github.com/sckott/discgolf,TRUE,https://github.com/sckott/discgolf,14554,8,2019-09-12T22:14:54Z,1819.25
disclapmix,"Make inference in a mixture of discrete Laplace distributions using the EM algorithm. This can e.g. be used for modelling the distribution of Y chromosomal haplotypes as described in [1, 2] (refer to the URL section).",2019-03-12,Mikkel Meyer Andersen,"http://dx.doi.org/10.1016/j.jtbi.2013.03.009
http://arxiv.org/abs/1304.2129",TRUE,https://github.com/mikldk/disclapmix,25167,0,2020-02-11T13:55:30Z,NA
discord,"Functions for discordant kinship modeling (and other sibling-based quasi-experimental designs). Currently, the package contains data restructuring functions; functions for generating genetically- and environmentally-informed data for kin pairs.",2017-05-02,S. Mason Garrison; Cermet Ream,https://github.com/smasongarrison/discord,TRUE,https://github.com/smasongarrison/discord,9859,1,2020-05-27T18:42:56Z,9859
discrim,"Bindings for additional classification models for use with the
'parsnip' package. Models include flavors of discriminant analysis, such as
linear (Fisher (1936) <doi:10.1111/j.1469-1809.1936.tb02137.x>), regularized
(Friedman (1989) <doi:10.1080/01621459.1989.10478752>), and flexible
(Hastie, Tibshirani, and Buja (1994) <doi:10.1080/01621459.1994.10476866>),
as well as naive Bayes classifiers (Hand and Yu (2007)
<doi:10.1111/j.1751-5823.2001.tb00465.x>). ",2020-04-09,Max Kuhn,https://github.com/tidymodels/discrim,TRUE,https://github.com/tidymodels/discrim,5209,16,2020-05-05T16:13:52Z,325.5625
diseq,"Provides estimation methods for markets in equilibrium and
disequilibrium. Specifically, it supports the estimation of an equilibrium and
four disequilibrium models with both correlated and independent shocks.
It also provides post-estimation analysis tools, such as aggregation and
marginal effects calculations. The estimation methods are based on full
information maximum likelihood techniques given in Maddala and Nelson (1974)
<doi:10.2307/1914215>. They are implemented using the analytic derivative
expressions calculated in Karapanagiotis (2020) <doi:10.2139/ssrn.3525622>.
The equilibrium estimation constitutes a special case of
a system of simultaneous equations. The disequilibrium models, instead, replace
the market clearing condition with a short side rule and
allow for different specifications of price dynamics. ",2020-04-28,Pantelis Karapanagiotis,https://github.com/pi-kappa-devel/diseq/,TRUE,https://github.com/pi-kappa-devel/diseq,643,0,2020-05-26T22:09:35Z,NA
DisImpact,"Implements methods for calculating disproportionate impact: the percentage point gap, proportionality index, and the 80% index.
California Community Colleges Chancellor's Office (2017). Percentage Point Gap Method. <https://www.cccco.edu/-/media/CCCCO-Website/About-Us/Divisions/Digital-Innovation-and-Infrastructure/Research/Files/PercentagePointGapMethod2017.ashx>.
California Community Colleges Chancellor's Office (2014). Guidelines for Measuring Disproportionate Impact in Equity Plans. <https://www.cccco.edu/-/media/CCCCO-Website/Files/DII/guidelines-for-measuring-disproportionate-impact-in-equity-plans-tfa-ada.pdf>.",2020-06-02,Vinh Nguyen,https://github.com/vinhdizzo/DisImpact,TRUE,https://github.com/vinhdizzo/disimpact,10110,1,2020-06-01T16:43:51Z,10110
disk.frame,"A disk-based data manipulation tool for working with
large-than-RAM datasets. Aims to lower the barrier-to-entry for
manipulating large datasets by adhering closely to popular and
familiar data manipulation paradigms like dplyr verbs and
data.table syntax.",2020-05-08,Dai ZJ,https://diskframe.com,TRUE,https://github.com/xiaodaigh/disk.frame,10825,424,2020-05-08T00:17:26Z,25.53066037735849
dispRity,"A modular package for measuring disparity (multidimensional space occupancy). Disparity can be calculated from any matrix defining a multidimensional space. The package provides a set of implemented metrics to measure properties of the space and allows users to provide and test their own metrics (Guillerme (2018) <doi:10.1111/2041-210X.13022>). The package also provides functions for looking at disparity in a serial way (e.g. disparity through time - Guillerme and Cooper (2018) <doi:10.1111/pala.12364>) or per groups as well as visualising the results. Finally, this package provides several statistical tests for disparity analysis.",2020-06-03,Thomas Guillerme,https://github.com/TGuillerme/dispRity,TRUE,https://github.com/tguillerme/disprity,16012,11,2020-06-03T11:27:18Z,1455.6363636363637
dissever,"Spatial downscaling of coarse grid mapping to fine grid
mapping using predictive covariates and a model fitted using the 'caret'
package. The original dissever algorithm was published by Malone et al.
(2012) <doi:10.1016/j.cageo.2011.08.021>, and extended by Roudier et al.
(2017) <doi:10.1016/j.compag.2017.08.021>.",2018-04-20,Pierre Roudier,https://github.com/pierreroudier/dissever,TRUE,https://github.com/pierreroudier/dissever,10760,5,2020-06-09T09:28:04Z,2152
Distance,"A simple way of fitting detection functions to distance sampling
data for both line and point transects. Adjustment term selection, left and
right truncation as well as monotonicity constraints and binning are
supported. Abundance and density estimates can also be calculated (via a
Horvitz-Thompson-like estimator) if survey area information is provided.",2020-01-31,David Lawrence Miller,http://github.com/DistanceDevelopment/Distance/,TRUE,https://github.com/distancedevelopment/distance,44706,0,2020-06-08T13:37:24Z,NA
distances,"Provides tools for constructing, manipulating and using distance metrics.",2019-09-22,Fredrik Savje,https://github.com/fsavje/distances,TRUE,https://github.com/fsavje/distances,31080,13,2019-09-18T20:51:30Z,2390.769230769231
distill,"Scientific and technical article format for the web. 'Distill' articles
feature attractive, reader-friendly typography, flexible layout options
for visualizations, and full support for footnotes and citations.",2020-06-04,JJ Allaire,https://github.com/rstudio/distill,TRUE,https://github.com/rstudio/distill,9390,192,2020-06-05T14:03:26Z,48.90625
distr6,"An R6 object oriented distributions package. Unified interface for 42 probability distributions and 11 kernels including functionality for multiple scientific types. Additionally functionality for composite distributions and numerical imputation. Design patterns including wrappers and decorators are described in Gamma et al. (1994, ISBN:0-201-63361-2). For quick reference of probability distributions including d/p/q/r functions and results we refer to McLaughlin, M. P. (2001). Additionally Devroye (1986, ISBN:0-387-96305-7) for sampling the Dirichlet distribution, Gentle (2009) <doi:10.1007/978-0-387-98144-4> for sampling the Multivariate Normal distribution and Michael et al. (1976) <doi:10.2307/2683801> for sampling the Wald distribution.",2020-05-20,Raphael Sonabend,"https://alan-turing-institute.github.io/distr6/,
https://github.com/alan-turing-institute/distr6/",TRUE,https://github.com/alan-turing-institute/distr6,16813,41,2020-06-09T15:31:10Z,410.0731707317073
distreg.vis,"Functions for visualizing distributional regression models fitted using the 'gamlss', 'bamlss' or 'betareg' R package. The core of the package consists of a 'shiny' application, where the model results can be interactively explored and visualized.",2019-09-03,Stanislaus Stadlmann,https://github.com/Stan125/distreg.vis,TRUE,https://github.com/stan125/distreg.vis,8163,1,2020-05-15T01:55:07Z,8163
Distributacalcul,"Calculates expected values, variance, different moments (kth
moment, truncated mean), stop-loss, mean excess loss, Value-at-Risk (VaR)
and Tail Value-at-Risk (TVaR) as well as some density and cumulative
(survival) functions of continuous, discrete and compound distributions.
This package also includes a visual 'Shiny' component to enable students
to visualize distributions and understand the impact of their parameters.
This package is intended to expand the 'stats' and 'actuar' packages so as
to enable students to develop an intuition for probability.",2020-06-09,Alec James van Rassel,https://github.com/alec42/Distributacalcul_Package,TRUE,https://github.com/alec42/distributacalcul_package,0,0,2020-06-09T16:49:35Z,NA
distributional,"Vectorised distribution objects with tools for manipulating,
visualising, and using probability distributions. Designed to allow model
prediction outputs to return distributions rather than their parameters,
allowing users to directly interact with predictive distributions in a
data-oriented workflow. In addition to providing generic replacements for
p/d/q/r functions, other useful statistics can be computed including means,
variances, intervals, and highest density regions.",2020-06-09,Mitchell OHara-Wild,"https://pkg.mitchelloharawild.com/distributional/,
https://github.com/mitchelloharawild/distributional",TRUE,https://github.com/mitchelloharawild/distributional,0,7,2020-06-09T13:53:01Z,0
distributions3,"Tools to create and manipulate probability
distributions using S3. Generics random(), pdf(), cdf() and
quantile() provide replacements for base R's r/d/p/q style functions.
Functions and arguments have been named carefully to minimize
confusion for students in intro stats courses. The documentation for
each distribution contains detailed mathematical notes.",2019-09-03,Alex Hayes,https://github.com/alexpghayes/distributions3,TRUE,https://github.com/alexpghayes/distributions3,5861,76,2020-02-12T13:20:08Z,77.11842105263158
distrr,"Tools to estimate and manage empirical distributions,
which should work with survey data. One of the main features is the
possibility to create data cubes of estimated statistics, that include
all the combinations of the variables of interest (see for example functions
dcc5() and dcc6()).",2019-01-03,Sandro Petrillo Burri,"https://gibonet.github.io/distrr,
https://github.com/gibonet/distrr",TRUE,https://github.com/gibonet/distrr,10530,5,2020-06-09T14:48:10Z,2106
distTails,"A full definition for Weibull tails and Full-Tails Gamma and tools for fitting these distributions to empirical tails. This package build upon the paper by del Castillo, Joan & Daoudi, Jalila & Serra, Isabel. (2012) <doi:10.1017/asb.2017.9>.",2019-09-07,Sergi Vilardell,https://github.com/SergiVilardell/distTails,TRUE,https://github.com/sergivilardell/disttails,3389,0,2019-09-09T15:04:35Z,NA
divDyn,"Functions to describe sampling and diversity dynamics of fossil occurrence datasets (e.g. from the Paleobiology Database). The package includes methods to calculate range- and occurrence-based metrics of taxonomic richness, extinction and origination rates, along with traditional sampling measures. A powerful subsampling tool is also included that implements frequently used sampling standardization methods in a multiple bin-framework. The plotting of time series and the occurrence data can be simplified by the functions incorporated in the package, as well other calculations, such as environmental affinities and extinction selectivity testing. Details can be found in: Kocsis, A.T.; Reddin, C.J.; Alroy, J. and Kiessling, W. (2019) <doi:10.1101/423780>.",2019-06-12,Adam T. Kocsis,NA,TRUE,https://github.com/divdyn/r_package,8167,5,2019-09-30T12:43:19Z,1633.4
diveMove,"Utilities to represent, visualize, filter, analyse, and summarize
time-depth recorder (TDR) data. Miscellaneous functions for
handling location data are also provided.",2020-04-29,Sebastian P. Luque,https://github.com/spluque/diveMove,TRUE,https://github.com/spluque/divemove,109088,1,2020-04-29T09:00:24Z,109088
divest,"Provides tools to sort DICOM-format medical image files, and
convert them to NIfTI-1 format.",2020-01-10,Jon Clayden,https://github.com/jonclayden/divest,TRUE,https://github.com/jonclayden/divest,32498,9,2020-01-14T17:55:11Z,3610.8888888888887
diyar,"Perform multistage deterministic linkages, apply case definitions to datasets, and deduplicate records.
Records (rows) from datasets are linked by different matching criteria and sub-criteria (columns) in a specified order of certainty.
The linkage process handles missing data and conflicting matches based on this same order of certainty.
For episode grouping, rows of dated events (e.g. sample collection) or interval of events (e.g. hospital admission) are
grouped into chronological episodes beginning with a ""Case"". The process permits several options such as
episode lengths and recurrence periods which are used to build custom preferences for case assignment (definition).
The record linkage and episode grouping processes assign unique group IDs to matching records or those grouped into episodes.
This then allows for record deduplication or sub-analysis within these groups. ",2019-12-08,Olisaeloka Nsonwu,https://cran.r-project.org/package=diyar,TRUE,https://github.com/olisansonwu/diyar,3806,1,2020-06-04T18:53:20Z,3806
DLMtool,"Development, simulation testing, and implementation of management
procedures for data-limited fisheries
(see Carruthers & Hordyk (2018) <doi:10.1111/2041-210X.13081>).",2020-06-02,Tom Carruthers,http://www.datalimitedtoolkit.org/,TRUE,https://github.com/dlmtool/dlmtool,61777,12,2020-06-09T22:24:32Z,5148.083333333333
dlnm,Collection of functions for distributed lag linear and non-linear models.,2020-05-22,Antonio Gasparrini,"https://github.com/gasparrini/dlnm,
http://www.ag-myresearch.com/package-dlnm",TRUE,https://github.com/gasparrini/dlnm,75135,18,2020-05-22T15:32:31Z,4174.166666666667
dlookr,"A collection of tools that support data diagnosis, exploration, and transformation.
Data diagnostics provides information and visualization of missing values and outliers and
unique and negative values to help you understand the distribution and quality of your data.
Data exploration provides information and visualization of the descriptive statistics of
univariate variables, normality tests and outliers, correlation of two variables, and
relationship between target variable and predictor. Data transformation supports binning
for categorizing continuous variables, imputates missing values and outliers, resolving skewness.
And it creates automated reports that support these three tasks.",2020-01-09,Choonghyun Ryu,NA,TRUE,https://github.com/choonghyunryu/dlookr,40175,68,2020-04-27T06:12:54Z,590.8088235294117
dlstats,"Monthly download stats of 'CRAN' and 'Bioconductor' packages.
Download stats of 'CRAN' packages is from the 'RStudio' 'CRAN mirror', see <http://cranlogs.r-pkg.org>.
'Bioconductor' package download stats is at <https://bioconductor.org/packages/stats/>.",2019-11-14,Guangchuang Yu,https://github.com/GuangchuangYu/dlstats,TRUE,https://github.com/guangchuangyu/dlstats,22876,8,2019-11-14T02:11:53Z,2859.5
dm,"Provides tools for working with multiple related
tables, stored as data frames or in a relational database. Multiple
tables (data and metadata) are stored in a compound object, which can
then be manipulated with a pipe-friendly syntax.",2020-06-07,Kirill Müller,"https://krlmlr.github.io/dm, https://github.com/krlmlr/dm",TRUE,https://github.com/krlmlr/dm,2246,175,2020-06-09T07:18:41Z,12.834285714285715
dmacs,Computes measurement nonequivalence effect size indices described in Nye and Drasgow (2011) <doi:10.1037/a0022955>. ,2019-10-24,David Dueber,https://github.com/ddueber/dmacs,TRUE,https://github.com/ddueber/dmacs,2932,0,2020-03-02T14:23:02Z,NA
dmdScheme,"Forms the core for developing own domain specific metadata schemes.
It contains the basic functionality needed for all metadata schemes based on the
'dmdScheme'. See R.M. Krug and O.L. Petchey (2019) <DOI:10.5281/zenodo.3581970>.",2020-05-29,Rainer M. Krug,"https://exp-micro-ecol-hub.github.io/dmdScheme/,
https://github.com/Exp-Micro-Ecol-Hub/dmdScheme",TRUE,https://github.com/exp-micro-ecol-hub/dmdscheme,1982,1,2020-06-08T15:19:09Z,1982
dml,"The state-of-the-art algorithms for distance metric learning, including global and local methods such as Relevant Component Analysis, Discriminative Component Analysis, Local Fisher Discriminant Analysis, etc. These distance metric learning methods are widely applied in feature extraction, dimensionality reduction, clustering, classification, information retrieval, and computer vision problems.",2015-08-29,Yuan Tang,https://github.com/terrytangyuan/dml,TRUE,https://github.com/terrytangyuan/dml,26584,56,2019-12-19T03:32:41Z,474.7142857142857
dmtools,"For checking the dataset from EDC(Electronic Data Capture) in clinical trials.
'dmtools' can check laboratory, dates, WBCs(White Blood Cells) count and rename the dataset.
Laboratory - does the investigator correctly estimate the laboratory analyzes?
Dates - do all dates correspond to the protocol's timeline?
WBCs count - do absolute equal (all * relative) / 100?
If the clinical trial has different lab reference ranges, 'dmtools' also can help.",2020-05-12,Konstantin Ryabov,https://github.com/chachabooms/dmtools,TRUE,https://github.com/chachabooms/dmtools,373,0,2020-06-07T18:36:45Z,NA
DNAtools,"Computationally efficient tools for comparing all pairs of profiles
in a DNA database. The expectation and covariance of the summary statistic
is implemented for fast computing. Routines for estimating proportions of
close related individuals are available. The use of wildcards (also called F-
designation) is implemented. Dedicated functions ease plotting the results.",2020-03-03,Mikkel Meyer Andersen,NA,TRUE,https://github.com/mikldk/dnatools,24601,0,2020-03-03T14:47:33Z,NA
dnet,"The focus of the dnet by Fang and Gough (2014) <doi:10.1186/s13073-014-0064-8> is to make sense of omics data (such as gene expression and mutations) from different angles including: integration with molecular networks, enrichments using ontologies, and relevance to gene evolutionary ages. Integration is achieved to identify a gene subnetwork from the whole gene network whose nodes/genes are labelled with informative data (such as the significant levels of differential expression or survival risks). To help make sense of identified gene networks, enrichment analysis is also supported using a wide variety of pre-compiled ontologies and phylostratific gene age information in major organisms including: human, mouse, rat, chicken, C.elegans, fruit fly, zebrafish and arabidopsis. Add-on functionalities are supports for calculating semantic similarity between ontology terms (and between genes) and for calculating network affinity based on random walk; both can be done via high-performance parallel computing.",2020-02-20,Hai Fang and Julian Gough,"http://dnet.r-forge.r-project.org,
https://github.com/hfang-bristol/dnet",TRUE,https://github.com/hfang-bristol/dnet,65092,10,2020-02-20T09:42:01Z,6509.2
do,"Flexibly convert data between long and wide format using just two
functions: reshape_toLong() and reshape_toWide().",2020-04-01,Jing Zhang,https://github.com/yikeshu0611/do,TRUE,https://github.com/yikeshu0611/do,6400,2,2019-12-16T23:43:40Z,3200
docuSignr,"Connect to the 'DocuSign' Rest API <https://www.docusign.com/p/RESTAPIGuide/RESTAPIGuide.htm>,
which supports embedded signing, and sending of documents. ",2017-10-22,Carl Ganz,https://github.com/CannaData/docuSignr,TRUE,https://github.com/cannadata/docusignr,12448,4,2019-07-02T22:43:10Z,3112
docxtools,"A set of helper functions for using R Markdown to create documents
in docx format, especially documents for use in a classroom or workshop
setting.",2020-06-03,Richard Layton,https://github.com/graphdr/docxtools,TRUE,https://github.com/graphdr/docxtools,18381,18,2020-06-03T18:17:08Z,1021.1666666666666
dodgr,"Distances on dual-weighted directed graphs using priority-queue
shortest paths (Padgham (2019) <doi:10.32866/6945>). Weighted directed
graphs have weights from A to B which may differ from those from B to A.
Dual-weighted directed graphs have two sets of such weights. A canonical
example is a street network to be used for routing in which routes are
calculated by weighting distances according to the type of way and mode of
transport, yet lengths of routes must be calculated from direct distances.",2020-05-05,Mark Padgham,https://github.com/ATFutures/dodgr,TRUE,https://github.com/atfutures/dodgr,22164,76,2020-05-26T14:52:15Z,291.63157894736844
doFuture,"Provides a '%dopar%' adapter such that any type of futures can
be used as backends for the 'foreach' framework.",2020-01-11,Henrik Bengtsson,https://github.com/HenrikBengtsson/doFuture,TRUE,https://github.com/henrikbengtsson/dofuture,68128,52,2020-05-01T20:02:00Z,1310.1538461538462
donut,"Finds the k nearest neighbours in a dataset of specified points,
adding the option to wrap certain variables on a torus. The user chooses
the algorithm to use to find the nearest neighbours. Two such algorithms,
provided by the packages 'RANN' <https://cran.r-project.org/package=RANN>,
and 'nabor' <https://cran.r-project.org/package=nabor>, are suggested.",2020-02-18,Paul J. Northrop,"http://github.com/paulnorthrop/donut,
https://paulnorthrop.github.io/donut/",TRUE,https://github.com/paulnorthrop/donut,3292,0,2020-02-27T09:36:59Z,NA
doRedis,"A parallel back end for the 'foreach' package using the 'Redis'
database.",2020-01-28,B. W. Lewis,NA,TRUE,https://github.com/bwlewis/doredis,85047,64,2020-05-28T23:26:04Z,1328.859375
doRNG,"Provides functions to perform
reproducible parallel foreach loops, using independent
random streams as generated by L'Ecuyer's combined
multiple-recursive generator [L'Ecuyer (1999), <DOI:10.1287/opre.47.1.159>].
It enables to easily convert standard %dopar% loops into
fully reproducible loops, independently of the number
of workers, the task scheduling strategy, or the chosen
parallel environment and associated foreach backend.",2020-01-27,Renaud Gaujoux,https://renozao.github.io/doRNG,TRUE,https://github.com/renozao/dorng,508940,11,2020-01-26T15:57:16Z,46267.27272727273
dosresmeta,"Estimates dose-response relations from summarized dose-response
data and to combines them according to principles of (multivariate)
random-effects models. ",2017-09-12,Alessio Crippa,https://alecri.github.io/software/dosresmeta.html,TRUE,https://github.com/alecri/dosresmeta,26600,4,2019-07-25T08:43:31Z,6650
DOT,"Renders DOT diagram markup language in R and also provides the possibility to
export the graphs in PostScript and SVG (Scalable Vector Graphics) formats.
In addition, it supports literate programming packages such as 'knitr' and
'rmarkdown'.",2016-04-16,E. F. Haghish,http://haghish.com/dot,TRUE,https://github.com/haghish/dot,15725,3,2020-02-10T00:53:37Z,5241.666666666667
dotwhisker,Quick and easy dot-and-whisker plots of regression results.,2018-06-27,Frederick Solt,NA,TRUE,https://github.com/fsolt/dotwhisker,74507,45,2020-02-14T16:22:24Z,1655.7111111111112
downloadthis,Implement download buttons in HTML output from 'rmarkdown' without the need for 'runtime:shiny'.,2020-05-04,Felipe Mattioni Maturana,https://github.com/fmmattioni/downloadthis,TRUE,https://github.com/fmmattioni/downloadthis,2628,39,2020-05-04T18:54:30Z,67.38461538461539
dplR,"Perform tree-ring analyses such as detrending, chronology
building, and cross dating. Read and write standard file formats
used in dendrochronology.",2020-03-19,Andy Bunn,https://github.com/AndyBunn/dplR,TRUE,https://github.com/andybunn/dplr,129715,6,2020-03-20T21:42:35Z,21619.166666666668
dplyr,"A fast, consistent tool for working with data frame
like objects, both in memory and out of memory.",2020-05-29,Hadley Wickham,"https://dplyr.tidyverse.org, https://github.com/tidyverse/dplyr",TRUE,https://github.com/tidyverse/dplyr,28615272,3372,2020-06-09T10:08:50Z,8486.142348754449
dqrng,"Several fast random number generators are provided as C++
header only libraries: The PCG family by O'Neill (2014
<https://www.cs.hmc.edu/tr/hmc-cs-2014-0905.pdf>) as well as
Xoroshiro128+ and Xoshiro256+ by Blackman and Vigna (2018
<arXiv:1805.01407>). In addition fast functions for generating random
numbers according to a uniform, normal and exponential distribution
are included. The latter two use the Ziggurat algorithm originally
proposed by Marsaglia and Tsang (2000, <doi:10.18637/jss.v005.i08>).
These functions are exported to R and as a C++ interface and are
enabled for use with the default 64 bit generator from the PCG family,
Xoroshiro128+ and Xoshiro256+ as well as the 64 bit version of the 20 rounds
Threefry engine (Salmon et al., 2011 <doi:10.1145/2063384.2063405>) as
provided by the package 'sitmo'.",2019-05-17,Ralf Stubner,"https://www.daqana.org/dqrng, https://github.com/daqana/dqrng",TRUE,https://github.com/daqana/dqrng,106413,16,2020-06-04T13:01:06Z,6650.8125
dqshiny,"Provides highly customizable modules to enhance your shiny apps.
Includes layout independent collapsible boxes and value boxes, a very fast
autocomplete input, rhandsontable extensions for filtering and paging and
much more.",2020-05-10,Richard Kunze,https://github.com/daqana/dqshiny,TRUE,https://github.com/daqana/dqshiny,11099,46,2020-05-10T19:36:07Z,241.2826086956522
dragon,Visualization and manipulation of the mineral-chemistry network across deep time on earth. ,2019-10-20,Stephanie J. Spielman,https://github.com/spielmanlab/dragon,TRUE,https://github.com/spielmanlab/dragon,4635,1,2020-01-23T13:02:03Z,4635
drake,"A general-purpose computational engine for data
analysis, drake rebuilds intermediate data objects when their
dependencies change, and it skips work when the results are already up
to date. Not every execution starts from scratch, there is native
support for parallel and distributed computing, and completed projects
have tangible evidence that they are reproducible. Extensive
documentation, from beginner-friendly tutorials to practical examples
and more, is available at the reference website
<https://docs.ropensci.org/drake/> and the online manual
<https://books.ropensci.org/drake/>.",2020-06-02,William Michael Landau,"https://github.com/ropensci/drake,
https://docs.ropensci.org/drake,
https://books.ropensci.org/drake/",TRUE,https://github.com/ropensci/drake,78840,1134,2020-06-08T20:27:38Z,69.52380952380952
drat,"Creation and use of R Repositories via helper functions
to insert packages into a repository, and to add repository information
to the current R session. Two primary types of repositories are support:
gh-pages at GitHub, as well as local repositories on either the same machine
or a local network. Drat is a recursive acronym: Drat R Archive Template. ",2020-05-30,Dirk Eddelbuettel with contributions by Carl Boettiger,http://dirk.eddelbuettel.com/code/drat.html,TRUE,https://github.com/eddelbuettel/drat,221552,117,2020-05-30T00:21:44Z,1893.6068376068376
DRDID,"Implements the locally efficient doubly robust difference-in-differences (DiD)
estimators for the average treatment effect proposed by Sant'Anna and Zhao (2020)
<arXiv:1812.01723>. The estimator combines inverse probability weighting and outcome
regression estimators (also implemented in the package) to form estimators with
more attractive statistical properties. Two different estimation methods can be used
to estimate the nuisance functions.",2020-05-18,Pedro H. C. SantAnna,"https://pedrohcgs.github.io/DRDID/,
https://github.com/pedrohcgs/DRDID",TRUE,https://github.com/pedrohcgs/drdid,285,7,2020-05-12T17:44:49Z,40.714285714285715
dreamerr,"Set of tools to facilitate package development and make R a more user-friendly place. Mostly for developers (or anyone who writes/shares functions). Provides a simple, powerful and flexible way to check the arguments passed to functions.
The developer can easily describe the type of argument needed. If the user provides a wrong argument, then an informative error message is prompted with the requested type and the problem clearly stated--saving the user a lot of time in debugging. ",2020-06-08,Laurent Berge,NA,TRUE,https://github.com/lrberge/dreamerr,1093,5,2020-05-03T08:23:40Z,218.6
DriftBurstHypothesis,"Calculates the T-Statistic for the drift burst hypothesis from the working paper Christensen, Oomen and Reno (2018) <DOI:10.2139/ssrn.2842535>. The authors' MATLAB code is available upon request, see: <https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2842535>.",2020-03-25,Emil Sjoerup,https://github.com/emilsjoerup/DriftBurstHypothesis,TRUE,https://github.com/emilsjoerup/driftbursthypothesis,9883,2,2019-11-30T13:19:14Z,4941.5
drifter,"Concept drift refers to the change in the data distribution or
in the relationships between variables over time.
'drifter' calculates distances between variable distributions or
variable relations and identifies both types of drift.
Key functions are:
calculate_covariate_drift() checks distance between corresponding variables in two datasets,
calculate_residuals_drift() checks distance between residual distributions for two models,
calculate_model_drift() checks distance between partial dependency profiles for two models,
check_drift() executes all checks against drift.
'drifter' is a part of the 'DrWhy.AI' universe (Biecek 2018) <arXiv:1806.08915>.",2019-05-31,Przemyslaw Biecek,https://ModelOriented.github.io/drifter/,TRUE,https://github.com/modeloriented/drifter,5335,5,2019-09-24T08:31:48Z,1067
driftR,"A tidy implementation of equations that correct for instrumental drift in
continuous water quality monitoring data. There are many sources of water quality data
including private (ex: YSI instruments) and open source (ex: USGS and NDBC), each of
which are susceptible to errors/inaccuracies due to drift. This package allows the
user to correct their data using one or two standard reference values in a uniform,
reproducible way. The equations implemented are from Hasenmueller (2011)
<doi:10.7936/K7N014KS>.",2018-06-13,Andrew Shaughnessy,https://github.com/shaughnessyar/driftR,TRUE,https://github.com/shaughnessyar/driftr,10082,4,2020-03-04T22:21:35Z,2520.5
DRomics,"Several functions are provided for dose-response (or concentration-response) characterization from omics data. 'DRomics' is especially dedicated to omics data obtained using a typical dose-response design, favoring a great number of tested doses (or concentrations, at least 5, and the more the better) rather than a great number of replicates (no need of three replicates). 'DRomics' provides functions 1) to check, normalize and or transform data, 2) to select monotonic or biphasic significantly responding items (e.g. probes, metabolites), 3) to choose the best-fit model among a predefined family of monotonic and biphasic models to describe each selected item 4) to derive a benchmark dose or concentration and a typology of response from each fitted curve. In the available version data are supposed to be single-channel microarray data in log2, RNAseq data in raw counts or already pretreated metabolomic data in log scale. For further details see Larras et al (2018) <DOI:10.1021/acs.est.8b04752>.",2019-09-16,Aurelie Siberchicot,https://github.com/aursiber/DRomics,TRUE,https://github.com/aursiber/dromics,6293,0,2020-02-18T14:56:13Z,NA
DRR,"An Implementation of Dimensionality Reduction
via Regression using Kernel Ridge Regression.",2020-02-12,Guido Kraemer,https://github.com/gdkrmr/DRR,TRUE,https://github.com/gdkrmr/drr,891913,7,2020-02-12T13:07:59Z,127416.14285714286
drtmle,"Targeted minimum loss-based estimators of counterfactual means and
causal effects that are doubly-robust with respect both to consistency and
asymptotic normality (Benkeser et al (2017), <doi:10.1093/biomet/asx053>; MJ
van der Laan (2014), <doi:10.1515/ijb-2012-0038>).",2020-01-09,David Benkeser,https://github.com/benkeser/drtmle,TRUE,https://github.com/benkeser/drtmle,11547,9,2020-01-10T14:51:52Z,1283
ds4psy,"All datasets and functions required for the examples and exercises of the book ""Data Science for Psychologists"" (by Hansjoerg Neth, Konstanz University, 2020), available at <https://bookdown.org/hneth/ds4psy/>. The book and course introduce principles and methods of data science to students of psychology and other biological or social sciences. The 'ds4psy' package primarily provides datasets, but also functions for data generation and manipulation (e.g., of text and time data) and graphics that are used in the book and its exercises. All functions included in 'ds4psy' are designed to be instructive and entertaining, rather than elegant or efficient. ",2020-05-06,Hansjoerg Neth,"https://bookdown.org/hneth/ds4psy/,
https://github.com/hneth/ds4psy/",TRUE,https://github.com/hneth/ds4psy,4820,3,2020-05-30T07:20:22Z,1606.6666666666667
DSAIDE,"Exploration of simulation models (apps) of various infectious disease transmission dynamics scenarios.
The purpose of the package is to help individuals learn
about infectious disease epidemiology (ecology/evolution) from a dynamical systems perspective.
All apps include explanations of the underlying models and instructions on what to do with the models. ",2020-01-09,Andreas Handel,"https://ahgroup.github.io/DSAIDE,
https://github.com/ahgroup/DSAIDE",TRUE,https://github.com/ahgroup/dsaide,16538,9,2020-05-22T19:47:13Z,1837.5555555555557
DSAIRM,"A collection of 'shiny' apps that allow for the simulation and
exploration of various within-host immune response scenarios.
The purpose of the package is to help individuals learn
about within-host infection and immune response modeling from a dynamical systems perspective.
All apps include explanations of the underlying models and instructions on
what to do with the models.
The development of this package was partially supported by NIH grant U19AI117891.",2019-07-08,Andreas Handel,"https://ahgroup.github.io/DSAIRM,
https://github.com/ahgroup/DSAIRM/",TRUE,https://github.com/ahgroup/dsairm,9473,7,2020-05-31T01:03:07Z,1353.2857142857142
dscore,"The D-score is a quantitative measure of child development.
The D-score follows the Rasch model. See Jacobusse, van Buuren and
Verkerk (2006) <doi:10.1002/sim.2351>. The user can convert
milestone scores from 19 assessment instruments into the D-score
and the DAZ (D-score adjusted for age). Several tools assist in
mapping milestone names into the 9-position Global Scale of Early
Development (GSED) convention. Supports calculation of the D-score
using 'dutch' <doi:10.1177/0962280212473300>,
'gcdg' <doi:10.1136/bmjgh-2019-001724> and 'gsed' conversion keys.
The user can calculate DAZ using 'dutch' and 'gcdg' age-conditional
references.",2020-05-12,Stef van Buuren,"https://github.com/stefvanbuuren/dscore,
https://stefvanbuuren.name/dscore/,
https://stefvanbuuren.name/dbook1/",TRUE,https://github.com/stefvanbuuren/dscore,2967,2,2020-05-12T15:51:09Z,1483.5
DSI,"'DataSHIELD' is an infrastructure and series of R packages that
enables the remote and 'non-disclosive' analysis of sensitive research data.
This package defines the API that is to be implemented by 'DataSHIELD' compliant
data repositories.",2020-05-18,Yannick Marcon,http://datashield.ac.uk,TRUE,https://github.com/datashield/dsi,1500,0,2020-05-29T12:06:50Z,NA
DSLite,"'DataSHIELD' is an infrastructure and series of R packages that
enables the remote and 'non-disclosive' analysis of sensitive research data.
This 'DataSHIELD Interface' implementation is for analyzing datasets living
in the current R session. The purpose of this is primarily for lightweight
'DataSHIELD' analysis package development.",2020-05-18,Yannick Marcon,http://www.datashield.ac.uk https://doi.org/10.1093/ije/dyu188,TRUE,https://github.com/datashield/dslite,1352,0,2020-05-18T12:20:27Z,NA
dsm,"Density surface modelling of line transect data. A Generalized
Additive Model-based approach is used to calculate spatially-explicit estimates
of animal abundance from distance sampling (also presence/absence and strip
transect) data. Several utility functions are provided for model checking,
plotting and variance estimation.",2020-04-22,David L. Miller,http://github.com/DistanceDevelopment/dsm,TRUE,https://github.com/distancedevelopment/dsm,29611,4,2020-06-08T13:27:31Z,7402.75
DSOpal,"'DataSHIELD' is an infrastructure and series of R packages that
enables the remote and 'non-disclosive' analysis of sensitive research data.
This package is the 'DataSHIELD' interface implementation for 'Opal', which is
the data integration application for biobanks by 'OBiBa'. Participant data, once
collected from any data source, must be integrated and stored in a central
data repository under a uniform model. 'Opal' is such a central repository.
It can import, process, validate, query, analyze, report, and export data.
'Opal' is the reference implementation of the 'DataSHIELD' infrastructure.",2020-05-18,Yannick Marcon,"https://www.obiba.org https://www.obiba.org/pages/products/opal/
http://www.datashield.ac.uk https://doi.org/10.1093/ije/dyu188",TRUE,https://github.com/datashield/dsopal,1056,0,2020-05-18T12:40:07Z,NA
DSpoty,"You can retrieve 'Spotify' API Information such as artists, albums, tracks, features tracks, recommendations or related artists.
This package allows you to search all the information by name and also includes a distance based algorithm to find similar songs.
More information: <https://developer.spotify.com/documentation/web-api/> .",2020-01-16,Alberto Almuiña,https://github.com/AlbertoAlmuinha/DSpoty,TRUE,https://github.com/albertoalmuinha/dspoty,3285,1,2020-04-27T18:23:10Z,3285
dsr,"A set of functions to compute and compare directly standardized rates, rate differences and ratios. A variety of user defined options for analysis (e.g confidence intervals) and formatting are included.",2019-08-23,Matthew Kumar,https://github.com/mattkumar/dsr,TRUE,https://github.com/mattkumar/dsr,11619,2,2019-08-23T14:38:29Z,5809.5
DSSAT,"The purpose of this package is to provide a comprehensive
R interface to the Decision Support System for Agrotechnology
Transfer Cropping Systems Model (DSSAT-CSM) documented by
Jones et al (2003) <doi:10.1016/S1161-0301(02)00107-7>. The package
provides cross-platform functions to read and write input files,
run DSSAT-CSM, and read output files.",2020-05-18,Phillip D. Alderman,NA,TRUE,https://github.com/palderman/dssat,1603,6,2020-05-18T14:20:36Z,267.1666666666667
dssd,"Creates survey designs for distance sampling surveys. These
designs can be assessed for various effort and coverage statistics.
Once the user is satisfied with the design characteristics they can
generate a set of transects to use in their distance sampling survey.
Many of the designs implemented in this R package were first made
available in our 'Distance' for Windows software and are detailed in
Chapter 7 of Advanced Distance Sampling, Buckland et. al. (2008,
ISBN-13: 978-0199225873). Find out more about estimating animal/plant
abundance with distance sampling at <http://distancesampling.org/>. ",2020-02-20,Laura Marshall,NA,TRUE,https://github.com/distancedevelopment/dssd,4872,0,2020-02-19T18:21:40Z,NA
dst,"Using the Theory of Belief Functions for evidence calculus. Basic probability assignments, or mass functions, can be defined on the subsets of a set of possible values and combined. A mass function can be extended to a larger frame. Marginalization, i.e. reduction to a smaller frame can also be done. These features can be combined to analyze small belief networks and take into account situations where information cannot be satisfactorily described by probability distributions.",2020-03-28,Claude Boivin,NA,TRUE,https://github.com/rapler/dst-1,19526,2,2020-04-05T15:09:40Z,9763
dstack,"A native R package that allows to publish, share and track revisions
of plots using your favorite plotting package, e.g. 'ggplot2'. It also provides
a kind of interactivity for such plots by specifying certain parameters for any
specific plot view. See <https://docs.dstack.ai> for more information.",2020-04-09,Vitaly Khudobakhshov,https://dstack.ai,TRUE,https://github.com/dstackai/dstack-r,1327,1,2020-04-09T13:49:51Z,1327
DstarM,"A collection of functions to estimate parameters of a diffusion model via a D*M analysis. Build in models are: the Ratcliff diffusion model, the RWiener diffusion model, and Linear Ballistic Accumulator models. Custom models functions can be specified as long as they have a density function.",2018-05-18,Don van den Bergh,https://github.com/vandenman/DstarM,TRUE,https://github.com/vandenman/dstarm,14209,1,2020-01-03T10:33:44Z,14209
DT,"Data objects in R can be rendered as HTML tables using the
JavaScript library 'DataTables' (typically via R Markdown or Shiny). The
'DataTables' library has been included in this R package. The package name
'DT' is an abbreviation of 'DataTables'.",2020-03-23,Yihui Xie,https://github.com/rstudio/DT,TRUE,https://github.com/rstudio/dt,5158894,408,2020-05-20T19:28:06Z,12644.348039215687
DtD,"Provides fast methods to work with Merton's distance to default
model introduced in Merton (1974) <doi:10.1111/j.1540-6261.1974.tb03058.x>.
The methods includes simulation and estimation of the parameters.",2020-02-11,Benjamin Christoffersen,NA,TRUE,https://github.com/boennecd/dtd,12252,1,2020-02-07T09:31:26Z,12252
dtplyr,"Provides a data.table backend for 'dplyr'. The goal of 'dtplyr'
is to allow you to write 'dplyr' code that is automatically translated to
the equivalent, but usually much faster, data.table code.",2020-01-23,Hadley Wickham,https://github.com/tidyverse/dtplyr,TRUE,https://github.com/tidyverse/dtplyr,642700,384,2020-05-28T13:54:09Z,1673.6979166666667
DTSg,"Basic time series functionalities such as listing of missing
values, application of arbitrary aggregation as well as rolling (asymmetric)
window functions and automatic detection of periodicity. As it is mainly
based on 'data.table', it is fast and - in combination with the 'R6'
package - offers reference semantics. In addition to its native R6
interface, it provides an S3 interface inclusive an S3 wrapper method
generator for those who prefer the latter.",2020-06-09,Gerold Hepp,https://github.com/gisler/DTSg,TRUE,https://github.com/gisler/dtsg,6943,0,2020-06-09T18:44:08Z,NA
dttr2,"Manipulates date ('Date'), datetime ('POSIXct') and
time ('hms') vectors. Date/times are considered discrete and are
floored whenever encountered. Times are wrapped and time zones are
maintained unless explicitly altered by the user.",2020-05-01,Joe Thorley,https://github.com/poissonconsulting/dttr2,TRUE,https://github.com/poissonconsulting/dttr2,6064,7,2020-05-29T17:39:19Z,866.2857142857143
dtwclust,"Time series clustering along with optimized techniques related
to the Dynamic Time Warping distance and its corresponding lower bounds.
Implementations of partitional, hierarchical, fuzzy, k-Shape and TADPole
clustering are available. Functionality can be easily extended with
custom distance measures and centroid definitions. Implementations of
DTW barycenter averaging, a distance based on global alignment kernels,
and the soft-DTW distance and centroid routines are also provided.
All included distance functions have custom loops optimized for the
calculation of cross-distance matrices, including parallelization support.
Several cluster validity indices are included.",2019-12-11,Alexis Sarda-Espinosa,https://github.com/asardaes/dtwclust,TRUE,https://github.com/asardaes/dtwclust,123743,174,2020-03-23T13:40:55Z,711.1666666666666
dtwSat,"Provides an implementation of the Time-Weighted Dynamic Time
Warping (TWDTW) method for land cover mapping using satellite image time series.
TWDTW compares unclassified satellite image time series with a set of known
temporal patterns (e.g. phenological cycles associated with the vegetation).
Using 'dtwSat' the user can build temporal patterns for land cover types, apply
the TWDTW analysis for satellite datasets, visualize the results of the time
series analysis, produce land cover maps, create temporal plots for land cover
change, and compute accuracy assessment metrics.",2020-03-03,Victor Maus,https://github.com/vwmaus/dtwSat/,TRUE,https://github.com/vwmaus/dtwsat,22001,79,2020-03-03T09:09:49Z,278.49367088607596
duawranglr,"Create shareable data sets from raw data files that
contain protected elements. Relying on master crosswalk
files that list restricted variables, package functions
warn users about possible violations of data usage
agreement and prevent writing protected elements.",2019-11-19,Benjamin Skinner,https://github.com/btskinner/duawranglr,TRUE,https://github.com/btskinner/duawranglr,7550,0,2019-11-19T22:00:52Z,NA
dupree,"Identifies code blocks that have a high level of similarity
within a set of R files.",2020-04-21,Russ Hyde,https://github.com/russHyde/dupree,TRUE,https://github.com/russhyde/dupree,3082,8,2020-04-28T13:18:34Z,385.25
durmod,"Estimation of piecewise constant mixed proportional hazard competing risk model with NPMLE.
The model is described in S. Gaure et al. (2007) <doi:10.1016/j.jeconom.2007.01.015>,
J. Heckman and B. Singer (1984) <doi:10.2307/1911491>, and
B.G. Lindsay (1983) <doi:10.1214/aos/1176346059>.",2020-03-30,Simen Gaure,https://github.com/sgaure/durmod,TRUE,https://github.com/sgaure/durmod,5793,0,2019-12-10T10:18:46Z,NA
DVHmetrics,"Functionality for analyzing dose-volume histograms (DVH)
in radiation oncology: Read DVH text files, calculate DVH
metrics as well as generalized equivalent uniform dose (gEUD),
biologically effective dose (BED), equivalent dose in 2 Gy
fractions (EQD2), normal tissue complication probability
(NTCP), and tumor control probability (TCP). Show DVH
diagrams, check and visualize quality assurance constraints
for the DVH. Includes web-based graphical user interface.",2020-03-19,Daniel Wollschlaeger,https://github.com/dwoll/DVHmetrics/,TRUE,https://github.com/dwoll/dvhmetrics,21942,3,2020-04-30T06:53:25Z,7314
dynamac,"While autoregressive distributed lag (ARDL) models allow for extremely flexible dynamics, interpreting substantive significance of complex lag structures remains difficult. This package is designed to assist users in dynamically simulating and plotting the results of various ARDL models. It also contains post-estimation diagnostics, including a test for cointegration when estimating the error-correction variant of the autoregressive distributed lag model (Pesaran, Shin, and Smith 2001 <doi:10.1002/jae.616>).",2020-04-03,Soren Jordan,https://github.com/andyphilips/dynamac/,TRUE,https://github.com/andyphilips/dynamac,15059,2,2020-06-02T13:34:58Z,7529.5
dynamichazard,"Contains functions that lets you fit dynamic hazard models using
state space models. The first implemented model is described in Fahrmeir
(1992) <doi:10.1080/01621459.1992.10475232> and Fahrmeir (1994)
<doi:10.1093/biomet/81.2.317>. Extensions hereof are available where the
Extended Kalman filter is replaced by an unscented Kalman filter and other
options including particle filters. The implemented particle filters support
more general state space models. ",2019-10-14,Benjamin Christoffersen,https://github.com/boennecd/dynamichazard,TRUE,https://github.com/boennecd/dynamichazard,34175,4,2019-10-14T08:18:54Z,8543.75
DynaRankR,"Provides functions for inferring longitudinal dominance hierarchies, which describe dominance relationships and their dynamics in a single latent hierarchy over time. Strauss & Holekamp (in press). ",2020-02-13,Eli D. Strauss,https://github.com/straussed/DynaRankR,TRUE,https://github.com/straussed/dynarankr,6205,1,2020-02-13T14:33:37Z,6205
dyndimred,"
Provides a common interface for applying dimensionality reduction methods,
such as Principal Component Analysis ('PCA'), Independent Component Analysis ('ICA'), diffusion maps,
Locally-Linear Embedding ('LLE'), t-distributed Stochastic Neighbor Embedding ('t-SNE'),
and Uniform Manifold Approximation and Projection ('UMAP').
Has built-in support for sparse matrices.",2020-03-08,Robrecht Cannoodt (<https://orcid.org/0000-0003-3641-729X>,https://github.com/dynverse/dyndimred,TRUE,https://github.com/dynverse/dyndimred,5327,3,2020-02-24T13:51:34Z,1775.6666666666667
dynprog,"A domain-specific language for specifying translating recursions
into dynamic-programming algorithms. See
<https://en.wikipedia.org/wiki/Dynamic_programming> for a description
of dynamic programming.",2019-12-09,Thomas Mailund,https://github.com/mailund/dynprog,TRUE,https://github.com/mailund/dynprog,8182,11,2019-12-11T10:29:46Z,743.8181818181819
dynsurv,"Time-varying coefficient models for interval censored and
right censored survival data including
1) Bayesian Cox model with time-independent, time-varying or
dynamic coefficients for right censored and interval censored data studied by
Sinha et al. (1999) <doi:10.1111/j.0006-341X.1999.00585.x> and
Wang et al. (2013) <doi:10.1007/s10985-013-9246-8>,
2) Spline based time-varying coefficient Cox model for right censored data
proposed by Perperoglou et al. (2006) <doi:10.1016/j.cmpb.2005.11.006>, and
3) Transformation model with time-varying coefficients for right censored data
using estimating equations proposed by
Peng and Huang (2007) <doi:10.1093/biomet/asm058>.",2019-08-27,Wenjie Wang,https://github.com/wenjie2wang/dynsurv,TRUE,https://github.com/wenjie2wang/dynsurv,38836,5,2020-03-26T00:47:47Z,7767.2
dynutils,"
Provides common functionality for the 'dynverse' packages.
'dynverse' is created to support the development, execution, and benchmarking of trajectory inference methods.
For more information, check out <https://dynverse.org>.",2020-02-21,Robrecht Cannoodt (<https://orcid.org/0000-0003-3641-729X>,https://github.com/dynverse/dynutils,TRUE,https://github.com/dynverse/dynutils,16859,1,2020-02-21T12:58:51Z,16859
dynwrap,"Provides functionality to infer trajectories from single-cell data,
represent them into a common format, and adapt them. Other biological information
can also be added, such as cellular grouping, RNA velocity and annotation.
Saelens et al. (2019) <doi:10.1038/s41587-019-0071-9>.",2020-05-14,Robrecht Cannoodt (<https://orcid.org/0000-0003-3641-729X>,https://github.com/dynverse/dynwrap,TRUE,https://github.com/dynverse/dynwrap,4297,10,2020-05-15T08:58:06Z,429.7
eaf,"Computation and visualization of the empirical attainment function (EAF) for the analysis of random sets in multi-criterion optimization. M. Lopez-Ibanez, L. Paquete, and T. Stuetzle (2010) <doi:10.1007/978-3-642-02538-9_9>. ",2020-03-05,Manuel López-Ibáñez,"http://lopez-ibanez.eu/eaftools,
https://github.com/MLopez-Ibanez/eaf",TRUE,https://github.com/mlopez-ibanez/eaf,49032,8,2020-05-26T17:45:17Z,6129
earlyR,"Implements a simple, likelihood-based estimation of the reproduction number (R0) using a branching process with a Poisson likelihood. This model requires knowledge of the serial interval distribution, and dates of symptom onsets. Infectiousness is determined by weighting R0 by the probability mass function of the serial interval on the corresponding day. It is a simplified version of the model introduced by Cori et al. (2013) <doi:10.1093/aje/kwt133>.",2017-12-06,Thibaut Jombart,http://www.repidemicsconsortium.org/earlyR,TRUE,https://github.com/reconhub/earlyr,13558,5,2019-06-10T09:31:29Z,2711.6
earthtide,"This is a port of 'Fortran ETERNA 3.4'
<http://igets.u-strasbg.fr/soft_and_tool.php> by H.G. Wenzel
for calculating synthetic Earth tides using the
Hartmann and Wenzel (1994) <doi:10.1029/95GL03324> or
Kudryavtsev (2004) <doi:10.1007/s00190-003-0361-2> tidal catalogs. ",2020-03-13,Jonathan Kennel,https://github.com/jkennel/earthtide,TRUE,https://github.com/jkennel/earthtide,6549,5,2020-03-13T15:52:33Z,1309.8
easyalluvial,"Alluvial plots are similar to sankey diagrams and visualise categorical data
over multiple dimensions as flows. (Rosvall M, Bergstrom CT (2010) Mapping Change in
Large Networks. PLoS ONE 5(1): e8694. <doi:10.1371/journal.pone.0008694>)
Their graphical grammar however is a bit more complex than that of a regular x/y
plots. The 'ggalluvial' package made a great job of translating that grammar into
'ggplot2' syntax and gives you many options to tweak the appearance of an alluvial
plot, however there still remains a multi-layered complexity that makes it difficult
to use 'ggalluvial' for explorative data analysis. 'easyalluvial' provides a simple
interface to this package that allows you to produce a decent alluvial plot from any
dataframe in either long or wide format from a single line of code while also handling
continuous data. It is meant to allow a quick visualisation of entire dataframes
with a focus on different colouring options that can make alluvial plots a great
tool for data exploration. ",2020-05-07,Bjoern Koneswarakantha,https://github.com/erblast/easyalluvial,TRUE,https://github.com/erblast/easyalluvial,14320,39,2020-05-06T22:01:47Z,367.1794871794872
easyCODA,"Univariate and multivariate methods for compositional data
analysis, based on logratios. The package implements the approach in the
book Compositional Data Analysis in Practice by Michael Greenacre (2018),
where accent is given to simple pairwise logratios. Selection can be made
of logratios that account for a maximum percentage of logratio variance.
Various multivariate analyses of logratios are included in the package. ",2019-03-10,Michael Greenacre,https://github.com/michaelgreenacre/CODAinPractice/,TRUE,https://github.com/michaelgreenacre/codainpractice,11102,5,2020-04-08T08:54:32Z,2220.4
easycsv,"Allows users to easily read multiple comma separated tables and create a data frame under the same name.
Is able to read multiple comma separated tables from a local directory, a zip file or a zip file on a remote directory. ",2018-05-21,Dror Bogin,https://github.com/bogind/easycsv,TRUE,https://github.com/bogind/easycsv,15120,4,2019-09-23T08:01:28Z,3780
easyr,"Makes difficult operations easy. Includes these types of functions:
shorthand, type conversion, data wrangling, and workflow.
Also includes some helpful data objects: NA strings, U.S. state list, color blind charting colors.
Built and shared by Oliver Wyman Actuarial Consulting. Accepting proposed contributions through GitHub.",2020-06-02,Bryce Chamberlain,https://github.com/oliver-wyman-actuarial/easyr,TRUE,https://github.com/oliver-wyman-actuarial/easyr,3358,8,2020-06-05T17:44:40Z,419.75
easySdcTable,"The main function, ProtectTable(), performs table suppression according to a
frequency rule with a data set as the only required input. Within this function,
protectTable(), protectLinkedTables() or runArgusBatchFile() in package 'sdcTable' is called.
Lists of level-hierarchy (parameter 'dimList') and other required input to these functions
are created automatically.
The function, PTgui(), starts a graphical user interface based on the shiny package.",2020-04-04,Øyvind Langsrud,https://github.com/statisticsnorway/easySdcTable,TRUE,https://github.com/statisticsnorway/easysdctable,15719,0,2020-04-30T06:44:53Z,NA
ebirdst,"Tools to download, map, plot and analyze eBird
Status and Trends data
(<https://ebird.org/science/status-and-trends>). eBird
(<https://ebird.org>) is a global database of bird observations
collected by citizen scientists. eBird Status and Trends uses these
data to analyze continental bird abundances, range boundaries,
habitats, and trends.",2020-03-23,Matthew Strimas-Mackey,https://github.com/CornellLabofOrnithology/ebirdst,TRUE,https://github.com/cornelllabofornithology/ebirdst,6370,24,2020-05-03T01:58:02Z,265.4166666666667
EBMAforecast,Create forecasts from multiple predictions using ensemble Bayesian model averaging (EBMA). EBMA models can be estimated using an expectation maximization (EM) algorithm or as fully Bayesian models via Gibbs sampling.,2020-05-20,Florian M. Hollenbach,<https://github.com/fhollenbach/EBMA/>,TRUE,https://github.com/fhollenbach/ebma,17755,0,2020-05-20T11:14:29Z,NA
echarts4r,"Easily create interactive charts by leveraging the 'Echarts Javascript' library which includes
34 chart types, themes, 'Shiny' proxies and animations.",2019-07-18,John Coene,http://echarts4r.john-coene.com/,TRUE,https://github.com/johncoene/echarts4r,45837,255,2020-06-09T17:04:46Z,179.75294117647059
eChem,"Simulates cyclic voltammetry, linear-sweep voltammetry
(both with and without stirring of the solution), and single-pulse
and double-pulse chronoamperometry and chronocoulometry
experiments using the implicit finite difference method outlined in
Gosser (1993, ISBN: 9781560810261) and in Brown (2015)
<doi:10.1021/acs.jchemed.5b00225>. Additional functions provide
ways to display and to examine the results of these simulations.
The primary purpose of this package is to provide tools for
use in courses in analytical chemistry.",2018-07-01,David Harvey,https://github.com/dtharvey/eChem,TRUE,https://github.com/dtharvey/echem,7418,2,2019-07-06T12:23:44Z,3709
echo.find,"Provides a function (echo_find()) designed to find rhythms
from data using extended harmonic oscillators. For more information,
see H. De los Santos et al. (2020) <doi:10.1093/bioinformatics/btz617> .",2020-05-28,Hannah De los Santos,https://github.com/delosh653/ECHO,TRUE,https://github.com/delosh653/echo,11255,3,2020-05-13T20:12:44Z,3751.6666666666665
echor,"An R interface to United States Environmental
Protection Agency (EPA) Environmental Compliance
History Online ('ECHO') Application Program Interface
(API). 'ECHO' provides information about EPA permitted
facilities, discharges, and other reporting info
associated with permitted entities. Data are obtained
from <https://echo.epa.gov/>. ",2020-01-29,Michael Schramm,NA,TRUE,https://github.com/mps9506/echor,10430,4,2020-01-31T19:48:56Z,2607.5
ecmwfr,"Programmatic interface to the European Centre for Medium-Range
Weather Forecasts dataset web services (ECMWF; <https://www.ecmwf.int/>)
and Copernicus's Climate Data Store (CDS;
<https://cds.climate.copernicus.eu>). Allows for easy downloads of weather
forecasts and climate reanalysis data in R.",2020-05-17,Koen Hufkens,https://github.com/khufkens/ecmwfr,TRUE,https://github.com/khufkens/ecmwfr,11440,41,2020-06-05T13:51:30Z,279.0243902439024
EcoDiet,"Biotracers and stomach content analyses are combined in a Bayesian hierarchical model
to estimate a probabilistic topology matrix (all trophic link probabilities) and a diet matrix
(all diet proportions).
The package relies on the JAGS software and the 'rjags' package to run a Markov chain Monte Carlo
approximation of the different variables.",2020-03-05,Pierre-Yves Hernvann,https://github.com/pyhernvann/EcoDiet,TRUE,https://github.com/pyhernvann/ecodiet,1238,1,2020-03-11T14:11:16Z,1238
ecodist,"Dissimilarity-based analysis functions including ordination and Mantel test functions, intended for use with spatial and community data. The original package description is in Goslee and Urban (2007) <doi:10.18637/jss.v022.i07>, with further statistical detail in Goslee (2010) <doi:10.1007/s11258-009-9641-0>.",2020-04-07,Sarah Goslee,NA,TRUE,https://github.com/phiala/ecodist,178635,2,2020-04-02T18:24:45Z,89317.5
EcoGenetics,"Management and exploratory analysis of spatial data in landscape genetics. Easy integration of information from multiple sources with ""ecogen"" objects.",2020-05-24,Leandro Roser,"https://github.com/cran/EcoGenetics,
https://leandroroser.github.io/EcoGenetics-Tutorial",TRUE,https://github.com/cran/ecogenetics,28606,1,2020-05-24T14:20:17Z,28606
ECoL,"Provides measures to characterize the complexity of classification
and regression problems based on aspects that quantify the linearity of the
data, the presence of informative features, the sparsity and dimensionality
of the datasets. This package provides bug fixes, generalizations and
implementations of many state of the art measures. The measures are
described in the papers: Lorena et al. (2019) <doi:10.1145/3347711> and
Lorena et al. (2018) <doi:10.1007/s10994-017-5681-1>.",2019-11-05,Luis Garcia,https://github.com/lpfgarcia/ECoL/,TRUE,https://github.com/lpfgarcia/ecol,9782,29,2019-11-04T23:11:30Z,337.3103448275862
ecolottery,"Coalescent-Based Simulation of Ecological Communities as proposed
by Munoz et al. (2017) <doi:10.13140/RG.2.2.31737.26728>. The package includes
a tool for estimating parameters of community assembly by using Approximate
Bayesian Computation.",2017-07-03,François Munoz,https://github.com/frmunoz/ecolottery,TRUE,https://github.com/frmunoz/ecolottery,10053,9,2020-05-28T09:00:20Z,1117
EcoNetGen,"Randomly generate a wide range of interaction networks with
specified size, average degree, modularity, and topological
structure. Sample nodes and links from within simulated networks
randomly, by degree, by module, or by abundance. Simulations
and sampling routines are implemented in 'FORTRAN', providing
efficient generation times even for large networks. Basic
visualization methods also included. Algorithms implemented
here are described in de Aguiar et al. (2017) <arXiv:1708.01242>.",2019-07-13,Carl Boettiger,https://github.com/cboettig/EcoNetGen,TRUE,https://github.com/cboettig/econetgen,11495,7,2019-07-12T15:17:56Z,1642.142857142857
economiccomplexity,"A wrapper of different methods from Linear Algebra for the equations
introduced in The Atlas of Economic Complexity and related literature. This
package provides standard matrix and graph output that can be used seamlessly
with other packages.",2020-02-20,Mauricio Vargas,https://pachamaltese.github.io/economiccomplexity,TRUE,https://github.com/pachamaltese/economiccomplexity,5306,13,2020-02-20T03:38:30Z,408.15384615384613
ECOSolveR,"R interface to the Embedded COnic Solver (ECOS), an efficient
and robust C library for convex problems. Conic and equality
constraints can be specified in addition to integer and
boolean variable constraints for mixed-integer problems. This
R interface is inspired by the python interface and has
similar calling conventions.",2019-11-06,Balasubramanian Narasimhan,https://bnaras.github.io/ECOSolveR,TRUE,https://github.com/bnaras/ecosolver,48641,5,2019-11-05T19:47:05Z,9728.2
ecospat,"Collection of R functions and data sets for the support of spatial ecology analyses with a focus on pre, core and post modelling analyses of species distribution, niche quantification and community assembly. Written by current and former members and collaborators of the ecospat group of Antoine Guisan, Department of Ecology and Evolution (DEE) and Institute of Earth Surface Dynamics (IDYST), University of Lausanne, Switzerland. Read Di Cola et al. (2016) <doi:10.1111/ecog.02671> for details.",2020-03-25,Olivier Broennimann,http://www.unil.ch/ecospat/home/menuguid/ecospat-resources/tools.html,TRUE,https://github.com/ecospat/ecospat,38763,12,2020-03-23T08:08:05Z,3230.25
ecr,"Framework for building evolutionary algorithms for both single- and multi-objective continuous or discrete optimization problems. A set of predefined evolutionary building blocks and operators is included. Moreover, the user can easily set up custom objective functions, operators, building blocks and representations sticking to few conventions. The package allows both a black-box approach for standard tasks (plug-and-play style) and a much more flexible white-box approach where the evolutionary cycle is written by hand.",2017-07-10,Jakob Bossek,https://github.com/jakobbossek/ecr2,TRUE,https://github.com/jakobbossek/ecr2,16771,24,2020-03-24T13:26:58Z,698.7916666666666
edbuildmapr,"Import US Census Bureau, Education Demographic and Geographic Estimates Program,
Composite School District Boundaries Files for 2013-2017 with the option to attach the 'EdBuild'
master dataset of school district finance, student demographics, and community economic
indicators for every school district in the United States. The master dataset is built
from the US Census, Annual Survey of School System Finances (F33) and joins data from the
National Center for Education Statistics, Common Core of Data; the US Census, Small Area
Income and Poverty Estimates; and the US Census, Education Demographic and Geographic
Estimates. Additional functions in the package create a dataset of all pairs of school
district neighbors as either a dataframe or a shapefile and create formatted maps of
selected districts at the state or neighbor level, symbolized by a selected variable
in the 'EdBuild' master dataset. For full details about 'EdBuild' data processing please
see 'EdBuild' (2019) <https://edbuild.org/content/dividing-lines/main/methodology>. ",2020-04-16,Megan Brodzik,"https://github.com/EdBuild/edbuildmapr, https://edbuild.github.io/",TRUE,https://github.com/edbuild/edbuildmapr,1646,1,2020-04-16T17:11:37Z,1646
eddi,"Finds and downloads raw Evaporative Demand Drought
Index (EDDI) data, then reads the data into 'R' using the 'raster'
package. The EDDI product detects drought at multiple time scales,
from weekly ""flash droughts"" to long-term droughts. More information
about the EDDI data product can be found at
<https://www.esrl.noaa.gov/psd/eddi/>.",2019-05-22,Max Joseph,https://github.com/earthlab/eddi,TRUE,https://github.com/earthlab/eddi,4350,0,2019-12-17T17:01:01Z,NA
eddington,"Compute a cyclist's Eddington number, including efficiently
computing cumulative E over a vector. A cyclist's Eddington number
<https://en.wikipedia.org/wiki/Arthur_Eddington#Eddington_number_for_cycling>
is the maximum number satisfying the condition such that a cyclist has
ridden E miles or greater in E days. The algorithm in this package is an
improvement over the conventional approach because both summary statistics
and cumulative statistics can be computed in linear time, since it does not
require initial sorting of the data. These functions may also be used for
computing h-indices for authors, a metric described by Hirsch (2005)
<doi:10.1073/pnas.0507655102>. Both are specific applications of computing
the side length of a Durfee square
<https://en.wikipedia.org/wiki/Durfee_square>.",2020-03-24,Paul Egeler,https://github.com/pegeler/eddington2,TRUE,https://github.com/pegeler/eddington2,1105,1,2020-06-06T07:24:38Z,1105
edeaR,"Exploratory and descriptive analysis of event based data. Provides methods for describing and selecting process data, and for preparing event log data for process mining. Builds on the S3-class for event logs implemented in the package 'bupaR'.",2020-02-25,Gert Janssenswillen,"https://www.bupar.net, https://github.com/bupaverse/edeaR",TRUE,https://github.com/bupaverse/edear,44900,2,2020-04-30T12:18:35Z,22450
edina,"Perform a Bayesian estimation of the exploratory
deterministic input, noisy and gate (EDINA)
cognitive diagnostic model described by Chen et al. (2018)
<doi:10.1007/s11336-017-9579-4>.",2020-03-25,James Joseph Balamuta,https://github.com/tmsalab/edina,TRUE,https://github.com/tmsalab/edina,1221,0,2020-03-24T14:57:22Z,NA
editData,"An 'RStudio' addin for editing a 'data.frame' or a 'tibble'. You can delete, add or update a 'data.frame'
without coding. You can get resultant data as a 'data.frame'. In the package, modularized 'shiny' app codes are provided.
These modules are intended for reuse across applications.",2017-10-07,Keon-Woong Moon,https://github.com/cardiomoon/editData,TRUE,https://github.com/cardiomoon/editdata,37902,10,2020-03-06T05:46:48Z,3790.2
EDOIF,"A non-parametric framework based on estimation statistics principle. Its main purpose is to infer orders of empirical distributions from different categories based on a probability of finding a value in one distribution that is greater than an expectation of another distribution. Given a set of ordered-pair of real-category values the framework is capable of 1) inferring orders of domination of categories and representing orders in the form of a graph; 2) estimating magnitude of difference between a pair of categories in forms of mean-difference confidence intervals; and 3) visualizing domination orders and magnitudes of difference of categories. The publication of this package is at Chainarong Amornbunchornvej, Navaporn Surasvadi, Anon Plangprasopchok, and Suttipong Thajchayapong (2019) <arXiv:1911.06723>.",2019-12-02,Chainarong Amornbunchornvej,https://github.com/DarkEyes/EDOIF,TRUE,https://github.com/darkeyes/edoif,2596,0,2020-03-16T04:03:39Z,NA
edwards97,"Implements the Edwards (1997) <doi:10.1002/j.1551-8833.1997.tb08229.x>
Langmuir-based semi-empirical coagulation model, which predicts the concentration
of organic carbon remaining in water after treatment with an Al- or Fe-based
coagulant. Data and methods are provided to optimise empirical coefficients.",2020-03-23,Dewey Dunnington,"https://paleolimbot.github.io/edwards97/,
https://github.com/paleolimbot/edwards97",TRUE,https://github.com/paleolimbot/edwards97,1073,0,2020-03-20T16:37:07Z,NA
eechidna,"Data from the seven Australian Federal Elections (House of
Representatives) between 2001 and 2019, and from the four Australian
Censuses over the same period. Includes tools for visualizing and
analysing the data, as well as imputing Census data for years in
which a Census does not occur. This package incorporates
data that is copyright Commonwealth of Australia (Australian
Electoral Commission and Australian Bureau of Statistics) 2019.",2019-11-08,Jeremy Forbes,https://github.com/ropenscilabs/eechidna,TRUE,https://github.com/ropenscilabs/eechidna,13841,30,2020-04-19T07:36:19Z,461.3666666666667
eemR,"Provides various tools for preprocessing Emission-Excitation-Matrix (EEM) for Parallel Factor Analysis (PARAFAC). Different
methods are also provided to calculate common metrics such as humification index and fluorescence index.",2019-06-26,Philippe Massicotte,https://github.com/PMassicotte/eemR,TRUE,https://github.com/pmassicotte/eemr,18779,10,2020-03-19T14:24:53Z,1877.9
eeptools,"Collection of convenience functions to make working with
administrative records easier and more consistent. Includes functions to
clean strings, and identify cut points. Also includes three example data
sets of administrative education records for learning how to process records
with errors.",2020-05-02,Jared E. Knowles,https://github.com/jknowles/eeptools,TRUE,https://github.com/jknowles/eeptools,83218,25,2020-04-02T15:24:42Z,3328.72
EffectLiteR,"Use structural equation modeling to estimate average and
conditional effects of a treatment variable on an outcome variable, taking into
account multiple continuous and categorical covariates.",2019-12-10,Axel Mayer,https://github.com/amayer2010/EffectLiteR,TRUE,https://github.com/amayer2010/effectliter,21970,3,2020-01-17T10:06:18Z,7323.333333333333
effectsize,"Provide utilities to work with indices of effect size and standardized parameters for a wide variety of models (see support list of insight; Lüdecke, Waggoner & Makowski (2019) <doi:10.21105/joss.01412>), allowing computation and conversion of indices such as Cohen's d, r, odds, etc.",2020-05-19,Mattan S. Ben-Shachar,https://easystats.github.io/effectsize,TRUE,https://github.com/easystats/effectsize,132818,76,2020-06-09T19:38:50Z,1747.6052631578948
effsize,"A collection of functions to compute the standardized
effect sizes for experiments (Cohen d, Hedges g, Cliff delta, Vargha-Delaney A).
The computation algorithms have been optimized to allow efficient computation even
with very large data sets.",2020-04-09,Marco Torchiano,http://github.com/mtorchiano/effsize/,TRUE,https://github.com/mtorchiano/effsize,170843,92,2020-04-09T21:05:02Z,1856.9891304347825
egor,"Tools for importing, analyzing and visualizing ego-centered
network data. Supports several data formats, including the export formats of
'EgoNet', 'EgoWeb 2.0' and 'openeddi'. An interactive (shiny) app for the
intuitive visualization of ego-centered networks is provided. Also included
are procedures for creating and visualizing Clustered Graphs
(Lerner 2008 <DOI:10.1109/PACIFICVIS.2008.4475458>).",2020-03-03,Till Krenz,"https://github.com/tilltnet/egor, https://tilltnet.github.io/egor/",TRUE,https://github.com/tilltnet/egor,10357,10,2020-06-03T18:12:10Z,1035.7
EGRETci,"Collection of functions to evaluate uncertainty of results from
water quality analysis using the Weighted Regressions on Time Discharge and
Season (WRTDS) method. This package is an add-on to the EGRET package that
performs the WRTDS analysis. The WRTDS modeling
method was initially introduced and discussed in Hirsch et al. (2010) <doi:10.1111/j.1752-1688.2010.00482.x>,
and expanded in Hirsch and De Cicco (2015) <doi:10.3133/tm4A10>. The
paper describing the uncertainty and confidence interval calculations
is Hirsch et al. (2015) <doi:10.1016/j.envsoft.2015.07.017>.",2019-03-15,Laura DeCicco,https://github.com/USGS-R/EGRETci,TRUE,https://github.com/usgs-r/egretci,18193,5,2019-12-20T17:36:24Z,3638.6
eGST,"Genetic predisposition for complex traits is often manifested through multiple tissues of interest at different time points in the development. As an example, the genetic predisposition for obesity could be manifested through inherited variants that control metabolism through regulation of genes expressed in the brain and/or through the control of fat storage in the adipose tissue by dysregulation of genes expressed in adipose tissue. We present a method eGST (eQTL-based genetic subtyper) that integrates tissue-specific eQTLs with GWAS data for a complex trait to probabilistically assign a tissue of interest to the phenotype of each individual in the study. eGST estimates the posterior probability that an individual's phenotype can be assigned to a tissue based on individual-level genotype data of tissue-specific eQTLs and marginal phenotype data in a genome-wide association study (GWAS) cohort. Under a Bayesian framework of mixture model, eGST employs a maximum a posteriori (MAP) expectation-maximization (EM) algorithm to estimate the tissue-specific posterior probability across individuals. Methodology is available from: A Majumdar, C Giambartolomei, N Cai, MK Freund, T Haldar, T Schwarz, J Flint, B Pasaniuc (2019) <doi:10.1101/674226>.",2019-07-02,Arunabha Majumdar,https://github.com/ArunabhaCodes/eGST,TRUE,https://github.com/arunabhacodes/egst,3992,0,2019-07-01T06:43:24Z,NA
eha,"Sampling of risk sets in Cox regression, selections in
the Lexis diagram, bootstrapping. Parametric proportional
hazards fitting with left truncation and right censoring for
common families of distributions, piecewise constant hazards,
and discrete models. Parametric accelerated failure time models
for left truncated and right censored data.",2020-04-01,Göran Broström,https://goranbrostrom.github.io/eha/,TRUE,https://github.com/goranbrostrom/eha,165326,1,2020-03-31T20:11:40Z,165326
ehelp,"By overloading the R help() function, this package allows users to use ""docstring"" style comments within their own defined functions. The package also provides additional functions to mimic the R basic example() function and the prototyping of packages.",2020-04-05,Marcelo Ponce,https://github.com/mponce0/eHelp,TRUE,https://github.com/mponce0/ehelp,3567,1,2020-04-12T23:45:46Z,3567
EHRtemporalVariability,"Functions to delineate temporal dataset shifts in Electronic Health
Records through the projection and visualization of dissimilarities
among data temporal batches. This is done through the estimation of
data statistical distributions over time and their projection in
non-parametric statistical manifolds, uncovering the patterns of the
data latent temporal variability. 'EHRtemporalVariability' is
particularly suitable for multi-modal data and categorical variables
with a high number of values, common features of biomedical data where
traditional statistical process control or time-series methods may not
be appropriate. 'EHRtemporalVariability' allows you to explore and
identify dataset shifts through visual analytics formats such as
Data Temporal heatmaps and Information Geometric Temporal (IGT) plots.
An additional 'EHRtemporalVariability' Shiny app can be used to load
and explore the package results and even to allow the use of these
functions to those users non-experienced in R coding. Preprint published
in medRxiv (Sáez et al. 2020) <doi:10.1101/2020.04.07.20056564>.",2020-05-25,Carlos Sáez,http://github.com/hms-dbmi/EHRtemporalVariability,TRUE,https://github.com/hms-dbmi/ehrtemporalvariability,6641,3,2020-05-25T08:50:18Z,2213.6666666666665
EIAdata,"An R wrapper to allow the user to query categories and Series IDs, and import data, from the EIA's API <https://www.eia.gov/opendata/>.",2020-05-12,Matthew Brigida and others,https://github.com/Matt-Brigida/EIAdata,TRUE,https://github.com/matt-brigida/eiadata,24362,11,2020-05-12T18:04:26Z,2214.7272727272725
eicm,"Model fitting and species biotic interaction network topology selection for explicit
interaction community models. Explicit interaction community models are an extension of binomial
linear models for joint modelling of species communities, that incorporate both the effects of
species biotic interactions and the effects of missing covariates. Species interactions are modelled
as direct effects of each species on each of the others, and are estimated alongside the effects of
missing covariates, modelled as latent factors. The package includes a penalized maximum likelihood
fitting function, and a genetic algorithm for selecting the most parsimonious species interaction
network topology.",2020-03-26,Miguel Porto,https://github.com/miguel-porto/eicm,TRUE,https://github.com/miguel-porto/eicm,1103,2,2020-04-01T15:18:08Z,551.5
eikosograms,"An eikosogram (ancient Greek for probability picture) divides the unit square
into rectangular regions whose areas, sides, and widths, represent various probabilities
associated with the values of one or more categorical variates.
Rectangle areas are joint probabilities, widths are always marginal (though possibly joint
margins, i.e. marginal joint distributions of two or more variates), and heights of rectangles
are always conditional probabilities.
Eikosograms embed the rules of probability and are useful for introducing elementary probability
theory, including axioms, marginal, conditional, and joint probabilities, and their
relationships (including Bayes theorem as a completely trivial consequence).
They are markedly superior to Venn diagrams for this purpose, especially in distinguishing
probabilistic independence, mutually exclusive events, coincident events, and associations.
They also are useful for identifying and understanding conditional independence structure.
As data analysis tools, eikosograms display categorical data in a manner similar
to Mosaic plots, especially when only two variates are involved (the only case in which
they are essentially identical, though eikosograms purposely disallow spacing between rectangles).
Unlike Mosaic plots, eikosograms do not alternate axes as each new categorical variate
(beyond two) is introduced.
Instead, only one categorical variate, designated the ""response"", presents on the vertical axis
and all others, designated the ""conditioning"" variates, appear on the horizontal.
In this way, conditional probability appears only as height and marginal probabilities as widths.
The eikosogram is therefore much better suited to a response model analysis (e.g. logistic model)
than is a Mosaic plot.
Mosaic plots are better suited to log-linear style modelling as in discrete multivariate analysis.
Of course, eikosograms are also suited to discrete multivariate analysis with each variate in turn
appearing as the response.
This makes it better suited than Mosaic plots to discrete graphical models based on conditional
independence graphs (i.e. ""Bayesian Networks"" or ""BayesNets"").
The eikosogram and its superiority to Venn diagrams in teaching probability is described in
W.H. Cherry and R.W. Oldford (2003) <https://math.uwaterloo.ca/~rwoldfor/papers/eikosograms/paper.pdf>,
its value in exploring conditional independence structure and relation to graphical and log-linear models
is described in R.W. Oldford (2003) <https://math.uwaterloo.ca/~rwoldfor/papers/eikosograms/independence/paper.pdf>,
and a number of problems, puzzles, and paradoxes that are easily explained with eikosograms are given in
R.W. Oldford (2003) <https://math.uwaterloo.ca/~rwoldfor/papers/eikosograms/examples/paper.pdf>.",2018-08-22,Wayne Oldford,https://github.com/rwoldford/eikosograms,TRUE,https://github.com/rwoldford/eikosograms,7750,2,2019-07-20T15:30:05Z,3875
einet,"Methods and utilities for causal emergence.
Used to explore and compute various information theory metrics for networks, such as effective information, effectiveness and causal emergence.",2020-04-23,Travis Byrum,https://github.com/travisbyrum/einet,TRUE,https://github.com/travisbyrum/einet,643,1,2020-05-17T21:07:59Z,643
EIX,"Structure mining from 'XGBoost' and 'LightGBM' models.
Key functionalities of this package cover: visualisation of tree-based ensemble models,
identification of interactions, measuring of variable importance,
measuring of interaction importance, explanation of single prediction
with break down plots (based on 'xgboostExplainer' and 'iBreakDown' packages).
To download the 'LightGBM' use the following link: <https://github.com/Microsoft/LightGBM>.
'EIX' is a part of the 'DrWhy.AI' universe.",2020-02-18,Ewelina Karbowiak,https://github.com/ModelOriented/EIX,TRUE,https://github.com/modeloriented/eix,6115,9,2020-02-18T12:21:23Z,679.4444444444445
eixport,"Emissions are the mass of pollutants released into the atmosphere. Air quality models need emissions data, with spatial and temporal distribution, to represent air pollutant concentrations. This package, eixport, creates inputs for the air quality models 'WRF-Chem' Grell et al (2005) <doi:10.1016/j.atmosenv.2005.04.027>, 'BRAMS-SPM' Freitas et al (2005) <doi:10.1016/j.atmosenv.2005.07.017> and 'RLINE' Snyder et al (2013) <doi:10.1016/j.atmosenv.2013.05.074>. See the eixport website (<https://atmoschem.github.io/eixport/>) for more information, documentation and examples. More details in Ibarra-Espinosa et al (2018) <doi:10.21105/joss.00607>.",2020-04-16,Sergio Ibarra-Espinosa,https://atmoschem.github.io/eixport,TRUE,https://github.com/atmoschem/eixport,11002,9,2020-04-07T18:54:26Z,1222.4444444444443
elastic,"Connect to 'Elasticsearch', a 'NoSQL' database built on the 'Java'
Virtual Machine. Interacts with the 'Elasticsearch' 'HTTP' API
(<https://www.elastic.co/products/elasticsearch>), including functions for
setting connection details to 'Elasticsearch' instances, loading bulk data,
searching for documents with both 'HTTP' query variables and 'JSON' based body
requests. In addition, 'elastic' provides functions for interacting with API's
for 'indices', documents, nodes, clusters, an interface to the cat API, and
more.",2020-01-11,Scott Chamberlain,"https://docs.ropensci.org/elastic (website),
https://github.com/ropensci/elastic",TRUE,https://github.com/ropensci/elastic,70994,211,2020-06-01T19:15:47Z,336.46445497630333
elasticsearchr,"A lightweight R interface to 'Elasticsearch' - a NoSQL search-engine and
column store database (see <https://www.elastic.co/products/elasticsearch> for more
information). This package implements a simple Domain-Specific Language (DSL) for indexing,
deleting, querying, sorting and aggregating data using 'Elasticsearch'.",2019-07-30,Alex Ioannides,https://github.com/alexioannides/elasticsearchr,TRUE,https://github.com/alexioannides/elasticsearchr,22553,45,2019-07-30T20:36:01Z,501.1777777777778
electionsBR,"Offers a set of functions to easily download and clean
Brazilian electoral data from the Superior Electoral Court website.
Among others, the package retrieves data on local and
federal elections for all positions (city councilor, mayor, state deputy,
federal deputy, governor, and president) aggregated by
state, city, and electoral zones. ",2019-07-09,Denisson Silva,http://electionsbr.com/,TRUE,https://github.com/silvadenisson/electionsbr,16637,34,2019-07-16T19:45:03Z,489.3235294117647
electivity,"Provides all electivity algorithms (including Vanderploeg and Scavia
electivity) that were examined in Lechowicz (1982) <doi:10.1007/BF00349007>,
plus the example data that were provided for moth resource utilisation.",2019-08-20,Desi Quintans,https://github.com/DesiQuintans/electivity,TRUE,https://github.com/desiquintans/electivity,3502,0,2019-08-21T22:03:54Z,NA
elementR,Aims to facilitate the reduction of elemental microchemistry data from solid-phase LAICPMS analysis (laser ablation inductively coupled plasma mass spectrometry). The 'elementR' package provides a reactive and user friendly interface (based on a 'shiny' application) and a set of 'R6' classes for conducting all steps needed for an optimal data reduction while leaving maximum control for user.,2018-05-03,Charlotte Sirot,https://github.com/charlottesirot/elementR,TRUE,https://github.com/charlottesirot/elementr,17187,7,2020-05-26T09:04:39Z,2455.285714285714
elevatr,"Several web services are available that provide access to elevation
data. This package provides access to several of those services and
returns elevation data either as a SpatialPointsDataFrame from
point elevation services or as a raster object from raster
elevation services. Currently, the package supports access to the
Amazon Web Services Terrain Tiles <https://aws.amazon.com/public-datasets/terrain/>
and the USGS Elevation Point Query Service <http://ned.usgs.gov/epqs/>.",2018-11-28,Jeffrey Hollister,https://www.github.com/jhollist/elevatr,TRUE,https://github.com/jhollist/elevatr,24078,80,2020-05-21T20:44:40Z,300.975
elfDistr,"Density, distribution function, quantile function
and random generation for the Kumaraswamy Complementary Weibull
Geometric (Kw-CWG) lifetime probability distribution proposed
in Afify, A.Z. et al (2017) <doi:10.1214/16-BJPS322>.",2019-10-07,Matheus H. J. Saldanha,https://github.com/matheushjs/elfDistr,TRUE,https://github.com/matheushjs/elfdistr,3024,0,2020-01-24T08:44:12Z,NA
ellipsis,"The ellipsis is a powerful tool for extending functions. Unfortunately
this power comes at a cost: misspelled arguments will be silently ignored.
The ellipsis package provides a collection of functions to catch problems
and alert the user.",2020-05-15,Hadley Wickham,"https://ellipsis.r-lib.org, https://github.com/r-lib/ellipsis",TRUE,https://github.com/r-lib/ellipsis,12002228,114,2020-05-15T12:31:31Z,105282.70175438597
elliptic,"
A suite of elliptic and related functions including Weierstrass and
Jacobi forms. Also includes various tools for manipulating and
visualizing complex functions.",2019-03-14,Robin K. S. Hankin,https://github.com/RobinHankin/elliptic.git,TRUE,https://github.com/robinhankin/elliptic,489612,0,2020-02-16T04:43:51Z,NA
elo,"A flexible framework for calculating Elo ratings and resulting
rankings of any two-team-per-matchup system (chess, sports leagues, 'Go',
etc.). This implementation is capable of evaluating a variety of matchups,
Elo rating updates, and win probabilities, all based on the basic Elo
rating system. It also includes methods to benchmark performance,
including logistic regression and Markov chain models.",2020-01-14,Ethan Heinzen,"https://github.com/eheinzen/elo,
https://cran.r-project.org/package=elo,
https://eheinzen.github.io/elo/",TRUE,https://github.com/eheinzen/elo,15081,16,2020-03-09T18:15:19Z,942.5625
EloChoice,"Allows calculating global scores for characteristics of visual stimuli as assessed by human raters. Stimuli are presented as sequence of pairwise comparisons ('contests'), during each of which a rater expresses preference for one stimulus over the other (forced choice). The algorithm for calculating global scores is based on Elo rating, which updates individual scores after each single pairwise contest. Elo rating is widely used to rank chess players according to their performance. Its core feature is that dyadic contests with expected outcomes lead to smaller changes of participants' scores than outcomes that were unexpected. As such, Elo rating is an efficient tool to rate individual stimuli when a large number of such stimuli are paired against each other in the context of experiments where the goal is to rank stimuli according to some characteristic of interest. Clark et al (2018) <doi:10.1371/journal.pone.0190393> provide details.",2019-07-04,Christof Neumann,https://github.com/gobbios/EloChoice,TRUE,https://github.com/gobbios/elochoice,13209,4,2019-07-02T10:12:48Z,3302.25
EloOptimized,"Provides an implementation of the maximum likelihood methods for deriving
Elo scores as published in Foerster, Franz et al. (2016) <DOI:10.1038/srep35404>.",2018-09-17,Joseph Feldblum,https://github.com/jtfeld/EloOptimized,TRUE,https://github.com/jtfeld/elooptimized,6851,0,2020-05-02T21:59:45Z,NA
EloRating,"Provides functions to quantify animal dominance hierarchies. The major focus is on Elo rating and its ability to deal with temporal dynamics in dominance interaction sequences. For static data, David's score and de Vries' I&SI are also implemented. In addition, the package provides functions to assess transitivity, linearity and stability of dominance networks. See Neumann et al (2011) <doi:10.1016/j.anbehav.2011.07.016> for an introduction.",2020-03-12,Christof Neumann,https://github.com/gobbios/EloRating,TRUE,https://github.com/gobbios/elorating,18776,1,2020-01-14T22:17:10Z,18776
elsa,"A framework that provides the methods for quantifying entropy-based local indicator of spatial association (ELSA) that can be used for both continuous and categorical data. In addition, this package offers other methods to measure local indicators of spatial associations (LISA). Furthermore, global spatial structure can be measured using a variogram-like diagram, called entrogram. For more information, please check that paper: Naimi, B., Hamm, N. A., Groen, T. A., Skidmore, A. K., Toxopeus, A. G., & Alibakhshi, S. (2019) <doi:10.1016/j.spasta.2018.10.001>.",2020-03-19,Babak Naimi,http://r-gis.net,TRUE,https://github.com/babaknaimi/elsa,1460,3,2020-03-12T22:54:19Z,486.6666666666667
emayili,"A light, simple tool for sending emails with minimal dependencies.",2020-06-03,Andrew B. Collier,https://datawookie.github.io/emayili/,TRUE,https://github.com/datawookie/emayili,2992,48,2020-06-03T13:33:58Z,62.333333333333336
emba,"Analysis and visualization of an ensemble of boolean models for
biomarker discovery in cancer cell networks. The package allows to easily
import the data results of a software pipeline that predicts synergistic drug
combinations in cancer cell lines, developed by the DrugLogics research group
in NTNU. It has generic functions that can be used to split a boolean model
dataset to model groups with regards to the models predictive performance (number of true
positive predictions or Matthews correlation coefficient score) or synergy prediction based on a given set
of observed synergies and find the average activity difference per network
node between all group pairs. Thus, given user-specific thresholds,
important nodes (biomarkers) can be accessed in the sense that they make the
models predict specific synergies (synergy biomarkers) or have better
performance in general (performance biomarkers). Lastly, if the
boolean models have a specific equation form and differ only in their link operator,
link operator biomarkers can also be assessed.",2020-04-14,John Zobolas,https://github.com/bblodfon/emba,TRUE,https://github.com/bblodfon/emba,2782,0,2020-06-07T19:07:35Z,NA
embed,Predictors can be converted to one or more numeric representations using simple generalized linear models <arXiv:1611.09477> or nonlinear models <arXiv:1604.06737>. Most encoding methods are supervised.,2020-05-25,Max Kuhn,"https://embed.tidymodels.org, https://github.com/tidymodels/embed",TRUE,https://github.com/tidymodels/embed,10962,61,2020-05-25T16:43:47Z,179.70491803278688
EMCluster,"EM algorithms and several efficient
initialization methods for model-based clustering of finite
mixture Gaussian distribution with unstructured dispersion
in both of unsupervised and semi-supervised learning.",2019-03-22,Wei-Chen Chen,https://github.com/snoweye/EMCluster,TRUE,https://github.com/snoweye/emcluster,53271,13,2020-05-03T00:26:01Z,4097.7692307692305
emdi,"Functions that support estimating, assessing and mapping regional
disaggregated indicators. So far, estimation methods comprise direct estimation
and the model-based approach Empirical Best Prediction (see ""Small area
estimation of poverty indicators"" by Molina and Rao (2010) <doi:10.1002/cjs.10051>),
as well as their precision estimates. The assessment of the used model
is supported by a summary and diagnostic plots. For a suitable presentation of
estimates, map plots can be easily created. Furthermore, results can easily be
exported to excel. For a detailed description of the package and the methods used
see ""The {R} Package {emdi} for Estimating and Mapping Regionally Disaggregated Indicators""
by Kreutzmann et al. (2019) <doi:10.18637/jss.v091.i07>.",2020-03-18,Soeren Pannier,https://github.com/SoerenPannier/emdi,TRUE,https://github.com/soerenpannier/emdi,18961,4,2020-05-07T08:35:05Z,4740.25
EmissV,"Creates emissions for use in air quality models. Vehicular emissions
are estimated by a top-down approach, total emissions are calculated using the
statistical description of the fleet of vehicles, the emission is spatially
distributed according to satellite images or openstreetmap data
<https://www.openstreetmap.org> and then distributed temporally
(Vara-Vela et al., 2016, <doi:10.5194/acp-16-777-2016>).",2020-04-01,Daniel Schuch,https://atmoschem.github.io/EmissV,TRUE,https://github.com/atmoschem/emissv,9411,14,2020-04-01T17:21:04Z,672.2142857142857
EML,"Work with Ecological Metadata Language ('EML') files.
'EML' is a widely used metadata standard in the ecological and
environmental sciences, described in Jones et al. (2006),
<doi:10.1146/annurev.ecolsys.37.091305.110031>.",2020-02-08,Carl Boettiger,"https://docs.ropensci.org/EML, https://github.com/ropensci/EML",TRUE,https://github.com/ropensci/eml,23328,81,2020-02-20T17:00:46Z,288
emld,"This is a utility for transforming Ecological Metadata Language
('EML') files into 'JSON-LD' and back into 'EML.' Doing so creates a
list-based representation of 'EML' in R, so that 'EML' data can easily
be manipulated using standard 'R' tools. This makes this package an
effective backend for other 'R'-based tools working with 'EML.' By
abstracting away the complexity of 'XML' Schema, developers can
build around native 'R' list objects and not have to worry about satisfying
many of the additional constraints set by the schema (such as element
ordering, which is handled automatically). Additionally, the 'JSON-LD'
representation enables the use of developer-friendly 'JSON' parsing and
serialization that may facilitate the use of 'EML' in contexts outside of 'R,'
as well as the informatics-friendly serializations such as 'RDF' and
'SPARQL' queries.",2020-02-05,Carl Boettiger,"https://docs.ropensci.org/emld, https://github.com/ropensci/emld",TRUE,https://github.com/ropensci/emld,12565,10,2020-06-05T00:15:01Z,1256.5
emmeans,"Obtain estimated marginal means (EMMs) for many linear, generalized
linear, and mixed models. Compute contrasts or linear functions of EMMs,
trends, and comparisons of slopes. Plots and other displays.
Least-squares means are discussed, and the term ""estimated marginal means""
is suggested, in Searle, Speed, and Milliken (1980) Population marginal means
in the linear model: An alternative to least squares means, The American
Statistician 34(4), 216-221 <doi:10.1080/00031305.1980.10483031>.",2020-05-25,Russell Lenth,https://github.com/rvlenth/emmeans,TRUE,https://github.com/rvlenth/emmeans,922285,119,2020-06-08T19:45:17Z,7750.294117647059
emojifont,"An implementation of using emoji and fontawesome for using in both
base and 'ggplot2' graphics.",2019-12-12,Guangchuang Yu,https://guangchuangyu.github.io/emojifont,TRUE,https://github.com/guangchuangyu/emojifont,44057,57,2019-12-12T03:20:46Z,772.9298245614035
EmpiricalCalibration,"Routines for performing empirical calibration of observational
study estimates. By using a set of negative control hypotheses we can
estimate the empirical null distribution of a particular observational
study setup. This empirical null distribution can be used to compute a
calibrated p-value, which reflects the probability of observing an
estimated effect size when the null hypothesis is true taking both random
and systematic error into account. A similar approach can be used to
calibrate confidence intervals, using both negative and positive controls.",2020-04-07,Martijn Schuemie,"https://ohdsi.github.io/EmpiricalCalibration,
https://github.com/OHDSI/EmpiricalCalibration",TRUE,https://github.com/ohdsi/empiricalcalibration,20205,6,2020-06-08T06:42:43Z,3367.5
emstreeR,"Computes Euclidean Minimum Spanning Trees (EMST) using the fast
Dual-Tree Boruvka algorithm (March, Ram, Gray, 2010,
<doi:10.1145/1835804.1835882>) implemented in 'mlpack' - the C++ Machine
Learning library (Curtin et al., 2013). 'emstreeR' heavily relies on
'RcppMLPACK' and 'Rcpp', working as a wrapper to the C++ fast EMST
algorithm. Thus, R users do not have to deal with the R-'Rcpp'-C++
integration. The package also provides functions and an S3 method for
readily plotting Minimum Spanning Trees (MST) using either 'base' R,
'scatterplot3d' or 'ggplot2' style.",2019-05-08,Allan Quadros,NA,TRUE,https://github.com/allanvc/emstreer,7929,3,2019-08-20T21:24:09Z,2643
emuR,"Provides the next iteration of the EMU Speech
Database Management System (EMU-SDMS) with database management, data
extraction, data preparation and data visualization facilities.",2019-11-06,Raphael Winkelmann,https://github.com/IPS-LMU/emuR,TRUE,https://github.com/ips-lmu/emur,28793,18,2020-06-08T15:55:02Z,1599.611111111111
enc,"
Implements an S3 class for storing 'UTF-8' strings, based on regular character vectors.
Also contains routines to portably read and write 'UTF-8' encoded text files,
to convert all strings in an object to 'UTF-8',
and to create character vectors with various encodings.",2019-12-19,Kirill Müller,https://github.com/krlmlr/enc,TRUE,https://github.com/krlmlr/enc,63654,11,2019-12-19T13:57:53Z,5786.727272727273
encryptr,"It is important to ensure that sensitive data is protected.
This straightforward package is aimed at the end-user.
Strong RSA encryption using a public/private key pair is used to encrypt data frame or tibble columns.
A public key can be shared to allow others to encrypt data to be sent to you.
This is particularly aimed at healthcare settings so patient data can be pseudonymised. ",2019-04-25,Ewen Harrison,https://github.com/SurgicalInformatics/encryptr,TRUE,https://github.com/surgicalinformatics/encryptr,7133,78,2019-11-07T12:12:44Z,91.44871794871794
endoSwitch,Maximum likelihood estimation of endogenous switching regression models from Heckman (1979) <doi:10.2307/1912352> and estimation of treatment effects. ,2020-02-21,Bowen Chen,https://github.com/cbw1243/endoSwitch,TRUE,https://github.com/cbw1243/endoswitch,1285,0,2020-01-17T15:35:22Z,NA
energy,"E-statistics (energy) tests and statistics for multivariate and univariate inference,
including distance correlation, one-sample, two-sample, and multi-sample tests for
comparing multivariate distributions, are implemented. Measuring and testing
multivariate independence based on distance correlation, partial distance correlation,
multivariate goodness-of-fit tests, k-groups and hierarchical clustering based on energy
distance, testing for multivariate normality, distance components (disco) for non-parametric
analysis of structured data, and other energy statistics/methods are implemented.",2019-12-07,Maria Rizzo,https://github.com/mariarizzo/energy,TRUE,https://github.com/mariarizzo/energy,315151,23,2019-12-08T08:48:48Z,13702.217391304348
enpls,"An algorithmic framework for measuring feature importance,
outlier detection, model applicability domain evaluation,
and ensemble predictive modeling with (sparse)
partial least squares regressions.",2019-05-18,Nan Xiao,"https://nanx.me/enpls/, https://github.com/nanxstats/enpls",TRUE,https://github.com/nanxstats/enpls,46025,13,2020-04-23T23:02:01Z,3540.3846153846152
enrichwith,"Provides the ""enrich"" method to enrich list-like R objects with new, relevant components. The current version has methods for enriching objects of class 'family', 'link-glm', 'lm', 'glm' and 'betareg'. The resulting objects preserve their class, so all methods associated with them still apply. The package also provides the 'enriched_glm' function that has the same interface as 'glm' but results in objects of class 'enriched_glm'. In addition to the usual components in a `glm` object, 'enriched_glm' objects carry an object-specific simulate method and functions to compute the scores, the observed and expected information matrix, the first-order bias, as well as model densities, probabilities, and quantiles at arbitrary parameter values. The package can also be used to produce customizable source code templates for the structured implementation of methods to compute new components and enrich arbitrary objects.",2020-01-10,Ioannis Kosmidis,https://github.com/ikosmidis/enrichwith,TRUE,https://github.com/ikosmidis/enrichwith,27499,5,2019-12-27T15:47:42Z,5499.8
ensr,"Elastic net regression models are controlled by two parameters,
lambda, a measure of shrinkage, and alpha, a metric defining the model's
location on the spectrum between ridge and lasso regression.
glmnet provides tools for selecting lambda via cross
validation but no automated methods for selection of alpha. Elastic Net
SearcheR automates the simultaneous selection of both lambda and alpha.
Developed, in part, with support by NICHD R03 HD094912.",2019-01-21,Peter DeWitt,https://github.com/dewittpe/ensr,TRUE,https://github.com/dewittpe/ensr,5316,0,2020-03-02T20:50:34Z,NA
entropart,"Measurement and partitioning of diversity, based on Tsallis entropy, following Marcon and Herault (2015) <doi:10.18637/jss.v067.i08>.
'entropart' provides functions to calculate alpha, beta and gamma diversity of communities, including phylogenetic and functional diversity.
Estimation-bias corrections are available.",2020-01-22,Eric Marcon,https://github.com/EricMarcon/entropart,TRUE,https://github.com/ericmarcon/entropart,36765,3,2020-04-18T21:20:29Z,12255
envalysis,"Small toolbox for data analyses in environmental chemistry and
ecotoxicology. Provides, for example, calibration() to calculate calibration
curves and corresponding limits of detection (LODs) and quantification
(LOQs) according to German DIN 32645:2008-11. texture() makes it easy to
estimate soil particle size distributions from hydrometer measurements (ASTM
D422-63(2007)e2).",2020-04-17,Zacharias Steinmetz,https://github.com/zsteinmetz/envalysis,TRUE,https://github.com/zsteinmetz/envalysis,8019,0,2020-04-17T16:59:31Z,NA
envDocument,"Prints out information about the R working environment
(system, R version,loaded and attached packages and versions) from a single
function ""env_doc()"". Optionally adds information on git repository,
tags, commits and remotes (if available).",2019-08-19,Donald Jackson,https://github.com/dgJacks0n/envDocument,TRUE,https://github.com/dgjacks0n/envdocument,16932,1,2019-09-04T12:47:34Z,16932
enviGCMS,"Gas/Liquid Chromatography-Mass Spectrometer (GC/LC-MS) Data Analysis for Environmental Science. This package covers topics such as molecular isotope ratio, matrix effects and Short-Chain Chlorinated Paraffins analysis etc. in environmental analysis.",2020-06-04,Miao YU,https://github.com/yufree/enviGCMS,TRUE,https://github.com/yufree/envigcms,13602,6,2020-06-04T14:28:02Z,2267
envirem,Generation of bioclimatic rasters that are complementary to the typical 19 bioclim variables. ,2020-06-04,Pascal O. Title,http://envirem.github.io,TRUE,https://github.com/ptitle/envirem,16788,5,2020-06-03T22:21:26Z,3357.6
envnames,"Set of functions to keep track of user-defined environment names
(which cannot be retrieved with the built-in function environmentName()).
The package also provides functionality to search for objects in environments,
deal with function calling chains, and retrieve an object's
memory address.",2019-01-04,Daniel Mastropietro,https://github.com/mastropi/envnames,TRUE,https://github.com/mastropi/envnames,7521,0,2019-08-05T08:57:31Z,NA
EPGMr,"Everglades Phosphorus Gradient Model predicts variations in water-column P concentration, peat accretion rate, and soil P concentration along a horizontal gradient imposed by an external phosphorus load and sheet-flow conditions. Potential biological responses are expressed in terms of marsh surface area exceeding threshold criteria for water-column and soil phosphorus concentrations. More information of the model can be found at <http://www.wwwalker.net/epgm/>.",2020-05-05,Paul Julian,https://github.com/swampthingpaul/EPGMr,TRUE,https://github.com/swampthingpaul/epgmr,495,0,2020-06-04T11:31:11Z,NA
eph,"Tools to download and manipulate the Permanent Household Survey from Argentina
(EPH is the Spanish acronym for Permanent Household Survey).
e.g: get_microdata() for downloading the datasets, get_poverty_lines() for downloading the official poverty baskets,
calculate_poverty() for the calculation of stating if a household is in poverty or not, following the official methodology.
organize_panels() is used to concatenate observations from different periods, and organize_labels()
adds the official labels to the data. The implemented methods are based on INDEC (2016) <http://www.estadistica.ec.gba.gov.ar/dpe/images/SOCIEDAD/EPH_metodologia_22_pobreza.pdf>.
As this package works with the argentinian Permanent Household Survey and its main audience is from this country,
the documentation was written in Spanish.",2020-05-24,Diego Kozlowski,https://github.com/holatam/eph,TRUE,https://github.com/holatam/eph,5803,23,2020-05-24T08:39:38Z,252.30434782608697
epicontacts,"A collection of tools for representing epidemiological contact data, composed of case line lists and contacts between cases. Also contains procedures for data handling, interactive graphics, and statistics.",2017-11-21,VP Nagraj,http://www.repidemicsconsortium.org/epicontacts/,TRUE,https://github.com/reconhub/epicontacts,14109,8,2020-02-14T13:03:23Z,1763.625
EpiContactTrace,"Routines for epidemiological contact tracing
and visualisation of network of contacts.",2020-03-15,Stefan Widgren,https://github.com/stewid/EpiContactTrace,TRUE,https://github.com/stewid/epicontacttrace,27680,3,2020-03-03T10:18:11Z,9226.666666666666
EpiILM,Provides tools for simulating from discrete-time individual level models for infectious disease data analysis. This epidemic model class contains spatial and contact-network based models with two disease types: Susceptible-Infectious (SI) and Susceptible-Infectious-Removed (SIR).,2020-03-12,Vineetha Warriyar. K. V.,https://github.com/vineetha-warriyar/EpiILM,TRUE,https://github.com/vineetha-warriyar/epiilm,15856,2,2020-05-21T22:38:27Z,7928
EpiILMCT,"Provides tools for simulating from continuous-time individual level models of disease transmission, and carrying out infectious disease data analyses with the same models. The epidemic models considered are distance-based and/or contact network-based models within Susceptible-Infectious-Removed (SIR) or Susceptible-Infectious-Notified-Removed (SINR) compartmental frameworks. An overview of the implemented continuous-time individual level models for epidemics is given by Almutiry and Deardon (2019) <doi:10.1515/ijb-2017-0092>.",2020-01-21,Waleed Almutiry,https://github.com/waleedalmutiry/EpiILMCT/,TRUE,https://github.com/waleedalmutiry/epiilmct,12776,2,2020-01-21T21:51:20Z,6388
epikit,"Contains tools for formatting inline code, renaming redundant
columns, aggregating age categories, and calculating proportions with
confidence intervals. This is part of the 'R4Epis' project
<https://r4epis.netlify.com>.",2020-04-13,Zhian N. Kamvar,"https://github.com/R4EPI/epikit, https://r4epis.netlify.com,
https://r4epi.github.io/epikit",TRUE,https://github.com/r4epi/epikit,1860,1,2020-04-10T22:59:41Z,1860
epimdr,"Functions, data sets and shiny apps for ""Epidemics: Models and Data in R"" by Ottar N. Bjornstad (ISBN 978-3-319-97487-3) <https://www.springer.com/gp/book/9783319974866>. The package contains functions to study the S(E)IR model, spatial and age-structured SIR models; time-series SIR and chain-binomial stochastic models; catalytic disease models; coupled map lattice models of spatial transmission and network models for social spread of infection. The package is also an advanced quantitative companion to the coursera Epidemics Massive Online Open Course <https://www.coursera.org/learn/epidemics>.",2020-01-25,Ottar N. Bjornstad,"https://github.com/objornstad/epimdr,
https://www.springer.com/gp/book/9783319974866,
http://ento.psu.edu/directory/onb1",TRUE,https://github.com/objornstad/epimdr,9770,36,2020-02-13T21:29:42Z,271.3888888888889
EpiModel,"Tools for simulating mathematical models of infectious disease dynamics.
Epidemic model classes include deterministic compartmental models, stochastic
individual-contact models, and stochastic network models. Network models use the
robust statistical methods of exponential-family random graph models (ERGMs)
from the Statnet suite of software packages in R. Standard templates for epidemic
modeling include SI, SIR, and SIS disease types. EpiModel features
an API for extending these templates to address novel scientific research aims.",2020-05-08,Samuel Jenness,"http://epimodel.org/, http://github.com/statnet/EpiModel",TRUE,https://github.com/statnet/epimodel,108537,127,2020-05-21T17:00:59Z,854.6220472440945
episensr,"Basic sensitivity analysis of the observed relative risks adjusting
for unmeasured confounding and misclassification of the
exposure/outcome, or both. It follows the bias analysis methods and
examples from the book by Lash T.L, Fox M.P, and Fink A.K.
""Applying Quantitative Bias Analysis to Epidemiologic Data"",
('Springer', 2009).",2020-03-06,Denis Haine,NA,TRUE,https://github.com/dhaine/episensr,22722,5,2020-03-17T03:30:54Z,4544.4
episheet,"A collection of R functions supporting the text book
Modern Epidemiology, Second Edition, by Kenneth J.Rothman and Sander Greenland.
ISBN 13: 978-0781755641 See <http://www.krothman.org/> for more information.",2019-01-23,James Black,https://github.com/epijim/episheet,TRUE,https://github.com/epijim/episheet,13745,3,2020-05-14T14:25:58Z,4581.666666666667
epitrix,"A collection of small functions useful for epidemics analysis and infectious disease modelling. This includes computation of basic reproduction numbers from growth rates, generation of hashed labels to anonymise data, and fitting discretised Gamma distributions.",2019-01-15,Thibaut Jombart,http://www.repidemicsconsortium.org/epitrix,TRUE,https://github.com/reconhub/epitrix,25111,8,2019-09-23T06:51:19Z,3138.875
eplusr,"A rich toolkit of using the whole building
simulation program 'EnergyPlus'(<https://energyplus.net>), which
enables programmatic navigation, modification of 'EnergyPlus' models
and makes it less painful to do parametric simulations and analysis.",2020-02-20,Hongyuan Jia,"https://hongyuanjia.github.io/eplusr,
https://github.com/hongyuanjia/eplusr",TRUE,https://github.com/hongyuanjia/eplusr,16234,24,2020-06-09T23:57:00Z,676.4166666666666
eponge,"Provides a set of functions, which facilitates removing objects from an environment.
It allows to delete objects specified with regular expression or with other conditions (e.g. if object is numeric),
using one function call. ",2020-03-24,Krzysztof Joachimiak,"https://github.com/krzjoa/eponge, https://krzjoa.github.io/eponge/",TRUE,https://github.com/krzjoa/eponge,1141,0,2020-03-26T19:21:30Z,NA
epsiwal,"Implements the conditional estimation procedure of
Lee, Sun, Sun and Taylor (2016) <doi:10.1214/15-AOS1371>.
This procedure allows hypothesis testing on the mean of
a normal random vector subject to linear constraints.",2019-07-02,Steven E. Pav,https://github.com/shabbychef/epsiwal,TRUE,https://github.com/shabbychef/epsiwal,3907,0,2019-06-29T04:35:20Z,NA
epuR,"Provides functions to collect the economic policy uncertainty and related index data from the official
website <https://www.policyuncertainty.com/index.html> in real time. Deals with date format and returns a time series object to
facilitate further data manipulation and analysis.",2020-04-10,Lingbing Feng,https://github.com/Lingbing/epuR,TRUE,https://github.com/lingbing/epur,930,1,2020-04-29T15:19:41Z,930
eq5d,"EQ-5D is a popular health related quality of life instrument used
in the clinical and economic evaluation of health care. Developed by the
EuroQol group <https://www.euroqol.org>, the instrument consists of two
components: health state description and evaluation. For the description
component a subject self-rates their health in terms of five dimensions;
mobility, self-care, usual activities, pain/discomfort, and
anxiety/depression using either a three-level (EQ-5D-3L,
<https://www.euroqol.org/eq-5d-instruments/eq-5d-3l-about>) or a five-level
(EQ-5D-5L, <https://www.euroqol.org/eq-5d-instruments/eq-5d-5l-about>)
scale. Frequently the scores on these five dimensions are converted to a
single utility index using country specific value sets, which can be used
in the clinical and economic evaluation of health care as well as in
population health surveys. The eq5d package provides methods to calculate
index scores from a subject's dimension scores. 25 TTO and 11 VAS EQ-5D-3L value
sets including those for countries in Szende et al (2007)
<doi:10.1007/1-4020-5511-0> and Szende et al (2014)
<doi:10.1007/978-94-007-7596-1>, 21 EQ-5D-5L EQ-VT value sets from the EuroQol
website, and the EQ-5D-5L crosswalk value sets developed by van Hout et al. (2012)
<doi:10.1016/j.jval.2012.02.008> are included. Additionally, a shiny web tool is
included to enable the calculation, visualisation and automated statistical
analysis of EQ-5D index values via a web browser using EQ-5D dimension scores
stored in CSV or Excel files. ",2020-06-07,Fraser Morton,https://github.com/fragla/eq5d,TRUE,https://github.com/fragla/eq5d,6166,6,2020-06-06T13:35:24Z,1027.6666666666667
equaltestMI,"Functions for examining measurement invariance via equivalence testing are included in this package. The traditionally used RMSEA (Root Mean Square Error of Approximation) cutoff values are adjusted based on simulation results. In addition, a projection-based method is implemented to test the equality of latent factor means across groups without assuming the equality of intercepts. For more information, see Yuan, K. H., & Chan, W. (2016) <doi:10.1037/met0000080>, Deng, L., & Yuan, K. H. (2016) <doi:10.1007/s11336-015-9491-8>, and Jiang, G., Mai, Y., & Yuan, K. H. (2017) < doi:10.3389/fpsyg.2017.01823>. ",2020-06-05,Ge Jiang,NA,TRUE,https://github.com/gabriellajg/equaltestmi,9918,1,2020-06-05T17:04:43Z,9918
equate,"Contains methods for observed-score linking
and equating under the single-group, equivalent-groups,
and nonequivalent-groups with anchor test(s) designs.
Equating types include identity, mean, linear, general
linear, equipercentile, circle-arc, and composites of
these. Equating methods include synthetic, nominal
weights, Tucker, Levine observed score, Levine true
score, Braun/Holland, frequency estimation, and chained
equating. Plotting and summary methods, and methods for
multivariate presmoothing and bootstrap error estimation
are also provided.",2018-04-08,Anthony Albano,https://github.com/talbano/equate,TRUE,https://github.com/talbano/equate,37968,2,2020-03-04T17:44:35Z,18984
ergm,An integrated set of tools to analyze and simulate networks based on exponential-family random graph models (ERGMs). 'ergm' is a part of the Statnet suite of packages for network analysis.,2019-06-10,Pavel N. Krivitsky,https://statnet.org,TRUE,https://github.com/statnet/ergm,320683,44,2020-06-01T11:00:10Z,7288.25
ergm.ego,Utilities for managing egocentrically sampled network data and a wrapper around the 'ergm' package to facilitate ERGM inference and simulation from such data.,2019-05-31,Pavel N. Krivitsky,https://statnet.org,TRUE,https://github.com/statnet/ergm.ego,31002,9,2020-04-13T09:40:48Z,3444.6666666666665
ergmito,"Simulation and estimation of Exponential Random Graph Models (ERGMs)
for small networks using exact statistics. As a difference from the 'ergm'
package, 'ergmito' circumvents using Markov-Chain Maximum Likelihood Estimator
(MC-MLE) and instead uses Maximum Likelihood Estimator (MLE) to fit ERGMs
for small networks. As exhaustive enumeration is computationally feasible for
small networks, this R package takes advantage of this and provides tools for
calculating likelihood functions, and other relevant functions, directly,
meaning that in many cases both estimation and simulation of ERGMs for
small networks can be faster and more accurate than simulation-based
algorithms.",2020-02-12,George Vega Yon,https://muriteams.github.io/ergmito,TRUE,https://github.com/muriteams/ergmito,2324,5,2020-05-18T18:42:28Z,464.8
err,"Messages should provide users with readable information
about R objects without flooding their console.
'cc()' concatenates vector and data frame values
into a grammatically correct string using commas, an ellipsis and conjunction.
'cn()' allows the user to define a string which varies based on a count.
'co()' combines the two to produce a customizable object aware string.
The package further facilitates this process by providing five 'sprintf'-like
types such as '%n' for the length of an object and '%o' for its name as
well as wrappers for pasting objects and issuing errors, warnings and messages.",2019-04-25,Joe Thorley,https://github.com/poissonconsulting/err,TRUE,https://github.com/poissonconsulting/err,14532,6,2020-05-08T01:12:39Z,2422
errorist,"Provides environment hooks that obtain errors and warnings which
occur during the execution of code to automatically search for solutions.",2020-02-24,James Balamuta,https://github.com/r-assist/errorist,TRUE,https://github.com/r-assist/errorist,9099,18,2020-02-23T21:28:12Z,505.5
errorlocate,Errors in data can be located and removed using validation rules from package 'validate'.,2020-02-06,Edwin de Jonge,https://github.com/data-cleaning/errorlocate,TRUE,https://github.com/data-cleaning/errorlocate,15714,12,2020-02-18T09:01:59Z,1309.5
errors,"Support for measurement errors in R vectors, matrices and arrays:
automatic uncertainty propagation and reporting.
Documentation about 'errors' is provided in the paper by Ucar, Pebesma &
Azcorra (2018, <doi:10.32614/RJ-2018-075>), included in this package as a
vignette; see 'citation(""errors"")' for details.",2020-01-08,Iñaki Ucar,https://github.com/r-quantities/errors,TRUE,https://github.com/r-quantities/errors,18613,28,2020-06-08T17:59:43Z,664.75
errum,"Perform a Bayesian estimation of the exploratory reduced
reparameterized unified model (ErRUM) described by Culpepper and Chen (2018)
<doi:10.3102/1076998618791306>.",2020-03-20,James Joseph Balamuta,https://github.com/tmsalab/errum,TRUE,https://github.com/tmsalab/errum,1397,0,2020-03-20T02:39:55Z,NA
eRTG3D,"Creates realistic random trajectories in a 3-D space between two given fix points, so-called conditional empirical random walks (CERWs). The trajectory generation is based on empirical distribution functions extracted from observed trajectories (training data) and thus reflects the geometrical movement characteristics of the mover. A digital elevation model (DEM), representing the Earth's surface, and a background layer of probabilities (e.g. food sources, uplift potential, waterbodies, etc.) can be used to influence the trajectories.
Unterfinger M (2018). ""3-D Trajectory Simulation in Movement Ecology: Conditional Empirical Random Walk"". Master's thesis, University of Zurich. <https://www.geo.uzh.ch/dam/jcr:6194e41e-055c-4635-9807-53c5a54a3be7/MasterThesis_Unterfinger_2018.pdf>.
Technitis G, Weibel R, Kranstauber B, Safi K (2016). ""An algorithm for empirically informed random trajectory generation between two endpoints"". GIScience 2016: Ninth International Conference on Geographic Information Science, 9, online. <doi:10.5167/uzh-130652>.",2019-09-19,Merlin Unterfinger,"https://munterfinger.github.io/eRTG3D/,
https://github.com/munterfinger/eRTG3D",TRUE,https://github.com/munterfinger/ertg3d,3188,2,2019-12-21T13:01:36Z,1594
esaddle,Tools for fitting the Extended Empirical Saddlepoint (EES) density of Fasiolo et al. (2018) <doi:10.1214/18-EJS1433>.,2020-01-10,Matteo Fasiolo and Simon N. Wood,https://github.com/mfasiolo/esaddle,TRUE,https://github.com/mfasiolo/esaddle,11521,1,2020-01-10T16:13:51Z,11521
esaps,"It allows to construct two types of indicators used in the study of
Electoral Systems and Party Systems starting from electoral results data.
The Effective Number of Parties (Laakso and Taagepera (1979) <doi:10.1177/001041407901200101>)
and Electoral Volatility in its three versions (Pedersen (1979) <doi:10.1111/j.1475-6765.1979.tb01267.x>,
Powell and Tucker (2014) <doi:10.1017/S0007123412000531> and Torcal and Lago (2015, ISBN:9788415260356)).",2018-03-15,Nicolas Schmidt,https://github.com/Nicolas-Schmidt/esaps,TRUE,https://github.com/nicolas-schmidt/esaps,7803,3,2020-04-20T15:10:51Z,2601
esc,"Implementation of the web-based 'Practical Meta-Analysis Effect Size
Calculator' from David B. Wilson (<http://www.campbellcollaboration.org/escalc/html/EffectSizeCalculator-Home.php>)
in R. Based on the input, the effect size can be returned as standardized mean
difference, Cohen's f, Hedges' g, Pearson's r or Fisher's
transformation z, odds ratio or log odds, or eta squared effect size.",2019-12-04,Daniel Lüdecke,https://strengejacke.github.io/esc,TRUE,https://github.com/strengejacke/esc,30315,12,2020-05-28T14:30:53Z,2526.25
eSDM,"A tool which allows users to create and evaluate ensembles
of species distribution model (SDM) predictions.
Functionality is offered through R functions or a GUI (R Shiny app).
This tool can assist users in identifying spatial uncertainties and
making informed conservation and management decisions. The package is
further described in Woodman et al (2019) <doi:10.1111/2041-210X.13283>.",2020-04-26,Sam Woodman,"https://smwoodman.github.io/eSDM,
https://github.com/smwoodman/eSDM",TRUE,https://github.com/smwoodman/esdm,4677,4,2020-04-26T21:18:00Z,1169.25
esmprep,"Support in preparing a raw ESM dataset for statistical analysis. Preparation includes the handling of errors (mostly due to technological reasons) and the generating of new variables that are necessary and/or helpful in meeting the conditions when statistically analyzing ESM data. The functions in 'esmprep' are meant to hierarchically lead from bottom, i.e. the raw (separated) ESM dataset(s), to top, i.e. a single ESM dataset ready for statistical analysis. This hierarchy evolved out of my personal experience in working with ESM data.",2019-07-05,Marcel Miché,https://github.com/mmiche/esmprep,TRUE,https://github.com/mmiche/esmprep,11702,1,2019-07-05T11:15:49Z,11702
esquisse,"A 'shiny' gadget to create 'ggplot2' charts interactively with drag-and-drop to map your variables.
You can quickly visualize your data accordingly to their type, export to 'PNG' or 'PowerPoint',
and retrieve the code to reproduce the chart.",2020-01-27,Victor Perrier,https://github.com/dreamRs/esquisse,TRUE,https://github.com/dreamrs/esquisse,100050,923,2020-05-29T12:13:44Z,108.39653304442037
ess,"An implementation of the ESS algorithm following Amol Deshpande, Minos Garofalakis,
Michael I Jordan (2013) <arXiv:1301.2267>. The ESS algorithm
is used for model selection in decomposable graphical models.",2020-05-24,Mads Lindskou,https://github.com/mlindsk/ess,TRUE,https://github.com/mlindsk/ess,2312,0,2020-05-24T16:04:31Z,NA
essurvey,Download data from the European Social Survey directly from their website <http://www.europeansocialsurvey.org/>. There are two families of functions that allow you to download and interactively check all countries and rounds available.,2019-12-11,Jorge Cimentada,"https://docs.ropensci.org/essurvey/,
https://github.com/ropensci/essurvey",TRUE,https://github.com/ropensci/essurvey,13053,33,2020-05-24T09:01:09Z,395.54545454545456
estatapi,"Provides an interface to e-Stat API, the one-stop service for official statistics of the Japanese government.",2020-04-12,Hiroaki Yutani,https://yutannihilation.github.io/estatapi/,TRUE,https://github.com/yutannihilation/estatapi,16755,14,2020-04-12T06:19:38Z,1196.7857142857142
EstimationTools,"A routine for parameter estimation for any probability density or
mass function implemented in R via maximum likelihood (ML) given a data set. This
routine is a wrapper function specifically developed for ML estimation. There are
included optimization procedures such as 'nlminb' and 'optim' from base package,
and 'DEoptim' Mullen (2011) <doi: 10.18637/jss.v040.i06>. Standard errors are
estimated with 'numDeriv' Gilbert (2011) <http://CRAN.R-project.org/package=numDeriv>
or the option 'Hessian = TRUE' of 'optim' function.",2019-10-24,Jaime Mosquera,"https://jaimemosg.github.io/EstimationTools/,
https://github.com/Jaimemosg/EstimationTools",TRUE,https://github.com/jaimemosg/estimationtools,5388,1,2020-04-14T18:04:50Z,5388
estimatr,"Fast procedures for small set of commonly-used, design-appropriate estimators with robust standard errors and confidence intervals. Includes estimators for linear regression, instrumental variables regression, difference-in-means, Horvitz-Thompson estimation, and regression improving precision of experimental estimates by interacting treatment with centered pre-treatment covariates introduced by Lin (2013) <doi:10.1214/12-AOAS583>.",2020-03-19,Graeme Blair,"https://declaredesign.org/r/estimatr/,
https://github.com/DeclareDesign/estimatr",TRUE,https://github.com/declaredesign/estimatr,62302,104,2020-06-09T17:39:14Z,599.0576923076923
estudy2,"An implementation of a most commonly used event study methodology,
including both parametric and nonparametric tests. It contains various
aspects of the rate of return estimation (the core calculation is done in
C++), as well as three classical for event study market models: mean
adjusted returns, market adjusted returns and single-index market models.
There are 6 parametric and 6 nonparametric tests provided, which examine
cross-sectional daily abnormal return (see the documentation of the
functions for more information). Parametric tests include tests proposed by
Brown and Warner (1980) <DOI:10.1016/0304-405X(80)90002-1>, Brown and Warner
(1985) <DOI:10.1016/0304-405X(85)90042-X>, Boehmer et al. (1991)
<DOI:10.1016/0304-405X(91)90032-F>, Patell (1976) <DOI:10.2307/2490543>, and
Lamb (1995) <DOI:10.2307/253695>. Nonparametric tests covered in estudy2 are
tests described in Corrado and Zivney (1992) <DOI:10.2307/2331331>,
McConnell and Muscarella (1985) <DOI:10.1016/0304-405X(85)90006-6>,
Boehmer et al. (1991) <DOI:10.1016/0304-405X(91)90032-F>, Cowan (1992)
<DOI:10.1007/BF00939016>, Corrado (1989) <DOI:10.1016/0304-405X(89)90064-0>,
Campbell and Wasley (1993) <DOI:10.1016/0304-405X(93)90025-7>, Savickas (2003)
<DOI:10.1111/1475-6803.00052>, Kolari and Pynnonen (2010)
<DOI:10.1093/rfs/hhq072>. Furthermore, tests for the cumulative
abnormal returns proposed by Brown and Warner (1985)
<DOI:10.1016/0304-405X(85)90042-X> and Lamb (1995) <DOI:10.2307/253695>
are included.",2020-04-30,Iegor Rudnytskyi,"http://github.com/irudnyts/estudy2,
http://irudnyts.github.io/estudy2/",TRUE,https://github.com/irudnyts/estudy2,12639,1,2020-04-30T18:22:04Z,12639
esvis,"A variety of methods are provided to estimate and visualize
distributional differences in terms of effect sizes. Particular emphasis
is upon evaluating differences between two or more distributions across
the entire scale, rather than at a single point (e.g., differences in
means). For example, Probability-Probability (PP) plots display the
difference between two or more distributions, matched by their empirical
CDFs (see Ho and Reardon, 2012; <doi:10.3102/1076998611411918>), allowing
for examinations of where on the scale distributional differences are
largest or smallest. The area under the PP curve (AUC) is an effect-size
metric, corresponding to the probability that a randomly selected
observation from the x-axis distribution will have a higher value
than a randomly selected observation from the y-axis distribution.
Binned effect size plots are also available, in which the distributions
are split into bins (set by the user) and separate effect sizes (Cohen's
d) are produced for each bin - again providing a means to evaluate the
consistency (or lack thereof) of the difference between two or more
distributions at different points on the scale. Evaluation of empirical
CDFs is also provided, with built-in arguments for providing annotations
to help evaluate distributional differences at specific points (e.g.,
semi-transparent shading). All functions take a consistent argument
structure. Calculation of specific effect sizes is also possible. The
following effect sizes are estimable: (a) Cohen's d, (b) Hedges' g,
(c) percentage above a cut, (d) transformed (normalized) percentage above
a cut, (e) area under the PP curve, and (f) the V statistic (see Ho,
2009; <doi:10.3102/1076998609332755>), which essentially transforms the
area under the curve to standard deviation units. By default, effect sizes
are calculated for all possible pairwise comparisons, but a reference
group (distribution) can be specified.",2020-04-30,Daniel Anderson,https://github.com/datalorax/esvis,TRUE,https://github.com/datalorax/esvis,10884,43,2020-04-30T20:03:53Z,253.11627906976744
ETAS,"Fits the space-time Epidemic Type Aftershock Sequence
('ETAS') model to earthquake catalogs using a stochastic 'declustering'
approach. The 'ETAS' model is a 'spatio-temporal' marked point process
model and a special case of the 'Hawkes' process. The package is based
on a Fortran program by 'Jiancang Zhuang'
(available at <http://bemlar.ism.ac.jp/zhuang/software.html>),
which is modified and translated into C++ and C such that it
can be called from R. Parallel computing with 'OpenMP' is possible
on supported platforms.",2019-01-25,Abdollah Jalilian,https://github.com/jalilian/ETAS,TRUE,https://github.com/jalilian/etas,26345,6,2019-10-15T15:42:51Z,4390.833333333333
ethnobotanyR,"An implementation of the quantitative ethnobotany indices in R. The goal is to provide an easy-to-use platform for ethnobotanists to assess the cultural significance of plant species based on informant consensus. The package closely follows the paper by Tardio and Pardo-de-Santayana (2008). Tardio, J., and M. Pardo-de-Santayana, 2008. Cultural Importance Indices: A Comparative Analysis Based on the Useful Wild Plants of Southern Cantabria (Northern Spain) 1. Economic Botany, 62(1), 24-39. <doi:10.1007/s12231-007-9004-5>.",2020-01-30,Cory Whitney,https://CRAN.R-project.org/package=ethnobotanyR,TRUE,https://github.com/cwwhitney/ethnobotanyr,8489,3,2020-06-09T11:36:26Z,2829.6666666666665
etl,"A predictable and pipeable framework for performing ETL
(extract-transform-load) operations on publicly-accessible medium-sized data
set. This package sets up the method structure and implements generic
functions. Packages that depend on this package download specific data sets
from the Internet, clean them up, and import them into a local or remote
relational database management system.",2020-06-02,Ben Baumer,http://github.com/beanumber/etl,TRUE,https://github.com/beanumber/etl,22725,109,2020-06-04T19:35:30Z,208.4862385321101
ETLUtils,"Provides functions to facilitate the use of the 'ff' package
in interaction with big data in 'SQL' databases (e.g. in 'Oracle', 'MySQL',
'PostgreSQL', 'Hive') by allowing easy importing directly into 'ffdf' objects
using 'DBI', 'RODBC' and 'RJDBC'. Also contains some basic utility functions to
do fast left outer join merging based on 'match', factorisation of data and a
basic function for re-coding vectors.",2018-01-25,Jan Wijffels,https://github.com/jwijffels/ETLUtils,TRUE,https://github.com/jwijffels/etlutils,31876,16,2020-04-28T21:29:12Z,1992.25
eudract,"The remit of the European Clinical Trials Data Base (EudraCT <https://eudract.ema.europa.eu/> ) is to provide open access to summaries of all registered clinical trial results; thus aiming to prevent non-reporting of negative results and provide open-access to results to inform future research. The amount of information required and the format of the results, however, imposes a large extra workload at the end of studies on clinical trial units. In particular, the adverse-event-reporting component requires entering: each unique combination of treatment group and safety event; for every such event above, a further 4 pieces of information (body system, number of occurrences, number of subjects, number exposed) for non-serious events, plus an extra three pieces of data for serious adverse events (numbers of causally related events, deaths, causally related deaths). This package prepares the required statistics needed by EudraCT and formats them into the precise requirements to directly upload an XML file into the web portal, with no further data entry by hand.",2020-04-06,Simon Bond,https://eudract-tool.medschl.cam.ac.uk/,TRUE,https://github.com/shug0131/eudract,3313,2,2020-04-03T08:09:19Z,1656.5
eulerr,"Generate area-proportional Euler diagrams
using numerical optimization. An Euler diagram is a generalization of a Venn
diagram, relaxing the criterion that all interactions need to be
represented. Diagrams may be fit with ellipses and circles via
a wide range of inputs and can be visualized in numerous ways.",2020-03-09,Johan Larsson,"https://github.com/jolars/eulerr, https://jolars.github.io/eulerr/",TRUE,https://github.com/jolars/eulerr,48085,69,2020-03-09T10:35:38Z,696.8840579710145
europepmc,"An R Client for the Europe PubMed Central RESTful Web Service
(see <https://europepmc.org/RestfulWebService> for more information). It
gives access to both metadata on life science literature and open access
full texts. Europe PMC indexes all PubMed content and other literature
sources including Agricola, a bibliographic database of citations to the
agricultural literature, or Biological Patents. In addition to bibliographic
metadata, the client allows users to fetch citations and reference lists.
Links between life-science literature and other EBI databases, including
ENA, PDB or ChEMBL are also accessible. No registration or API key is
required. See the vignettes for usage examples.",2020-05-31,Najko Jahn,"https://docs.ropensci.org/europepmc,
http://github.com/ropensci/europepmc/",TRUE,https://github.com/ropensci/europepmc,107926,17,2020-06-05T13:03:51Z,6348.588235294118
eurostat,"Tools to download data from the Eurostat database
<http://ec.europa.eu/eurostat> together with search and
manipulation utilities.",2020-02-11,Leo Lahti,https://ropengov.github.io/eurostat,TRUE,https://github.com/ropengov/eurostat,82389,153,2020-05-12T12:15:33Z,538.4901960784314
evabic,"Evaluates the performance of binary classifiers.
Computes confusion measures (TP, TN, FP, FN), derived measures (TPR,
FDR, accuracy, F1, DOR, ..), and area under the curve. Outputs are
well suited for nested dataframes.",2020-03-08,Antoine Bichat,"https://abichat.github.io/evabic,
https://github.com/abichat/evabic",TRUE,https://github.com/abichat/evabic,1482,5,2020-04-04T09:49:51Z,296.4
evalITR,"A collection of statistical methods for evaluating individualized treatment rules under randomized data. The provided metrics include PAV (Population Average Value), PAPE (Population Average Prescription Effect), and AUPEC (Area Under Prescription Effect Curve). It also provides the tools to analyze individualized treatment rules under budget constraints. Imai and Li (2019) <arXiv:1905.05389>.",2020-02-20,Michael Lingzhi Li,https://github.com/MichaelLLi/evalITR,TRUE,https://github.com/michaellli/evalitr,1733,1,2020-05-24T16:11:44Z,1733
evaluate,"Parsing and evaluation tools that make it easy to recreate the
command line behaviour of R.",2019-05-28,Yihui Xie,https://github.com/r-lib/evaluate,TRUE,https://github.com/r-lib/evaluate,17577584,85,2020-02-06T16:35:46Z,206795.10588235295
evaluator,"An open source risk analysis toolkit based on the OpenFAIR ontology
<https://www2.opengroup.org/ogsys/catalog/C13K> and risk assessment standard
<https://www2.opengroup.org/ogsys/catalog/C13G>. Empowers an organization to
perform a quantifiable, repeatable, and data-driven risk review.",2020-04-16,David Severski,https://evaluator.tidyrisk.org,TRUE,https://github.com/davidski/evaluator,18367,77,2020-04-15T22:41:15Z,238.53246753246754
EventDetectR,Detect events in time-series data. Combines multiple well-known R packages like 'forecast' and 'neuralnet' to deliver an easily configurable tool for multivariate event detection.,2020-01-23,Sowmya Chandrasekaran,https://github.com/frehbach/EventDetectR,TRUE,https://github.com/frehbach/eventdetectr,7771,9,2020-06-08T18:49:55Z,863.4444444444445
eventstudies,"A platform for conducting event studies (Fama, Fisher,
Jensen, Roll (1969) <doi:10.2307/2525569>) and for
methodological research on event studies. The package
supports market model, augmented market model, and excess
returns methods for data modelling along with Wilcox,
classical t-test, and Bootstrap as inference procedures.",2020-06-02,Chirag Anand,https://github.com/nipfpmf/eventstudies,TRUE,https://github.com/nipfpmf/eventstudies,11304,14,2020-06-03T05:17:54Z,807.4285714285714
EventStudy,"Perform Event Studies from through our <http://EventStudyTools.com> Application Programming Interface, parse the results, visualize it, and / or use the results in further analysis.",2019-03-14,Dr. Simon Mueller,http://eventstudytools.com,TRUE,https://github.com/eventstudytools/api-wrapper.r,17171,8,2019-08-02T13:50:28Z,2146.375
EviewsR,It allows running 'EViews'(<https://eviews.com>) program from R Markdown. 'EViews' (Econometric Views) is a statistical software for Econometric analysis. This package serves as an 'EViews' Knit-Engine for 'knitr' package. Write all your 'EViews' commands in R Markdown chunk.,2020-06-04,Sagiru Mati,https://smati.com.ng,TRUE,https://github.com/sagirumati/eviewsr,0,1,2020-06-08T08:32:28Z,0
evolqg,"Provides functions for covariance matrix comparisons, estimation
of repeatabilities in measurements and matrices, and general evolutionary
quantitative genetics tools.",2020-02-06,Ana Paula Assis,NA,TRUE,https://github.com/lem-usp/evolqg,22573,6,2020-02-13T16:25:14Z,3762.1666666666665
ewoc,"An implementation of a variety of escalation with overdose control designs introduced by Babb, Rogatko and Zacks (1998) <doi:10.1002/(SICI)1097-0258(19980530)17:10%3C1103::AID-SIM793%3E3.0.CO;2-9>. It calculates the next dose as a clinical trial proceeds and performs simulations to obtain operating characteristics.",2020-06-07,Marcio A. Diniz,https://github.com/dnzmarcio/ewoc/,TRUE,https://github.com/dnzmarcio/ewoc,10196,2,2020-06-07T06:46:48Z,5098
exactextractr,"Provides a replacement for the 'extract' function from the 'raster' package
that is suitable for extracting raster values using 'sf' polygons.",2020-05-07,Daniel Baston,"https://isciences.gitlab.io/exactextractr/,
https://github.com/isciences/exactextractr",TRUE,https://github.com/isciences/exactextractr,11867,66,2020-05-26T23:32:29Z,179.8030303030303
exams.mylearn,"Randomized multiple-select and single-select
question generation for the 'MyLearn' teaching and learning
platform. Question templates
in the form of the R/exams package (see <http://www.r-exams.org/>)
are transformed into XML format required by 'MyLearn'.",2020-05-25,Darjus Hosszejni,https://github.com/hdarjus/WU-MyLearn-QGen,TRUE,https://github.com/hdarjus/wu-mylearn-qgen,152,1,2020-05-26T08:28:17Z,152
exceedProb,Computes confidence intervals for the exceedance probability of normally distributed estimators. Currently only supports general linear models. Please see Segal (2019) <arXiv:1803.03356> for more information.,2019-08-27,Brian D. Segal,https://github.com/bdsegal/exceedProb,TRUE,https://github.com/bdsegal/exceedprob,3598,0,2019-12-13T04:22:19Z,NA
excelR,An R interface to 'jExcel' library to create web-based interactive tables and spreadsheets compatible with 'Excel' or any other spreadsheet software.,2020-03-09,Swechhya Bista,https://github.com/Swechhya/excelR,TRUE,https://github.com/swechhya/excelr,11416,99,2020-05-08T12:26:18Z,115.31313131313131
exdex,"Performs frequentist inference for the extremal index of a
stationary time series. Two types of methodology are used. One type is
based on a model that relates the distribution of block maxima to the
marginal distribution of series and leads to the semiparametric maxima
estimators described in Northrop (2015) <doi:10.1007/s10687-015-0221-5> and
Berghaus and Bucher (2018) <doi:10.1214/17-AOS1621>. Sliding block maxima
are used to increase precision of estimation. The other type of methodology
uses a model for the distribution of threshold inter-exceedance times
(Ferro and Segers (2003) <doi:10.1111/1467-9868.00401>). Two
versions of this type of approach are provided, following Suveges (2007)
<doi:10.1007/s10687-007-0034-2> and Suveges and Davison (2010)
<doi:10.1214/09-AOAS292>.",2019-08-06,Paul J. Northrop,http://github.com/paulnorthrop/exdex,TRUE,https://github.com/paulnorthrop/exdex,4185,0,2019-12-03T21:00:25Z,NA
ExPanDaR,"Provides a shiny-based front end (the 'ExPanD' app) and
a set of functions for exploratory data analysis. Run as a web-based
app, 'ExPanD' enables users to assess the robustness of empirical evidence
without providing them access to the underlying data. You can export a
notebook containing the analysis of 'ExPanD' and/or use the functions of the
package to support your exploratory data analysis workflow. Refer to the
vignettes of the package for more information on how to use 'ExPanD' and/or
the functions of this package.",2020-01-29,Joachim Gassen,https://joachim-gassen.github.io/ExPanDaR,TRUE,https://github.com/joachim-gassen/expandar,14481,68,2020-06-09T19:28:12Z,212.9558823529412
experiment,"Provides various statistical methods for
designing and analyzing randomized experiments. One functionality
of the package is the implementation of randomized-block and
matched-pair designs based on possibly multivariate pre-treatment
covariates. The package also provides the tools to analyze various
randomized experiments including cluster randomized experiments,
two-stage randomized experiments, randomized experiments with
noncompliance, and randomized experiments with missing data.",2019-08-05,Kosuke Imai,https://github.com/kosukeimai/experiment,TRUE,https://github.com/kosukeimai/experiment,26441,7,2019-08-05T16:03:10Z,3777.285714285714
ExpertChoice,Supports designing efficient discrete choice experiments (DCEs). Experimental designs can be formed on the basis of orthogonal arrays or search methods for optimal designs (Federov or mixed integer programs). Various methods for converting these experimental designs into a discrete choice experiment. Many efficiency measures! Draws from literature of Kuhfeld (2010) and Street et. al (2005) <doi:10.1016/j.ijresmar.2005.09.003>.,2020-04-03,Jed Stephens,NA,TRUE,https://github.com/jedstephens/expertchoice,951,3,2020-04-07T17:18:11Z,317
explor,Shiny interfaces and graphical functions for multivariate analysis results exploration.,2020-05-02,Julien Barnier,https://juba.github.io/explor/,TRUE,https://github.com/juba/explor,42298,150,2020-05-26T12:58:54Z,281.9866666666667
explore,"Interactive data exploration with one line of code or use an easy to remember set of tidy functions for exploratory data analysis. Introduces three main verbs. explore() to graphically explore a variable or table, describe() to describe a variable or table and report() to create an automated report.",2020-04-06,Roland Krasser,http://github.com/rolkra/explore,TRUE,https://github.com/rolkra/explore,9097,33,2020-04-05T21:48:59Z,275.6666666666667
ExPosition,"A variety of descriptive multivariate analyses with the singular value decomposition,
such as principal components analysis, correspondence analysis, and multidimensional scaling.
See An ExPosition of the Singular Value Decomposition in R (Beaton et al 2014) <doi:10.1016/j.csda.2013.11.006>.",2019-01-07,Derek Beaton,NA,TRUE,https://github.com/derekbeaton/exposition-family_old,66239,2,2019-11-16T21:33:20Z,33119.5
expss,"Package computes and displays tables with support for 'SPSS'-style
labels, multiple and nested banners, weights, multiple-response variables
and significance testing. There are facilities for nice output of tables
in 'knitr', 'Shiny', '*.xlsx' files, R and 'Jupyter' notebooks. Methods
for labelled variables add value labels support to base R functions and to
some functions from other packages. Additionally, the package brings
popular data transformation functions from 'SPSS' Statistics and 'Excel':
'RECODE', 'COUNT', 'COMPUTE', 'DO IF', 'COUNTIF', 'VLOOKUP' and etc.
These functions are very useful for data processing in marketing research
surveys. Package intended to help people to move data
processing from 'Excel' and 'SPSS' to R.",2020-03-25,Gregory Demin,https://gdemin.github.io/expss/,TRUE,https://github.com/gdemin/expss,136199,48,2020-04-04T11:13:29Z,2837.4791666666665
extraDistr,"Density, distribution function, quantile function
and random generation for a number of univariate
and multivariate distributions. This package implements the
following distributions: Bernoulli, beta-binomial, beta-negative
binomial, beta prime, Bhattacharjee, Birnbaum-Saunders,
bivariate normal, bivariate Poisson, categorical, Dirichlet,
Dirichlet-multinomial, discrete gamma, discrete Laplace,
discrete normal, discrete uniform, discrete Weibull, Frechet,
gamma-Poisson, generalized extreme value, Gompertz,
generalized Pareto, Gumbel, half-Cauchy, half-normal, half-t,
Huber density, inverse chi-squared, inverse-gamma, Kumaraswamy,
Laplace, location-scale t, logarithmic, Lomax, multivariate
hypergeometric, multinomial, negative hypergeometric,
non-standard beta, normal mixture, Poisson mixture, Pareto,
power, reparametrized beta, Rayleigh, shifted Gompertz, Skellam,
slash, triangular, truncated binomial, truncated normal,
truncated Poisson, Tukey lambda, Wald, zero-inflated binomial,
zero-inflated negative binomial, zero-inflated Poisson.",2019-06-10,Tymoteusz Wolodzko,https://github.com/twolodzko/extraDistr,TRUE,https://github.com/twolodzko/extradistr,432331,22,2020-02-12T23:00:44Z,19651.409090909092
extrafont,"Tools to using fonts other than the standard PostScript fonts.
This package makes it easy to use system TrueType fonts and with PDF or
PostScript output files, and with bitmap output files in Windows. extrafont
can also be used with fonts packaged specifically to be used with, such as
the fontcm package, which has Computer Modern PostScript fonts with math
symbols. See https://github.com/wch/extrafont for instructions and
examples.",2014-12-08,Winston Chang,https://github.com/wch/extrafont,TRUE,https://github.com/wch/extrafont,930005,227,2020-01-20T18:17:21Z,4096.938325991189
extraoperators,"Speed up common tasks, particularly logical or
relational comparisons and routine follow up tasks such as finding the
indices and subsetting. Inspired by mathematics, where something like:
3 < x < 6 is a standard, elegant and clear way to assert that
x is both greater than 3 and less than 6
(see for example <https://en.wikipedia.org/wiki/Relational_operator>),
a chaining operator is implemented. The chaining operator, %c%,
allows multiple relational operations to be used in quotes on the right
hand side for the same object, on the left hand side.
The %e% operator allows something like set-builder notation
(see for example <https://en.wikipedia.org/wiki/Set-builder_notation>)
to be used on the right hand side.
All operators have built in prefixes defined for all, subset, and which
to reduce the amount of code needed for common tasks, such as return those
values that are true.",2019-11-04,Joshua F. Wiley,"http://joshuawiley.com/extraoperators,
https://github.com/JWiley/extraoperators",TRUE,https://github.com/jwiley/extraoperators,3773,3,2019-11-11T06:01:07Z,1257.6666666666667
extremeStat,"Code to fit, plot and compare several (extreme value)
distribution functions. Can also compute (truncated) distribution quantile estimates and
draw a plot with return periods on a linear scale.",2017-11-05,Berry Boessenkool,https://github.com/brry/extremeStat,TRUE,https://github.com/brry/extremestat,17590,7,2020-06-09T07:56:36Z,2512.8571428571427
exuber,"Testing for and dating periods of explosive
dynamics (exuberance) in time series using the univariate and panel
recursive unit root tests proposed by Phillips et al. (2015)
<doi:10.1111/iere.12132> and Pavlidis et al. (2016)
<doi:10.1007/s11146-015-9531-2>. The recursive least-squares
algorithm utilizes the matrix inversion lemma to avoid matrix
inversion which results in significant speed improvements. Simulation
of a variety of periodically-collapsing bubble processes.",2020-05-12,Kostas Vasilopoulos,https://github.com/kvasilopoulos/exuber,TRUE,https://github.com/kvasilopoulos/exuber,10022,9,2020-05-12T12:55:06Z,1113.5555555555557
eyelinker,"Imports plain-text ASC data files from EyeLink eye trackers
into (relatively) tidy data frames for analysis and visualization.",2019-09-22,Simon Barthelme,https://github.com/a-hurst/eyelinker,TRUE,https://github.com/a-hurst/eyelinker,13966,1,2019-12-01T18:02:45Z,13966
eyetrackingR,"A set of tools that address tasks along the pipeline from raw
data to analysis and visualization for eye-tracking data. Offers several
popular types of analyses, including linear and growth curve time analyses,
onset-contingent reaction time analyses, as well as several non-parametric
bootstrapping approaches.",2018-12-03,Jacob Dink,http://eyetracking-r.com,TRUE,https://github.com/jwdink/eyetrackingr,20220,57,2020-01-11T22:02:46Z,354.7368421052632
ezcox,"A tool to operate a batch of univariate or multivariate
Cox models and return tidy result.",2020-06-03,Shixiang Wang,https://github.com/ShixiangWang/ezcox,TRUE,https://github.com/shixiangwang/ezcox,3762,5,2020-06-04T13:32:33Z,752.4
ezknitr,"An extension of 'knitr' that adds flexibility in several
ways. One common source of frustration with 'knitr' is that it assumes
the directory where the source file lives should be the working directory,
which is often not true. 'ezknitr' addresses this problem by giving you
complete control over where all the inputs and outputs are, and adds several
other convenient features to make rendering markdown/HTML documents easier.",2016-09-16,Dean Attali,https://github.com/ropenscilabs/ezknitr,TRUE,https://github.com/ropenscilabs/ezknitr,22077,86,2019-12-09T12:23:15Z,256.7093023255814
ezpickr,"Choosing any rectangular data file using interactive GUI dialog box, and seamlessly manipulating tidy data between an 'Excel' window and R session.",2019-11-17,JooYoung Seo,https://github.com/jooyoungseo/ezpickr,TRUE,https://github.com/jooyoungseo/ezpickr,12826,4,2020-05-16T14:58:16Z,3206.5
fable,"Provides a collection of commonly used univariate and multivariate
time series forecasting models including automatically selected exponential
smoothing (ETS) and autoregressive integrated moving average (ARIMA) models.
These models work within the 'fable' framework provided by the 'fabletools'
package, which provides the tools to evaluate, visualise, and combine models
in a workflow consistent with the tidyverse.",2020-04-22,Mitchell OHara-Wild,"https://fable.tidyverts.org, https://github.com/tidyverts/fable",TRUE,https://github.com/tidyverts/fable,48174,296,2020-06-04T11:13:16Z,162.75
fabletools,"Provides tools, helpers and data structures for developing models and time series functions for 'fable' and extension packages. These tools support a consistent and tidy interface for time series modelling and analysis.",2020-03-24,Mitchell OHara-Wild,"http://fabletools.tidyverts.org/,
https://github.com/tidyverts/fabletools",TRUE,https://github.com/tidyverts/fabletools,56056,42,2020-06-08T10:11:10Z,1334.6666666666667
fabMix,"Model-based clustering of multivariate continuous data using Bayesian mixtures of factor analyzers (Papastamoulis (2019) <DOI:10.1007/s11222-019-09891-z> (2018) <DOI:10.1016/j.csda.2018.03.007>). The number of clusters is estimated using overfitting mixture models (Rousseau and Mengersen (2011) <DOI:10.1111/j.1467-9868.2011.00781.x>): suitable prior assumptions ensure that asymptotically the extra components will have zero posterior weight, therefore, the inference is based on the ``alive'' components. A Gibbs sampler is implemented in order to (approximately) sample from the posterior distribution of the overfitting mixture. A prior parallel tempering scheme is also available, which allows to run multiple parallel chains with different prior distributions on the mixture weights. These chains run in parallel and can swap states using a Metropolis-Hastings move. Eight different parameterizations give rise to parsimonious representations of the covariance per cluster (following Mc Nicholas and Murphy (2008) <DOI:10.1007/s11222-008-9056-0>). The model parameterization and number of factors is selected according to the Bayesian Information Criterion. Identifiability issues related to label switching are dealt by post-processing the simulated output with the Equivalence Classes Representatives algorithm (Papastamoulis and Iliopoulos (2010) <https://www.jstor.org/stable/25703571>, Papastamoulis (2016) <DOI:10.18637/jss.v069.c01>). ",2020-02-19,Panagiotis Papastamoulis,https://github.com/mqbssppe/overfittingFABMix,TRUE,https://github.com/mqbssppe/overfittingfabmix,8398,1,2020-02-20T07:01:43Z,8398
fabricatr,"Helps you imagine your data before you collect it. Hierarchical data structures
and correlated data can be easily simulated, either from random number generators or
by resampling from existing data sources. This package is faster with 'data.table' and
'mvnfast' installed.",2019-09-04,Graeme Blair,"https://declaredesign.org/r/fabricatr,
https://github.com/DeclareDesign/fabricatr",TRUE,https://github.com/declaredesign/fabricatr,22867,64,2019-11-01T23:42:53Z,357.296875
facerec,"Provides an interface to the 'Kairos' Face Recognition API <https://kairos.com/face-recognition-api>. The API detects faces in images and returns estimates for demographics like gender, ethnicity and age. ",2018-05-14,Carsten Schwemmer,https://github.com/methodds/facerec,TRUE,https://github.com/methodds/facerec,7902,30,2019-06-18T09:50:03Z,263.4
factoextra,"Provides some easy-to-use functions to extract and visualize the
output of multivariate data analyses, including 'PCA' (Principal Component
Analysis), 'CA' (Correspondence Analysis), 'MCA' (Multiple Correspondence
Analysis), 'FAMD' (Factor Analysis of Mixed Data), 'MFA' (Multiple Factor Analysis) and 'HMFA' (Hierarchical Multiple
Factor Analysis) functions from different R packages. It contains also functions
for simplifying some clustering analysis steps and provides 'ggplot2' - based
elegant data visualization.",2020-04-01,Alboukadel Kassambara,http://www.sthda.com/english/rpkgs/factoextra,TRUE,https://github.com/kassambara/factoextra,1262178,205,2020-04-01T21:19:20Z,6156.965853658537
FactorAssumptions,"Tests for Kaiser-Meyer-Olkin (KMO) and
communalities in a dataset. It provides a final sample by removing
variables in a iterable manner while keeping account of the variables
that were removed in each step. It follows the best practices and assumptions according to
Hair, Black, Babin & Anderson (2018, ISBN:9781473756540).",2020-03-06,Jose Storopoli,https://github.com/storopoli/FactorAssumptions,TRUE,https://github.com/storopoli/factorassumptions,1206,1,2020-02-29T15:31:18Z,1206
factorEx,"Provides design-based and model-based estimators for the population average marginal component effects in general factorial experiments, including conjoint analysis. The package also implements a series of recommendations offered in de la Cuesta, Egami, and Imai (2019+), and Egami and Imai (2019) <doi:10.1080/01621459.2018.1476246>.",2020-05-11,Naoki Egami,https://github.com/naoki-egami/factorEx,TRUE,https://github.com/naoki-egami/factorex,3969,0,2020-05-11T15:58:50Z,NA
factorMerger,"
The Merging Path Plot is a methodology for adaptive fusing of k-groups
with likelihood-based model selection. This package contains tools for
exploration and visualization of k-group dissimilarities.
Comparison of k-groups is one of the most important issues
in exploratory analyses and it has zillions of applications.
The traditional approach is to use pairwise post hoc tests
in order to verify which groups differ significantly. However,
this approach fails with a large number of groups in both interpretation
and visualization layer.
The Merging Path Plot solves this problem by using an easy-to-understand
description of dissimilarity among groups based on Likelihood Ratio Test (LRT) statistic (Sitko, Biecek 2017) <arXiv:1709.04412>.
'factorMerger' is a part of the 'DrWhy.AI' universe (Biecek 2018) <arXiv:1806.08915>.
Work on this package was financially supported by the 'NCN Opus grant 2016/21/B/ST6/02176'.",2019-07-03,Tomasz Mikołajczyk,https://github.com/MI2DataLab/factorMerger,TRUE,https://github.com/mi2datalab/factormerger,24313,22,2019-11-18T10:04:39Z,1105.1363636363637
factory,"Function factories are functions that make functions. They can be
confusing to construct. Straightforward techniques can produce functions
that are fragile or hard to understand. While more robust techniques exist
to construct function factories, those techniques can be confusing. This
package is designed to make it easier to construct function factories.",2019-08-21,Jon Harmon,https://github.com/jonthegeek/factory,TRUE,https://github.com/jonthegeek/factory,3638,37,2020-06-03T19:23:20Z,98.32432432432432
factset.analyticsapi.engines,"Allow clients to fetch 'analytics' through API for Portfolio
'Analytics'('PA'), Style Performance Risk('SPAR') and 'Vault' products of
'FactSet'. Visit
<https://github.com/factset/analyticsapi-engines-r-sdk/tree/master/Engines>
for more information on the usage of package. Visit
<https://developer.factset.com/> for more information on products.",2020-02-02,Akshay Sheth,https://github.com/factset/analyticsapi-engines-r-sdk,TRUE,https://github.com/factset/analyticsapi-engines-r-sdk,2051,1,2020-01-23T11:53:39Z,2051
factset.protobuf.stach,"Generates 'RProtobuf' classes for 'FactSet' 'STACH' tabular
format which represents complex multi-dimensional array of data. These
classes help in the 'serialization' and 'deserialization' of 'STACH'
formatted data. See 'GitHub' repository documentation for more
information.",2020-01-14,analytics-reporting,https://github.com/factset/stachschema,TRUE,https://github.com/factset/stachschema,2435,3,2020-04-27T10:05:18Z,811.6666666666666
fad,"Compute maximum likelihood estimators of parameters in a Gaussian factor model using
the the matrix-free methodology described in Dai et al. (2019) <doi:10.1080/10618600.2019.1704296>.
In contrast to the factanal() function from 'stats' package, fad() can handle high-dimensional datasets where
number of variables exceed the sample size and is also substantially faster than the EM algorithms.",2020-01-24,Somak Dutta,https://github.com/somakd/fad,TRUE,https://github.com/somakd/fad,2180,2,2020-02-01T06:15:49Z,1090
FAdist,Probability distributions that are sometimes useful in hydrology.,2020-04-15,Francois Aucoin,https://github.com/tpetzoldt/FAdist,TRUE,https://github.com/tpetzoldt/fadist,35553,4,2020-04-15T05:45:10Z,8888.25
fairness,"Offers various metrics of algorithmic fairness. Fairness in machine learning is an emerging topic with the overarching aim to critically assess algorithms (predictive and classification models) whether their results reinforce existing social biases. While unfair algorithms can propagate such biases and offer prediction or classification results with a disparate impact on various sensitive subgroups of populations (defined by sex, gender, ethnicity, religion, income, socioeconomic status, physical or mental disabilities), fair algorithms possess the underlying foundation that these groups should be treated similarly / should have similar outcomes. The fairness R package offers the calculation and comparisons of commonly and less commonly used fairness metrics in population subgroups. These methods are described by Calders and Verwer (2010) <doi:10.1007/s10618-010-0190-x>, Chouldechova (2017) <doi:10.1089/big.2016.0047>, Feldman et al. (2015) <doi:10.1145/2783258.2783311> , Friedler et al. (2018) <doi:10.1145/3287560.3287589> and Zafar et al. (2017) <doi:10.1145/3038912.3052660>. The package also offers convenient visualizations to help understand fairness metrics.",2020-05-01,Nikita Kozodoi,NA,TRUE,https://github.com/kozodoi/fairness,3790,10,2020-05-15T16:48:45Z,379
fanplot,"Visualise sequential distributions using a range of plotting
styles. Sequential distribution data can be input as either simulations or
values corresponding to percentiles over time. Plots are added to
existing graphic devices using the fan function. Users can choose from four
different styles, including fan chart type plots, where a set of coloured
polygon, with shadings corresponding to the percentile values are layered
to represent different uncertainty levels. Full details in R Journal article; Abel (2015) <doi:10.32614/RJ-2015-002>.",2019-12-18,Guy J. Abel,https://github.com/gjabel/fanplot,TRUE,https://github.com/gjabel/fanplot,47034,0,2019-12-16T12:55:36Z,NA
fansi,"Counterparts to R string manipulation functions that account for
the effects of ANSI text formatting control sequences.",2020-01-08,Brodie Gaslam,https://github.com/brodieG/fansi,TRUE,https://github.com/brodieg/fansi,14247072,37,2020-01-09T13:43:52Z,385056
faoutlier,"Tools for detecting and summarize influential cases that
can affect exploratory and confirmatory factor analysis models as well as
structural equation models more generally.",2017-07-22,Phil Chalmers,https://github.com/philchalmers/faoutlier,TRUE,https://github.com/philchalmers/faoutlier,25607,5,2020-06-08T18:42:43Z,5121.4
FarmTest,"Performs robust multiple testing for means in the presence of known and unknown latent factors presented in Fan et al.(2019) ""FarmTest: Factor-Adjusted Robust Multiple Testing With Approximate False Discovery Control"" <doi:10.1080/01621459.2018.1527700>.
Implements a series of adaptive Huber methods combined with fast data-drive tuning schemes proposed in Ke et al.(2019) ""User-Friendly Covariance Estimation for Heavy-Tailed Distributions"" <doi:10.1214/19-STS711> to estimate model parameters and construct test statistics that are robust against heavy-tailed and/or asymmetric error distributions.
Extensions to two-sample simultaneous mean comparison problems are also included.
As by-products, this package contains functions that compute adaptive Huber mean, covariance and regression estimators that are of independent interest.",2020-04-28,Xiaoou Pan,https://github.com/XiaoouPan/FarmTest,TRUE,https://github.com/xiaooupan/farmtest,11649,2,2020-04-28T03:26:58Z,5824.5
farrell,"Allows the user to execute interactively radial data envelopment analysis models. The user has the ability to upload a data frame,
select the input/output variables, choose the technology assumption to adopt and decide whether to run an input or an output oriented model.
When the model is executed a set of results are displayed which include efficiency scores, peers' determination, scale efficiencies' evaluation
and slacks' calculation. Fore more information about the theoretical background of the package,
please refer to Bogetoft & Otto (2011) <doi:10.1007/978-1-4419-7961-2>.",2020-06-03,Mohamed El Fodil Ihaddaden,https://github.com/feddelegrand7/farrell,TRUE,https://github.com/feddelegrand7/farrell,0,3,2020-06-03T12:44:20Z,0
farver,"The encoding of colour can be handled in many different ways, using
different colour spaces. As different colour spaces have different uses,
efficient conversion between these representations are important. The
'farver' package provides a set of functions that gives access to very fast
colour space conversion and comparisons implemented in C++, and offers
speed improvements over the 'convertColor' function in the 'grDevices'
package.",2020-01-16,Thomas Lin Pedersen,"https://farver.data-imaginist.com,
https://github.com/thomasp85/farver",TRUE,https://github.com/thomasp85/farver,4618709,58,2020-01-16T13:42:05Z,79632.91379310345
fasjem,"This is an R implementation of ""A Fast and Scalable Joint Estimator for Learning Multiple Related Sparse Gaussian Graphical Models"" (FASJEM). The FASJEM algorithm can be used to estimate multiple related precision matrices. For instance, it can identify context-specific gene networks from multi-context gene expression datasets. By performing data-driven network inference from high-dimensional and heterogonous data sets, this tool can help users effectively translate aggregated data into knowledge that take the form of graphs among entities. Please run demo(fasjem) to learn the basic functions provided by this package. For more details, please see <http://proceedings.mlr.press/v54/wang17e/wang17e.pdf>.",2017-08-01,Beilun Wang,https://github.com/QData/JEM,TRUE,https://github.com/qdata/jem,11736,0,2019-08-28T16:28:07Z,NA
fasstr,"The Flow Analysis Summary Statistics Tool for R, 'fasstr', provides various
functions to clean and screen daily stream discharge data; calculate and visualize various summary statistics
and metrics; and compute annual trending (using 'zyp' package methods <https://CRAN.R-project.org/package=zyp>)
and volume frequency analyses (using methods similar to HEC-SSP (2019)
<https://www.hec.usace.army.mil/software/hec-ssp/>). It features useful function arguments for filtering of and
handling dates, customizing data and metrics, and the ability to pull daily data directly from the Water Survey
of Canada hydrometric database (<https://collaboration.cmc.ec.gc.ca/cmc/hydrometrics/www/>).",2020-01-09,Jon Goetz,"https://github.com/bcgov/fasstr,
https://www2.gov.bc.ca/gov/content/environment/air-land-water/water",TRUE,https://github.com/bcgov/fasstr,2602,29,2020-05-29T16:57:12Z,89.72413793103448
fastDummies,"Creates dummy columns from columns that have categorical variables (character or factor types). You can also specify which columns to make dummies out of, or which columns to ignore. Also creates dummy rows from character, factor, and Date columns. This package provides a significant speed increase from creating dummy variables through model.matrix().",2020-03-07,Jacob Kaplan,https://github.com/jacobkap/fastDummies,TRUE,https://github.com/jacobkap/fastdummies,170328,25,2020-03-07T18:05:37Z,6813.12
fasterize,"Provides a drop-in replacement for rasterize() from the 'raster'
package that takes 'sf'-type objects, and is much faster. There is support
for the main options provided by the rasterize() function, including
setting the field used and background value, and options for
aggregating multi-layer rasters. Uses the scan line algorithm attributed to
Wylie et al. (1967) <doi:10.1145/1465611.1465619>.",2020-03-25,Noam Ross,https://github.com/ecohealthalliance/fasterize,TRUE,https://github.com/ecohealthalliance/fasterize,58374,131,2020-03-24T12:36:47Z,445.60305343511453
fastLink,"Implements a Fellegi-Sunter probabilistic record linkage model that allows for missing data
and the inclusion of auxiliary information. This includes functionalities to conduct a merge of two
datasets under the Fellegi-Sunter model using the Expectation-Maximization algorithm. In addition,
tools for preparing, adjusting, and summarizing data merges are included. The package implements methods
described in Enamorado, Fifield, and Imai (2019) ''Using a Probabilistic Model to Assist Merging of
Large-scale Administrative Records'', American Political Science Review and is available
at <http://imai.fas.harvard.edu/research/linkage.html>.",2020-04-29,Ted Enamorado,NA,TRUE,https://github.com/kosukeimai/fastlink,17405,143,2020-04-29T22:45:57Z,121.7132867132867
fastlogranktest,"A very fast Log-Rank-Test implementation that is several orders of magnitude faster than the implementation in the 'survival' package.
Log-Rank-Tests can be computed individually or concurrently using threading.",2020-06-04,Andreas Stelzer,https://github.com/compsysmed/fastlogranktest.git,TRUE,https://github.com/compsysmed/fastlogranktest,3043,0,2020-04-03T14:36:55Z,NA
fastmap,"Fast implementation of a key-value store. Environments are commonly
used as key-value stores, but every time a new key is used, it is added to
R's global symbol table, causing a small amount of memory leakage. This can
be problematic in cases where many different keys are used. Fastmap avoids
this memory leak issue by implementing the map using data structures in C++.",2019-10-08,Winston Chang,"https://r-lib.github.io/fastmap/, https://github.com/r-lib/fastmap",TRUE,https://github.com/r-lib/fastmap,3136924,80,2019-12-16T05:24:27Z,39211.55
fastNaiveBayes,"This is an extremely fast implementation of a Naive Bayes classifier. This
package is currently the only package that supports a Bernoulli distribution, a Multinomial
distribution, and a Gaussian distribution, making it suitable for both binary features,
frequency counts, and numerical features. Another feature is the support of a mix of
different event models. Only numerical variables are allowed, however, categorical variables
can be transformed into dummies and used with the Bernoulli distribution.
The implementation is largely based on the paper
""A comparison of event models for Naive Bayes anti-spam e-mail filtering""
written by K.M. Schneider (2003) <doi:10.3115/1067807.1067848>. Any issues can be
submitted to: <https://github.com/mskogholt/fastNaiveBayes/issues>.",2020-05-04,Martin Skogholt,https://github.com/mskogholt/fastNaiveBayes,TRUE,https://github.com/mskogholt/fastnaivebayes,11623,40,2020-05-04T10:17:26Z,290.575
fastpos,"Finds the critical sample size (""critical point of stability"") for a
correlation to stabilize in Schoenbrodt and Perugini's definition of
sequential stability (see <doi:10.1016/j.jrp.2013.05.009>).",2020-02-17,Johannes Titz,https://github.com/johannes-titz/fastpos,TRUE,https://github.com/johannes-titz/fastpos,3158,0,2020-02-17T10:33:33Z,NA
fastshap,"Computes fast (relative to other implementations) approximate
Shapley values for any supervised learning model. Shapley values help to
explain the predictions from any black box model using ideas from game
theory; see Strumbel and Kononenko (2014) <doi:10.1007/s10115-013-0679-x>
for details.",2020-02-02,Brandon Greenwell,https://github.com/bgreenwell/fastshap,TRUE,https://github.com/bgreenwell/fastshap,8789,32,2020-02-02T00:24:15Z,274.65625
fastStat,"When we do statistic work, we need to see the structure of the data.
list.str() function will help you see the structure of the data quickly.
list.plot() function can help you check every variable in your dataframe.
table_one() function will make it easy to make a baseline table including
difference tests. uv_linear(), uv_logit(), uv_cox(), uv_logrank() will give
you a hand to do univariable regression analysis, while mv_linear(),
mv_logit() and mv_cox() will carry out multivariable regression analysis.",2019-11-22,Jing Zhang,https://github.com/yikeshu0611/fastStat,TRUE,https://github.com/yikeshu0611/faststat,2832,0,2019-11-16T12:43:45Z,NA
fauxnaif,"Provides a replacement for dplyr::na_if(). Allows you to specify
multiple values to be replaced with NA using a single function.",2020-03-01,Alexander Rossell Hayes,https://github.com/rossellhayes/fauxnaif,TRUE,https://github.com/rossellhayes/fauxnaif,1631,0,2020-05-13T05:19:24Z,NA
fauxpas,"HTTP error helpers. Methods included for general purpose HTTP
error handling, as well as individual methods for every HTTP status
code, both via status code numbers as well as their descriptive names.
Supports ability to adjust behavior to stop, message or warning.
Includes ability to use custom whisker template to have any configuration
of status code, short description, and verbose message. Currently
supports integration with 'crul', 'curl', and 'httr'.",2020-04-13,Scott Chamberlain,"https://docs.ropensci.org/fauxpas,
https://github.com/ropensci/fauxpas",TRUE,https://github.com/ropensci/fauxpas,38178,11,2020-04-13T15:49:57Z,3470.7272727272725
fbRads,"Wrapper functions around the Facebook Marketing 'API' to create, read, update and delete custom audiences, images, campaigns, ad sets, ads and related content.",2016-04-06,Ajaykumar Gopal,https://github.com/cardcorp/fbRads,TRUE,https://github.com/cardcorp/fbrads,19047,114,2020-03-31T17:45:28Z,167.07894736842104
fc,"Provides a streamlined, standard evaluation-based approach to multivariate function composition. Allows for chaining commands via a forward-pipe operator, %>%.",2018-08-14,Xiaofei (Susan) Wang,https://github.com/swang87/fc,TRUE,https://github.com/swang87/fc,6996,0,2019-10-20T22:53:12Z,NA
fcaR,"Provides tools to perform fuzzy formal concept analysis, presented in Wille (1982) <doi:10.1007/978-3-642-01815-2_23> and in Ganter and Obiedkov (2016) <doi:10.1007/978-3-662-49291-8>.
It provides functions to load and save a formal context, extract its concept lattice and implications.
In addition, one can use the implications to compute semantic closures of fuzzy sets and, thus, build recommendation systems.",2020-01-19,Domingo Lopez Rodriguez,https://github.com/neuroimaginador/fcaR,TRUE,https://github.com/neuroimaginador/fcar,2604,4,2020-05-25T18:21:36Z,651
FcircSEC,"Extract full length circular RNA sequences and classify circular RNA
using the output of circular RNA prediction tools, reference genome and the annotation file corresponding to the reference genome.
This package uses the output of circular RNA prediction tools such as 'CIRI', 'CIRCExplorer' and the output of other state-of-the-art circular RNA prediction tools.
Details about the circular RNA prediction procedure can be found in
'Yuan Gao, Jinfeng Wang and Fangqing Zhao' (2015) <doi:10.1186/s13059-014-0571-3>
and 'Zhang XO, Wang HB, Zhang Y, Lu X, Chen LL and Yang L' (2014) <doi:10.1016/j.cell.2014.09.001>.",2020-01-31,Md. Tofazzal Hossain,https://github.com/tofazzal4720/FcircSEC,TRUE,https://github.com/tofazzal4720/fcircsec,1726,0,2020-01-21T06:34:17Z,NA
FCPS,"Many conventional clustering algorithms are provided in this package with consistent input and output, which enables the user to try out algorithms swiftly. Additionally, 26 statistical approaches for the estimation of the number of clusters as well as the the mirrored density plot (MD-plot) of clusterability are implemented. Moreover, the fundamental clustering problems suite (FCPS) offers a variety of clustering challenges any algorithm should handle when facing real world data, see Thrun, M.C., Ultsch A.: ""Clustering Benchmark Datasets Exploiting the Fundamental Clustering Problems"" (2020), Data in Brief, <DOI:10.1016/j.dib.2020.105501>.",2020-06-07,Michael Thrun,http://www.deepbionics.org,TRUE,https://github.com/mthrun/fcps,2257,3,2020-06-07T14:35:58Z,752.3333333333334
fda.usc,"Routines for exploratory and descriptive analysis of functional data such as depth measurements, atypical curves detection, regression models, supervised classification, unsupervised classification and functional analysis of variance.",2020-02-17,Manuel Oviedo de la Fuente,"https://github.com/moviedo5/fda.usc,
http://www.jstatsoft.org/v51/i04/",TRUE,https://github.com/moviedo5/fda.usc,88248,1,2020-02-17T09:54:39Z,88248
fdaACF,"Quantify the serial correlation across lags of a given functional
time series using an autocorrelation function for functional time series.
The autocorrelation function is based on the L2 norm of the lagged covariance
operators of the series. Functions are available for estimating the
distribution of the autocorrelation function under the assumption
of strong functional white noise.",2020-01-24,Guillermo Mestre Marcos,https://github.com/GMestreM/fdaACF,TRUE,https://github.com/gmestrem/fdaacf,2337,3,2020-03-24T10:15:56Z,779
fdapace,"A versatile package that provides implementation of various
methods of Functional Data Analysis (FDA) and Empirical Dynamics. The core of this
package is Functional Principal Component Analysis (FPCA), a key technique for
functional data analysis, for sparsely or densely sampled random trajectories
and time courses, via the Principal Analysis by Conditional Estimation
(PACE) algorithm. This core algorithm yields covariance and mean functions,
eigenfunctions and principal component (scores), for both functional data and
derivatives, for both dense (functional) and sparse (longitudinal) sampling designs.
For sparse designs, it provides fitted continuous trajectories with confidence bands,
even for subjects with very few longitudinal observations. PACE is a viable and
flexible alternative to random effects modeling of longitudinal data. There is also a
Matlab version (PACE) that contains some methods not available on fdapace and vice
versa. Please cite our package if you use it (You may run the command
citation(""fdapace"") to get the citation format and bibtex entry).
References: Wang, J.L., Chiou, J., Müller, H.G. (2016) <doi:10.1146/annurev-statistics-041715-033624>;
Chen, K., Zhang, X., Petersen, A., Müller, H.G. (2017) <doi:10.1007/s12561-015-9137-5>.",2020-05-15,Cody Carroll,https://github.com/functionaldata/tPACE,TRUE,https://github.com/functionaldata/tpace,23955,15,2020-06-01T20:30:50Z,1597
fdistr,"Provides functionality to generate a frequency
distribution table from a set of observations and plot the frequency
distribution using a Pareto chart.",2019-12-02,Donnie Minnick,https://github.com/dtminnick/fdistr,TRUE,https://github.com/dtminnick/fdistr,1722,1,2019-12-24T14:58:02Z,1722
feasts,"Provides a collection of features, decomposition methods,
statistical summaries and graphics functions for the analysing tidy time
series data. The package name 'feasts' is an acronym comprising of its key
features: Feature Extraction And Statistics for Time Series.",2020-03-18,Mitchell OHara-Wild,"http://feasts.tidyverts.org/, https://github.com/tidyverts/feasts/",TRUE,https://github.com/tidyverts/feasts,44061,157,2020-06-05T11:32:21Z,280.64331210191085
FeatureHashing,"Feature hashing, also called as the hashing trick, is a method to transform
features of a instance to a vector. Thus, it is a method to transform a real dataset to a matrix.
Without looking up the indices in an associative array,
it applies a hash function to the features and uses their hash values as indices directly.
The method of feature hashing in this package was proposed in Weinberger et al. (2009) <arXiv:0902.2206>.
The hashing algorithm is the murmurhash3 from the 'digest' package.
Please see the README in <https://github.com/wush978/FeatureHashing> for more information.",2019-11-24,Wush Wu,https://github.com/wush978/FeatureHashing,TRUE,https://github.com/wush978/featurehashing,27791,94,2019-11-25T02:33:29Z,295.6489361702128
featuretoolsR,"A 'reticulate'-based interface to the 'Python' module 'Featuretools'.
The package grants functionality to interact with 'Pythons' 'Featuretools' module, which allows
for automated feature engineering on any data frame. Valid features and new data sets can, after
feature synthesis, easily be extracted.",2020-04-25,Magnus Furugård,https://github.com/magnusfurugard/featuretoolsR,TRUE,https://github.com/magnusfurugard/featuretoolsr,4206,39,2020-04-25T10:06:25Z,107.84615384615384
febr,"
Making the access to the Free Brazilian Repository for Open Soil Data <http://www.ufsm.br/febr/> as easy
as possible.",2020-03-17,Alessandro Samuel-Rosa,https://github.com/febr-team/febr-package/,TRUE,https://github.com/febr-team/febr-package,9861,3,2020-04-15T02:34:49Z,3287
FedData,"Functions to automate downloading geospatial data available from
several federated data sources (mainly sources maintained by the US Federal
government). Currently, the package enables extraction from seven datasets:
The National Elevation Dataset digital elevation models (1 and 1/3 arc-second;
USGS); The National Hydrography Dataset (USGS); The Soil Survey Geographic
(SSURGO) database from the National Cooperative Soil Survey (NCSS), which is
led by the Natural Resources Conservation Service (NRCS) under the USDA; the
Global Historical Climatology Network (GHCN), coordinated by National Climatic
Data Center at NOAA; the Daymet gridded estimates of daily weather parameters
for North America, version 3, available from the Oak Ridge National Laboratory's
Distributed Active Archive Center (DAAC); the International Tree Ring Data Bank;
and the National Land Cover Database (NLCD).",2019-04-22,R. Kyle Bocinsky,https://github.com/ropensci/FedData,TRUE,https://github.com/ropensci/feddata,56667,65,2020-04-27T20:07:30Z,871.8
fedregs,"The Code of Federal Regulations (CFR) annual edition is the codification
of the general and permanent rules published in the Federal Register by the departments
and agencies of the Federal Government of the United States of America. Simply, the
'fedregs' package facilitates word processing and sentiment analysis of the CFR using tidy
principles. Note: According to the Code of Federal Regulations XML Rendition User Guide Document:
""In general, there are no restrictions on re-use of information in Code of Federal Regulations
material because U.S. Government works are not subject to copyright. OFR and GPO do not
restrict downstream uses of Code of Federal Regulations data, except that independent providers
should be aware that only the OFR and GPO are entitled to represent that they are the providers
of the official versions of the Code of Federal Regulations and related Federal Register
publications.""",2019-09-16,Scott Large,NA,TRUE,https://github.com/slarge/fedregs,8310,1,2020-05-27T13:34:26Z,8310
feedeR,Retrieve data from RSS/Atom feeds.,2020-04-19,Andrew Collier,https://github.com/datawookie/feedeR,TRUE,https://github.com/datawookie/feeder,19985,19,2020-04-19T14:09:00Z,1051.842105263158
feisr,"Provides the function feis() to estimate fixed effects individual
slope (FEIS) models. The FEIS model constitutes a more general version of
the often-used fixed effects (FE) panel model, as implemented in the
package 'plm' by Croissant and Millo (2008) <doi:10.18637/jss.v027.i02>.
In FEIS models, data are not only person ""demeaned"" like in conventional
FE models, but ""detrended"" by the predicted individual slope of each
person or group. Estimation is performed by applying least squares lm()
to the transformed data. For more details on FEIS models see Bruederl and
Ludwig (2015, ISBN:1446252442); Frees (2001) <doi:10.2307/3316008>;
Polachek and Kim (1994) <doi:10.1016/0304-4076(94)90075-2>;
Wooldridge (2010, ISBN:0262294354). To test consistency of conventional FE
and random effects estimators against heterogeneous slopes, the package
also provides the functions feistest() for an artificial regression test
and bsfeistest() for a bootstrapped version of the Hausman test.",2019-03-01,Tobias Ruettenauer,https://github.com/ruettenauer/feisr,TRUE,https://github.com/ruettenauer/feisr,11388,4,2020-06-09T19:55:38Z,2847
felp,"
Provides pseudo-postfix operators and more to enhance displaying documents.
The `?.` pseudo-postfix operator and the `?` prefix operator displays documents and contents (source or structure) of objects simultaneously to help understanding the objects.
The `?p` pseudo-postfix operator displays package documents, and is shorter than help(package = foo).",2019-12-06,Atsushi Yasumoto,https://github.com/atusy/felp,TRUE,https://github.com/atusy/felp,4195,11,2020-05-10T23:52:42Z,381.3636363636364
FEprovideR,"A structured profile likelihood algorithm for the logistic fixed effects model and an approximate expectation maximization (EM) algorithm for the logistic mixed effects model. Based on He, K., Kalbfleisch, J.D., Li, Y. and Li, Y. (2013) <doi:10.1007/s10985-013-9264-6>.",2019-07-30,Michael Kleinsasser,NA,TRUE,https://github.com/umich-biostatistics/feprovider,4690,1,2019-07-30T13:09:32Z,4690
ffbase,"Extends the out of memory vectors of 'ff' with
statistical functions and other utilities to ease their usage.",2020-03-18,Edwin de Jonge,http://github.com/edwindj/ffbase,TRUE,https://github.com/edwindj/ffbase,421150,28,2020-02-29T11:34:53Z,15041.07142857143
FFTrees,"Create, visualize, and test fast-and-frugal decision trees (FFTs). FFTs are very simple decision trees for
binary classification problems. FFTs can be preferable to more complex algorithms because they are easy to communicate,
require very little information, and are robust against overfitting.",2020-06-08,Nathaniel Phillips,NA,TRUE,https://github.com/ndphillips/fftrees,34922,108,2020-06-08T18:40:23Z,323.35185185185185
fgdr,"Read and Parse for Fundamental Geo-Spatial Data (FGD) which downloads XML file
from providing site (<https://fgd.gsi.go.jp/download/menu.php>). The JPGIS format file
provided by FGD so that it can be handled as an R spatial object such as 'sf' and 'raster' or 'stars'.
Supports the FGD version 4.1, and accepts fundamental items and digital elevation models.",2020-05-06,Shinya Uryu,https://github.com/uribo/fgdr,TRUE,https://github.com/uribo/fgdr,3607,5,2020-06-01T09:04:31Z,721.4
fgeo,"To help you access, transform, analyze, and
visualize ForestGEO data, we developed a collection of R packages
(<https://forestgeo.github.io/fgeo/>). This package, in particular,
helps you to install and load the entire package-collection with a
single R command, and provides convenient ways to find relevant
documentation. Most commonly, you should not worry about the
individual packages that make up the package-collection as you can
access all features via this package. To learn more about ForestGEO
visit <http://www.forestgeo.si.edu/>.",2019-06-19,Mauro Lepore,"http://forestgeo.github.io/fgeo, https://github.com/forestgeo/fgeo",TRUE,https://github.com/forestgeo/fgeo,4859,16,2019-12-11T18:02:06Z,303.6875
fgeo.analyze,"To help you access, transform, analyze, and
visualize ForestGEO data, we developed a collection of R packages
(<https://forestgeo.github.io/fgeo/>). This package, in particular,
helps you to implement analyses of plot species distributions,
topography, demography, and biomass. It also includes a torus
translation test to determine habitat associations of tree species as
described by Zuleta et al. (2018) <doi:10.1007/s11104-018-3878-0>. To
learn more about ForestGEO visit <http://www.forestgeo.si.edu/>.",2020-03-23,Mauro Lepore,https://github.com/forestgeo/fgeo.analyze,TRUE,https://github.com/forestgeo/fgeo.analyze,5653,1,2020-03-23T18:18:37Z,5653
fgeo.plot,"To help you access, transform, analyze, and
visualize ForestGEO data, we developed a collection of R packages
(<https://forestgeo.github.io/fgeo/>). This package, in particular,
helps you to plot ForestGEO data. To learn more about ForestGEO visit
<http://www.forestgeo.si.edu/>.",2019-06-18,Mauro Lepore,"https://github.com/forestgeo/fgeo.plot,
https://forestgeo.github.io/fgeo.plot/",TRUE,https://github.com/forestgeo/fgeo.plot,5348,2,2019-06-19T15:15:40Z,2674
fgeo.tool,"To help you access, transform, analyze, and
visualize ForestGEO data, we developed a collection of R packages
(<https://forestgeo.github.io/fgeo/>). This package, in particular,
helps you to easily import, filter, and modify ForestGEO data. To
learn more about ForestGEO visit <http://www.forestgeo.si.edu/>.",2020-03-23,Mauro Lepore,https://github.com/forestgeo/fgeo.tool,TRUE,https://github.com/forestgeo/fgeo.tool,6378,2,2020-03-23T18:04:26Z,3189
fgeo.x,"Access small example datasets from Luquillo, a
ForestGEO site in Puerto Rico
(<https://forestgeo.si.edu/sites/north-america/luquillo>).",2019-06-07,Mauro Lepore,https://github.com/forestgeo/fgeo.x,TRUE,https://github.com/forestgeo/fgeo.x,5511,1,2019-06-17T19:11:18Z,5511
fic,"Compares how well different models estimate a quantity of interest (the ""focus"") so that different models may be preferred for different purposes. Comparisons within any class of models fitted by maximum likelihood are supported, with shortcuts for commonly-used classes such as generalised linear models and parametric survival models. The methods originate from Claeskens and Hjort (2003) <doi:10.1198/016214503000000819> and Claeskens and Hjort (2008, ISBN:9780521852258).",2019-04-13,Christopher Jackson,https://github.com/chjackson/fic,TRUE,https://github.com/chjackson/fic,4840,5,2019-12-02T09:29:42Z,968
fieldRS,"In remote sensing, designing a field campaign to collect ground-truth data can be a challenging task. We need to collect representative samples while accounting for issues such as budget constraints and limited accessibility created by e.g. poor infrastructure. As suggested by Olofsson et al. (2014) <doi:10.1016/j.rse.2014.02.015>, this demands the establishment of best-practices to collect ground-truth data that avoid the waste of time and funds. 'fieldRS' addresses this issue by helping scientists and practitioners design field campaigns through the identification of priority sampling sites, the extraction of potential sampling plots and the conversion of plots into consistent training and validation samples that can be used in e.g. land cover classification.",2020-06-02,Ruben Remelgado,https://github.com/RRemelgado/fieldRS/,TRUE,https://github.com/rremelgado/fieldrs,7702,10,2020-06-02T13:27:19Z,770.2
fields,"For curve, surface and function fitting with an emphasis
on splines, spatial data, geostatistics, and spatial statistics. The major methods
include cubic, and thin plate splines, Kriging, and compactly supported
covariance functions for large data sets. The splines and Kriging methods are
supported by functions that can determine the smoothing parameter
(nugget and sill variance) and other covariance function parameters by cross
validation and also by restricted maximum likelihood. For Kriging
there is an easy to use function that also estimates the correlation
scale (range parameter). A major feature is that any covariance function
implemented in R and following a simple format can be used for
spatial prediction. There are also many useful functions for plotting
and working with spatial data as images. This package also contains
an implementation of sparse matrix methods for large spatial data
sets and currently requires the sparse matrix (spam) package. Use
help(fields) to get started and for an overview. The fields source
code is deliberately commented and provides useful explanations of
numerical details as a companion to the manual pages. The commented
source code can be viewed by expanding source code version
and looking in the R subdirectory. The reference for fields can be generated
by the citation function in R and has DOI <doi:10.5065/D6W957CT>. Development
of this package was supported in part by the National Science Foundation Grant
1417857 and the National Center for Atmospheric Research. See the Fields URL
for a vignette on using this package and some background on spatial statistics.",2020-02-04,Douglas Nychka,https://github.com/NCAR/Fields,TRUE,https://github.com/ncar/fields,1456365,10,2020-02-03T18:12:51Z,145636.5
fiery,"A very flexible framework for building server side logic in R. The
framework is unopinionated when it comes to how HTTP requests and WebSocket
messages are handled and supports all levels of app complexity; from serving
static content to full-blown dynamic web-apps. Fiery does not hold your hand
as much as e.g. the shiny package does, but instead sets you free to create
your web app the way you want.",2019-09-27,Thomas Lin Pedersen,"https://fiery.data-imaginist.com,
https://github.com/thomasp85/fiery",TRUE,https://github.com/thomasp85/fiery,27268,185,2019-10-02T20:10:14Z,147.3945945945946
fillr,"Edit vectors to fill missing values, based on the vector itself.",2020-01-28,Jelger van Zaane,https://jelger12.github.io/fillr/,TRUE,https://github.com/jelger12/fillr,4071,1,2020-01-28T15:06:45Z,4071
finalfit,"Generate regression results tables and plots in final
format for publication. Explore models and export directly to PDF
and 'Word' using 'RMarkdown'. ",2020-04-21,Ewen Harrison,https://github.com/ewenharrison/finalfit,TRUE,https://github.com/ewenharrison/finalfit,41392,202,2020-05-22T17:03:25Z,204.9108910891089
finbif,"A programmatic interface to the 'Finnish Biodiversity Information
Facility' ('FinBIF') API (<https://api.laji.fi>). 'FinBIF' aggregates
Finnish biodiversity data from multiple sources in a single open access
portal for researchers, citizen scientists, industry and government.
'FinBIF' allows users of biodiversity information to find, access, combine
and visualise data on Finnish plants, animals and microorganisms. The
'finbif' package makes the publicly available data in 'FinBIF' easily
accessible to programmers. Biodiversity information is available on taxonomy
and taxon occurrence. Occurrence data can be filtered by taxon, time,
location and other variables. The data accessed are conveniently
preformatted for subsequent analyses.",2020-04-23,William Morris,"https://github.com/luomus/finbif, https://luomus.github.io/finbif",TRUE,https://github.com/luomus/finbif,3114,1,2020-04-30T08:19:00Z,3114
finch,"Parse and create Darwin Core (<http://rs.tdwg.org/dwc/>) Simple
and Archives. Functionality includes reading and parsing all the
files in a Darwin Core Archive, including the datasets and metadata;
read and parse simple Darwin Core files; and validation of Darwin
Core Archives.",2019-04-25,Scott Chamberlain,https://github.com/ropensci/finch,TRUE,https://github.com/ropensci/finch,15909,19,2019-12-09T13:08:56Z,837.3157894736842
findpython,Package designed to find an acceptable python binary.,2019-03-08,Trevor L Davis,https://github.com/trevorld/findpython,TRUE,https://github.com/trevorld/findpython,91805,5,2019-11-25T22:42:20Z,18361
findR,"Scans all directories and subdirectories of a path for code snippets, R scripts,
R Markdown, PDF or text files containing a specific pattern. Files found can be copied to a new folder.",2018-03-13,David Zumbach,NA,TRUE,https://github.com/zumbov2/findr,12256,1,2019-12-09T08:11:11Z,12256
fingerprint,"Functions to manipulate binary fingerprints
of arbitrary length. A fingerprint is represented by an object of S4 class 'fingerprint'
which is internally represented a vector of integers, such
that each element represents the position in the fingerprint that is set to 1.
The bitwise logical functions in R are overridden so that they can be used directly
with 'fingerprint' objects. A number of distance metrics are also
available (many contributed by Michael Fadock). Fingerprints
can be converted to Euclidean vectors (i.e., points on the unit hypersphere) and
can also be folded using OR. Arbitrary fingerprint formats can be handled via line
handlers. Currently handlers are provided for CDK, MOE and BCI fingerprint data.",2018-01-07,Rajarshi Guha,NA,TRUE,https://github.com/rajarshi/cdkr,57277,30,2020-03-16T02:38:29Z,1909.2333333333333
fingertipscharts,"Use Fingertips charts to recreate the visualisations
that are displayed on the Fingertips website (<http://fingertips.phe.org.uk/>).",2020-06-05,Sebastian Fox,NA,TRUE,https://github.com/publichealthengland/fingertipscharts,13579,5,2020-06-04T20:03:34Z,2715.8
fingertipsR,Fingertips (<http://fingertips.phe.org.uk/>) contains data for many indicators of public health in England. The underlying data is now more easily accessible by making use of the API.,2020-06-06,Sebastian Fox,"https://fingertips.phe.org.uk,
https://github.com/ropensci/fingertipsR,
https://docs.ropensci.org/fingertipsR/",TRUE,https://github.com/ropensci/fingertipsr,33730,43,2020-06-05T08:35:38Z,784.4186046511628
fipe,"The Brazilian vehicle purchase pricing table is provided by
the Institute of Economic Research Foundation (Fipe) and used in purchase
negotiations according to region, vehicle’s conservation, color, accessories
or any other factor that might influence the demand and supply for a specific
vehicle. For more on the data themselves and web access, please see
<https://www.fipe.org.br/en-us/home/>.",2019-08-25,Italo Cegatta,https://italocegatta.github.io/fipe/,TRUE,https://github.com/italocegatta/fipe,3712,1,2019-09-03T00:51:14Z,3712
FiRE,"The algorithm assigns rareness/ outlierness score to every sample in voluminous datasets.
The algorithm makes multiple estimations of the proximity between a pair of samples, in low-dimensional spaces. To compute proximity, FiRE uses Sketching, a variant of locality sensitive hashing. For more details: Jindal, A., Gupta, P., Jayadeva and Sengupta, D., 2018. Discovery of rare cells from voluminous single cell expression data. Nature Communications, 9(1), p.4719. <doi:10.1038/s41467-018-07234-6>.",2019-01-02,Prashant Gupta,https://github.com/princethewinner/FiRE,TRUE,https://github.com/princethewinner/fire,5638,15,2019-08-09T04:25:48Z,375.8666666666667
firebase,"Authenticate users in 'Shiny' applications using 'Google Firebase'
with any of the many methods provided; email and password, email link, or
using a third-party provider such as 'Github', 'Twitter', or 'Google'.",2020-03-30,John Coene,"https://firebase.john-coene.com/,
https://github.com/JohnCoene/firebase",TRUE,https://github.com/johncoene/firebase,1082,47,2020-04-04T09:35:49Z,23.02127659574468
fishbc,"Provides raw and curated data on the codes,
classification and conservation status of freshwater fishes in British
Columbia. Marine fishes will be added in a future release.",2020-06-04,Evan Amies-Galonski,https://github.com/poissonconsulting/fishbc,TRUE,https://github.com/poissonconsulting/fishbc,0,2,2020-06-09T00:17:11Z,0
fishtree,"An interface to the Fish Tree of Life API to download taxonomies,
phylogenies, fossil calibrations, and diversification rate information for
ray-finned fishes.",2019-12-17,Jonathan Chang,https://fishtreeoflife.org/,TRUE,https://github.com/jonchang/fishtree,7961,3,2020-06-09T05:45:38Z,2653.6666666666665
fishualize,Implementation of color palettes based on fish species. ,2020-04-20,Nina M. D. Schiettekatte,https://github.com/nschiett/fishualize,TRUE,https://github.com/nschiett/fishualize,3965,101,2020-05-27T22:47:20Z,39.257425742574256
fitdistrplus,"Extends the fitdistr() function (of the MASS package) with several functions to help the fit of a parametric distribution to non-censored or censored data. Censored data may contain left censored, right censored and interval censored values, with several lower and upper bounds. In addition to maximum likelihood estimation (MLE), the package provides moment matching (MME), quantile matching (QME) and maximum goodness-of-fit estimation (MGE) methods (available only for non-censored data). Weighted versions of MLE, MME and QME are available. See e.g. Casella & Berger (2002). Statistical inference. Pacific Grove.",2020-05-19,Aurelie Siberchicot,"https://lbbe.univ-lyon1.fr/fitdistrplus.html,
https://github.com/aursiber/fitdistrplus",TRUE,https://github.com/aursiber/fitdistrplus,753470,3,2020-05-19T11:44:34Z,251156.66666666666
fitHeavyTail,"Robust estimation methods for the mean vector and covariance matrix
from data (possibly containing NAs) under multivariate heavy-tailed
distributions such as angular Gaussian (via Tyler's method), Cauchy,
and Student's t.
Additionally, a factor model structure can be specified for the covariance
matrix.
The package is based on the papers: Sun, Babu, and Palomar (2014),
Sun, Babu, and Palomar (2015), Liu and Rubin (1995), and
Zhou, Liu, Kumar, and Palomar (2019).",2020-01-07,Daniel P. Palomar,https://github.com/dppalomar/fitHeavyTail,TRUE,https://github.com/dppalomar/fitheavytail,2925,6,2020-04-16T05:39:11Z,487.5
fitODBOD,"Contains Probability Mass Functions, Cumulative Mass Functions, Negative Log Likelihood value, parameter estimation and modeling data using Binomial Mixture Distributions (BMD) (Manoj et al (2013) <doi:10.5539/ijsp.v2n2p24>) and Alternate Binomial Distributions (ABD) (Paul (1985) <doi:10.1080/03610928508828990>), also Journal article to use the package(<doi:10.21105/joss.01505>).",2020-01-16,Amalan Mahendran,"https://github.com/Amalan-ConStat/R-fitODBOD,https://amalan-constat.github.io/R-fitODBOD/index.html",TRUE,https://github.com/amalan-constat/r-fitodbod,8792,1,2019-07-02T15:22:47Z,8792
FitUltD,"Extends the fitdist() (from 'fitdistrplus') adding the Anderson-Darling ad.test() (from 'ADGofTest') and Kolmogorov Smirnov Test ks.test() inside, trying the distributions from 'stats' package by default and offering a second function which uses mixed distributions to fit, this distributions are split with unsupervised learning, with Mclust() function (from 'mclust').",2019-09-11,José Carlos Del Valle,https://github.com/jcval94/FitUltD,TRUE,https://github.com/jcval94/fitultd,4866,0,2020-03-24T04:32:33Z,NA
fitzRoy,"An easy package for scraping and processing Australia Rules Football (AFL)
data. 'fitzRoy' provides a range of functions for accessing publicly available data
from 'AFL Tables' <https://afltables.com>, 'Footy Wire' <https://www.footywire.com> and
'The Squiggle' <https://squiggle.com.au>. Further functions allow for easy processing,
cleaning and transformation of this data into formats that can be used for analysis. ",2020-05-23,James Day,"https://jimmyday12.github.io/fitzRoy/,
https://github.com/jimmyday12/fitzRoy",TRUE,https://github.com/jimmyday12/fitzroy,3618,67,2020-06-09T10:43:26Z,54
fivethirtyeight,"Datasets and code published by the data journalism website
'FiveThirtyEight' available at <https://github.com/fivethirtyeight/data>.
Note that while we received guidance from editors at 'FiveThirtyEight', this
package is not officially published by 'FiveThirtyEight'.",2019-07-31,Albert Y. Kim,https://github.com/rudeboybert/fivethirtyeight,TRUE,https://github.com/rudeboybert/fivethirtyeight,73520,396,2020-06-05T18:03:36Z,185.65656565656565
fixest,"Fast and user-friendly estimation of econometric models with multiple fixed-effects. Includes ordinary least squares (OLS), generalized linear models (GLM) and the negative binomial.
The core of the package is based on optimized parallel C++ code, scaling especially well for large data sets. The method to obtain the fixed-effects coefficients is based on Berge (2018) <https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13>.
Further provides tools to export and view the results of several estimations with intuitive design to cluster the standard-errors.",2020-04-14,Laurent Berge,NA,TRUE,https://github.com/lrberge/fixest,10465,43,2020-06-09T20:43:13Z,243.37209302325581
FixSeqMTP,"Several generalized / directional Fixed Sequence Multiple Testing
Procedures (FSMTPs) are developed for testing a sequence of pre-ordered
hypotheses while controlling the FWER, FDR and Directional Error (mdFWER).
All three FWER controlling generalized FSMTPs are designed under arbitrary
dependence, which allow any number of acceptances. Two FDR controlling
generalized FSMTPs are respectively designed under arbitrary dependence and
independence, which allow more but a given number of acceptances. Two mdFWER
controlling directional FSMTPs are respectively designed under arbitrary
dependence and independence, which can also make directional decisions based
on the signs of the test statistics. The main functions for each proposed
generalized / directional FSMTPs are designed to calculate adjusted p-values
and critical values, respectively. For users' convenience, the functions also
provide the output option for printing decision rules.",2017-01-05,Yalin Zhu,NA,TRUE,https://github.com/allenzhuaz/fixseqmtp,12466,1,2019-08-06T19:11:38Z,12466
FKF,"This is a fast and flexible implementation of the Kalman
filter, which can deal with NAs. It is entirely written in C
and relies fully on linear algebra subroutines contained in
BLAS and LAPACK. Due to the speed of the filter, the fitting of
high-dimensional linear state space models to large datasets
becomes possible. This package also contains a plot function
for the visualization of the state vector and graphical
diagnostics of the residuals.",2020-06-01,Paul Smith,"https://waternumbers.github.io/FKF/,
https://github.com/waternumbers/FKF",TRUE,https://github.com/waternumbers/fkf,47059,0,2020-06-04T22:05:10Z,NA
flacco,"Tools and features for ""Exploratory Landscape Analysis (ELA)"" of
single-objective continuous optimization problems.
Those features are able to quantify rather complex properties, such as the
global structure, separability, etc., of the optimization problems.",2020-03-31,Pascal Kerschke,https://github.com/kerschke/flacco,TRUE,https://github.com/kerschke/flacco,19782,24,2020-03-31T18:14:14Z,824.25
flair,"Facilitates easier formatting and highlighting of R
source code in a R Markdown-based presentation. The main goal of the
package is to allow users to preserve their code creation process
within code chunks, then to specify formatting details for the source
code, such as highlighting of particular syntactical elements.",2020-04-23,Kelly Bodwin,"https://github.com/kbodwin/flair,
https://kbodwin.github.io/flair/index.html",TRUE,https://github.com/kbodwin/flair,807,96,2020-05-04T09:54:40Z,8.40625
FLAME,"Efficient implementations of the algorithms in the
Almost-Matching-Exactly framework for interpretable matching in causal
inference. These algorithms match units via a learned, weighted Hamming
distance that determines which covariates are more important to match on.
For more information and examples, see the Almost-Matching-Exactly website. ",2020-04-15,Vittorio Orlandi,NA,TRUE,https://github.com/vittorioorlandi/flame,6135,3,2020-05-29T15:03:03Z,2045
flamingos,"Provides a variety of original and flexible user-friendly
statistical latent variable models for the simultaneous clustering and
segmentation of heterogeneous functional data (i.e., time series, or more
generally longitudinal data), fitted by unsupervised algorithms, including
EM algorithms. Functional Latent Data Models for Clustering heterogeneous
curves ('FLaMingos') are originally introduced and written in 'Matlab' by
Faicel Chamroukhi
<https://github.com/fchamroukhi?utf8=?&tab=repositories&q=mix&type=public&language=matlab>.
The references are mainly the following ones.
Chamroukhi F. (2010) <https://chamroukhi.com/FChamroukhi-PhD.pdf>.
Chamroukhi F., Same A., Govaert, G. and Aknin P. (2010) <doi:10.1016/j.neucom.2009.12.023>.
Chamroukhi F., Same A., Aknin P. and Govaert G. (2011). <doi:10.1109/IJCNN.2011.6033590>.
Same A., Chamroukhi F., Govaert G. and Aknin, P. (2011) <doi:10.1007/s11634-011-0096-5>.
Chamroukhi F., and Glotin H. (2012) <doi:10.1109/IJCNN.2012.6252818>.
Chamroukhi F., Glotin H. and Same A. (2013) <doi:10.1016/j.neucom.2012.10.030>.
Chamroukhi F. (2015) <https://chamroukhi.com/FChamroukhi-HDR.pdf>.
Chamroukhi F. and Nguyen H-D. (2019) <doi:10.1002/widm.1298>.",2019-08-06,Faicel Chamroukhi,https://github.com/fchamroukhi/FLaMingos,TRUE,https://github.com/fchamroukhi/flamingos,3486,0,2020-01-22T17:39:26Z,NA
flan,Tools for fluctuations analysis of mutant cells counts.,2020-04-29,Adrien Mazoyer,"https://www.r-project.org, https://github.com/AdriMaz/flan",TRUE,https://github.com/adrimaz/flan,12878,1,2020-05-04T12:27:43Z,12878
flashlight,"Shed light on black box machine learning models by
the help of model performance, variable importance, global surrogate
models, ICE profiles, partial dependence (Friedman J. H. (2001)
<doi:10.1214/aos/1013203451>), accumulated local effects (Apley D. W.
(2016) <arXiv:1612.08468>), further effects plots, scatter plots,
interaction strength, and variable contribution breakdown (approximate
SHAP) for single observations (Gosiewska and Biecek (2019)
<arxiv:1903.11420>). All tools are implemented to work with case
weights and allow for stratified analysis. Furthermore, multiple
flashlights can be combined and analyzed together.",2020-04-14,Michael Mayer,https://github.com/mayer79/flashlight,TRUE,https://github.com/mayer79/flashlight,5885,7,2020-05-09T09:18:14Z,840.7142857142857
flexdashboard,"Format for converting an R Markdown document to a grid oriented
dashboard. The dashboard flexibly adapts the size of its components to the
containing web page.",2018-06-29,Richard Iannone,http://rmarkdown.rstudio.com/flexdashboard,TRUE,https://github.com/rstudio/flexdashboard,354573,399,2020-05-27T19:08:08Z,888.6541353383459
flexsurv,"Flexible parametric models for time-to-event data,
including the Royston-Parmar spline model, generalized gamma and
generalized F distributions. Any user-defined parametric
distribution can be fitted, given at least an R function defining
the probability density or hazard. There are also tools for
fitting and predicting from fully parametric multi-state models.",2019-03-18,Christopher Jackson,https://github.com/chjackson/flexsurv-dev,TRUE,https://github.com/chjackson/flexsurv-dev,152129,20,2020-05-16T16:07:15Z,7606.45
flexsurvcure,Flexible parametric mixture and non-mixture cure models for time-to-event data.,2020-04-09,Jordan Amdahl,https://github.com/jrdnmdhl/flexsurvcure,TRUE,https://github.com/jrdnmdhl/flexsurvcure,13689,4,2020-04-09T18:50:15Z,3422.25
flextable,"Create pretty tables for 'HTML', 'Microsoft Word' and 'Microsoft PowerPoint' documents.
Functions are provided to let users create tables, modify and format their content.
It extends package 'officer' that does not contain any feature for customized tabular reporting
and can be used within R markdown documents.",2020-05-15,David Gohel,https://davidgohel.github.io/flextable,TRUE,https://github.com/davidgohel/flextable,267279,203,2020-05-25T16:51:06Z,1316.6453201970444
flightplanning,"Utility functions for creating flight plans for unmanned aerial vehicles (UAV), specially for the Litchi Hub platform. It calculates the flight and camera settings based on the camera specifications, exporting the flight plan CSV format ready to import into Litchi Hub.",2020-03-13,Caio Hamamura,https://github.com/caiohamamura/flightplanning-R.git,TRUE,https://github.com/caiohamamura/flightplanning-r,4219,0,2020-04-29T23:36:56Z,NA
FLightR,"Spatio-temporal locations of an animal are computed
from annotated data with a hidden Markov model via particle
filter algorithm. The package is relatively robust to varying
degrees of shading.
The hidden Markov model is described in Movement Ecology (Rakhimberdiev et al., 2015) <doi:10.1186/s40462-015-0062-5>,
general package description is in the Methods in Ecology and Evolution (Rakhimberdiev et al., 2017) <doi:10.1111/2041-210X.12765>
and package accuracy assessed in the Journal of Avian Biology (Rakhimberdiev et al. 2016) <doi:10.1111/jav.00891>.",2020-05-15,Eldar Rakhimberdiev,https://CRAN.R-project.org/package=FLightR,TRUE,https://github.com/eldarrak/flightr,15732,14,2020-05-15T07:55:58Z,1123.7142857142858
float,"R comes with a suite of utilities for linear algebra with ""numeric""
(double precision) vectors/matrices. However, sometimes single precision (or
less!) is more than enough for a particular task. This package extends R's
linear algebra facilities to include 32-bit float (single precision) data.
Float vectors/matrices have half the precision of their ""numeric""-type
counterparts but are generally faster to numerically operate on, for a
performance vs accuracy trade-off. The internal representation is an S4
class, which allows us to keep the syntax identical to that of base R's.
Interaction between floats and base types for binary operators is generally
possible; in these cases, type promotion always defaults to the higher
precision. The package ships with copies of the single precision 'BLAS' and
'LAPACK', which are automatically built in the event they are not available
on the system.",2020-04-22,Drew Schmidt,https://github.com/wrathematics/float,TRUE,https://github.com/wrathematics/float,54243,35,2020-06-06T17:42:58Z,1549.8
flobr,"Converts files to and from flobs.
A flob is a file that was
read into binary in integer-mode as little endian,
saved as the single element of a named list (where the name is the name
of the original file) and then serialized before being coerced into a blob.
Flobs are useful for writing and reading files to and from databases.",2020-05-15,Joe Thorley,https://github.com/poissonconsulting/flobr,TRUE,https://github.com/poissonconsulting/flobr,7170,6,2020-05-15T17:15:37Z,1195
flora,"Tools to quickly compile taxonomic and distribution data from
the Brazilian Flora 2020.",2020-04-28,Gustavo Carvalho,http://www.github.com/gustavobio/flora,TRUE,https://github.com/gustavobio/flora,20271,14,2020-05-07T13:07:47Z,1447.9285714285713
fma,"All data sets from ""Forecasting: methods and applications"" by Makridakis, Wheelwright & Hyndman (Wiley, 3rd ed., 1998) <https://robjhyndman.com/forecasting/>.",2020-01-14,Rob Hyndman,"https://pkg.robjhyndman.com/fma/,
https://github.com/robjhyndman/fma",TRUE,https://github.com/robjhyndman/fma,1334997,5,2020-03-12T21:18:02Z,266999.4
fmbasics,"Implements basic financial market objects like currencies, currency
pairs, interest rates and interest rate indices. You will be able to use
Benchmark instances of these objects which have been defined using their most
common conventions or those defined by International Swap Dealer Association
(ISDA, <https://www.isda.org>) legal documentation. ",2018-01-06,Imanuel Costigan,"https://github.com/imanuelcostigan/fmbasics,
https://imanuelcostigan.github.io/fmbasics/",TRUE,https://github.com/imanuelcostigan/fmbasics,12377,7,2019-12-03T04:37:24Z,1768.142857142857
fmcmc,"Provides a friendly (flexible) Markov Chain Monte Carlo (MCMC)
framework for implementing Metropolis-Hastings algorithm in a modular way
allowing users to specify automatic convergence checker, personalized
transition kernels, and out-of-the-box multiple MCMC chains using
parallel computing. Most of the methods implemented in this package can
be found in Brooks et al. (2011, ISBN 9781420079425). Among the methods
included, we have: Haario (2001) <doi:10.1007/s11222-011-9269-5>
Adaptive Metropolis, Vihola (2012) <doi:10.1007/s11222-011-9269-5>
Robust Adaptive Metropolis, and Thawornwattana et
al. (2018) <doi:10.1214/17-BA1084> Mirror transition kernels.",2020-04-23,George Vega Yon,https://github.com/USCbiostats/fmcmc,TRUE,https://github.com/uscbiostats/fmcmc,4049,9,2020-04-10T18:40:47Z,449.8888888888889
fmdates,"Implements common date calculations relevant for specifying
the economic nature of financial market contracts that are typically defined
by International Swap Dealer Association (ISDA, <http://www2.isda.org>) legal
documentation. This includes methods to check whether dates are business
days in certain locales, functions to adjust and shift dates and time length
(or day counter) calculations.",2018-01-04,Imanuel Costigan,"https://github.com/imanuelcostigan/fmdates,
https://imanuelcostigan.github.io/fmdates/",TRUE,https://github.com/imanuelcostigan/fmdates,18542,6,2020-03-22T04:38:43Z,3090.3333333333335
FMradio,"Functions that support stable prediction and classification with radiomics data through factor-analytic modeling. For details, see Peeters et al. (2019) <arXiv:1903.11696>.",2019-12-16,Carel F.W. Peeters,https://github.com/CFWP/FMradio,TRUE,https://github.com/cfwp/fmradio,4791,3,2019-12-17T09:28:34Z,1597
fmriqa,"Methods for performing fMRI quality assurance (QA) measurements of
test objects. Heavily based on the fBIRN procedures detailed by Friedman and
Glover (2006) <doi:10.1002/jmri.20583>.",2018-02-19,Martin Wilson,NA,TRUE,https://github.com/martin3141/fmriqa,9779,0,2019-10-23T12:36:28Z,NA
foghorn,"The CRAN check results and where your package stands in the
CRAN submission queue in your R terminal.",2020-05-05,Francois Michonneau,https://github.com/fmichonneau/foghorn,TRUE,https://github.com/fmichonneau/foghorn,362516,47,2020-05-05T09:14:46Z,7713.106382978724
foieGras,"Fits continuous-time random walk and correlated random walk state-space models to filter animal tracking data ('Argos', processed light-level 'geolocation', 'GPS'). Template Model Builder ('TMB') is used for fast estimation. The 'Argos' data can be: (older) least squares-based locations; (newer) Kalman filter-based locations with error ellipse information; or a mixture of both. The models estimate two sets of location states corresponding to: 1) each observation, which are (usually) irregularly timed; and 2) user-specified time intervals (regular or irregular). Latent variable models are provided to estimate move persistence along tracks as an index of behaviour. 'Jonsen I', 'McMahon CR', 'Patterson TA', 'Auger-Methe M', 'Harcourt R', 'Hindell MA', 'Bestley S' (2019) Movement responses to environment: fast inference of variation among southern elephant seals with a mixed effects model. Ecology 100:e02566 <doi:10.1002/ecy.2566>.",2019-10-07,Ian Jonsen,https://cran.r-project.org/package=foieGras,TRUE,https://github.com/ianjonsen/foiegras,6164,5,2020-06-08T11:00:01Z,1232.8
folderfun,"If you find yourself working on multiple different projects in R, you'll want a
series of folders pointing to raw data, processed data, plot results, intermediate table
outputs, etc. This package makes it easier to do that by providing a quick and easy way
to create and use functions for project-level directories.",2019-06-12,Nathan C. Sheffield,http://code.databio.org/folderfun,TRUE,https://github.com/databio/folderfun,6115,6,2019-06-12T11:38:19Z,1019.1666666666666
foodingraph,"Displays a weighted undirected food graph from an adjacency matrix.
Can perform confidence-interval bootstrap inference with mutual information
or maximal information coefficient.
Based on my Master 1 internship at the Bordeaux Population Health center.
References : Reshef et al. (2011) <doi:10.1126/science.1205438>,
Meyer et al. (2008) <doi:10.1186/1471-2105-9-461>,
Liu et al. (2016) <doi:10.1371/journal.pone.0158247>.",2019-10-06,Victor Gasque,https://github.com/vgasque/foodingraph/,TRUE,https://github.com/vgasque/foodingraph,3166,4,2019-10-01T16:12:05Z,791.5
forcats,"Helpers for reordering factor levels (including
moving specified levels to front, ordering by first appearance,
reversing, and randomly shuffling), and tools for modifying factor
levels (including collapsing rare levels into other, 'anonymising',
and manually 'recoding').",2020-03-01,Hadley Wickham,"http://forcats.tidyverse.org, https://github.com/tidyverse/forcats",TRUE,https://github.com/tidyverse/forcats,8233222,366,2020-05-28T13:10:03Z,22495.142076502732
foreach,"Support for the foreach looping construct. Foreach is an
idiom that allows for iterating over elements in a collection,
without the use of an explicit loop counter. This package in
particular is intended to be used for its return value, rather
than for its side effects. In that sense, it is similar to the
standard lapply function, but doesn't require the evaluation
of a function. Using foreach without side effects also
facilitates executing the loop in parallel.",2020-03-30,Hong Ooi,https://github.com/RevolutionAnalytics/foreach,TRUE,https://github.com/revolutionanalytics/foreach,7031329,18,2020-05-07T10:48:24Z,390629.3888888889
forecast,"Methods and tools for displaying and analysing
univariate time series forecasts including exponential smoothing
via state space models and automatic ARIMA modelling.",2020-03-31,Rob Hyndman,"http://pkg.robjhyndman.com/forecast,
https://github.com/robjhyndman/forecast",TRUE,https://github.com/robjhyndman/forecast,6427355,823,2020-05-26T04:44:45Z,7809.665856622114
forecastHybrid,"Convenient functions for ensemble forecasts in R combining
approaches from the 'forecast' package. Forecasts generated from auto.arima(), ets(),
thetaf(), nnetar(), stlm(), tbats(), and snaive() can be combined with equal weights, weights
based on in-sample errors (introduced by Bates & Granger (1969) <doi:10.1057/jors.1969.103>),
or cross-validated weights. Cross validation for time series data with user-supplied models
and forecasting functions is also supported to evaluate model accuracy.",2020-04-02,David Shaub,"https://gitlab.com/dashaub/forecastHybrid,
https://github.com/ellisp/forecastHybrid",TRUE,https://github.com/ellisp/forecasthybrid,94217,65,2020-04-01T16:41:20Z,1449.4923076923078
forecastML,"The purpose of 'forecastML' is to simplify the process of multi-step-ahead forecasting with standard machine learning algorithms. 'forecastML' supports lagged, dynamic, static, and grouping features for modeling single and grouped numeric or factor/sequence time series. In addition, simple wrapper functions are used to support model-building with most R packages. This approach to forecasting is inspired by Bergmeir, Hyndman, and Koo's (2018) paper ""A note on the validity of cross-validation for evaluating autoregressive time series prediction"" <doi:10.1016/j.csda.2017.11.003>.",2020-05-07,Nickalus Redell,https://github.com/nredell/forecastML/,TRUE,https://github.com/nredell/forecastml,6311,73,2020-06-06T21:52:56Z,86.45205479452055
foreSIGHT,"A tool to create hydroclimate scenarios, stress test systems and visualize system performance in scenario-neutral climate change impact assessments. Scenario-neutral approaches 'stress-test' the performance of a modelled system by applying a wide range of plausible hydroclimate conditions (see Brown & Wilby (2012) <doi:10.1029/2012EO410001> and Prudhomme et al. (2010) <doi:10.1016/j.jhydrol.2010.06.043>). These approaches allow the identification of hydroclimatic variables that affect the vulnerability of a system to hydroclimate variation and change. This tool enables the generation of perturbed time series using a range of approaches including simple scaling of observed time series (e.g. Culley et al. (2016) <doi:10.1002/2015WR018253>) and stochastic simulation of perturbed time series via an inverse approach (see Guo et al. (2018) <doi:10.1016/j.jhydrol.2016.03.025>). It incorporates a number of stochastic weather models to generate hydroclimate variables on a daily basis (e.g. precipitation, temperature, potential evapotranspiration) and allows a variety of different hydroclimate variable properties, herein called attributes, to be perturbed. Options are included for the easy integration of existing system models both internally in R and externally for seamless 'stress-testing'. A suite of visualization options for the results of a scenario-neutral analysis (e.g. plotting performance spaces and overlaying climate projection information) are also included. As further developments in scenario-neutral approaches occur the tool will be updated to incorporate these advances.",2019-12-04,Bree Bennett,NA,TRUE,https://github.com/bsbennett/foresight,9253,0,2019-12-05T03:41:52Z,NA
forestControl,"Approximate false positive rate control in selection frequency for
random forest using the methods described by Ender Konukoglu and Melanie Ganz (2014) <arXiv:1410.2838>.
Methods for calculating the selection frequency threshold at false positive rates
and selection frequency false positive rate feature selection.",2019-11-18,Tom Wilson,https://github.com/aberHRML/forestControl,TRUE,https://github.com/aberhrml/forestcontrol,10625,2,2019-11-18T09:51:17Z,5312.5
forestplot,"A forest plot that allows for
multiple confidence intervals per row,
custom fonts for each text element,
custom confidence intervals,
text mixed with expressions, and more.
The aim is to extend the use of forest plots beyond meta-analyses.
This is a more general version of the original 'rmeta' package's forestplot()
function and relies heavily on the 'grid' package.",2019-06-24,Max Gordon,http://gforge.se/packages/,TRUE,https://github.com/gforge/forestplot,136102,20,2020-03-07T08:56:43Z,6805.1
forestr,"Provides a toolkit for calculating forest and canopy structural complexity metrics from
terrestrial LiDAR (light detection and ranging). References: Atkins et al. 2018 <doi:10.1111/2041-210X.13061>; Hardiman et al. 2013 <doi:10.3390/f4030537>;
Parker et al. 2004 <doi:10.1111/j.0021-8901.2004.00925.x>.",2020-04-14,Jeff Atkins,https://github.com/atkinsjeff/forestr,TRUE,https://github.com/atkinsjeff/forestr,7988,13,2020-06-05T01:38:13Z,614.4615384615385
ForestTools,"Provides tools for analyzing remotely sensed forest data, including functions for detecting treetops from canopy models (Popescu & Wynne, 2004), outlining tree crowns (Meyer & Beucher, 1990) and generating spatial statistics.",2020-05-06,Andrew Plowright,https://github.com/andrew-plowright/ForestTools,TRUE,https://github.com/andrew-plowright/foresttools,17164,15,2020-05-06T22:29:55Z,1144.2666666666667
formatR,"Provides a function tidy_source() to format R source code. Spaces
and indent will be added to the code automatically, and comments will be
preserved under certain conditions, so that R code will be more
human-readable and tidy. There is also a Shiny app as a user interface in
this package (see tidy_app()).",2019-06-11,Yihui Xie,https://github.com/yihui/formatR,TRUE,https://github.com/yihui/formatr,4172836,174,2019-11-18T05:14:50Z,23981.816091954024
formattable,"Provides functions to create formattable vectors and data frames.
'Formattable' vectors are printed with text formatting, and formattable
data frames are printed with multiple types of formatting in HTML
to improve the readability of data presented in tabular form rendered in
web pages.",2016-08-05,Kun Ren,"https://renkun.me/formattable,
https://github.com/renkun-ken/formattable",TRUE,https://github.com/renkun-ken/formattable,415201,547,2020-04-25T12:18:12Z,759.0511882998172
formulaic,"Many statistical models and analyses in R are implemented through formula objects. The formulaic package creates a unified approach for programmatically and dynamically generating formula objects. Users may specify the outcome and inputs of a model directly, search for variables to include based upon naming patterns, incorporate interactions, and identify variables to exclude. A wide range of quality checks are implemented to identify issues such as misspecified variables, duplication, a lack of contrast in the inputs, and a large number of levels in categorical data. Variables that do not meet these quality checks can be automatically excluded from the model. These issues are documented and reported in a manner that provides greater accountability and useful information to guide an investigation of the data.",2020-05-04,David Shilane,https://dachosen1.github.io/formulaic/index.html,TRUE,https://github.com/dachosen1/formulaic,5092,3,2020-05-07T19:51:27Z,1697.3333333333333
formulops,"Perform mathematical operations on R formula (add, subtract, multiply, etc.) and substitute parts of formula.",2020-02-22,Bill Denney,https://github.com/billdenney/formulops,TRUE,https://github.com/billdenney/formulops,1377,0,2020-02-15T19:52:17Z,NA
forrel,"Forensic applications of pedigree analysis, including likelihood ratios
for relationship testing, general relatedness inference, marker simulation, and
power analysis. General computation of exclusion powers is based on Egeland et
al. (2014) <doi:10.1016/j.fsigen.2013.05.001>. Several functions deal
specifically with family reunion cases, implementing and developing ideas from
Kling et al. (2017) <doi:10.1016/j.fsigen.2017.08.006>. A novelty of 'forrel'
is the ability to model background inbreeding in forensic pedigree computations.
This can have significant impact in applications, as exemplified in Vigeland
and Egeland (2019) <doi:10.1016/j.fsigss.2019.10.175>. 'forrel' is part of the
ped suite, a collection of packages for pedigree analysis. In particular,
'forrel' imports 'pedtools' for creating and manipulating pedigrees and markers,
'pedprobr' for likelihood computations, and 'pedmut' for mutation modelling.
Pedigree data may be created from scratch, or loaded from text files. Data
import from the 'Familias' software (Egeland et al. (2000)
<doi:10.1016/S0379-0738(00)00147-X>) is supported. ",2020-03-22,Magnus Dehli Vigeland,https://github.com/magnusdv/forrel,TRUE,https://github.com/magnusdv/forrel,2213,5,2020-06-05T11:47:55Z,442.6
forwards,"Anonymized data from surveys conducted by Forwards <https://forwards.github.io/>, the R Foundation task force on women and other under-represented groups. Currently, a single data set of responses to a survey of attendees at useR! 2016 <https://www.r-project.org/useR-2016/>, the R user conference held at Stanford University, Stanford, California, USA, June 27 - June 30 2016.",2019-07-30,Heather Turner,https://github.com/forwards/forwards,TRUE,https://github.com/forwards/forwards,11127,1,2019-07-30T20:42:17Z,11127
foto,"The Fourier Transform Textural Ordination method
uses a principal component analysis on radially averaged
two dimensional Fourier spectra to characterize image texture.",2019-01-17,Koen Hufkens,https://github.com/khufkens/foto,TRUE,https://github.com/khufkens/foto,5406,3,2019-07-16T10:22:31Z,1802
fourPNO,"Estimate Barton & Lord's (1981) <doi:10.1002/j.2333-8504.1981.tb01255.x>
four parameter IRT model with lower and upper asymptotes using Bayesian
formulation described by Culpepper (2016) <doi:10.1007/s11336-015-9477-6>.",2019-09-24,Steven Andrew Culpepper,https://github.com/tmsalab/fourPNO,TRUE,https://github.com/tmsalab/fourpno,19800,1,2019-11-05T22:49:31Z,19800
fpCompare,"Comparisons of floating point numbers are problematic due to errors
associated with the binary representation of decimal numbers.
Despite being aware of these problems, people still use numerical methods
that fail to account for these and other rounding errors (this pitfall is
the first to be highlighted in Circle 1 of Burns (2012)
'The R Inferno' <http://www.burns-stat.com/pages/Tutor/R_inferno.pdf>).
This package provides new relational operators useful for performing
floating point number comparisons with a set tolerance.",2019-09-10,Alex M Chubaty,https://github.com/PredictiveEcology/fpCompare,TRUE,https://github.com/predictiveecology/fpcompare,64644,4,2019-09-06T19:24:26Z,16161
fplyr,"Read and process a large delimited file block by
block. A block consists of all the contiguous rows that have the same value
in the first field. The result can be returned as a list or a data.table,
or even directly printed to an output file.",2020-05-06,Federico Marotta,https://github.com/fmarotta/fplyr,TRUE,https://github.com/fmarotta/fplyr,1581,0,2020-05-06T20:15:20Z,NA
fpp3,"
All data sets required for the examples and exercises in the book
""Forecasting: principles and practice"" by Rob J Hyndman and George Athanasopoulos
<http://OTexts.org/fpp3/>. All packages required to run the examples are also
loaded.",2020-06-07,Rob Hyndman,"https://github.com/robjhyndman/fpp3-package,
https://OTexts.org/fpp3/",TRUE,https://github.com/robjhyndman/fpp3-package,12704,30,2020-06-07T05:06:49Z,423.46666666666664
fracdiff,"Maximum likelihood estimation of the parameters of a fractionally
differenced ARIMA(p,d,q) model (Haslett and Raftery, Appl.Statistics, 1989);
including inference and basic methods. Some alternative algorithms to estimate ""H"".",2020-01-24,Martin Maechler,https://github.com/mmaechler/fracdiff,TRUE,https://github.com/mmaechler/fracdiff,3239007,5,2020-01-20T17:01:51Z,647801.4
FractalParameterEstimation,"The parameters p and q are estimated with the aid of a randomized Sierpinski Carpet which is built on a [p-p-p-q]-model. Thereby, for three times a simulation with a p-value and once with a q-value is assumed. Hence, these parameters are estimated and displayed. Moreover, functions for simulating random Sierpinski-Carpets with constant and variable probabilities are included. For more details on the method please see Hermann et al. (2015) <doi:10.1002/sim.6497>. ",2019-07-10,Philipp Hermann,NA,TRUE,https://github.com/phhermann/fractalparameterestimation,19845,0,2019-07-10T12:50:04Z,NA
frailtyEM,"Contains functions for fitting shared frailty models with a semi-parametric
baseline hazard with the Expectation-Maximization algorithm. Supported data formats
include clustered failures with left truncation and recurrent events in gap-time
or Andersen-Gill format. Several frailty distributions, such as the the gamma, positive stable
and the Power Variance Family are supported. ",2019-09-22,Theodor Adrian Balan,https://github.com/tbalan/frailtyEM,TRUE,https://github.com/tbalan/frailtyem,21304,1,2019-09-19T20:04:47Z,21304
freealg,The free algebra in R; multivariate polynomials with non-commuting indeterminates.,2019-09-23,Robin K. S. Hankin,https://github.com/RobinHankin/freealg.git,TRUE,https://github.com/robinhankin/freealg,3189,1,2020-03-14T22:16:53Z,3189
freegroup,"Provides functionality for manipulating elements of the free group (juxtaposition is represented by a plus) including inversion, multiplication by a scalar, group-theoretic power operation, and Tietze forms. The package is fully vectorized.",2018-09-25,Robin K. S. Hankin,https://github.com/RobinHankin/freegroup.git,TRUE,https://github.com/robinhankin/freegroup,9187,0,2020-05-01T22:46:49Z,NA
freesurfer,"Wrapper functions that interface with 'Freesurfer'
<https://surfer.nmr.mgh.harvard.edu/>, a powerful and
commonly-used 'neuroimaging'
software, using system commands. The goal is to be able to interface with
'Freesurfer' completely in R, where you pass R objects of class 'nifti',
implemented by package 'oro.nifti', and the function executes an 'Freesurfer'
command and returns an R object of class 'nifti' or necessary output.",2020-03-30,John Muschelli,NA,TRUE,https://github.com/muschellij2/freesurfer,13431,5,2020-04-29T23:35:53Z,2686.2
freesurferformats,"Provides functions to read and write data from neuroimaging files in 'FreeSurfer' <http://freesurfer.net/> binary formats. This includes, but is not limited to, the following file formats: 1) MGH/MGZ format files, which can contain multi-dimensional images or other data. Typically they contain time-series of three-dimensional brain scans acquired by magnetic resonance imaging (MRI). They can also contain vertex-wise measures of surface morphometry data. The MGH format is named after the Massachusetts General Hospital, and the MGZ format is a compressed version of the same format. 2) 'FreeSurfer' morphometry data files in binary 'curv' format. These contain vertex-wise surface measures, i.e., one scalar value for each vertex of a brain surface mesh. These are typically values like the cortical thickness or brain surface area at each vertex. 3) Annotation file format. This contains a brain surface parcellation derived from a cortical atlas. 4) Surface file format. Contains a brain surface mesh, given by a list of vertices and a list of faces.",2020-05-13,Tim Schäfer,https://github.com/dfsp-spirit/freesurferformats,TRUE,https://github.com/dfsp-spirit/freesurferformats,8488,6,2020-06-09T07:51:52Z,1414.6666666666667
frequency,"Generate 'SPSS'/'SAS' styled frequency tables. Frequency tables are
generated with variable and value label attributes where applicable with optional
html output to quickly examine datasets.",2020-04-05,Alistair Wilcox,https://github.com/wilcoxa/frequency,TRUE,https://github.com/wilcoxa/frequency,13974,2,2020-04-05T09:26:16Z,6987
frequencyConnectedness,"Accompanies a paper (Barunik, Krehlik (2018) <doi:10.1093/jjfinec/nby001>) dedicated to spectral decomposition of connectedness measures and their interpretation. We implement all the developed estimators as well as the historical counterparts. For more information, see the help or GitHub page (<https://github.com/tomaskrehlik/frequencyConnectedness>) for relevant information.",2020-02-16,Tomas Krehlik,https://github.com/tomaskrehlik/frequencyConnectedness,TRUE,https://github.com/tomaskrehlik/frequencyconnectedness,14161,24,2020-02-04T21:58:32Z,590.0416666666666
fresh,"Customize 'Bootstrap' and 'Bootswatch' themes, like colors, fonts, grid layout,
to use in 'Shiny' applications, 'rmarkdown' documents and 'flexdashboard'.",2020-05-29,Victor Perrier,https://github.com/dreamRs/fresh,TRUE,https://github.com/dreamrs/fresh,3773,130,2020-05-29T14:20:54Z,29.023076923076925
FRK,"Fixed Rank Kriging is a tool for spatial/spatio-temporal modelling
and prediction with large datasets. The approach, discussed in Cressie and
Johannesson (2008) <DOI:10.1111/j.1467-9868.2007.00633.x>, decomposes the field,
and hence the covariance function, using a fixed set of n basis functions,
where n is typically much smaller than the number of data points (or polygons) m.
The method naturally allows for non-stationary, anisotropic covariance functions
and the use of observations with varying support (with known error variance). The
projected field is a key building block of the Spatial Random Effects (SRE) model,
on which this package is based. The package FRK provides helper functions to model,
fit, and predict using an SRE with relative ease.",2020-04-01,Andrew Zammit-Mangion,NA,TRUE,https://github.com/andrewzm/frk,27033,24,2020-05-22T05:20:07Z,1126.375
frost,"A compilation of empirical methods used by farmers and agronomic engineers to predict the minimum temperature to detect a frost night. These functions use variables such as environmental temperature, relative humidity, and dew point. See <http://sedici.unlp.edu.ar/handle/10915/72102> <http://www.fao.org/docrep/008/y7223e/y7223e0b.htm#bm11.8> for details.",2019-04-12,Ana Laura Diedrichs,https://github.com/anadiedrichs/frost,TRUE,https://github.com/anadiedrichs/frost,4911,2,2019-10-15T20:10:12Z,2455.5
fs,"A cross-platform interface to file system operations, built on
top of the 'libuv' C library.",2020-04-04,Jim Hester,"http://fs.r-lib.org, https://github.com/r-lib/fs",TRUE,https://github.com/r-lib/fs,10063013,247,2020-04-28T12:59:42Z,40740.94331983806
FSA,"A variety of simple fish stock assessment methods.
Detailed vignettes are available on the fishR website <http://derekogle.com/fishR/>.",2020-03-09,Derek Ogle,https://github.com/droglenc/FSA,TRUE,https://github.com/droglenc/fsa,225619,29,2020-06-01T00:54:07Z,7779.9655172413795
FSAdata,The datasets to support the Fish Stock Assessment ('FSA') package.,2019-05-18,Derek Ogle,"http://derekogle.com/fishR/, https://github.com/droglenc/FSAdata",TRUE,https://github.com/droglenc/fsadata,39700,4,2020-03-17T22:55:17Z,9925
fsbrain,"Provides high-level access to 'FreeSurfer' <http://freesurfer.net/> neuroimaging data on the level of subjects and groups. Load morphometry data, surfaces and brain parcellations based on atlases. Mask data using labels, load data for specific atlas regions only, and visualize data and results directly in 'R'.",2020-05-27,Tim Schäfer,https://github.com/dfsp-spirit/fsbrain,TRUE,https://github.com/dfsp-spirit/fsbrain,2723,6,2020-06-04T07:05:54Z,453.8333333333333
FSelectorRcpp,"'Rcpp' (free of 'Java'/'Weka') implementation of 'FSelector' entropy-based feature selection
algorithms based on an MDL discretization (Fayyad U. M., Irani K. B.: Multi-Interval Discretization of Continuous-Valued Attributes for Classification Learning.
In 13'th International Joint Conference on Uncertainly in Artificial Intelligence (IJCAI93), pages 1022-1029, Chambery, France, 1993.) <https://www.ijcai.org/Proceedings/93-2/Papers/022.pdf>
with a sparse matrix support.",2020-01-24,Zygmunt Zawadzki,http://mi2-warsaw.github.io/FSelectorRcpp/,TRUE,https://github.com/mi2-warsaw/fselectorrcpp,43607,28,2020-02-25T17:37:53Z,1557.392857142857
fslr,"Wrapper functions that interface with 'FSL'
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/>, a powerful and commonly-used 'neuroimaging'
software, using system commands. The goal is to be able to interface with 'FSL'
completely in R, where you pass R objects of class 'nifti', implemented by
package 'oro.nifti', and the function executes an 'FSL' command and returns an R
object of class 'nifti' if desired.",2019-08-05,John Muschelli,NA,TRUE,https://github.com/muschellij2/fslr,30912,30,2020-03-28T15:10:19Z,1030.4
fssemR,"An optimizer of Fused-Sparse Structural Equation Models, which is
the state of the art jointly fused sparse maximum likelihood function
for structural equation models proposed by Xin Zhou and Xiaodong Cai (2018
<doi:10.1101/466623>).",2019-12-04,Xin Zhou,https://github.com/Ivis4ml/fssemR,TRUE,https://github.com/ivis4ml/fssemr,6013,2,2020-01-10T20:27:01Z,3006.5
fst,"Multithreaded serialization of compressed data frames using the
'fst' format. The 'fst' format allows for random access of stored data and
compression with the LZ4 and ZSTD compressors created by Yann Collet. The ZSTD
compression library is owned by Facebook Inc.",2020-04-01,Mark Klik,https://fstpackage.github.io,TRUE,https://github.com/fstpackage/fst,298522,459,2020-06-02T21:47:31Z,650.3747276688454
fstcore,"The 'fstlib' library provides multithreaded serialization of compressed data frames using the
'fst' format. The 'fst' format allows for random access of stored data and compression with the 'LZ4' and 'ZSTD'
compressors.",2020-05-04,Mark Klik,https://fstpackage.github.io/fstcore,TRUE,https://github.com/fstpackage/fst,594,459,2020-06-02T21:47:31Z,1.2941176470588236
ftExtra,"Build display tables easily by extending the
functionality of the 'flextable' package. Features include spanning
headers, grouping rows, parsing markdown texts and so on.",2020-03-20,Atsushi Yasumoto,https://github.com/atusy/ftExtra,TRUE,https://github.com/atusy/ftextra,1392,24,2020-04-05T23:36:35Z,58
fueleconomy,"Fuel economy data from the EPA, 1985-2015,
conveniently packaged for consumption by R users.",2020-03-23,Hadley Wickham,https://github.com/hadley/fueleconomy,TRUE,https://github.com/hadley/fueleconomy,35759,16,2020-03-23T16:04:06Z,2234.9375
fun,"This is a collection of R games and other funny stuff, such as the
classic Mine sweeper and sliding puzzles.",2018-12-05,Yihui Xie,https://github.com/yihui/fun,TRUE,https://github.com/yihui/fun,48780,36,2020-02-06T16:36:45Z,1355
funchir,YACFP (Yet Another Convenience Function Package). get_age() is a fast & accurate tool for measuring fractional years between two dates. abbr_to_colClass() is a much more concise way of feeding many types to a colClass argument in a data reader. stale_package_check() tries to identify any library() calls to unused packages.,2020-04-13,Michael Chirico,https://github.com/MichaelChirico/funchir,TRUE,https://github.com/michaelchirico/funchir,10934,3,2020-04-12T15:45:08Z,3644.6666666666665
funData,"S4 classes for univariate and multivariate functional data with
utility functions. See <doi:10.18637/jss.v093.i05> for a detailed description
of the package functionalities and its interplay with the MFPCA package for
multivariate functional principal component analysis
<https://CRAN.R-project.org/package=MFPCA>. ",2020-04-25,Clara Happ-Kurz,https://github.com/ClaraHapp/funData,TRUE,https://github.com/clarahapp/fundata,20477,5,2020-04-25T11:21:37Z,4095.4
funModeling,"Around 10% of almost any predictive modeling project is spent in predictive modeling, 'funModeling' and the book Data Science Live Book (<https://livebook.datascienceheroes.com/>) are intended to cover remaining 90%: data preparation, profiling, selecting best variables 'dataViz', assessing model performance and other functions.",2019-10-09,Pablo Casas,https://livebook.datascienceheroes.com,TRUE,https://github.com/pablo14/funmodeling,97672,69,2020-04-06T21:00:49Z,1415.536231884058
FunnelPlotR,"An implementation of the Spiegelhalter (2005) <doi:10.1002/sim.1970> Funnel plots for reporting standardised ratios, with overdispersion adjustment.",2020-02-25,Chris Mainey,"https://chrismainey.github.io/FunnelPlotR,
https://github.com/chrismainey/FunnelPlotR",TRUE,https://github.com/chrismainey/funnelplotr,5038,16,2020-05-18T13:56:24Z,314.875
funrar,"Computes functional rarity indices as proposed by Violle et al.
(2017) <doi:10.1016/j.tree.2017.02.002>. Various indices can be computed
using both regional and local information. Functional Rarity combines both
the functional aspect of rarity as well as the extent aspect of rarity.
'funrar' is presented in Grenié et al. (2017) <doi:10.1111/ddi.12629>.",2020-04-20,Matthias Grenié,https://rekyt.github.io/funrar/,TRUE,https://github.com/rekyt/funrar,23052,12,2020-04-20T14:52:53Z,1921
future,"The purpose of this package is to provide a lightweight and
unified Future API for sequential and parallel processing of R
expression via futures. The simplest way to evaluate an expression
in parallel is to use `x %<-% { expression }` with `plan(multiprocess)`.
This package implements sequential, multicore, multisession, and
cluster futures. With these, R expressions can be evaluated on the
local machine, in parallel a set of local machines, or distributed
on a mix of local and remote machines.
Extensions to this package implement additional backends for
processing futures via compute cluster schedulers etc.
Because of its unified API, there is no need to modify any code in order
switch from sequential on the local machine to, say, distributed
processing on a remote compute cluster.
Another strength of this package is that global variables and functions
are automatically identified and exported as needed, making it
straightforward to tweak existing code to make use of futures.",2020-04-18,Henrik Bengtsson,https://github.com/HenrikBengtsson/future,TRUE,https://github.com/henrikbengtsson/future,1089535,648,2020-06-03T15:43:56Z,1681.3811728395062
future.apply,"Implementations of apply(), by(), eapply(), lapply(), Map(), mapply(), replicate(), sapply(), tapply(), and vapply() that can be resolved using any future-supported backend, e.g. parallel on the local machine or distributed on a compute cluster. These future_*apply() functions come with the same pros and cons as the corresponding base-R *apply() functions but with the additional feature of being able to be processed via the future framework.",2020-04-17,Henrik Bengtsson,https://github.com/HenrikBengtsson/future.apply,TRUE,https://github.com/henrikbengtsson/future.apply,329703,139,2020-05-13T01:15:51Z,2371.9640287769785
future.BatchJobs,"Implementation of the Future API on top of the 'BatchJobs' package.
This allows you to process futures, as defined by the 'future' package,
in parallel out of the box, not only on your local machine or ad-hoc
cluster of machines, but also via high-performance compute ('HPC') job
schedulers such as 'LSF', 'OpenLava', 'Slurm', 'SGE', and 'TORQUE' / 'PBS',
e.g. 'y <- future.apply::future_lapply(files, FUN = process)'.
NOTE: The 'BatchJobs' package is deprecated in favor of the 'batchtools'
package. Because of this, it is recommended to use the 'future.batchtools'
package instead of this package.",2019-09-29,Henrik Bengtsson,https://github.com/HenrikBengtsson/future.BatchJobs,TRUE,https://github.com/henrikbengtsson/future.batchjobs,23305,8,2020-03-21T03:03:26Z,2913.125
future.batchtools,"Implementation of the Future API on top of the 'batchtools' package.
This allows you to process futures, as defined by the 'future' package,
in parallel out of the box, not only on your local machine or ad-hoc
cluster of machines, but also via high-performance compute ('HPC') job
schedulers such as 'LSF', 'OpenLava', 'Slurm', 'SGE', and 'TORQUE' / 'PBS',
e.g. 'y <- future.apply::future_lapply(files, FUN = process)'.",2020-04-14,Henrik Bengtsson,https://github.com/HenrikBengtsson/future.batchtools,TRUE,https://github.com/henrikbengtsson/future.batchtools,49975,64,2020-05-06T00:29:17Z,780.859375
future.callr,"Implementation of the Future API on top of the 'callr' package. This allows you to process futures, as defined by the 'future' package, in parallel out of the box, on your local (Linux, macOS, Windows, ...) machine. Contrary to backends relying on the 'parallel' package (e.g. 'future::multisession'), the 'callr' backend provided here can run more than 125 parallel R processes.",2019-09-28,Henrik Bengtsson,https://github.com/HenrikBengtsson/future.callr,TRUE,https://github.com/henrikbengtsson/future.callr,22883,35,2020-03-28T02:58:39Z,653.8
future.tests,"Backends implementing the 'Future' API, as defined by the 'future' package, should use the tests provided by this package to validate that they meet the minimal requirements of the 'Future' API. The tests can be performed easily from within R or from outside of R from the command line making it easy to include them package tests and in Continuous Integration (CI) pipelines.",2020-03-20,Henrik Bengtsson,https://github.com/HenrikBengtsson/future.tests,TRUE,https://github.com/henrikbengtsson/future.tests,1401,9,2020-05-02T22:32:54Z,155.66666666666666
FuzzyAHP,"Calculation of AHP (Analytic Hierarchy Process -
<http://en.wikipedia.org/wiki/Analytic_hierarchy_process>)
with classic and fuzzy weights based on Saaty's pairwise
comparison method for determination of weights.",2019-12-06,Jan Caha,http://github.com/JanCaha/FuzzyAHP/,TRUE,https://github.com/jancaha/fuzzyahp,17281,3,2019-12-10T07:09:58Z,5760.333333333333
fuzzyjoin,"Join tables together based not on whether columns
match exactly, but whether they are similar by some comparison.
Implementations include string distance and regular expression
matching.",2020-05-15,David Robinson,https://github.com/dgrtwo/fuzzyjoin,TRUE,https://github.com/dgrtwo/fuzzyjoin,89734,487,2020-05-16T02:20:43Z,184.258726899384
fxtract,"An R6 implementation for calculating features from grouped data.
The output will be one row for each group.
This functionality is often needed in the feature extraction process of machine learning problems.
Very large datasets are supported, since data is only read into RAM when needed.
Calculation can be done in parallel and the process can be monitored.
Global error handling is supported.
Results are available in one final dataframe.",2020-06-05,Quay Au,https://github.com/QuayAu/fxtract,TRUE,https://github.com/quayau/fxtract,6016,16,2020-06-05T13:49:28Z,376
GADMTools,"Manipulate, assemble, export <https://gadm.org/> maps. Create 'choropleth', 'isopleth', dots plot, proportional dots,
dot-density and more.",2020-03-05,Jean Pierre Decorps,https://github.com/IamKDO/GADMTools,TRUE,https://github.com/iamkdo/gadmtools,22900,1,2020-03-06T17:43:13Z,22900
gambin,"Fits unimodal and multimodal gambin distributions to species-abundance distributions
from ecological data, as in in Matthews et al. (2014) <DOI:10.1111/ecog.00861>.
'gambin' is short for 'gamma-binomial'. The main function is fit_abundances(), which estimates
the 'alpha' parameter(s) of the gambin distribution using maximum likelihood. Functions are
also provided to generate the gambin distribution and for calculating likelihood statistics.",2020-06-02,Thomas Matthews,https://github.com/txm676/gambin/,TRUE,https://github.com/txm676/gambin,26957,4,2020-06-02T11:38:07Z,6739.25
gamCopula,"Implementation of various inference and simulation tools to
apply generalized additive models to bivariate dependence structures and
non-simplified vine copulas.",2020-02-05,Thibault Vatter,https://github.com/tvatter/gamCopula,TRUE,https://github.com/tvatter/gamcopula,13060,4,2019-12-04T20:44:20Z,3265
gameofthrones,Implementation of the characteristic palettes from the TV show 'Game of Thrones'.,2020-02-23,Alejandro Jimenez Rico,https://github.com/aljrico/gameofthrones,TRUE,https://github.com/aljrico/gameofthrones,46822,53,2020-02-23T09:26:17Z,883.433962264151
gamesGA,"Finds adaptive strategies for sequential symmetric
games using a genetic algorithm. Currently, any symmetric two by two matrix
is allowed, and strategies can remember the history of an opponent's play
from the previous three rounds of moves in iterated interactions between
players. The genetic algorithm returns a list of adaptive strategies given
payoffs, and the mean fitness of strategies in each generation.",2020-03-01,A. Bradley Duthie,https://bradduthie.github.io/gamesGA/,TRUE,https://github.com/bradduthie/gamesga,9470,1,2020-03-01T19:37:27Z,9470
gamlr,"The gamma lasso algorithm provides regularization paths corresponding to a range of non-convex cost functions between L0 and L1 norms. As much as possible, usage for this package is analogous to that for the glmnet package (which does the same thing for penalization between L1 and L2 norms). For details see: Taddy (2017 JCGS), 'One-Step Estimator Paths for Concave Regularization', <arXiv:1308.5623>.",2020-06-08,Matt Taddy,http://github.com/TaddyLab/gamlr,TRUE,https://github.com/taddylab/gamlr,50094,14,2020-06-02T22:29:18Z,3578.1428571428573
gap,"It is designed as an integrated package for genetic data
analysis of both population and family data. Currently, it
contains functions for sample size calculations of both
population-based and family-based designs, probability of
familial disease aggregation, kinship calculation, statistics
in linkage analysis, and association analysis involving genetic
markers including haplotype analysis with or without environmental
covariates.",2020-02-02,"Jing Hua Zhao and colleagues with inputs from Kurt Hornik and
Brian Ripley",https://github.com/jinghuazhao/R,TRUE,https://github.com/jinghuazhao/r,131042,5,2020-04-28T11:28:17Z,26208.4
GAparsimony,"Methodology that combines feature selection, model tuning, and parsimonious model selection with Genetic Algorithms (GA) proposed in {Martinez-de-Pison} (2015) <DOI:10.1016/j.asoc.2015.06.012>. To this objective, a novel GA selection procedure is introduced based on separate cost and complexity evaluations.",2019-12-03,F.J. Martinez-de-Pison,https://github.com/jpison/GAparsimony,TRUE,https://github.com/jpison/gaparsimony,10398,3,2020-01-28T12:24:17Z,3466
garchx,"Flexible and robust estimation and inference of generalised autoregressive conditional heteroscedasticity (GARCH) models with covariates based on the results by Francq and Thieu (2018) <doi:10.1017/S0266466617000512>. Coefficients can straightforwardly be set to zero by omission, and quasi maximum likelihood methods ensure estimates are generally consistent and inference valid, even when the standardised innovations are non-normal and/or dependent over time.",2020-05-10,Genaro Sucarrat,"https://CRAN.R-project.org/package=garchx,
http://www.sucarrat.net/",TRUE,https://github.com/gsucarrat/garchx,1465,0,2020-05-10T14:08:02Z,NA
gargle,"Provides utilities for working with Google APIs
<https://developers.google.com/apis-explorer>. This includes
functions and classes for handling common credential types and for
preparing, executing, and processing HTTP requests.",2020-05-06,Jennifer Bryan,"https://gargle.r-lib.org, https://github.com/r-lib/gargle",TRUE,https://github.com/r-lib/gargle,1981912,76,2020-05-28T15:28:28Z,26077.78947368421
GAS,"Simulate, estimate and forecast using univariate and multivariate GAS models
as described in Ardia et al. (2019) <doi:10.18637/jss.v088.i06>.",2020-04-27,Leopoldo Catania,https://github.com/LeopoldoCatania/GAS,TRUE,https://github.com/leopoldocatania/gas,40027,18,2020-04-26T18:30:24Z,2223.722222222222
gaselect,"Provides a genetic algorithm for finding variable
subsets in high dimensional data with high prediction performance. The
genetic algorithm can use ordinary least squares (OLS) regression models or
partial least squares (PLS) regression models to evaluate the prediction
power of variable subsets. By supporting different cross-validation
schemes, the user can fine-tune the tradeoff between speed and quality of
the solution.",2020-02-06,David Kepplinger,https://github.com/dakep/gaselect,TRUE,https://github.com/dakep/gaselect,16500,1,2020-02-07T15:48:54Z,16500
gastempt,"Fits gastric emptying time series from MRI or scintigraphic measurements
using nonlinear mixed-model population fits with 'nlme' and Bayesian methods with
Stan; computes derived parameters such as t50 and AUC.",2020-05-01,Dieter Menne,http://github.com/dmenne/gastempt,TRUE,https://github.com/dmenne/gastempt,9327,2,2020-05-01T09:38:47Z,4663.5
gaussfacts,"Display a random fact about Carl Friedrich Gauss
based the on collection curated by Mike Cavers via the
<http://gaussfacts.com> site.",2016-08-03,Dirk Eddelbuettel,NA,TRUE,https://github.com/eddelbuettel/gaussfacts,13696,2,2020-05-03T23:12:04Z,6848
gbfs,"Supplies a set of functions to interface with bikeshare data
following the General Bikeshare Feed Specification, allowing users to query
and accumulate tidy datasets for specified cities/bikeshare programs.",2020-06-07,Simon P. Couch,https://github.com/simonpcouch/gbfs,TRUE,https://github.com/simonpcouch/gbfs,10253,13,2020-06-07T19:34:37Z,788.6923076923077
GCalignR,"Aligns peak based on peak retention times and matches homologous peaks
across samples. The underlying alignment procedure comprises three sequential steps.
(1) Full alignment of samples by linear transformation of retention times to
maximise similarity among homologous peaks (2) Partial alignment of peaks within
a user-defined retention time window to cluster homologous peaks (3) Merging rows
that are likely representing homologous substances (i.e. no sample shows peaks in
both rows and the rows have similar retention time means). The algorithm is described in detail
in Ottensmann et al., 2018 <doi:10.1371/journal.pone.0198311>. ",2018-07-16,Meinolf Ottensmann,https://github.com/mottensmann/GCalignR,TRUE,https://github.com/mottensmann/gcalignr,13455,1,2019-12-18T14:34:59Z,13455
gclm,"Estimation of covariance matrices as solutions of
continuous time Lyapunov equations.
Sparse coefficient matrix and diagonal noise are estimated
with a proximal gradient
method for an l1-penalized loss minimization problem.
Varando G, Hansen NR (2020) <arXiv:2005.10483>.",2020-06-04,Gherardo Varando,https://github.com/gherardovarando/gclm,TRUE,https://github.com/gherardovarando/gclm,0,0,2020-05-25T06:55:06Z,NA
gdalcubes,"Processing collections of Earth observation images as on-demand multispectral, multitemporal raster data cubes. Users
define cubes by spatiotemporal extent, resolution, and spatial reference system and let 'gdalcubes' automatically apply cropping, reprojection, and
resampling using the 'Geospatial Data Abstraction Library' ('GDAL'). Implemented functions on data cubes include reduction over space and time,
applying arithmetic expressions on pixel band values, moving window aggregates over time, filtering by space, time, bands, and predicates on pixel values,
exporting data cubes as 'netCDF' or 'GeoTIFF' files, and plotting. The package implements lazy evaluation and
multithreading. All computational parts are implemented in C++, linking to the 'GDAL', 'netCDF', 'CURL', and 'SQLite' libraries.
See Appel and Pebesma (2019) <doi:10.3390/data4030092> for further details.",2020-05-17,Marius Appel,https://github.com/appelmar/gdalcubes_R,TRUE,https://github.com/appelmar/gdalcubes_r,6821,52,2020-05-18T11:39:25Z,131.17307692307693
gdalUtilities,"R's 'sf' package ships with self-contained 'GDAL'
executables, including a bare bones interface to several of the
'GDAL'-related utility programs collectively known as the 'GDAL'
utilities. For each of those utilities, this package provides an R
wrapper whose formal arguments closely mirror those of the 'GDAL'
command line interface. All of the utilities operate on data
stored in files and typically write their output to other
files. As a result, processing data stored in any of R's more
common spatial formats (i.e. those supported by the 'sp', 'sf',
and 'raster' packages) will require first writing to disk, then
processing with the package's wrapper functions before reading
back into R.",2020-04-29,Joshua OBrien,https://github.com/JoshOBrien/gdalUtilities/,TRUE,https://github.com/joshobrien/gdalutilities,6127,16,2020-04-29T16:06:50Z,382.9375
gdalUtils,"Wrappers for the Geospatial Data Abstraction Library (GDAL)
Utilities.",2020-02-13,Jonathan Asher Greenberg and Matteo Mattiuzzi,NA,TRUE,https://github.com/gearslaboratory/gdalutils,359922,9,2020-02-13T19:25:43Z,39991.333333333336
GDINA,"A set of psychometric tools for cognitive diagnosis modeling based on the generalized deterministic inputs, noisy and gate (G-DINA) model by de la Torre (2011) <DOI:10.1007/s11336-011-9207-7> and its extensions, including the sequential G-DINA model by Ma and de la Torre (2016) <DOI:10.1111/bmsp.12070> for polytomous responses, and the polytomous G-DINA model by Chen and de la Torre <DOI:10.1177/0146621613479818> for polytomous attributes. Joint attribute distribution can be independent, saturated, higher-order, loglinear smoothed or structured. Q-matrix validation, item and model fit statistics, model comparison at test and item level and differential item functioning can also be conducted. A graphical user interface is also provided. For tutorials, please check Ma and de la Torre (2020) <DOI:10.18637/jss.v093.i14>, Ma and de la Torre (2019) <DOI:10.1111/emip.12262>, Ma (2019) <DOI:10.1007/978-3-030-05584-4_29> and de la Torre and Akbay (2019) <DOI:10.14689/ejer.2019.80.9>. ",2020-05-24,Wenchao Ma,"https://github.com/Wenchao-Ma/GDINA,
https://wenchao-ma.github.io/GDINA",TRUE,https://github.com/wenchao-ma/gdina,55094,12,2020-06-04T16:21:47Z,4591.166666666667
gdistance,Calculate distances and routes on geographic grids.,2020-02-29,Jacob van Etten,https://agrobioinfoservices.github.io/gdistance/,TRUE,https://github.com/agrobioinfoservices/gdistance,108502,0,2020-02-28T19:30:15Z,NA
gdtools,Useful tools for writing vector graphics devices.,2020-04-03,David Gohel,NA,TRUE,https://github.com/davidgohel/gdtools,1538739,23,2020-04-03T11:41:58Z,66901.69565217392
GE,"Some tools for developing general equilibrium models and some general equilibrium models. These models can be used for teaching economic theory and are built by the methods of new structural economics (see <https://www.nse.pku.edu.cn/> and LI Wu, 2019, General Equilibrium and Structural Dynamics: Perspectives of New Structural Economics. Beijing: Economic Science Press).",2020-06-02,LI Wu,NA,TRUE,https://github.com/liwur/cge,1638,0,2020-01-31T02:42:46Z,NA
GEEmediate,"Causal mediation analysis for a single exposure/treatment and a
single mediator, both allowed to be either continuous or binary. The package
implements the difference method and provide point and interval estimates as
well as testing for the natural direct and indirect effects and the mediation
proportion. Nevo, Xiao and Spiegelman (2019) <doi:10.1515/ijb-2017-0006>.",2019-07-18,Daniel Nevo,NA,TRUE,https://github.com/daniel258/geemediate,12234,0,2019-07-16T11:29:08Z,NA
geex,"Provides a general, flexible framework for estimating parameters
and empirical sandwich variance estimator from a set of unbiased estimating
equations (i.e., M-estimation in the vein of Stefanski & Boos (2002)
<doi:10.1198/000313002753631330>). All examples from Stefanski & Boos (2002)
are published in the corresponding Journal of Statistical Software paper
<doi:10.18637/jss.v092.i02>. Also provides an API to compute finite-sample
variance corrections.",2020-02-17,Bradley Saul,"https://github.com/bsaul/geex, https://bsaul.github.io/geex/",TRUE,https://github.com/bsaul/geex,10297,4,2020-02-17T14:08:37Z,2574.25
geiger,"Methods for fitting macroevolutionary models to phylogenetic trees
Pennell (2014) <doi:10.1093/bioinformatics/btu181>.",2020-06-02,Luke Harmon,https://github.com/mwpennell/geiger-v2,TRUE,https://github.com/mwpennell/geiger-v2,158324,14,2020-06-01T20:38:01Z,11308.857142857143
gemma2,"Fits a multivariate linear mixed effects model that uses a polygenic term, after Zhou & Stephens (2014) (<https://www.nature.com/articles/nmeth.2848>). Of particular interest is the estimation of variance components with restricted maximum likelihood (REML) methods. Genome-wide efficient mixed-model association (GEMMA), as implemented in the package 'gemma2', uses an expectation-maximization algorithm for variance components inference for use in quantitative trait locus studies.",2019-10-01,Frederick Boehm,https://github.com/fboehm/gemma2,TRUE,https://github.com/fboehm/gemma2,3384,0,2019-10-01T19:22:18Z,NA
genalg,"R based genetic algorithm for binary and floating point
chromosomes.",2015-03-16,Egon Willighagen and Michel Ballings,https://github.com/egonw/genalg,TRUE,https://github.com/egonw/genalg,98635,15,2019-07-15T08:15:37Z,6575.666666666667
gender,"Infers state-recorded gender categories from first names and dates of birth using historical
datasets. By using these datasets instead of lists of male and female names,
this package is able to more accurately infer the gender of a name, and it
is able to report the probability that a name was male or female. GUIDELINES:
This method must be used cautiously and responsibly. Please be sure to see the
guidelines and warnings about usage in the 'README' or the package documentation.
See Blevins and Mullen (2015) <http://www.digitalhumanities.org/dhq/vol/9/3/000223/000223.html>.",2020-05-15,Lincoln Mullen,"https://docs.ropensci.org/gender/,
https://github.com/ropensci/gender",TRUE,https://github.com/ropensci/gender,334276,134,2020-05-14T21:51:04Z,2494.597014925373
geneHummus,"A pipeline with high specificity and sensitivity in extracting
proteins from the RefSeq database (National Center for Biotechnology
Information). Manual identification of gene families is highly
time-consuming and laborious, requiring an iterative process of manual and
computational analysis to identify members of a given family. The pipelines
implements an automatic approach for the identification of gene families
based on the conserved domains that specifically define that family. See
Die et al. (2018) <doi:10.1101/436659> for more information and examples.",2019-04-04,Jose V. Die,https://github.com/NCBI-Hackathons/GeneHummus,TRUE,https://github.com/ncbi-hackathons/genehummus,5168,5,2020-02-01T16:34:03Z,1033.6
GeneralizedUmatrix,"Projections are common dimensionality reduction methods, which represent high-dimensional data in a two-dimensional space. However, when restricting the output space to two dimensions, which results in a two dimensional scatter plot (projection) of the data, low dimensional similarities do not represent high dimensional distances coercively [Thrun, 2018]. This could lead to a misleading interpretation of the underlying structures [Thrun, 2018]. By means of the 3D topographic map the generalized Umatrix is able to depict errors of these two-dimensional scatter plots. The package is based on the book of Thrun, M.C.: ""Projection Based Clustering through Self-Organization and Swarm Intelligence"" (2018) <DOI:10.1007/978-3-658-20540-9>.",2020-03-23,Michael Thrun,http://www.deepbionics.org,TRUE,https://github.com/mthrun/generalizedumatrix,16574,0,2020-06-02T12:21:23Z,NA
genero,"Estimate gender from names in Spanish and Portuguese.
Works with vectors and dataframes. The estimation works not only
for first names but also full names. The package relies on a
compilation of common names with it's most frequent associated
gender in both languages which are used as look up tables for gender
inference.",2020-03-09,Juan Pablo Marin Diaz,https://github.com/datasketch/genero,TRUE,https://github.com/datasketch/genero,1461,1,2020-06-01T00:43:42Z,1461
GeNetIt,"Implementation of spatial graph-theoretic genetic gravity models.
The model framework is applicable for other types of spatial flow questions.
Includes functions for constructing spatial graphs, sampling and summarizing
associated raster variables and building unconstrained and singly constrained
gravity models.",2020-04-01,Jeffrey S. Evans,https://github.com/jeffreyevans/GeNetIt,TRUE,https://github.com/jeffreyevans/genetit,15390,0,2020-04-01T13:58:31Z,NA
genie,"A new hierarchical clustering linkage criterion:
the Genie algorithm links two clusters in such a way that a chosen
economic inequity measure (e.g., the Gini index) of the cluster
sizes does not increase drastically above a given threshold. Benchmarks
indicate a high practical usefulness of the introduced method:
it most often outperforms the Ward or average linkage in terms of
the clustering quality while retaining the single linkage speed,
see (Gagolewski et al. 2016a <DOI:10.1016/j.ins.2016.05.003>,
2016b <DOI:10.1007/978-3-319-45656-0_16>)
for more details.",2017-04-27,Marek Gagolewski,http://www.gagolewski.com/software/genie/,TRUE,https://github.com/gagolews/genie,16606,19,2020-01-16T05:54:19Z,874
genio,"Implements readers and writers for file formats associated with genetics data. Reading and writing plink BED/BIM/FAM formats is fully supported, including a lightning-fast BED reader and writer implementations. Other functions are 'readr' wrappers that are more constrained, user-friendly, and efficient for these particular applications; handles plink and eigenstrat tables (FAM, BIM, IND, and SNP files). There are also ""make"" functions for FAM and BIM tables with default values to go with simulated genotype data.",2019-12-17,Alejandro Ochoa,https://github.com/OchoaLab/genio,TRUE,https://github.com/ochoalab/genio,4925,6,2020-05-22T02:10:23Z,820.8333333333334
genius,Easily access song lyrics in a tidy way.,2020-05-28,Josiah Parry,https://github.com/josiahparry/genius,TRUE,https://github.com/josiahparry/genius,13058,92,2020-05-09T21:01:04Z,141.93478260869566
geniusr,"Provides tools to interact nicely with the 'Genius' API
<https://docs.genius.com/>.
Search hosted content, extract associated metadata and retrieve lyrics with ease.",2020-04-13,Ewen Henderson,"https://ewenme.github.io/geniusr/,
https://github.com/ewenme/geniusr",TRUE,https://github.com/ewenme/geniusr,11460,30,2020-04-13T16:34:16Z,382
genlasso,"Provides fast algorithms for computing the solution path for generalized lasso problems. Important use cases are the fused lasso over an arbitrary graph, and trend fitting of any given polynomial order. Specialized implementations for the latter two problems are given to improve stability and speed.",2019-07-11,Taylor B. Arnold and Ryan J. Tibshirani,https://github.com/statsmaths/genlasso,TRUE,https://github.com/statsmaths/genlasso,41457,20,2019-06-20T15:45:36Z,2072.85
genridge,"
The genridge package introduces generalizations of the standard univariate
ridge trace plot used in ridge regression and related methods. These graphical methods
show both bias (actually, shrinkage) and precision, by plotting the covariance ellipsoids of the estimated
coefficients, rather than just the estimates themselves. 2D and 3D plotting methods are provided,
both in the space of the predictor variables and in the transformed space of the PCA/SVD of the
predictors. ",2020-01-29,Michael Friendly,NA,TRUE,https://github.com/friendly/genridge,22874,0,2020-01-29T14:38:27Z,NA
genscore,"Implementation of the Generalized Score Matching estimator in Yu et al. (2019) <http://jmlr.org/papers/v20/18-278.html> for non-negative graphical models (truncated Gaussian, exponential square-root, gamma, a-b models) and univariate truncated Gaussian distributions. Also includes the original estimator for untruncated Gaussian graphical models from Lin et al. (2016) <doi:10.1214/16-EJS1126>, with the addition of a diagonal multiplier.",2020-04-27,Shiqing Yu,https://github.com/sqyu/genscore,TRUE,https://github.com/sqyu/genscore,627,0,2020-04-24T19:19:49Z,NA
genvar,"Implements tools for manipulating data sets and performing regressions in a way that is familiar to users of a popular, but proprietary, statistical package commonly used in the social sciences. Loads a single dataset into memory and implements a set of imperative commands to modify that data and perform regressions and other analysis on the dataset. Offers an alternative to standard R's function-based approach to data manipulation.",2020-01-21,Zach Flynn,NA,TRUE,https://github.com/flynnzac/genvar,3486,4,2020-04-09T01:29:51Z,871.5
geoaxe,"Split 'geospatial' objects into pieces. Includes
support for some spatial object inputs, 'Well-Known Text', and
'GeoJSON'.",2016-02-19,Scott Chamberlain,https://github.com/ropenscilabs/geoaxe,TRUE,https://github.com/ropenscilabs/geoaxe,73540,14,2019-12-09T13:12:05Z,5252.857142857143
geobr,"Easy access to official spatial data sets of Brazil as 'sf' objects in R.
The package includes a wide range of geospatial data available at various
geographic scales and for various years with harmonized attributes, projection and topology.",2020-03-29,Rafael H. M. Pereira,https://github.com/ipeaGIT/geobr,TRUE,https://github.com/ipeagit/geobr,8679,313,2020-06-05T20:24:01Z,27.728434504792332
geodaData,"Stores small spatial datasets used to teach basic spatial analysis
concepts. Datasets are based off of the 'GeoDa' software workbook and data
site <https://geodacenter.github.io/data-and-lab/> developed by Luc Anselin
and team at the University of Chicago. Datasets are stored as 'sf' objects.",2020-05-27,Angela Li,https://github.com/spatialanalysis/geodaData,TRUE,https://github.com/spatialanalysis/geodadata,111,11,2020-05-20T01:06:43Z,10.090909090909092
geodist,"Dependency-free, ultra fast calculation of geodesic distances.
Includes the reference nanometre-accuracy geodesic distances of Karney
(2013) <doi:10.1007/s00190-012-0578-z>, as used by the 'sf' package, as well
as Haversine and Vincenty distances. Default distance measure is the ""Mapbox
cheap ruler"" which is generally more accurate than Haversine or Vincenty for
distances out to a few hundred kilometres, and is considerably faster. The
main function accepts one or two inputs in almost any generic rectangular
form, and returns either matrices of pairwise distances, or vectors of
sequential distances.",2020-05-18,Mark Padgham,https://github.com/hypertidy/geodist,TRUE,https://github.com/hypertidy/geodist,16348,55,2020-05-19T09:59:21Z,297.23636363636365
geofacet,Provides geofaceting functionality for 'ggplot2'. Geofaceting arranges a sequence of plots of data for different geographical entities into a grid that preserves some of the geographical orientation.,2020-05-26,Ryan Hafen,https://github.com/hafen/geofacet,TRUE,https://github.com/hafen/geofacet,23093,244,2020-05-25T22:45:17Z,94.64344262295081
geohashTools,"Tools for working with Gustavo Niemeyer's geohash coordinate system, including API for interacting with other common R GIS libraries.",2020-05-26,Michael Chirico,https://github.com/MichaelChirico/geohashTools,TRUE,https://github.com/michaelchirico/geohashtools,6461,39,2020-05-26T10:43:28Z,165.66666666666666
geojson,"Classes for 'GeoJSON' to make working with 'GeoJSON' easier.
Includes S3 classes for 'GeoJSON' classes with brief summary output,
and a few methods such as extracting and adding bounding boxes,
properties, and coordinate reference systems; working with
newline delimited 'GeoJSON'; linting through the 'geojsonlint'
package; and serializing to/from 'Geobuf' binary 'GeoJSON'
format.",2019-01-31,Scott Chamberlain,https://github.com/ropensci/geojson,TRUE,https://github.com/ropensci/geojson,221000,30,2019-12-09T13:12:23Z,7366.666666666667
geojsonio,"Convert data to 'GeoJSON' or 'TopoJSON' from various R classes,
including vectors, lists, data frames, shape files, and spatial classes.
'geojsonio' does not aim to replace packages like 'sp', 'rgdal', 'rgeos',
but rather aims to be a high level client to simplify conversions of data
from and to 'GeoJSON' and 'TopoJSON'.",2020-04-07,Scott Chamberlain,"https://github.com/ropensci/geojsonio (devel),
https://docs.ropensci.org/geojsonio (docs)",TRUE,https://github.com/ropensci/geojsonio,325128,128,2020-04-08T15:31:48Z,2540.0625
geojsonlint,"Tools for linting 'GeoJSON'. Includes tools for interacting with the
online tool <http://geojsonlint.com>, the 'Javascript' library 'geojsonhint'
(<https://www.npmjs.com/package/geojsonhint>), and validating against a
'GeoJSON' schema via the 'Javascript' library
(<https://www.npmjs.com/package/is-my-json-valid>). Some tools work locally
while others require an internet connection.",2020-02-13,Scott Chamberlain,"https://github.com/ropensci/geojsonlint (devel)
https://docs.ropensci.org/geojsonlint (docs)",TRUE,https://github.com/ropensci/geojsonlint,151169,9,2020-02-13T16:04:02Z,16796.555555555555
geojsonR,"Includes functions for processing GeoJson objects <https://en.wikipedia.org/wiki/GeoJSON> relying on 'RFC 7946' <https://tools.ietf.org/pdf/rfc7946.pdf>. The geojson encoding is based on 'json11', a tiny JSON library for 'C++11' <https://github.com/dropbox/json11>. Furthermore, the source code is exported in R through the 'Rcpp' and 'RcppArmadillo' packages.",2020-03-18,Lampros Mouselimis,https://github.com/mlampros/geojsonR,TRUE,https://github.com/mlampros/geojsonr,29809,9,2020-03-18T16:47:27Z,3312.1111111111113
geojsonsf,Converts Between GeoJSON and simple feature objects. ,2020-03-18,David Cooley,https://github.com/SymbolixAU/geojsonsf,TRUE,https://github.com/symbolixau/geojsonsf,76359,54,2020-06-07T00:19:48Z,1414.0555555555557
geoknife,"Processes gridded datasets found on the U.S. Geological Survey
Geo Data Portal web application or elsewhere, using a web-enabled workflow
that eliminates the need to download and store large datasets that are reliably
hosted on the Internet. The package provides access to several data subset and
summarization algorithms that are available on remote web processing servers (Read et al. (2015) <doi:10.1111/ecog.01880>).",2020-06-02,Jordan Read,https://github.com/USGS-R/geoknife,TRUE,https://github.com/usgs-r/geoknife,36963,51,2020-05-30T10:21:23Z,724.7647058823529
geometr,"Provides tools that generate and process fully accessible and tidy
geometric shapes. The package improves interoperability of spatial and
other geometric classes by providing getters and setters that produce
identical output from various classes.",2020-03-30,Steffen Ehrmann,https://github.com/EhrmannS/geometr,TRUE,https://github.com/ehrmanns/geometr,2237,13,2020-05-15T13:03:55Z,172.07692307692307
geometry,"Makes the 'Qhull' library <http://www.qhull.org>
available in R, in a similar manner as in Octave and MATLAB. Qhull
computes convex hulls, Delaunay triangulations, halfspace
intersections about a point, Voronoi diagrams, furthest-site
Delaunay triangulations, and furthest-site Voronoi diagrams. It
runs in 2D, 3D, 4D, and higher dimensions. It implements the
Quickhull algorithm for computing the convex hull. Qhull does not
support constrained Delaunay triangulations, or mesh generation of
non-convex objects, but the package does include some R functions
that allow for this.",2019-12-04,Jean-Romain Roussel [cph,https://davidcsterratt.github.io/geometry,TRUE,https://github.com/davidcsterratt/geometry,945355,12,2019-12-03T18:20:44Z,78779.58333333333
geomorph,"Read, manipulate, and digitize landmark data, generate shape
variables via Procrustes analysis for points, curves and surfaces, perform
shape analyses, and provide graphical depictions of shapes and patterns of
shape variation.",2020-06-03,Dean Adams,https://github.com/geomorphR/geomorph,TRUE,https://github.com/geomorphr/geomorph,103997,42,2020-04-21T12:55:45Z,2476.1190476190477
geonetwork,"Provides classes and methods for handling networks or
graphs whose nodes are geographical (i.e. locations in the globe).
The functionality includes the creation of objects of class geonetwork
as a graph with node coordinates, the computation of network measures,
the support of spatial operations (projection to different Coordinate
Reference Systems, handling of bounding boxes, etc.) and the plotting of
the geonetwork object combined with supplementary cartography for spatial
representation.",2019-04-05,Facundo Muñoz,https://github.com/Cirad-ASTRE/geonetwork,TRUE,https://github.com/cirad-astre/geonetwork,5115,1,2019-08-23T15:29:20Z,5115
geoops,"Tools for doing calculations and manipulations on 'GeoJSON',
a 'geospatial' data interchange format (<https://tools.ietf.org/html/rfc7946>).
'GeoJSON' is also valid 'JSON'.",2020-05-17,Scott Chamberlain,"https://docs.ropensci.org/geoops,
https://github.com/ropensci/geoops",TRUE,https://github.com/ropensci/geoops,10190,18,2020-05-18T14:17:59Z,566.1111111111111
geospark,"R binds 'GeoSpark' <http://geospark.datasyslab.org/> extending 'sparklyr'
<https://spark.rstudio.com/> R package to make distributed 'geocomputing' easier. Sf is a
package that provides [simple features] <https://en.wikipedia.org/wiki/Simple_Features> access
for R and which is a leading 'geospatial' data processing tool. 'Geospark' R package brings
the same simple features access like sf but running on Spark distributed system.",2020-03-02,Harry Zhu,NA,TRUE,https://github.com/harryprince/geospark,5346,38,2020-03-02T03:26:49Z,140.68421052631578
geoSpectral,"Provides S4 classes and data import, preprocessing, graphing,
manipulation and export methods for geo-Spectral datasets (datasets with space/time/spectral
dimensions). These type of data are frequently collected within earth observation projects
(remote sensing, spectroscopy, bio-optical oceanography, mining, agricultural, atmospheric,
environmental or similar branch of science).",2020-02-20,Servet Ahmet Cizmeli,https://github.com/PranaGeo/geoSpectral,TRUE,https://github.com/pranageo/geospectral,11461,2,2020-02-21T08:06:27Z,5730.5
geotopbricks,"It analyzes raster maps and other information as input/output
files from the Hydrological Distributed Model GEOtop. It contains functions
and methods to import maps and other keywords from geotop.inpts file. Some
examples with simulation cases of GEOtop 2.x/3.x are presented in the package.
Any information about the GEOtop Distributed Hydrological Model source code
is available on www.geotop.org. Technical details about the model are
available in Endrizzi et al, 2014
(<http://www.geosci-model-dev.net/7/2831/2014/gmd-7-2831-2014.html>).",2020-02-11,Emanuele Cordano,"http://www.geotop.org, https://github.com/ecor/geotopbricks",TRUE,https://github.com/ecor/geotopbricks,19197,3,2020-05-05T01:00:45Z,6399
geouy,"The toolbox has functions to load and process geographic information for Uruguay.
And extra-function to get address coordinates and orthophotos through the uruguayan 'IDE' API <https://www.gub.uy/infraestructura-datos-espaciales/tramites-y-servicios/servicios/servicio-direcciones-geograficas>.",2020-06-03,Richard Detomasi,NA,TRUE,https://github.com/richdeto/geouy,1976,2,2020-06-09T23:59:50Z,988
geoviz,Simpler processing of digital elevation model and GPS trace data for use with the 'rayshader' package.,2020-01-12,Neil Charles,https://github.com/neilcharles/geoviz/,TRUE,https://github.com/neilcharles/geoviz,6086,83,2020-04-28T15:43:45Z,73.32530120481928
GermaParl,"Data package to disseminate the 'GermaParl' corpus of parliamentary debates of
the German Bundestag prepared in the 'PolMine Project'. The package includes a small subset of
the corpus for demonstration and testing purposes. The package includes functionality to load
the full corpus from the open science repository 'Zenodo' and some auxiliary functions to
enhance the corpus.",2020-05-25,Andreas Blaette,https://github.com/polmine/GermaParl,TRUE,https://github.com/polmine/germaparl,185,1,2020-04-09T20:05:07Z,185
GerminaR,A collection of different indices and visualization techniques for evaluate the seed germination process in ecophysiological studies (Lozano-Isla et al. 2019) <doi:10.1111/1440-1703.1275>.,2020-03-28,Flavio Lozano Isla,https://flavjack.github.io/germinaquant/,TRUE,https://github.com/flavjack/germinar,15301,2,2020-06-02T17:47:08Z,7650.5
germinationmetrics,"Provides functions to compute various germination indices such as
germinability, median germination time, mean germination time, mean
germination rate, speed of germination, Timson's index, germination value,
coefficient of uniformity of germination, uncertainty of germination
process, synchrony of germination etc. from germination count data. Includes
functions for fitting cumulative seed germination curves using
four-parameter hill function and computation of associated parameters. See
the vignette for more, including full list of citations for the methods
implemented.",2019-01-19,J. Aravind,"https://github.com/aravind-j/germinationmetrics,
https://aravind-j.github.io/germinationmetrics/
https://CRAN.R-project.org/package=germinationmetrics
https://doi.org/10.5281/zenodo.1219630",TRUE,https://github.com/aravind-j/germinationmetrics,12302,1,2020-02-27T18:32:47Z,12302
gert,"Simple git client based on 'libgit2' with user-friendly authentication
and support for both SSH and HTTPS remotes on all platforms. User credentials
are shared with command line 'git' through the git-credential store and ssh keys
stored on disk or ssh-agent. On Linux, a somewhat recent version of 'libgit2' is
required.",2019-10-29,Jeroen Ooms,"https://jeroen.cran.dev/gert (website),
https://github.com/r-lib/gert (devel), https://libgit2.org
(upstream)",TRUE,https://github.com/r-lib/gert,7637,58,2020-06-09T14:29:42Z,131.67241379310346
gestalt,"Provides a suite of function-building tools centered around a
(forward) composition operator, %>>>%, which extends the semantics of the
'magrittr' %>% operator and supports 'tidyverse' quasiquotation. It enables
you to construct composite functions that can be inspected and transformed as
list-like objects. In conjunction with %>>>%, a compact function constructor,
fn(), and a function that performs partial application, partial(), are also
provided. Both support quasiquotation.",2019-06-27,Eugene Ha,https://github.com/egnha/gestalt,TRUE,https://github.com/egnha/gestalt,9842,33,2019-06-27T06:32:44Z,298.24242424242425
GetBCBData,"Downloads and organizes datasets using BCB's API <https://www.bcb.gov.br/>. Offers options for caching with the 'memoise' package and
, multicore/multisession with 'furrr' and format of output data (long/wide). ",2019-04-23,Marcelo Perlin,https://github.com/msperlin/GetBCBData/,TRUE,https://github.com/msperlin/getbcbdata,5174,4,2019-11-20T11:36:19Z,1293.5
getCRUCLdata,"Provides functions that automate downloading and importing
University of East Anglia Climate Research Unit ('CRU') 'CL' v. 2.0
climatology data, facilitates the calculation of minimum temperature and
maximum temperature and formats the data into a tidy data frame as a
'tibble' or a list of 'raster' 'stack' objects for use. 'CRU' 'CL' v. 2.0
data are a gridded climatology of 1961-1990 monthly means released in 2002
and cover all land areas (excluding Antarctica) at 10 arcminutes
(0.1666667 degree) resolution. For more information see the description of
the data provided by the University of East Anglia Climate Research Unit,
<https://crudata.uea.ac.uk/cru/data/hrg/tmc/readme.txt>.",2019-08-29,Adam Sparks,https://docs.ropensci.org/getCRUCLdata/,TRUE,https://github.com/ropensci/getcrucldata,34859,11,2020-03-22T08:32:58Z,3169
GetDFPData,"Reads annual financial reports including assets, liabilities, dividends history, stockholder composition and much more from Bovespa's DFP, FRE and FCA systems <http://www.bmfbovespa.com.br/en_us/products/listed-equities-and-derivatives/equities/listed-companies.htm>.
These are web based interfaces for all financial reports of companies traded at Bovespa. The package is specially designed for large scale data importation, keeping a tabular (long) structure for easier processing. ",2020-05-18,Marcelo Perlin,https://github.com/msperlin/GetDFPData/,TRUE,https://github.com/msperlin/getdfpdata,21345,29,2020-05-18T15:58:23Z,736.0344827586207
GetHFData,Downloads and aggregates high frequency trading data for Brazilian instruments directly from Bovespa ftp site <ftp://ftp.bmf.com.br/MarketData/>.,2019-04-08,Marcelo Perlin,https://github.com/msperlin/GetHFData/,TRUE,https://github.com/msperlin/gethfdata,30243,30,2020-06-09T19:03:09Z,1008.1
getlandsat,"Get Landsat 8 Data from Amazon Web Services ('AWS')
public data sets (<https://registry.opendata.aws/landsat-8/>).
Includes functions for listing images and fetching them, and handles
caching to prevent unnecessary additional requests.",2018-04-30,Scott Chamberlain,https://github.com/ropensci/getlandsat,TRUE,https://github.com/ropensci/getlandsat,12915,47,2019-12-09T13:16:22Z,274.78723404255317
GetLattesData,A simple API for downloading and reading xml data directly from Lattes <http://lattes.cnpq.br/>.,2020-03-07,Marcelo Perlin,https://github.com/msperlin/GetLattesData/,TRUE,https://github.com/msperlin/getlattesdata,15697,6,2020-03-07T15:22:09Z,2616.1666666666665
getmstatistic,"Quantifying systematic heterogeneity in meta-analysis using R.
The M statistic aggregates heterogeneity information across multiple
variants to identify systematic heterogeneity patterns and their direction
of effect in meta-analysis. Its primary use is to identify outlier studies,
which either show ""null"" effects or consistently show stronger or weaker
genetic effects than average across the panel of variants examined in a
GWAS meta-analysis. In contrast to conventional heterogeneity metrics
(Q-statistic, I-squared and tau-squared) which measure random heterogeneity
at individual variants, M measures systematic (non-random)
heterogeneity across multiple independently associated variants. Systematic
heterogeneity can arise in a meta-analysis due to differences in the study
characteristics of participating studies. Some of the differences may
include: ancestry, allele frequencies, phenotype definition, age-of-disease
onset, family-history, gender, linkage disequilibrium and quality control
thresholds. See <https://magosil86.github.io/getmstatistic/> for
statistical theory, documentation and examples.",2020-03-30,Lerato E Magosi,https://magosil86.github.io/getmstatistic/,TRUE,https://github.com/magosil86/getmstatistic,11430,2,2020-03-29T22:43:47Z,5715
getopt,"Package designed to be used with Rscript to write
``#!'' shebang scripts that accept short and long flags/options.
Many users will prefer using instead the packages optparse or argparse
which add extra features like automatically generated help option and usage,
support for default values, positional argument support, etc.",2019-03-22,Trevor L Davis,https://github.com/trevorld/r-getopt,TRUE,https://github.com/trevorld/r-getopt,442998,10,2019-11-26T01:09:33Z,44299.8
GetoptLong,"This is yet another command-line argument parser which wraps the
powerful Perl module Getopt::Long and with some adaptation for easier use
in R. It also provides a simple way for variable interpolation in R.",2020-01-08,Zuguang Gu,https://github.com/jokergoo/GetoptLong,TRUE,https://github.com/jokergoo/getoptlong,228934,7,2020-06-06T17:32:31Z,32704.85714285714
getProxy,"Allows get address and port
of the free proxy server, from one of two services
<http://gimmeproxy.com/> or <https://getproxylist.com/>.
And it's easy to redirect your Internet connection through
a proxy server.",2018-08-20,Alexey Seleznev,http://selesnow.github.io/getProxy,TRUE,https://github.com/selesnow/getproxy,7163,6,2020-05-08T14:58:59Z,1193.8333333333333
GetQuandlData,"Imports time series data from the 'Quandl' database <https://www.quandl.com>. The package uses the 'json api' at <https://www.quandl.com/tools/api>, local caching ('memoise' package) and the tidy format by default.
Also allows queries of databases, allowing the user to see which time series are available for each database id. In short, it is an alternative to package 'Quandl', with faster data importation in the tidy/long format.",2019-10-20,Marcelo S. Perlin,https://github.com/msperlin/GetQuandlData/,TRUE,https://github.com/msperlin/getquandldata,3038,7,2019-10-16T19:04:52Z,434
gets,"Automated General-to-Specific (GETS) modelling of the mean and variance of a regression, and indicator saturation methods for detecting and testing for structural breaks in the mean.",2020-05-04,Genaro Sucarrat,"https://CRAN.R-project.org/package=gets,
http://www.sucarrat.net/R/gets",TRUE,https://github.com/gsucarrat/gets,60468,3,2020-04-30T13:02:55Z,20156
getspres,"An implementation of SPRE (standardised predicted random-effects)
statistics in R to explore heterogeneity in genetic association meta-
analyses, as described by Magosi et al. (2019)
<doi:10.1093/bioinformatics/btz590>. SPRE statistics are precision
weighted residuals that indicate the direction and extent with which
individual study-effects in a meta-analysis deviate from the average
genetic effect. Overly influential positive outliers have the potential
to inflate average genetic effects in a meta-analysis whilst negative
outliers might lower or change the direction of effect. See the 'getspres'
website for documentation and examples
<https://magosil86.github.io/getspres/>.",2020-03-31,Lerato E Magosi,https://magosil86.github.io/getspres/,TRUE,https://github.com/magosil86/getspres,3678,0,2020-03-31T12:38:39Z,NA
getTBinR,"Quickly and easily import analysis ready
Tuberculosis (TB) burden data, from the World Health Organisation
(WHO), into R. The aim of getTBinR is to allow researchers, and other
interested individuals, to quickly and easily gain access to a
detailed TB data set and to start using it to derive key insights. It
provides a consistent set of tools that can be used to rapidly
evaluate hypotheses on a widely used data set before they are explored
further using more complex methods or more detailed data. These tools
include: generic plotting and mapping functions; a data dictionary
search tool; an interactive shiny dashboard; and an automated, country
level, TB report. For newer R users, this package reduces the barrier
to entry by handling data import, munging, and visualisation. All
plotting and mapping functions are built with ggplot2 so can be
readily extended.",2019-09-03,Sam Abbott,"https://www.samabbott.co.uk/getTBinR,
https://github.com/seabbs/getTBinR",TRUE,https://github.com/seabbs/gettbinr,15627,13,2020-02-23T17:24:33Z,1202.076923076923
GetTDData,Downloads and aggregates data for Brazilian government issued bonds directly from the website of Tesouro Direto <http://www.tesouro.fazenda.gov.br/tesouro-direto-balanco-e-estatisticas>.,2019-10-01,Marcelo Perlin,https://github.com/msperlin/GetTDData/,TRUE,https://github.com/msperlin/gettddata,31329,6,2019-10-01T19:22:18Z,5221.5
gettz,"A function to retrieve the system timezone on Unix systems
which has been found to find an answer when 'Sys.timezone()' has failed.
It is based on an answer by Duane McCully posted on 'StackOverflow', and
adapted to be callable from R. The package also builds on Windows, but
just returns NULL.",2020-04-14,Dirk Eddelbuettel,http://dirk.eddelbuettel.com/code/gettz.html,TRUE,https://github.com/eddelbuettel/gettz,17968,1,2020-05-16T13:39:24Z,17968
gexp,"Generates experiments - simulating structured or experimental data as:
completely randomized design, randomized block design, latin square design,
factorial and split-plot experiments (Ferreira, 2008, ISBN:8587692526;
Naes et al., 2007 <doi:10.1002/qre.841>; Rencher et al., 2007, ISBN:9780471754985;
Montgomery, 2001, ISBN:0471316490).",2020-04-02,Ivan Bezerra Allaman,https://github.com/ivanalaman/gexp,TRUE,https://github.com/ivanalaman/gexp,5117,1,2020-04-01T22:33:18Z,5117
gfcanalysis,"Supports analyses using the Global Forest Change dataset released
by Hansen et al. gfcanalysis was originally written for the Tropical Ecology
Assessment and Monitoring (TEAM) Network. For additional details on the
Global Forest Change dataset, see: Hansen, M. et al. 2013. ""High-Resolution
Global Maps of 21st-Century Forest Cover Change."" Science 342 (15
November): 850-53. The forest change data and more information on the
product is available at <http://earthenginepartners.appspot.com>.",2019-03-12,Matthew Cooper,https://github.com/azvoleff/gfcanalysis,TRUE,https://github.com/azvoleff/gfcanalysis,18776,10,2020-02-05T03:20:56Z,1877.6
gfonts,"Download 'Google' fonts and generate CSS to use in 'rmarkdown' documents and
'shiny' applications. Some popular fonts are included and ready to use.",2020-05-09,Victor Perrier,https://github.com/dreamRs/gfonts,TRUE,https://github.com/dreamrs/gfonts,488,78,2020-05-09T17:51:52Z,6.256410256410256
gfoRmula,"Implements the parametric g-formula algorithm of Robins (1986)
<doi:10.1016/0270-0255(86)90088-6>. The g-formula can be used to estimate
the causal effects of hypothetical time-varying treatment interventions on
the mean or risk of an outcome from longitudinal data with time-varying
confounding. This package allows: 1) binary or continuous/multi-level
time-varying treatments; 2) different types of outcomes (survival or
continuous/binary end of follow-up); 3) data with competing events or
truncation by death and loss to follow-up and other types of censoring
events; 4) different options for handling competing events in the case of
survival outcomes; 5) a random measurement/visit process; 6) joint
interventions on multiple treatments; and 7) general incorporation of a
priori knowledge of the data structure.",2020-03-23,Victoria Lin,"https://github.com/CausalInference/gfoRmula,
https://arxiv.org/abs/1908.07072",TRUE,https://github.com/causalinference/gformula,4675,25,2020-03-22T15:36:43Z,187
gg.gap,It is not very easy to define segments for y-axis in a 'ggplot2' plot. gg.gap() function in this package can carry it out.,2019-09-30,Jiacheng Lou,https://github.com/ChrisLou-bioinfo/gg.gap,TRUE,https://github.com/chrislou-bioinfo/gg.gap,3723,8,2019-10-06T09:26:22Z,465.375
ggallin,"Extra geoms and scales for 'ggplot2', including geom_cloud(),
a Normal density cloud replacement for errorbars;
transforms ssqrt_trans and pseudolog10_trans, which are loglike but
appropriate for negative data; interp_trans() and warp_trans() which
provide scale transforms based on interpolation;
and an infix compose operator for scale transforms.",2017-10-02,Steven E. Pav,https://github.com/shabbychef/ggallin,TRUE,https://github.com/shabbychef/ggallin,9880,4,2019-10-04T05:22:41Z,2470
ggalluvial,"Alluvial plots use variable-width ribbons and stacked bar plots to
represent multi-dimensional or repeated-measures data with categorical or
ordinal variables; see Riehmann, Hanfler, and Froehlich (2005)
<doi:10.1109/INFVIS.2005.1532152> and Rosvall and Bergstrom (2010)
<doi:10.1371/journal.pone.0008694>.
Alluvial plots are statistical graphics in the sense of Wilkinson (2006)
<doi:10.1007/0-387-28695-0>; they share elements with Sankey diagrams and
parallel sets plots but are uniquely determined from the data and a small
set of parameters. This package extends Wickham's (2010)
<doi:10.1198/jcgs.2009.07098> layered grammar of graphics to generate
alluvial plots from tidy data.",2020-04-16,Jason Cory Brunson,http://corybrunson.github.io/ggalluvial/,TRUE,https://github.com/corybrunson/ggalluvial,58607,229,2020-05-29T01:49:43Z,255.92576419213975
GGally,"
The R package 'ggplot2' is a plotting system based on the grammar of graphics.
'GGally' extends 'ggplot2' by adding several functions
to reduce the complexity of combining geometric objects with transformed data.
Some of these functions include a pairwise plot matrix, a two group pairwise plot
matrix, a parallel coordinates plot, a survival plot, and several functions to
plot networks.",2020-06-06,Barret Schloerke,"https://ggobi.github.io/ggally, https://github.com/ggobi/ggally",TRUE,https://github.com/ggobi/ggally,1995490,351,2020-06-08T14:45:47Z,5685.156695156696
ggalt,"A compendium of new geometries, coordinate systems, statistical
transformations, scales and fonts for 'ggplot2', including splines, 1d and 2d densities,
univariate average shifted histograms, a new map coordinate system based on the
'PROJ.4'-library along with geom_cartogram() that mimics the original functionality of
geom_map(), formatters for ""bytes"", a stat_stepribbon() function, increased 'plotly'
compatibility and the 'StateFace' open source font 'ProPublica'. Further new
functionality includes lollipop charts, dumbbell charts, the ability to encircle
points and coordinate-system-based text annotations.",2017-02-15,Bob Rudis,https://github.com/hrbrmstr/ggalt,TRUE,https://github.com/hrbrmstr/ggalt,112335,512,2019-07-30T10:46:36Z,219.404296875
ggamma,"Density, distribution function, quantile function and random generation for the Generalized Gamma proposed in Stacy, E. W. (1962) <doi:10.1214/aoms/1177704481>.",2019-12-15,Matheus H. J. Saldanha,https://mjsaldanha.com/posts/ggamma,TRUE,https://github.com/matheushjs/ggamma,2661,0,2019-12-15T08:24:18Z,NA
gganimate,"The grammar of graphics as implemented in the 'ggplot2' package has
been successful in providing a powerful API for creating static
visualisation. In order to extend the API for animated graphics this package
provides a completely new set of grammar, fully compatible with 'ggplot2'
for specifying transitions and animations in a flexible and extensible way.",2020-02-09,Thomas Lin Pedersen,"https://gganimate.com, https://github.com/thomasp85/gganimate",TRUE,https://github.com/thomasp85/gganimate,198098,1548,2020-05-13T18:37:47Z,127.9702842377261
ggbeeswarm,"Provides two methods of plotting categorical scatter plots such
that the arrangement of points within a category reflects the density of
data at that region, and avoids over-plotting.",2017-08-07,Erik Clarke,https://github.com/eclarke/ggbeeswarm,TRUE,https://github.com/eclarke/ggbeeswarm,183110,326,2019-08-14T20:39:38Z,561.6871165644171
ggcharts,"Streamline the creation of common charts by taking care of a lot of
data preprocessing and plot customization for the user. Provides a
high-level interface to create plots using 'ggplot2'.",2020-05-20,Thomas Neitmann,https://github.com/thomas-neitmann/ggcharts,TRUE,https://github.com/thomas-neitmann/ggcharts,2701,121,2020-06-01T17:30:02Z,22.322314049586776
ggcorrplot,"The 'ggcorrplot' package can be used to visualize easily a
correlation matrix using 'ggplot2'. It provides a solution for reordering the
correlation matrix and displays the significance level on the plot. It also
includes a function for computing a matrix of correlation p-values.",2019-05-19,Alboukadel Kassambara,http://www.sthda.com/english/wiki/ggcorrplot,TRUE,https://github.com/kassambara/ggcorrplot,300513,119,2019-10-02T17:42:09Z,2525.3193277310925
ggdag,"Tidy, analyze, and plot directed acyclic graphs
(DAGs). 'ggdag' is built on top of 'dagitty', an R package that uses
the 'DAGitty' web tool (<http://dagitty.net>) for creating and
analyzing DAGs. 'ggdag' makes it easy to tidy and plot 'dagitty'
objects using 'ggplot2' and 'ggraph', as well as common analytic and
graphical functions, such as determining adjustment sets and node
relationships.",2020-02-13,Malcolm Barrett,https://github.com/malcolmbarrett/ggdag,TRUE,https://github.com/malcolmbarrett/ggdag,24217,280,2020-06-05T18:11:00Z,86.48928571428571
ggdemetra,"Provides 'ggplot2' functions to return the results of seasonal and trading day adjustment
made by 'RJDemetra'. 'RJDemetra' is an 'R' interface around 'JDemetra+' (<https://github.com/jdemetra/jdemetra-app>),
the seasonal adjustment software officially recommended to the members of the European Statistical System and
the European System of Central Banks.",2019-09-12,Alain Quartier-la-Tente,https://github.com/AQLT/ggdemetra,TRUE,https://github.com/aqlt/ggdemetra,6436,9,2019-09-12T21:16:50Z,715.1111111111111
ggdmc,"Hierarchical Bayesian models. The package provides tools to fit two response time models, using the population-based Markov Chain Monte Carlo. ",2019-04-29,Yi-Shin Lin,https://github.com/yxlin/ggdmc,TRUE,https://github.com/yxlin/ggdmc,13619,4,2020-06-01T12:15:52Z,3404.75
gge,"Create biplots for GGE (genotype plus genotype-by-environment) and
GGB (genotype plus genotype-by-block-of-environments) models.",2018-05-15,Kevin Wright,https://github.com/kwstat/gge,TRUE,https://github.com/kwstat/gge,23299,5,2020-01-20T15:31:51Z,4659.8
ggeasy,"Provides a series of aliases to commonly used but difficult
to remember 'ggplot2' sequences.",2020-03-19,Jonathan Carroll,https://github.com/jonocarroll/ggeasy,TRUE,https://github.com/jonocarroll/ggeasy,3318,156,2020-05-21T12:53:33Z,21.26923076923077
ggedit,Interactively edit 'ggplot2' layer and theme aesthetics definitions.,2020-06-02,Jonathan Sidi,https://github.com/yonicd/ggedit,TRUE,https://github.com/yonicd/ggedit,28629,207,2020-06-02T01:51:11Z,138.30434782608697
ggeffects,"Compute marginal effects from statistical models and returns the
result as tidy data frames. These data frames are ready to use with the
'ggplot2'-package. Marginal effects can be calculated for many different
models. Interaction terms, splines and polynomial terms are also supported.
The main functions are ggpredict(), ggemmeans() and ggeffect(). There is a
generic plot()-method to plot the results using 'ggplot2'.",2020-04-20,Daniel Lüdecke,https://strengejacke.github.io/ggeffects,TRUE,https://github.com/strengejacke/ggeffects,365148,264,2020-06-09T20:17:45Z,1383.1363636363637
ggetho,"Extension of 'ggplot2' providing layers, scales and preprocessing functions
useful to represent behavioural variables that are recorded over multiple animals and days.
This package is part of the 'rethomics' framework <http://rethomics.github.io/>.",2020-04-29,Quentin Geissmann,https://github.com/rethomics/ggetho,TRUE,https://github.com/rethomics/ggetho,11914,6,2020-06-09T01:42:23Z,1985.6666666666667
ggExtra,"Collection of functions and layers to enhance 'ggplot2'. The
flagship function is 'ggMarginal()', which can be used to add marginal
histograms/boxplots/density plots to 'ggplot2' scatterplots.",2019-08-27,Dean Attali,https://github.com/daattali/ggExtra,TRUE,https://github.com/daattali/ggextra,242853,275,2020-06-09T04:14:23Z,883.1018181818182
ggfittext,"Provides 'ggplot2' geoms to fit text into a box by growing, shrinking
or wrapping the text.",2019-07-18,David Wilkins,https://wilkox.org/ggfittext,TRUE,https://github.com/wilkox/ggfittext,111726,176,2020-05-28T10:30:51Z,634.8068181818181
ggfocus,"A 'ggplot2' extension that provides tools for automatically
creating scales to focus on subgroups of the data plotted
without losing other information.",2020-01-23,Victor Freguglia,https://github.com/Freguglia/ggfocus,TRUE,https://github.com/freguglia/ggfocus,8050,13,2020-01-27T20:23:04Z,619.2307692307693
ggforce,"The aim of 'ggplot2' is to aid in visual data investigations. This
focus has led to a lack of facilities for composing specialised plots.
'ggforce' aims to be a collection of mainly new stats and geoms that fills
this gap. All additional functionality is aimed to come through the official
extension system so using 'ggforce' should be a stable experience.",2019-08-20,Thomas Lin Pedersen,https://ggforce.data-imaginist.com,TRUE,https://github.com/thomasp85/ggforce,867324,542,2020-01-06T10:08:01Z,1600.228782287823
ggformula,Provides a formula interface to 'ggplot2' graphics.,2020-03-04,Randall Pruim,https://github.com/ProjectMOSAIC/ggformula,TRUE,https://github.com/projectmosaic/ggformula,441587,29,2020-03-04T07:21:38Z,15227.137931034482
ggfortify,"Unified plotting tools for statistics commonly used, such as GLM,
time series, PCA families, clustering and survival analysis. The package offers
a single plotting interface for these analysis results and plots in a unified
style using 'ggplot2'.",2020-04-26,Yuan Tang,https://github.com/sinhrks/ggfortify,TRUE,https://github.com/sinhrks/ggfortify,857992,433,2020-05-12T16:20:31Z,1981.5057736720555
gggenes,"Provides a 'ggplot2' geom and helper functions for drawing gene
arrow maps.",2019-06-24,David Wilkins,https://wilkox.org/gggenes,TRUE,https://github.com/wilkox/gggenes,15751,160,2020-04-25T08:17:44Z,98.44375
gggibbous,"Moon charts are like pie charts except that the proportions are
shown as crescent or gibbous portions of a circle, like the lit and unlit
portions of the moon. As such, they work best with only one or two groups.
'gggibbous' extends 'ggplot2' to allow for plotting multiple moon charts in
a single panel and does not require a square coordinate system.",2019-12-02,Michael Bramson,https://github.com/mnbram/gggibbous,TRUE,https://github.com/mnbram/gggibbous,2058,42,2019-11-23T21:31:50Z,49
gghalves,"A 'ggplot2' extension for easy plotting of half-half geom combinations. Think half boxplot and half jitterplot, or half violinplot and half dotplot.",2020-03-28,Frederik Tiedemann,https://github.com/erocoar/gghalves,TRUE,https://github.com/erocoar/gghalves,4939,149,2020-03-28T12:12:37Z,33.147651006711406
gghighlight,Make it easier to explore data with highlights.,2020-03-29,Hiroaki Yutani,https://github.com/yutannihilation/gghighlight/,TRUE,https://github.com/yutannihilation/gghighlight,41805,393,2020-06-06T13:33:39Z,106.37404580152672
ggimage,"Supports image files and graphic objects to be visualized in
'ggplot2' graphic system.",2020-04-02,Guangchuang Yu,"https://github.com/GuangchuangYu/ggimage (devel),
https://guangchuangyu.github.io/pkgdocs/ggimage.html (vignette)",TRUE,https://github.com/guangchuangyu/ggimage,73156,102,2020-04-20T07:27:31Z,717.2156862745098
ggimg,"Provides two new layer types for displaying image data as layers
within the Grammar of Graphics framework. Displays images using either a
rectangle interface, with a fixed bounding box, or a point interface using a
central point and general size parameter. Images can be given as local
JPEG or PNG files, external resources, or as a list column containing
raster image data.",2020-03-20,Taylor B. Arnold,https://github.com/statsmaths/ggimg,TRUE,https://github.com/statsmaths/ggimg,1386,25,2020-03-23T14:17:40Z,55.44
gginference,"Visualise the results of F test to compare two variances, Student's t-test, test of equal or given proportions, Pearson's chi-squared test for count data and test for association/correlation between paired samples.",2020-03-21,Kleanthis Koupidis,https://github.com/okgreece/gginference,TRUE,https://github.com/okgreece/gginference,8165,5,2020-04-30T22:49:47Z,1633
GGIR,"A tool to process and analyse data collected with wearable raw acceleration sensors as described in Migueles and colleagues (2019) <doi: 10.1123/jmpb.2018-0063>, van Hees and colleagues (2014) <doi: 10.1152/japplphysiol.00421.2014>, and (2015) <doi: 10.1371/journal.pone.0142533>. The package has been developed and tested for binary data from 'GENEActiv' <https://www.activinsights.com/> and GENEA devices (not for sale), .csv-export data from 'Actigraph' <http://actigraphcorp.com> devices, and .cwa and .wav-format data from 'Axivity' <https://axivity.com>. These devices are currently widely used in research on human daily physical activity. Further, the package can handle accelerometer data file from any other sensor brand providing that the data is stored in csv format and has either no header or a two column header. Also the package allows for external function embedding.",2020-05-01,Vincent T van Hees,"https://github.com/wadpac/GGIR/,
https://groups.google.com/forum/#!forum/RpackageGGIR",TRUE,https://github.com/wadpac/ggir,79870,31,2020-06-04T17:21:19Z,2576.451612903226
ggiraph,Create interactive 'ggplot2' graphics using 'htmlwidgets'.,2019-10-31,David Gohel,https://davidgohel.github.io/ggiraph,TRUE,https://github.com/davidgohel/ggiraph,158050,400,2020-04-09T09:53:54Z,395.125
gglasso,"A unified algorithm, blockwise-majorization-descent (BMD), for efficiently computing the solution paths of the group-lasso penalized least squares, logistic regression, Huberized SVM and squared SVM. The package is an implementation of Yang, Y. and Zou, H. (2015) DOI: <doi:10.1007/s11222-014-9498-5>.",2020-03-18,Yi Yang,https://github.com/emeryyi/gglasso,TRUE,https://github.com/emeryyi/gglasso,39181,1,2020-02-17T18:56:58Z,39181
gglogo,"Visualize sequences in (modified) logo plots. The design choices
used by these logo plots allow sequencing data to be more easily analyzed.
Because it is integrated into the 'ggplot2' geom framework, these logo plots
support native features such as faceting.",2020-01-28,Eric Hare,https://github.com/heike/gglogo,TRUE,https://github.com/heike/gglogo,13671,14,2020-01-28T20:41:46Z,976.5
gglorenz,"Provides statistical transformations for plotting empirical
ordinary Lorenz curve (Lorenz 1905) <doi:10.2307/2276207> and
generalized Lorenz curve (Shorrocks 1983) <doi:10.2307/2554117>.",2020-05-27,JJ Chen,https://github.com/jjchern/gglorenz,TRUE,https://github.com/jjchern/gglorenz,8782,12,2020-05-31T21:33:05Z,731.8333333333334
ggmap,"A collection of functions to visualize spatial data and models
on top of static maps from various online sources (e.g. Google Maps and Stamen
Maps). It includes tools common to those tasks, including functions for
geolocation and routing.",2019-02-05,David Kahle,https://github.com/dkahle/ggmap,TRUE,https://github.com/dkahle/ggmap,2143211,594,2020-05-26T03:26:15Z,3608.0993265993266
ggmcmc,"Tools for assessing and diagnosing convergence of
Markov Chain Monte Carlo simulations, as well as for graphically display
results from full MCMC analysis. The package also facilitates the graphical
interpretation of models by providing flexible functions to plot the
results against observed variables.",2020-04-02,Xavier Fernández i Marín,"http://xavier-fim.net/packages/ggmcmc,
https://github.com/xfim/ggmcmc",TRUE,https://github.com/xfim/ggmcmc,78004,92,2020-04-22T04:50:07Z,847.8695652173913
ggmix,"Fit penalized multivariable linear mixed models with a single
random effect to control for population structure in genetic association
studies. The goal is to simultaneously fit many genetic variants at the
same time, in order to select markers that are independently associated
with the response. Can also handle prior annotation information,
for example, rare variants, in the form of variable weights. For more
information, see the website below and the accompanying paper:
Bhatnagar et al., ""Simultaneous SNP selection and adjustment for
population structure in high dimensional prediction models"", 2020,
<DOI:10.1101/408484>.",2020-03-20,Sahir Bhatnagar,https://github.com/sahirbhatnagar/ggmix,TRUE,https://github.com/sahirbhatnagar/ggmix,1147,6,2020-05-14T15:59:28Z,191.16666666666666
ggmosaic,"Mosaic plots in the 'ggplot2' framework. Mosaic
plot functionality is provided in a single 'ggplot2' layer by calling
the geom 'mosaic'.",2018-09-12,Haley Jeppson,http://github.com/haleyjeppson/ggmosaic,TRUE,https://github.com/haleyjeppson/ggmosaic,93876,102,2020-05-06T15:04:17Z,920.3529411764706
ggnetwork,Geometries to plot network objects with 'ggplot2'.,2020-02-12,François Briatte,https://github.com/briatte/ggnetwork,TRUE,https://github.com/briatte/ggnetwork,56818,95,2020-02-12T15:54:09Z,598.0842105263158
ggnuplot,"Provides a theme, a discrete color palette, and continuous scales
to make 'ggplot2' look like 'gnuplot'. This may be helpful if you use both
'ggplot2' and 'gnuplot' in one project.",2020-06-04,Hannes Riebl,https://github.com/hriebl/ggnuplot,TRUE,https://github.com/hriebl/ggnuplot,0,0,2020-06-01T14:48:58Z,NA
ggpacman,"A funny coding challenge to reproduce the game Pac-Man using 'ggplot2' and 'gganimate'.
It provides a pre-defined moves set for Pac-Man and the ghosts for the first level of the
game Pac-Man as well as polygon datasets to draw ghosts in 'ggplot2'.",2020-05-16,Mickaël Canouil,https://github.com/mcanouil/pacman,TRUE,https://github.com/mcanouil/pacman,356,40,2020-05-12T10:30:44Z,8.9
ggpage,"Facilitates the creation of page layout
visualizations in which words are represented as rectangles with sizes
relating to the length of the words. Which then is divided in lines
and pages for easy overview of up to quite large texts.",2019-06-13,Emil Hvitfeldt,https://github.com/EmilHvitfeldt/ggpage,TRUE,https://github.com/emilhvitfeldt/ggpage,8082,296,2019-06-13T23:41:07Z,27.304054054054053
ggparty,Extends 'ggplot2' functionality to the 'partykit' package. 'ggparty' provides the necessary tools to create clearly structured and highly customizable visualizations for tree-objects of the class 'party'.,2019-07-18,Martin Borkovec,https://github.com/martin-borkovec/ggparty,TRUE,https://github.com/martin-borkovec/ggparty,5416,109,2019-07-15T15:02:29Z,49.68807339449541
ggplot.multistats,"
Provides the ggplot binning layer stat_summaries_hex(),
which functions similar to its singular form,
but allows the use of multiple statistics per bin.
Those statistics can be mapped to multiple bin aesthetics.",2019-10-28,Philipp Angerer,https://github.com/flying-sheep/ggplot.multistats,TRUE,https://github.com/flying-sheep/ggplot.multistats,19496,8,2019-12-07T11:02:11Z,2437
ggplot2,"A system for 'declaratively' creating graphics,
based on ""The Grammar of Graphics"". You provide the data, tell 'ggplot2'
how to map variables to aesthetics, what graphical primitives to use,
and it takes care of the details.",2020-05-28,Hadley Wickham,"http://ggplot2.tidyverse.org, https://github.com/tidyverse/ggplot2",TRUE,https://github.com/tidyverse/ggplot2,32768488,4465,2020-06-09T11:35:16Z,7338.967077267637
ggplotgui,Easily explore data by creating ggplots through a (shiny-)GUI. R-code to recreate graph provided. ,2017-07-08,Gert Stulp,https://github.com/gertstulp/ggplotgui/,TRUE,https://github.com/gertstulp/ggplotgui,31486,105,2020-05-06T12:24:24Z,299.8666666666667
ggplotify,"Convert plot function call (using expression or formula) to 'grob' or 'ggplot' object that compatible to the 'grid' and 'ggplot2' ecosystem. With this package, we are able to e.g. using 'cowplot' to align plots produced by 'base' graphics, 'ComplexHeatmap', 'eulerr', 'grid', 'lattice', 'magick', 'pheatmap', 'vcd' etc. by converting them to 'ggplot' objects.",2020-03-12,Guangchuang Yu,https://github.com/GuangchuangYu/ggplotify,TRUE,https://github.com/guangchuangyu/ggplotify,198953,54,2020-03-27T08:47:33Z,3684.314814814815
ggPMX,"At Novartis, we aimed at standardizing the set of diagnostic plots used for modeling
activities in order to reduce the overall effort required for generating such plots.
For this, we developed a guidance that proposes an adequate set of diagnostics and a toolbox,
called 'ggPMX' to execute them. 'ggPMX' is a toolbox that can generate all diagnostic plots at a quality sufficient
for publication and submissions using few lines of code. ",2020-05-14,Amine Gassem,https://github.com/ggPMXdevelopment/ggPMX,TRUE,https://github.com/ggpmxdevelopment/ggpmx,6860,17,2020-06-05T02:43:32Z,403.52941176470586
ggpointdensity,"A cross between a 2D density plot and a scatter plot,
implemented as a 'ggplot2' geom. Points in the scatter plot are
colored by the number of neighboring points. This is useful to
visualize the 2D-distribution of points in case of overplotting.",2019-08-28,Lukas P. M. Kremer,https://github.com/LKremer/ggpointdensity,TRUE,https://github.com/lkremer/ggpointdensity,6148,255,2020-02-21T11:01:51Z,24.109803921568627
ggpol,A 'ggplot2' extension for implementing parliament charts and several other useful visualizations. ,2020-03-28,Frederik Tiedemann,https://github.com/erocoar/ggpol,TRUE,https://github.com/erocoar/ggpol,22838,62,2020-03-28T13:16:12Z,368.35483870967744
ggpubr,"The 'ggplot2' package is excellent and flexible for elegant data
visualization in R. However the default generated plots requires some formatting
before we can send them for publication. Furthermore, to customize a 'ggplot',
the syntax is opaque and this raises the level of difficulty for researchers
with no advanced R programming skills. 'ggpubr' provides some easy-to-use
functions for creating and customizing 'ggplot2'- based publication ready plots.",2020-05-04,Alboukadel Kassambara,https://rpkgs.datanovia.com/ggpubr/,TRUE,https://github.com/kassambara/ggpubr,3097124,593,2020-06-09T18:37:03Z,5222.8060708263065
ggpval,"Automatically performs desired statistical tests (e.g. wilcox.test(), t.test()) to compare between groups,
and adds the resulting p-values to the plot with an annotation bar.
Visualizing group differences are frequently performed by boxplots, bar plots, etc.
Statistical test results are often needed to be annotated on these plots.
This package provides a convenient function that works on 'ggplot2' objects,
performs the desired statistical test between groups of interest and annotates the test results on the plot.",2019-09-10,Jun Cheng,https://github.com/s6juncheng/ggpval,TRUE,https://github.com/s6juncheng/ggpval,14689,24,2019-09-10T18:39:06Z,612.0416666666666
ggquickeda,"Quickly and easily perform exploratory data analysis by uploading your
data as a 'csv' file. Start generating insights using 'ggplot2' plots and
'table1' tables with descriptive stats, all using an easy-to-use point and click
'Shiny' interface.",2020-04-17,Samer Mouksassi,https://github.com/smouksassi/ggquickeda,TRUE,https://github.com/smouksassi/ggquickeda,12155,33,2020-06-08T21:46:59Z,368.3333333333333
ggRandomForests,"Graphic elements for exploring Random Forests using the 'randomForest' or
'randomForestSRC' package for survival, regression and classification forests and
'ggplot2' package plotting.",2016-09-07,John Ehrlinger,https://github.com/ehrlinger/ggRandomForests,TRUE,https://github.com/ehrlinger/ggrandomforests,47811,117,2020-04-27T01:03:41Z,408.64102564102564
ggraph,"The grammar of graphics as implemented in ggplot2 is a poor fit for
graph and network visualizations due to its reliance on tabular data input.
ggraph is an extension of the ggplot2 API tailored to graph visualizations
and provides the same flexible approach to building up plots layer by layer.",2020-05-20,Thomas Lin Pedersen,"https://ggraph.data-imaginist.com,
https://github.com/thomasp85/ggraph",TRUE,https://github.com/thomasp85/ggraph,659712,727,2020-05-20T19:07:27Z,907.4442916093535
ggraptR,"Intended for both technical and non-technical users to create
interactive data visualizations through a web browser GUI without writing any
code.",2019-09-04,Eugene Dubossarsky,NA,TRUE,https://github.com/cargomoose/raptr,48925,61,2020-05-01T20:35:45Z,802.0491803278688
ggrepel,"Provides text and label geoms for 'ggplot2' that help to avoid
overlapping text labels. Labels repel away from each other and away from the
data points.",2020-03-08,Kamil Slowikowski,http://github.com/slowkow/ggrepel,TRUE,https://github.com/slowkow/ggrepel,4328914,743,2020-04-07T17:03:26Z,5826.263795423957
ggridges,Ridgeline plots provide a convenient way of visualizing changes in distributions over time or space. This package enables the creation of such plots in 'ggplot2'.,2020-01-12,Claus O. Wilke,https://wilkelab.org/ggridges,TRUE,https://github.com/wilkelab/ggridges,903726,303,2020-01-12T06:17:51Z,2982.5940594059407
ggrisk,"The risk plot may be one of the most commonly used figures in
tumor genetic data analysis. We can conclude the following two points:
Comparing the prediction results of the model with the real survival situation
to see whether the survival rate of the high-risk group is lower than that of the
low-level group, and whether the survival time of the high-risk group is
shorter than that of the low-risk group. The other is to compare the heat
map and scatter plot to see the correlation between the predictors and the
outcome.",2020-02-09,Jing Zhang,https://github.com/yikeshu0611/ggrisk,TRUE,https://github.com/yikeshu0611/ggrisk,2003,1,2020-02-11T14:13:49Z,2003
ggroups,"Calculates additive and dominance genetic relationship matrices and their inverses, in matrix and tabular-sparse formats. It includes functions for checking and processing pedigree, as well as functions to calculate the matrix of genetic group contributions (Q), and adding those contributions to the genetic merit of animals (Quaas (1988) <doi:10.3168/jds.S0022-0302(88)79691-5>). Calculation of Q is computationally extensive. There are computationally optimized functions to calculate Q.",2020-03-19,Mohammad Ali Nilforooshan,https://github.com/nilforooshan/ggroups,TRUE,https://github.com/nilforooshan/ggroups,6371,0,2020-03-19T06:45:40Z,NA
ggsci,"A collection of 'ggplot2' color palettes inspired by
plots in scientific journals, data visualization libraries,
science fiction movies, and TV shows.",2018-05-14,Nan Xiao,"https://nanx.me/ggsci/, https://github.com/road2stat/ggsci",TRUE,https://github.com/road2stat/ggsci,2255906,335,2020-04-23T15:30:54Z,6734.04776119403
ggsignif,"Enrich your 'ggplots' with group-wise comparisons.
This package provides an easy way to indicate if two groups are significantly different.
Commonly this is shown by a bracket on top connecting the groups of interest which itself is annotated with the level of significance (NS, *, **, ***).
The package provides a single layer (geom_signif()) that takes the groups for comparison and the test (t.test(), wilcox.text() etc.) as arguments and adds the annotation
to the plot.",2019-08-08,Constantin Ahlmann-Eltze,https://github.com/const-ae/ggsignif,TRUE,https://github.com/const-ae/ggsignif,2266810,232,2020-05-10T12:49:01Z,9770.73275862069
ggsn,"Adds north symbols (18 options) and scale bars in kilometers,
meters, nautical miles, or statute miles, to maps in geographic
or metric coordinates created with 'ggplot2' or 'ggmap'.",2019-02-18,Oswaldo Santos Baquero,https://github.com/oswaldosantos/ggsn,TRUE,https://github.com/oswaldosantos/ggsn,55885,137,2019-08-29T13:54:12Z,407.91970802919707
ggsoccer,"The 'ggplot2' package provides a powerful set of tools
for visualising and investigating data. The 'ggsoccer' package provides a
set of functions for elegantly displaying and exploring soccer event data
with 'ggplot2'. Providing extensible layers and themes, it is designed to
work smoothly with a variety of popular sports data providers.",2019-05-14,Ben Torvaney,"ggsoccer.statsandsnakeoil.com, github.com/Torvaney/ggsoccer",TRUE,https://github.com/torvaney/ggsoccer,10950,92,2020-04-12T11:45:39Z,119.02173913043478
ggsom,The aim of this package is to offer more variability of graphics based on the self-organizing maps.,2020-01-15,Felipe Carvalho,https://github.com/oldlipe/ggsom,TRUE,https://github.com/oldlipe/ggsom,8111,4,2020-01-15T16:24:11Z,2027.75
ggspatial,"Spatial data plus the power of the ggplot2 framework means easier mapping when input
data are already in the form of spatial objects.",2020-06-09,Dewey Dunnington,"https://paleolimbot.github.io/ggspatial,
https://github.com/paleolimbot/ggspatial",TRUE,https://github.com/paleolimbot/ggspatial,68663,233,2020-06-05T19:17:52Z,294.6909871244635
ggstar,"To create the regular polygon layer for easily discernible shapes,
we developed the package, it can be easily used if you know the 'ggplot2'.",2020-05-15,Shuangbin Xu,https://github.com/xiangpin/ggstar/,TRUE,https://github.com/xiangpin/ggstar,1626,8,2020-06-09T07:34:36Z,203.25
ggstatsplot,"Extension of 'ggplot2', 'ggstatsplot' creates
graphics with details from statistical tests included in the plots
themselves. It is targeted primarily at behavioral sciences community
to provide a one-line code to generate information-rich plots for
statistical analysis of continuous (violin plots, scatterplots,
histograms, dot plots, dot-and-whisker plots) or categorical (pie and
bar charts) data. Currently, it supports only the most common types of
statistical tests: parametric, nonparametric, robust, and bayesian
versions of t-test/anova, correlation analyses, contingency table
analysis, meta-analysis, and regression analyses.",2020-05-30,Indrajeet Patil,"https://indrajeetpatil.github.io/ggstatsplot/,
https://github.com/IndrajeetPatil/ggstatsplot",TRUE,https://github.com/indrajeetpatil/ggstatsplot,60399,869,2020-06-09T12:42:32Z,69.50402761795166
ggswissmaps,"Offers various swiss maps as data frames and 'ggplot2' objects and gives the
possibility to add layers of data on the maps. Data are publicly available
from the swiss federal statistical office.",2016-10-29,Sandro Petrillo Burri,https://github.com/gibonet/ggswissmaps,TRUE,https://github.com/gibonet/ggswissmaps,19274,4,2020-01-29T16:11:20Z,4818.5
ggtext,"A 'ggplot2' extension that enables the rendering of
complex formatted plot labels (titles, subtitles, facet labels,
axis labels, etc.). Text boxes with automatic word wrap are also
supported.",2020-06-04,Claus O. Wilke,https://wilkelab.org/ggtext,TRUE,https://github.com/wilkelab/ggtext,0,376,2020-06-04T15:16:00Z,0
ggThemeAssist,Rstudio add-in that delivers a graphical interface for editing 'ggplot2' theme elements.,2016-08-13,Calli Gross,https://github.com/calligross/ggthemeassist,TRUE,https://github.com/calligross/ggthemeassist,56648,333,2019-06-15T06:26:12Z,170.1141141141141
ggTimeSeries,"Provides additional display mediums for time series visualisations, such as calendar heat map, steamgraph, marimekko, etc.",2018-09-03,Aditya Kothari,https://github.com/Ather-Energy/ggTimeSeries,TRUE,https://github.com/ather-energy/ggtimeseries,14616,195,2019-06-21T05:57:32Z,74.95384615384616
GGUM,"An implementation of the generalized graded unfolding model (GGUM) in R, see Roberts, Donoghue, and Laughlin (2000) <doi:10.1177/01466216000241001>). It allows to simulate data sets based on the GGUM. It fits the GGUM and the GUM, and it retrieves item and person parameter estimates. Several plotting functions are available (item and test information functions; item and test characteristic curves; item category response curves). Additionally, there are some functions that facilitate the communication between R and 'GGUM2004'. Finally, a model-fit checking utility, MODFIT(), is also available.",2020-05-18,Jorge N. Tendeiro,http://github.com/jorgetendeiro/GGUM,TRUE,https://github.com/jorgetendeiro/ggum,11390,1,2020-05-18T16:13:03Z,11390
ggupset,"Replace the standard x-axis in 'ggplots' with a combination matrix
to visualize complex set overlaps. 'UpSet' has introduced a new way to visualize
the overlap of sets as an alternative to Venn diagrams.
This package provides a simple way to produce such plots using 'ggplot2'.
In addition it can convert any categorical axis into a combination
matrix axis.",2020-05-05,Constantin Ahlmann-Eltze,https://github.com/const-ae/ggupset,TRUE,https://github.com/const-ae/ggupset,9314,141,2020-05-08T16:31:16Z,66.05673758865248
ggVennDiagram,"Easy-to-use functions to generate 2-4 sets Venn plot in publication quality.
'ggVennDiagram' is the first software that can automatically fill different colors to each part of a Venn diagram.",2019-10-09,Chun-Hui Gao,https://github.com/gaospecial/ggVennDiagram,TRUE,https://github.com/gaospecial/ggvenndiagram,5018,65,2019-12-25T08:36:31Z,77.2
ggwordcloud,"Provides a word cloud text geom for 'ggplot2'. Texts
are placed so that they do not overlap as in 'ggrepel'. The algorithm
used is a variation around the one of 'wordcloud2.js'.",2019-06-02,Erwan Le Pennec,"https://github.com/lepennec/ggwordcloud,
https://lepennec.github.io/ggwordcloud/",TRUE,https://github.com/lepennec/ggwordcloud,22002,126,2020-03-03T08:45:18Z,174.61904761904762
Ghat,"Functions are provided for quantifying evolution and selection on complex traits.
The package implements effective handling and analysis algorithms scaled for
genome-wide data and calculates a composite statistic, denoted Ghat, which is used
to test for selection on a trait. The package provides a number of simple examples
for handling and analysing the genome data and visualising the output and results.
Beissinger et al., (2018) <doi:10.1534/genetics.118.300857>.",2019-08-02,Medhat Mahmoud,https://www.genetics.org/content/209/1/321,TRUE,https://github.com/medhat86/ghat,3473,0,2019-07-18T07:14:45Z,NA
ghibli,"Colour palettes inspired by Studio Ghibli <https://en.wikipedia.org/wiki/Studio_Ghibli>
films, ported to R for your enjoyment.",2020-04-16,Ewen Henderson,"https://ewenme.github.io/ghibli/, https://github.com/ewenme/ghibli",TRUE,https://github.com/ewenme/ghibli,12669,188,2020-04-16T11:56:45Z,67.38829787234043
ghql,"A 'GraphQL' client, with an R6 interface for initializing
a connection to a 'GraphQL' instance, and methods for constructing
queries, including fragments and parameterized queries. Queries
are checked with the 'libgraphqlparser' C++ parser via the
'graphql' package.",2020-03-04,Scott Chamberlain,"https://github.com/ropensci/ghql (devel)
https://docs.ropensci.org/ghql (docs)",TRUE,https://github.com/ropensci/ghql,1870,66,2020-03-04T18:21:28Z,28.333333333333332
gibble,"Build a map of path-based geometry, this is a simple description of the number
of parts in an object and their basic structure. Translation and restructuring operations for
planar shapes and other hierarchical types require a data model with a record of the underlying
relationships between elements. The gibble() function creates a geometry map, a simple record of
the underlying structure in path-based hierarchical types. There are methods for the planar shape
types in the 'sf' and 'sp' packages and for types in the 'trip' and 'silicate' packages. ",2020-05-09,Michael Sumner,https://github.com/mdsumner/gibble,TRUE,https://github.com/mdsumner/gibble,18295,5,2020-05-18T13:09:32Z,3659
gifski,"Multi-threaded GIF encoder written in Rust: <https://gif.ski/>.
Converts images to GIF animations using pngquant's efficient cross-frame
palettes and temporal dithering with thousands of colors per frame.",2018-09-28,Jeroen Ooms,"https://gif.ski/ (upstream), https://github.com/r-rust/gifski
(devel)",TRUE,https://github.com/r-rust/gifski,321963,41,2019-10-05T11:38:08Z,7852.756097560976
gifti,"Functions to read in the geometry format under the
'Neuroimaging' 'Informatics' Technology Initiative ('NIfTI'), called
'GIFTI' <https://www.nitrc.org/projects/gifti/>.
These files contain surfaces of brain imaging data.",2018-02-01,John Muschelli,NA,TRUE,https://github.com/muschellij2/gifti,13234,3,2020-05-08T15:44:51Z,4411.333333333333
GIFTr,"A framework and functions to create 'MOODLE' quizzes. 'GIFTr' takes dataframe of questions of
four types: multiple choices, numerical, true or false and short answer questions, and exports a text
file formatted in 'MOODLE' GIFT format. You can prepare a spreadsheet in any software and import
it into R to generate any number of questions with 'HTML', 'markdown' and 'LaTeX' support.",2019-10-20,Omar I. Elashkar,https://github.com/omarelashkar/GIFTr,TRUE,https://github.com/omarelashkar/giftr,2791,1,2019-10-21T16:46:53Z,2791
GillespieSSA2,"A fast, scalable, and versatile framework for simulating large
systems with Gillespie's Stochastic Simulation Algorithm ('SSA').
This package is the spiritual successor to the 'GillespieSSA' package
originally written by Mario Pineda-Krch. Benefits of this package
include major speed improvements (>100x), easier to understand documentation,
and many unit tests that try to ensure the package works as intended.",2020-03-14,Robrecht Cannoodt,http://github.com/rcannood/GillespieSSA2,TRUE,https://github.com/rcannood/gillespiessa2,4357,4,2020-03-16T09:33:26Z,1089.25
gim,"Implements the generalized integration model, which integrates individual-level data and summary statistics under a generalized linear model framework. It supports continuous and binary outcomes to be modeled by the linear and logistic regression models. For binary outcome, data can be sampled in prospective cohort studies or case-control studies. ",2019-11-04,Han Zhang,https://github.com/zhangh12/gim,TRUE,https://github.com/zhangh12/gim,8771,0,2020-05-20T06:04:57Z,NA
gimme,"Automated identification and estimation of group- and
individual-level relations in time series data from within a structural
equation modeling framework.",2020-02-15,Kathleen Gates,https://github.com/GatesLab/gimme/,TRUE,https://github.com/gateslab/gimme,40077,10,2020-05-19T22:23:04Z,4007.7
gimms,"This is a set of functions to retrieve information about GIMMS
NDVI3g files currently available online; download (and re-arrange, in the
case of NDVI3g.v0) the half-monthly data sets from NASA Ames Ecological
Forecasting Lab (ECOCAST); import downloaded files from ENVI binary
(NDVI3g.v0) or NetCDF format (NDVI3g.v1) directly into R based on the
widespread 'raster' package; conduct quality control; and generate monthly
composites (e.g., maximum values) from the half-monthly input data. As a
special gimmick, a method is included to conveniently apply the Mann-Kendall
trend test upon 'Raster*' images, optionally featuring trend-free
pre-whitening to account for lag-1 autocorrelation.",2020-03-19,Florian Detsch,https://github.com/environmentalinformatics-marburg/gimms,TRUE,https://github.com/environmentalinformatics-marburg/gimms,24061,13,2020-03-19T13:15:32Z,1850.8461538461538
gistr,"Work with 'GitHub' 'gists' from 'R' (e.g.,
<http://en.wikipedia.org/wiki/GitHub#Gist>,
<https://help.github.com/articles/about-gists/>). A 'gist'
is simply one or more files with code/text/images/etc. This package allows
the user to create new 'gists', update 'gists' with new files, rename files,
delete files, get and delete 'gists', star and 'un-star' 'gists', fork 'gists',
open a 'gist' in your default browser, get embed code for a 'gist', list
'gist' 'commits', and get rate limit information when 'authenticated'. Some
requests require authentication and some do not. 'Gists' website:
<https://gist.github.com/>.",2020-01-09,Scott Chamberlain,"https://github.com/ropensci/gistr (devel),
https://docs.ropensci.org/gistr (website)",TRUE,https://github.com/ropensci/gistr,135580,90,2020-02-11T00:16:55Z,1506.4444444444443
git2r,"Interface to the 'libgit2' library, which is a pure C
implementation of the 'Git' core methods. Provides access to 'Git'
repositories to extract data and running some basic 'Git'
commands.",2020-05-03,See AUTHORS file.,"https://docs.ropensci.org/git2r (website),
https://github.com/ropensci/git2r",TRUE,https://github.com/ropensci/git2r,7971170,163,2020-05-08T15:05:51Z,48902.88343558282
git2rdata,Make versioning of data.frame easy and efficient using git repositories.,2020-03-02,Thierry Onkelinx,"https://github.com/ropensci/git2rdata,
https://doi.org/10.5281/zenodo.1485309",TRUE,https://github.com/ropensci/git2rdata,5001,78,2020-03-02T12:25:56Z,64.11538461538461
gitgadget,"An Rstudio addin for version control that allows users to clone
repositories, create and delete branches, and sync forks on GitHub, GitLab, etc.
Furthermore, the addin uses the GitLab API to allow instructors to create
forks and merge requests for all students/teams with one click of a button.",2019-10-10,Vincent Nijs,URL: https://github.com/vnijs/gitgadget,TRUE,https://github.com/vnijs/gitgadget,14171,15,2020-05-17T22:16:31Z,944.7333333333333
gitignore,"Simple interface to query gitignore.io to fetch
gitignore templates that can be included in the .gitignore file. More
than 450 templates are currently available.",2019-07-29,Philippe Massicotte,https://github.com/ropensci/gitignore,TRUE,https://github.com/ropensci/gitignore,4545,29,2020-04-12T22:04:47Z,156.72413793103448
gitlink,"Provides helpers to add 'Git' links to 'shiny'
applications, 'rmarkdown' documents, and other 'HTML' based resources.
This is most commonly used for 'GitHub' ribbons.",2019-07-23,Cole Arendt,https://github.com/colearendt/gitlink,TRUE,https://github.com/colearendt/gitlink,3984,14,2019-11-28T05:32:55Z,284.57142857142856
glcm,"Enables calculation of image textures (Haralick 1973)
<doi:10.1109/TSMC.1973.4309314> from grey-level co-occurrence matrices
(GLCMs). Supports processing images that cannot fit in memory.",2020-02-26,Alex Zvoleff,http://www.azvoleff.com/glcm,TRUE,https://github.com/azvoleff/glcm,23494,11,2020-02-26T21:44:22Z,2135.818181818182
gllvm,"Analysis of multivariate data using generalized linear latent variable models (gllvm).
Estimation is performed using either Laplace approximation method or variational approximation method implemented via TMB (Kristensen et al., (2016), <doi:10.18637/jss.v070.i05>).
For details see Niku et al. (2019a) <doi:10.1371/journal.pone.0216129> and Niku et al. (2019b) <doi:10.1111/2041-210X.13303>.",2020-05-11,Jenni Niku,https://github.com/JenniNiku/gllvm.git,TRUE,https://github.com/jenniniku/gllvm,13901,16,2020-05-27T15:45:45Z,868.8125
glmbb,"Find all hierarchical models of specified generalized linear
model with information criterion (AIC, BIC, or AICc) within specified
cutoff of minimum value. Alternatively, find all such graphical models.
Use branch and bound algorithm so we do not have to fit all models.",2017-06-02,Charles J. Geyer <[email protected]>.,https://github.com/cjgeyer/glmbb,TRUE,https://github.com/cjgeyer/glmbb,15856,0,2020-05-28T19:44:36Z,NA
glmdisc,"A Stochastic-Expectation-Maximization (SEM) algorithm (Celeux et al. (1995) <https://hal.inria.fr/inria-00074164>) associated with a Gibbs sampler which purpose is to learn a constrained representation for logistic regression that is called quantization (Ehrhardt et al. (2019) <arXiv:1903.08920>). Continuous features are discretized and categorical features' values are grouped to produce a better logistic regression model. Pairwise interactions between quantized features are dynamically added to the model through a Metropolis-Hastings algorithm (Hastings, W. K. (1970) <doi:10.1093/biomet/57.1.97>).",2020-03-22,Adrien Ehrhardt,https://adimajo.github.io,TRUE,https://github.com/adimajo/glmdisc,6237,3,2020-04-14T15:43:43Z,2079
GLMMadaptive,"Fits generalized linear mixed models for a single grouping factor under
maximum likelihood approximating the integrals over the random effects with an
adaptive Gaussian quadrature rule; Jose C. Pinheiro and Douglas M. Bates (1995)
<doi:10.1080/10618600.1995.10474663>. ",2020-01-24,Dimitris Rizopoulos,"https://drizopoulos.github.io/GLMMadaptive/,
https://github.com/drizopoulos/GLMMadaptive",TRUE,https://github.com/drizopoulos/glmmadaptive,31810,28,2020-02-02T20:30:21Z,1136.0714285714287
glmmboot,"Performs bootstrap resampling for most models that update() works for. There
are two primary functions: bootstrap_model() performs block resampling if random effects
are present, and case resampling if not; bootstrap_ci() converts output from
bootstrap model runs into confidence intervals and p-values. By default,
bootstrap_model() calls bootstrap_ci().
Package motivated by Humphrey and Swingley (2018) <arXiv:1805.08670>.",2020-03-30,Colman Humphrey,https://github.com/ColmanHumphrey/glmmboot,TRUE,https://github.com/colmanhumphrey/glmmboot,12080,3,2020-03-29T19:22:08Z,4026.6666666666665
glmmfields,"Implements Bayesian spatial and spatiotemporal models that
optionally allow for extreme spatial deviations through time. 'glmmfields'
uses a predictive process approach with random fields implemented through
a multivariate-t distribution instead of the usual multivariate normal.
Sampling is conducted with 'Stan'. References: Anderson and Ward (2019)
<doi:10.1002/ecy.2403>.",2019-05-18,Sean C. Anderson,https://github.com/seananderson/glmmfields,TRUE,https://github.com/seananderson/glmmfields,9609,24,2020-05-28T05:54:30Z,400.375
glmmsr,"Conduct inference about generalized linear mixed models, with a
choice about which method to use to approximate the likelihood. In addition
to the Laplace and adaptive Gaussian quadrature approximations, which are
borrowed from 'lme4', the likelihood may be approximated by the sequential
reduction approximation, or an importance sampling approximation. These
methods provide an accurate approximation to the likelihood in some
situations where it is not possible to use adaptive Gaussian quadrature.",2019-02-04,Helen Ogden,http://github.com/heogden/glmmsr,TRUE,https://github.com/heogden/glmmsr,23992,13,2019-06-20T13:34:54Z,1845.5384615384614
glmnetUtils,"Provides a formula interface for the 'glmnet' package for
elasticnet regression, a method for cross-validating the alpha parameter,
and other quality-of-life tools.",2020-03-12,Hong Ooi,https://github.com/Hong-Revo/glmnetUtils,TRUE,https://github.com/hong-revo/glmnetutils,88976,46,2020-03-07T12:25:27Z,1934.2608695652175
glmpca,"Implements a generalized version of principal components analysis
(GLM-PCA) for dimension reduction of non-normally distributed data such as
counts or binary matrices.
Townes FW, Hicks SC, Aryee MJ, Irizarry RA (2019) <doi:10.1101/574574>.
Townes FW (2019) <arXiv:1907.02647>.",2019-09-27,F. William Townes,https://github.com/willtownes/glmpca,TRUE,https://github.com/willtownes/glmpca,4863,37,2020-04-01T13:06:42Z,131.43243243243242
glmtree,"A logistic regression tree is a decision tree with logistic regressions at its leaves. A particular stochastic expectation maximization algorithm is used to draw a few good trees, that are then assessed via the user's criterion of choice among BIC / AIC / test set Gini. The formal development is given in a PhD chapter, see Ehrhardt (2019) <https://github.com/adimajo/manuscrit_these/releases/>.",2019-10-06,Adrien Ehrhardt,https://adimajo.github.io,TRUE,https://github.com/adimajo/glmtree,3318,1,2019-09-26T13:37:00Z,3318
GlobalOptions,"It provides more configurations on the option values such as validation
and filtering on the values, making options invisible or private.",2019-09-30,Zuguang Gu,https://github.com/jokergoo/GlobalOptions,TRUE,https://github.com/jokergoo/globaloptions,428816,3,2020-06-06T20:11:10Z,142938.66666666666
globals,"Identifies global (""unknown"" or ""free"") objects in R expressions
by code inspection using various strategies, e.g. conservative or liberal.
The objective of this package is to make it as simple as possible to
identify global objects for the purpose of exporting them in distributed
compute environments.",2019-12-07,Henrik Bengtsson,https://github.com/HenrikBengtsson/globals,TRUE,https://github.com/henrikbengtsson/globals,1252822,17,2020-05-02T21:51:11Z,73695.41176470589
glue,"An implementation of interpreted string literals, inspired by
Python's Literal String Interpolation <https://www.python.org/dev/peps/pep-0498/> and Docstrings
<https://www.python.org/dev/peps/pep-0257/> and Julia's Triple-Quoted String Literals
<https://docs.julialang.org/en/v1.3/manual/strings/#Triple-Quoted-String-Literals-1>.",2020-05-13,Jim Hester,"https://github.com/tidyverse/glue, https://glue.tidyverse.org/",TRUE,https://github.com/tidyverse/glue,20314140,455,2020-06-04T19:29:54Z,44646.46153846154
gluedown,"Ease the transition between R vectors and markdown
text. With 'gluedown' and 'rmarkdown', users can create traditional
vectors in R, glue those strings together with the markdown syntax,
and print those formatted vectors directly to the document. This
package primarily uses GitHub Flavored Markdown (GFM), an offshoot of
the unambiguous CommonMark specification by John MacFarlane (2019)
<https://spec.commonmark.org/>.",2020-01-14,Kiernan Nicholls,"https://kiernann.com/gluedown/,
https://github.com/kiernann/gluedown/",TRUE,https://github.com/kiernann/gluedown,3534,74,2020-01-29T16:30:00Z,47.75675675675676
gmailr,"An interface to the 'Gmail' 'RESTful' API. Allows
access to your 'Gmail' messages, threads, drafts and labels.",2019-08-23,Jim Hester,https://github.com/r-lib/gmailr,TRUE,https://github.com/r-lib/gmailr,2079070,196,2020-02-03T16:31:03Z,10607.5
gmapsdistance,"Get distance and travel time between two points from Google Maps.
Four possible modes of transportation (bicycling, walking, driving and
public transportation).",2018-08-28,Rodrigo Azuero Melo & & Demetrio Rodriguez T & David Zarruk,https://github.com/rodazuero/gmapsdistance,TRUE,https://github.com/rodazuero/gmapsdistance,41473,62,2020-02-17T22:41:48Z,668.9193548387096
gmat,"Simulation of correlation matrices possibly constrained by a given undirected or acyclic directed graph. In particular, the package provides functions that implement the simulation methods described in Córdoba et al. (2018a) <doi:10.1007/978-3-030-03493-1_13>, Córdoba et al. (2018b) <doi:10.1145/2695664.2695717> and Córdoba et al. (2019) <arXiv:1909.01062>.",2019-09-13,Irene Córdoba,https://github.com/irenecrsn/gmat,TRUE,https://github.com/irenecrsn/gmat,7651,0,2019-10-08T10:20:56Z,NA
GMCM,"Unsupervised Clustering and Meta-analysis using Gaussian Mixture
Copula Models.",2019-11-05,Anders Ellern Bilgrau,https://github.com/AEBilgrau/GMCM,TRUE,https://github.com/aebilgrau/gmcm,27222,6,2020-01-28T19:37:54Z,4537
gMCP,"Functions and a graphical user interface for graphical described
multiple test procedures.",2020-03-23,Kornelius Rohmeyer,http://gsrmtp.r-forge.r-project.org/,TRUE,https://github.com/kornl/gmcp,38935,5,2020-03-22T20:44:36Z,7787
Gmisc,"Tools for making the descriptive ""Table 1"" used in medical
articles, a transition plot for showing changes between categories
(also known as a Sankey diagram), flow charts by extending the grid package,
a method for variable selection based on the SVD, Bézier lines with arrows complementing the
ones in the 'grid' package, and more.",2020-05-06,Max Gordon,http://gforge.se,TRUE,https://github.com/gforge/gmisc,72781,37,2020-05-05T12:13:00Z,1967.054054054054
gMOIP,"Make 2D and 3D plots of linear programming (LP),
integer linear programming (ILP), or mixed integer linear programming (MILP) models
with up to three objectives. Plots of both the solution and criterion space are possible.
For instance the non-dominated (Pareto) set for bi-objective LP/ILP/MILP programming models
(see vignettes for an overview).",2020-02-20,Lars Relund Nielsen,https://github.com/relund/gMOIP/,TRUE,https://github.com/relund/gmoip,11518,0,2020-05-20T13:25:02Z,NA
GMSE,"Integrates game theory and ecological theory to construct
social-ecological models that simulate the management of populations and
stakeholder actions. These models build off of a previously developed
management strategy evaluation (MSE) framework to simulate all aspects of
management: population dynamics, manager observation of populations, manager
decision making, and stakeholder responses to management decisions. The
newly developed generalised management strategy evaluation (GMSE)
framework uses genetic algorithms to mimic the decision-making process of
managers and stakeholders under conditions of change, uncertainty, and
conflict. Simulations can be run using gmse(), gmse_apply(), and
gmse_gui() functions.",2020-05-31,A. Bradley Duthie,https://confoobio.github.io/gmse/,TRUE,https://github.com/confoobio/gmse,14827,6,2020-06-05T11:43:15Z,2471.1666666666665
gnm,"Functions to specify and fit generalized nonlinear models, including models with multiplicative interaction terms such as the UNIDIFF model from sociology and the AMMI model from crop science, and many others. Over-parameterized representations of models are used throughout; functions are provided for inference on estimable parameter combinations, as well as standard methods for diagnostics etc.",2020-02-03,Heather Turner,https://github.com/hturner/gnm,TRUE,https://github.com/hturner/gnm,243906,3,2020-02-03T09:21:22Z,81302
goffda,"Implementation of several goodness-of-fit tests for functional
data. Currently, mostly related with the functional linear model with
functional/scalar response and functional/scalar predictor. The package
allows for the replication of the data applications considered in
García-Portugués, Álvarez-Liébana, Álvarez-Pérez and González-Manteiga
(2019) <arXiv:1909.07686>.",2019-12-17,Eduardo García-Portugués,https://github.com/egarpor/goffda,TRUE,https://github.com/egarpor/goffda,3342,5,2019-12-16T23:16:28Z,668.4
goftest,"Cramer-Von Mises and Anderson-Darling tests of goodness-of-fit
for continuous univariate distributions, using
efficient algorithms.",2019-12-02,Adrian Baddeley,https://github.com/baddstats/goftest,TRUE,https://github.com/baddstats/goftest,683087,1,2019-11-27T01:55:16Z,683087
golem,"An opinionated framework for building a
production-ready 'Shiny' application. This package contains a series
of tools for building a robust 'Shiny' application from start to
finish.",2020-03-05,Vincent Guyader,https://github.com/ThinkR-open/golem,TRUE,https://github.com/thinkr-open/golem,21530,369,2020-05-28T12:47:07Z,58.34688346883469
goodpractice,"Give advice about good practices when building R packages.
Advice includes functions and syntax to avoid, package structure,
code complexity, code formatting, etc.",2018-05-02,Hannah Frick,https://github.com/mangothecat/goodpractice,TRUE,https://github.com/mangothecat/goodpractice,15188,312,2019-11-14T11:22:44Z,48.67948717948718
googleAnalyticsR,"Interact with the Google Analytics
APIs <https://developers.google.com/analytics/>, including
the Core Reporting API (v3 and v4), Management API, User Activity API
and Multi-Channel Funnel API.",2019-11-04,Mark Edmondson,http://code.markedmondson.me/googleAnalyticsR/,TRUE,https://github.com/markedmondson1234/googleanalyticsr,117273,192,2020-06-02T19:03:14Z,610.796875
googleAuthR,"Create R functions that interact with OAuth2 Google APIs
<https://developers.google.com/apis-explorer/> easily,
with auto-refresh and Shiny compatibility.",2020-04-26,Mark Edmondson,http://code.markedmondson.me/googleAuthR/,TRUE,https://github.com/markedmondson1234/googleauthr,251177,139,2020-05-24T16:38:36Z,1807.0287769784172
googleCloudRunner,"Tools to easily enable R scripts in the Google Cloud Platform.
Utilise cloud services such as Cloud Run <https://cloud.run> for R over HTTP,
Cloud Build <https://cloud.google.com/cloud-build/> for Continuous Delivery
and Integration services and
Cloud Scheduler <https://cloud.google.com/scheduler/> for scheduled scripts.",2020-05-02,Mark Edmondson,https://code.markedmondson.me/googleCloudRunner,TRUE,https://github.com/markedmondson1234/googlecloudrunner,2274,30,2020-06-08T21:11:35Z,75.8
googleCloudStorageR,"Interact with Google Cloud Storage <https://cloud.google.com/storage/>
API in R. Part of the 'cloudyr' <https://cloudyr.github.io/> project.",2019-08-31,Mark Edmondson,http://code.markedmondson.me/googleCloudStorageR/,TRUE,https://github.com/cloudyr/googlecloudstorager,86202,65,2020-06-06T10:30:55Z,1326.1846153846154
googleCloudVisionR,"Interact with the 'Google Cloud Vision' <https://cloud.google.com/vision/>
API in R. Part of the 'cloudyr' <https://cloudyr.github.io/> project.",2020-02-07,Jeno Pal,NA,TRUE,https://github.com/cloudyr/googlecloudvisionr,4367,4,2020-04-02T09:18:55Z,1091.75
googleComputeEngineR,"Interact with the 'Google Compute Engine' API in R. Lets you create,
start and stop instances in the 'Google Cloud'. Support for preconfigured instances,
with templates for common R needs. ",2019-05-04,Mark Edmondson,https://cloudyr.github.io/googleComputeEngineR/,TRUE,https://github.com/cloudyr/googlecomputeenginer,29968,124,2020-05-26T07:00:33Z,241.67741935483872
googledrive,Manage Google Drive files from R.,2020-05-05,Jennifer Bryan,"https://googledrive.tidyverse.org,
https://github.com/tidyverse/googledrive",TRUE,https://github.com/tidyverse/googledrive,224268,188,2020-05-28T15:31:53Z,1192.9148936170213
googleLanguageR,"Call 'Google Cloud' machine learning APIs for text and speech tasks.
Call the 'Cloud Translation' API <https://cloud.google.com/translate/> for detection
and translation of text, the 'Natural Language' API <https://cloud.google.com/natural-language/> to
analyse text for sentiment, entities or syntax, the 'Cloud Speech' API
<https://cloud.google.com/speech/> to transcribe sound files to text and
the 'Cloud Text-to-Speech' API <https://cloud.google.com/text-to-speech/> to turn text
into sound files.",2020-04-19,Mark Edmondson,"http://code.markedmondson.me/googleLanguageR/,
https://github.com/ropensci/googleLanguageR,
https://docs.ropensci.org/googleLanguageR/",TRUE,https://github.com/ropensci/googlelanguager,25903,129,2020-04-20T13:36:52Z,200.7984496124031
googler,"This is a wrapper for the command line tool 'googler', which can be
found at the following URL: <https://github.com/jarun/googler>.",2019-09-04,Michael W. Kearney,https://github.com/mkearney/googler,TRUE,https://github.com/mkearney/googler,3520,9,2019-09-03T14:13:00Z,391.1111111111111
googlesheets,Interact with Google Sheets from R.,2018-06-29,Jennifer Bryan,https://github.com/jennybc/googlesheets,TRUE,https://github.com/jennybc/googlesheets,431130,753,2020-04-21T19:00:30Z,572.5498007968127
googlesheets4,"Interact with Google Sheets through the Sheets API
v4 <https://developers.google.com/sheets/api>. ""API"" is an acronym for
""application programming interface""; the Sheets API allows users to
interact with Google Sheets programmatically, instead of via a web
browser. The ""v4"" refers to the fact that the Sheets API is currently
at version 4. This package can read and write both the metadata and
the cell data in a Sheet.",2020-05-08,Jennifer Bryan,https://github.com/tidyverse/googlesheets4,TRUE,https://github.com/tidyverse/googlesheets4,36479,174,2020-05-28T15:30:30Z,209.6494252873563
googleway,"Provides a mechanism to plot a 'Google Map' from 'R' and overlay
it with shapes and markers. Also provides access to 'Google Maps' APIs,
including places, directions, roads, distances, geocoding, elevation and
timezone.",2018-09-17,David Cooley,NA,TRUE,https://github.com/symbolixau/googleway,84124,171,2020-02-17T03:10:49Z,491.953216374269
gotop,Add a scroll back to top 'Font Awesome' icon in R Markdown documents and Shiny apps using 'jQuery GoTop'.,2020-04-25,Félix Luginbuhl,"https://felixluginbuhl.com/gotop, https://github.com/lgnbhl/gotop",TRUE,https://github.com/lgnbhl/gotop,1022,0,2020-04-25T15:56:04Z,NA
govdown,"A suite of custom R Markdown formats and templates
for authoring web pages styled with the GOV.UK Design System.",2020-05-13,Duncan Garmonsway,https://ukgovdatascience.github.io/govdown,TRUE,https://github.com/ukgovdatascience/govdown,3573,32,2020-05-13T20:17:18Z,111.65625
GPareto,"Gaussian process regression models, a.k.a. Kriging models, are
applied to global multi-objective optimization of black-box functions.
Multi-objective Expected Improvement and Step-wise Uncertainty Reduction
sequential infill criteria are available. A quantification of uncertainty
on Pareto fronts is provided using conditional simulations.",2020-04-01,Mickael Binois,http://github.com/mbinois/GPareto,TRUE,https://github.com/mbinois/gpareto,54031,7,2020-04-01T10:56:49Z,7718.714285714285
gpclib,General polygon clipping routines for R based on Alan Murta's C library.,2020-02-28,Roger D. Peng <[email protected]> with contributions from Duncan Murdoch and Barry Rowlingson; GPC library by Alan Murta,"http://www.cs.man.ac.uk/~toby/gpc/,
http://github.com/rdpeng/gpclib",TRUE,https://github.com/rdpeng/gpclib,108351,8,2020-02-28T16:18:57Z,13543.875
gpg,"Bindings to GnuPG for working with OpenGPG (RFC4880) cryptographic methods.
Includes utilities for public key encryption, creating and verifying digital signatures,
and managing your local keyring. Note that some functionality depends on the version of
GnuPG that is installed on the system. On Windows this package can be used together with
'GPG4Win' which provides a GUI for managing keys and entering passphrases.",2019-12-02,Jeroen Ooms,"https://jeroen.cran.dev/gpg/ (docs) https://github.com/jeroen/gpg
(dev)",TRUE,https://github.com/jeroen/gpg,16818,14,2019-12-02T13:09:17Z,1201.2857142857142
gplots,"Various R programming tools for plotting data, including:
- calculating and plotting locally smoothed summary function as
('bandplot', 'wapply'),
- enhanced versions of standard plots ('barplot2', 'boxplot2',
'heatmap.2', 'smartlegend'),
- manipulating colors ('col2hex', 'colorpanel', 'redgreen',
'greenred', 'bluered', 'redblue', 'rich.colors'),
- calculating and plotting two-dimensional data summaries ('ci2d',
'hist2d'),
- enhanced regression diagnostic plots ('lmplot2', 'residplot'),
- formula-enabled interface to 'stats::lowess' function ('lowess'),
- displaying textual data in plots ('textplot', 'sinkplot'),
- plotting a matrix where each cell contains a dot whose size
reflects the relative magnitude of the elements ('balloonplot'),
- plotting ""Venn"" diagrams ('venn'),
- displaying Open-Office style plots ('ooplot'),
- plotting multiple data on same region, with separate axes
('overplot'),
- plotting means and confidence intervals ('plotCI', 'plotmeans'),
- spacing points in an x-y plot so they don't overlap ('space').",2020-02-25,Gregory R. Warnes,https://github.com/talgalili/gplots,TRUE,https://github.com/talgalili/gplots,4595210,4,2020-02-24T06:28:44Z,1148802.5
gqlr,"Server implementation of 'GraphQL' <http://graphql.github.io/graphql-spec/>,
a query language originally created by Facebook for describing data requirements on complex application
data models. Visit <http://graphql.org> to learn more about 'GraphQL'.",2019-12-02,Barret Schloerke,"https://github.com/schloerke/gqlr,
http://graphql.github.io/graphql-spec/, http://graphql.org",TRUE,https://github.com/schloerke/gqlr,9298,36,2019-12-02T16:09:40Z,258.27777777777777
grabsampling,"Functions for obtaining the probability of detection, for grab samples selection by using two different methods such as systematic or random based on two-state Markov chain model. For detection probability calculation, we used results from Bhat, U. and Lal, R. (1988) <doi:10.2307/1427041>.",2020-03-04,Mayooran Thevaraja,https://github.com/Mayooran1987/grabsampling,TRUE,https://github.com/mayooran1987/grabsampling,1529,0,2020-03-12T20:57:01Z,NA
grainscape,"Given a landscape resistance surface, creates grains of connectivity
(Galpern et al. (2012) <doi:10.1111/j.1365-294X.2012.05677.x>) and minimum planar graph
(Fall et al. (2007) <doi:10.1007/s10021-007-9038-7>) models that can be used to calculate
effective distances for landscape connectivity at multiple scales.",2019-12-06,Alex M Chubaty,"https://alexchubaty.com/grainscape,
https://github.com/achubaty/grainscape",TRUE,https://github.com/achubaty/grainscape,4936,9,2019-12-06T17:07:18Z,548.4444444444445
GRANBase,"Repository based tools for department and analysis level
reproducibility. 'GRANBase' allows creation of custom branched, continuous
integration-ready R repositories, including incremental testing of only packages
which have changed versions since the last repository build.",2020-02-05,Cory Barr,https://github.com/gmbecker/gRAN,TRUE,https://github.com/gmbecker/gran,19826,24,2020-05-07T18:21:54Z,826.0833333333334
GRANCore,"Provides the classes and methods for GRANRepository
objects that are used within the 'GRAN' build framework for R packages.
This is primarily used by the 'GRANBase' package and repositories
that are created by it.",2020-02-04,Gabriel Becker[aut,https://github.com/gmbecker/GRANCore,TRUE,https://github.com/gmbecker/grancore,9679,1,2019-10-31T00:35:24Z,9679
grangers,"Contains five functions performing the calculation of unconditional and conditional Granger-causality spectra, bootstrap inference on both, and inference on the difference between them via the bootstrap approach of Farne' and Montanari, 2018 <arXiv:1803.00374>.",2019-06-03,Matteo Farne,https://github.com/MatFar88/grangers,TRUE,https://github.com/matfar88/grangers,4444,1,2019-07-29T22:13:21Z,4444
grapherator,"Set of functions for step-wise generation of (weighted) graphs. Aimed for research in the field of single- and multi-objective combinatorial optimization. Graphs are generated adding nodes, edges and weights. Each step may be repeated multiple times with different predefined and custom generators resulting in high flexibility regarding the graph topology and structure of edge weights.",2017-12-21,Jakob Bossek,https://github.com/jakobbossek/grapherator,TRUE,https://github.com/jakobbossek/grapherator,8875,5,2019-09-17T10:56:43Z,1775
graphlayouts,"Several new layout algorithms to visualize networks are provided which are not part of 'igraph'.
Most are based on the concept of stress majorization by Gansner et al. (2004) <doi:10.1007/978-3-540-31843-9_25>.
Some more specific algorithms allow to emphasize hidden group structures in networks or focus on specific nodes.",2020-04-25,David Schoch,"http://graphlayouts.schochastics.net/,
https://github.com/schochastics/graphlayouts",TRUE,https://github.com/schochastics/graphlayouts,346297,145,2020-04-25T21:09:25Z,2388.255172413793
graphql,"Bindings to the 'libgraphqlparser' C++ library. Parses GraphQL syntax
and exports the AST in JSON format.",2018-12-01,Jeroen Ooms,"http://graphql.org (upstream) https://github.com/ropensci/graphql
(devel)",TRUE,https://github.com/ropensci/graphql,17465,26,2019-12-08T22:41:42Z,671.7307692307693
graphTweets,"Allows building an edge table from data frame of tweets,
also provides function to build nodes and another create a temporal graph.",2020-01-08,John Coene,http://graphTweets.john-coene.com,TRUE,https://github.com/johncoene/graphtweets,31168,42,2020-01-07T12:45:54Z,742.0952380952381
gratia,"Graceful 'ggplot'-based graphics and utility functions for working with generalized additive models (GAMs) fitted using the 'mgcv' package. Provides a reimplementation of the plot() method for GAMs that 'mgcv' provides, as well as 'tidyverse' compatible representations of estimated smooths.",2020-05-31,Gavin L. Simpson,https://gavinsimpson.github.io/gratia,TRUE,https://github.com/gavinsimpson/gratia,8472,83,2020-06-05T21:10:50Z,102.07228915662651
graticule,"Create graticule lines and labels for maps. Control the creation
of lines by setting their placement (at particular meridians and parallels)
and extent (along parallels and meridians). Labels are created independently of
lines.",2016-02-02,Michael D. Sumner,https://github.com/mdsumner/graticule,TRUE,https://github.com/mdsumner/graticule,17044,17,2019-10-14T05:54:02Z,1002.5882352941177
grattan,"Utilities for costing and evaluating Australian tax policy, including high-performance tax and transfer calculators, a fast method of projecting tax collections, and an interface to common indices from the Australian Bureau of Statistics. Written to support Grattan Institute's Australian Perspectives program. For access to the 'taxstats' package, please run
install.packages(""taxstats"", repos = ""https://hughparsonage.github.io/tax-drat/"", type = ""source"").
N.B. The 'taxstats' package is approximately 50 MB.",2020-03-16,Hugh Parsonage,"https://github.com/HughParsonage/grattan,
https://hughparsonage.github.io/grattan/",TRUE,https://github.com/hughparsonage/grattan,27992,15,2020-03-24T00:32:11Z,1866.1333333333334
gravitas,"Provides tools for systematically exploring large quantities of
temporal data across nonlinear temporal granularities
(deconstructions of time) by visualizing probability distributions.
Nonlinear time granularities can be circular, quasi-circular or
aperiodic. 'gravitas' computes nonlinear
single-order-up or multiple-order-up granularities, check the
feasibility of creating plots for any two nonlinear granularities
and recommend probability distributions plots for exploring
periodicity in the data.",2020-02-17,Sayani Gupta,https://github.com/Sayani07/gravitas/,TRUE,https://github.com/sayani07/gravitas,3491,11,2020-06-01T06:00:06Z,317.3636363636364
grec,Provides algorithms for detection of spatial patterns from oceanographic data using image processing methods based on Gradient Recognition.,2020-02-19,Wencheng Lau-Medrano,https://github.com/LuisLauM/grec,TRUE,https://github.com/luislaum/grec,12943,1,2020-02-06T14:38:41Z,12943
greenclust,"Implements a method of iteratively collapsing the rows of a
contingency table, two at a time, by selecting the pair of categories whose
combination yields a new table with the smallest loss of chi-squared, as
described by Greenacre, M.J. (1988) <doi:10.1007/BF01901670>. The result is
compatible with the class of object returned by the 'stats' package's
hclust() function and can be used similarly (plotted as a dendrogram,
cut, etc.). Additional functions are provided for automatic cutting and
diagnostic plotting.",2020-01-10,Jeff Jetton,https://github.com/JeffJetton/greenclust,TRUE,https://github.com/jeffjetton/greenclust,4223,3,2020-02-13T04:00:03Z,1407.6666666666667
gremlin,"Fit linear mixed-effects models using restricted (or residual)
maximum likelihood (REML) and with generalized inverse matrices to specify
covariance structures for random effects. In particular, the package is suited
to fit quantitative genetic mixed models, often referred to as 'animal models'
(Kruuk. 2004 <DOI: 10.1098/rstb.2003.1437>). Implements the average
information algorithm as the main tool to maximize the restricted likelihood,
but with other algorithms available (Meyer. 1997. Genet Sel Evol 29:97; Meyer
and Smith. 1998. Genet Sel Evol 28:23.).",2019-04-09,Matthew Wolak,http://github.com/matthewwolak/gremlin,TRUE,https://github.com/matthewwolak/gremlin,6028,2,2019-10-12T11:29:02Z,3014
gren,"Allows the user to incorporate multiple sources of co-data
(e.g., previously obtained p-values, published gene lists, and annotation) in the estimation of a logistic regression model to enhance predictive performance and feature selection, as described in Münch, Peeters, van der Vaart, and van de Wiel (2018) <arXiv:1805.00389>.",2018-07-30,Magnus M. Münch,https://github.com/magnusmunch/gren/,TRUE,https://github.com/magnusmunch/gren,6483,0,2019-12-02T09:39:02Z,NA
greta,"Write statistical models in R and fit them by MCMC and optimisation on CPUs and GPUs, using Google 'TensorFlow'.
greta lets you write your own model like in BUGS, JAGS and Stan, except that you write models right in R, it scales well to massive datasets, and it’s easy to extend and build on.
See the website for more information, including tutorials, examples, package documentation, and the greta forum.",2019-08-09,Nick Golding,https://greta-stats.org,TRUE,https://github.com/greta-dev/greta,21058,419,2020-04-13T07:39:10Z,50.25775656324582
gretel,"The social network literature features numerous methods for assigning
value to paths as a function of their ties. 'gretel' systemizes these approaches,
casting them as instances of a generalized path value function indexed by
a penalty parameter. The package also calculates probabilistic path value and
identifies optimal paths in either value framework. Finally, proximity
matrices can be generated in these frameworks that capture high-order connections
overlooked in primitive adjacency sociomatrices. Novel methods are described
in Buch (2019) <https://davidbuch.github.io/analyzing-networks-with-gretel.html>.
More traditional methods are also implemented, as described in Yang, Knoke (2001)
<doi:10.1016/S0378-8733(01)00043-0>.",2019-08-22,David Buch,https://github.com/davidbuch/gretel,TRUE,https://github.com/davidbuch/gretel,3433,0,2019-10-14T16:35:51Z,NA
grex,"Convert 'Ensembl' gene identifiers from Genotype-Tissue
Expression (GTEx) data to identifiers in other annotation systems,
including 'Entrez', 'HGNC', and 'UniProt'.",2019-05-17,Nan Xiao,"https://nanx.me/grex/, https://github.com/nanxstats/grex",TRUE,https://github.com/nanxstats/grex,16799,4,2020-04-23T22:58:16Z,4199.75
greybox,"Implements functions and instruments for regression model building and its
application to forecasting. The main scope of the package is in variables selection
and models specification for cases of time series data. This includes promotional
modelling, selection between different dynamic regressions with non-standard
distributions of errors, selection based on cross validation, solutions to the fat
regression model problem and more. Models developed in the package are tailored
specifically for forecasting purposes. So as a result there are several methods
that allow producing forecasts from these models and visualising them.",2020-05-20,"Ivan Svetunkov (Lecturer at Centre for Marketing Analytics
and Forecasting",https://github.com/config-i1/greybox,TRUE,https://github.com/config-i1/greybox,208137,15,2020-06-06T14:48:52Z,13875.8
grf,"A pluggable package for forest-based statistical estimation and inference.
GRF currently provides methods for non-parametric least-squares regression,
quantile regression, survival regression and treatment effect estimation (optionally using instrumental
variables), with support for missing values.",2020-06-04,Julie Tibshirani,https://github.com/grf-labs/grf,TRUE,https://github.com/grf-labs/grf,75628,417,2020-06-09T21:04:24Z,181.3621103117506
gridGraphics,"Functions to convert a page of plots drawn with the
'graphics' package into identical output drawn with the 'grid' package.
The result looks like the original 'graphics'-based plot, but consists
of 'grid' grobs and viewports that can then be manipulated with
'grid' functions (e.g., edit grobs and revisit viewports).",2020-02-25,Paul Murrell,https://github.com/pmur002/gridgraphics,TRUE,https://github.com/pmur002/gridgraphics,297494,28,2020-02-25T00:30:17Z,10624.785714285714
gridsampler,"Simulation tool to facilitate determination of
required sample size to achieve category saturation
for studies using multiple repertory grids in conjunction with
content analysis.",2016-11-23,Mark Heckmann,https://github.com/markheckmann/gridsampler,TRUE,https://github.com/markheckmann/gridsampler,11652,4,2020-02-06T19:45:55Z,2913
gridtext,"Provides support for rendering of formatted text using 'grid' graphics. Text can be
formatted via a minimal subset of 'Markdown', 'HTML', and inline 'CSS' directives, and it can be
rendered both with and without word wrap.",2020-02-24,Claus O. Wilke,https://wilkelab.org/gridtext,TRUE,https://github.com/wilkelab/gridtext,6848,75,2020-05-03T19:08:59Z,91.30666666666667
groupdata2,"Methods for dividing data into groups.
Create balanced partitions and cross-validation folds.
Perform time series windowing and general grouping and splitting of data.
Balance existing groups with up- and downsampling.",2020-06-06,Ludvig Renbo Olsen,https://github.com/ludvigolsen/groupdata2,TRUE,https://github.com/ludvigolsen/groupdata2,17044,14,2020-06-07T22:52:08Z,1217.4285714285713
groupedstats,"Collection of functions to run statistical tests
across all combinations of multiple grouping variables.",2020-05-29,Indrajeet Patil,"https://indrajeetpatil.github.io/groupedstats/,
https://github.com/IndrajeetPatil/groupedstats/",TRUE,https://github.com/indrajeetpatil/groupedstats,48753,46,2020-05-31T11:22:10Z,1059.8478260869565
groupICA,"Contains an implementation of an independent component analysis (ICA) for grouped data. The main function groupICA() performs a blind source separation, by maximizing an independence across sources and allows to adjust for varying confounding for user-specified groups. Additionally, the package contains the function uwedge() which can be used to approximately jointly diagonalize a list of matrices. For more details see the project website <https://sweichwald.de/groupICA/>.",2018-06-19,Niklas Pfister and Sebastian Weichwald,https://github.com/sweichwald/groupICA-R,TRUE,https://github.com/sweichwald/groupica-r,6701,1,2020-05-15T08:05:37Z,6701
growthrates,"A collection of methods to determine growth rates from
experimental data, in particular from batch experiments and
plate reader trials.",2019-12-18,Thomas Petzoldt,https://github.com/tpetzoldt/growthrates,TRUE,https://github.com/tpetzoldt/growthrates,20623,13,2020-01-25T20:03:01Z,1586.3846153846155
grpreg,"Efficient algorithms for fitting the regularization path of linear
regression, GLM, and Cox regression models with grouped penalties. This
includes group selection methods such as group lasso, group MCP, and
group SCAD as well as bi-level selection methods such as the group
exponential lasso, the composite MCP, and the group bridge.",2020-02-19,Patrick Breheny,"http://pbreheny.github.io/grpreg,
https://github.com/pbreheny/grpreg",TRUE,https://github.com/pbreheny/grpreg,332933,21,2020-06-09T21:42:53Z,15853.952380952382
grpSLOPE,"Group SLOPE is a penalized linear regression method that is used
for adaptive selection of groups of significant predictors in a
high-dimensional linear model.
The Group SLOPE method can control the (group) false discovery rate at a
user-specified level (i.e., control the expected proportion of irrelevant
among all selected groups of predictors).",2020-04-07,Alexej Gossmann,https://github.com/agisga/grpSLOPE.git,TRUE,https://github.com/agisga/grpslope,13466,3,2020-04-13T20:10:39Z,4488.666666666667
gscaLCA,"
Execute Latent Class Analysis (LCA) and Latent Class Regression (LCR) by using Generalized Structured Component Analysis (GSCA). This is explained in Ryoo, Park, and Kim (2019) <doi:10.1007/s41237-019-00084-6>.
It estimates the parameters of latent class prevalence and item response probability in LCA with a single line comment. It also provides graphs of item response probabilities. In addition, the package enables to estimate the relationship between the prevalence and covariates. ",2020-06-08,Seohee Park,https://github.com/hee6904/gscaLCA,TRUE,https://github.com/hee6904/gscalca,3209,0,2020-06-08T17:48:42Z,NA
gsheet,"Simple package to download Google Sheets using just the sharing
link. Spreadsheets can be downloaded as a data frame, or as plain text to parse
manually. Google Sheets is the new name for Google Docs Spreadsheets <https://www.google.com/sheets/about>.",2020-04-07,Max Conway,https://github.com/maxconway/gsheet,TRUE,https://github.com/maxconway/gsheet,129150,37,2020-04-07T13:35:14Z,3490.5405405405404
gsl,"
An R wrapper for some of the functionality of the
Gnu Scientific Library.",2019-03-25,Robin K. S. Hankin,https://github.com/RobinHankin/gsl.git,TRUE,https://github.com/robinhankin/gsl,580566,6,2020-05-01T23:37:17Z,96761
GSODR,"Provides automated downloading, parsing, cleaning, unit conversion
and formatting of Global Surface Summary of the Day ('GSOD') weather data
from the USA National Centers for Environmental Information
('NCEI'). Units are converted from United States Customary System
('USCS') units to International System of Units ('SI'). Stations may be
individually checked for number of missing days defined by the user, where
stations with too many missing observations are omitted. Only stations with
valid reported latitude and longitude values are permitted in the final
data. Additional useful elements, saturation vapour pressure ('es'), actual
vapour pressure ('ea') and relative humidity ('RH') are calculated from the
original data using the improved August-Roche-Magnus approximation (Alduchov
& Eskridge 1996) and included in the final data set. The resulting metadata
include station identification information, country, state, latitude,
longitude, elevation, weather observations and associated flags. For
information on the 'GSOD' data from 'NCEI', please see the 'GSOD'
'readme.txt' file available from,
<http://www1.ncdc.noaa.gov/pub/data/gsod/readme.txt>.",2020-04-17,Adam Sparks,https://docs.ropensci.org/GSODR/,TRUE,https://github.com/ropensci/gsodr,55097,69,2020-06-01T13:01:39Z,798.5072463768116
gstat,"Variogram modelling; simple, ordinary and universal point or block (co)kriging; spatio-temporal kriging; sequential Gaussian or indicator (co)simulation; variogram and variogram map plotting utility functions; supports sf and stars.",2020-05-18,Edzer Pebesma,https://github.com/r-spatial/gstat/,TRUE,https://github.com/r-spatial/gstat,797248,110,2020-05-18T11:15:51Z,7247.709090909091
gt,"Build display tables from tabular data with an easy-to-use set of
functions. With its progressive approach, we can construct display tables
with a cohesive set of table parts. Table values can be formatted using any
of the included formatting functions. Footnotes and cell styles can be
precisely added through a location targeting system. The way in which 'gt'
handles things for you means that you don't often have to worry about the
fine details.",2020-05-23,Richard Iannone,https://github.com/rstudio/gt,TRUE,https://github.com/rstudio/gt,19156,1095,2020-05-29T19:26:03Z,17.49406392694064
gtable,"Tools to make it easier to work with ""tables"" of
'grobs'. The 'gtable' package defines a 'gtable' grob class that specifies a
grid along with a list of grobs and their placement in the grid. Further the
package makes it easy to manipulate and combine 'gtable' objects so that
complex compositions can be build up sequentially.",2019-03-25,Hadley Wickham,https://github.com/r-lib/gtable,TRUE,https://github.com/r-lib/gtable,16404790,63,2019-06-26T07:57:33Z,260393.49206349207
gtfs2gps,Convert general transit feed specification (GTFS) data to global positioning system (GPS) records in 'data.table' format. It also has some functions to subset GTFS data in time and space and to convert both representations to simple feature format.,2020-05-28,Rafael H. M. Pereira,https://github.com/ipeaGIT/gtfs2gps,TRUE,https://github.com/ipeagit/gtfs2gps,1653,37,2020-06-04T11:14:49Z,44.67567567567568
gtfsrouter,"Use GTFS (General Transit Feed Specification) data for routing from
nominated start and end stations, and for extracting isochrones from
nominated start station.",2019-03-22,Mark Padgham,https://github.com/ATFutures/gtfs-router,TRUE,https://github.com/atfutures/gtfs-router,4964,30,2020-06-04T13:54:54Z,165.46666666666667
gtrendsR,"An interface for retrieving and displaying the information
returned online by Google Trends is provided. Trends (number of
hits) over the time as well as geographic representation of the
results can be displayed.",2020-05-17,Philippe Massicotte,https://github.com/PMassicotte/gtrendsR,TRUE,https://github.com/pmassicotte/gtrendsr,118835,234,2020-05-17T17:19:56Z,507.84188034188037
gtsummary,"Creates presentation-ready tables summarizing data
sets, regression models, and more. The code to create the tables is
concise and highly customizable. Data frames can be summarized with
any function, e.g. mean(), median(), even user-written functions.
Regression models are summarized and include the reference rows for
categorical variables. Common regression models, such as logistic
regression and Cox proportional hazards regression, are automatically
identified and the tables are pre-filled with appropriate column
headers. ",2020-06-02,Daniel D. Sjoberg,"https://github.com/ddsjoberg/gtsummary,
http://www.danieldsjoberg.com/gtsummary/",TRUE,https://github.com/ddsjoberg/gtsummary,14940,231,2020-06-09T23:40:48Z,64.67532467532467
guardianapi,"Access to 'The Guardian' newspaper's open API
<https://open-platform.theguardian.com/>, containing all articles published
in 'The Guardian' from 1999 to the present, including article text, metadata,
tags and contributor information. An API key and registration is required.",2019-06-23,Evan Odell,https://docs.evanodell.com/guardianapi,TRUE,https://github.com/evanodell/guardianapi,6170,4,2019-06-23T21:09:38Z,1542.5
Guerry,"Maps of France in 1830, multivariate datasets from A.-M. Guerry and others, and statistical and
graphic methods related to Guerry's ""Moral Statistics of France"". The goal is to facilitate the exploration and
development of statistical and graphic methods for multivariate data in a geo-spatial context of historical interest.",2020-01-29,Michael Friendly,https://github.com/friendly/Guerry,TRUE,https://github.com/friendly/guerry,55443,0,2020-01-29T15:11:04Z,NA
GuessCompx,"Make an empirical guess on the time and memory complexities of an algorithm or a function.
Tests multiple, increasing size random samples of your data and tries to fit various complexity functions o(n), o(n^2), o(log(n)), etc.
Based on best fit, it predicts the full computation time on your whole dataset. Results are plotted with 'ggplot2'.",2019-06-03,Marc Agenis,https://github.com/agenis/GuessCompx,TRUE,https://github.com/agenis/guesscompx,4296,9,2019-06-17T22:36:49Z,477.3333333333333
gustave,"Provides a toolkit for analytical variance estimation in survey sampling. Apart from the implementation of standard variance estimators, its main feature is to help the sampling expert produce easy-to-use variance estimation ""wrappers"", where systematic operations (linearization, domain estimation) are handled in a consistent and transparent way.",2019-12-16,Martin Chevalier,https://github.com/martinchevalier/gustave,TRUE,https://github.com/martinchevalier/gustave,7919,4,2019-12-16T22:03:37Z,1979.75
gutenbergr,"Download and process public domain works in the Project
Gutenberg collection <http://www.gutenberg.org/>. Includes metadata for
all Project Gutenberg works, so that they can be searched and retrieved.",2019-09-10,David Robinson,http://github.com/ropensci/gutenbergr,TRUE,https://github.com/ropensci/gutenbergr,155395,70,2019-12-09T21:12:11Z,2219.9285714285716
gvc,"Several tools for Global Value Chain ('GVC') analysis are
implemented.",2020-04-23,Bastiaan Quast,"https://qua.st/gvc, https://github.com/bquast/gvc",TRUE,https://github.com/bquast/gvc,23285,6,2020-04-25T09:54:55Z,3880.8333333333335
gWidgets2RGtk2,Implements the 'gWidgets2' API for 'RGtk2.',2018-01-04,John Verzani,https://github.com/jverzani/gWidgets2RGtk2,TRUE,https://github.com/jverzani/gwidgets2rgtk2,40477,4,2019-10-28T14:01:25Z,10119.25
gwsem,"Melds genome-wide association tests with structural
equation modeling (SEM) using 'OpenMx'. This package contains
low-level C/C++ code to rapidly read genetic data encoded in U.K.
Biobank or 'plink' formats. Prebuilt modeling options include one and
two factor models. Alternately, analyses may utilize arbitrary,
user-provided SEMs. See Verhulst, Maes, & Neale (2017)
<doi:10.1007/s10519-017-9842-6> for details. An updated manuscript is
in preparation.",2020-03-27,Joshua N. Pritikin,https://github.com/jpritikin/gwsem,TRUE,https://github.com/jpritikin/gwsem,2752,1,2020-05-29T13:28:15Z,2752
h2o4gpu,"Interface to 'H2O4GPU' <https://github.com/h2oai/h2o4gpu>, a collection of 'GPU' solvers for machine learning algorithms.",2018-03-23,Yuan Tang,https://github.com/h2oai/h2o4gpu,TRUE,https://github.com/h2oai/h2o4gpu,10710,377,2020-06-03T13:54:04Z,28.408488063660478
hablar,Simple tools for converting columns to new data types. Intuitive functions for columns with missing values. ,2020-03-19,David Sjoberg,https://davidsjoberg.github.io/,TRUE,https://github.com/davidsjoberg/hablar,21996,23,2020-05-17T06:42:22Z,956.3478260869565
hackeRnews,"Use the <https://hacker-news.firebaseio.com/v0/> API through R. Retrieve
posts, articles and other items in form of convenient R objects.",2019-12-13,Ryszard Szymanski,https://github.com/szymanskir/hackeRnews,TRUE,https://github.com/szymanskir/hackernews,2562,19,2020-01-22T19:51:32Z,134.8421052631579
hagis,"Analysis of plant pathogen pathotype survey data. Functions
provided calculate distribution of susceptibilities, distribution of
complexities with statistics, pathotype frequency distribution, as well as
diversity indices for pathotypes. This package is meant to be a direct
replacement for Herrmann, Löwer, Schachtel's (1999)
<doi:10.1046/j.1365-3059.1999.00325.x> Habgood-Gilmour Spreadsheet, 'HaGiS',
previously used for pathotype analysis.",2019-11-18,Austin G. McCoy,"https://github.com/openplantpathology/hagis,
https://openplantpathology.github.io/hagis/",TRUE,https://github.com/openplantpathology/hagis,4266,2,2020-04-11T10:12:49Z,2133
hal9001,"A scalable implementation of the highly adaptive lasso algorithm,
including routines for constructing sparse matrices of basis functions of the
observed data, as well as a custom implementation of Lasso regression tailored
to enhance efficiency when the matrix of predictors is composed exclusively of
indicator functions. For ease of use and increased flexibility, the Lasso
fitting routines invoke code from the 'glmnet' package by default. The highly
adaptive lasso was first formulated and described by MJ van der Laan (2017)
<doi:10.1515/ijb-2015-0097>, with practical demonstrations of its performance
given by Benkeser and van der Laan (2016) <doi:10.1109/DSAA.2016.93>.",2020-03-05,Jeremy Coyle,https://github.com/tlverse/hal9001,TRUE,https://github.com/tlverse/hal9001,1725,13,2020-03-06T00:56:08Z,132.69230769230768
haldensify,"Conditional density estimation is a longstanding and challenging
problem in statistical theory, and numerous proposals exist for optimally
estimating such complex functions. Algorithms for nonparametric estimation
of conditional densities based on a pooled hazard regression formulation and
semiparametric estimation via conditional hazards modeling are implemented
based on the highly adaptive lasso, a nonparametric regression function for
efficient estimation with fast convergence under mild assumptions. The
pooled hazards formulation implemented was first described by Díaz and
van der Laan (2011) <doi:10.2202/1557-4679.1356>.",2020-03-14,Nima Hejazi,https://github.com/nhejazi/haldensify,TRUE,https://github.com/nhejazi/haldensify,1461,3,2020-05-27T20:46:01Z,487
handlr,"Converts among many citation formats, including 'BibTeX',
'Citeproc', 'Codemeta', 'RDF XML', 'RIS', and 'Schema.org'. A low
level 'R6' class is provided, as well as stand-alone functions
for each citation format for both read and write.",2019-08-19,Scott Chamberlain,https://github.com/ropensci/handlr,TRUE,https://github.com/ropensci/handlr,5872,29,2020-04-14T23:24:55Z,202.48275862068965
hansard,"Provides functions to download data from the
<http://www.data.parliament.uk/> APIs. Because of the structure of the API,
there is a named function for each type of available data for ease of use,
as well as some functions designed to retrieve specific pieces of commonly
used data. Functions for each new API will be added as and when they become
available.",2019-11-13,Evan Odell,https://docs.evanodell.com/hansard,TRUE,https://github.com/evanodell/hansard,22711,20,2020-03-13T13:11:00Z,1135.55
hardhat,"Building modeling packages is hard. A large amount of effort
generally goes into providing an implementation for a new method that is
efficient, fast, and correct, but often less emphasis is put on the user
interface. A good interface requires specialized knowledge about S3 methods
and formulas, which the average package developer might not have.
The goal of 'hardhat' is to reduce the burden around building new modeling
packages by providing functionality for preprocessing, predicting, and
validating input.",2020-05-20,Davis Vaughan,https://github.com/tidymodels/hardhat,TRUE,https://github.com/tidymodels/hardhat,29586,67,2020-05-20T21:38:58Z,441.5820895522388
HARModel,"Estimation, simulation, and forecasting using the HAR model from Corsi(2009) <DOI:10.1093/jjfinec/nbp001> and extensions.",2019-08-31,Emil Sjoerup,https://github.com/emilsjoerup/HARModel,TRUE,https://github.com/emilsjoerup/harmodel,7554,3,2019-09-04T18:01:35Z,2518
harrypotter,Implementation of characteristic palettes inspired in the Wizarding World and the Harry Potter movie franchise.,2020-03-05,Alejandro Jimenez Rico,https://github.com/aljrico/harrypotter,TRUE,https://github.com/aljrico/harrypotter,53006,54,2020-03-05T19:25:02Z,981.5925925925926
hashr,"Apply the SuperFastHash algorithm to any R object. Hash whole R objects or,
for vectors or lists, hash R objects to obtain a set of hash values that is stored
in a structure equivalent to the input. ",2015-08-06,Mark van der Loo,https://github.com/markvanderloo/hashr,TRUE,https://github.com/markvanderloo/hashr,15381,7,2019-06-24T21:22:32Z,2197.285714285714
hasseDiagram,Drawing Hasse diagram - visualization of transitive reduction of a finite partially ordered set.,2017-02-24,Krzysztof Ciomek,https://github.com/kciomek/hasseDiagram,TRUE,https://github.com/kciomek/hassediagram,20377,4,2020-03-17T21:02:08Z,5094.25
haven,"Import foreign statistical formats into R via the
embedded 'ReadStat' C library,
<https://github.com/WizardMac/ReadStat>.",2020-06-01,Hadley Wickham,"http://haven.tidyverse.org, https://github.com/tidyverse/haven,
https://github.com/WizardMac/ReadStat",TRUE,https://github.com/tidyverse/haven,10508617,309,2020-06-01T16:07:43Z,34008.46925566343
hBayesDM,"
Fit an array of decision-making tasks with computational models in
a hierarchical Bayesian framework. Can perform hierarchical Bayesian analysis of
various computational models with a single line of coding
(Ahn et al., 2017) <doi:10.1162/CPSY_a_00002>.",2019-11-13,Woo-Young Ahn,https://github.com/CCS-Lab/hBayesDM,TRUE,https://github.com/ccs-lab/hbayesdm,25819,96,2019-11-15T05:00:17Z,268.9479166666667
hcandersenr,"Texts for H.C. Andersens fairy tales, ready for
text analysis. Fairy tales in German, Danish, English, Spanish and
French.",2019-01-19,Emil Hvitfeldt,https://github.com/EmilHvitfeldt/hcandersenr,TRUE,https://github.com/emilhvitfeldt/hcandersenr,6764,7,2020-03-11T22:54:04Z,966.2857142857143
hchinamap,"By binding R functions and the 'Highmaps' <https://www.highcharts.com.cn/products/highmaps> chart library, 'hchinamap' package provides a simple way to map China and its provinces. The map of China drawn by this package contains complete Chinese territory, especially the Nine-dotted line, South Tibet, Hong Kong, Macao and Taiwan.",2019-08-23,Zhenxing Cheng,https://github.com/czxa/hchinamap,TRUE,https://github.com/czxa/hchinamap,4715,14,2019-10-19T13:25:03Z,336.7857142857143
HCmodelSets,"Software for performing the reduction, exploratory and model selection phases of the procedure proposed by Cox, D.R. and Battey, H.S. (2017) <doi:10.1073/pnas.1703764114> for sparse regression when the number of potential explanatory variables far exceeds the sample size. The software supports linear regression, likelihood-based fitting of generalized linear regression models and the proportional hazards model fitted by partial likelihood.",2020-04-20,H. H. Hoeltgebaum,NA,TRUE,https://github.com/hhhelfer/hcmodelsets,9393,0,2020-04-20T15:41:39Z,NA
hddtools,"Tools to discover hydrological data, accessing catalogues and databases from various data providers.",2020-05-25,Claudia Vitolo,"http://docs.ropensci.org/hddtools,
https://github.com/ropensci/hddtools",TRUE,https://github.com/ropensci/hddtools,19969,33,2020-05-25T10:10:12Z,605.1212121212121
hdf5r,"'HDF5' is a data model, library and file format for storing
and managing large amounts of data. This package provides a nearly
feature complete, object oriented wrapper for the 'HDF5' API
<https://support.hdfgroup.org/HDF5/doc/RM/RM_H5Front.html> using R6 classes.
Additionally, functionality is added so that 'HDF5' objects behave very
similar to their corresponding R counterparts.",2020-03-25,Holger Hoefling,"https://hhoeflin.github.io/hdf5r,
https://github.com/hhoeflin/hdf5r",TRUE,https://github.com/hhoeflin/hdf5r,155016,37,2020-03-24T12:10:55Z,4189.621621621622
hdme,"Penalized regression for generalized linear models for
measurement error problems (aka. errors-in-variables). The package
contains a version of the lasso (L1-penalization) which corrects
for measurement error (Sorensen et al. (2015) <doi:10.5705/ss.2013.180>).
It also contains an implementation of the Generalized Matrix Uncertainty
Selector, which is a version the (Generalized) Dantzig Selector for the
case of measurement error (Sorensen et al. (2018) <doi:10.1080/10618600.2018.1425626>).",2020-05-18,Oystein Sorensen,https://github.com/osorensen/hdme,TRUE,https://github.com/osorensen/hdme,12694,4,2020-05-18T12:09:12Z,3173.5
hdnom,"Creates nomogram visualizations for penalized Cox regression
models, with the support of reproducible survival model building,
validation, calibration, and comparison for high-dimensional data.",2019-06-23,Nan Xiao,"https://nanx.me/hdnom/, https://github.com/nanxstats/hdnom,
http://hdnom.io",TRUE,https://github.com/nanxstats/hdnom,36225,28,2020-04-23T23:03:02Z,1293.75
healthcareai,A machine learning toolbox tailored to healthcare data.,2020-02-28,Mike Mastanduno,http://docs.healthcare.ai,TRUE,https://github.com/healthcatalyst/healthcareai-r,29057,177,2020-02-28T18:44:20Z,164.1638418079096
heatmaply,"Create interactive cluster 'heatmaps' that can be saved as a stand-
alone HTML file, embedded in 'R Markdown' documents or in a 'Shiny' app, and
available in the 'RStudio' viewer pane. Hover the mouse pointer over a cell to
show details or drag a rectangle to zoom. A 'heatmap' is a popular graphical
method for visualizing high-dimensional data, in which a table of numbers
are encoded as a grid of colored cells. The rows and columns of the matrix
are ordered to highlight patterns and are often accompanied by 'dendrograms'.
'Heatmaps' are used in many fields for visualizing observations, correlations,
missing values patterns, and more. Interactive 'heatmaps' allow the inspection
of specific value by hovering the mouse over a cell, as well as zooming into
a region of the 'heatmap' by dragging a rectangle around the relevant area.
This work is based on the 'ggplot2' and 'plotly.js' engine. It produces
similar 'heatmaps' as 'heatmap.2' or 'd3heatmap', with the advantage of speed
('plotly.js' is able to handle larger size matrix), the ability to zoom from
the 'dendrogram' panes, and the placing of factor variables in the sides of the
'heatmap'.",2020-03-28,Tal Galili,"https://talgalili.github.io/heatmaply/,
https://cran.r-project.org/package=heatmaply,
https://github.com/talgalili/heatmaply/,
https://www.r-statistics.com/tag/heatmaply/",TRUE,https://github.com/talgalili/heatmaply,184732,243,2020-05-26T13:33:08Z,760.2139917695473
heatwaveR,"The different methods of defining and detecting extreme events, known as heatwaves or cold-spells in both air and water temperature data are encompassed within this package. These detection algorithms may be used on non-temperature data as well however, this is not catered for explicitly here as no use of this technique in the literature currently exists.",2019-12-01,Robert W. Schlegel,"https://robwschlegel.github.io/heatwaveR/index.html,
https://github.com/robwschlegel/heatwaveR",TRUE,https://github.com/robwschlegel/heatwaver,10565,13,2020-06-06T21:34:24Z,812.6923076923077
heddlr,"Helper functions designed to make
dynamically generating R Markdown documents easier by providing a
simple and tidy way to create report pieces, shape them to your data,
and combine them for exporting into a single R Markdown document.",2020-03-24,Michael Mahoney,"https://github.com/mikemahoney218/heddlr,
https://mikemahoney218.github.io/heddlr/",TRUE,https://github.com/mikemahoney218/heddlr,2277,10,2020-05-24T23:25:57Z,227.7
hedgehog,"Hedgehog will eat all your bugs.
'Hedgehog' is a property-based testing package in the spirit
of 'QuickCheck'. With 'Hedgehog', one can test properties
of their programs against randomly generated input, providing
far superior test coverage compared to unit testing. One of the
key benefits of 'Hedgehog' is integrated shrinking of
counterexamples, which allows one to quickly find the cause of
bugs, given salient examples when incorrect behaviour occurs.",2018-08-22,Huw Campbell,https://hedgehog.qa,TRUE,https://github.com/hedgehogqa/r-hedgehog,6511,35,2020-05-03T12:12:32Z,186.02857142857144
heemod,"An implementation of the modelling and reporting features described
in reference textbook and guidelines (Briggs, Andrew, et al. Decision
Modelling for Health Economic Evaluation. Oxford Univ. Press, 2011;
Siebert, U. et al. State-Transition Modeling. Medical Decision Making
32, 690-700 (2012).): deterministic and probabilistic sensitivity analysis,
heterogeneity analysis, time dependency on state-time and model-time
(semi-Markov and non-homogeneous Markov models), etc.",2020-05-11,Kevin Zarca,NA,TRUE,https://github.com/pierucci/heemod,32183,28,2020-04-20T13:07:01Z,1149.392857142857
helda,"The main focus is on preprocessing and data visualization of machine learning models performances.
Some functions allow to fill in gaps in time series using linear interpolation on panel data, some functions
permit to draw lift effect and lift curve in order to benchmark machine learning models or you can even find
the optimal number of clusters in agglomerative clustering algorithm.",2020-06-07,Simon Corde,https://www.github.com/Redcart/helda,TRUE,https://github.com/redcart/helda,2815,0,2020-06-07T10:34:50Z,NA
helminthR,"Access to large host-parasite data is often hampered by the
availability of data and difficulty in obtaining it in a programmatic way
to encourage analyses. 'helminthR' provides a programmatic interface to the
London Natural History Museum's host-parasite database, one of the largest
host-parasite databases existing currently <http://www.nhm.ac.uk/research-curation/scientific-resources/taxonomy-systematics/host-parasites/>. The package allows the user
to query by host species, parasite species, and geographic location.",2019-02-03,Tad Dallas,https://github.com/rOpenSci/helminthR,TRUE,https://github.com/ropensci/helminthr,7858,6,2019-12-09T21:13:00Z,1309.6666666666667
heplots,"Provides HE plot and other functions for visualizing hypothesis
tests in multivariate linear models. HE plots represent sums-of-squares-and-
products matrices for linear hypotheses and for error using ellipses (in two
dimensions) and ellipsoids (in three dimensions). The related 'candisc' package
provides visualizations in a reduced-rank canonical discriminant space when
there are more than a few response variables.",2018-04-03,Michael Friendly,http://datavis.ca/R/index.php#heplots,TRUE,https://github.com/friendly/heplots,245238,3,2020-04-28T14:45:59Z,81746
hereR,"Interface to the 'HERE' REST APIs <https://developer.here.com/develop/rest-apis>:
(1) geocode and autocomplete addresses or reverse geocode POIs using the 'Geocoder' API;
(2) route directions, travel distance or time matrices and isolines using the 'Routing' API;
(3) request real-time traffic flow and incident information from the 'Traffic' API;
(4) find request public transport connections and nearby stations from the 'Public Transit' API;
(5) get weather forecasts, reports on current weather conditions, astronomical
information and alerts at a specific location from the 'Destination Weather' API.
Locations, routes and isolines are returned as 'sf' objects.",2020-04-18,Merlin Unterfinger,"https://munterfinger.github.io/hereR/,
https://github.com/munterfinger/hereR/",TRUE,https://github.com/munterfinger/herer,5370,33,2020-06-02T10:05:56Z,162.72727272727272
hesim,"A modular and computationally efficient R package for
parameterizing, simulating, and analyzing health-economic simulation
models. The package supports cohort discrete time state transition models
(Briggs et al. 1998) <doi:10.2165/00019053-199813040-00003>,
N-state partitioned survival models (Glasziou et al. 1990)
<doi:10.1002/sim.4780091106>, and individual-level continuous
time state transition models (Siebert et al. 2012) <doi:10.1016/j.jval.2012.06.014>,
encompassing both Markov (time-homogeneous and time-inhomogeneous) and
semi-Markov processes. Decision uncertainty from a cost-effectiveness analysis is
quantified with standard graphical and tabular summaries of a probabilistic
sensitivity analysis (Claxton et al. 2005, Barton et al. 2008) <doi:10.1002/hec.985>,
<doi:10.1111/j.1524-4733.2008.00358.x>. Use of C++ and data.table
make individual-patient simulation, probabilistic sensitivity analysis,
and incorporation of patient heterogeneity fast.",2020-06-02,Devin Incerti,https://github.com/hesim-dev/hesim,TRUE,https://github.com/hesim-dev/hesim,10920,23,2020-06-09T03:36:20Z,474.7826086956522
hettreatreg,"Computes diagnostics for linear regression when treatment effects are heterogeneous.
The output of 'hettreatreg' represents ordinary least squares (OLS)
estimates of the effect of a binary treatment as a weighted average of the average treatment effect
on the treated (ATT) and the average treatment effect on the untreated (ATU).
The program estimates the OLS weights on these parameters, computes the associated model diagnostics,
and reports the implicit OLS estimate of the average treatment effect (ATE).
See Sloczynski (2019), <http://people.brandeis.edu/~tslocz/Sloczynski_paper_regression.pdf>.",2020-05-13,Mark McAvoy,https://github.com/tslocz/hettreatreg,TRUE,https://github.com/tslocz/hettreatreg,368,1,2020-04-30T01:39:07Z,368
hettx,"Implements methods developed by Ding, Feller, and Miratrix (2016) <doi:10.1111/rssb.12124> <arXiv:1412.5000>,
and Ding, Feller, and Miratrix (2018) <doi:10.1080/01621459.2017.1407322> <arXiv:1605.06566>
for testing whether there is unexplained variation in treatment effects across observations, and for characterizing
the extent of the explained and unexplained variation in treatment effects. The package includes wrapper functions
implementing the proposed methods, as well as helper functions for analyzing and visualizing the results of the test.",2019-03-06,Ben Fifield,NA,TRUE,https://github.com/bfifield/detect_heteffects,4983,5,2020-02-08T19:56:51Z,996.6
heuristica,"Implements various heuristics like Take The Best and
unit-weight linear, which do two-alternative choice: which of
two objects will have a higher criterion? Also offers functions
to assess performance, e.g. percent correct across all row pairs
in a data set and finding row pairs where models disagree.
New models can be added by implementing a fit and predict function--
see vignette.",2019-08-21,Jean Whitmore,https://github.com/jeanimal/heuristica,TRUE,https://github.com/jeanimal/heuristica,16105,4,2019-08-21T12:49:44Z,4026.25
heuristicsmineR,"Provides the heuristics miner algorithm for process discovery
as proposed by Weijters et al. (2011) <doi:10.1109/CIDM.2011.5949453>. The
algorithm builds a causal net from an event log created with the 'bupaR'
package. Event logs are a set of ordered sequences of events for which
'bupaR' provides the S3 class eventlog(). The discovered causal nets
can be visualised as 'htmlwidgets' and it is possible to annotate them with
the occurrence frequency or processing and waiting time of process
activities. ",2020-03-29,Felix Mannhardt,https://github.com/bupaverse/heuristicsmineR,TRUE,https://github.com/bupaverse/heuristicsminer,6030,8,2020-03-29T19:02:16Z,753.75
hexbin,Binning and plotting functions for hexagonal bins.,2020-02-03,Dan Carr,http://github.com/edzer/hexbin,TRUE,https://github.com/edzer/hexbin,4751457,23,2020-02-03T13:36:16Z,206585.08695652173
hexSticker,Helper functions for creating reproducible hexagon sticker purely in R.,2020-06-01,Guangchuang Yu,https://github.com/GuangchuangYu/hexSticker,TRUE,https://github.com/guangchuangyu/hexsticker,21124,362,2020-06-01T13:25:09Z,58.353591160220994
HGNChelper,"Contains functions for
identifying and correcting HGNC human gene symbols and MGI mouse gene symbols
which have been converted to date format by Excel, withdrawn, or aliased.
Also contains functions for reversibly converting between HGNC
symbols and valid R names.",2019-10-24,Levi Waldron and Markus Riester,https://github.com/waldronlab/HGNChelper,TRUE,https://github.com/waldronlab/hgnchelper,37420,11,2020-04-28T17:51:15Z,3401.818181818182
hgutils,"
A handy collection of utility functions designed to aid in package development, plotting and scientific research.
Package development functionalities includes among others tools such as cross-referencing package imports with the description file,
analysis of redundant package imports, editing of the description file and the creation of package badges for GitHub.
Some of the other functionalities include automatic package installation and loading, plotting points without overlap,
creating nice breaks for plots, overview tables and many more handy utility functions.",2019-09-07,H.G. van den Boorn,https://github.com/hvdboorn/hgutils,TRUE,https://github.com/hvdboorn/hgutils,7088,0,2019-09-07T13:11:05Z,NA
HiClimR,"A tool for Hierarchical Climate Regionalization applicable to any correlation-based clustering.
It adds several features and a new clustering method (called, 'regional' linkage) to hierarchical
clustering in R ('hclust' function in 'stats' library): data regridding, coarsening spatial resolution,
geographic masking, contiguity-constrained clustering, data filtering by mean and/or variance
thresholds, data preprocessing (detrending, standardization, and PCA), faster correlation function
with preliminary big data support, different clustering methods, hybrid hierarchical clustering,
multivariate clustering (MVC), cluster validation, visualization of regionalization results, and
exporting region map and mean timeseries into NetCDF-4 file.",2020-02-22,Hamada S. Badr,https://github.com/hsbadr/HiClimR,TRUE,https://github.com/hsbadr/hiclimr,27911,8,2020-02-23T08:05:29Z,3488.875
hier.part,"Partitioning of the independent and joint contributions of each
variable in a multivariate data set, to a linear regression by hierarchical
decomposition of goodness-of-fit measures of regressions using all subsets
of predictors in the data set. (i.e., model (1), (2), ..., (N), (1,2), ...,
(1,N), ..., (1,2,3,...,N)). A Z-score based estimate of the 'importance' of
each predictor is provided by using a randomisation test.",2020-03-03,Chris Walsh,NA,TRUE,https://github.com/cjbwalsh/hier.part,34951,0,2020-03-03T08:46:58Z,NA
hierfstat,"Allows the estimation of hierarchical F-statistics from haploid or diploid genetic data
with any numbers of levels in the hierarchy, following the algorithm of Yang (Evolution, 1998, 52(4):950-956;
<DOI:10.2307/2411227>). Functions are also given to test via randomisations the significance of each F and variance components,
using the likelihood-ratio statistics G.",2015-12-04,Jerome Goudet,"http://www.r-project.org, http://github.com/jgx65/hierfstat",TRUE,https://github.com/jgx65/hierfstat,79733,11,2019-09-24T22:05:33Z,7248.454545454545
highcharter,"A wrapper for the 'Highcharts' library including
shortcut functions to plot R objects. 'Highcharts'
<http://www.highcharts.com/> is a charting library offering
numerous chart types with a simple configuration syntax.",2019-01-15,Joshua Kunst,http://jkunst.com/highcharter,TRUE,https://github.com/jbkunst/highcharter,293806,530,2020-05-06T00:12:46Z,554.3509433962264
highfrequency,"Provide functionality to manage, clean and match highfrequency
trades and quotes data, calculate various liquidity measures, estimate and
forecast volatility, detect price jumps and investigate microstructure noise and intraday
periodicity.",2020-04-15,Kris Boudt,https://github.com/jonathancornelissen/highfrequency,TRUE,https://github.com/jonathancornelissen/highfrequency,47840,51,2020-04-14T17:17:51Z,938.0392156862745
highlight,"Syntax highlighter for R code based on the results
of the R parser. Rendering in HTML and latex markup. Custom Sweave
driver performing syntax highlighting of R code chunks.",2019-12-15,Hadley Wickham,https://github.com/hadley/highlight,TRUE,https://github.com/hadley/highlight,423828,1,2019-12-16T00:28:58Z,423828
highlightHTML,"A tool to format R markdown with CSS ids for HTML output.
The tool may be most helpful for those using markdown to create reproducible
documents. The biggest limitations in formatting is the knowledge of CSS
by the document authors.",2020-04-21,Brandon LeBeau,https://github.com/lebebr01/highlightHTML,TRUE,https://github.com/lebebr01/highlighthtml,11301,4,2020-04-21T03:03:43Z,2825.25
highr,"Provides syntax highlighting for R source code. Currently it
supports LaTeX and HTML output. Source code of other languages is supported
via Andre Simon's highlight package (<http://www.andre-simon.de>).",2019-03-20,Yihui Xie,https://github.com/yihui/highr,TRUE,https://github.com/yihui/highr,15684850,35,2020-02-06T16:37:56Z,448138.5714285714
hilbertSimilarity,"Quantifying similarity between high-dimensional single cell samples is challenging, and usually requires
some simplifying hypothesis to be made. By transforming the high dimensional space into a high dimensional grid,
the number of cells in each sub-space of the grid is characteristic of a given sample. Using a Hilbert curve
each sample can be visualized as a simple density plot, and the distance between samples can be calculated from
the distribution of cells using the Jensen-Shannon distance. Bins that correspond to significant differences
between samples can identified using a simple bootstrap procedure.",2019-11-11,Yann Abraham,http://github.com/yannabraham/hilbertSimilarity,TRUE,https://github.com/yannabraham/hilbertsimilarity,3022,5,2019-11-29T11:20:32Z,604.4
hilldiv,"Tools for analysing, comparing, visualising and partitioning diversity based on Hill numbers.
'hilldiv' is an R package that provides a set of functions to assist analysis of diversity for
diet reconstruction, microbial community profiling or more general ecosystem characterisation
analyses based on Hill numbers, using OTU/ASV tables and associated phylogenetic trees as
inputs. The package includes functions for (phylo)diversity measurement, (phylo)diversity
profile plotting, (phylo)diversity comparison between samples and groups, (phylo)diversity
partitioning and (dis)similarity measurement. All of these grounded in abundance-based and
incidence-based Hill numbers.
The statistical framework developed around Hill numbers encompasses many of the most
broadly employed diversity (e.g. richness, Shannon index, Simpson index),
phylogenetic diversity (e.g. Faith's PD, Allen's H, Rao's quadratic entropy) and
dissimilarity (e.g. Sorensen index, Unifrac distances) metrics. This enables the most
common analyses of diversity to be performed while grounded in a single statistical
framework. The methods are described in Jost et al. (2007) <DOI:10.1890/06-1736.1>,
Chao et al. (2010) <DOI:10.1098/rstb.2010.0272> and Chiu et al. (2014)
<DOI:10.1890/12-0960.1>; and reviewed in the framework of molecularly characterised
biological systems in Alberdi & Gilbert (2019) <DOI:10.1111/1755-0998.13014>.",2019-10-01,Antton Alberdi,https://github.com/anttonalberdi/hilldiv,TRUE,https://github.com/anttonalberdi/hilldiv,3399,1,2020-05-10T08:11:36Z,3399
hillR,"Calculate taxonomic, functional and phylogenetic diversity measures
through Hill Numbers proposed by Chao, Chiu and Jost (2014)
<doi:10.1146/annurev-ecolsys-120213-091540>.",2018-11-19,Daijiang Li,https://github.com/daijiang/hillR,TRUE,https://github.com/daijiang/hillr,7206,13,2020-05-22T15:40:45Z,554.3076923076923
hIRT,"Implementation of a class of hierarchical item response
theory (IRT) models where both the mean and the variance of latent preferences
(ability parameters) may depend on observed covariates. The current
implementation includes both the two-parameter latent trait model for binary data and the
graded response model for ordinal data. Both are fitted via the Expectation-Maximization (EM)
algorithm. Asymptotic standard errors are derived from the observed information
matrix.",2020-03-26,Xiang Zhou,http://github.com/xiangzhou09/hIRT,TRUE,https://github.com/xiangzhou09/hirt,11139,2,2020-04-01T19:00:51Z,5569.5
historydata,"These sample data sets are intended for historians
learning R. They include population, institutional, religious,
military, and prosopographical data suitable for mapping,
quantitative analysis, and network analysis.",2014-12-24,Lincoln Mullen,https://github.com/ropensci/historydata,TRUE,https://github.com/ropensci/historydata,21698,65,2019-12-09T13:21:09Z,333.81538461538463
HiveR,"Creates and plots 2D and 3D hive plots. Hive plots are a unique method of displaying networks of many types in which node properties are mapped to axes using meaningful properties rather than being arbitrarily positioned. The hive plot concept was invented by Martin Krzywinski at the Genome Science Center (www.hiveplot.net/). Keywords: networks, food webs, linnet, systems biology, bioinformatics.",2020-06-09,Bryan A. Hanson,https://github.com/bryanhanson/HiveR,TRUE,https://github.com/bryanhanson/hiver,30972,62,2020-06-09T14:22:28Z,499.5483870967742
hJAM,"Provides functions to implement a hierarchical approach which is designed to perform joint analysis of summary statistics using the framework of Mendelian Randomization or transcriptome analysis. Reference: Lai Jiang, Shujing Xu, Nicholas Mancuso, Paul J. Newcombe, David V. Conti (2020). ""A Hierarchical Approach Using Marginal Summary Statistics for Multiple Intermediates in a Mendelian Randomization or Transcriptome Analysis."" <bioRxiv><doi:10.1101/2020.02.03.924241>.",2020-02-20,Lai Jiang,https://github.com/lailylajiang/hJAM,TRUE,https://github.com/lailylajiang/hjam,1781,5,2020-05-18T05:03:51Z,356.2
HLMdiag,"A suite of diagnostic tools for hierarchical
(multilevel) linear models. The tools include
not only leverage and traditional deletion diagnostics (Cook's
distance, covratio, covtrace, and MDFFITS) but also
convenience functions and graphics for residual analysis. Models
can be fit using either lmer in the 'lme4' package or lme in the 'nlme' package,
but only two-level models fit using lme are currently supported.",2015-12-12,Adam Loy,https://github.com/aloy/HLMdiag,TRUE,https://github.com/aloy/hlmdiag,34893,9,2020-02-19T17:02:21Z,3877
HMDHFDplus,"Utilities for reading data from the Human Mortality Database (<https://www.mortality.org>), Human Fertility Database (<https://www.humanfertility.org>), and similar databases from the web or locally into an R session as data.frame objects. These are the two most widely used sources of demographic data to study basic demographic change, trends, and develop new demographic methods. Other supported databases at this time include the Human Fertility Collection (<http://www.fertilitydata.org/>), The Japanese Mortality Database (<http://www.ipss.go.jp/p-toukei/JMD>), and the Canadian Human Mortality Database (<http://www.bdlc.umontreal.ca/chmd/>). Arguments and data are standardized.",2020-02-20,Tim Riffe,https://github.com/timriffe/TR1,TRUE,https://github.com/timriffe/tr1,18658,1,2020-02-20T08:04:31Z,18658
hmi,"Runs single level and multilevel imputation models. The user just has to pass the data to the main function and, optionally, his analysis model. Basically the package then translates this analysis model into commands to impute the data according to it with functions from 'mice', 'MCMCglmm' or routines build for this package.",2020-05-03,Matthias Speidel (Munich,http://github.com/matthiasspeidel/hmi,TRUE,https://github.com/matthiasspeidel/hmi,22451,3,2020-05-03T16:00:04Z,7483.666666666667
Hmisc,"Contains many functions useful for data
analysis, high-level graphics, utility operations, functions for
computing sample size and power, importing and annotating datasets,
imputing missing values, advanced table making, variable clustering,
character string manipulation, conversion of R objects to LaTeX and html code,
and recoding variables.",2020-03-23,Frank E Harrell Jr,"http://biostat.mc.vanderbilt.edu/Hmisc,
https://github.com/harrelfe/Hmisc",TRUE,https://github.com/harrelfe/hmisc,9408907,151,2020-05-28T19:12:43Z,62310.64238410596
hms,"Implements an S3 class for storing and formatting time-of-day
values, based on the 'difftime' class.",2020-01-08,Kirill Müller,"https://hms.tidyverse.org/, https://github.com/tidyverse/hms",TRUE,https://github.com/tidyverse/hms,14635755,111,2020-01-07T16:19:41Z,131853.64864864864
Hmsc,"Hierarchical Modelling of Species Communities (HMSC)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment