# install nougat
pip install "nougat-ocr[api, dataset]"
# crop the table from paper (preserve pdf)
# using default 0.1.0-small model
nougat /tmp/2304.08485.table3.only.pdf -o /tmp/ --markdown
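
Nougat writes one .mmd (Mathpix-flavored markdown) file per input PDF into the output directory. A small sketch of the same conversion driven from Python, assuming the CLI is on PATH and that the output file is named after the PDF stem:

import subprocess
from pathlib import Path

pdf = Path("/tmp/2304.08485.table3.only.pdf")  # the cropped, single-table PDF
out_dir = Path("/tmp")

# Run the same nougat CLI call as above.
subprocess.run(["nougat", str(pdf), "-o", str(out_dir), "--markdown"], check=True)

# Assumption: nougat names its output <pdf stem>.mmd inside the output directory.
print((out_dir / f"{pdf.stem}.mmd").read_text())
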
# check the ports
# openssl s_client -connect ega.ebi.ac.uk:8443
# openssl s_client -connect ega.ebi.ac.uk:8052
# pyEGA3 - EGA python client version 3.4.0
# Python version - 3.7.3
pyega3 -cf default_credential_file.json fetch EGAF00001383154
pyega3 -cf default_credential_file.json fetch EGAD00001000440 --saveto Desktop/ega_output/
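
To pull several accessions with the same credential file, the pyega3 calls above can be looped from Python; a rough sketch, reusing only the values shown above and assuming pyega3 is on PATH:

import subprocess

credentials = "default_credential_file.json"
accessions = ["EGAF00001383154", "EGAD00001000440"]  # file and dataset IDs from above
out_dir = "Desktop/ega_output/"

for acc in accessions:
    # Mirrors the CLI usage shown above.
    subprocess.run(
        ["pyega3", "-cf", credentials, "fetch", acc, "--saveto", out_dir],
        check=True,
    )
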
#!/usr/bin/bash
# ClinVar weekly updates: https://ftp.ncbi.nlm.nih.gov/pub/clinvar/tab_delimited/
# Download assembly-specific variant annotation (Release date: 2022-09-19)
wget https://ftp.ncbi.nlm.nih.gov/pub/clinvar/tab_delimited/variant_summary.txt.gz
awk '{print "\t"$0"\t"}' acmg.filtered.genes.list |
rg -zf - variant_summary.txt.gz |
rg 'GRCh38' |
rg 'single nucleotide variant' |
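
The same filter can be expressed in pandas instead of a shell pipeline. A sketch, assuming the standard variant_summary.txt.gz column names (Assembly, Type, GeneSymbol) and a one-symbol-per-line acmg.filtered.genes.list:

import pandas as pd

# One gene symbol per line.
genes = set(pd.read_csv("acmg.filtered.genes.list", header=None)[0].str.strip())

# pandas decompresses the .gz transparently.
clinvar = pd.read_csv("variant_summary.txt.gz", sep="\t", low_memory=False)

# GRCh38 single-nucleotide variants in the ACMG genes, mirroring the rg pipeline above.
filtered = clinvar[
    (clinvar["Assembly"] == "GRCh38")
    & (clinvar["Type"] == "single nucleotide variant")
    & (clinvar["GeneSymbol"].isin(genes))
]
print(filtered.shape)
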
# tinygrad implementation: https://github.com/tinygrad/tinygrad/blob/master/examples/beautiful_mnist.py
%time
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from tqdm import trange
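
These imports set up a PyTorch counterpart to tinygrad's beautiful_mnist example. A minimal training sketch using only the modules imported above; the architecture and hyperparameters are placeholders, not the ones from the tinygrad script:

# Minimal MNIST training loop built on the imports above.
transform = transforms.ToTensor()
train_ds = datasets.MNIST("data", train=True, download=True, transform=transform)
test_ds = datasets.MNIST("data", train=False, download=True, transform=transform)
train_dl = DataLoader(train_ds, batch_size=128, shuffle=True)
test_dl = DataLoader(test_ds, batch_size=256)

model = nn.Sequential(
    nn.Flatten(),
    nn.Linear(28 * 28, 128), nn.ReLU(),
    nn.Linear(128, 10),
)
opt = optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()

for epoch in trange(3, desc="epochs"):
    model.train()
    for x, y in train_dl:
        opt.zero_grad()
        loss_fn(model(x), y).backward()
        opt.step()

# Report test accuracy after training.
model.eval()
correct = 0
with torch.no_grad():
    for x, y in test_dl:
        correct += (model(x).argmax(dim=1) == y).sum().item()
print(f"test accuracy: {correct / len(test_ds):.4f}")
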
Change the number of columns and rows on the Launchpad grid.
defaults write com.apple.dock springboard-columns -int 8
defaults write com.apple.dock springboard-rows -int 8
defaults write com.apple.dock ResetLaunchPad -bool TRUE
killall Dock
Use defaults delete on these keys to restore the default grid.

Use GitHub Copilot in the command line.
pip uninstall gh
brew install gh
gh auth login
gh extension install github/gh-copilot
gh extension upgrade gh-copilot
gh copilot suggest 'read text file line by line and return the total number of alphanum chars in each line'
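
For reference, the task described in that prompt is a few lines of plain Python (the file name here is a placeholder):

# Read a text file line by line and report the number of alphanumeric characters per line.
with open("input.txt") as fh:  # placeholder file name
    for lineno, line in enumerate(fh, start=1):
        count = sum(ch.isalnum() for ch in line)
        print(f"line {lineno}: {count} alphanumeric characters")
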
from openai import OpenAI

client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4-turbo",
    messages=[
        {
            "role": "user",
            "content": [
User prompt:
You have access to the source code for the torch.distributed.pipelining package and the relevant documentation for implementing pipeline parallelism. Write a function that creates an accurate pipeline_order for Schedule1F1B given world_size and n_microbatches, such that _format_pipeline_order will return the correct output. Make sure to provide a minimal example to verify that the function performs as expected.
CoT: