Observação: Rota feita nas seguintes condições:
- Mercador level 101
- Carroça Mercantil (T5)
- Utilizando a cobertura "Cobertura de Carroça Nobre"
- Nenhuma roda equipada
- Trent (Sudoeste Calpheon) - Começa Aqui
- Behr (Sudoeste Calpheon)
{
  "openapi": "3.0.0",
  "info": {
    "description": "API de títulos do contas a pagar.",
    "version": "1.0.0-oas3",
    "title": "Títulos do Contas a Pagar"
  },
  "tags": [
    {
      "name": "Títulos a pagar",
services:
  weaviate:
    command:
    - --host
    - 0.0.0.0
    - --port
    - "8080"
    - --scheme
    - http
    image: cr.weaviate.io/semitechnologies/weaviate:1.28.2
import fnmatch | |
import os | |
import sys | |
from typing import List, Optional, Set | |
def parse_exclusion_file(file_path: str) -> Set[str]: | |
patterns = set() | |
if file_path and os.path.exists(file_path): | |
with open(file_path, "r") as f: |
- ColBERT: Efficient and Effective Passage Search via Contextualized Late Interaction over BERT (Khattab et al., 2020)
  https://arxiv.org/abs/2004.12832
- Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks (Lewis et al., 2020)
  https://arxiv.org/abs/2005.11401
- Approximate Nearest Neighbor Negative Contrastive Learning for Dense Text Retrieval (Xiong et al., 2020)
  https://arxiv.org/abs/2007.00808
- Improving Efficient Neural Ranking Models with Cross-Architecture Knowledge Distillation (Hofstätter et al., 2020)
  https://arxiv.org/abs/2010.02666
#!/usr/bin/env sh | |
. "$(dirname -- "$0")/_/husky.sh" | |
red=$(tput setaf 1) # ANSI escape code for red color | |
reset=$(tput sgr0) # ANSI escape code to reset color | |
#Commit message check | |
commit_msg=$(git log -1 --pretty=%B) |
window.scroll_flag = true | |
window.scroll_exit = false | |
window.scroll_delay = 100 | |
$(".output_scroll").each(function() { | |
$(this)[0].scrollTop = $(this)[0].scrollHeight; | |
}); | |
function callScrollToBottom() { | |
setTimeout(scrollToBottom, window.scroll_delay); |
!pip install -q transformers sentencepiece | |
from transformers import AutoTokenizer | |
import transformers | |
import torch | |
model = "meta-llama/Llama-2-13b-chat-hf" | |
tokenizer = AutoTokenizer.from_pretrained(model) | |
pipeline = transformers.pipeline( |