Standard escape codes are prefixed with the Escape character, which can be written as:
- Ctrl-Key: ^[
- Octal: \033
- Unicode: \u001b
- Hexadecimal: \x1B
- Decimal: 27
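All five notations name the same single control byte. A quick Python check (a minimal sketch, not from the original) makes the equivalence concrete:

# ESC is one control character; these literals all produce byte 27 (0x1B).
esc_forms = ["\033", "\u001b", "\x1B", chr(27)]
assert all(form == "\x1b" for form in esc_forms)

# Example: wrap text in an ANSI color sequence, then reset attributes.
print("\x1b[31m" + "red text" + "\x1b[0m")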
#!/usr/bin/lua5.1
local json = require("cjson")
local windows = {}
local drill_down
drill_down = function(workspace_name, nodes)
  if nodes then
    for i, w in ipairs(nodes) do
      if w.pid then
        -- A node with a pid is an actual window. The original fragment ends
        -- here; the rest is a plausible reconstruction of the tree walk.
        windows[#windows + 1] = { pid = w.pid, name = w.name, workspace = workspace_name }
      end
      drill_down(workspace_name, w.nodes)  -- containers nest; recurse into children
    end
  end
end
import argparse
import time
from langchain.chains import RetrievalQA
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline
from transformers import LlamaTokenizer, LlamaForCausalLM, pipeline
from chromadb.config import Settings
from langchain.llms import LlamaCpp
from bottle import Bottle, run, route, request
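These imports outline a retrieval-augmented setup: instructor embeddings feeding a Chroma store, a local Llama model as the LLM, and Bottle as the HTTP front end. A minimal sketch of how the retrieval chain might be wired together, using the imports above (the model names, paths, and parameters here are assumptions, not taken from the original):

# Hypothetical wiring; paths and model names are placeholders.
embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-large")
db = Chroma(persist_directory="db", embedding_function=embeddings)

llm = LlamaCpp(model_path="models/llama-13b.gguf", n_ctx=2048)  # placeholder model file

qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=db.as_retriever(search_kwargs={"k": 4}),
    return_source_documents=True,  # lets each answer cite its source files
)

result = qa({"query": "How can I make SSH act as a local SOCKS server?"})
print(result["result"], result["source_documents"])

Returning source documents is what allows logs like the one below to record a "sources" field alongside each answer.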
[
{"content":"How can I make SSH act as a local SOCKS server? ","role":"user"},
{"sources":["/storage/datasets/manpages/ssh.txt"],
"content":"You can do this using the -L flag in your SSH command line options. For example, if you want to forward all incoming traffic on port 8080 to localhost port 8080, you would run the following command:\n\nssh -L 8080:localhost:8080 username@remote\\_host\n\nThis sets up a SOCKS 4/5 proxy on your local machine listening on port 8080, which forwards incoming requests to localhost port 8080.","role":"assistant","model":"WizVicUncen13.8bit","embedding":"Man pages", "rate":19.3
},
{"content":"How can I make SSH act as a local SOCKS server? ","role":"user"},
{"sources":["/storage/datasets/manpages/ssh.txt"],"content":"You can configure SSH to act as a SOCKS server using the -D option. For example, if you want to run a web browser through SSH, you could start it like this:\n\nssh -D 8080 username@remote\\_machine\n\nThis would create a SOCKS server listening on port 8080 o
import transformers
import transformers.models.llama.modeling_llama

def enable_ntk_rope_scaling(alpha=4):
    # NTK-aware scaled RoPE: patch LlamaRotaryEmbedding so the context window
    # grows by `alpha` while the rotary base is raised to compensate.
    old_init = transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.__init__
    def ntk_scaled_init(self, dim, max_position_embeddings=2048, base=10000, device=None):
        max_position_embeddings = 2048 * alpha
        a = alpha
        base = base * a ** (dim / (dim - 2))  # base change formula
        # The original fragment ends above; delegating to the stock __init__
        # and installing the patch is the standard completion of this snippet.
        old_init(self, dim, max_position_embeddings, base, device)
    transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.__init__ = ntk_scaled_init
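A usage sketch (the model path is a placeholder): the patch has to run before the model is constructed, because each LlamaRotaryEmbedding instance is created inside the model's __init__.

enable_ntk_rope_scaling(alpha=4)  # patch first, then load
model = transformers.LlamaForCausalLM.from_pretrained("path/to/llama-model")  # placeholder path
# The patched model now tolerates contexts up to alpha times the stock 2048-token window.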
import argparse
import time
import torch
import uuid
import os
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM, BitsAndBytesConfig, GPTQConfig
from transformers import StoppingCriteria, StoppingCriteriaList
from auto_gptq import exllama_set_max_input_length
import transformers
from peft import PeftModel
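Importing StoppingCriteria / StoppingCriteriaList usually signals a custom stop condition for generate(). A minimal sketch of that pattern, building on the imports above (the stop-token choice is an assumption, not from the original):

class StopOnTokens(StoppingCriteria):
    """Stop generation as soon as the last emitted token is in `stop_ids`."""
    def __init__(self, stop_ids):
        self.stop_ids = set(stop_ids)

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids[0, -1].item() in self.stop_ids

# 2 is Llama's </s> id; pass this to model.generate(stopping_criteria=stopping).
stopping = StoppingCriteriaList([StopOnTokens(stop_ids=[2])])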
import json
import os
import sys
from collections import defaultdict
from tqdm import tqdm
import argparse
import torch
from safetensors.torch import load_file, save_file
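These imports suggest a script that reads and rewrites safetensors checkpoints. A minimal sketch of the load/save round trip using the imports above (filenames are placeholders):

# Load a shard into a dict of name -> tensor, inspect it, and write it back.
tensors = load_file("model-00001-of-00002.safetensors")  # placeholder filename

shapes = defaultdict(int)
for name, t in tqdm(tensors.items()):
    shapes[tuple(t.shape)] += 1  # tally tensor shapes as a quick sanity check

save_file(tensors, "out.safetensors")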