This is a deep dive into the Continue code base, folder by folder and file by file where relevant.
In this project, our goal is to establish a robust and scalable infrastructure for a PostgreSQL database with high availability, seamless security, and integrated monitoring and alerting systems.
We'll leverage tools like Patroni, Consul, Vault, Prometheus, Grafana, and Cert-Manager to ensure a comprehensive, modern solution. Coolify will act as our orchestration platform, managing various services and simplifying deployments. We aim to not only build a highly available database cluster but also provide a learning experience for interns that demonstrates best practices in DevOps, security, and observability.
The backbone of our infrastructure will focus on a distributed, high-availability PostgreSQL cluster. To ensure reliability, we’ll introduce Patroni for automating failover, Consul for service coordination, and Vault for managing sensitive information. Monitoring will be handled by Prometheus and visualized using Grafana.
import asyncio | |
import os | |
from contextlib import asynccontextmanager | |
import sqlalchemy as sa | |
from dependency_injector import providers | |
from dependency_injector.containers import DeclarativeContainer | |
from dependency_injector.wiring import Provide, inject | |
from fastapi import Depends, FastAPI | |
from sqlalchemy.ext.asyncio import ( |
UPDATE: This has been tested and works on both Linux and Windows 11, used for Llama & DeepSeek.
Here's a sample README.md
file written by Llama3.2 using this docker-compose.yaml file that explains the purpose and usage of the Docker Compose configuration:
ollama-portal
A multi-container Docker application for serving OLLAMA API.
#!/usr/bin/env xcrun -sdk macosx swift | |
// import Foundation | |
import SwiftUI | |
import AppKit | |
/** | |
Minimal UI for ollama | |
## Dev: |
import httpx | |
import json | |
from httpx import ByteStream | |
from openai import OpenAI | |
import instructor | |
from pydantic import BaseModel | |
from loguru import logger |
import motor.motor_asyncio | |
import asyncio | |
from typing import List, Dict, Any | |
import logging | |
from urllib.parse import quote_plus | |
from contextlib import asynccontextmanager | |
# Set up logging for this module.
# NOTE(review): the original lines carried trailing "| |" table-extraction
# artifacts that made them syntactically invalid Python; removed here.
logging.basicConfig(level=logging.INFO)  # configure root handler at INFO (no-op if already configured)
logger = logging.getLogger(__name__)  # module-level logger, per stdlib convention
# Requires Python 3.12+, IPython, rich | |
import datetime | |
import hashlib | |
import importlib | |
import inspect | |
import json | |
import logging | |
import os | |
import pkgutil |
-- chat.lua - Neovim chat script using Lua.
-- Module-level state for the chat client/server.
-- NOTE(review): the original lines carried trailing "| |" extraction
-- artifacts that made them syntactically invalid Lua; removed here.
local uv = vim.loop                  -- libuv event-loop bindings exposed by Neovim
local server = nil                   -- presumably the listening handle (server side) — confirm against later code
local clients = {}                   -- collection of connected peers
local client_socket = nil            -- presumably the outbound socket (client side) — confirm against later code
local server_started = false         -- guards against starting the server twice
local chat_buffer = nil              -- Buffer variable to store the chat buffer
local reconnect_attempts = 0         -- reconnect counter, compared against the cap below
local max_reconnect_attempts = 5     -- give up reconnecting after this many tries
gists | |||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
#runtime #compatibility #ai #instructions #personality #gist #jailbreak #llm