When making this website, I wanted a simple, reasonable way to make it look good on most displays. Not counting any minification techniques, the following 58 bytes of CSS worked well for me:
main {
max-width: 38rem;
padding: 2rem;
margin: auto;
}
//
// Regular expression for URL validation
//
// Author:  Diego Perini
// Created: 2010/12/05
// Updated: 2018/09/12
// License: MIT
//
// Copyright (c) 2010-2018 Diego Perini (http://www.iport.it)
//
/**
 * General-purpose Node.js CLI/API wrapping the Stable Diffusion Python scripts.
 *
 * Note that this uses an older fork of stable-diffusion
 * with the 'txt2img.py' script, and that script was modified to
 * support the --outfile option.
 */
var { spawn, exec } = require("child_process"); | |
var path = require("path"); |
// Website you intended to retrieve for users. | |
const upstream = 'api.openai.com' | |
// Custom pathname for the upstream website. | |
const upstream_path = '/' | |
// Website you intended to retrieve for users using mobile devices. | |
const upstream_mobile = upstream | |
// Countries and regions where you wish to suspend your service. |
import json | |
import os | |
import requests | |
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY") | |
OPENAI_BASE_URL = "https://api.openai.com/v1/chat/completions" | |
def translator(srt): |
import { getAuth, withClerkMiddleware } from "@clerk/nextjs/server"; | |
import { NextResponse, NextFetchEvent } from "next/server"; | |
import type { NextRequest } from "next/server"; | |
import { Ratelimit } from "@upstash/ratelimit"; | |
import { Redis } from "@upstash/redis"; | |
// Add public paths for Clerk to handle. | |
const publicPaths = ["/", "/sign-in*", "/sign-up*", "/api/blocked"]; | |
// set your rate limit. |
// Requires the gpt library from https://github.com/hrishioa/socrate and the progress-bar library.
// Created by Hrishi Olickel ([email protected]) (@hrishioa). Reach out if you have trouble running this.
import { ThunkQueue } from '../../utils/simplethrottler'; | |
import { | |
AcceptedModels, | |
Messages, | |
askChatGPT, | |
getMessagesTokenCount, | |
getProperJSONFromGPT, |
Rollup builds don't scale well in large apps. You need to increase Node's memory with --max-old-space-size=4096
to handle all the modules. This is one of Vite's highest-rated issues.
This file documents various findings and attempts to improve this issue.
NOTE: I've only been reading Rollup's source code for a while, so some of these may not be accurate.
from langchain.chat_models import ChatOpenAI | |
from pydantic import BaseModel, Field | |
from langchain.document_loaders import UnstructuredURLLoader | |
from langchain.chains.openai_functions import create_extraction_chain_pydantic | |
class LLMItem(BaseModel): | |
title: str = Field(description="The simple and concise title of the product") | |
description: str = Field(description="The description of the product") | |
def main(): |