Skip to content

Instantly share code, notes, and snippets.

@etrobot
Created November 19, 2023 09:08
Show Gist options
  • Save etrobot/8565391c816a73e544628e33a4f18d69 to your computer and use it in GitHub Desktop.
Save etrobot/8565391c816a73e544628e33a4f18d69 to your computer and use it in GitHub Desktop.
OneClickNewsVideo.ipynb
Display the source blob
Display the rendered blob
Raw
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyO92lmF9lYT910I2n4xSwQm",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "TPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/etrobot/8565391c816a73e544628e33a4f18d69/oneclicknewsvideo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"source": [
"from google.colab import drive\n",
"drive.mount('/content/drive')"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "T1iVltM0-A3t",
"outputId": "c9e75cf1-b156-4104-e1a2-0f7b78bbe150"
},
"execution_count": 11,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!wget http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb\n",
"!sudo dpkg -i libssl1.1_1.1.1f-1ubuntu2_amd64.deb"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "yhJo5yLUIXcb",
"outputId": "9c496c9c-9f0d-4df5-d6e6-a4e2b4f16fa8"
},
"execution_count": 12,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"--2023-11-19 08:43:25-- http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb\n",
"Resolving archive.ubuntu.com (archive.ubuntu.com)... 91.189.91.83, 91.189.91.82, 185.125.190.39, ...\n",
"Connecting to archive.ubuntu.com (archive.ubuntu.com)|91.189.91.83|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 1318204 (1.3M) [application/vnd.debian.binary-package]\n",
"Saving to: ‘libssl1.1_1.1.1f-1ubuntu2_amd64.deb.1’\n",
"\n",
"libssl1.1_1.1.1f-1u 100%[===================>] 1.26M 5.61MB/s in 0.2s \n",
"\n",
"2023-11-19 08:43:26 (5.61 MB/s) - ‘libssl1.1_1.1.1f-1ubuntu2_amd64.deb.1’ saved [1318204/1318204]\n",
"\n",
"(Reading database ... 120890 files and directories currently installed.)\n",
"Preparing to unpack libssl1.1_1.1.1f-1ubuntu2_amd64.deb ...\n",
"Unpacking libssl1.1:amd64 (1.1.1f-1ubuntu2) over (1.1.1f-1ubuntu2) ...\n",
"Setting up libssl1.1:amd64 (1.1.1f-1ubuntu2) ...\n",
"debconf: unable to initialize frontend: Dialog\n",
"debconf: (No usable dialog-like program is installed, so the dialog based frontend cannot be used. at /usr/share/perl5/Debconf/FrontEnd/Dialog.pm line 78.)\n",
"debconf: falling back to frontend: Readline\n",
"debconf: unable to initialize frontend: Readline\n",
"debconf: (This frontend requires a controlling tty.)\n",
"debconf: falling back to frontend: Teletype\n",
"Processing triggers for libc-bin (2.35-0ubuntu3.4) ...\n",
"/sbin/ldconfig.real: /usr/local/lib/libtbbbind_2_0.so.3 is not a symbolic link\n",
"\n",
"/sbin/ldconfig.real: /usr/local/lib/libtbbbind.so.3 is not a symbolic link\n",
"\n",
"/sbin/ldconfig.real: /usr/local/lib/libtbbbind_2_5.so.3 is not a symbolic link\n",
"\n",
"/sbin/ldconfig.real: /usr/local/lib/libtbbmalloc_proxy.so.2 is not a symbolic link\n",
"\n",
"/sbin/ldconfig.real: /usr/local/lib/libtbb.so.12 is not a symbolic link\n",
"\n",
"/sbin/ldconfig.real: /usr/local/lib/libtbbmalloc.so.2 is not a symbolic link\n",
"\n"
]
}
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "3jADUN4Z9GGI",
"outputId": "93ce008d-9de2-4812-bfac-9e9744cc51b6"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Requirement already satisfied: duckduckgo-search in /usr/local/lib/python3.10/dist-packages (3.9.6)\n",
"Requirement already satisfied: python-dotenv in /usr/local/lib/python3.10/dist-packages (1.0.0)\n",
"Requirement already satisfied: azure-cognitiveservices-speech in /usr/local/lib/python3.10/dist-packages (1.32.1)\n",
"Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (2.31.0)\n",
"Requirement already satisfied: pillow in /usr/local/lib/python3.10/dist-packages (9.4.0)\n",
"Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.10/dist-packages (4.11.2)\n",
"Requirement already satisfied: litellm in /usr/local/lib/python3.10/dist-packages (1.1.1)\n",
"Requirement already satisfied: aiofiles>=23.2.1 in /usr/local/lib/python3.10/dist-packages (from duckduckgo-search) (23.2.1)\n",
"Requirement already satisfied: click>=8.1.7 in /usr/local/lib/python3.10/dist-packages (from duckduckgo-search) (8.1.7)\n",
"Requirement already satisfied: lxml>=4.9.3 in /usr/local/lib/python3.10/dist-packages (from duckduckgo-search) (4.9.3)\n",
"Requirement already satisfied: httpx[brotli,http2,socks]>=0.25.1 in /usr/local/lib/python3.10/dist-packages (from duckduckgo-search) (0.25.1)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests) (3.4)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests) (2.0.7)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests) (2023.7.22)\n",
"Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.10/dist-packages (from beautifulsoup4) (2.5)\n",
"Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from litellm) (3.8.6)\n",
"Requirement already satisfied: appdirs<2.0.0,>=1.4.4 in /usr/local/lib/python3.10/dist-packages (from litellm) (1.4.4)\n",
"Requirement already satisfied: importlib-metadata>=6.8.0 in /usr/local/lib/python3.10/dist-packages (from litellm) (6.8.0)\n",
"Requirement already satisfied: jinja2<4.0.0,>=3.1.2 in /usr/local/lib/python3.10/dist-packages (from litellm) (3.1.2)\n",
"Requirement already satisfied: openai>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from litellm) (1.3.3)\n",
"Requirement already satisfied: tiktoken>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from litellm) (0.5.1)\n",
"Requirement already satisfied: tokenizers in /usr/local/lib/python3.10/dist-packages (from litellm) (0.15.0)\n",
"Requirement already satisfied: anyio in /usr/local/lib/python3.10/dist-packages (from httpx[brotli,http2,socks]>=0.25.1->duckduckgo-search) (3.7.1)\n",
"Requirement already satisfied: httpcore in /usr/local/lib/python3.10/dist-packages (from httpx[brotli,http2,socks]>=0.25.1->duckduckgo-search) (1.0.2)\n",
"Requirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from httpx[brotli,http2,socks]>=0.25.1->duckduckgo-search) (1.3.0)\n",
"Requirement already satisfied: h2<5,>=3 in /usr/local/lib/python3.10/dist-packages (from httpx[brotli,http2,socks]>=0.25.1->duckduckgo-search) (4.1.0)\n",
"Requirement already satisfied: socksio==1.* in /usr/local/lib/python3.10/dist-packages (from httpx[brotli,http2,socks]>=0.25.1->duckduckgo-search) (1.0.0)\n",
"Requirement already satisfied: brotli in /usr/local/lib/python3.10/dist-packages (from httpx[brotli,http2,socks]>=0.25.1->duckduckgo-search) (1.1.0)\n",
"Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.10/dist-packages (from importlib-metadata>=6.8.0->litellm) (3.17.0)\n",
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2<4.0.0,>=3.1.2->litellm) (2.1.3)\n",
"Requirement already satisfied: distro<2,>=1.7.0 in /usr/lib/python3/dist-packages (from openai>=1.0.0->litellm) (1.7.0)\n",
"Requirement already satisfied: pydantic<3,>=1.9.0 in /usr/local/lib/python3.10/dist-packages (from openai>=1.0.0->litellm) (1.10.13)\n",
"Requirement already satisfied: tqdm>4 in /usr/local/lib/python3.10/dist-packages (from openai>=1.0.0->litellm) (4.66.1)\n",
"Requirement already satisfied: typing-extensions<5,>=4.5 in /usr/local/lib/python3.10/dist-packages (from openai>=1.0.0->litellm) (4.5.0)\n",
"Requirement already satisfied: regex>=2022.1.18 in /usr/local/lib/python3.10/dist-packages (from tiktoken>=0.4.0->litellm) (2023.6.3)\n",
"Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->litellm) (23.1.0)\n",
"Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->litellm) (6.0.4)\n",
"Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /usr/local/lib/python3.10/dist-packages (from aiohttp->litellm) (4.0.3)\n",
"Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->litellm) (1.9.2)\n",
"Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->litellm) (1.4.0)\n",
"Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->litellm) (1.3.1)\n",
"Requirement already satisfied: huggingface_hub<1.0,>=0.16.4 in /usr/local/lib/python3.10/dist-packages (from tokenizers->litellm) (0.19.3)\n",
"Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio->httpx[brotli,http2,socks]>=0.25.1->duckduckgo-search) (1.1.3)\n",
"Requirement already satisfied: hyperframe<7,>=6.0 in /usr/local/lib/python3.10/dist-packages (from h2<5,>=3->httpx[brotli,http2,socks]>=0.25.1->duckduckgo-search) (6.0.1)\n",
"Requirement already satisfied: hpack<5,>=4.0 in /usr/local/lib/python3.10/dist-packages (from h2<5,>=3->httpx[brotli,http2,socks]>=0.25.1->duckduckgo-search) (4.0.0)\n",
"Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from huggingface_hub<1.0,>=0.16.4->tokenizers->litellm) (3.13.1)\n",
"Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface_hub<1.0,>=0.16.4->tokenizers->litellm) (2023.6.0)\n",
"Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from huggingface_hub<1.0,>=0.16.4->tokenizers->litellm) (6.0.1)\n",
"Requirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.10/dist-packages (from huggingface_hub<1.0,>=0.16.4->tokenizers->litellm) (23.2)\n",
"Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.10/dist-packages (from httpcore->httpx[brotli,http2,socks]>=0.25.1->duckduckgo-search) (0.14.0)\n"
]
}
],
"source": [
"!pip install duckduckgo-search python-dotenv azure-cognitiveservices-speech requests pillow beautifulsoup4 litellm"
]
},
{
"cell_type": "code",
"source": [
"!pip install -q google-generativeai"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "9TETxnqpwJxp",
"outputId": "3ce0b370-9668-488b-b514-41e61d4462e8"
},
"execution_count": 16,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/133.2 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.0/133.2 kB\u001b[0m \u001b[31m1.1 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m133.2/133.2 kB\u001b[0m \u001b[31m2.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/267.9 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m267.9/267.9 kB\u001b[0m \u001b[31m7.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h"
]
}
]
},
{
"cell_type": "code",
"source": [
"from dotenv import find_dotenv, load_dotenv\n",
"load_dotenv(dotenv_path='/content/drive/MyDrive/Colab Notebooks/.env')"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "E2A2IIVEYglL",
"outputId": "c41840e2-bb6f-47ea-d179-78d59cdccda7"
},
"execution_count": 14,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"True"
]
},
"metadata": {},
"execution_count": 14
}
]
},
{
"cell_type": "code",
"source": [
"import json,os,base64\n",
"import requests,PIL\n",
"from PIL import Image\n",
"from io import BytesIO\n",
"from requests.cookies import RequestsCookieJar\n",
"from bs4 import BeautifulSoup\n",
"from duckduckgo_search import DDGS\n",
"from dotenv import find_dotenv, load_dotenv\n",
"from litellm import completion\n",
"import azure.cognitiveservices.speech as speechsdk\n",
"\n",
"load_dotenv(find_dotenv())\n",
"WIDTH = 648\n",
"HEIGHT = 1152\n",
"\n",
"KEYS={\n",
" \"palm/chat-bison\":os.environ['PALM_API_KEY'],\n",
" \"openai/gpt-3.5-turbo-1106\":os.environ['OPENAI_API_KEY']\n",
"}\n",
"# MODEL = \"openai/gpt-3.5-turbo-1106\"\n",
"MODEL = \"palm/chat-bison\"\n",
"\n",
"\n",
"def sumPage(url: str) -> str:\n",
" print('Sum:',url)\n",
"\n",
" def dealCookies(cookies):\n",
" cookie_jar = RequestsCookieJar()\n",
" for cookie in cookies:\n",
" cookie_jar.set(cookie['name'], cookie['value'], domain=cookie['domain'], path=cookie['path'],\n",
" secure=cookie['secure'])\n",
" return cookie_jar\n",
"\n",
" headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Mobile Safari/537.36\"\n",
" }\n",
" # headers = {\n",
" # \"User-Agent\": \"Mozilla\"\n",
" # }\n",
"\n",
" session = requests.Session()\n",
" session.headers = headers\n",
" try:\n",
" cookies = dealCookies(json.load(open('cookies.json')))\n",
" session.cookies.update(cookies)\n",
" except Exception as e:\n",
" print(e)\n",
" pass\n",
"\n",
" response = session.get(url)\n",
" print(response.text[:100])\n",
" soup = BeautifulSoup(response.text, 'html.parser')\n",
"\n",
" elements = [\n",
" element.text for element in soup.find_all([\"h1\", \"h2\", \"h3\", \"p\"])\n",
" if len(element.text) > 5\n",
" ]\n",
" txt=' '.join(elements)\n",
"\n",
" response = completion(model=MODEL, messages=[{\"role\": \"user\",\"content\":'『%s』TLDR;'%txt}],api_key=KEYS[MODEL])\n",
" txt=response[\"choices\"][0][\"message\"][\"content\"]\n",
" print(txt)\n",
" return txt\n",
"\n",
" # except Exception as e:\n",
" # print(e)\n",
" # return ''\n",
"\n",
"def resize_and_crop(image_path):\n",
" image = Image.open(image_path)\n",
" width, height = image.size\n",
"\n",
" new_height = HEIGHT\n",
" new_width = int(width * (new_height / height))\n",
"\n",
" resized_image = image.resize((new_width, new_height), PIL.Image.LANCZOS)\n",
"\n",
" # 计算裁剪位置\n",
" left = (new_width - WIDTH) / 2\n",
" top = (new_height - HEIGHT) / 2\n",
" right = (new_width + WIDTH) / 2\n",
" bottom = (new_height + HEIGHT) / 2\n",
"\n",
" # 进行图像裁剪\n",
" cropped_image = resized_image.crop((left, top, right, bottom))\n",
"\n",
" # 保存裁剪后的图像\n",
" cropped_image.save(image_path)\n",
" return image_path\n",
"\n",
"def genImg(prompt=None,filename=\"segmind.jpg\",):\n",
" if prompt is None:\n",
" return\n",
" api_key = os.environ['SEGMIND']\n",
" url = \"https://api.segmind.com/v1/sdxl1.0-txt2img\"\n",
" # Request payload\n",
" data = {\n",
" \"prompt\":prompt,\n",
" \"negative_prompt\": \"ugly, tiling, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft\",\n",
" \"style\": \"base\",\n",
" \"samples\": 1,\n",
" \"scheduler\": \"dpmpp_sde_ancestral\",\n",
" \"num_inference_steps\": 50,\n",
" \"guidance_scale\": 8,\n",
" \"strength\": 1,\n",
" \"seed\": 45674567,\n",
" \"img_width\": 1024,\n",
" \"img_height\": 1024,\n",
" \"refiner\": \"yes\",\n",
" \"base64\": True # Set this to True to get base64 encoding\n",
" }\n",
"\n",
" response = requests.post(url, json=data, headers={'x-api-key': api_key})\n",
"\n",
" # Get the base64 encoding from the response\n",
" img_data = response.json()[\"image\"]\n",
"\n",
" # Decode the base64 encoding to bytes\n",
" img_bytes = base64.b64decode(img_data)\n",
"\n",
" # Convert the bytes to an image object\n",
" img = Image.open(BytesIO(img_bytes))\n",
"\n",
" # Save the image to a local file\n",
" img.save(filename)\n",
"\n",
" return filename\n",
"\n",
"def text2voice(text:str,filename='audio.mp3',voice='en-US-ElizabethNeural',region=\"eastasia\"):\n",
" speech_config = speechsdk.SpeechConfig(subscription=os.environ[\"TTS\"],region=region)\n",
" speech_config.set_speech_synthesis_output_format(speechsdk.SpeechSynthesisOutputFormat.Audio48Khz192KBitRateMonoMp3)\n",
" audio_config = speechsdk.audio.AudioOutputConfig(filename=filename)\n",
"\n",
" # The language of the voice that speaks.\n",
" speech_config.speech_synthesis_voice_name = voice\n",
" speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)\n",
"\n",
" result = speech_synthesizer.speak_text_async(text).get()\n",
" # Check result\n",
" if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:\n",
" print(\"Speech synthesized to speaker for text [{}]\".format(text))\n",
" stream = speechsdk.AudioDataStream(result)\n",
" stream.save_to_wav_file(filename)\n",
" return filename\n",
"\n",
"def search(prompt:str)->str:\n",
" print(prompt)\n",
" search_cmd = [{\n",
" \"role\": \"user\",\n",
" \"content\": f'I want to search this info:『{prompt}』,plz give me the keywords, output in one line',\n",
" }]\n",
"\n",
" response = completion(model=MODEL, messages=search_cmd,api_key=KEYS[MODEL])\n",
"\n",
" reply_text = response[\"choices\"][0][\"message\"][\"content\"]\n",
"\n",
" with DDGS() as ddgs:\n",
" serp=ddgs.text(reply_text, max_results=3)\n",
" results = [sumPage(r['href']) for r in serp]\n",
" return '\\n'.join(results)\n",
"\n",
"\n",
"if __name__=='__main__':\n",
"    # Fix: \"orst\" was a typo in the news query; \"ousts\" matches the story\n",
"    # actually retrieved (see the run's search summaries in the cell output).\n",
"    prompt1 = 'OpenAI board ousts Sam Altman'\n",
"    prompt2 = '『%s』\\nplease rewrite the text above to be a broadcast manuscript for the announcer, output pure content text without additional instructions,starts with \"welcome to our broadcast. Breaking news\"'%search(prompt1)\n",
"    reply_text = completion(model=MODEL,\n",
"                            messages=[{\n",
"                                \"role\": \"user\",\n",
"                                \"content\": prompt2,\n",
"                            }],\n",
"                            api_key=KEYS[MODEL])[\"choices\"][0][\"message\"][\"content\"]\n",
"    print(reply_text)\n",
"    # Narrate the manuscript, then generate and crop the news-anchor image\n",
"    # consumed by the Wav2Lip cells below.\n",
"    text2voice(reply_text)\n",
"    resize_and_crop(genImg(\"Photo-realistic 20s young female with short hair in suit is broadcasting at tech news channel, facing the camera, aqua tone background\"))\n",
"\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "solH_1ei9Pef",
"outputId": "5122d83f-ada6-4d99-9018-0d7620bba591"
},
"execution_count": 22,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"OpenAI board orst Sam Altman\n",
"Sum: https://www.cnn.com/2023/11/18/tech/openai-sam-altman-shakeup-what-happened/index.html\n",
"[Errno 2] No such file or directory: 'cookies.json'\n",
" <!DOCTYPE html>\n",
"<html lang=\"en\" data-uri=\"cms.cnn.com/_pages/clp45psm9000046qidcdl80jz@published\" \n",
"OpenAI CEO Sam Altman was fired on Friday. The board is having second thoughts about the firing and has asked Altman to return. Altman is considering the offer.\n",
"Sum: https://www.nytimes.com/2023/11/18/technology/sam-altman-openai-board.html\n",
"[Errno 2] No such file or directory: 'cookies.json'\n",
"<!DOCTYPE html>\n",
"<html lang=\"en\" class=\" story nytapp-vi-article\" xmlns:og=\"http://opengraphprotocol\n",
" - Sam Altman was forced out of OpenAI on Friday.\n",
"- OpenAI's investors and supporters pressured the board to bring him back.\n",
"- Microsoft led the pressure campaign.\n",
"- OpenAI's investors are willing to invest if he were to start a new company.\n",
"- There is no guarantee that Mr. Altman or Mr. Brockman will be reinstated at OpenAI.\n",
"- OpenAI's board was talking with Mr. Altman about potentially returning to the company.\n",
"Sum: https://www.cnbc.com/2023/11/18/heres-whos-on-openais-board-the-group-behind-sam-altmans-ouster.html\n",
"[Errno 2] No such file or directory: 'cookies.json'\n",
"<!DOCTYPE html><html lang=\"en\" prefix=\"og=https://ogp.me/ns#\" itemscope=\"\" itemType=\"https://schema.\n",
"The OpenAI board ousted CEO Sam Altman after he was not \"consistently candid\" in his communications with the board. The board also said it no longer has confidence in his ability to lead OpenAI. The board is made up of OpenAI co-founder Greg Brockman, chief scientist Ilya Sutskever, Quora CEO Adam D'Angelo, Tasha McCauley, Helen Toner, and Altman himself.\n",
"Welcome to our broadcast. Breaking news:\n",
"\n",
"OpenAI CEO Sam Altman was fired on Friday, but the board is having second thoughts about the firing and has asked Altman to return. Altman is considering the offer.\n",
"\n",
"Sam Altman was forced out of OpenAI on Friday. OpenAI's investors and supporters pressured the board to bring him back. Microsoft led the pressure campaign. OpenAI's investors are willing to invest if he were to start a new company.\n",
"\n",
"There is no guarantee that Mr. Altman or Mr. Brockman will be reinstated at OpenAI. OpenAI's board was talking with Mr. Altman about potentially returning to the company.\n",
"\n",
"The OpenAI board ousted CEO Sam Altman after he was not \"consistently candid\" in his communications with the board. The board also said it no longer has confidence in his ability to lead OpenAI. The board is made up of OpenAI co-founder Greg Brockman, chief scientist Ilya Sutskever, Quora CEO Adam D'Angelo, Tasha McCauley, Helen Toner, and Altman himself.\n",
"Speech synthesized to speaker for text [Welcome to our broadcast. Breaking news:\n",
"\n",
"OpenAI CEO Sam Altman was fired on Friday, but the board is having second thoughts about the firing and has asked Altman to return. Altman is considering the offer.\n",
"\n",
"Sam Altman was forced out of OpenAI on Friday. OpenAI's investors and supporters pressured the board to bring him back. Microsoft led the pressure campaign. OpenAI's investors are willing to invest if he were to start a new company.\n",
"\n",
"There is no guarantee that Mr. Altman or Mr. Brockman will be reinstated at OpenAI. OpenAI's board was talking with Mr. Altman about potentially returning to the company.\n",
"\n",
"The OpenAI board ousted CEO Sam Altman after he was not \"consistently candid\" in his communications with the board. The board also said it no longer has confidence in his ability to lead OpenAI. The board is made up of OpenAI co-founder Greg Brockman, chief scientist Ilya Sutskever, Quora CEO Adam D'Angelo, Tasha McCauley, Helen Toner, and Altman himself.]\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!git clone https://github.com/Rudrabha/Wav2Lip.git"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "t75HbSvLJlop",
"outputId": "62cef3fb-5bf2-4d65-fcc2-471e93225f41"
},
"execution_count": 23,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"fatal: destination path 'Wav2Lip' already exists and is not an empty directory.\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!cd Wav2Lip && pip install -r requirements.txt"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "KqBTH9--QvH-",
"outputId": "c51d883f-9f30-4458-80cc-af939eb663ec"
},
"execution_count": 24,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Collecting librosa==0.7.0 (from -r requirements.txt (line 1))\n",
" Using cached librosa-0.7.0.tar.gz (1.6 MB)\n",
" Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
"Collecting numpy==1.17.1 (from -r requirements.txt (line 2))\n",
" Using cached numpy-1.17.1.zip (6.5 MB)\n",
" Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
"Requirement already satisfied: opencv-contrib-python>=4.2.0.34 in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 3)) (4.8.0.76)\n",
"\u001b[31mERROR: Could not find a version that satisfies the requirement opencv-python==4.1.0.25 (from versions: 3.4.0.14, 3.4.10.37, 3.4.11.39, 3.4.11.41, 3.4.11.43, 3.4.11.45, 3.4.13.47, 3.4.15.55, 3.4.16.57, 3.4.16.59, 3.4.17.61, 3.4.17.63, 3.4.18.65, 4.3.0.38, 4.4.0.40, 4.4.0.42, 4.4.0.44, 4.4.0.46, 4.5.1.48, 4.5.3.56, 4.5.4.58, 4.5.4.60, 4.5.5.62, 4.5.5.64, 4.6.0.66, 4.7.0.68, 4.7.0.72, 4.8.0.74, 4.8.0.76, 4.8.1.78)\u001b[0m\u001b[31m\n",
"\u001b[0m\u001b[31mERROR: No matching distribution found for opencv-python==4.1.0.25\u001b[0m\u001b[31m\n",
"\u001b[0m"
]
}
]
},
{
"cell_type": "code",
"source": [
"!pip install librosa==0.8.0"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Ma6P3xtITBW2",
"outputId": "3fb6d687-c1f3-4c3f-a807-87341987ff5a"
},
"execution_count": 25,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Requirement already satisfied: librosa==0.8.0 in /usr/local/lib/python3.10/dist-packages (0.8.0)\n",
"Requirement already satisfied: audioread>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from librosa==0.8.0) (3.0.1)\n",
"Requirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.10/dist-packages (from librosa==0.8.0) (1.23.5)\n",
"Requirement already satisfied: scipy>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from librosa==0.8.0) (1.11.3)\n",
"Requirement already satisfied: scikit-learn!=0.19.0,>=0.14.0 in /usr/local/lib/python3.10/dist-packages (from librosa==0.8.0) (1.2.2)\n",
"Requirement already satisfied: joblib>=0.14 in /usr/local/lib/python3.10/dist-packages (from librosa==0.8.0) (1.3.2)\n",
"Requirement already satisfied: decorator>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from librosa==0.8.0) (4.4.2)\n",
"Requirement already satisfied: resampy>=0.2.2 in /usr/local/lib/python3.10/dist-packages (from librosa==0.8.0) (0.4.2)\n",
"Requirement already satisfied: numba>=0.43.0 in /usr/local/lib/python3.10/dist-packages (from librosa==0.8.0) (0.58.1)\n",
"Requirement already satisfied: soundfile>=0.9.0 in /usr/local/lib/python3.10/dist-packages (from librosa==0.8.0) (0.12.1)\n",
"Requirement already satisfied: pooch>=1.0 in /usr/local/lib/python3.10/dist-packages (from librosa==0.8.0) (1.8.0)\n",
"Requirement already satisfied: llvmlite<0.42,>=0.41.0dev0 in /usr/local/lib/python3.10/dist-packages (from numba>=0.43.0->librosa==0.8.0) (0.41.1)\n",
"Requirement already satisfied: platformdirs>=2.5.0 in /usr/local/lib/python3.10/dist-packages (from pooch>=1.0->librosa==0.8.0) (4.0.0)\n",
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from pooch>=1.0->librosa==0.8.0) (23.2)\n",
"Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from pooch>=1.0->librosa==0.8.0) (2.31.0)\n",
"Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn!=0.19.0,>=0.14.0->librosa==0.8.0) (3.2.0)\n",
"Requirement already satisfied: cffi>=1.0 in /usr/local/lib/python3.10/dist-packages (from soundfile>=0.9.0->librosa==0.8.0) (1.16.0)\n",
"Requirement already satisfied: pycparser in /usr/local/lib/python3.10/dist-packages (from cffi>=1.0->soundfile>=0.9.0->librosa==0.8.0) (2.21)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->pooch>=1.0->librosa==0.8.0) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->pooch>=1.0->librosa==0.8.0) (3.4)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->pooch>=1.0->librosa==0.8.0) (2.0.7)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->pooch>=1.0->librosa==0.8.0) (2023.7.22)\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!cp -ri \"/content/drive/MyDrive/Wav2Lip/wav2lip_gan.pth\" /content/Wav2Lip/checkpoints/"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "NzP2BabbUd3k",
"outputId": "cd2334b3-7b73-42d0-9160-220fd6df4aca"
},
"execution_count": 26,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"cp: overwrite '/content/Wav2Lip/checkpoints/wav2lip_gan.pth'? \n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!cd Wav2Lip && python inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face \"../segmind.jpg\" --audio \"../audio.mp3\" --pads 0 20 0 0"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "lNYVP4P2QwLP",
"outputId": "ad529c22-51b9-4744-aba1-04ae841c5548"
},
"execution_count": 27,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Using cpu for inference.\n",
"Reading video frames...\n",
"Number of frames available for inference: 1\n",
"Extracting raw audio...\n",
"ffmpeg version 4.4.2-0ubuntu0.22.04.1 Copyright (c) 2000-2021 the FFmpeg developers\n",
" built with gcc 11 (Ubuntu 11.2.0-19ubuntu1)\n",
" configuration: --prefix=/usr --extra-version=0ubuntu0.22.04.1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libdav1d --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librabbitmq --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzimg --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opencl --enable-opengl --enable-sdl2 --enable-pocketsphinx --enable-librsvg --enable-libmfx --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libx264 --enable-shared\n",
" libavutil 56. 70.100 / 56. 70.100\n",
" libavcodec 58.134.100 / 58.134.100\n",
" libavformat 58. 76.100 / 58. 76.100\n",
" libavdevice 58. 13.100 / 58. 13.100\n",
" libavfilter 7.110.100 / 7.110.100\n",
" libswscale 5. 9.100 / 5. 9.100\n",
" libswresample 3. 9.100 / 3. 9.100\n",
" libpostproc 55. 9.100 / 55. 9.100\n",
"\u001b[0;35m[mp3 @ 0x5643e4031180] \u001b[0m\u001b[0;33mEstimating duration from bitrate, this may be inaccurate\n",
"\u001b[0mInput #0, mp3, from '../audio.mp3':\n",
" Duration: 00:01:08.69, start: 0.000000, bitrate: 192 kb/s\n",
" Stream #0:0: Audio: mp3, 48000 Hz, mono, fltp, 192 kb/s\n",
"Stream mapping:\n",
" Stream #0:0 -> #0:0 (mp3 (mp3float) -> pcm_s16le (native))\n",
"Press [q] to stop, [?] for help\n",
"Output #0, wav, to 'temp/temp.wav':\n",
" Metadata:\n",
" ISFT : Lavf58.76.100\n",
" Stream #0:0: Audio: pcm_s16le ([1][0][0][0] / 0x0001), 48000 Hz, mono, s16, 768 kb/s\n",
" Metadata:\n",
" encoder : Lavc58.134.100 pcm_s16le\n",
"size= 2kB time=00:00:00.00 bitrate=N/A speed=N/A \rsize= 6440kB time=00:01:08.66 bitrate= 768.3kbits/s speed= 431x \n",
"video:0kB audio:6440kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 0.001183%\n",
"(80, 5496)\n",
"Length of mel chunks: 1714\n",
" 0% 0/14 [00:00<?, ?it/s]\n",
" 0% 0/1 [00:00<?, ?it/s]\u001b[A\n",
"100% 1/1 [00:16<00:00, 16.33s/it]\n",
"Load checkpoint from: checkpoints/wav2lip_gan.pth\n",
"Model loaded\n",
"100% 14/14 [07:46<00:00, 33.36s/it]\n",
"ffmpeg version 4.4.2-0ubuntu0.22.04.1 Copyright (c) 2000-2021 the FFmpeg developers\n",
" built with gcc 11 (Ubuntu 11.2.0-19ubuntu1)\n",
" configuration: --prefix=/usr --extra-version=0ubuntu0.22.04.1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libdav1d --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librabbitmq --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzimg --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opencl --enable-opengl --enable-sdl2 --enable-pocketsphinx --enable-librsvg --enable-libmfx --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libx264 --enable-shared\n",
" libavutil 56. 70.100 / 56. 70.100\n",
" libavcodec 58.134.100 / 58.134.100\n",
" libavformat 58. 76.100 / 58. 76.100\n",
" libavdevice 58. 13.100 / 58. 13.100\n",
" libavfilter 7.110.100 / 7.110.100\n",
" libswscale 5. 9.100 / 5. 9.100\n",
" libswresample 3. 9.100 / 3. 9.100\n",
" libpostproc 55. 9.100 / 55. 9.100\n",
"\u001b[0;33mGuessed Channel Layout for Input Stream #0.0 : mono\n",
"\u001b[0mInput #0, wav, from 'temp/temp.wav':\n",
" Metadata:\n",
" encoder : Lavf58.76.100\n",
" Duration: 00:01:08.69, bitrate: 768 kb/s\n",
" Stream #0:0: Audio: pcm_s16le ([1][0][0][0] / 0x0001), 48000 Hz, mono, s16, 768 kb/s\n",
"Input #1, avi, from 'temp/result.avi':\n",
" Metadata:\n",
" software : Lavf59.27.100\n",
" Duration: 00:01:08.56, start: 0.000000, bitrate: 1072 kb/s\n",
" Stream #1:0: Video: mpeg4 (Simple Profile) (DIVX / 0x58564944), yuv420p, 648x1152 [SAR 1:1 DAR 9:16], 1067 kb/s, 25 fps, 25 tbr, 25 tbn, 25 tbc\n",
"Stream mapping:\n",
" Stream #1:0 -> #0:0 (mpeg4 (native) -> h264 (libx264))\n",
" Stream #0:0 -> #0:1 (pcm_s16le (native) -> aac (native))\n",
"Press [q] to stop, [?] for help\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0m\u001b[0;33m-qscale is ignored, -crf is recommended.\n",
"\u001b[0m\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0musing SAR=1/1\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0musing cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mprofile High, level 3.1, 4:2:0, 8-bit\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0m264 - core 163 r3060 5db6aa6 - H.264/MPEG-4 AVC codec - Copyleft 2003-2021 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=3 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=crf mbtree=1 crf=23.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00\n",
"Output #0, mp4, to 'results/result_voice.mp4':\n",
" Metadata:\n",
" encoder : Lavf58.76.100\n",
" Stream #0:0: Video: h264 (avc1 / 0x31637661), yuv420p(progressive), 648x1152 [SAR 1:1 DAR 9:16], q=2-31, 25 fps, 12800 tbn\n",
" Metadata:\n",
" encoder : Lavc58.134.100 libx264\n",
" Side data:\n",
" cpb: bitrate max/min/avg: 0/0/0 buffer size: 0 vbv_delay: N/A\n",
" Stream #0:1: Audio: aac (LC) (mp4a / 0x6134706D), 48000 Hz, mono, fltp, 69 kb/s\n",
" Metadata:\n",
" encoder : Lavc58.134.100 aac\n",
"frame= 1714 fps= 44 q=-1.0 Lsize= 1727kB time=00:01:08.67 bitrate= 206.0kbits/s speed=1.76x \n",
"video:1151kB audio:524kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 3.126398%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mframe I:7 Avg QP:14.57 size: 65616\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mframe P:458 Avg QP:15.42 size: 1001\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mframe B:1249 Avg QP:25.40 size: 208\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mconsecutive B-frames: 1.9% 2.0% 2.3% 93.8%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mmb I I16..4: 12.2% 80.3% 7.6%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mmb P I16..4: 0.0% 0.5% 0.0% P16..4: 5.2% 1.1% 0.6% 0.0% 0.0% skip:92.6%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mmb B I16..4: 0.0% 0.1% 0.0% B16..8: 2.6% 0.2% 0.0% direct: 0.0% skip:97.1% L0:49.1% L1:48.1% BI: 2.8%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0m8x8 transform intra:83.0% inter:93.2%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mcoded y,uvDC,uvAC intra: 75.1% 84.0% 49.5% inter: 0.6% 1.2% 0.0%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mi16 v,h,dc,p: 23% 37% 21% 20%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mi8 v,h,dc,ddl,ddr,vr,hd,vl,hu: 21% 29% 25% 4% 3% 4% 4% 4% 4%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mi4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 45% 40% 9% 1% 2% 1% 2% 1% 1%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mi8c dc,h,v,p: 36% 30% 25% 9%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mWeighted P-Frames: Y:0.0% UV:0.0%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mref P L0: 67.1% 7.1% 14.3% 11.5%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mref B L0: 79.1% 15.4% 5.6%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mref B L1: 94.1% 5.9%\n",
"\u001b[1;36m[libx264 @ 0x56796aa19800] \u001b[0mkb/s:137.40\n",
"\u001b[1;36m[aac @ 0x56796aa1b780] \u001b[0mQavg: 8673.380\n"
]
}
]
}
]
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment