deepspeed-hebrew-gpt_neo_xl-TextIteratorStreamer.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"private_outputs": true,
"provenance": [],
"gpuType": "T4",
"authorship_tag": "ABX9TyOPmDFg+c/99C+SyYhg4AR4",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/Norod/af07131231f6e46ae28c5f94b5ddd7b0/deepspeed-hebrew-gpt_neo_xl-textiteratorstreamer.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"source": [
"!pip install transformers tokenizers deepspeed xformers bitsandbytes accelerate gradio"
],
"metadata": {
"id": "8BKOA-W_Zndy"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "cIBivOgKZY6h"
},
"outputs": [],
"source": [
"\n",
"import deepspeed\n",
"import torch\n",
"from transformers import pipeline\n",
"import os\n",
"\n",
"model_id = 'Norod78/hebrew-gpt_neo-xl'\n",
"\n",
"local_rank = int(os.getenv('LOCAL_RANK', '0'))\n",
"world_size = int(os.getenv('WORLD_SIZE', '1'))\n",
"generator = pipeline('text-generation', model=model_id,\n",
" tokenizer=model_id, \n",
" device=local_rank)\n",
"\n",
"ds_engine = deepspeed.init_inference(generator.model,\n",
" mp_size=world_size,\n",
" dtype=torch.half, \n",
" #save_mp_checkpoint_path=\"./hebrew-gpt_neo-xl-half\",\n",
" replace_with_kernel_inject=True)\n",
"generator.model = ds_engine.module\n"
]
},
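{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional sanity check (added for clarity, not in the original gist): after kernel injection the pipeline's model should hold fp16 parameters on the GPU. This uses only the `generator` defined above."
]
},
{
"cell_type": "code",
"source": [
"# Optional: the injected module should report fp16 parameters on CUDA.\n",
"p = next(generator.model.parameters())\n",
"print(p.dtype, p.device)  # expected: torch.float16 cuda:0"
],
"metadata": {},
"execution_count": null,
"outputs": []
},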
{
"cell_type": "code",
"source": [
"#init_text = \"מהירות העל היא\"\n",
"init_text = \"האיש האחרון עלי אדמות ישב לבד בחדרו, כשלפתע\"\n",
"\n",
"string = generator(init_text, do_sample=True, min_length=20, max_length=64, top_k=40, top_p=0.92, temperature=0.9, repetition_penalty=2.2)\n",
"if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:\n",
" print(string)"
],
"metadata": {
"id": "DWXB0Il-jQaB"
},
"execution_count": null,
"outputs": []
},
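{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `text-generation` pipeline returns a list with one dict per generated sequence. As a small illustration (added here, not in the original gist), this pulls out just the generated string:"
]
},
{
"cell_type": "code",
"source": [
"# `string` is a list like [{'generated_text': '...'}]; print only the text.\n",
"if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:\n",
"    print(string[0]['generated_text'])"
],
"metadata": {},
"execution_count": null,
"outputs": []
},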
{
"cell_type": "code",
"source": [
"import gradio as gr\n",
"from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer\n",
"from threading import Thread\n",
"import torch\n",
"\n",
"\n",
"early_stop_pattern = \"\\n\\n\\n\"\n",
"print(f'Early stop pattern = \\\"{early_stop_pattern}\\\"')\n",
"\n",
"model = generator.model\n",
"tok = generator.tokenizer \n",
"\n",
"CUDA_AVAILABLE = torch.cuda.is_available()\n",
"device = torch.device(\"cuda\" if CUDA_AVAILABLE else \"cpu\")\n",
"\n",
"def generate(text = \"\"):\n",
" print(\"Create streamer\")\n",
" yield \"[אנא המתינו לתשובה]\"\n",
" streamer = TextIteratorStreamer(tok, timeout=5.)\n",
" if len(text) == 0:\n",
" text = \"\\n\"\n",
"\n",
" inputs = tok([text], return_tensors=\"pt\").to(device)\n",
" generation_kwargs = dict(inputs, streamer=streamer, repetition_penalty=2.5, do_sample=True, top_k=40, top_p=0.2, temperature=0.4, num_beams = 1 ,max_new_tokens=256, pad_token_id = model.config.eos_token_id, early_stopping=True, no_repeat_ngram_size=4)\n",
" thread = Thread(target=model.generate, kwargs=generation_kwargs)\n",
" thread.start()\n",
" generated_text = \"\"\n",
" for new_text in streamer:\n",
" yield generated_text + new_text\n",
" print(new_text, end =\"\")\n",
" generated_text += new_text\n",
" if (early_stop_pattern in generated_text) or (tok.eos_token in new_text):\n",
" generated_text = generated_text[: generated_text.find(early_stop_pattern) if early_stop_pattern else None]\n",
" streamer.end()\n",
" print(\"\\n--\\n\")\n",
" yield generated_text\n",
" return generated_text \n",
" return generated_text\n",
"\n",
"demo = gr.Interface(\n",
" title=\"Hebrew GPT-Neo 1.3B - Gradio demo\",\n",
" fn=generate,\n",
" inputs=gr.Textbox(label=\"כתבו כאן את הטקסט שלכם או השאירו ריק\", elem_id=\"input_text\"),\n",
" outputs=gr.Textbox(type=\"text\", label=\"פה יופיע הטקסט שהמחולל יחולל\", elem_id=\"output_text\"),\n",
" css=\"#output_text{direction: rtl} #input_text{direction: rtl}\"\n",
")\n",
"\n",
"demo.queue()\n",
"demo.launch(debug=True)"
],
"metadata": {
"id": "FhSCepWx77Vu"
},
"execution_count": null,
"outputs": []
},
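{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, the same streaming pattern works without Gradio. The sketch below (added for illustration, not in the original gist) reuses `model`, `tok`, and `device` from above: `model.generate()` runs on a worker thread while the main thread drains the `TextIteratorStreamer`. The prompt string is an arbitrary example."
]
},
{
"cell_type": "code",
"source": [
"# Minimal console streaming sketch, assuming `model`, `tok`, `device` exist.\n",
"from threading import Thread\n",
"from transformers import TextIteratorStreamer\n",
"\n",
"prompt_inputs = tok([\"שלום\"], return_tensors=\"pt\").to(device)  # \"Hello\"\n",
"console_streamer = TextIteratorStreamer(tok, skip_prompt=True, timeout=30.)\n",
"worker = Thread(target=model.generate,\n",
"                kwargs=dict(prompt_inputs, streamer=console_streamer,\n",
"                            do_sample=True, max_new_tokens=64,\n",
"                            pad_token_id=model.config.eos_token_id))\n",
"worker.start()\n",
"for chunk in console_streamer:  # blocks until the worker pushes text\n",
"    print(chunk, end=\"\", flush=True)\n",
"worker.join()"
],
"metadata": {},
"execution_count": null,
"outputs": []
},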
{
"cell_type": "code",
"source": [
"\n",
"#generator.tokenizer.save_pretrained(\"hebrew-gpt_neo-xl-half\")"
],
"metadata": {
"id": "hgB7tMnOgmKJ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"#from google.colab import drive\n",
"#drive.mount('/content/gdrive')"
],
"metadata": {
"id": "tn57muXLj4QL"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"#!cp -rf /content/hebrew-gpt_neo-xl-half /content/gdrive/MyDrive/colab_data/gpt2/"
],
"metadata": {
"id": "hvzRAV_UkBGd"
},
"execution_count": null,
"outputs": []
}
]
}