Save the YAML file as .github/workflows/comment_crossval_results.yml.
Add the format_results.py script at the root of your repo.
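
As a rough sketch only: a minimal format_results.py could read the cross-validation report produced by the workflow and print a short markdown summary for the PR comment. The file name results.json and the metric keys used below are assumptions, not something the workflow above guarantees; adapt them to whatever report your cross-validation step actually writes.

# format_results.py -- minimal sketch; file name and report keys are placeholders.
import json

def format_results(path="results.json"):
    with open(path) as f:
        report = json.load(f)
    # Turn the top-level numeric metrics into a small markdown table.
    lines = ["| metric | value |", "| --- | --- |"]
    for name, value in sorted(report.items()):
        if isinstance(value, (int, float)):
            lines.append(f"| {name} | {value:.3f} |")
    return "\n".join(lines)

if __name__ == "__main__":
    print(format_results())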

# Query the OpenAI chat completions endpoint from the shell and print the reply.
# Requires curl, jq, and an exported OPENAI_API_KEY. Usage: gpt "your prompt"
function gpt() {
  local url="https://api.openai.com/v1/chat/completions"
  local model="gpt-3.5-turbo"
  # Build the JSON body with jq so quotes and newlines in the prompt are escaped safely.
  local body
  body=$(jq -n --arg model "$model" --arg content "$1" \
    '{model: $model, messages: [{role: "user", content: $content}]}')
  local headers="Content-Type: application/json"
  local auth="Authorization: Bearer ${OPENAI_API_KEY}"
  curl -s -H "$headers" -H "$auth" -d "$body" "$url" \
    | jq -r '.choices[0].message.content'
}

import streamlit as st
import numpy as np
import pandas as pd
import json
import requests

@st.cache
def get_auth_token(host, user, pw):
    # Body only runs on a cache miss; later calls with the same args reuse the token.
    st.write("cache miss token!")
    url = f"{host}/api/auth"
    payload = {"username": user, "password": pw}
    response = requests.post(url, json=payload)
    return response.json()["access_token"]

import json
import requests

def get_auth_token(host, user, pw):
    url = f"{host}/api/auth"
    payload = {"username": user, "password": pw}
    response = requests.post(url, json=payload)
    try:
        token = response.json()["access_token"]
        return token
    except (ValueError, KeyError):
        # No JSON body or no access_token field; fail with a readable error.
        raise RuntimeError(f"Unexpected auth response: {response.text}")
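
The sketch below shows hypothetical usage inside a Streamlit app; the host and credentials are placeholders, not values from the snippets above. With @st.cache, reruns triggered by widget interaction reuse the stored token instead of calling /api/auth again, so "cache miss token!" is printed only once per argument combination.

# Hypothetical usage -- host and credentials are placeholders.
token = get_auth_token("https://example.com", "demo_user", "demo_password")
st.write("Got a token of length", len(token))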

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import logging

from flask import Blueprint, request, jsonify
import requests

from rasa_dm.channels.channel import UserMessage, OutputChannel

from rasa_dm.actions import Action
import requests

class ActionHTTPRequest(Action):
    def name(self):
        return "make_request"

    def run(self, dispatcher, tracker, domain):
        url = 'https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20woeid%20in%20(select%20woeid%20from%20geo.places(1)%20where%20text%3D%22nome%2C%20ak%22)&format=json'
        result = requests.get(url).json()
        # Illustrative completion: assumes the dispatcher exposes utter_message, as in later Rasa versions.
        condition = result["query"]["results"]["channel"]["item"]["condition"]
        dispatcher.utter_message("It's {} and {}F in Nome, AK.".format(condition["text"], condition["temp"]))
        return []

from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

from builtins import str as text

import argparse
import io
import json
| """Implements the long-short term memory character model. | |
| This version vectorizes over multiple examples, but each string | |
| has a fixed length.""" | |
| from __future__ import absolute_import | |
| from __future__ import print_function | |
| from builtins import range | |
| from os.path import dirname, join | |
| import numpy as np | |
| import numpy.random as npr |

import rinocloud as rino
import shutil
import os
import subprocess
import hashlib
import random  # needed for random.choice below

# Persistor and config come from elsewhere in the original project (not shown here).
persist = Persistor(config.rino_token, config.rino_dir)

def save_model_new(persist, model_file, score):
    temp_file = "tmp_{0:06d}.txt".format(random.choice(range(10000)))