import SwiftUI
import CoreMotion

// Main View
struct AppleLogoWithRgbOffset: View {
    // Instantiate a motion manager and pass it in as an
    // environment dependency when you initialize this view.
    @EnvironmentObject var motion: MotionManager
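    // A minimal sketch of the MotionManager this view expects (an assumed
    // shape, not the original class), driven by CoreMotion device-motion updates:
    // class MotionManager: ObservableObject {
    //     private let manager = CMMotionManager()
    //     @Published var pitch: Double = 0
    //     @Published var roll: Double = 0
    //
    //     init() {
    //         manager.deviceMotionUpdateInterval = 1.0 / 60.0
    //         manager.startDeviceMotionUpdates(to: .main) { motion, _ in
    //             guard let attitude = motion?.attitude else { return }
    //             self.pitch = attitude.pitch
    //             self.roll = attitude.roll
    //         }
    //     }
    // }
    // Inject it at the call site, e.g.:
    // AppleLogoWithRgbOffset().environmentObject(MotionManager())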
tell application "System Events"
    if (name of processes) contains "Xcode" then
        -- Check if Xcode is already the active/focused application
        if (name of first application process whose frontmost is true) is not "Xcode" then
            tell application "Xcode 16.2 (Beta)"
                activate
                delay 0.25 -- Wait for Xcode to become active
            end tell
        end if
import SwiftUI

// Remember to download FontSettings.swift
struct WWDC24AnimatedTextView: View {
    var text = "Hello, World!"
    var animation: Animation = .easeInOut
    var targetFontSize: CGFloat = 40
    var minimumFontSize: CGFloat = 30
    var targetFontWeight: Font.Weight = .semibold
""" | |
The code below combines approaches published by both @eugene-yh and @jinyongyoo on Github. | |
Thanks for the contributions guys! | |
""" | |
import torch | |
import peft |
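The chunk cuts off after the imports. As a hedged sketch of the kind of PEFT adapter merge this setup is typically used for (not the actual combined code; the model and adapter paths are placeholders):

from transformers import AutoModelForCausalLM
from peft import PeftModel

# Placeholder paths; the original combined logic is truncated above.
base = AutoModelForCausalLM.from_pretrained("path/to/base-model")
model = PeftModel.from_pretrained(base, "path/to/lora-adapter")
merged = model.merge_and_unload()  # fold the LoRA deltas back into the base weights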
#
# this is adapted from https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L213
# the tokenizer is replaced with ord() to make it easier to see what's actually happening
from typing_extensions import TypedDict, Literal
from typing import List, Optional

Role = Literal["system", "user", "assistant"]

class Message(TypedDict):
    role: Role
    content: str
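The fragment ends here. As a rough sketch of what the referenced generation.py logic does (assuming Llama-2's [INST]/<<SYS>> chat markers and ignoring the per-turn BOS/EOS tokens the real code adds):

B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"

def encode(s: str) -> List[int]:
    # Stand-in "tokenizer": one integer per character, via ord().
    return [ord(c) for c in s]

def encode_dialog(dialog: List[Message]) -> List[int]:
    # Fold an optional system message into the first user turn.
    if dialog[0]["role"] == "system":
        merged = B_SYS + dialog[0]["content"] + E_SYS + dialog[1]["content"]
        dialog = [Message(role="user", content=merged)] + dialog[2:]
    tokens: List[int] = []
    # Each (user, assistant) pair becomes "[INST] prompt [/INST] answer".
    for prompt, answer in zip(dialog[::2], dialog[1::2]):
        tokens += encode(f"{B_INST} {prompt['content'].strip()} {E_INST} {answer['content'].strip()}")
    # A trailing user message awaiting a reply is wrapped the same way.
    if len(dialog) % 2 == 1:
        tokens += encode(f"{B_INST} {dialog[-1]['content'].strip()} {E_INST}")
    return tokens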
smiling mouth revealing white straight teeth - 24426 | |
anxious expression with biting lower lip - 17012 | |
shallow depth of field - 16806 | |
early childhood age - 14067 | |
social worker - 12566 | |
smiling mouth revealing slightly crooked teeth - 12329 | |
broad grin revealing straight white teeth - 11336 | |
pediatrician - 11212 | |
preschooler age - 10873 | |
headshot - 10462 |
Yoav Goldberg, April 2023.
With the release of the ChatGPT model and follow-up large language models (LLMs), there was a lot of discussion of the importance of "RLHF training", that is, "reinforcement learning from human feedback". I was puzzled for a while as to why RL (Reinforcement Learning) is better than learning from demonstrations (a.k.a. supervised learning) for training language models. Shouldn't learning from demonstrations (or, in language-model terminology, "instruction fine-tuning", learning to imitate human-written answers) be sufficient? I came up with a theoretical argument that was somewhat convincing. But I came to realize there is an additional argument which not only supports the case for RL training, but also requires it, in particular for models like ChatGPT. This additional argument is spelled out in (the first half of) a talk by John Schulman from OpenAI. This post pretty much
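To make the demonstrations-vs-RL contrast concrete, here is a toy sketch (mine, not the post's; the linear "model" and constant reward are placeholders):

import torch
import torch.nn.functional as F

vocab, hidden, steps = 256, 32, 5
lm_head = torch.nn.Linear(hidden, vocab)   # stand-in for a language model
h = torch.randn(steps, hidden)             # stand-in hidden states

# Learning from demonstrations: cross-entropy against a human-written target.
human_target = torch.randint(vocab, (steps,))
sft_loss = F.cross_entropy(lm_head(h), human_target)

# RLHF-style signal: sample from the model itself, score that sample, and
# reinforce it in proportion to the reward (plain REINFORCE, no baseline).
dist = torch.distributions.Categorical(logits=lm_head(h))
sample = dist.sample()
reward = torch.tensor(1.0)                 # placeholder for a reward model's score
rl_loss = -(reward * dist.log_prob(sample).sum())

The key difference: the supervised loss only ever grades human-written tokens, while the RL loss grades tokens the model itself produced.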
The Salesforce CodeGen models are a family of large language models trained on a large amount of natural-language data and then fine-tuned on specialized datasets of code. Models with 350M, 2B, 6B, and 16B parameters are provided in three flavors:
- nl, the base model trained on The Pile, a large natural language dataset compiled by EleutherAI
- multi, which is fine-tuned from the nl model on a dataset of code in multiple languages, scraped from GitHub, and
- mono, which is fine-tuned from the multi model on Python code only.
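As a quick usage sketch (assuming the Hugging Face checkpoint naming Salesforce/codegen-<size>-<flavor>):

from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "Salesforce/codegen-350M-mono"  # smallest Python-only flavor
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0]))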
If you're trying to install the PostgreSQL gem pg and it is failing with the following error message:
Installing pg 1.2.3 with native extensions
Gem::Ext::BuildError: ERROR: Failed to build gem native extension.
current directory: ~/.rbenv/versions/3.0.0/lib/ruby/gems/3.0.0/gems/pg-1.2.3/ext
~/.rbenv/versions/3.0.0/bin/ruby -I ~/.rbenv/versions/3.0.0/lib/ruby/3.0.0 -r ./siteconf20210125-97201-pycpo.rb extconf.rb
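The error output is truncated here. A common cause (an assumption on my part, not necessarily this post's diagnosis) is that the build can't find PostgreSQL's client library; installing it and pointing the gem at pg_config usually resolves it, e.g. with Homebrew on macOS:

brew install libpq
gem install pg -- --with-pg-config="$(brew --prefix libpq)/bin/pg_config"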