@partybusiness
Last active March 3, 2025 16:53
Compositor effect in Godot that makes the camera look like a VHS recording. The files below are, in order: the upscale shader (vhs_blur_effect.glsl), the CompositorEffect script (class_name VHSEffect), and the downsample shader (vhs_shrink_effect.glsl).
// vhs_blur_effect.glsl: reads the small texture and upscales it to the full-screen colour image.
#[compute]
#version 450

// Invocations in the (x, y, z) dimension
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;

layout(rgba16f, set = 0, binding = 0) uniform image2D small_image;
layout(rgba16f, set = 0, binding = 1) uniform image2D color_image;

layout(push_constant, std430) uniform Params {
	vec2 screen_mult;
	float time;
	float padding;
} params;

// The code we want to execute in each invocation
void main() {
	vec2 mult = params.screen_mult;
	ivec2 uv = ivec2(gl_GlobalInvocationID.xy);

	// get offsets for four adjacent pixels in the small texture
	ivec2 uv_s = ivec2(gl_GlobalInvocationID.xy / mult);
	ivec2 uv_sr = uv_s + ivec2(1, 0);
	ivec2 uv_sd = uv_s + ivec2(0, 1);
	ivec2 uv_sdr = uv_s + ivec2(1, 1);
	vec2 uv_fract = vec2(fract(gl_GlobalInvocationID.x / mult.x), fract(gl_GlobalInvocationID.y / mult.y));

	vec3 colour_tl = imageLoad(small_image, uv_s).rgb;
	vec3 colour_tr = imageLoad(small_image, uv_sr).rgb;
	vec3 colour_bl = imageLoad(small_image, uv_sd).rgb;
	vec3 colour_br = imageLoad(small_image, uv_sdr).rgb;

	// linearly interpolate between the four adjacent pixels
	vec3 mixed_colour = mix(mix(colour_tl, colour_tr, uv_fract.x), mix(colour_bl, colour_br, uv_fract.x), uv_fract.y);
	imageStore(color_image, uv, vec4(mixed_colour, 1.0));
}
@tool
extends CompositorEffect
class_name VHSEffect
const shrink_shader_path:String = "res://vhs/vhs_shrink_effect.glsl"
const blur_shader_path:String = "res://vhs/vhs_blur_effect.glsl"
var rd: RenderingDevice
var shrink_shader: RID
var shrink_pipeline: RID
var blur_shader: RID
var blur_pipeline: RID
# small texture shrink_shader writes to and blur_shader reads from
var small_texture: RID
# glow texture stores overblown values and leaves afterimages
var glow_texture: RID
# multiplier to convert small texture to full screen
var text_mult:Vector2 = Vector2.ONE
# size of screen
var screen_size:Vector2i = Vector2i.ZERO
var push_constant:PackedFloat32Array
var even_scanlines:bool = false
const small_size:Vector2i = Vector2i(550, 320)
const small_groups:Vector2i = ceil(small_size / 8.0)
func _init() -> void:
	print("init")
	#effect_callback_type = EFFECT_CALLBACK_TYPE_POST_TRANSPARENT
	rd = RenderingServer.get_rendering_device()
	RenderingServer.call_on_render_thread(_initialize_compute)
# Clean up our RIDs when this effect is deleted.
func _notification(what: int) -> void:
	if what == NOTIFICATION_PREDELETE:
		if shrink_shader.is_valid():
			rd.free_rid(shrink_shader)
		if blur_shader.is_valid():
			rd.free_rid(blur_shader)
		if small_texture.is_valid():
			rd.free_rid(small_texture)
		if glow_texture.is_valid():
			rd.free_rid(glow_texture)
# Compile our shaders and create the intermediate textures at initialization.
func _initialize_compute() -> void:
	rd = RenderingServer.get_rendering_device()
	print("init compute ", rd)
	if not rd:
		return

	# Compile our shaders.
	var shader_file := load(shrink_shader_path)
	var shader_spirv: RDShaderSPIRV = shader_file.get_spirv()
	shrink_shader = rd.shader_create_from_spirv(shader_spirv)
	if shrink_shader.is_valid():
		shrink_pipeline = rd.compute_pipeline_create(shrink_shader)

	shader_file = load(blur_shader_path)
	shader_spirv = shader_file.get_spirv()
	blur_shader = rd.shader_create_from_spirv(shader_spirv)
	if blur_shader.is_valid():
		blur_pipeline = rd.compute_pipeline_create(blur_shader)

	# Generate the small intermediate texture (small_size, 550x320) that the shrink pass writes to.
	var tf:RDTextureFormat = RDTextureFormat.new()
	tf.texture_type = RenderingDevice.TEXTURE_TYPE_2D
	tf.width = small_size.x
	tf.height = small_size.y
	tf.depth = 1
	tf.array_layers = 1
	tf.mipmaps = 1
	tf.format = RenderingDevice.DATA_FORMAT_R32G32B32A32_SFLOAT
	tf.usage_bits = RenderingDevice.TEXTURE_USAGE_SAMPLING_BIT | \
			RenderingDevice.TEXTURE_USAGE_CAN_UPDATE_BIT | \
			RenderingDevice.TEXTURE_USAGE_STORAGE_BIT
	#+ RenderingDevice.TEXTURE_USAGE_COLOR_ATTACHMENT_BIT + RenderingDevice.TEXTURE_USAGE_STORAGE_BIT + RenderingDevice.TEXTURE_USAGE_CAN_UPDATE_BIT
	var render_texture:Texture2DRD = Texture2DRD.new()
	render_texture.texture_rd_rid = rd.texture_create(tf, RDTextureView.new(), [])
	small_texture = render_texture.texture_rd_rid

	# The glow texture reuses the same settings but only needs a single channel.
	tf.format = RenderingDevice.DATA_FORMAT_R32_SFLOAT
	glow_texture = rd.texture_create(tf, RDTextureView.new(), [])
func set_up_screen_size(size:Vector2i) -> void:
	print("new size ", size)
	screen_size = size
	# Multiplier to convert small-texture coordinates to full-screen coordinates.
	text_mult = Vector2(float(screen_size.x) / float(small_size.x), float(screen_size.y) / float(small_size.y))
	# Create push constant.
	# Must be aligned to 16 bytes and be in the same order as defined in the shader.
	push_constant = PackedFloat32Array([
		text_mult.x,
		text_mult.y,
		0.0, # time, updated every frame
		0.0, # scanline flag, toggled every frame
	])
# Called by the rendering thread every frame.
func _render_callback(p_effect_callback_type: EffectCallbackType, p_render_data: RenderData) -> void:
	if rd and p_effect_callback_type == EFFECT_CALLBACK_TYPE_POST_TRANSPARENT and blur_pipeline.is_valid():
		# If you need to compare the original to the processed version, enable these lines and the effect will turn off every other second.
		#var second:int = floori(Time.get_ticks_msec() / 1000.0)
		#if second % 2 == 0:
		#	return

		# Get our render scene buffers object, this gives us access to our render buffers.
		# Note that implementation differs per renderer hence the need for the cast.
		var render_scene_buffers:RenderSceneBuffers = p_render_data.get_render_scene_buffers()
		if render_scene_buffers:
			# Get our render size, this is the 3D render resolution!
			var size: Vector2i = render_scene_buffers.get_internal_size()
			if size.x == 0 and size.y == 0:
				return
			if size != screen_size:
				set_up_screen_size(size)

			push_constant[2] = float(Time.get_ticks_msec()) # update time, which is used for the random seed
			if even_scanlines:
				push_constant[3] = 1.0
			else:
				push_constant[3] = 0.0
			even_scanlines = !even_scanlines # flip value

			# Loop through views just in case we're doing stereo rendering. No extra cost if this is mono.
			var view_count: int = render_scene_buffers.get_view_count()
			for view in view_count:
				# Get the RID for our colour image, we will be reading from and writing to it.
				var colour_image: RID = render_scene_buffers.get_color_layer(view)

				# Create uniform sets; these are cached, and the cache is cleared if the viewport configuration changes.
				var small_uniform:RDUniform = RDUniform.new()
				small_uniform.uniform_type = RenderingDevice.UNIFORM_TYPE_IMAGE
				small_uniform.binding = 0
				small_uniform.add_id(small_texture)
				var colour_uniform:RDUniform = RDUniform.new()
				colour_uniform.uniform_type = RenderingDevice.UNIFORM_TYPE_IMAGE
				colour_uniform.binding = 1
				colour_uniform.add_id(colour_image)
				var glow_uniform:RDUniform = RDUniform.new()
				glow_uniform.uniform_type = RenderingDevice.UNIFORM_TYPE_IMAGE
				glow_uniform.binding = 2
				glow_uniform.add_id(glow_texture)
				var shrink_uniform_set := UniformSetCacheRD.get_cache(shrink_shader, 0, [small_uniform, colour_uniform, glow_uniform])
				var blur_uniform_set := UniformSetCacheRD.get_cache(blur_shader, 0, [small_uniform, colour_uniform])

				# Run the first compute shader, which downsamples the screen into small_texture and blurs the chroma values.
				var compute_list := rd.compute_list_begin()
				rd.compute_list_bind_compute_pipeline(compute_list, shrink_pipeline)
				rd.compute_list_bind_uniform_set(compute_list, shrink_uniform_set, 0)
				rd.compute_list_set_push_constant(compute_list, push_constant.to_byte_array(), push_constant.size() * 4)
				rd.compute_list_dispatch(compute_list, small_groups.x, small_groups.y, 1)

				# Run the second compute shader, which upscales small_texture back to the full screen.
				rd.compute_list_bind_compute_pipeline(compute_list, blur_pipeline)
				rd.compute_list_bind_uniform_set(compute_list, blur_uniform_set, 0)
				rd.compute_list_set_push_constant(compute_list, push_constant.to_byte_array(), push_constant.size() * 4)
				@warning_ignore("integer_division")
				var x_groups := (screen_size.x - 1) / 8 + 1
				@warning_ignore("integer_division")
				var y_groups := (screen_size.y - 1) / 8 + 1
				rd.compute_list_dispatch(compute_list, x_groups, y_groups, 1)
				rd.compute_list_end()
// vhs_shrink_effect.glsl: downsamples the screen into the small texture, adding noise, chroma blur, and glow.
#[compute]
#version 450

// Invocations in the (x, y, z) dimension
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;

layout(rgba16f, set = 0, binding = 0) uniform image2D small_image;
layout(rgba16f, set = 0, binding = 1) uniform image2D color_image;
layout(r16f, set = 0, binding = 2) uniform image2D glow_image;

layout(push_constant, std430) uniform Params {
	vec2 screen_mult;
	float time;
	float scanline; // even or odd scanline
} params;

const float PHI = 1.61803398874989484820459; // Golden Ratio

float random(vec2 xy, float seed) {
	return fract(tan(distance(xy * PHI, xy) * seed) * xy.x);
}

// based on https://www.shadertoy.com/view/3lycWz
vec3 rgb2yuv(vec3 rgb) {
	float y = 0.299 * rgb.r + 0.587 * rgb.g + 0.114 * rgb.b;
	return vec3(y, 0.493 * (rgb.b - y), 0.877 * (rgb.r - y));
}

vec3 yuv2rgb(vec3 yuv) {
	float y = yuv.x;
	float u = yuv.y;
	float v = yuv.z;
	vec3 rgb = vec3(
		y + 1.0 / 0.877 * v,
		y - 0.39393 * u - 0.58081 * v,
		y + 1.0 / 0.493 * u
	);
	return rgb;
}

void main() {
	vec2 mult = params.screen_mult;
	ivec2 uv = ivec2(gl_GlobalInvocationID.xy);

	// pick one of the possible source pixels randomly for the downsample
	float r1 = random(gl_GlobalInvocationID.xy, 1.0 + params.time);
	float r2 = random(gl_GlobalInvocationID.xy, 2.0 + params.time);
	ivec2 uv_r = ivec2(gl_GlobalInvocationID.xy * mult) + ivec2(floor(r1 * mult.x), floor(r2 * mult.y)); // uv resized to full-screen coordinates
	vec4 colour = imageLoad(color_image, uv_r);

	// handle glow
	float sample_glow = imageLoad(glow_image, uv + ivec2(-1, 0)).r; // offset sample used for hue
	float old_glow = imageLoad(glow_image, uv).r;
	// if current pixel brightness is above 1.0 this will accumulate on the glow texture
	old_glow += clamp((colour.r + colour.g + colour.b) / 3.0 - 1.0, 0.0, 3.0);
	// if any adjacent pixel had no glow, the glow here fades out faster
	float neighbouring_glow = min(min(imageLoad(glow_image, uv + ivec2(1, 0)).r,
			imageLoad(glow_image, uv + ivec2(0, 1)).r),
			min(imageLoad(glow_image, uv + ivec2(-1, 0)).r,
			imageLoad(glow_image, uv + ivec2(0, -1)).r));
	imageStore(glow_image, uv, vec4(old_glow * 0.4 + neighbouring_glow * 0.50));

	// blur the UV of the YUV more than the Y
	vec3 mixed_colour = mix(imageLoad(color_image, uv_r + ivec2(mult.x, 0)), imageLoad(color_image, uv_r - ivec2(mult.x, 0)), 0.5).rgb;
	mixed_colour.g = max(mixed_colour.g, sample_glow);
	vec3 blur = rgb2yuv(mixed_colour);
	colour.rgb = rgb2yuv(colour.rgb);
	colour.r = max(colour.r, old_glow);
	// convert back to rgb, keeping the sharp luma and the blurred chroma
	colour.rgb = yuv2rgb(vec3(colour.r, blur.gb));

	// only update every other scanline each frame
	if (int(round(params.scanline)) == uv.y % 2) {
		imageStore(small_image, uv, colour);
	}
}
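For context, here is a minimal sketch of enabling the effect from code. It assumes Godot 4.3+ (CompositorEffect requires the Forward+ or Mobile renderer), that the two .glsl files live at the res://vhs/ paths referenced in the script, and that the snippet below is attached to a Camera3D; this setup script is illustrative and not part of the gist.
# Hypothetical setup script attached to a Camera3D; not part of the gist.
extends Camera3D

func _ready() -> void:
	# Build a Compositor resource that runs the VHS effect after the transparent pass.
	var effects: Array[CompositorEffect] = [VHSEffect.new()]
	var comp := Compositor.new()
	comp.compositor_effects = effects
	compositor = comp
The effect can also be added in the editor by creating a Compositor resource on a Camera3D or WorldEnvironment node and adding a VHSEffect entry to its Compositor Effects array.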