@erichlof
Created May 14, 2025 18:40
Making more interesting and complex models using CSG techniques
precision highp float;
precision highp int;
precision highp sampler2D;
#include <pathtracing_uniforms_and_defines>
uniform sampler2D tShape_DataTexture;
uniform sampler2D tAABB_DataTexture;
uniform vec3 uOutlineColor;
uniform float uOutlineIntensity;
//float InvTextureWidth = 0.000244140625; // (1 / 4096 texture width)
//float InvTextureWidth = 0.00048828125; // (1 / 2048 texture width)
//float InvTextureWidth = 0.0009765625; // (1 / 1024 texture width)
#define INV_TEXTURE_WIDTH 0.00048828125
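// note: the 2048.0 modulus used in the texel-addressing code below must correspond to this INV_TEXTURE_WIDTH (1 / 2048)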
#define N_QUADS 1
#define N_BOXES 1
vec3 rayOrigin, rayDirection;
// recorded intersection data:
vec3 hitNormal, hitColor;
vec2 hitUV;
float hitObjectID;
float hitDistance = INFINITY;
int hitType = -100;
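// -100 serves as a 'no hit yet' sentinel for hitType; it is overwritten with the material type of whatever surface the ray hits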
struct Quad { vec3 normal; vec3 v0; vec3 v1; vec3 v2; vec3 v3; vec3 color; int type; };
struct Box { vec3 minCorner; vec3 maxCorner; vec3 color; int type; };
Quad quads[N_QUADS];
Box boxes[N_BOXES];
#include <pathtracing_random_functions>
#include <pathtracing_calc_fresnel_reflectance>
#include <pathtracing_sphere_intersect>
#include <pathtracing_unit_sphere_intersect>
#include <pathtracing_unit_cylinder_intersect>
#include <pathtracing_unit_cone_intersect>
#include <pathtracing_unit_paraboloid_intersect>
#include <pathtracing_unit_box_intersect>
#include <pathtracing_box_intersect>
#include <pathtracing_boundingbox_intersect>
#include <pathtracing_box_interior_intersect>
#include <pathtracing_quad_intersect>
#include <pathtracing_sample_quad_light>
vec2 stackLevels[28];
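// each BVH traversal stack entry is a vec2: .x = BVH node index, .y = the ray's t value at that node's bounding box (see traversal loop below)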
//vec4 boxNodeData0 corresponds to: .x = aabbMin.x, .y = aabbMin.y, .z = aabbMin.z, .w = aabbMax.x,
//vec4 boxNodeData1 corresponds to: .x = aabbMax.y, .y = aabbMax.z, .z = primitiveCount, .w = leafOrChild_ID
void GetBoxNodeData(const in float i, inout vec4 boxNodeData0, inout vec4 boxNodeData1)
{
// each bounding box's data is encoded in 2 rgba(or xyzw) texture slots
float ix2 = i * 2.0;
// (ix2 + 0.0) corresponds to: .x = aabbMin.x, .y = aabbMin.y, .z = aabbMin.z, .w = aabbMax.x,
// (ix2 + 1.0) corresponds to: .x = aabbMax.y, .y = aabbMax.z, .z = primitiveCount, .w = leafOrChild_ID
ivec2 uv0 = ivec2( mod(ix2 + 0.0, 2048.0), (ix2 + 0.0) * INV_TEXTURE_WIDTH ); // data0
ivec2 uv1 = ivec2( mod(ix2 + 1.0, 2048.0), (ix2 + 1.0) * INV_TEXTURE_WIDTH ); // data1
boxNodeData0 = texelFetch(tAABB_DataTexture, uv0, 0);
boxNodeData1 = texelFetch(tAABB_DataTexture, uv1, 0);
}
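// example: for node index i = 2000.0, ix2 = 4000.0, so data0 is at texel (4000 mod 2048, floor(4000 / 2048)) = (1952, 1)
// and data1 is at texel (4001 mod 2048, floor(4001 / 2048)) = (1953, 1) of the 2048-wide tAABB_DataTexture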
//---------------------------------------------------------------------------------------
float SceneIntersect( )
//---------------------------------------------------------------------------------------
{
mat4 invTransformMatrix, hitMatrix;
vec4 currentBoxNodeData0, nodeAData0, nodeBData0, tmpNodeData0;
vec4 currentBoxNodeData1, nodeAData1, nodeBData1, tmpNodeData1;
vec4 sd0, sd1, sd2, sd3, sd4, sd5, sd6, sd7;
vec3 inverseDir = 1.0 / rayDirection;
vec3 normal;
vec3 rObjOrigin, rObjDirection;
vec3 n, hitPoint;
vec2 currentStackData, stackDataA, stackDataB, tmpStackData;
ivec2 uv0, uv1, uv2, uv3, uv4, uv5, uv6, uv7;
float d;
float t = INFINITY;
float stackptr = 0.0;
float id = 0.0;
float shapeID = 0.0;
int objectCount = 0;
hitObjectID = -INFINITY;
int isRayExiting = FALSE;
int skip = FALSE;
int shapeLookupNeeded = FALSE;
GetBoxNodeData(stackptr, currentBoxNodeData0, currentBoxNodeData1);
currentStackData = vec2(stackptr, BoundingBoxIntersect(currentBoxNodeData0.xyz, vec3(currentBoxNodeData0.w, currentBoxNodeData1.xy), rayOrigin, inverseDir));
stackLevels[0] = currentStackData;
skip = (currentStackData.y < t) ? TRUE : FALSE;
while (true)
{
if (skip == FALSE)
{
// decrease pointer by 1 (0.0 is root level, 27.0 is maximum depth)
if (--stackptr < 0.0) // went past the root level, terminate loop
break;
currentStackData = stackLevels[int(stackptr)];
if (currentStackData.y >= t)
continue;
GetBoxNodeData(currentStackData.x, currentBoxNodeData0, currentBoxNodeData1);
}
skip = FALSE; // reset skip
if (currentBoxNodeData1.z == 0.0) // == 0.0 signifies an inner node
{
GetBoxNodeData(currentBoxNodeData1.w, nodeAData0, nodeAData1); // leftChild
GetBoxNodeData(currentBoxNodeData1.w + 1.0, nodeBData0, nodeBData1); // rightChild
stackDataA = vec2(currentBoxNodeData1.w, BoundingBoxIntersect(nodeAData0.xyz, vec3(nodeAData0.w, nodeAData1.xy), rayOrigin, inverseDir));
stackDataB = vec2(currentBoxNodeData1.w + 1.0, BoundingBoxIntersect(nodeBData0.xyz, vec3(nodeBData0.w, nodeBData1.xy), rayOrigin, inverseDir));
// first sort the branch node data so that 'a' is the smallest
if (stackDataB.y < stackDataA.y)
{
tmpStackData = stackDataB;
stackDataB = stackDataA;
stackDataA = tmpStackData;
tmpNodeData0 = nodeBData0; tmpNodeData1 = nodeBData1;
nodeBData0 = nodeAData0; nodeBData1 = nodeAData1;
nodeAData0 = tmpNodeData0; nodeAData1 = tmpNodeData1;
} // branch 'b' now has the larger rayT value of 'a' and 'b'
if (stackDataB.y < t) // see if branch 'b' (the larger rayT) needs to be processed
{
currentStackData = stackDataB;
currentBoxNodeData0 = nodeBData0;
currentBoxNodeData1 = nodeBData1;
skip = TRUE; // this will prevent the stackptr from decreasing by 1
}
if (stackDataA.y < t) // see if branch 'a' (the smaller rayT) needs to be processed
{
if (skip == TRUE) // if larger branch 'b' needed to be processed also,
stackLevels[int(stackptr++)] = stackDataB; // cue larger branch 'b' for future round
// also, increase pointer by 1
currentStackData = stackDataA;
currentBoxNodeData0 = nodeAData0;
currentBoxNodeData1 = nodeAData1;
skip = TRUE; // this will prevent the stackptr from decreasing by 1
}
continue;
} // end if (currentBoxNodeData1.z == 0.0) // inner node
/*
// debug leaf AABB visualization
d = BoxIntersect(currentBoxNodeData0.xyz, vec3(currentBoxNodeData0.w, currentBoxNodeData1.xy), rayOrigin, rayDirection, n, isRayExiting);
if (d > 0.0 && d < t)
{
t = d;
hitNormal = n;
hitColor = vec3(1,1,0);
hitType = REFR;
hitObjectID = float(objectCount);
} */
// else this is a leaf
// each shape's data is encoded in 8 rgba(or xyzw) texture slots
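// slot layout as used by this shader: slots 0-3 = the shape's inverse transform matrix (loaded below as the 4 columns of a mat4),
// slot 4: .x = shape type (0 = box, 1 = sphere, 2 = cylinder, 3 = cone, 4 = paraboloid), .y = material type,
// slot 5: .rgb = shape color; slots 6 and 7 are fetched in the lookup block further below but are not used by this particular shader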
id = 8.0 * currentBoxNodeData1.w;
uv0 = ivec2( mod(id + 0.0, 2048.0), (id + 0.0) * INV_TEXTURE_WIDTH );
uv1 = ivec2( mod(id + 1.0, 2048.0), (id + 1.0) * INV_TEXTURE_WIDTH );
uv2 = ivec2( mod(id + 2.0, 2048.0), (id + 2.0) * INV_TEXTURE_WIDTH );
uv3 = ivec2( mod(id + 3.0, 2048.0), (id + 3.0) * INV_TEXTURE_WIDTH );
uv4 = ivec2( mod(id + 4.0, 2048.0), (id + 4.0) * INV_TEXTURE_WIDTH );
invTransformMatrix = mat4( texelFetch(tShape_DataTexture, uv0, 0),
texelFetch(tShape_DataTexture, uv1, 0),
texelFetch(tShape_DataTexture, uv2, 0),
texelFetch(tShape_DataTexture, uv3, 0) );
sd4 = texelFetch(tShape_DataTexture, uv4, 0);
// transform ray into shape's object space
rObjOrigin = vec3( invTransformMatrix * vec4(rayOrigin, 1.0) );
rObjDirection = vec3( invTransformMatrix * vec4(rayDirection, 0.0) );
if (sd4.x == 0.0)
d = UnitBoxIntersect(rObjOrigin, rObjDirection, n);
else if (sd4.x == 1.0)
d = UnitSphereIntersect(rObjOrigin, rObjDirection, n);
else if (sd4.x == 2.0)
d = UnitCylinderIntersect(rObjOrigin, rObjDirection, n);
else if (sd4.x == 3.0)
d = UnitConeIntersect(rObjOrigin, rObjDirection, n);
else if (sd4.x == 4.0)
d = UnitParaboloidIntersect(rObjOrigin, rObjDirection, n);
if (d > 0.0 && d < t)
{
t = d;
hitNormal = n;
hitMatrix = invTransformMatrix; // save winning matrix for hitNormal code below
shapeID = id;
shapeLookupNeeded = TRUE;
objectCount++;
}
} // end while (TRUE)
if (shapeLookupNeeded == TRUE)
{
uv0 = ivec2( mod(shapeID + 0.0, 2048.0), (shapeID + 0.0) * INV_TEXTURE_WIDTH );
uv1 = ivec2( mod(shapeID + 1.0, 2048.0), (shapeID + 1.0) * INV_TEXTURE_WIDTH );
uv2 = ivec2( mod(shapeID + 2.0, 2048.0), (shapeID + 2.0) * INV_TEXTURE_WIDTH );
uv3 = ivec2( mod(shapeID + 3.0, 2048.0), (shapeID + 3.0) * INV_TEXTURE_WIDTH );
uv4 = ivec2( mod(shapeID + 4.0, 2048.0), (shapeID + 4.0) * INV_TEXTURE_WIDTH );
uv5 = ivec2( mod(shapeID + 5.0, 2048.0), (shapeID + 5.0) * INV_TEXTURE_WIDTH );
uv6 = ivec2( mod(shapeID + 6.0, 2048.0), (shapeID + 6.0) * INV_TEXTURE_WIDTH );
uv7 = ivec2( mod(shapeID + 7.0, 2048.0), (shapeID + 7.0) * INV_TEXTURE_WIDTH );
sd0 = texelFetch(tShape_DataTexture, uv0, 0);
sd1 = texelFetch(tShape_DataTexture, uv1, 0);
sd2 = texelFetch(tShape_DataTexture, uv2, 0);
sd3 = texelFetch(tShape_DataTexture, uv3, 0);
sd4 = texelFetch(tShape_DataTexture, uv4, 0);
sd5 = texelFetch(tShape_DataTexture, uv5, 0);
sd6 = texelFetch(tShape_DataTexture, uv6, 0);
sd7 = texelFetch(tShape_DataTexture, uv7, 0);
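// hitMatrix is the winning shape's inverse transform, so transposing its upper-left 3x3 gives the
// inverse-transpose needed to correctly transform the object-space normal back into world space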
hitNormal = transpose(mat3(hitMatrix)) * hitNormal;
hitColor = sd5.rgb;//vec3(1,0,1);
//hitUV =
hitType = int(sd4.y);//LIGHT;
hitObjectID = float(objectCount);
}
objectCount++;
d = QuadIntersect( quads[0].v0, quads[0].v1, quads[0].v2, quads[0].v3, rayOrigin, rayDirection, FALSE );
if (d < t)
{
t = d;
hitNormal = quads[0].normal;
hitColor = quads[0].color;
hitType = quads[0].type;
hitObjectID = float(objectCount);
}
objectCount++;
d = BoxInteriorIntersect( boxes[0].minCorner, boxes[0].maxCorner, rayOrigin, rayDirection, n );
if (d < t && n != vec3(0,0,-1))
{
t = d;
hitNormal = n;
hitType = DIFF;
if (n == vec3(1,0,0)) // left wall
hitColor = vec3(0.7, 0.05, 0.05);
else if (n == vec3(-1,0,0)) // right wall
hitColor = vec3(0.05, 0.05, 0.7);
else
hitColor = vec3(1); // floor, ceiling, back wall
hitObjectID = float(objectCount);
}
return t;
} // end float SceneIntersect( )
//-----------------------------------------------------------------------------------------------------------------------------
vec3 CalculateRadiance( out vec3 objectNormal, out vec3 objectColor, out float objectID, out float pixelSharpness )
//-----------------------------------------------------------------------------------------------------------------------------
{
Quad light = quads[0];
vec3 accumCol = vec3(0);
vec3 mask = vec3(1);
vec3 reflectionMask = vec3(1);
vec3 reflectionRayOrigin = vec3(0);
vec3 reflectionRayDirection = vec3(0);
vec3 dirToLight;
vec3 x, n, nl;
vec3 absorptionCoefficient;
float t;
float nc, nt, ratioIoR, Re, Tr;
float weight;
float previousObjectID;
int reflectionBounces = -1;
int diffuseCount = 0;
int previousIntersecType = -100;
hitType = -100;
int bounceIsSpecular = TRUE;
int sampleLight = FALSE;
int willNeedReflectionRay = FALSE;
int isReflectionTime = FALSE;
int reflectionNeedsToBeSharp = FALSE;
for (int bounces = 0; bounces < 8; bounces++)
{
if (isReflectionTime == TRUE)
reflectionBounces++;
previousIntersecType = hitType;
previousObjectID = hitObjectID;
t = SceneIntersect();
if (bounces == 0)
hitDistance = t;
if (t == INFINITY)
{
// this makes the object edges sharp against the background
if (bounces == 0)
pixelSharpness = 1.0;
if (willNeedReflectionRay == TRUE)
{
mask = reflectionMask;
rayOrigin = reflectionRayOrigin;
rayDirection = reflectionRayDirection;
willNeedReflectionRay = FALSE;
bounceIsSpecular = TRUE;
sampleLight = FALSE;
isReflectionTime = TRUE;
continue;
}
break;
}
// useful data
n = normalize(hitNormal);
nl = dot(n, rayDirection) < 0.0 ? n : -n;
x = rayOrigin + rayDirection * t;
if (bounces == 0)
{
objectID = hitObjectID;
}
if (isReflectionTime == FALSE && diffuseCount == 0)// && hitObjectID != previousObjectID)
{
objectNormal += n;
objectColor += hitColor;
}
/* if (reflectionNeedsToBeSharp == TRUE && reflectionBounces == 0)
{
objectNormal += n;
objectColor += hitColor;
} */
if (hitType == LIGHT)
{
if (diffuseCount == 0 && isReflectionTime == FALSE)
pixelSharpness = 1.0;
if (isReflectionTime == TRUE && bounceIsSpecular == TRUE)
{
//objectNormal += nl;
//objectColor = hitColor;
objectID += hitObjectID;
}
if (bounceIsSpecular == TRUE || sampleLight == TRUE)
accumCol += mask * hitColor;
if (willNeedReflectionRay == TRUE)
{
mask = reflectionMask;
rayOrigin = reflectionRayOrigin;
rayDirection = reflectionRayDirection;
willNeedReflectionRay = FALSE;
bounceIsSpecular = TRUE;
sampleLight = FALSE;
isReflectionTime = TRUE;
continue;
}
// reached a light, so we can exit
break;
} // end if (hitType == LIGHT)
// if we get here and sampleLight is still TRUE, shadow ray failed to find the light source
// the ray hit an occluding object along its way to the light
if (sampleLight == TRUE)
{
if (willNeedReflectionRay == TRUE)
{
mask = reflectionMask;
rayOrigin = reflectionRayOrigin;
rayDirection = reflectionRayDirection;
willNeedReflectionRay = FALSE;
bounceIsSpecular = TRUE;
sampleLight = FALSE;
isReflectionTime = TRUE;
continue;
}
break;
}
if (hitType == DIFF) // Ideal DIFFUSE reflection
{
diffuseCount++;
mask *= hitColor;
bounceIsSpecular = FALSE;
if (diffuseCount == 1 && rand() < 0.5)
{
mask *= 2.0;
// choose random Diffuse sample vector
rayDirection = randomCosWeightedDirectionInHemisphere(nl);
rayOrigin = x + nl * uEPS_intersect;
continue;
}
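// otherwise, directly sample the quad area light; the 2.0 factor below compensates for the 50% chance
// (rand() < 0.5 above) of instead taking the indirect diffuse bounce on the first diffuse hit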
dirToLight = sampleQuadLight(x, nl, quads[0], weight);
mask *= diffuseCount == 1 ? 2.0 : 1.0;
mask *= weight;
rayDirection = dirToLight;
rayOrigin = x + nl * uEPS_intersect;
sampleLight = TRUE;
continue;
} // end if (hitType == DIFF)
if (hitType == SPEC) // Ideal SPECULAR reflection
{
mask *= hitColor;
rayDirection = reflect(rayDirection, nl);
rayOrigin = x + nl * uEPS_intersect;
continue;
}
if (hitType == REFR) // Ideal dielectric REFRACTION
{
nc = 1.0; // IOR of Air
nt = 1.5; // IOR of common Glass
Re = calcFresnelReflectance(rayDirection, n, nc, nt, ratioIoR);
Tr = 1.0 - Re;
if (Re == 1.0)
{
rayDirection = reflect(rayDirection, nl);
rayOrigin = x + nl * uEPS_intersect;
continue;
}
//if (diffuseCount == 0 && hitObjectID != previousObjectID && n == nl)
if (bounces == 0 && n == nl)
{
reflectionMask = mask * Re;
reflectionRayDirection = reflect(rayDirection, nl); // reflect ray from surface
reflectionRayOrigin = x + nl * uEPS_intersect;
willNeedReflectionRay = TRUE;
}
// transmit ray through surface
mask *= hitColor;
mask *= Tr;
rayDirection = refract(rayDirection, nl, ratioIoR);
rayOrigin = x - nl * uEPS_intersect;
if (diffuseCount == 1 && isReflectionTime == FALSE)
bounceIsSpecular = TRUE; // turn on refracting caustics
continue;
} // end if (hitType == REFR)
if (hitType == COAT) // Diffuse object underneath with ClearCoat on top
{
nc = 1.0; // IOR of Air
nt = 1.4; // IOR of Clear Coat
Re = calcFresnelReflectance(rayDirection, nl, nc, nt, ratioIoR);
Tr = 1.0 - Re;
//if (diffuseCount == 0 && hitObjectID != previousObjectID)
if (bounces == 0)
{
reflectionMask = mask * Re;
reflectionRayDirection = reflect(rayDirection, nl); // reflect ray from surface
reflectionRayOrigin = x + nl * uEPS_intersect;
willNeedReflectionRay = TRUE;
}
diffuseCount++;
mask *= Tr;
mask *= hitColor;
bounceIsSpecular = FALSE;
if (diffuseCount == 1 && rand() < 0.5)
{
mask *= 2.0;
// choose random Diffuse sample vector
rayDirection = randomCosWeightedDirectionInHemisphere(nl);
rayOrigin = x + nl * uEPS_intersect;
continue;
}
dirToLight = sampleQuadLight(x, nl, quads[0], weight);
mask *= diffuseCount == 1 ? 2.0 : 1.0;
mask *= weight;
rayDirection = dirToLight;
rayOrigin = x + nl * uEPS_intersect;
sampleLight = TRUE;
continue;
} //end if (hitType == COAT)
} // end for (int bounces = 0; bounces < 8; bounces++)
return max(vec3(0), accumCol);
} // end vec3 CalculateRadiance( out vec3 objectNormal, out vec3 objectColor, out float objectID, out float pixelSharpness )
//-----------------------------------------------------------------------
void SetupScene(void)
//-----------------------------------------------------------------------
{
vec3 z = vec3(0);// No color value, Black
vec3 L1 = vec3(1.0, 1.0, 1.0) * 5.0;// Bright light
float wallRadius = 50.0;
float lightRadius = 10.0;
quads[0] = Quad( vec3(0,-1, 0), vec3(-lightRadius, wallRadius-1.0,-lightRadius), vec3(lightRadius, wallRadius-1.0,-lightRadius), vec3(lightRadius, wallRadius-1.0, lightRadius), vec3(-lightRadius, wallRadius-1.0, lightRadius), L1, LIGHT);// Quad Area Light on ceiling
boxes[0] = Box( vec3(-wallRadius), vec3(wallRadius), vec3(1), DIFF);// the Cornell Box interior
}
//#include <pathtracing_main>
// tentFilter from Peter Shirley's 'Realistic Ray Tracing (2nd Edition)' book, pg. 60
float tentFilter(float x) // input: x: a random float(0.0 to 1.0), output: a filtered float (-1.0 to +1.0)
{
return (x < 0.5) ? sqrt(2.0 * x) - 1.0 : 1.0 - sqrt(2.0 - (2.0 * x));
}
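// tentFilter(0.0) = -1.0, tentFilter(0.5) = 0.0, tentFilter(1.0) = +1.0; offsets cluster around 0.0 (the pixel center)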
void main( void )
{
vec3 camRight = vec3( uCameraMatrix[0][0], uCameraMatrix[0][1], uCameraMatrix[0][2]);
vec3 camUp = vec3( uCameraMatrix[1][0], uCameraMatrix[1][1], uCameraMatrix[1][2]);
vec3 camForward = vec3(-uCameraMatrix[2][0], -uCameraMatrix[2][1], -uCameraMatrix[2][2]);
// the following is not needed - three.js has a built-in uniform named cameraPosition
//vec3 camPos = vec3( uCameraMatrix[3][0], uCameraMatrix[3][1], uCameraMatrix[3][2]);
// calculate unique seed for rng() function
seed = uvec2(uFrameCounter, uFrameCounter + 1.0) * uvec2(gl_FragCoord);
// initialize rand() variables
randNumber = 0.0; // the final randomly-generated number (range: 0.0 to 1.0)
blueNoise = texelFetch(tBlueNoiseTexture, ivec2(mod(floor(gl_FragCoord.xy), 128.0)), 0).r;
vec2 pixelOffset;
if (uSampleCounter < 50.0)
{
pixelOffset = vec2( tentFilter(rand()), tentFilter(rand()) );
pixelOffset *= uCameraIsMoving ? 0.5 : 1.0;
}
else pixelOffset = vec2( tentFilter(uRandomVec2.x), tentFilter(uRandomVec2.y) );
// we must map pixelPos into the range -1.0 to +1.0: (-1.0,-1.0) is bottom-left screen corner, (1.0,1.0) is top-right
vec2 pixelPos = ((gl_FragCoord.xy + vec2(0.5) + pixelOffset) / uResolution) * 2.0 - 1.0;
vec3 rayDir = uUseOrthographicCamera ? camForward :
normalize( (camRight * pixelPos.x * uULen) + (camUp * pixelPos.y * uVLen) + camForward );
// depth of field
vec3 focalPoint = uFocusDistance * rayDir;
float randomAngle = rng() * TWO_PI; // pick random point on aperture
float randomRadius = rng() * uApertureSize;
vec3 randomAperturePos = ((camRight * cos(randomAngle)) + (camUp * sin(randomAngle))) * sqrt(randomRadius);
// point on aperture to focal point
vec3 finalRayDir = normalize(focalPoint - randomAperturePos);
rayOrigin = cameraPosition + randomAperturePos;
rayOrigin += !uUseOrthographicCamera ? vec3(0) :
(camRight * pixelPos.x * uULen * 100.0) + (camUp * pixelPos.y * uVLen * 100.0);
rayDirection = finalRayDir;
SetupScene();
// Edge Detection - we don't want to blur edges where surface normals change abruptly (e.g. room wall corners), where objects overlap each other (e.g. the edge of a foreground sphere in front of another sphere right behind it),
// or where there is an abrupt color variation on the same smooth surface, even if the surface normals are similar (e.g. a checkerboard pattern). All of these cases must be kept as sharp as possible - no blur filter will be applied to them.
vec3 objectNormal = vec3(0);
vec3 objectColor = vec3(0);
float objectID = -INFINITY;
float pixelSharpness = 0.0;
// perform path tracing and get resulting pixel color
vec4 currentPixel = vec4( vec3(CalculateRadiance(objectNormal, objectColor, objectID, pixelSharpness)), 0.0 );
// if difference between normals of neighboring pixels is less than the first edge0 threshold, the white edge line effect is considered off (0.0)
float edge0 = 0.2; // edge0 is the minimum difference required between normals of neighboring pixels to start becoming a white edge line
// any difference between normals of neighboring pixels that is between edge0 and edge1 smoothly ramps up the white edge line brightness (smoothstep 0.0-1.0)
float edge1 = 0.6; // once the difference between normals of neighboring pixels is >= this edge1 threshold, the white edge line is considered fully bright (1.0)
float difference_Nx = fwidth(objectNormal.x);
float difference_Ny = fwidth(objectNormal.y);
float difference_Nz = fwidth(objectNormal.z);
float normalDifference = smoothstep(edge0, edge1, difference_Nx) + smoothstep(edge0, edge1, difference_Ny) + smoothstep(edge0, edge1, difference_Nz);
float objectDifference = min(fwidth(objectID), 1.0);
float colorDifference = (fwidth(objectColor.r) + fwidth(objectColor.g) + fwidth(objectColor.b)) > 0.0 ? 1.0 : 0.0;
// white outlines - in the style of MAGI, makers of the CSG models (like Light Cycles and Tanks) for the movie TRON in 1982
vec3 lineColor = (colorDifference > 0.0 || normalDifference >= 0.9) ? (uOutlineColor * uOutlineIntensity) : vec3(0);
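// fade the outline with distance from the camera: exp(-hitDistance * 0.01) is near 1.0 for close hits and falls toward 0.0 for distant ones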
currentPixel.rgb += mix(vec3(0), lineColor, clamp(exp(-hitDistance * 0.01), 0.0, 1.0));
vec4 previousPixel = texelFetch(tPreviousTexture, ivec2(gl_FragCoord.xy), 0);
if (uFrameCounter == 1.0) // camera just moved after being still
{
previousPixel.rgb *= (1.0 / uPreviousSampleCount) * 0.5; // essentially previousPixel *= 0.5, like below
previousPixel.a = 0.0;
currentPixel.rgb *= 0.5;
}
else if (uCameraIsMoving) // camera is currently moving
{
previousPixel.rgb *= 0.5; // motion-blur trail amount (old image)
previousPixel.a = 0.0;
currentPixel.rgb *= 0.5; // brightness of new image (noisy)
}
if (colorDifference > 0.0 || normalDifference >= 0.9 || objectDifference >= 1.0)
pixelSharpness = 1.0; // 1.0 means an edge pixel
currentPixel.a = pixelSharpness;
// Eventually, all edge-containing pixels' .a (alpha channel) values will converge to 1.0,
// which keeps them from getting blurred by the box-blur filter, thus retaining sharpness over time.
if (previousPixel.a == 1.0) // an edge or a light source
currentPixel.a = 1.0;
pc_fragColor = vec4(previousPixel.rgb + currentPixel.rgb, currentPixel.a);
}