From 3da3db61f1099cd398cd7b69452f2e5a865539e7 Mon Sep 17 00:00:00 2001 From: Rezmason Date: Thu, 11 Aug 2022 00:08:05 -0700 Subject: [PATCH] Camera input and mirror effect now also work in WebGPU. Added the "once" parameter, which renders a single frame. Fixed bugs in gpu-buffer. --- TODO.txt | 2 - js/camera.js | 5 +- js/config.js | 1 + js/regl/main.js | 13 ++++- js/regl/mirrorPass.js | 12 +--- js/webgpu/imagePass.js | 1 + js/webgpu/main.js | 34 ++++++++++- js/webgpu/mirrorPass.js | 109 +++++++++++++++++++++++++++++++++++ lib/gpu-buffer.js | 6 +- shaders/wgsl/mirrorPass.wgsl | 78 +++++++++++++++++++++++++ 10 files changed, 241 insertions(+), 20 deletions(-) create mode 100644 js/webgpu/mirrorPass.js create mode 100644 shaders/wgsl/mirrorPass.wgsl diff --git a/TODO.txt b/TODO.txt index 2264ed7..b00c882 100644 --- a/TODO.txt +++ b/TODO.txt @@ -1,7 +1,5 @@ TODO: -Add mirror effect to WebGPU - Reformulate the basis https://buf.com/films/the-matrix-resurrections diff --git a/js/camera.js b/js/camera.js index 9409fc9..3aeeaa1 100644 --- a/js/camera.js +++ b/js/camera.js @@ -6,6 +6,7 @@ cameraCanvas.width = 1; cameraCanvas.height = 1; const context = cameraCanvas.getContext("2d"); let cameraAspectRatio = 1.0; +const cameraSize = [1, 1]; const drawToCanvas = () => { requestAnimationFrame(drawToCanvas); @@ -29,6 +30,8 @@ const setupCamera = async () => { cameraCanvas.width = width; cameraCanvas.height = height; cameraAspectRatio = width / height; + cameraSize[0] = width; + cameraSize[1] = height; video.srcObject = stream; video.play(); @@ -39,4 +42,4 @@ const setupCamera = async () => { } }; -export { cameraCanvas, cameraAspectRatio, setupCamera }; +export { cameraCanvas, cameraAspectRatio, cameraSize, setupCamera }; diff --git a/js/config.js b/js/config.js index e1b532b..7444213 100644 --- a/js/config.js +++ b/js/config.js @@ -299,6 +299,7 @@ const paramMapping = { volumetric: { key: "volumetric", parser: (s) => s.toLowerCase().includes("true") }, loops: { key: "loops", 
parser: (s) => s.toLowerCase().includes("true") }, renderer: { key: "renderer", parser: (s) => s }, + once: { key: "once", parser: (s) => s.toLowerCase().includes("true") }, }; paramMapping.dropLength = paramMapping.raindropLength; paramMapping.angle = paramMapping.slant; diff --git a/js/regl/main.js b/js/regl/main.js index 3e7eecf..da1b49d 100644 --- a/js/regl/main.js +++ b/js/regl/main.js @@ -8,8 +8,8 @@ import makeImagePass from "./imagePass.js"; import makeResurrectionPass from "./resurrectionPass.js"; import makeQuiltPass from "./quiltPass.js"; import makeMirrorPass from "./mirrorPass.js"; +import { setupCamera, cameraCanvas, cameraAspectRatio } from "../camera.js"; import getLKG from "./lkgHelper.js"; -import { setupCamera } from "../camera.js"; const effects = { none: null, @@ -70,17 +70,24 @@ export default async (canvas, config) => { optionalExtensions: ["EXT_color_buffer_half_float", "WEBGL_color_buffer_float", "OES_standard_derivatives"], }); + const cameraTex = regl.texture(cameraCanvas); const lkg = await getLKG(config.useHoloplay, true); // All this takes place in a full screen quad. const fullScreenQuad = makeFullScreenQuad(regl); const effectName = config.effect in effects ? 
config.effect : "plain"; - const pipeline = makePipeline({ regl, config, lkg }, [makeRain, makeBloomPass, effects[effectName], makeQuiltPass]); + const context = { regl, config, lkg, cameraTex, cameraAspectRatio }; + const pipeline = makePipeline(context, [makeRain, makeBloomPass, effects[effectName], makeQuiltPass]); const screenUniforms = { tex: pipeline[pipeline.length - 1].outputs.primary }; const drawToScreen = regl({ uniforms: screenUniforms }); await Promise.all(pipeline.map((step) => step.ready)); const tick = regl.frame(({ viewportWidth, viewportHeight }) => { - // tick.cancel(); + if (config.once) { + tick.cancel(); + } + if (config.useCamera) { + cameraTex(cameraCanvas); + } if (dimensions.width !== viewportWidth || dimensions.height !== viewportHeight) { dimensions.width = viewportWidth; dimensions.height = viewportHeight; diff --git a/js/regl/mirrorPass.js b/js/regl/mirrorPass.js index c5b0606..d6c54d9 100644 --- a/js/regl/mirrorPass.js +++ b/js/regl/mirrorPass.js @@ -1,5 +1,4 @@ import { loadImage, loadText, makePassFBO, makePass } from "./utils.js"; -import { cameraCanvas, cameraAspectRatio } from "../camera.js"; let start; const numClicks = 5; @@ -14,9 +13,7 @@ window.onclick = (e) => { index = (index + 1) % numClicks; }; -export default ({ regl, config }, inputs) => { - const cameraTex = regl.texture(cameraCanvas); - +export default ({ regl, config, cameraTex, cameraAspectRatio }, inputs) => { const output = makePassFBO(regl, config.useHalfFloat); const mirrorPassFrag = loadText("shaders/glsl/mirrorPass.frag.glsl"); const render = regl({ @@ -28,7 +25,7 @@ export default ({ regl, config }, inputs) => { cameraTex, clicks: () => clicks, aspectRatio: () => aspectRatio, - cameraAspectRatio: () => cameraAspectRatio, + cameraAspectRatio, }, framebuffer: output, }); @@ -44,9 +41,6 @@ export default ({ regl, config }, inputs) => { output.resize(w, h); aspectRatio = w / h; }, - () => { - cameraTex(cameraCanvas); - render({ frag: mirrorPassFrag.text() }); - } 
+ () => render({ frag: mirrorPassFrag.text() }) ); }; diff --git a/js/webgpu/imagePass.js b/js/webgpu/imagePass.js index e3f473b..0ef1959 100644 --- a/js/webgpu/imagePass.js +++ b/js/webgpu/imagePass.js @@ -27,6 +27,7 @@ export default ({ config, device }) => { backgroundTex = bgTex; computePipeline = device.createComputePipeline({ + layout: "auto", compute: { module: imageShader.module, entryPoint: "computeMain", diff --git a/js/webgpu/main.js b/js/webgpu/main.js index bb36793..bc2a5b6 100644 --- a/js/webgpu/main.js +++ b/js/webgpu/main.js @@ -7,8 +7,9 @@ import makePalettePass from "./palettePass.js"; import makeStripePass from "./stripePass.js"; import makeImagePass from "./imagePass.js"; import makeResurrectionPass from "./resurrectionPass.js"; +import makeMirrorPass from "./mirrorPass.js"; import makeEndPass from "./endPass.js"; -import { setupCamera } from "../camera.js"; +import { setupCamera, cameraCanvas, cameraAspectRatio, cameraSize } from "../camera.js"; const loadJS = (src) => new Promise((resolve, reject) => { @@ -30,11 +31,26 @@ const effects = { image: makeImagePass, resurrection: makeResurrectionPass, resurrections: makeResurrectionPass, + mirror: makeMirrorPass, }; export default async (canvas, config) => { await loadJS("lib/gl-matrix.js"); + if (document.fullscreenEnabled || document.webkitFullscreenEnabled) { + window.ondblclick = () => { + if (document.fullscreenElement == null) { + if (canvas.webkitRequestFullscreen != null) { + canvas.webkitRequestFullscreen(); + } else { + canvas.requestFullscreen(); + } + } else { + document.exitFullscreen(); + } + }; + } + if (config.useCamera) { await setupCamera(); } @@ -57,6 +73,11 @@ export default async (canvas, config) => { const timeUniforms = structs.from(`struct Time { seconds : f32, frames : i32, };`).Time; const timeBuffer = makeUniformBuffer(device, timeUniforms); + const cameraTex = device.createTexture({ + size: cameraSize, + format: "rgba8unorm", + usage: GPUTextureUsage.TEXTURE_BINDING | 
GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT, + }); const context = { config, @@ -65,6 +86,9 @@ export default async (canvas, config) => { canvasContext, timeBuffer, canvasFormat, + cameraTex, + cameraAspectRatio, + cameraSize, }; const effectName = config.effect in effects ? config.effect : "plain"; @@ -89,6 +113,10 @@ export default async (canvas, config) => { outputs = pipeline.build(canvasSize); } + if (config.useCamera) { + device.queue.copyExternalImageToTexture({ source: cameraCanvas }, { texture: cameraTex }, cameraSize); + } + device.queue.writeBuffer(timeBuffer, 0, timeUniforms.toBuffer({ seconds: (now - start) / 1000, frames })); frames++; @@ -97,7 +125,9 @@ export default async (canvas, config) => { // Eventually, when WebGPU allows it, we'll remove the endPass and just copy from our pipeline's output to the canvas texture. // encoder.copyTextureToTexture({ texture: outputs?.primary }, { texture: canvasContext.getCurrentTexture() }, canvasSize); device.queue.submit([encoder.finish()]); - requestAnimationFrame(renderLoop); + if (!config.once) { + requestAnimationFrame(renderLoop); + } }; requestAnimationFrame(renderLoop); diff --git a/js/webgpu/mirrorPass.js b/js/webgpu/mirrorPass.js new file mode 100644 index 0000000..c9c7692 --- /dev/null +++ b/js/webgpu/mirrorPass.js @@ -0,0 +1,109 @@ +import { structs } from "../../lib/gpu-buffer.js"; +import { makeComputeTarget, makeUniformBuffer, loadShader, makeBindGroup, makePass } from "./utils.js"; + +let start; +const numTouches = 5; +const touches = Array(numTouches) + .fill() + .map((_) => [0, 0, -Infinity, 0]); +let aspectRatio = 1; + +let index = 0; +let touchesChanged = true; +window.onclick = (e) => { + touches[index][0] = 0 + e.clientX / e.srcElement.clientWidth; + touches[index][1] = 1 - e.clientY / e.srcElement.clientHeight; + touches[index][2] = (Date.now() - start) / 1000; + index = (index + 1) % numTouches; + touchesChanged = true; +}; + +/* +uniforms: { + touches: () => touches, + 
aspectRatio: () => aspectRatio, + cameraAspectRatio, +} +*/ + +export default ({ config, device, cameraTex, cameraAspectRatio, timeBuffer }) => { + const assets = [loadShader(device, "shaders/wgsl/mirrorPass.wgsl")]; + + const linearSampler = device.createSampler({ + magFilter: "linear", + minFilter: "linear", + }); + + let computePipeline; + let configBuffer; + let sceneUniforms; + let sceneBuffer; + let touchUniforms; + let touchBuffer; + let output; + let screenSize; + let computeBindGroup; + + const loaded = (async () => { + const [mirrorShader] = await Promise.all(assets); + + computePipeline = device.createComputePipeline({ + layout: "auto", + compute: { + module: mirrorShader.module, + entryPoint: "computeMain", + }, + }); + + const mirrorShaderUniforms = structs.from(mirrorShader.code); + + const configUniforms = mirrorShaderUniforms.Config; + configBuffer = makeUniformBuffer(device, configUniforms, { bloomStrength: config.bloomStrength }); + + sceneUniforms = mirrorShaderUniforms.Scene; + sceneBuffer = makeUniformBuffer(device, sceneUniforms); + + touchUniforms = mirrorShaderUniforms.Touches; + touchBuffer = makeUniformBuffer(device, touchUniforms); + })(); + + const build = (size, inputs) => { + output?.destroy(); + output = makeComputeTarget(device, size); + screenSize = size; + aspectRatio = size[0] / size[1]; + computeBindGroup = makeBindGroup(device, computePipeline, 0, [ + configBuffer, + timeBuffer, + sceneBuffer, + touchBuffer, + linearSampler, + inputs.primary.createView(), + inputs.bloom.createView(), + cameraTex.createView(), + output.createView(), + ]); + + const screenAspectRatio = size[0] / size[1]; + device.queue.writeBuffer(sceneBuffer, 0, sceneUniforms.toBuffer({ screenAspectRatio, cameraAspectRatio })); + + return { primary: output }; + }; + + const run = (encoder) => { + if (touchesChanged) { + touchesChanged = false; + device.queue.writeBuffer(touchBuffer, 0, touchUniforms.toBuffer({ touches })); + } + + const computePass = 
encoder.beginComputePass(); + computePass.setPipeline(computePipeline); + computePass.setBindGroup(0, computeBindGroup); + computePass.dispatchWorkgroups(Math.ceil(screenSize[0] / 32), screenSize[1], 1); + computePass.end(); + }; + + start = Date.now(); + + return makePass(loaded, build, run); +}; diff --git a/lib/gpu-buffer.js b/lib/gpu-buffer.js index 8eca783..a064862 100644 --- a/lib/gpu-buffer.js +++ b/lib/gpu-buffer.js @@ -88,7 +88,7 @@ const getTypeData = (type, attributes, otherStructLayouts) => { const mult = parseInt(fixedSize ?? "0"); const align = elementTypeData.align; - let stride = elementTypeData.byteOffset; + let stride = elementTypeData.size; if (attributes.stride != null) { stride = parseInt(attributes.stride); } @@ -214,7 +214,7 @@ const writeField = (allLayouts, field, value, views, byteOffset, warnMissingFiel } else { const view = views[field.baseType]; const array = value[Symbol.iterator] == null ? [Number(value)] : value; - view.set(array, (byteOffset + field.byteOffset) / 4); + view.set(array, (byteOffset + (field.byteOffset ?? 
0)) / 4);
 	}
 };
 
@@ -227,7 +227,7 @@ const makeGenerator = (layout, structLayouts) => {
 		if (destination == null) {
 			let size = layout.size;
 			const lastField = layout.fields[layout.fields.length - 1];
-			if (lastField.isArray && lastField.identifier in object) {
+			if (lastField.isArray && lastField.identifier in object && !lastField.isFixedSize) {
 				size += lastField.stride * object[lastField.identifier].length;
 			}
 			destination = new ArrayBuffer(size);
diff --git a/shaders/wgsl/mirrorPass.wgsl b/shaders/wgsl/mirrorPass.wgsl
new file mode 100644
index 0000000..7604e17
--- /dev/null
+++ b/shaders/wgsl/mirrorPass.wgsl
@@ -0,0 +1,78 @@
+struct Config {
+	bloomStrength : f32,
+};
+
+struct Time {
+	seconds : f32,
+	frames : i32,
+};
+
+struct Touches {
+	touches : array<vec4<f32>, 5>,
+};
+
+struct Scene {
+	screenAspectRatio : f32,
+	cameraAspectRatio : f32,
+};
+
+@group(0) @binding(0) var<uniform> config : Config;
+@group(0) @binding(1) var<uniform> time : Time;
+@group(0) @binding(2) var<uniform> scene : Scene;
+@group(0) @binding(3) var<uniform> touches : Touches;
+@group(0) @binding(4) var linearSampler : sampler;
+@group(0) @binding(5) var tex : texture_2d<f32>;
+@group(0) @binding(6) var bloomTex : texture_2d<f32>;
+@group(0) @binding(7) var cameraTex : texture_2d<f32>;
+@group(0) @binding(8) var outputTex : texture_storage_2d<rgba8unorm, write>;
+
+struct ComputeInput {
+	@builtin(global_invocation_id) id : vec3<u32>,
+};
+
+fn getBrightness(uv : vec2<f32>, intensity : f32) -> vec4<f32> {
+
+	var primary = textureSampleLevel(tex, linearSampler, uv, 0.0);
+	var bloom = textureSampleLevel(bloomTex, linearSampler, uv, 0.0) * config.bloomStrength;
+
+	return primary * (1.0 + intensity * 0.3) + bloom * 0.5;
+}
+
+@compute @workgroup_size(32, 1, 1) fn computeMain(input : ComputeInput) {
+
+	// Resolve the invocation ID to a texel coordinate
+	var coord = vec2<i32>(input.id.xy);
+	var screenSize = textureDimensions(tex);
+
+	if (coord.x >= screenSize.x) {
+		return;
+	}
+
+	var uv = vec2<f32>(coord) / vec2<f32>(screenSize);
+
+	var intensity = 0.0;
+	for (var i = 0; i < 5; i++) {
+		var touch = touches.touches[i];
+		touch.y = 1.0 - touch.y;
+		var distanceToClick = length((touch.xy - uv) * vec2<f32>(scene.screenAspectRatio, 1.0));
+		var elapsedTime = clamp(time.seconds - touch.z, -100.0, 100.0);
+		var t = distanceToClick - elapsedTime * 0.5;
+		intensity += sin(t * 40.0) / t;
+	}
+	intensity *= 0.2;
+
+	var rippledUV = uv + intensity * 0.001;
+
+	var webcamAspectAdjust = scene.cameraAspectRatio / scene.screenAspectRatio;
+	var webcamTransform = vec2<f32>(1.0, webcamAspectAdjust);
+	if (webcamAspectAdjust > 1.0) {
+		webcamTransform = vec2<f32>(1.0 / webcamAspectAdjust, 1.0);
+	}
+	var webcamUV = ((rippledUV - 0.5) * webcamTransform) + 0.5;
+
+	var webcam = textureSampleLevel(cameraTex, linearSampler, webcamUV, 0.0).rgb;
+	webcam *= mix(vec3<f32>(0.1, 0.3, 0.0), vec3<f32>(0.9, 1.0, 0.7), 1.0 - length(uv - 0.5) * 1.5);
+
+	var code = mix(webcam, vec3<f32>(0.7, 1.0, 0.4), getBrightness(rippledUV, intensity).r);
+	textureStore(outputTex, coord, vec4<f32>(code, 1.0));
+}