Camera input and mirror effect now also work in WebGPU. Added the "once" parameter, which renders a single frame. Fixed bugs in gpu-buffer.

This commit is contained in:
Rezmason
2022-08-11 00:08:05 -07:00
parent fc6821f4db
commit 3da3db61f1
10 changed files with 241 additions and 20 deletions

View File

@@ -1,7 +1,5 @@
TODO:
Add mirror effect to WebGPU
Reformulate the basis
https://buf.com/films/the-matrix-resurrections

View File

@@ -6,6 +6,7 @@ cameraCanvas.width = 1;
cameraCanvas.height = 1;
const context = cameraCanvas.getContext("2d");
let cameraAspectRatio = 1.0;
const cameraSize = [1, 1];
const drawToCanvas = () => {
requestAnimationFrame(drawToCanvas);
@@ -29,6 +30,8 @@ const setupCamera = async () => {
cameraCanvas.width = width;
cameraCanvas.height = height;
cameraAspectRatio = width / height;
cameraSize[0] = width;
cameraSize[1] = height;
video.srcObject = stream;
video.play();
@@ -39,4 +42,4 @@ const setupCamera = async () => {
}
};
export { cameraCanvas, cameraAspectRatio, setupCamera };
export { cameraCanvas, cameraAspectRatio, cameraSize, setupCamera };

View File

@@ -299,6 +299,7 @@ const paramMapping = {
volumetric: { key: "volumetric", parser: (s) => s.toLowerCase().includes("true") },
loops: { key: "loops", parser: (s) => s.toLowerCase().includes("true") },
renderer: { key: "renderer", parser: (s) => s },
once: { key: "once", parser: (s) => s.toLowerCase().includes("true") },
};
paramMapping.dropLength = paramMapping.raindropLength;
paramMapping.angle = paramMapping.slant;

View File

@@ -8,8 +8,8 @@ import makeImagePass from "./imagePass.js";
import makeResurrectionPass from "./resurrectionPass.js";
import makeQuiltPass from "./quiltPass.js";
import makeMirrorPass from "./mirrorPass.js";
import { setupCamera, cameraCanvas, cameraAspectRatio } from "../camera.js";
import getLKG from "./lkgHelper.js";
import { setupCamera } from "../camera.js";
const effects = {
none: null,
@@ -70,17 +70,24 @@ export default async (canvas, config) => {
optionalExtensions: ["EXT_color_buffer_half_float", "WEBGL_color_buffer_float", "OES_standard_derivatives"],
});
const cameraTex = regl.texture(cameraCanvas);
const lkg = await getLKG(config.useHoloplay, true);
// All this takes place in a full screen quad.
const fullScreenQuad = makeFullScreenQuad(regl);
const effectName = config.effect in effects ? config.effect : "plain";
const pipeline = makePipeline({ regl, config, lkg }, [makeRain, makeBloomPass, effects[effectName], makeQuiltPass]);
const context = { regl, config, lkg, cameraTex, cameraAspectRatio };
const pipeline = makePipeline(context, [makeRain, makeBloomPass, effects[effectName], makeQuiltPass]);
const screenUniforms = { tex: pipeline[pipeline.length - 1].outputs.primary };
const drawToScreen = regl({ uniforms: screenUniforms });
await Promise.all(pipeline.map((step) => step.ready));
const tick = regl.frame(({ viewportWidth, viewportHeight }) => {
// tick.cancel();
if (config.once) {
tick.cancel();
}
if (config.useCamera) {
cameraTex(cameraCanvas);
}
if (dimensions.width !== viewportWidth || dimensions.height !== viewportHeight) {
dimensions.width = viewportWidth;
dimensions.height = viewportHeight;

View File

@@ -1,5 +1,4 @@
import { loadImage, loadText, makePassFBO, makePass } from "./utils.js";
import { cameraCanvas, cameraAspectRatio } from "../camera.js";
let start;
const numClicks = 5;
@@ -14,9 +13,7 @@ window.onclick = (e) => {
index = (index + 1) % numClicks;
};
export default ({ regl, config }, inputs) => {
const cameraTex = regl.texture(cameraCanvas);
export default ({ regl, config, cameraTex, cameraAspectRatio }, inputs) => {
const output = makePassFBO(regl, config.useHalfFloat);
const mirrorPassFrag = loadText("shaders/glsl/mirrorPass.frag.glsl");
const render = regl({
@@ -28,7 +25,7 @@ export default ({ regl, config }, inputs) => {
cameraTex,
clicks: () => clicks,
aspectRatio: () => aspectRatio,
cameraAspectRatio: () => cameraAspectRatio,
cameraAspectRatio,
},
framebuffer: output,
});
@@ -44,9 +41,6 @@ export default ({ regl, config }, inputs) => {
output.resize(w, h);
aspectRatio = w / h;
},
() => {
cameraTex(cameraCanvas);
render({ frag: mirrorPassFrag.text() });
}
() => render({ frag: mirrorPassFrag.text() })
);
};

View File

@@ -27,6 +27,7 @@ export default ({ config, device }) => {
backgroundTex = bgTex;
computePipeline = device.createComputePipeline({
layout: "auto",
compute: {
module: imageShader.module,
entryPoint: "computeMain",

View File

@@ -7,8 +7,9 @@ import makePalettePass from "./palettePass.js";
import makeStripePass from "./stripePass.js";
import makeImagePass from "./imagePass.js";
import makeResurrectionPass from "./resurrectionPass.js";
import makeMirrorPass from "./mirrorPass.js";
import makeEndPass from "./endPass.js";
import { setupCamera } from "../camera.js";
import { setupCamera, cameraCanvas, cameraAspectRatio, cameraSize } from "../camera.js";
const loadJS = (src) =>
new Promise((resolve, reject) => {
@@ -30,11 +31,26 @@ const effects = {
image: makeImagePass,
resurrection: makeResurrectionPass,
resurrections: makeResurrectionPass,
mirror: makeMirrorPass,
};
export default async (canvas, config) => {
await loadJS("lib/gl-matrix.js");
if (document.fullscreenEnabled || document.webkitFullscreenEnabled) {
window.ondblclick = () => {
if (document.fullscreenElement == null) {
if (canvas.webkitRequestFullscreen != null) {
canvas.webkitRequestFullscreen();
} else {
canvas.requestFullscreen();
}
} else {
document.exitFullscreen();
}
};
}
if (config.useCamera) {
await setupCamera();
}
@@ -57,6 +73,11 @@ export default async (canvas, config) => {
const timeUniforms = structs.from(`struct Time { seconds : f32, frames : i32, };`).Time;
const timeBuffer = makeUniformBuffer(device, timeUniforms);
const cameraTex = device.createTexture({
size: cameraSize,
format: "rgba8unorm",
usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
});
const context = {
config,
@@ -65,6 +86,9 @@ export default async (canvas, config) => {
canvasContext,
timeBuffer,
canvasFormat,
cameraTex,
cameraAspectRatio,
cameraSize,
};
const effectName = config.effect in effects ? config.effect : "plain";
@@ -89,6 +113,10 @@ export default async (canvas, config) => {
outputs = pipeline.build(canvasSize);
}
if (config.useCamera) {
device.queue.copyExternalImageToTexture({ source: cameraCanvas }, { texture: cameraTex }, cameraSize);
}
device.queue.writeBuffer(timeBuffer, 0, timeUniforms.toBuffer({ seconds: (now - start) / 1000, frames }));
frames++;
@@ -97,7 +125,9 @@ export default async (canvas, config) => {
// Eventually, when WebGPU allows it, we'll remove the endPass and just copy from our pipeline's output to the canvas texture.
// encoder.copyTextureToTexture({ texture: outputs?.primary }, { texture: canvasContext.getCurrentTexture() }, canvasSize);
device.queue.submit([encoder.finish()]);
if (!config.once) {
requestAnimationFrame(renderLoop);
}
};
requestAnimationFrame(renderLoop);

109
js/webgpu/mirrorPass.js Normal file
View File

@@ -0,0 +1,109 @@
import { structs } from "../../lib/gpu-buffer.js";
import { makeComputeTarget, makeUniformBuffer, loadShader, makeBindGroup, makePass } from "./utils.js";
// Shared ripple state for the mirror pass: the five most recent clicks,
// written round-robin. Each entry is [u, v, clickTimeSeconds, unused],
// matching the WGSL `Touches` uniform (vec4<f32> array).
let start;
const numTouches = 5;
const touches = Array(numTouches)
	.fill()
	.map((_) => [0, 0, -Infinity, 0]);
let aspectRatio = 1;
let index = 0;
// Set whenever `touches` changes and needs re-uploading to the GPU.
let touchesChanged = true;
// NOTE(review): assigning window.onclick clobbers any other click handler —
// consider addEventListener if more listeners are ever needed.
window.onclick = (e) => {
	// e.target replaces the deprecated, non-standard e.srcElement.
	touches[index][0] = 0 + e.clientX / e.target.clientWidth;
	touches[index][1] = 1 - e.clientY / e.target.clientHeight; // flip to bottom-left origin
	touches[index][2] = (Date.now() - start) / 1000;
	index = (index + 1) % numTouches;
	touchesChanged = true;
};
/*
uniforms: {
touches: () => touches,
aspectRatio: () => aspectRatio,
cameraAspectRatio,
}
*/
// Mirror pass: composites the live camera feed behind the rain via a WGSL
// compute shader, distorted by ripples expanding from recent clicks.
// Receives the shared WebGPU context (device, camera texture, time buffer)
// and returns a pass object built by makePass(loaded, build, run).
export default ({ config, device, cameraTex, cameraAspectRatio, timeBuffer }) => {
	const assets = [loadShader(device, "shaders/wgsl/mirrorPass.wgsl")];

	const linearSampler = device.createSampler({
		magFilter: "linear",
		minFilter: "linear",
	});

	// Populated asynchronously in `loaded`; presumably makePass awaits
	// `loaded` before build/run execute — TODO confirm.
	let computePipeline;
	let configBuffer;
	let sceneUniforms;
	let sceneBuffer;
	let touchUniforms;
	let touchBuffer;
	let output;
	let screenSize;
	let computeBindGroup;

	const loaded = (async () => {
		const [mirrorShader] = await Promise.all(assets);

		computePipeline = device.createComputePipeline({
			layout: "auto",
			compute: {
				module: mirrorShader.module,
				entryPoint: "computeMain",
			},
		});

		// Uniform buffer layouts are parsed straight out of the WGSL source.
		const mirrorShaderUniforms = structs.from(mirrorShader.code);
		const configUniforms = mirrorShaderUniforms.Config;
		configBuffer = makeUniformBuffer(device, configUniforms, { bloomStrength: config.bloomStrength });
		sceneUniforms = mirrorShaderUniforms.Scene;
		sceneBuffer = makeUniformBuffer(device, sceneUniforms);
		touchUniforms = mirrorShaderUniforms.Touches;
		touchBuffer = makeUniformBuffer(device, touchUniforms);
	})();

	// Called on (re)size: rebuild the output target and bind group, and
	// upload the scene uniforms that depend on the screen size.
	const build = (size, inputs) => {
		output?.destroy();
		output = makeComputeTarget(device, size);
		screenSize = size;
		// Compute the aspect ratio once and share it (the original computed
		// size[0] / size[1] twice).
		const screenAspectRatio = size[0] / size[1];
		aspectRatio = screenAspectRatio;
		computeBindGroup = makeBindGroup(device, computePipeline, 0, [
			configBuffer,
			timeBuffer,
			sceneBuffer,
			touchBuffer,
			linearSampler,
			inputs.primary.createView(),
			inputs.bloom.createView(),
			cameraTex.createView(),
			output.createView(),
		]);
		device.queue.writeBuffer(sceneBuffer, 0, sceneUniforms.toBuffer({ screenAspectRatio, cameraAspectRatio }));
		return { primary: output };
	};

	// Called every frame: upload touch data only when a click changed it,
	// then dispatch the compute shader across the output texture.
	const run = (encoder) => {
		if (touchesChanged) {
			touchesChanged = false;
			device.queue.writeBuffer(touchBuffer, 0, touchUniforms.toBuffer({ touches }));
		}

		const computePass = encoder.beginComputePass();
		computePass.setPipeline(computePipeline);
		computePass.setBindGroup(0, computeBindGroup);
		// Each workgroup covers 32 horizontal texels — matches the shader's
		// @workgroup_size(32, 1, 1); ceil handles non-multiple-of-32 widths.
		computePass.dispatchWorkgroups(Math.ceil(screenSize[0] / 32), screenSize[1], 1);
		computePass.end();
	};

	start = Date.now();

	return makePass(loaded, build, run);
};

View File

@@ -88,7 +88,7 @@ const getTypeData = (type, attributes, otherStructLayouts) => {
const mult = parseInt(fixedSize ?? "0");
const align = elementTypeData.align;
let stride = elementTypeData.byteOffset;
let stride = elementTypeData.size;
if (attributes.stride != null) {
stride = parseInt(attributes.stride);
}
@@ -214,7 +214,7 @@ const writeField = (allLayouts, field, value, views, byteOffset, warnMissingFiel
} else {
const view = views[field.baseType];
const array = value[Symbol.iterator] == null ? [Number(value)] : value;
view.set(array, (byteOffset + field.byteOffset) / 4);
view.set(array, (byteOffset + (field.byteOffset ?? 0)) / 4);
}
};
@@ -227,7 +227,7 @@ const makeGenerator = (layout, structLayouts) => {
if (destination == null) {
let size = layout.size;
const lastField = layout.fields[layout.fields.length - 1];
if (lastField.isArray && lastField.identifier in object) {
if (lastField.isArray && lastField.identifier in object && !lastField.isFixedSize) {
size += lastField.stride * object[lastField.identifier].length;
}
destination = new ArrayBuffer(size);

View File

@@ -0,0 +1,78 @@
// Mirror pass compute shader: composites the webcam feed behind the rain,
// with ripples expanding outward from the most recent click locations.

struct Config {
	bloomStrength : f32,
};

struct Time {
	seconds : f32,
	frames : i32,
};

// Up to five recent clicks: xy = click UV, z = click time in seconds
// (-Infinity when unused), w unused.
struct Touches {
	touches : array<vec4<f32>, 5>,
};

struct Scene {
	screenAspectRatio : f32,
	cameraAspectRatio : f32,
};

@group(0) @binding(0) var<uniform> config : Config;
@group(0) @binding(1) var<uniform> time : Time;
@group(0) @binding(2) var<uniform> scene : Scene;
@group(0) @binding(3) var<uniform> touches : Touches;
@group(0) @binding(4) var linearSampler : sampler;
@group(0) @binding(5) var tex : texture_2d<f32>; // upstream (rain) pass output
@group(0) @binding(6) var bloomTex : texture_2d<f32>; // bloom pass output
@group(0) @binding(7) var cameraTex : texture_2d<f32>; // webcam frame
@group(0) @binding(8) var outputTex : texture_storage_2d<rgba8unorm, write>;

struct ComputeInput {
	@builtin(global_invocation_id) id : vec3<u32>,
};

// Combined rain + bloom brightness at a UV, boosted slightly by ripple intensity.
fn getBrightness(uv : vec2<f32>, intensity : f32) -> vec4<f32> {
	var primary = textureSampleLevel(tex, linearSampler, uv, 0.0);
	var bloom = textureSampleLevel(bloomTex, linearSampler, uv, 0.0) * config.bloomStrength;
	return primary * (1.0 + intensity * 0.3) + bloom * 0.5;
}

@compute @workgroup_size(32, 1, 1) fn computeMain(input : ComputeInput) {
	// Resolve the invocation ID to a texel coordinate
	var coord = vec2<i32>(input.id.xy);
	var screenSize = textureDimensions(tex);

	// The 32-wide workgroups may overshoot the texture edge; skip those threads.
	if (coord.x >= screenSize.x) {
		return;
	}

	var uv = vec2<f32>(coord) / vec2<f32>(screenSize);

	// Sum concentric sine waves expanding from each click position over time.
	var intensity = 0.0;
	for (var i = 0; i < 5; i++) {
		var touch = touches.touches[i];
		// Flip y back to the same top-left-origin space as uv (the JS side
		// stores it bottom-left-origin).
		touch.y = 1.0 - touch.y;
		var distanceToClick = length((touch.xy - uv) * vec2(scene.screenAspectRatio, 1.0));
		var elapsedTime = clamp(time.seconds - touch.z, -100.0, 100.0);
		var t = distanceToClick - elapsedTime * 0.5;
		// NOTE(review): t reaches zero at the wavefront, making this term
		// non-finite there — confirm the visual artifact is intended.
		intensity += sin(t * 40.0) / t;
	}
	intensity *= 0.2;

	// Displace the sample position slightly by the ripple intensity.
	var rippledUV = uv + intensity * 0.001;

	// Fit the webcam image to the screen, preserving its aspect ratio by
	// scaling UVs about the center along the narrower axis.
	var webcamAspectAdjust = scene.cameraAspectRatio / scene.screenAspectRatio;
	var webcamTransform = vec2<f32>(1.0, webcamAspectAdjust);
	if (webcamAspectAdjust > 1.0) {
		webcamTransform = vec2<f32>(1.0 / webcamAspectAdjust, 1.0);
	}
	var webcamUV = ((rippledUV - 0.5) * webcamTransform) + 0.5;

	var webcam = textureSampleLevel(cameraTex, linearSampler, webcamUV, 0.0).rgb;
	// Tint the webcam green and darken it toward the screen corners (vignette).
	webcam *= mix(vec3<f32>(0.1, 0.3, 0.0), vec3<f32>(0.9, 1.0, 0.7), 1.0 - length(uv - 0.5) * 1.5);
	// Where the rain is bright, lift the color toward the glyph green.
	var code = mix(webcam, vec3<f32>(0.7, 1.0, 0.4), getBrightness(rippledUV, intensity).r);

	textureStore(outputTex, coord, vec4<f32>(code, 1.0));
}