Camera input and the mirror effect now also work in WebGPU. Added the "once" parameter, which renders a single frame. Fixed bugs in gpu-buffer.

Rezmason
2022-08-11 00:08:05 -07:00
parent fc6821f4db
commit 3da3db61f1
10 changed files with 241 additions and 20 deletions


@@ -27,6 +27,7 @@ export default ({ config, device }) => {
backgroundTex = bgTex;
computePipeline = device.createComputePipeline({
layout: "auto",
compute: {
module: imageShader.module,
entryPoint: "computeMain",

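Background for the one-line change above: the WebGPU spec made layout a required field of createComputePipeline, and "auto" asks the implementation to derive the bind group layouts from the shader itself; the new mirrorPass below passes the same value. For contrast, a sketch of the explicit form, reusing the device and imageShader already in scope — the single uniform entry here is hypothetical, not this repo's actual layout:

// Explicit-layout equivalent of layout: "auto"; the entry list is illustrative only.
const bindGroupLayout = device.createBindGroupLayout({
	entries: [{ binding: 0, visibility: GPUShaderStage.COMPUTE, buffer: { type: "uniform" } }],
});
const explicitPipeline = device.createComputePipeline({
	layout: device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }),
	compute: { module: imageShader.module, entryPoint: "computeMain" },
});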

@@ -7,8 +7,9 @@ import makePalettePass from "./palettePass.js";
import makeStripePass from "./stripePass.js";
import makeImagePass from "./imagePass.js";
import makeResurrectionPass from "./resurrectionPass.js";
import makeMirrorPass from "./mirrorPass.js";
import makeEndPass from "./endPass.js";
-import { setupCamera } from "../camera.js";
+import { setupCamera, cameraCanvas, cameraAspectRatio, cameraSize } from "../camera.js";
const loadJS = (src) =>
new Promise((resolve, reject) => {
@@ -30,11 +31,26 @@ const effects = {
image: makeImagePass,
resurrection: makeResurrectionPass,
resurrections: makeResurrectionPass,
mirror: makeMirrorPass,
};
export default async (canvas, config) => {
await loadJS("lib/gl-matrix.js");
if (document.fullscreenEnabled || document.webkitFullscreenEnabled) {
window.ondblclick = () => {
	// Safari still exposes only the webkit-prefixed fullscreen API, so check both forms.
	if (document.fullscreenElement == null && document.webkitFullscreenElement == null) {
		if (canvas.webkitRequestFullscreen != null) {
			canvas.webkitRequestFullscreen();
		} else {
			canvas.requestFullscreen();
		}
	} else if (document.webkitExitFullscreen != null) {
		document.webkitExitFullscreen();
	} else {
		document.exitFullscreen();
	}
};
}
if (config.useCamera) {
await setupCamera();
}
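camera.js itself isn't part of this diff. As a hedged sketch only — assuming setupCamera drives a hidden video element with getUserMedia and blits it into the exported cameraCanvas every frame, which is what the copyExternalImageToTexture call further down implies — the module might look like:

// Hypothetical sketch of js/camera.js; the real module isn't shown in this commit.
const video = document.createElement("video");
export const cameraCanvas = document.createElement("canvas");
export let cameraAspectRatio = 1;
export let cameraSize = [1, 1];
export const setupCamera = async () => {
	video.srcObject = await navigator.mediaDevices.getUserMedia({ video: true });
	await video.play(); // dimensions are known once playback begins
	cameraSize = [video.videoWidth, video.videoHeight];
	cameraAspectRatio = video.videoWidth / video.videoHeight;
	[cameraCanvas.width, cameraCanvas.height] = cameraSize;
	const context2D = cameraCanvas.getContext("2d");
	const update = () => {
		context2D.drawImage(video, 0, 0); // keep the canvas holding the latest camera frame
		requestAnimationFrame(update);
	};
	update();
};

Because ES module exports are live bindings, the importing code sees the values assigned inside setupCamera even though the import statement ran first.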
@@ -57,6 +73,11 @@ export default async (canvas, config) => {
const timeUniforms = structs.from(`struct Time { seconds : f32, frames : i32, };`).Time;
const timeBuffer = makeUniformBuffer(device, timeUniforms);
const cameraTex = device.createTexture({
size: cameraSize,
format: "rgba8unorm",
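// COPY_DST and RENDER_ATTACHMENT are both required of a copyExternalImageToTexture destination; TEXTURE_BINDING lets the shaders sample it.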
usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
});
const context = {
config,
@@ -65,6 +86,9 @@ export default async (canvas, config) => {
canvasContext,
timeBuffer,
canvasFormat,
cameraTex,
cameraAspectRatio,
cameraSize,
};
const effectName = config.effect in effects ? config.effect : "plain";
@@ -89,6 +113,10 @@ export default async (canvas, config) => {
outputs = pipeline.build(canvasSize);
}
if (config.useCamera) {
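// A canvas is a valid external image source; this re-uploads the latest camera frame before each render.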
device.queue.copyExternalImageToTexture({ source: cameraCanvas }, { texture: cameraTex }, cameraSize);
}
device.queue.writeBuffer(timeBuffer, 0, timeUniforms.toBuffer({ seconds: (now - start) / 1000, frames }));
frames++;
@@ -97,7 +125,9 @@ export default async (canvas, config) => {
// Eventually, when WebGPU allows it, we'll remove the endPass and just copy from our pipeline's output to the canvas texture.
// encoder.copyTextureToTexture({ texture: outputs?.primary }, { texture: canvasContext.getCurrentTexture() }, canvasSize);
device.queue.submit([encoder.finish()]);
-	requestAnimationFrame(renderLoop);
+	if (!config.once) {
+		requestAnimationFrame(renderLoop);
+	}
};
requestAnimationFrame(renderLoop);
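The render loop above reschedules itself only while config.once is false, which is all the "once" parameter amounts to. How the flag reaches config isn't shown in this diff; assuming options arrive through the page's query string like the project's other parameters, a hypothetical loader could be:

// Hypothetical query-string parsing; the project's real option handling isn't in this diff.
const params = new URLSearchParams(window.location.search);
const config = {
	effect: params.get("effect") ?? "plain",
	useCamera: params.get("effect") === "mirror", // assumption: the mirror effect is what needs the camera
	once: params.get("once") === "true", // the new flag: render a single frame, then stop
};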

js/webgpu/mirrorPass.js (new file, 109 lines added)

@@ -0,0 +1,109 @@
import { structs } from "../../lib/gpu-buffer.js";
import { makeComputeTarget, makeUniformBuffer, loadShader, makeBindGroup, makePass } from "./utils.js";
let start;
const numTouches = 5;
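// Each touch is stored as [x, y, start time in seconds, unused], presumably so it
// packs into a shader vec4; a start time of -Infinity marks a never-touched slot.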
const touches = Array(numTouches)
.fill()
.map((_) => [0, 0, -Infinity, 0]);
let aspectRatio = 1;
let index = 0;
let touchesChanged = true;
window.onclick = (e) => {
touches[index][0] = 0 + e.clientX / e.target.clientWidth;
touches[index][1] = 1 - e.clientY / e.target.clientHeight;
touches[index][2] = (Date.now() - start) / 1000;
index = (index + 1) % numTouches;
touchesChanged = true;
};
/*
uniforms: {
touches: () => touches,
aspectRatio: () => aspectRatio,
cameraAspectRatio,
}
*/
export default ({ config, device, cameraTex, cameraAspectRatio, timeBuffer }) => {
const assets = [loadShader(device, "shaders/wgsl/mirrorPass.wgsl")];
const linearSampler = device.createSampler({
magFilter: "linear",
minFilter: "linear",
});
let computePipeline;
let configBuffer;
let sceneUniforms;
let sceneBuffer;
let touchUniforms;
let touchBuffer;
let output;
let screenSize;
let computeBindGroup;
const loaded = (async () => {
const [mirrorShader] = await Promise.all(assets);
computePipeline = device.createComputePipeline({
layout: "auto",
compute: {
module: mirrorShader.module,
entryPoint: "computeMain",
},
});
const mirrorShaderUniforms = structs.from(mirrorShader.code);
const configUniforms = mirrorShaderUniforms.Config;
configBuffer = makeUniformBuffer(device, configUniforms, { bloomStrength: config.bloomStrength });
sceneUniforms = mirrorShaderUniforms.Scene;
sceneBuffer = makeUniformBuffer(device, sceneUniforms);
touchUniforms = mirrorShaderUniforms.Touches;
touchBuffer = makeUniformBuffer(device, touchUniforms);
})();
const build = (size, inputs) => {
output?.destroy();
output = makeComputeTarget(device, size);
screenSize = size;
aspectRatio = size[0] / size[1];
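// makeBindGroup appears to bind these resources to @binding(0) through @binding(8) in array order.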
computeBindGroup = makeBindGroup(device, computePipeline, 0, [
configBuffer,
timeBuffer,
sceneBuffer,
touchBuffer,
linearSampler,
inputs.primary.createView(),
inputs.bloom.createView(),
cameraTex.createView(),
output.createView(),
]);
device.queue.writeBuffer(sceneBuffer, 0, sceneUniforms.toBuffer({ screenAspectRatio: aspectRatio, cameraAspectRatio }));
return { primary: output };
};
const run = (encoder) => {
if (touchesChanged) {
touchesChanged = false;
device.queue.writeBuffer(touchBuffer, 0, touchUniforms.toBuffer({ touches }));
}
const computePass = encoder.beginComputePass();
computePass.setPipeline(computePipeline);
computePass.setBindGroup(0, computeBindGroup);
computePass.dispatchWorkgroups(Math.ceil(screenSize[0] / 32), screenSize[1], 1);
computePass.end();
};
start = Date.now();
return makePass(loaded, build, run);
};
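shaders/wgsl/mirrorPass.wgsl isn't included in this excerpt. For orientation only, here's a guess at the uniform structs that structs.from(mirrorShader.code) picks up, inferred from the Config, Scene, and Touches lookups above; the dispatch in run also implies the shader declares @workgroup_size(32, 1, 1). The declarations below are assumptions, not the shader's actual code:

import { structs } from "../../lib/gpu-buffer.js";

// Assumed WGSL uniform declarations, reconstructed from how mirrorPass.js uses them.
const assumedUniforms = structs.from(`
	struct Config { bloomStrength : f32, };
	struct Scene { screenAspectRatio : f32, cameraAspectRatio : f32, };
	struct Touches { touches : array<vec4<f32>, 5>, };
`);
console.log(Object.keys(assumedUniforms)); // expected: ["Config", "Scene", "Touches"]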