Refactor the pass and pipeline so that the inputs and canvas size are passed to, and outputs returned from, the build function (formerly setSize). This is now the earliest place to build bind groups, which makes sense, because it's also the earliest place to create textures that are proportional to the size of the canvas.

This commit is contained in:
Rezmason
2021-11-15 00:30:09 -08:00
parent b0a4acdfdb
commit 1b61e304a5
10 changed files with 142 additions and 144 deletions

View File

@@ -54,7 +54,7 @@ export default async (canvas, config) => {
const effectName = config.effect in effects ? config.effect : "plain";
const pipeline = makePipeline(context, [makeRain, makeBloomPass, effects[effectName], makeEndPass]);
await Promise.all(pipeline.map((step) => step.ready));
await Promise.all(pipeline.map((step) => step.loaded));
let frames = 0;
let start = NaN;
@@ -67,14 +67,14 @@ export default async (canvas, config) => {
if (canvasSize[0] !== canvasConfig.size[0] || canvasSize[1] !== canvasConfig.size[1]) {
canvasConfig.size = canvasSize;
canvasContext.configure(canvasConfig);
pipeline.forEach((step) => step.setSize(...canvasSize));
pipeline.reduce((outputs, step) => step.build(canvasSize, outputs), null);
}
device.queue.writeBuffer(timeBuffer, 0, timeUniforms.toBuffer({ seconds: (now - start) / 1000, frames }));
frames++;
const encoder = device.createCommandEncoder();
pipeline.forEach((step) => step.execute(encoder));
pipeline.forEach((step) => step.run(encoder));
// Eventually, when WebGPU allows it, we'll remove the endPass and just copy from our pipeline's output to the canvas texture.
// encoder.copyTextureToTexture({ texture: pipeline[pipeline.length - 1].getOutputs().primary }, { texture: canvasContext.getCurrentTexture() }, canvasSize);
device.queue.submit([encoder.finish()]);