Gouging a simpler project out of the larger project.

This commit is contained in:
Rezmason
2023-08-22 10:08:48 -07:00
parent 91201830f8
commit c231935475
168 changed files with 21 additions and 5613 deletions

View File

@@ -1,45 +0,0 @@
// TODO: switch to video-based texture
// TODO: mipmap?

// Offscreen <video>/<canvas> pair exposing the user's camera as a drawable
// 2D canvas (consumed elsewhere as a texture source). Until setupCamera
// succeeds, the canvas stays a 1x1 placeholder.
const video = document.createElement("video");
const cameraCanvas = document.createElement("canvas");
cameraCanvas.width = 1;
cameraCanvas.height = 1;
const context = cameraCanvas.getContext("2d");
let cameraAspectRatio = 1.0;
const cameraSize = [1, 1];

// Copies the latest video frame onto cameraCanvas every animation frame.
const drawToCanvas = () => {
	requestAnimationFrame(drawToCanvas);
	context.drawImage(video, 0, 0);
};

// Requests camera access, sizes the canvas/aspect state from the stream's
// reported settings, and starts the per-frame copy loop. On any failure
// (denied permission, no camera, autoplay rejection) it logs a warning and
// leaves the placeholder state in place.
const setupCamera = async () => {
	try {
		const stream = await navigator.mediaDevices.getUserMedia({
			video: {
				width: { min: 800, ideal: 1280 },
				frameRate: { ideal: 60 },
			},
			audio: false,
		});
		const videoTrack = stream.getVideoTracks()[0];
		const { width, height } = videoTrack.getSettings();
		video.width = width;
		video.height = height;
		cameraCanvas.width = width;
		cameraCanvas.height = height;
		cameraAspectRatio = width / height;
		cameraSize[0] = width;
		cameraSize[1] = height;
		video.srcObject = stream;
		// play() returns a promise; await it so autoplay rejections are
		// handled by this catch instead of becoming unhandled rejections.
		await video.play();
		drawToCanvas();
	} catch (e) {
		console.warn(`Camera not initialized: ${e}`);
	}
};

export { cameraCanvas, cameraAspectRatio, cameraSize, setupCamera };

View File

@@ -1,33 +0,0 @@
import { loadImage, loadText, makePassFBO, makePass } from "./utils.js";
// Multiplies the rendered rain and bloom by a loaded-in background image.
const defaultBGURL = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0a/Flammarion_Colored.jpg/917px-Flammarion_Colored.jpg";

export default ({ regl, config }, inputs) => {
	const output = makePassFBO(regl, config.useHalfFloat);
	// Allow the config to override the default background image.
	const background = loadImage(regl, "bgURL" in config ? config.bgURL : defaultBGURL);
	const imagePassFrag = loadText("shaders/glsl/imagePass.frag.glsl");
	const render = regl({
		frag: regl.prop("frag"),
		uniforms: {
			backgroundTex: background.texture,
			tex: inputs.primary,
			bloomTex: inputs.bloom,
		},
		framebuffer: output,
	});
	const ready = Promise.all([background.loaded, imagePassFrag.loaded]);
	const resize = (w, h) => output.resize(w, h);
	const execute = (shouldRender) => {
		if (!shouldRender) {
			return;
		}
		render({ frag: imagePassFrag.text() });
	};
	return makePass({ primary: output }, ready, resize, execute);
};

View File

@@ -1,89 +0,0 @@
// Captured device record from a Looking Glass display (hardwareVersion
// "portrait"), used as a stand-in when no live device is found and
// useRecordedDevice is set — see the default export below.
const recordedDevice = {
buttons: [0, 0, 0, 0],
// Factory calibration; each numeric field is wrapped in a { value } object,
// matching the shape reported by HoloPlay Core (flattened by interpretDevice).
calibration: {
DPI: { value: 324 },
center: { value: 0.15018756687641144 },
configVersion: "3.0",
flipImageX: { value: 0 },
flipImageY: { value: 0 },
flipSubp: { value: 0 },
fringe: { value: 0 },
invView: { value: 1 },
pitch: { value: 52.58013153076172 },
screenH: { value: 2048 },
screenW: { value: 1536 },
slope: { value: -7.145165920257568 },
verticalAngle: { value: 0 },
viewCone: { value: 40 },
},
// Native quilt layout for this hardware: 8x6 tiles on a 3840x3840 quilt.
defaultQuilt: {
quiltAspect: 0.75,
quiltX: 3840,
quiltY: 3840,
tileX: 8,
tileY: 6,
},
hardwareVersion: "portrait",
hwid: "LKG-P11063",
index: 0,
joystickIndex: -1,
state: "ok",
unityIndex: 1,
windowCoords: [1440, 900],
};
// Converts a raw Looking Glass device record into the flat parameter object
// the renderer consumes. A null device yields a disabled single-tile config.
const interpretDevice = (device) => {
	if (device == null) {
		return { enabled: false, tileX: 1, tileY: 1 };
	}
	const fov = 15;
	// Flatten the { key: { value } } calibration entries, dropping any
	// entries whose value is missing.
	const calibration = {};
	for (const [key, entry] of Object.entries(device.calibration)) {
		if (entry.value != null) {
			calibration[key] = entry.value;
		}
	}
	const screenInches = calibration.screenW / calibration.DPI;
	const pitch = calibration.pitch * screenInches * Math.cos(Math.atan(1.0 / calibration.slope));
	const tilt = (calibration.screenH / (calibration.screenW * calibration.slope)) * -(calibration.flipImageX * 2 - 1);
	const subp = 1 / (calibration.screenW * 3);
	const { quiltX, quiltY, tileX, tileY } = device.defaultQuilt;
	// Fraction of the quilt texture actually covered by whole tiles.
	const quiltViewPortion = [(Math.floor(quiltX / tileX) * tileX) / quiltX, (Math.floor(quiltY / tileY) * tileY) / quiltY];
	return {
		...device.defaultQuilt,
		...calibration,
		pitch,
		tilt,
		subp,
		quiltViewPortion,
		fov,
		enabled: true,
	};
};
// Resolves Looking Glass device parameters. Without useHoloplay this
// short-circuits to a disabled config; otherwise it asks HoloPlay Core for
// connected devices, optionally falling back to the recorded device record.
export default async (useHoloplay = false, useRecordedDevice = false) => {
	if (!useHoloplay) {
		return interpretDevice(null);
	}
	const HoloPlayCore = await import("../../lib/holoplaycore.module.js");
	// The client reports asynchronously; connection errors resolve to null
	// rather than rejecting, so a missing driver degrades gracefully.
	const device = await new Promise((resolve) => {
		new HoloPlayCore.Client(
			(data) => resolve(data.devices?.[0]),
			() => resolve(null)
		);
	});
	if (device != null) {
		return interpretDevice(device);
	}
	return interpretDevice(useRecordedDevice ? recordedDevice : null);
};

View File

@@ -3,25 +3,6 @@ import { makeFullScreenQuad, makePipeline } from "./utils.js";
import makeRain from "./rainPass.js";
import makeBloomPass from "./bloomPass.js";
import makePalettePass from "./palettePass.js";
import makeStripePass from "./stripePass.js";
import makeImagePass from "./imagePass.js";
import makeQuiltPass from "./quiltPass.js";
import makeMirrorPass from "./mirrorPass.js";
import { setupCamera, cameraCanvas, cameraAspectRatio } from "../camera.js";
import getLKG from "./lkgHelper.js";
const effects = {
none: null,
plain: makePalettePass,
palette: makePalettePass,
customStripes: makeStripePass,
stripes: makeStripePass,
pride: makeStripePass,
transPride: makeStripePass,
trans: makeStripePass,
image: makeImagePass,
mirror: makeMirrorPass,
};
const dimensions = { width: 1, height: 1 };
@@ -35,7 +16,7 @@ const loadJS = (src) =>
});
export default async (canvas, config) => {
await Promise.all([loadJS("lib/regl.min.js"), loadJS("lib/gl-matrix.js")]);
await Promise.all([loadJS("lib/regl.js"), loadJS("lib/gl-matrix.js")]);
const resize = () => {
const devicePixelRatio = window.devicePixelRatio ?? 1;
@@ -78,14 +59,10 @@ export default async (canvas, config) => {
const regl = createREGL({ canvas, pixelRatio: 1, extensions, optionalExtensions });
const cameraTex = regl.texture(cameraCanvas);
const lkg = await getLKG(config.useHoloplay, true);
// All this takes place in a full screen quad.
const fullScreenQuad = makeFullScreenQuad(regl);
const effectName = config.effect in effects ? config.effect : "palette";
const context = { regl, config, lkg, cameraTex, cameraAspectRatio };
const pipeline = makePipeline(context, [makeRain, makeBloomPass, effects[effectName], makeQuiltPass]);
const context = { regl, config };
const pipeline = makePipeline(context, [makeRain, makeBloomPass, makePalettePass]);
const screenUniforms = { tex: pipeline[pipeline.length - 1].outputs.primary };
const drawToScreen = regl({ uniforms: screenUniforms });
await Promise.all(pipeline.map((step) => step.ready));

View File

@@ -1,50 +0,0 @@
import { loadText, makePassFBO, makePass } from "./utils.js";
// Shared click-ripple state: the numClicks most recent clicks, stored as
// flat [x, y, timeSeconds] triples. Coordinates are normalized to the
// clicked element, with y flipped so the origin is at the bottom; the time
// is seconds since the pass was created (start is set by the pass below).
let start;
const numClicks = 5;
const clicks = Array(numClicks).fill([0, 0, -Infinity]).flat();
let aspectRatio = 1;
let index = 0;
window.onclick = (e) => {
	// e.target replaces the deprecated e.srcElement alias (same element).
	clicks[index * 3 + 0] = 0 + e.clientX / e.target.clientWidth;
	clicks[index * 3 + 1] = 1 - e.clientY / e.target.clientHeight;
	clicks[index * 3 + 2] = (Date.now() - start) / 1000;
	index = (index + 1) % numClicks;
};
// Composites the rain/bloom with the camera texture, feeding the shader the
// recent click positions (see the click state above) for ripple effects.
export default ({ regl, config, cameraTex, cameraAspectRatio }, inputs) => {
	const output = makePassFBO(regl, config.useHalfFloat);
	const mirrorPassFrag = loadText("shaders/glsl/mirrorPass.frag.glsl");
	const render = regl({
		frag: regl.prop("frag"),
		uniforms: {
			time: regl.context("time"),
			tex: inputs.primary,
			bloomTex: inputs.bloom,
			cameraTex,
			// Functions, so the latest module-level state is read per draw.
			clicks: () => clicks,
			aspectRatio: () => aspectRatio,
			cameraAspectRatio,
		},
		framebuffer: output,
	});
	start = Date.now();
	const resize = (w, h) => {
		output.resize(w, h);
		aspectRatio = w / h;
	};
	const execute = (shouldRender) => {
		if (!shouldRender) {
			return;
		}
		render({ frag: mirrorPassFrag.text() });
	};
	return makePass({ primary: output }, Promise.all([mirrorPassFrag.loaded]), resize, execute);
};

View File

@@ -1,34 +0,0 @@
import { loadText, makePassFBO, makePass } from "./utils.js";
// Renders the quilt texture through the Looking Glass quilt shader, passing
// the device parameters as uniforms. When no Looking Glass device is
// enabled, the input is forwarded untouched.
export default ({ regl, config, lkg }, inputs) => {
	if (!lkg.enabled) {
		// No device: this pass is a no-op passthrough.
		return makePass({ primary: inputs.primary });
	}
	const output = makePassFBO(regl, config.useHalfFloat);
	const quiltPassFrag = loadText("shaders/glsl/quiltPass.frag.glsl");
	const render = regl({
		frag: regl.prop("frag"),
		uniforms: {
			quiltTexture: inputs.primary,
			// All device parameters (pitch, tilt, subp, ...) become uniforms.
			...lkg,
		},
		framebuffer: output,
	});
	const execute = (shouldRender) => {
		if (!shouldRender) {
			return;
		}
		render({ frag: quiltPassFrag.text() });
	};
	return makePass({ primary: output }, Promise.all([quiltPassFrag.loaded]), (w, h) => output.resize(w, h), execute);
};

View File

@@ -29,7 +29,7 @@ const blVert = [1, 0];
const brVert = [1, 1];
const quadVertices = [tlVert, trVert, brVert, tlVert, brVert, blVert];
export default ({ regl, config, lkg }) => {
export default ({ regl, config }) => {
// The volumetric mode multiplies the number of columns
// to reach the desired density, and then overlaps them
const volumetric = config.volumetric;
@@ -198,8 +198,6 @@ export default ({ regl, config, lkg }) => {
screenSize: regl.prop("screenSize"),
},
viewport: regl.prop("viewport"),
attributes: {
aPosition: quadPositions,
aCorner: Array(numQuads).fill(quadVertices),
@@ -218,17 +216,11 @@ export default ({ regl, config, lkg }) => {
mat4.rotateY(transform, transform, (Math.PI * 1) / 4);
mat4.translate(transform, transform, vec3.fromValues(0, 0, -1));
mat4.scale(transform, transform, vec3.fromValues(1, 1, 2));
} else if (lkg.enabled) {
mat4.translate(transform, transform, vec3.fromValues(0, 0, -1.1));
mat4.scale(transform, transform, vec3.fromValues(1, 1, 1));
mat4.scale(transform, transform, vec3.fromValues(0.15, 0.15, 0.15));
} else {
mat4.translate(transform, transform, vec3.fromValues(0, 0, -1));
}
const camera = mat4.create();
const vantagePoints = [];
return makePass(
{
primary: output,
@@ -248,48 +240,16 @@ export default ({ regl, config, lkg }) => {
output.resize(w, h);
const aspectRatio = w / h;
const [numTileColumns, numTileRows] = [lkg.tileX, lkg.tileY];
const numVantagePoints = numTileRows * numTileColumns;
const tileWidth = Math.floor(w / numTileColumns);
const tileHeight = Math.floor(h / numTileRows);
vantagePoints.length = 0;
for (let row = 0; row < numTileRows; row++) {
for (let column = 0; column < numTileColumns; column++) {
const index = column + row * numTileColumns;
const camera = mat4.create();
if (volumetric && config.isometric) {
if (aspectRatio > 1) {
mat4.ortho(camera, -1.5 * aspectRatio, 1.5 * aspectRatio, -1.5, 1.5, -1000, 1000);
} else {
mat4.ortho(camera, -1.5, 1.5, -1.5 / aspectRatio, 1.5 / aspectRatio, -1000, 1000);
}
} else if (lkg.enabled) {
mat4.perspective(camera, (Math.PI / 180) * lkg.fov, lkg.quiltAspect, 0.0001, 1000);
const distanceToTarget = -1; // TODO: Get from somewhere else
let vantagePointAngle = (Math.PI / 180) * lkg.viewCone * (index / (numVantagePoints - 1) - 0.5);
if (isNaN(vantagePointAngle)) {
vantagePointAngle = 0;
}
const xOffset = distanceToTarget * Math.tan(vantagePointAngle);
mat4.translate(camera, camera, vec3.fromValues(xOffset, 0, 0));
camera[8] = -xOffset / (distanceToTarget * Math.tan((Math.PI / 180) * 0.5 * lkg.fov) * lkg.quiltAspect); // Is this right??
} else {
mat4.perspective(camera, (Math.PI / 180) * 90, aspectRatio, 0.0001, 1000);
}
const viewport = {
x: column * tileWidth,
y: row * tileHeight,
width: tileWidth,
height: tileHeight,
};
vantagePoints.push({ camera, viewport });
if (volumetric && config.isometric) {
if (aspectRatio > 1) {
mat4.ortho(camera, -1.5 * aspectRatio, 1.5 * aspectRatio, -1.5, 1.5, -1000, 1000);
} else {
mat4.ortho(camera, -1.5, 1.5, -1.5 / aspectRatio, 1.5 / aspectRatio, -1000, 1000);
}
} else {
mat4.perspective(camera, (Math.PI / 180) * 90, aspectRatio, 0.0001, 1000);
}
[screenSize[0], screenSize[1]] = aspectRatio > 1 ? [1, aspectRatio] : [1 / aspectRatio, 1];
},
(shouldRender) => {
@@ -305,9 +265,7 @@ export default ({ regl, config, lkg }) => {
framebuffer: output,
});
for (const vantagePoint of vantagePoints) {
render({ ...vantagePoint, transform, screenSize, vert: rainPassVert.text(), frag: rainPassFrag.text() });
}
render({ camera, transform, screenSize, vert: rainPassVert.text(), frag: rainPassFrag.text() });
}
}
);

View File

@@ -1,73 +0,0 @@
import colorToRGB from "../colorToRGB.js";
import { loadText, make1DTexture, makePassFBO, makePass } from "./utils.js";
// Multiplies the rendered rain and bloom by a 1D gradient texture
// generated from the passed-in color sequence
// This shader introduces noise into the renders, to avoid banding
// Trans pride flag colors (light blue, pink, white, pink, light blue),
// each repeated 3 times so the 1D gradient texture shows hard-edged stripes.
const transPrideStripeColors = [
	{ space: "rgb", values: [0.36, 0.81, 0.98] },
	{ space: "rgb", values: [0.96, 0.66, 0.72] },
	{ space: "rgb", values: [1.0, 1.0, 1.0] },
	{ space: "rgb", values: [0.96, 0.66, 0.72] },
	{ space: "rgb", values: [0.36, 0.81, 0.98] },
].flatMap((color) => Array(3).fill(color));

// Six-stripe pride flag colors, each repeated twice.
const prideStripeColors = [
	{ space: "rgb", values: [0.89, 0.01, 0.01] },
	{ space: "rgb", values: [1.0, 0.55, 0.0] },
	{ space: "rgb", values: [1.0, 0.93, 0.0] },
	{ space: "rgb", values: [0.0, 0.5, 0.15] },
	{ space: "rgb", values: [0.0, 0.3, 1.0] },
	{ space: "rgb", values: [0.46, 0.03, 0.53] },
].flatMap((color) => Array(2).fill(color));
// Maps the rain/bloom render through a striped 1D gradient texture chosen
// by the effect name (or supplied directly via config.stripeColors).
export default ({ regl, config }, inputs) => {
	const output = makePassFBO(regl, config.useHalfFloat);
	const { backgroundColor, cursorColor, glintColor, cursorIntensity, glintIntensity, ditherMagnitude } = config;
	// Pick the stripe palette: explicit config wins, then the pride preset,
	// otherwise the trans pride preset.
	let stripeColors = transPrideStripeColors;
	if ("stripeColors" in config) {
		stripeColors = config.stripeColors;
	} else if (config.effect === "pride") {
		stripeColors = prideStripeColors;
	}
	// Expand the colors into 1D texture data (RGB + full alpha).
	const stripeTex = make1DTexture(
		regl,
		stripeColors.map((color) => [...colorToRGB(color), 1])
	);
	const stripePassFrag = loadText("shaders/glsl/stripePass.frag.glsl");
	const render = regl({
		frag: regl.prop("frag"),
		uniforms: {
			backgroundColor: colorToRGB(backgroundColor),
			cursorColor: colorToRGB(cursorColor),
			glintColor: colorToRGB(glintColor),
			cursorIntensity,
			glintIntensity,
			ditherMagnitude,
			tex: inputs.primary,
			bloomTex: inputs.bloom,
			stripeTex,
		},
		framebuffer: output,
	});
	const execute = (shouldRender) => {
		if (!shouldRender) {
			return;
		}
		render({ frag: stripePassFrag.text() });
	};
	return makePass({ primary: output }, stripePassFrag.loaded, (w, h) => output.resize(w, h), execute);
};

View File

@@ -1,162 +0,0 @@
import { structs } from "../../lib/gpu-buffer.js";
import { makeComputeTarget, loadShader, makeUniformBuffer, makeBindGroup, makePass } from "./utils.js";
// const makePyramid = makeComputeTarget;
// const destroyPyramid = (pyramid) => pyramid?.destroy();
// const makePyramidLevelView = (pyramid, level) =>
// pyramid.createView({
// baseMipLevel: level,
// mipLevelCount: 1,
// dimension: "2d",
// });
// const makePyramidViews = (pyramid) => [pyramid.createView()];
// A "pyramid" here is an array of compute targets, each level half the
// resolution of the previous one (level 0 is the full scaled size).
const makePyramid = (device, size, pyramidHeight) => {
	const levels = [];
	for (let level = 0; level < pyramidHeight; level++) {
		const levelSize = size.map((x) => Math.floor(x / 2 ** level));
		levels.push(makeComputeTarget(device, levelSize));
	}
	return levels;
};

// Releases every texture in a pyramid; tolerates a missing pyramid.
const destroyPyramid = (pyramid) => {
	pyramid?.forEach((texture) => texture.destroy());
};

// A view of one pyramid level's texture.
const makePyramidLevelView = (pyramid, level) => pyramid[level].createView();

// Views of every level, base to tip.
const makePyramidViews = (pyramid) => pyramid.map((tex) => tex.createView());
// The bloom pass is basically an added blur of the rain pass's high-pass output.
// The blur approximation is the sum of a pyramid of downscaled, blurred textures.
// Builds the bloom pass. Downscaled copies of the high-pass input are
// blurred horizontally then vertically across two image pyramids, and the
// second pyramid's levels are summed into the bloom output.
export default ({ config, device }) => {
const pyramidHeight = 4;
const bloomSize = config.bloomSize;
const bloomStrength = config.bloomStrength;
const bloomRadius = 2; // Looks better with more, but is more costly
const enabled = bloomSize > 0 && bloomStrength > 0;
// If there's no bloom to apply, return a no-op pass with an empty bloom texture
if (!enabled) {
const emptyTexture = makeComputeTarget(device, [1, 1]);
return makePass("No Bloom", null, (size, inputs) => ({ ...inputs, bloom: emptyTexture }));
}
const assets = [loadShader(device, "shaders/wgsl/bloomBlur.wgsl"), loadShader(device, "shaders/wgsl/bloomCombine.wgsl")];
const linearSampler = device.createSampler({
magFilter: "linear",
minFilter: "linear",
});
// The blur pipeline applies a blur in one direction; it's applied horizontally
// to the first image pyramid, and then vertically to the second image pyramid.
let blurPipeline;
let hBlurPyramid;
let vBlurPyramid;
let hBlurBuffer;
let vBlurBuffer;
let hBlurBindGroups;
let vBlurBindGroups;
// The combine pipeline blends the last image pyramid's layers into the output.
let combinePipeline;
let combineBuffer;
let combineBindGroup;
let output;
let scaledScreenSize;
// Async shader/pipeline setup; handed to makePass so the pass can await it.
const loaded = (async () => {
const [blurShader, combineShader] = await Promise.all(assets);
[blurPipeline, combinePipeline] = await Promise.all([
device.createComputePipeline({
layout: "auto",
compute: {
module: blurShader.module,
entryPoint: "computeMain",
},
}),
device.createComputePipeline({
layout: "auto",
compute: {
module: combineShader.module,
entryPoint: "computeMain",
},
}),
]);
// One uniform buffer per blur direction, shared by every pyramid level.
const blurUniforms = structs.from(blurShader.code).Config;
hBlurBuffer = makeUniformBuffer(device, blurUniforms, { bloomRadius, direction: [1, 0] });
vBlurBuffer = makeUniformBuffer(device, blurUniforms, { bloomRadius, direction: [0, 1] });
const combineUniforms = structs.from(combineShader.code).Config;
combineBuffer = makeUniformBuffer(device, combineUniforms, { pyramidHeight, bloomStrength });
})();
// Rebuilds the pyramids, the output target and all bind groups for a new
// screen size. Returns the inputs augmented with the bloom texture.
const build = (screenSize, inputs) => {
// Since the bloom is blurry, we downscale everything
scaledScreenSize = screenSize.map((x) => Math.floor(x * bloomSize));
destroyPyramid(hBlurPyramid);
hBlurPyramid = makePyramid(device, scaledScreenSize, pyramidHeight);
destroyPyramid(vBlurPyramid);
vBlurPyramid = makePyramid(device, scaledScreenSize, pyramidHeight);
output?.destroy();
output = makeComputeTarget(device, scaledScreenSize);
hBlurBindGroups = [];
vBlurBindGroups = [];
// The first pyramid's base level is the input texture blurred.
// The subsequent levels of the pyramid are the preceding level blurred.
let srcView = inputs.highPass.createView();
for (let i = 0; i < pyramidHeight; i++) {
const hBlurPyramidView = makePyramidLevelView(hBlurPyramid, i);
const vBlurPyramidView = makePyramidLevelView(vBlurPyramid, i);
hBlurBindGroups[i] = makeBindGroup(device, blurPipeline, 0, [hBlurBuffer, linearSampler, srcView, hBlurPyramidView]);
vBlurBindGroups[i] = makeBindGroup(device, blurPipeline, 0, [vBlurBuffer, linearSampler, hBlurPyramidView, vBlurPyramidView]);
srcView = hBlurPyramidView;
}
combineBindGroup = makeBindGroup(device, combinePipeline, 0, [combineBuffer, linearSampler, ...makePyramidViews(vBlurPyramid), output.createView()]);
return {
...inputs,
bloom: output,
};
};
// Encodes the per-level blur dispatches (both directions) and the final
// combine dispatch into the frame's command encoder.
const run = (encoder, shouldRender) => {
if (!shouldRender) {
return;
}
const computePass = encoder.beginComputePass();
computePass.setPipeline(blurPipeline);
for (let i = 0; i < pyramidHeight; i++) {
// The x dimension is divided by 32 — presumably the shader's workgroup
// width; confirm against bloomBlur.wgsl.
// NOTE(review): the inner Math.floor on the y term is redundant.
const dispatchSize = [Math.ceil(Math.floor(scaledScreenSize[0] * 2 ** -i) / 32), Math.floor(Math.floor(scaledScreenSize[1] * 2 ** -i)), 1];
computePass.setBindGroup(0, hBlurBindGroups[i]);
computePass.dispatchWorkgroups(...dispatchSize);
computePass.setBindGroup(0, vBlurBindGroups[i]);
computePass.dispatchWorkgroups(...dispatchSize);
}
computePass.setPipeline(combinePipeline);
computePass.setBindGroup(0, combineBindGroup);
computePass.dispatchWorkgroups(Math.ceil(scaledScreenSize[0] / 32), scaledScreenSize[1], 1);
computePass.end();
};
return makePass("Bloom", loaded, build, run);
};

View File

@@ -1,66 +0,0 @@
import { loadShader, makeBindGroup, makePass } from "./utils.js";
// Blits the pipeline's final texture to the canvas.
// Eventually, WebGPU will allow the output of the final pass in the pipeline
// to be copied to the canvas texture directly; until then, this render pass
// does the job.

// Two triangles of three vertices each.
const numVerticesPerQuad = 2 * 3;

export default ({ device, canvasFormat, canvasContext }) => {
	const nearestSampler = device.createSampler();
	const renderPassConfig = {
		colorAttachments: [
			{
				// The view is assigned per-frame from the current canvas texture.
				loadOp: "clear",
				storeOp: "store",
			},
		],
	};
	let renderPipeline;
	let renderBindGroup;
	// Compile the shader and build the fullscreen render pipeline.
	const loaded = (async () => {
		const imageShader = await loadShader(device, "shaders/wgsl/endPass.wgsl");
		renderPipeline = await device.createRenderPipelineAsync({
			layout: "auto",
			vertex: {
				module: imageShader.module,
				entryPoint: "vertMain",
			},
			fragment: {
				module: imageShader.module,
				entryPoint: "fragMain",
				targets: [{ format: canvasFormat }],
			},
		});
	})();
	// Rebind the incoming primary texture; this pass produces no outputs.
	const build = (size, inputs) => {
		renderBindGroup = makeBindGroup(device, renderPipeline, 0, [nearestSampler, inputs.primary.createView()]);
		return null;
	};
	// Draw one fullscreen quad into the canvas's current texture.
	const run = (encoder, shouldRender) => {
		if (!shouldRender) {
			return;
		}
		renderPassConfig.colorAttachments[0].view = canvasContext.getCurrentTexture().createView();
		const renderPass = encoder.beginRenderPass(renderPassConfig);
		renderPass.setPipeline(renderPipeline);
		renderPass.setBindGroup(0, renderBindGroup);
		renderPass.draw(numVerticesPerQuad, 1, 0, 0);
		renderPass.end();
	};
	return makePass("End", loaded, build, run);
};

View File

@@ -1,69 +0,0 @@
import { structs } from "../../lib/gpu-buffer.js";
import { makeComputeTarget, makeUniformBuffer, loadTexture, loadShader, makeBindGroup, makePass } from "./utils.js";
// Multiplies the rendered rain and bloom by a loaded-in background image.
const defaultBGURL = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0a/Flammarion_Colored.jpg/917px-Flammarion_Colored.jpg";

export default ({ config, device }) => {
	const bgURL = "bgURL" in config ? config.bgURL : defaultBGURL;
	const assets = [loadTexture(device, bgURL), loadShader(device, "shaders/wgsl/imagePass.wgsl")];
	const sampler = device.createSampler({
		magFilter: "linear",
		minFilter: "linear",
	});
	let pipeline;
	let configBuffer;
	let output;
	let screenSize;
	let backgroundTex;
	let bindGroup;
	// Await the background texture and shader, then create the compute
	// pipeline and its (placeholder) config uniforms.
	const loaded = (async () => {
		const [bgTex, imageShader] = await Promise.all(assets);
		backgroundTex = bgTex;
		pipeline = await device.createComputePipelineAsync({
			layout: "auto",
			compute: {
				module: imageShader.module,
				entryPoint: "computeMain",
			},
		});
		configBuffer = makeUniformBuffer(device, structs.from(imageShader.code).Config, { unused: 0 });
	})();
	// On resize: fresh output target and a bind group over the new views.
	const build = (size, inputs) => {
		output?.destroy();
		output = makeComputeTarget(device, size);
		screenSize = size;
		bindGroup = makeBindGroup(device, pipeline, 0, [
			configBuffer,
			sampler,
			inputs.primary.createView(),
			inputs.bloom.createView(),
			backgroundTex.createView(),
			output.createView(),
		]);
		return { primary: output };
	};
	// Dispatch the compositing shader across the output texture.
	const run = (encoder, shouldRender) => {
		if (!shouldRender) {
			return;
		}
		const computePass = encoder.beginComputePass();
		computePass.setPipeline(pipeline);
		computePass.setBindGroup(0, bindGroup);
		computePass.dispatchWorkgroups(Math.ceil(screenSize[0] / 32), screenSize[1], 1);
		computePass.end();
	};
	return makePass("Image", loaded, build, run);
};

View File

@@ -1,146 +0,0 @@
import { structs } from "../../lib/gpu-buffer.js";
import { makeUniformBuffer, makePipeline } from "./utils.js";
import makeRain from "./rainPass.js";
import makeBloomPass from "./bloomPass.js";
import makePalettePass from "./palettePass.js";
import makeStripePass from "./stripePass.js";
import makeImagePass from "./imagePass.js";
import makeMirrorPass from "./mirrorPass.js";
import makeEndPass from "./endPass.js";
import { setupCamera, cameraCanvas, cameraAspectRatio, cameraSize } from "../camera.js";
// Loads a classic script tag, resolving once it has loaded (rejecting on
// a load error).
const loadJS = (src) =>
	new Promise((resolve, reject) => {
		const script = document.createElement("script");
		script.onload = resolve;
		script.onerror = reject;
		script.src = src;
		document.body.appendChild(script);
	});
// Maps each configured effect name to the post-processing pass that
// implements it; several names alias the same pass. "none" maps to null
// (presumably skipped by makePipeline — unrecognized names fall back to
// "palette" where this table is consulted below).
const effects = {
none: null,
plain: makePalettePass,
palette: makePalettePass,
customStripes: makeStripePass,
stripes: makeStripePass,
pride: makeStripePass,
transPride: makeStripePass,
trans: makeStripePass,
image: makeImagePass,
mirror: makeMirrorPass,
};
// Boots the WebGPU renderer: fullscreen toggling, the optional camera feed,
// the WebGPU device/canvas setup, the pass pipeline for the configured
// effect, and the frame loop.
export default async (canvas, config) => {
await loadJS("lib/gl-matrix.js");
// Double-click toggles fullscreen where the API (or its webkit-prefixed
// variant) is available.
if (document.fullscreenEnabled || document.webkitFullscreenEnabled) {
window.ondblclick = () => {
if (document.fullscreenElement == null) {
if (canvas.webkitRequestFullscreen != null) {
canvas.webkitRequestFullscreen();
} else {
canvas.requestFullscreen();
}
} else {
document.exitFullscreen();
}
};
}
if (config.useCamera) {
await setupCamera();
}
const canvasFormat = navigator.gpu.getPreferredCanvasFormat();
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
const canvasContext = canvas.getContext("webgpu");
// console.table(device.limits);
canvasContext.configure({
device,
format: canvasFormat,
alphaMode: "opaque",
usage:
// GPUTextureUsage.STORAGE_BINDING |
GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_DST,
});
// Frame clock shared with the passes through a uniform buffer.
const timeUniforms = structs.from(`struct Time { seconds : f32, frames : i32, };`).Time;
const timeBuffer = makeUniformBuffer(device, timeUniforms);
// Texture the camera canvas is copied into each frame (when useCamera).
const cameraTex = device.createTexture({
size: cameraSize,
format: "rgba8unorm",
usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
});
// Shared context handed to every pass factory.
const context = {
config,
adapter,
device,
canvasContext,
timeBuffer,
canvasFormat,
cameraTex,
cameraAspectRatio,
cameraSize,
};
// Unrecognized effect names fall back to the palette pass.
const effectName = config.effect in effects ? config.effect : "palette";
const pipeline = await makePipeline(context, [makeRain, makeBloomPass, effects[effectName], makeEndPass]);
const targetFrameTimeMilliseconds = 1000 / config.fps;
let frames = 0;
let start = NaN;
let last = NaN;
let outputs;
const renderLoop = (now) => {
// First frame: anchor the clock to the first rAF timestamp.
if (isNaN(start)) {
start = now;
}
if (isNaN(last)) {
last = start;
}
// Throttle to the configured fps (60+ renders every frame);
// `once` forces a render regardless.
const shouldRender = config.fps >= 60 || now - last >= targetFrameTimeMilliseconds || config.once;
if (shouldRender) {
// Advance `last` by whole frame intervals to avoid drift.
while (now - targetFrameTimeMilliseconds > last) {
last += targetFrameTimeMilliseconds;
}
}
const devicePixelRatio = window.devicePixelRatio ?? 1;
const canvasWidth = Math.ceil(canvas.clientWidth * devicePixelRatio * config.resolution);
const canvasHeight = Math.ceil(canvas.clientHeight * devicePixelRatio * config.resolution);
const canvasSize = [canvasWidth, canvasHeight];
// Rebuild the pipeline's render targets whenever the canvas resizes.
if (canvas.width !== canvasWidth || canvas.height !== canvasHeight) {
canvas.width = canvasWidth;
canvas.height = canvasHeight;
outputs = pipeline.build(canvasSize);
}
if (config.useCamera) {
device.queue.copyExternalImageToTexture({ source: cameraCanvas }, { texture: cameraTex }, cameraSize);
}
device.queue.writeBuffer(timeBuffer, 0, timeUniforms.toBuffer({ seconds: (now - start) / 1000, frames }));
frames++;
const encoder = device.createCommandEncoder();
pipeline.run(encoder, shouldRender);
// Eventually, when WebGPU allows it, we'll remove the endPass and just copy from our pipeline's output to the canvas texture.
// encoder.copyTextureToTexture({ texture: outputs?.primary }, { texture: canvasContext.getCurrentTexture() }, canvasSize);
device.queue.submit([encoder.finish()]);
if (!config.once) {
requestAnimationFrame(renderLoop);
}
};
requestAnimationFrame(renderLoop);
};

View File

@@ -1,105 +0,0 @@
import { structs } from "../../lib/gpu-buffer.js";
import { makeComputeTarget, makeUniformBuffer, loadShader, makeBindGroup, makePass } from "./utils.js";
// Shared click/touch state: the numTouches most recent clicks, each stored
// as [x, y, timeSeconds, unused]. Coordinates are normalized to the clicked
// element, with y flipped so the origin is at the bottom; the time is
// seconds since the pass was created (start is set by the pass below).
// touchesChanged gates re-uploading the data to the GPU.
let start;
const numTouches = 5;
const touches = Array(numTouches)
	.fill()
	.map((_) => [0, 0, -Infinity, 0]);
let aspectRatio = 1;
let index = 0;
let touchesChanged = true;
window.onclick = (e) => {
	// e.target replaces the deprecated e.srcElement alias (same element).
	touches[index][0] = 0 + e.clientX / e.target.clientWidth;
	touches[index][1] = 1 - e.clientY / e.target.clientHeight;
	touches[index][2] = (Date.now() - start) / 1000;
	index = (index + 1) % numTouches;
	touchesChanged = true;
};
// Composites the rain/bloom with the camera texture, feeding the shader the
// recent click positions (see the touch state above) via a uniform buffer.
export default ({ config, device, cameraTex, cameraAspectRatio, timeBuffer }) => {
const assets = [loadShader(device, "shaders/wgsl/mirrorPass.wgsl")];
const linearSampler = device.createSampler({
magFilter: "linear",
minFilter: "linear",
});
let computePipeline;
let configBuffer;
let sceneUniforms;
let sceneBuffer;
let touchUniforms;
let touchBuffer;
let output;
let screenSize;
let computeBindGroup;
// Compile the shader and create the pipeline plus its uniform buffers
// (config, scene, touches).
const loaded = (async () => {
const [mirrorShader] = await Promise.all(assets);
computePipeline = await device.createComputePipelineAsync({
layout: "auto",
compute: {
module: mirrorShader.module,
entryPoint: "computeMain",
},
});
const mirrorShaderUniforms = structs.from(mirrorShader.code);
const configUniforms = mirrorShaderUniforms.Config;
configBuffer = makeUniformBuffer(device, configUniforms, { unused: 0 });
sceneUniforms = mirrorShaderUniforms.Scene;
sceneBuffer = makeUniformBuffer(device, sceneUniforms);
touchUniforms = mirrorShaderUniforms.Touches;
touchBuffer = makeUniformBuffer(device, touchUniforms);
})();
// On resize: fresh output target and bind group, and updated aspect-ratio
// scene uniforms.
const build = (size, inputs) => {
output?.destroy();
output = makeComputeTarget(device, size);
screenSize = size;
aspectRatio = size[0] / size[1];
computeBindGroup = makeBindGroup(device, computePipeline, 0, [
configBuffer,
timeBuffer,
sceneBuffer,
touchBuffer,
linearSampler,
inputs.primary.createView(),
inputs.bloom.createView(),
cameraTex.createView(),
output.createView(),
]);
const screenAspectRatio = size[0] / size[1];
device.queue.writeBuffer(sceneBuffer, 0, sceneUniforms.toBuffer({ screenAspectRatio, cameraAspectRatio }));
return { primary: output };
};
// Uploads the touch data if it changed, then dispatches the shader.
const run = (encoder, shouldRender) => {
if (!shouldRender) {
return;
}
if (touchesChanged) {
touchesChanged = false;
device.queue.writeBuffer(touchBuffer, 0, touchUniforms.toBuffer({ touches }));
}
const computePass = encoder.beginComputePass();
computePass.setPipeline(computePipeline);
computePass.setBindGroup(0, computeBindGroup);
computePass.dispatchWorkgroups(Math.ceil(screenSize[0] / 32), screenSize[1], 1);
computePass.end();
};
start = Date.now();
return makePass("Mirror", loaded, build, run);
};

View File

@@ -1,140 +0,0 @@
import colorToRGB from "../colorToRGB.js";
import { structs } from "../../lib/gpu-buffer.js";
import { loadShader, makeUniformBuffer, makeBindGroup, makeComputeTarget, makePass } from "./utils.js";
// Maps the brightness of the rendered rain and bloom to colors
// in a linear gradient buffer generated from the passed-in color sequence
// This shader introduces noise into the renders, to avoid banding
// Builds a uniform buffer holding a PALETTE_SIZE-entry RGB gradient
// interpolated from the given { color, at } entries (at in [0, 1]).
// NOTE(review): the paletteUniforms parameter is currently unused —
// see the gpu-uniforms TODO below.
const makePalette = (device, paletteUniforms, entries) => {
const PALETTE_SIZE = 512;
const paletteColors = Array(PALETTE_SIZE);
// Convert HSL gradient into sorted RGB gradient, capping the ends
const sortedEntries = entries
.slice()
.sort((e1, e2) => e1.at - e2.at)
.map((entry) => ({
rgb: colorToRGB(entry.color),
// Clamp `at` to [0, 1] before mapping it to a palette slot.
arrayIndex: Math.floor(Math.max(Math.min(1, entry.at), 0) * (PALETTE_SIZE - 1)),
}));
// Pin the first and last slots to the end colors so the gradient spans
// the whole palette.
sortedEntries.unshift({ rgb: sortedEntries[0].rgb, arrayIndex: 0 });
sortedEntries.push({
rgb: sortedEntries[sortedEntries.length - 1].rgb,
arrayIndex: PALETTE_SIZE - 1,
});
// Interpolate between the sorted RGB entries to generate
// the palette texture data
sortedEntries.forEach((entry, index) => {
paletteColors[entry.arrayIndex] = entry.rgb.slice();
if (index + 1 < sortedEntries.length) {
const nextEntry = sortedEntries[index + 1];
const diff = nextEntry.arrayIndex - entry.arrayIndex;
for (let i = 0; i < diff; i++) {
const ratio = i / diff;
paletteColors[entry.arrayIndex + i] = [
entry.rgb[0] * (1 - ratio) + nextEntry.rgb[0] * ratio,
entry.rgb[1] * (1 - ratio) + nextEntry.rgb[1] * ratio,
entry.rgb[2] * (1 - ratio) + nextEntry.rgb[2] * ratio,
];
}
}
});
// TODO: try using gpu-uniforms
// Each color occupies 4 floats (3 channels + 1 padding).
const paletteBuffer = device.createBuffer({
size: (3 + 1) * PALETTE_SIZE * Float32Array.BYTES_PER_ELEMENT,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
mappedAtCreation: true,
});
const view = new Float32Array(paletteBuffer.getMappedRange());
for (let i = 0; i < paletteColors.length; i++) {
view.set(paletteColors[i], (3 + 1) * i);
}
paletteBuffer.unmap();
return paletteBuffer;
};
// The rendered texture's values are mapped to colors in a palette texture.
// A little noise is introduced, to hide the banding that appears
// in subtle gradients. The noise is also time-driven, so its grain
// won't persist across subsequent frames. This is a safe trick
// in screen space.
// Builds the palette pass: maps rain/bloom brightness through the gradient
// buffer produced by makePalette, with time-driven dither noise.
export default ({ config, device, timeBuffer }) => {
const linearSampler = device.createSampler({
magFilter: "linear",
minFilter: "linear",
});
let computePipeline;
let configBuffer;
let paletteBuffer;
let computeBindGroup;
let output;
let screenSize;
const assets = [loadShader(device, "shaders/wgsl/palettePass.wgsl")];
// Compile the shader, then create the config uniforms and palette buffer.
const loaded = (async () => {
const [paletteShader] = await Promise.all(assets);
computePipeline = await device.createComputePipelineAsync({
layout: "auto",
compute: {
module: paletteShader.module,
entryPoint: "computeMain",
},
});
const paletteShaderUniforms = structs.from(paletteShader.code);
const configUniforms = paletteShaderUniforms.Config;
configBuffer = makeUniformBuffer(device, configUniforms, {
ditherMagnitude: config.ditherMagnitude,
backgroundColor: colorToRGB(config.backgroundColor),
cursorColor: colorToRGB(config.cursorColor),
glintColor: colorToRGB(config.glintColor),
cursorIntensity: config.cursorIntensity,
glintIntensity: config.glintIntensity,
});
const paletteUniforms = paletteShaderUniforms.Palette;
paletteBuffer = makePalette(device, paletteUniforms, config.palette);
})();
// On resize: fresh output target and a bind group over the new views.
const build = (size, inputs) => {
output?.destroy();
output = makeComputeTarget(device, size);
screenSize = size;
computeBindGroup = makeBindGroup(device, computePipeline, 0, [
configBuffer,
paletteBuffer,
timeBuffer,
linearSampler,
inputs.primary.createView(),
inputs.bloom.createView(),
output.createView(),
]);
return { primary: output };
};
// Dispatch the palette-mapping shader across the output texture.
const run = (encoder, shouldRender) => {
if (!shouldRender) {
return;
}
const computePass = encoder.beginComputePass();
computePass.setPipeline(computePipeline);
computePass.setBindGroup(0, computeBindGroup);
computePass.dispatchWorkgroups(Math.ceil(screenSize[0] / 32), screenSize[1], 1);
computePass.end();
};
return makePass("Palette", loaded, build, run);
};

View File

@@ -1,237 +0,0 @@
import { structs } from "../../lib/gpu-buffer.js";
import { makeRenderTarget, loadTexture, loadShader, makeUniformBuffer, makeBindGroup, makePass } from "./utils.js";
// Named ripple shapes mapped to the integer codes written into the Config
// uniform (see makeConfigBuffer below); unrecognized names become -1.
const rippleTypes = {
	box: 0,
	circle: 1,
};

// Each quad is drawn as two triangles of three vertices each.
const numVerticesPerQuad = 2 * 3;
// Flattens the user config plus a handful of derived values into the rain
// shader's Config uniform buffer.
const makeConfigBuffer = (device, configUniforms, config, density, gridSize) => {
	const { slant, rippleTypeName, effect } = config;
	// A unit vector along the slant angle, and a scale factor compensating
	// for the stretch of slanted columns.
	const slantVec = [Math.cos(slant), Math.sin(slant)];
	const slantScale = 1 / (Math.abs(Math.sin(2 * slant)) * (Math.sqrt(2) - 1) + 1);
	// Unknown ripple names disable the ripple (-1).
	const rippleType = rippleTypeName in rippleTypes ? rippleTypes[rippleTypeName] : -1;
	return makeUniformBuffer(device, configUniforms, {
		...config,
		gridSize,
		density,
		showDebugView: effect === "none",
		rippleType,
		slantScale,
		slantVec,
		msdfPxRange: 4,
	});
};
// Simulates and renders the rain itself. Two compute passes advance the
// simulation (one for the intro effect, one for the main grid of cells);
// a render pass then draws glyph quads with MSDF textures into two targets,
// returned from build() as `primary` and `highPass`.
export default ({ config, device, timeBuffer }) => {
	// glMatrix is read from the global scope — it is not imported in this file.
	const { mat4, vec3 } = glMatrix;
	const assets = [
		loadTexture(device, config.glyphMSDFURL),
		loadTexture(device, config.glintMSDFURL),
		loadTexture(device, config.baseTextureURL, false, true),
		loadTexture(device, config.glintTextureURL, false, true),
		loadShader(device, "shaders/wgsl/rainPass.wgsl"),
	];
	// The volumetric mode multiplies the number of columns
	// to reach the desired density, and then overlaps them
	const density = config.volumetric && config.effect !== "none" ? config.density : 1;
	const gridSize = [Math.floor(config.numColumns * density), config.numColumns];
	const numCells = gridSize[0] * gridSize[1];
	// The volumetric mode requires us to create a grid of quads,
	// rather than a single quad for our geometry
	const numQuads = config.volumetric ? numCells : 1;
	// Model transform: isometric volumetric mode tilts and rotates the grid;
	// otherwise the grid is simply pushed back from the camera.
	const transform = mat4.create();
	if (config.volumetric && config.isometric) {
		mat4.rotateX(transform, transform, (Math.PI * 1) / 8);
		mat4.rotateY(transform, transform, (Math.PI * 1) / 4);
		mat4.translate(transform, transform, vec3.fromValues(0, 0, -1));
		mat4.scale(transform, transform, vec3.fromValues(1, 1, 2));
	} else {
		mat4.translate(transform, transform, vec3.fromValues(0, 0, -1));
	}
	// The projection matrix is filled in by build(), once the aspect ratio is known.
	const camera = mat4.create();
	// TODO: vantage points, multiple renders
	// We use the different channels for different parts of the raindrop
	const renderFormat = "rgba8unorm";
	const linearSampler = device.createSampler({
		magFilter: "linear",
		minFilter: "linear",
	});
	// Two color attachments: the primary render and the high-pass render.
	// Their views are assigned each frame in run(), just before the pass begins.
	const renderPassConfig = {
		colorAttachments: [
			{
				// view: null,
				loadOp: "clear",
				storeOp: "store",
			},
			{
				// view: null,
				loadOp: "clear",
				storeOp: "store",
			},
		],
	};
	// Populated asynchronously in `loaded` and on resize in `build`.
	let configBuffer;
	let sceneUniforms;
	let sceneBuffer;
	let introPipeline;
	let computePipeline;
	let renderPipeline;
	let introBindGroup;
	let computeBindGroup;
	let renderBindGroup;
	let output;
	let highPassOutput;
	const loaded = (async () => {
		const [glyphMSDFTexture, glintMSDFTexture, baseTexture, glintTexture, rainShader] = await Promise.all(assets);
		const rainShaderUniforms = structs.from(rainShader.code);
		configBuffer = makeConfigBuffer(device, rainShaderUniforms.Config, config, density, gridSize);
		// Storage buffers holding the simulation state: one entry per column
		// for the intro, one entry per cell for the main simulation.
		const introCellsBuffer = device.createBuffer({
			size: gridSize[0] * rainShaderUniforms.IntroCell.minSize,
			usage: GPUBufferUsage.STORAGE,
		});
		const cellsBuffer = device.createBuffer({
			size: numCells * rainShaderUniforms.Cell.minSize,
			usage: GPUBufferUsage.STORAGE,
		});
		sceneUniforms = rainShaderUniforms.Scene;
		sceneBuffer = makeUniformBuffer(device, sceneUniforms);
		const additiveBlendComponent = {
			operation: "add",
			srcFactor: "one",
			dstFactor: "one",
		};
		[introPipeline, computePipeline, renderPipeline] = await Promise.all([
			device.createComputePipelineAsync({
				layout: "auto",
				compute: {
					module: rainShader.module,
					entryPoint: "computeIntro",
				},
			}),
			device.createComputePipelineAsync({
				layout: "auto",
				compute: {
					module: rainShader.module,
					entryPoint: "computeMain",
				},
			}),
			device.createRenderPipelineAsync({
				layout: "auto",
				vertex: {
					module: rainShader.module,
					entryPoint: "vertMain",
				},
				fragment: {
					module: rainShader.module,
					entryPoint: "fragMain",
					// Both targets blend additively, so overlapping glyphs accumulate.
					targets: [
						{
							format: renderFormat,
							blend: {
								color: additiveBlendComponent,
								alpha: additiveBlendComponent,
							},
						},
						{
							format: renderFormat,
							blend: {
								color: additiveBlendComponent,
								alpha: additiveBlendComponent,
							},
						},
					],
				},
			}),
		]);
		introBindGroup = makeBindGroup(device, introPipeline, 0, [configBuffer, timeBuffer, introCellsBuffer]);
		computeBindGroup = makeBindGroup(device, computePipeline, 0, [configBuffer, timeBuffer, cellsBuffer, introCellsBuffer]);
		renderBindGroup = makeBindGroup(device, renderPipeline, 0, [
			configBuffer,
			timeBuffer,
			sceneBuffer,
			linearSampler,
			glyphMSDFTexture.createView(),
			glintMSDFTexture.createView(),
			baseTexture.createView(),
			glintTexture.createView(),
			cellsBuffer,
		]);
	})();
	const build = (size) => {
		// Update scene buffer: camera and transform math for the volumetric mode
		const aspectRatio = size[0] / size[1];
		if (config.volumetric && config.isometric) {
			// Orthographic projection, widened along the longer screen axis
			if (aspectRatio > 1) {
				mat4.orthoZO(camera, -1.5 * aspectRatio, 1.5 * aspectRatio, -1.5, 1.5, -1000, 1000);
			} else {
				mat4.orthoZO(camera, -1.5, 1.5, -1.5 / aspectRatio, 1.5 / aspectRatio, -1000, 1000);
			}
		} else {
			mat4.perspectiveZO(camera, (Math.PI / 180) * 90, aspectRatio, 0.0001, 1000);
		}
		const screenSize = aspectRatio > 1 ? [1, aspectRatio] : [1 / aspectRatio, 1];
		device.queue.writeBuffer(sceneBuffer, 0, sceneUniforms.toBuffer({ screenSize, camera, transform }));
		// Recreate both render targets at the new size
		output?.destroy();
		output = makeRenderTarget(device, size, renderFormat);
		highPassOutput?.destroy();
		highPassOutput = makeRenderTarget(device, size, renderFormat);
		return {
			primary: output,
			highPass: highPassOutput,
		};
	};
	const run = (encoder, shouldRender) => {
		// The simulation always advances, even on frames that aren't rendered;
		// only the draw below is gated on shouldRender.
		// We render the code into a target using MSDFs: https://github.com/Chlumsky/msdfgen
		const introPass = encoder.beginComputePass();
		introPass.setPipeline(introPipeline);
		introPass.setBindGroup(0, introBindGroup);
		// One workgroup per 32 columns — assumes the shader's workgroup width is 32; confirm in rainPass.wgsl
		introPass.dispatchWorkgroups(Math.ceil(gridSize[0] / 32), 1, 1);
		introPass.end();
		const computePass = encoder.beginComputePass();
		computePass.setPipeline(computePipeline);
		computePass.setBindGroup(0, computeBindGroup);
		computePass.dispatchWorkgroups(Math.ceil(gridSize[0] / 32), gridSize[1], 1);
		computePass.end();
		if (shouldRender) {
			renderPassConfig.colorAttachments[0].view = output.createView();
			renderPassConfig.colorAttachments[1].view = highPassOutput.createView();
			const renderPass = encoder.beginRenderPass(renderPassConfig);
			renderPass.setPipeline(renderPipeline);
			renderPass.setBindGroup(0, renderBindGroup);
			renderPass.draw(numVerticesPerQuad * numQuads, 1, 0, 0);
			renderPass.end();
		}
	};
	return makePass("Rain", loaded, build, run);
};

View File

@@ -1,118 +0,0 @@
import colorToRGB from "../colorToRGB.js";
import { structs } from "../../lib/gpu-buffer.js";
import { loadShader, make1DTexture, makeUniformBuffer, makeBindGroup, makeComputeTarget, makePass } from "./utils.js";
// Color sequences for the stripe pass, which multiplies the rendered rain
// and bloom by a 1D gradient texture built from one of these lists.

// Trans pride flag stripes; each color repeated three times so every stripe
// occupies an equal span of the 1D texture.
const transPrideStripeColors = [
	{ space: "rgb", values: [0.36, 0.81, 0.98] },
	{ space: "rgb", values: [0.96, 0.66, 0.72] },
	{ space: "rgb", values: [1.0, 1.0, 1.0] },
	{ space: "rgb", values: [0.96, 0.66, 0.72] },
	{ space: "rgb", values: [0.36, 0.81, 0.98] },
].flatMap((color) => [color, color, color]);

// Six-stripe pride flag; each color doubled for equal stripe widths.
const prideStripeColors = [
	{ space: "rgb", values: [0.89, 0.01, 0.01] },
	{ space: "rgb", values: [1.0, 0.55, 0.0] },
	{ space: "rgb", values: [1.0, 0.93, 0.0] },
	{ space: "rgb", values: [0.0, 0.5, 0.15] },
	{ space: "rgb", values: [0.0, 0.3, 1.0] },
	{ space: "rgb", values: [0.46, 0.03, 0.53] },
].flatMap((color) => [color, color]);

// Each quad is two triangles of three vertices each.
// NOTE(review): not referenced by the visible stripe pass code — confirm before removing.
const numVerticesPerQuad = 2 * 3;
// Maps the rendered texture's values through a striped gradient texture.
// A little time-driven noise hides the banding that appears in subtle
// gradients; since the grain changes every frame, this is a safe trick
// in screen space.
export default ({ config, device, timeBuffer }) => {
	// Pick the stripe colors: an explicit config override wins; otherwise
	// they follow the selected effect.
	const stripeColors = "stripeColors" in config ? config.stripeColors : config.effect === "pride" ? prideStripeColors : transPrideStripeColors;
	// Expand and convert stripe colors into 1D texture data
	const stripeTex = make1DTexture(
		device,
		stripeColors.map((color) => [...colorToRGB(color), 1])
	);
	const sampler = device.createSampler({
		magFilter: "linear",
		minFilter: "linear",
	});

	// Populated asynchronously (loaded) and on resize (build).
	let computePipeline;
	let configBuffer;
	let tex;
	let bloomTex;
	let output;
	let screenSize;

	const loaded = (async () => {
		const stripeShader = await loadShader(device, "shaders/wgsl/stripePass.wgsl");
		computePipeline = await device.createComputePipelineAsync({
			layout: "auto",
			compute: {
				module: stripeShader.module,
				entryPoint: "computeMain",
			},
		});
		const configUniforms = structs.from(stripeShader.code).Config;
		configBuffer = makeUniformBuffer(device, configUniforms, {
			ditherMagnitude: config.ditherMagnitude,
			backgroundColor: colorToRGB(config.backgroundColor),
			cursorColor: colorToRGB(config.cursorColor),
			glintColor: colorToRGB(config.glintColor),
			cursorIntensity: config.cursorIntensity,
			glintIntensity: config.glintIntensity,
		});
	})();

	const build = (size, inputs) => {
		screenSize = size;
		tex = inputs.primary;
		bloomTex = inputs.bloom;
		// Recreate the output target at the new size
		output?.destroy();
		output = makeComputeTarget(device, size);
		return { primary: output };
	};

	const run = (encoder, shouldRender) => {
		if (!shouldRender) {
			return;
		}
		// Assemble the bindings for this dispatch (bound fresh on every run,
		// matching the original behavior).
		const bindGroup = makeBindGroup(device, computePipeline, 0, [
			configBuffer,
			timeBuffer,
			sampler,
			tex.createView(),
			bloomTex.createView(),
			stripeTex.createView(),
			output.createView(),
		]);
		const computePass = encoder.beginComputePass();
		computePass.setPipeline(computePipeline);
		computePass.setBindGroup(0, bindGroup);
		computePass.dispatchWorkgroups(Math.ceil(screenSize[0] / 32), screenSize[1], 1);
		computePass.end();
	};

	return makePass("Stripe", loaded, build, run);
};

View File

@@ -1,108 +0,0 @@
/**
 * Loads an image URL into an rgba8unorm GPU texture.
 * A null URL yields a 1×1 placeholder texture, so callers always have
 * something to bind even when no image is configured.
 * @param {GPUDevice} device
 * @param {?string} url
 * @returns {Promise<GPUTexture>}
 * @throws {Error} if the image fails to download
 */
const loadTexture = async (device, url) => {
	const usage = GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT;
	if (url == null) {
		return device.createTexture({
			size: [1, 1, 1],
			format: "rgba8unorm",
			usage,
		});
	}
	const response = await fetch(url);
	// fetch() resolves on HTTP errors; fail loudly here instead of handing
	// an error page to createImageBitmap.
	if (!response.ok) {
		throw new Error(`Failed to load texture ${url}: ${response.status} ${response.statusText}`);
	}
	const data = await response.blob();
	const source = await createImageBitmap(data);
	const size = [source.width, source.height, 1];
	const texture = device.createTexture({
		size,
		format: "rgba8unorm",
		usage,
	});
	// flipY flips the image vertically during the copy.
	device.queue.copyExternalImageToTexture({ source, flipY: true }, { texture }, size);
	return texture;
};
// Creates a texture that render passes can draw into and that later passes
// can sample or copy.
const makeRenderTarget = (device, size, format, mipLevelCount = 1) => {
	const usage = GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT;
	return device.createTexture({ size: [...size, 1], mipLevelCount, format, usage });
};
// Creates an rgba8unorm texture that compute shaders can write to through a
// storage binding and that later passes can sample or copy.
const makeComputeTarget = (device, size, mipLevelCount = 1) => {
	const usage = GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | GPUTextureUsage.STORAGE_BINDING;
	return device.createTexture({ size: [...size, 1], mipLevelCount, format: "rgba8unorm", usage });
};
/**
 * Fetches WGSL source and compiles it into a shader module.
 * The raw code is returned alongside the module so callers can parse its
 * struct layouts (see the `structs.from(shader.code)` call sites).
 * @param {GPUDevice} device
 * @param {string} url
 * @returns {Promise<{code: string, module: GPUShaderModule}>}
 * @throws {Error} if the shader fails to download
 */
const loadShader = async (device, url) => {
	const response = await fetch(url);
	// fetch() resolves on HTTP errors; without this check an error page
	// would be compiled as shader code and fail much less legibly.
	if (!response.ok) {
		throw new Error(`Failed to load shader ${url}: ${response.status} ${response.statusText}`);
	}
	const code = await response.text();
	return {
		code,
		module: device.createShaderModule({ code }),
	};
};
// Creates a uniform buffer sized to the given struct layout. When initial
// data is supplied, the buffer is created mapped, filled through the
// layout's serializer, and unmapped before returning.
const makeUniformBuffer = (device, uniforms, data = null) => {
	const hasData = data != null;
	const buffer = device.createBuffer({
		size: uniforms.minSize,
		usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
		mappedAtCreation: hasData,
	});
	if (hasData) {
		uniforms.toBuffer(data, buffer.getMappedRange());
		buffer.unmap();
	}
	return buffer;
};
// Packs an array of RGBA colors (0–1 floats) into an N-wide, 1-tall
// rgba8unorm texture, used as a lookup gradient.
const make1DTexture = (device, rgbas) => {
	const size = [rgbas.length];
	const texture = device.createTexture({
		size,
		// dimension: "1d",
		format: "rgba8unorm",
		usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
	});
	// Scale each channel from 0–1 to 0–255; the clamped array rounds and clamps.
	const data = new Uint8ClampedArray(rgbas.flatMap((color) => color.map((channel) => channel * 0xff)));
	device.queue.writeTexture({ texture }, data, {}, size);
	return texture;
};
// Builds a bind group from an ordered list of resources, using each
// resource's position as its binding number. Bare GPUBuffers are wrapped
// in the { buffer } binding-resource form; everything else (samplers,
// texture views) is passed through unchanged.
const makeBindGroup = (device, pipeline, index, entries) =>
	device.createBindGroup({
		layout: pipeline.getBindGroupLayout(index),
		entries: entries.map((resource, binding) => ({
			binding,
			resource: resource instanceof GPUBuffer ? { buffer: resource } : resource,
		})),
	});
// Normalizes a pass into the shape the pipeline expects: a `loaded`
// promise (resolved immediately by default), a `build(size, inputs)` step
// (identity by default), and a `run` wrapped in a named GPU debug group.
const makePass = (name, loaded, build, run) => {
	const wrappedRun = (encoder, shouldRender) => {
		encoder.pushDebugGroup(`Pass "${name}"`);
		run?.(encoder, shouldRender);
		encoder.popDebugGroup();
	};
	return {
		loaded: loaded ?? Promise.resolve(),
		build: build ?? ((size, inputs) => inputs),
		run: wrappedRun,
	};
};
// Instantiates each non-null step factory with the shared context, waits
// for all of their assets to load, and returns the assembled pipeline.
// `build` threads each step's outputs into the next step's inputs (the
// first step receives null); `run` encodes every step in order.
const makePipeline = async (context, steps) => {
	const instantiated = steps.filter((factory) => factory != null).map((factory) => factory(context));
	await Promise.all(instantiated.map((step) => step.loaded));
	const build = (canvasSize) => {
		let outputs = null;
		for (const step of instantiated) {
			outputs = step.build(canvasSize, outputs);
		}
		return outputs;
	};
	const run = (encoder, shouldRender) => {
		for (const step of instantiated) {
			step.run(encoder, shouldRender);
		}
	};
	return { steps: instantiated, build, run };
};
export { makeRenderTarget, makeComputeTarget, make1DTexture, loadTexture, loadShader, makeUniformBuffer, makePass, makePipeline, makeBindGroup };