WLJS LogoWLJS Notebook

Web IO

Microphone capture

The Web API provides access to many peripherals and has the advantage that no special drivers or libraries are needed. In this guide, we capture microphone audio and process it in real time using Wolfram Language.

We will use the event system as the main way of piping data from JavaScript to the Wolfram side.

You need to have WLJS Notebook installed as a desktop application or run it as a Docker container or server script with secure HTTPS connection. Otherwise, your browser will not allow audio capture.

.js const sign = document.createElement("div"); sign.style.color = "gray"; sign.innerText = "Idle"; const audioContext = new (window.AudioContext || window.webkitAudioContext)(); const analyser = audioContext.createAnalyser(); const scriptProcessor = audioContext.createScriptProcessor(2048, 1, 1); let isRunning = false; let mediaStream = null; let inputNode = null; analyser.smoothingTimeConstant = 0.3; analyser.fftSize = 1024; function setStatus(text, color) { sign.innerText = text; sign.style.color = color; } function connectGraph(stream) { mediaStream = stream; inputNode = audioContext.createMediaStreamSource(stream); inputNode.connect(analyser); analyser.connect(scriptProcessor); scriptProcessor.connect(audioContext.destination); scriptProcessor.onaudioprocess = onAudioProcess; } function disconnectGraph() { scriptProcessor.onaudioprocess = null; try { inputNode?.disconnect(); } catch {} try { analyser?.disconnect(); } catch {} try { scriptProcessor?.disconnect(); } catch {} inputNode = null; // Optional: stop mic hardware capture mediaStream?.getTracks?.().forEach((t) => t.stop()); mediaStream = null; } function onAudioProcess() { const data = new Uint8Array(analyser.frequencyBinCount); analyser.getByteTimeDomainData(data); server.kernel.io.fire("audio", Array.from(data)); } async function requestMicrophoneAccess() { try { return await navigator.mediaDevices.getUserMedia({ audio: true }); } catch (err) { alert( "Could not access the microphone. " + "Make sure you're using https and you granted permission." 
); return null; } } core.MicStart = async () => { if (isRunning) return; isRunning = true; // Some browsers need this on a user gesture if (audioContext.state === "suspended") { await audioContext.resume(); } const stream = await requestMicrophoneAccess(); if (!stream) { isRunning = false; setStatus("Idle", "gray"); return; } connectGraph(stream); setStatus("Recording...", "red"); }; core.MicStop = async () => { if (!isRunning) return; isRunning = false; disconnectGraph(); setStatus("Stopped", "blue"); }; this.ondestroy = () => { if (!isRunning) return; isRunning = false; disconnectGraph(); }; // Return UI element return sign;

Here we introduce two frontend symbols, MicStart and MicStop, to control the streaming. The rest is boilerplate code for handling the standard Web Audio API. The actual data is sent via

const data = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteTimeDomainData(data);   
server.kernel.io.fire("audio", Array.from(data));

as a list of bytes in integer representation. Unfortunately, the JS->WL binary format is not yet available.

Let's add control buttons:

(* Two buttons that submit the frontend symbols controlling the stream. *)
Row[{
  Button["Start", FrontSubmit[MicStart]],
  Button["Stop", FrontSubmit[MicStop]]
}]

And finally to handle the incoming data we use EventHandler and hook it to some Graphics primitive:

(* Subscribe to the "audio" event and redraw the waveform on each packet.   *)
(* FIX: use the symbol TransitionDuration (as in the FFT example below),    *)
(* not the string "TransitionDuration", so both snippets are consistent.    *)
Module[{line = {}},
  EventHandler["audio", Function[data,
    (* 512 byte samples centered around 127 -> {index, value} pairs *)
    line = Transpose[{Range[data // Length], data}];
  ]];
  Graphics[
    Line[line // Offload],
    PlotRange -> {{0, 512}, {127 - 50, 127 + 50}},
    TransitionDuration -> 30,
    Axes -> True,
    AspectRatio -> 1/2
  ]
]

Let's add a Fourier transform as well:

(* Waveform plus log-magnitude spectrum of the incoming audio packets.       *)
(* FIX: the original computed Log@Take[Fourier[data],128]//Abs, which is     *)
(* Abs[Log[z]] of the complex FFT values (magnitude of the complex log),     *)
(* not the conventional log-magnitude spectrum Log[Abs[z]].                  *)
Module[{line = {}, fft = {}},
  EventHandler["audio", Function[data,
    line = Transpose[{Range[data // Length], data}];
    (* keep the first 128 bins, drop the DC component with Drop[...,1] *)
    fft = Drop[Transpose[{Range[128] // N, Log@Abs@Take[Fourier[data], 128]}], 1];
  ]];
  {
    Graphics[
      {Blue, Line[line // Offload]},
      PlotRange -> {{0, 512}, {127 - 50, 127 + 50}},
      TransitionDuration -> 30,
      Axes -> True,
      AspectRatio -> 1/2
    ],
    Graphics[
      {Red, Line[fft // Offload]},
      PlotRange -> {{1, 128}, {0, 6}},
      TransitionDuration -> 30,
      Axes -> True,
      AspectRatio -> 1/2
    ]
  } // Row
]

You can also accumulate the buffer for more samples and perform the computations asynchronously with a timer or similar mechanism.

Canvas

Here we will perform a small experiment with the JavaScript Canvas API. Optimizing drawing operations on a canvas is itself a challenging task. However, we will skip that part and focus on what we have:

  • animated bubbles that fade away with time
  • we provide an XY array of points where bubbles can appear
  • we animate them continuously
  • we define a frontend symbol Spark to update XY array

Let's create a JavaScript cell and evaluate it:

.js
// Canvas cell: animates fading "bubble" particles spawned at points supplied
// from the kernel via the frontend symbol Spark. Returns the <canvas> itself.

const canvas = document.createElement("canvas");
const CSS_WIDTH = 500;
const CSS_HEIGHT = 200;
const MAX_PARTICLES = 5000;   // capacity of the particle ring buffer
const SPAWN_PER_FRAME = 10;   // new particles created on every animation frame
const AGE_STEP = 0.1;         // how much a particle ages per drawn frame
const FADE_START = 4;         // age at which fading begins
const FADE_END = 18;          // age at which a particle is fully transparent
const ctx = canvas.getContext("2d");

// HiDPI / DPR
let dpr = 1;
// Size the canvas so drawing coordinates stay in CSS pixels on HiDPI screens.
function resizeCanvas(cssW = CSS_WIDTH, cssH = CSS_HEIGHT) {
  dpr = Math.max(1, Math.min(window.devicePixelRatio || 1, 3)); // cap optional
  // Set the displayed size
  canvas.style.width = Math.round(cssW)+'px';
  canvas.style.height = Math.round(cssH)+'px';
  // Set the backing-store size
  canvas.width = Math.floor(cssW * dpr);
  canvas.height = Math.floor(cssH * dpr);
  // Draw in CSS pixels
  ctx.setTransform(dpr, 0, 0, dpr, 0, 0);
}
resizeCanvas();

// Small numeric helpers.
const rand = (min, max) => Math.random() * (max - min) + min;
const randInt = (min, maxInclusive) => Math.floor(rand(min, maxInclusive + 1));
const clamp = (v, lo, hi) => Math.max(lo, Math.min(hi, v));

// Linearly remap value from interval [a1,a2] to [b1,b2]; midpoint on a
// degenerate (zero-width) source interval.
function convertRange(value, [a1, a2], [b1, b2]) {
  if (a1 === a2) return (b1 + b2) / 2;
  return ((value - a1) * (b2 - b1)) / (a2 - a1) + b1;
}

// Bounding box of an array of [x, y] points.
function getMinMax(points) {
  let minX = Infinity, minY = Infinity, maxX = -Infinity, maxY = -Infinity;
  for (const [x, y] of points) {
    if (x < minX) minX = x;
    if (y < minY) minY = y;
    if (x > maxX) maxX = x;
    if (y > maxY) maxY = y;
  }
  return { minX, minY, maxX, maxY };
}

// Opacity multiplier for a given age: 1 until FADE_START, then a quadratic
// fade down to 0 at FADE_END.
function alphaForAge(age) {
  if (age <= FADE_START) return 1;
  if (age >= FADE_END) return 0;
  const t = (age - FADE_START) / (FADE_END - FADE_START);
  return (1 - t) * (1 - t); // smooth-ish fade
}

const rgba = (rgb, a) => 'rgba('+rgb[0]+','+rgb[1]+','+rgb[2]+','+a+')';

// Fixed-capacity ring buffer of particle objects; slots are allocated lazily
// and reused thereafter to avoid per-frame garbage.
const particles = new Array(MAX_PARTICLES);
let writeIndex = 0;  // next slot to (re)fill
let liveCount = 0;   // number of slots ever filled, up to MAX_PARTICLES

// Positions are stored in CSS pixels (not device pixels)
let data = [];

// Reset (or lazily create) the slot at writeIndex as a fresh particle at
// (x, y) with a random velocity and color, then advance the ring.
function putParticle(x, y) {
  const p = particles[writeIndex] ??
    (particles[writeIndex] = { x: 0, y: 0, xvel: 0, yvel: 0, rgb: [0, 0, 0], baseAlpha: 0.6, size: 5, age: 1, });
  p.x = x;
  p.y = y;
  p.xvel = rand(-1, 1);
  p.yvel = rand(-1, 1);
  p.rgb[0] = randInt(0, 255);
  p.rgb[1] = randInt(0, 255);
  p.rgb[2] = randInt(0, 255);
  p.baseAlpha = 0.6;
  p.size = 5;
  p.age = 1;
  writeIndex = (writeIndex + 1) % MAX_PARTICLES;
  if (liveCount < MAX_PARTICLES) liveCount++;
}

// Spawn SPAWN_PER_FRAME particles at randomly chosen points from `data`.
function spawnParticles() {
  if (!data.length) return;
  for (let j = 0; j < SPAWN_PER_FRAME; j++) {
    const idx = randInt(0, data.length - 1);
    const [x, y] = data[idx];
    putParticle(x, y);
  }
}

// Redraw every live particle, then advance its age and position.
function drawParticles() {
  // Clear using CSS pixel dimensions
  ctx.clearRect(0, 0, CSS_WIDTH, CSS_HEIGHT);
  // Oldest -> newest ordering when full
  const start = liveCount === MAX_PARTICLES ? writeIndex : 0;
  for (let i = 0; i < liveCount; i++) {
    const idx = (start + i) % MAX_PARTICLES;
    const p = particles[idx];
    if (!p) continue;
    const a = p.baseAlpha * alphaForAge(p.age);
    if (a <= 0) continue; // fully faded: skip drawing and further aging
    const radius = p.size / Math.max(p.age, 0.001); // bubbles shrink with age
    ctx.beginPath();
    ctx.arc(p.x, p.y, radius, 0, Math.PI * 2);
    ctx.fillStyle = rgba(p.rgb, a);
    ctx.fill();
    p.age += AGE_STEP;
    p.x += p.xvel;
    p.y -= p.yvel; // canvas y axis points down, so subtract to move "up"
  }
}

let rafId = 0;
let destroyed = false;

// requestAnimationFrame loop; re-runs resizeCanvas if the device pixel ratio
// changed (e.g. the window moved to another monitor).
function animate() {
  if (destroyed) return;
  const currentDpr = Math.max(1, Math.min(window.devicePixelRatio || 1, 3));
  if (currentDpr !== dpr) resizeCanvas();
  spawnParticles();
  drawParticles();
  rafId = window.requestAnimationFrame(animate);
}

// Stop the loop when the cell is destroyed or re-evaluated.
this.ondestroy = () => {
  destroyed = true;
  if (rafId) window.cancelAnimationFrame(rafId);
};

// Frontend symbol: receives an array of [x, y] pairs from the kernel and
// rescales them into the padded canvas rectangle as spawn locations.
core.Spark = async (args, env) => {
  const raw = await interpretate(args[0], env);
  if (!Array.isArray(raw) || raw.length === 0) {
    data = [];
    return;
  }
  const { minX, minY, maxX, maxY } = getMinMax(raw);
  const pad = 50;
  const x0 = pad, x1 = CSS_WIDTH - pad;
  const y0 = pad, y1 = CSS_HEIGHT - pad;
  data = raw.map(([x, y]) => {
    const mx = convertRange(x, [minX, maxX], [x0, x1]);
    const my = convertRange(y, [maxY, minY], [y0, y1]); // invert Y
    return [clamp(mx, x0, x1), clamp(my, y0, y1)];
  });
};

// Start
animate();
return canvas;

From the evaluation kernel, we need to design a function like Plot that accepts a function, samples it in the provided interval, and pipes the data to the Spark symbol on the frontend:

(* Plot-like helper: samples func over range ({var, min, max}) at 200 steps  *)
(* and submits the resulting point list to the frontend Spark symbol.        *)
(* HoldAll keeps range unevaluated so the symbolic iterator variable can be  *)
(* pulled out with Extract[..., Inactivate].                                 *)
SparkPlot[func_, range_] := With[{
    var = Extract[range,1, Inactivate], (* held iterator symbol, e.g. x *)
    min = range[[2]],
    max = range[[3]]
  },
  Table[{var, func}, {var, min, max, (max-min)/200.0}]
] // FrontSubmit[Spark[#]] &

(* NOTE: attributes take effect at call time, so setting HoldAll after the   *)
(* definition still works; it must be on its own line, not juxtaposed with   *)
(* the definition above.                                                     *)
SetAttributes[SparkPlot, HoldAll]

Here we go:

SparkPlot[Sinc[1.5 x], {x,-10,10}]

On this page