Custom React view for a Home Assistant WebRTC camera
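The gist contains the following TypeScript/React files (filenames inferred from the import paths in the code itself): audioAnalyser.ts (Web Audio frequency analysis that feeds the audio level meters), Camera.tsx (the compound <Camera> component and the canvas-based meter), a doorbell dashboard route that wires the camera to Home Assistant, an index barrel file, state.ts (the WebSocket-signalled RTCPeerConnection wrapper), and the useCameraPan, useMic, usePip, and useWebrtc hooks.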
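// --- audioAnalyser.ts (filename inferred from the "./audioAnalyser" import in useWebrtc) ---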
export class AudioAnalyser {
  context: AudioContext;
  analyser: AnalyserNode;
  source: MediaStreamAudioSourceNode;
  animationFrame?: number;
  freqDomainData: Uint8Array<ArrayBuffer>;
  setSpectrum: (spectrum: number[]) => void;
  previousSpectrum: number[];
  blendingSmoothness: number;

  static tryCreate(
    stream: MediaStream,
    setSpectrum: (spectrum: number[]) => void,
    smoothingTimeConstant = 0.7,
    blendingSmoothness = 0.6,
  ): AudioAnalyser | undefined {
    const contextCtor = window.AudioContext || window.webkitAudioContext;
    if (!contextCtor) {
      console.warn("Web Audio API unavailable: skipping remote audio meter");
      return;
    }
    return new AudioAnalyser(
      new contextCtor(),
      stream,
      smoothingTimeConstant,
      setSpectrum,
      blendingSmoothness,
    );
  }

  private constructor(
    context: AudioContext,
    stream: MediaStream,
    smoothingTimeConstant: number,
    setSpectrum: (spectrum: number[]) => void,
    blendingSmoothness: number,
  ) {
    const analyser = context.createAnalyser();
    analyser.fftSize = 512;
    analyser.smoothingTimeConstant = smoothingTimeConstant;
    const source = context.createMediaStreamSource(stream);
    source.connect(analyser);
    this.context = context;
    this.analyser = analyser;
    this.source = source;
    this.freqDomainData = new Uint8Array(analyser.frequencyBinCount);
    this.setSpectrum = setSpectrum;
    this.previousSpectrum = createSpectrumBands();
    this.blendingSmoothness = blendingSmoothness;
    this.start();
  }

  private start() {
    // if it's closed, don't do anything
    if (this.context.state === "closed") {
      return;
    }
    // if it's suspended, resume and defer start
    if (this.context.state === "suspended") {
      this.context
        .resume()
        .then(() => this.start())
        .catch((err) => {
          console.warn("Failed to resume audio context:", err);
          this.start();
        });
      return;
    }
    this.animationFrame = requestAnimationFrame(this.update.bind(this));
  }

  public stop() {
    if (this.animationFrame !== undefined) {
      cancelAnimationFrame(this.animationFrame);
    }
    this.setSpectrum?.(createSpectrumBands());
    try {
      this.source.disconnect();
    } catch {}
    try {
      this.analyser.disconnect();
    } catch {}
    this.context?.close().catch(() => {});
  }

  update() {
    this.analyser?.getByteFrequencyData(this.freqDomainData);
    const spectrum = this.sampleFrequencyBands(
      this.freqDomainData,
      this.context.sampleRate,
    );
    const blended = this.blendSpectrum(
      this.previousSpectrum,
      spectrum,
      this.blendingSmoothness,
    );
    this.previousSpectrum = blended;
    this.setSpectrum?.(blended);
    this.animationFrame = requestAnimationFrame(this.update.bind(this));
  }

  private sampleFrequencyBands(data: Uint8Array, sampleRate: number) {
    const nyquist = sampleRate / 2;
    const binWidth = sampleRate / (data.length * 2);
    const samples: number[] = [];
    for (const band of AUDIO_METER_BANDS) {
      const cappedMax = Math.min(band.max, nyquist);
      if (cappedMax <= band.min) {
        samples.push(0);
        continue;
      }
      const startBin = Math.max(0, Math.floor(band.min / binWidth));
      const endBin = Math.min(data.length - 1, Math.ceil(cappedMax / binWidth));
      if (endBin < startBin) {
        samples.push(0);
        continue;
      }
      let sum = 0;
      let count = 0;
      for (let i = startBin; i <= endBin; i += 1) {
        sum += data[i];
        count += 1;
      }
      const average = count > 0 ? sum / (count * 255) : 0;
      samples.push(Math.min(1, Math.max(0, average)));
    }
    return samples;
  }

  private blendSpectrum(prev: number[], next: number[], smoothing: number) {
    if (!prev.length) return next;
    const length = Math.max(prev.length, next.length);
    const blended: number[] = new Array(length);
    for (let i = 0; i < length; i += 1) {
      const previous = prev[i] ?? 0;
      const current = next[i] ?? 0;
      blended[i] = previous * smoothing + current * (1 - smoothing);
    }
    return blended;
  }
}

export function createSpectrumBands() {
  return new Array(AUDIO_METER_BANDS.length).fill(0);
}

const AUDIO_METER_BANDS = [
  { min: 80, max: 95 },
  { min: 95, max: 110 },
  { min: 110, max: 130 },
  { min: 130, max: 160 },
  { min: 160, max: 190 },
  { min: 190, max: 225 },
  { min: 225, max: 265 },
  { min: 265, max: 315 },
  { min: 315, max: 370 },
  { min: 370, max: 440 },
  { min: 440, max: 520 },
  { min: 520, max: 620 },
  { min: 620, max: 740 },
  { min: 740, max: 880 },
  { min: 880, max: 1050 },
  { min: 1050, max: 1250 },
  { min: 1250, max: 1500 },
  { min: 1500, max: 1800 },
  { min: 1800, max: 2150 },
  { min: 2150, max: 2550 },
  { min: 2550, max: 3050 },
  { min: 3050, max: 3650 },
  { min: 3650, max: 4350 },
  { min: 4350, max: 5200 },
  { min: 5200, max: 6200 },
  { min: 6200, max: 7400 },
  { min: 7400, max: 8800 },
  { min: 8800, max: 10500 },
  { min: 10500, max: 12500 },
  { min: 12500, max: 15000 },
  { min: 15000, max: 18000 },
  { min: 18000, max: 20000 },
] as const;
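// --- Camera.tsx (filename inferred from the "./Camera" re-export in the barrel file) ---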
import { Loader2 } from "lucide-react";
import {
  createContext,
  type ReactNode,
  useContext,
  useEffect,
  useMemo,
  useRef,
  useState,
} from "react";
import { cn } from "@/lib/utils";
import { useCameraPan } from "./useCameraPan";
import { usePip } from "./usePip";
import { useWebrtc } from "./useWebrtc";

type CameraContextValue = {
  mic: {
    enabled: boolean;
    setEnabled: (enabled: boolean) => void;
    supported: boolean;
  };
  pip: {
    active: boolean;
    toggle: () => void;
    supported: boolean;
  };
  audioSpectrums: {
    in: number[];
    out: number[];
  };
  muted: boolean;
  setMuted: React.Dispatch<React.SetStateAction<boolean>>;
  hasStream: boolean;
  audioRequested: boolean;
  micRequested: boolean;
  pipRequested: boolean;
};

const CameraContext = createContext<CameraContextValue | null>(null);

export function useCameraContext() {
  const context = useContext(CameraContext);
  if (!context) {
    throw new Error("Camera compound components must be used within Camera");
  }
  return context;
}

type Props = {
  camera: string;
  className?: string;
  children?: ReactNode;
  audio?: boolean;
  mic?: boolean;
  pip?: boolean;
  panning?: boolean;
};

function CameraRoot({
  camera,
  className,
  children,
  audio: audioRequested = true,
  mic: micRequested = false,
  pip: pipRequested = false,
  panning: panningRequested = false,
}: Props) {
  const { videoRef, stream, mic, audioSpectrums } = useWebrtc({
    camera,
    micRequested,
  });
  const panSurfaceRef = useRef<HTMLDivElement>(null);
  const [muted, setMuted] = useState(true);
  const hasStream = Boolean(stream);
  const pip = usePip({
    enabled: pipRequested,
    videoRef,
  });
  useEffect(() => {
    if (!videoRef.current) return;
    videoRef.current.muted = muted;
  }, [muted, videoRef]);
  useCameraPan({
    videoRef,
    surfaceRef: panSurfaceRef,
    enabled: panningRequested,
  });
  const contextValue = useMemo(
    () => ({
      mic,
      pip,
      audioSpectrums,
      muted,
      setMuted,
      hasStream,
      audioRequested,
      micRequested,
      pipRequested,
    }),
    [
      mic,
      pip,
      audioSpectrums,
      muted,
      hasStream,
      audioRequested,
      micRequested,
      pipRequested,
    ],
  );
  return (
    <CameraContext.Provider value={contextValue}>
      <div
        ref={panSurfaceRef}
        className={cn(
          "relative flex h-full min-h-0 w-full min-w-0 flex-1 overflow-hidden rounded-md bg-primary",
          className,
        )}
      >
        {/** biome-ignore-start lint/a11y/useMediaCaption: no captions available */}
        <video
          ref={videoRef}
          autoPlay
          className={cn(
            "h-full w-full object-cover transition-opacity duration-200",
            hasStream ? "opacity-100" : "opacity-0",
          )}
          controls={false}
          playsInline
        />
        {/** biome-ignore-end lint/a11y/useMediaCaption: no captions available */}
        {!hasStream && (
          <div className="pointer-events-none absolute inset-0 flex items-center justify-center bg-black/60">
            <Loader2 className="h-8 w-8 animate-spin text-white" />
          </div>
        )}
        {children}
      </div>
    </CameraContext.Provider>
  );
}

type OverlayProps = {
  children?: ReactNode;
};

function CameraOverlay({ children }: OverlayProps) {
  const { hasStream } = useCameraContext();
  return (
    <div className="pointer-events-none absolute inset-0 flex flex-col justify-end">
      {hasStream && (
        <div className="pointer-events-none relative">
          <div className="h-28" />
          <div
            className="pointer-events-auto absolute inset-x-0 bottom-0 flex items-end justify-end gap-4 px-5 pb-4"
            data-camera-pan-ignore
          >
            {children}
          </div>
        </div>
      )}
    </div>
  );
}

export const Camera = Object.assign(CameraRoot, {
  Overlay: CameraOverlay,
});

function getTailwindColor(className: string): string {
  const tempEl = document.createElement("div");
  tempEl.className = className;
  tempEl.style.position = "absolute";
  tempEl.style.visibility = "hidden";
  document.body.appendChild(tempEl);
  const color = getComputedStyle(tempEl).backgroundColor;
  document.body.removeChild(tempEl);
  return color;
}

type AudioMeterProps = {
  levels: number[] | (() => number[]);
  muted?: boolean;
  colorClass?: string;
};

type AudioButtonWithMeterProps = AudioMeterProps & {
  children: ReactNode;
};

export function AudioButtonWithMeter({
  levels,
  muted = false,
  colorClass,
  children,
}: AudioButtonWithMeterProps) {
  return (
    <div className="flex flex-col items-center">
      <AudioMeterCanvas levels={levels} muted={muted} colorClass={colorClass} />
      {children}
    </div>
  );
}

function AudioMeterCanvas({
  levels,
  muted = false,
  colorClass = "bg-white",
}: AudioMeterProps) {
  const canvasRef = useRef<HTMLCanvasElement>(null);
  const frameRef = useRef<number>(null);
  const levelsRef = useRef(levels);
  const colorRef = useRef<string>(getTailwindColor(colorClass));
  useEffect(() => {
    levelsRef.current = levels;
  });
  useEffect(() => {
    colorRef.current = getTailwindColor(colorClass);
  }, [colorClass]);
  useEffect(() => {
    const canvas = canvasRef.current;
    if (!canvas) return undefined;
    const ctx = canvas.getContext("2d");
    if (!ctx) return undefined;
    const resizeToDevicePixels = () => {
      const rect = canvas.getBoundingClientRect();
      const dpr = window.devicePixelRatio || 1;
      const width = Math.max(1, Math.round(rect.width * dpr));
      const height = Math.max(1, Math.round(rect.height * dpr));
      if (canvas.width !== width || canvas.height !== height) {
        canvas.width = width;
        canvas.height = height;
      }
      ctx.setTransform(dpr, 0, 0, dpr, 0, 0);
      ctx.clearRect(0, 0, rect.width, rect.height);
    };
    resizeToDevicePixels();
    window.addEventListener("resize", resizeToDevicePixels);
    const render = () => {
      const rect = canvas.getBoundingClientRect();
      const dpr = window.devicePixelRatio || 1;
      const requiredWidth = Math.max(1, Math.round(rect.width * dpr));
      const requiredHeight = Math.max(1, Math.round(rect.height * dpr));
      if (canvas.width !== requiredWidth || canvas.height !== requiredHeight) {
        resizeToDevicePixels();
      } else {
        ctx.setTransform(dpr, 0, 0, dpr, 0, 0);
      }
      const width = rect.width;
      const height = rect.height;
      ctx.clearRect(0, 0, width, height);
      const currentLevels = levelsRef.current;
      const levelsData =
        typeof currentLevels === "function" ? currentLevels() : currentLevels;
      const data = Array.isArray(levelsData) ? levelsData : [];
      const barCount = data.length || 1;
      data.forEach((value, index) => {
        const clamped = Number.isFinite(value)
          ? Math.min(1, Math.max(0, value))
          : 0;
        const barHeight = Math.max(0, clamped ** 0.6 * height);
        const startRatio = index / barCount;
        const endRatio = (index + 1) / barCount;
        const x = Math.round(startRatio * width);
        const nextX =
          index === barCount - 1
            ? Math.ceil(width)
            : Math.ceil(endRatio * width + 1);
        const w = Math.max(1, nextX - x);
        const y = height - barHeight;
        ctx.fillStyle = colorRef.current;
        ctx.fillRect(x, y, w, barHeight);
      });
      frameRef.current = requestAnimationFrame(render);
    };
    frameRef.current = requestAnimationFrame(render);
    return () => {
      if (frameRef.current) {
        cancelAnimationFrame(frameRef.current);
      }
      window.removeEventListener("resize", resizeToDevicePixels);
    };
  }, []);
  return (
    <canvas
      ref={canvasRef}
      className={cn("h-14 w-14", muted && "opacity-45")}
      style={{ display: "block" }}
    />
  );
}
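// --- doorbell dashboard route (exact file name not shown; registers "/_l/dashboards/doorbell" via TanStack Router) ---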
import { useDevice, useHass } from "@hakit/core";
import { createFileRoute } from "@tanstack/react-router";
import {
  Key,
  Mic,
  MicOff,
  Phone,
  PictureInPicture,
  PictureInPicture2,
  Volume2,
  VolumeX,
} from "lucide-react";
import type { ComponentProps } from "react";
import { useCallback, useState } from "react";
import { Button } from "@/components/ui/button";
import {
  AudioButtonWithMeter,
  Camera,
  useCameraContext,
} from "@/components/Webrtc";
import { cn } from "@/lib/utils";

export const Route = createFileRoute("/_l/dashboards/doorbell")({
  component: Component,
});

function Component() {
  return (
    <div className="flex h-full min-h-0 w-full">
      <Camera
        camera="doorbell"
        className="h-full min-h-0 w-full min-w-0 rounded-none"
        mic
        pip
        panning
      >
        <Camera.Overlay>
          <DoorbellOverlayContent />
        </Camera.Overlay>
      </Camera>
    </div>
  );
}

function DoorbellOverlayContent() {
  const { callService } = useHass();
  const doorbell = useDevice("binary_sensor.doorbell_door_status");
  const [callActive, setCallActive] = useState(false);
  const {
    mic,
    pip,
    audioSpectrums,
    muted,
    setMuted,
    hasStream,
    audioRequested,
    micRequested,
    pipRequested,
  } = useCameraContext();
  const deviceId = doorbell?.device_id || undefined;
  const triggerDoor = useCallback(() => {
    if (!deviceId) return;
    callService({
      domain: "dahua",
      service: "vto_open_door",
      target: { device_id: deviceId },
      serviceData: { door_id: 1 },
    });
  }, [deviceId, callService]);
  const answerCall = useCallback(() => {
    setMuted(false); // Unmute incoming audio from doorbell
    mic.setEnabled(true); // Enable outgoing mic
    setCallActive(true);
  }, [setMuted, mic]);
  const endCall = useCallback(() => {
    setMuted(true); // Mute incoming audio from doorbell
    mic.setEnabled(false); // Disable outgoing mic
    setCallActive(false); // Go back to initial state
  }, [setMuted, mic]);
  // Stable function references for audio spectrum getters
  const getOutSpectrum = useCallback(
    () => audioSpectrums.out,
    [audioSpectrums],
  );
  const getInSpectrum = useCallback(() => audioSpectrums.in, [audioSpectrums]);
  return (
    <>
      {/* Lock/Door trigger button - always visible */}
      <OverlayButton
        size="icon-2xl"
        className="rounded-md"
        onClick={triggerDoor}
      >
        <Key className="size-6" />
      </OverlayButton>
      {/* Camera controls */}
      {pipRequested && pip.supported && (
        <Button
          type="button"
          size="icon-2xl"
          className="rounded-md bg-overlay text-overlay-foreground hover:bg-gray-800"
          variant={pip.active ? "secondary" : "default"}
          onClick={pip.toggle}
          title={
            pip.active ? "Exit picture-in-picture" : "Enter picture-in-picture"
          }
          aria-pressed={pip.active}
          aria-label={
            pip.active ? "Exit picture-in-picture" : "Enter picture-in-picture"
          }
          disabled={!hasStream}
        >
          {pip.active ? (
            <PictureInPicture2 className="size-6" />
          ) : (
            <PictureInPicture className="size-6" />
          )}
        </Button>
      )}
      {callActive && audioRequested && micRequested && (
        <AudioButtonWithMeter
          levels={getOutSpectrum}
          muted={!mic.enabled}
          colorClass={mic.enabled ? "bg-white" : "bg-overlay"}
        >
          <Button
            type="button"
            size="icon-2xl"
            className={cn(
              "rounded-none rounded-b-md",
              mic.enabled
                ? "bg-white text-black hover:bg-gray-200"
                : "bg-overlay text-overlay-foreground hover:bg-gray-800",
            )}
            onClick={() => mic.setEnabled(!mic.enabled)}
            title={mic.enabled ? "Mute microphone" : "Enable microphone"}
            aria-pressed={mic.enabled}
            aria-label={mic.enabled ? "Mute microphone" : "Enable microphone"}
            disabled={!mic.supported}
          >
            {mic.enabled ? (
              <Mic className="size-6" />
            ) : (
              <MicOff className="size-6" />
            )}
          </Button>
        </AudioButtonWithMeter>
      )}
      {callActive && audioRequested && (
        <AudioButtonWithMeter
          levels={getInSpectrum}
          muted={muted}
          colorClass={!muted ? "bg-white" : "bg-overlay"}
        >
          <Button
            type="button"
            size="icon-xl"
            className={cn(
              "rounded-none rounded-b-md",
              !muted
                ? "bg-white text-black hover:bg-gray-200"
                : "bg-overlay text-overlay-foreground hover:bg-gray-800",
            )}
            onClick={() => setMuted((current) => !current)}
            title={muted ? "Unmute" : "Mute"}
            aria-pressed={!muted}
            aria-label={muted ? "Unmute audio" : "Mute audio"}
          >
            {muted ? (
              <VolumeX className="size-6" />
            ) : (
              <Volume2 className="size-6" />
            )}
          </Button>
        </AudioButtonWithMeter>
      )}
      {/* Accept call button - only visible when call is not active */}
      {!callActive && (
        <OverlayButton
          size="icon-2xl"
          className="rounded-md bg-green-700 hover:bg-green-800"
          onClick={answerCall}
        >
          <Phone className="size-6" />
        </OverlayButton>
      )}
      {/* Disconnect call button - only visible when call is active */}
      {callActive && (
        <OverlayButton
          size="icon-2xl"
          className="rounded-md bg-red-700 hover:bg-red-800"
          onClick={endCall}
        >
          <Phone className="size-6" />
        </OverlayButton>
      )}
    </>
  );
}

type OverlayButtonProps = ComponentProps<typeof Button>;

function OverlayButton({ className, ...props }: OverlayButtonProps) {
  return (
    <Button
      type="button"
      size="icon-xl"
      className={cn(
        "bg-overlay text-overlay-foreground hover:bg-gray-800",
        className,
      )}
      {...props}
    />
  );
}
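// --- index barrel file (assumed index.ts; re-exports the Camera component) ---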
export {
  AudioButtonWithMeter,
  Camera,
  useCameraContext,
} from "./Camera";
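// --- state.ts (filename inferred from the "./state" import in useWebrtc) ---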
import { HASS_HOST } from "@/config";

const { MODE, VITE_TURN_IP } = import.meta.env;

const iceServers =
  MODE === "development"
    ? [
        {
          urls: `turn:${VITE_TURN_IP}:3478`,
          username: "user",
          credential: "pass",
        },
      ]
    : [
        { urls: "stun:stun.l.google.com:19302" },
        { urls: `stun:${HASS_HOST}:3478` },
      ];

export class WebRTC {
  private pc: RTCPeerConnection;
  private ws: WebSocket;
  private pendingMicTrack?: MediaStreamTrack;
  private audioTransceiver?: RTCRtpTransceiver;
  private micStatsTimer?: number;
  private micConnectionListenerRegistered = false;

  constructor(wsUrl: string) {
    this.ws = new WebSocket(wsUrl);
    this.pc = new RTCPeerConnection({
      iceServers,
      bundlePolicy: "max-bundle",
    });
    this.setupListeners();
  }

  public onIncomingStream(cb: (stream: MediaStream) => void) {
    // biome-ignore lint/suspicious/noExplicitAny: not a problem
    this.pc.addEventListener("addstream", (ev: any) => cb(ev.stream));
  }

  setupListeners() {
    this.ws.onopen = () => {
      this.createOffer().then((offer) => {
        this.ws.send(
          JSON.stringify({ type: "webrtc/offer", value: offer.sdp }),
        );
      });
    };
    this.ws.onmessage = async (ev) => {
      const msg = JSON.parse(ev.data);
      switch (msg.type) {
        case "webrtc/candidate":
          if (!msg.value) {
            this.pc.addIceCandidate(null).catch((err) => console.warn(err));
            break;
          }
          {
            const candidateInit: RTCIceCandidateInit =
              typeof msg.value === "string"
                ? { candidate: msg.value }
                : { ...msg.value };
            if (
              candidateInit.sdpMid == null &&
              candidateInit.sdpMLineIndex == null
            ) {
              candidateInit.sdpMid = "0";
            }
            this.pc
              .addIceCandidate(candidateInit)
              .catch((err) => console.warn(err));
          }
          break;
        case "webrtc/answer":
          this.pc
            .setRemoteDescription({ type: "answer", sdp: msg.value })
            .catch((err) => console.warn(err));
          break;
        case "error":
          console.error("WebRTC error from server:", msg);
          if (msg.value.indexOf("webrtc/offer") < 0) return;
          this.pc.close();
          break;
      }
    };
    this.pc.onconnectionstatechange = () => {};
    this.pc.onicecandidate = (ev) => {
      if (this.ws.readyState !== WebSocket.OPEN) return;
      this.ws.send(
        JSON.stringify({
          type: "webrtc/candidate",
          value: ev.candidate ? ev.candidate.toJSON() : null,
        }),
      );
    };
  }

  async createOffer() {
    this.pc.addTransceiver("video", { direction: "recvonly" });
    if (!this.audioTransceiver) {
      this.audioTransceiver = this.pc.addTransceiver("audio", {
        direction: "sendrecv",
      });
    }
    this.applyPendingMicTrack();
    const offer = await this.pc.createOffer();
    await this.pc.setLocalDescription(offer);
    return offer;
  }

  public setMicTrack(track?: MediaStreamTrack) {
    this.pendingMicTrack = track;
    this.applyPendingMicTrack();
  }

  private applyPendingMicTrack() {
    if (!this.audioTransceiver) {
      return;
    }
    const nextTrack = this.pendingMicTrack ?? null;
    this.audioTransceiver.sender
      .replaceTrack(nextTrack)
      .catch((err) =>
        console.warn("Failed to attach mic track to WebRTC:", err),
      );
    if (nextTrack && !this.micConnectionListenerRegistered) {
      this.micConnectionListenerRegistered = true;
      this.pc.addEventListener("connectionstatechange", () => {
        if (
          this.pc.connectionState === "closed" ||
          this.pc.connectionState === "failed" ||
          this.pc.connectionState === "disconnected"
        ) {
          if (this.micStatsTimer) {
            clearInterval(this.micStatsTimer);
            this.micStatsTimer = undefined;
          }
        }
      });
    }
  }

  public close() {
    if (this.micStatsTimer) {
      clearInterval(this.micStatsTimer);
      this.micStatsTimer = undefined;
    }
    try {
      this.ws.close();
    } catch {}
    try {
      this.pc.close();
    } catch {}
  }
}
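// --- useCameraPan.ts (filename inferred from the "./useCameraPan" import in Camera.tsx) ---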
import { type RefObject, useEffect } from "react";

type UseCameraPanOptions = {
  videoRef: RefObject<HTMLVideoElement | null>;
  surfaceRef?: RefObject<HTMLElement | null>;
  enabled?: boolean;
};

export function useCameraPan({
  videoRef,
  surfaceRef,
  enabled = false,
}: UseCameraPanOptions) {
  useEffect(() => {
    if (typeof window === "undefined") {
      return;
    }
    const video = videoRef.current;
    const surface = surfaceRef?.current ?? video;
    const resetStyles = (_reason: string) => {
      const currentVideo = videoRef.current;
      if (!currentVideo) return;
      currentVideo.style.removeProperty("object-position");
      currentVideo.style.removeProperty("will-change");
      currentVideo.style.removeProperty("transform");
      currentVideo.style.removeProperty("transform-origin");
      const currentSurface = surfaceRef?.current ?? currentVideo;
      currentSurface?.style.removeProperty("touch-action");
    };
    if (!enabled) {
      resetStyles("disabled");
      return;
    }
    if (!video || !surface) return;
    const clamp = (value: number, min: number, max: number) =>
      Math.min(max, Math.max(min, value));
    const baseScale = 1;
    const smoothing = 0.12;
    const current = { x: 0, y: 0 };
    const target = { x: 0, y: 0 };
    let rafId: number | undefined;
    const dragState = {
      active: false,
      pointerId: null as number | null,
      startX: 0,
      startY: 0,
      originX: 0,
      originY: 0,
    };
    const setTarget = (x: number, y: number) => {
      target.x = clamp(x, -1, 1);
      target.y = clamp(y, -1, 1);
    };
    const shouldIgnoreEvent = (event: PointerEvent) => {
      const target = event.target as HTMLElement | null;
      return Boolean(target?.closest("[data-camera-pan-ignore]"));
    };
    const applyDrag = (event: PointerEvent) => {
      const rect = surface.getBoundingClientRect();
      if (rect.width === 0 || rect.height === 0) return;
      const deltaX = event.clientX - dragState.startX;
      const deltaY = event.clientY - dragState.startY;
      const rangeX = rect.width / 2;
      const rangeY = rect.height / 2;
      const nextX = dragState.originX - deltaX / rangeX;
      const nextY = dragState.originY - deltaY / rangeY;
      setTarget(nextX, nextY);
    };
    const handlePointerMove = (event: PointerEvent) => {
      if (event.pointerId === dragState.pointerId) {
        applyDrag(event);
      }
    };
    const stopDragging = () => {
      if (dragState.pointerId !== null) {
        surface.releasePointerCapture?.(dragState.pointerId);
        surface.removeEventListener("pointermove", handlePointerMove);
        surface.removeEventListener("pointerup", stopDragging);
        surface.removeEventListener("pointercancel", stopDragging);
        surface.removeEventListener("pointerleave", stopDragging);
        dragState.active = false;
        dragState.pointerId = null;
      }
    };
    const handlePointerDown = (event: PointerEvent) => {
      if (shouldIgnoreEvent(event)) {
        return;
      }
      if (event.pointerType !== "mouse") {
        event.preventDefault();
      }
      // Stop any existing drag first
      stopDragging();
      dragState.active = true;
      dragState.pointerId = event.pointerId;
      dragState.startX = event.clientX;
      dragState.startY = event.clientY;
      dragState.originX = target.x;
      dragState.originY = target.y;
      surface.setPointerCapture?.(event.pointerId);
      surface.addEventListener("pointermove", handlePointerMove);
      surface.addEventListener("pointerup", stopDragging);
      surface.addEventListener("pointercancel", stopDragging);
      surface.addEventListener("pointerleave", stopDragging);
    };
    const animate = () => {
      current.x += (target.x - current.x) * smoothing;
      current.y += (target.y - current.y) * smoothing;
      const positionX = ((current.x + 1) / 2) * 100;
      const positionY = ((current.y + 1) / 2) * 100;
      if (videoRef.current === video) {
        video.style.transform = `scale(${baseScale})`;
        video.style.objectPosition = `${positionX}% ${positionY}%`;
      }
      rafId = window.requestAnimationFrame(animate);
    };
    video.style.willChange = "transform, object-position";
    video.style.transformOrigin = "center";
    surface.style.touchAction = "none";
    rafId = window.requestAnimationFrame(animate);
    surface.addEventListener("pointerdown", handlePointerDown);
    return () => {
      if (rafId !== undefined) {
        window.cancelAnimationFrame(rafId);
      }
      stopDragging();
      surface.removeEventListener("pointerdown", handlePointerDown);
      resetStyles("cleanup");
    };
  }, [enabled, surfaceRef, videoRef]);
}
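// --- useMic.ts (filename inferred from the "./useMic" import in useWebrtc) ---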
import {
  type RefObject,
  useCallback,
  useEffect,
  useMemo,
  useRef,
  useState,
} from "react";

const defaultConstraints: MediaTrackConstraints = {
  echoCancellation: true,
  noiseSuppression: true,
  autoGainControl: true,
  channelCount: 1,
  advanced: [{ voiceIsolation: true }],
};

type UseMicOptions = {
  requested?: boolean;
  autoDisableOnBackground?: boolean;
  label?: string;
  videoRef?: RefObject<HTMLVideoElement | null>;
  shouldKeepEnabled?: () => boolean;
};

export function useMic({
  requested = false,
  autoDisableOnBackground = false,
  label = "Mic",
  videoRef,
  shouldKeepEnabled,
}: UseMicOptions = {}) {
  const [track, setTrack] = useState<MediaStreamTrack | undefined>();
  const [supported, setSupported] = useState(false);
  const [enabled, setEnabled] = useState(false);
  const enabledRef = useRef(false);
  const autoMutedRef = useRef(false);
  useEffect(() => {
    if (!requested) {
      setTrack(undefined);
      setSupported(false);
      setEnabled(false);
      return;
    }
    if (!navigator?.mediaDevices?.getUserMedia) {
      console.warn("Navigator mediaDevices API unavailable: mic disabled");
      setTrack(undefined);
      setSupported(false);
      setEnabled(false);
      return;
    }
    let cancelled = false;
    let stream: MediaStream | undefined;
    navigator.mediaDevices
      .getUserMedia({ audio: defaultConstraints })
      .then((mediaStream) => {
        if (cancelled) {
          mediaStream.getTracks().forEach((mediaTrack) => {
            mediaTrack.stop();
          });
          return;
        }
        const [mediaTrack] = mediaStream.getAudioTracks();
        if (!mediaTrack) return;
        stream = mediaStream;
        mediaTrack.enabled = false;
        setTrack(mediaTrack);
        setSupported(true);
        setEnabled(false);
      })
      .catch((err) => {
        if (cancelled) return;
        console.warn("Failed to initialize mic for WebRTC:", err);
        setTrack(undefined);
        setSupported(false);
        setEnabled(false);
      });
    return () => {
      cancelled = true;
      stream?.getTracks().forEach((mediaTrack) => {
        mediaTrack.stop();
      });
      setTrack(undefined);
      setSupported(false);
      setEnabled(false);
    };
  }, [requested]);
  const setEnabledState = useCallback(
    (nextEnabled: boolean) => {
      if (!track || track.enabled === nextEnabled) return;
      track.enabled = nextEnabled;
      setEnabled(nextEnabled);
    },
    [track],
  );
  useEffect(() => {
    enabledRef.current = enabled;
  }, [enabled]);
  useEffect(() => {
    if (!autoDisableOnBackground || !requested) {
      autoMutedRef.current = false;
      return;
    }
    const keepEnabled =
      typeof shouldKeepEnabled === "function"
        ? shouldKeepEnabled
        : videoRef
          ? () => document.pictureInPictureElement === videoRef.current
          : () => false;
    const disableTemporarily = (reason: string) => {
      if (keepEnabled()) return;
      if (!enabledRef.current) return;
      console.info(`${label} auto-muted: ${reason}`);
      autoMutedRef.current = true;
      setEnabledState(false);
    };
    const restoreIfNeeded = (reason: string) => {
      if (!autoMutedRef.current) return;
      autoMutedRef.current = false;
      console.info(`${label} restored: ${reason}`);
      setEnabledState(true);
    };
    const handleVisibilityChange = () => {
      if (document.visibilityState === "hidden") {
        disableTemporarily("document hidden");
      } else if (document.visibilityState === "visible") {
        restoreIfNeeded("document visible");
      }
    };
    const handleWindowBlur = () => {
      disableTemporarily("window blur/tab switch");
    };
    const handleWindowFocus = () => {
      restoreIfNeeded("window focus regained");
    };
    document.addEventListener("visibilitychange", handleVisibilityChange);
    window.addEventListener("blur", handleWindowBlur);
    window.addEventListener("focus", handleWindowFocus);
    if (document.visibilityState === "hidden") {
      disableTemporarily("document hidden");
    }
    return () => {
      document.removeEventListener("visibilitychange", handleVisibilityChange);
      window.removeEventListener("blur", handleWindowBlur);
      window.removeEventListener("focus", handleWindowFocus);
    };
  }, [
    autoDisableOnBackground,
    label,
    requested,
    setEnabledState,
    shouldKeepEnabled,
    videoRef,
  ]);
  return useMemo(
    () => ({
      track,
      supported,
      enabled,
      setEnabled: setEnabledState,
      autoMuted: autoMutedRef.current,
    }),
    [track, supported, enabled, setEnabledState],
  );
}
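// --- usePip.ts (filename inferred from the "./usePip" import in Camera.tsx) ---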
import {
  type RefObject,
  useCallback,
  useEffect,
  useMemo,
  useState,
} from "react";

type UsePipOptions = {
  videoRef: RefObject<HTMLVideoElement | null>;
  enabled?: boolean;
};

export function usePip({ videoRef, enabled = false }: UsePipOptions) {
  const [supported, setSupported] = useState(false);
  const [active, setActive] = useState(false);
  useEffect(() => {
    if (!enabled) {
      setSupported(false);
      setActive(false);
      return;
    }
    const video = videoRef.current;
    if (!video) return;
    const isSupported =
      typeof video.requestPictureInPicture === "function" &&
      (typeof document.pictureInPictureEnabled === "boolean"
        ? document.pictureInPictureEnabled
        : true) &&
      typeof document.exitPictureInPicture === "function";
    setSupported(isSupported);
    if (!isSupported) {
      setActive(false);
      return;
    }
    setActive(document.pictureInPictureElement === video);
    const handleEnter = () => setActive(true);
    const handleLeave = () => setActive(false);
    video.addEventListener("enterpictureinpicture", handleEnter);
    video.addEventListener("leavepictureinpicture", handleLeave);
    return () => {
      video.removeEventListener("enterpictureinpicture", handleEnter);
      video.removeEventListener("leavepictureinpicture", handleLeave);
    };
  }, [enabled, videoRef]);
  const toggle = useCallback(async () => {
    if (!enabled || !supported) return;
    const video = videoRef.current;
    if (!video) return;
    try {
      if (document.pictureInPictureElement === video) {
        await document.exitPictureInPicture?.();
      } else {
        await video.requestPictureInPicture?.();
      }
    } catch (error) {
      console.warn("Failed to toggle picture-in-picture:", error);
    }
  }, [enabled, supported, videoRef]);
  return useMemo(
    () => ({
      supported,
      active,
      toggle,
    }),
    [supported, active, toggle],
  );
}
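// --- useWebrtc.ts (filename inferred from the "./useWebrtc" import in Camera.tsx) ---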
import { useQuery } from "@tanstack/react-query";
import { useEffect, useMemo, useRef, useState } from "react";
import { HASS_HOST } from "@/config";
import { useConn } from "@/hooks/useConn";
import { AudioAnalyser, createSpectrumBands } from "./audioAnalyser";
import { WebRTC } from "./state";
import { useMic } from "./useMic";

type UseWebrtcOptions = {
  camera: string;
  micRequested?: boolean;
};

export function useWebrtc({ camera, micRequested }: UseWebrtcOptions) {
  const [stream, setStream] = useState<MediaStream>();
  const video = useRef<HTMLVideoElement | null>(null);
  const webrtc = useRef<WebRTC>(undefined);
  const mic = useMic({
    requested: micRequested,
    autoDisableOnBackground: true,
    label: `WebRTC camera ${camera}`,
    videoRef: video,
  });
  const latestMicTrack = useRef<MediaStreamTrack | undefined>(undefined);
  // audio analysis state - use refs to avoid re-renders on every frame
  const incomingAnalyser = useRef<AudioAnalyser>(undefined);
  const outgoingAnalyser = useRef<AudioAnalyser>(undefined);
  const spectrumInRef = useRef(createSpectrumBands());
  const spectrumOutRef = useRef(createSpectrumBands());
  // fetch webrtc url
  const { data: webrtcUrl } = useWebrtcUrl(camera);
  // create webrtc instance (no mic initially, added later)
  useEffect(() => {
    if (!webrtcUrl) {
      setStream(undefined);
      webrtc.current?.close();
      webrtc.current = undefined;
      return;
    }
    const instance = new WebRTC(webrtcUrl);
    webrtc.current = instance;
    setStream(undefined);
    if (latestMicTrack.current) {
      instance.setMicTrack(latestMicTrack.current);
    }
    instance.onIncomingStream((incomingStream) => {
      setStream(incomingStream);
    });
    return () => {
      webrtc.current?.close();
      webrtc.current = undefined;
    };
  }, [webrtcUrl]);
  // add mic track once available
  useEffect(() => {
    latestMicTrack.current = mic.track ?? undefined;
    webrtc.current?.setMicTrack(mic.track);
  }, [mic.track]);
  // setup mic audio analyser
  useEffect(() => {
    if (!mic.track) return;
    outgoingAnalyser.current = AudioAnalyser.tryCreate(
      new MediaStream([mic.track]),
      (spectrum) => {
        spectrumOutRef.current = spectrum;
      },
      0.7,
      0.6,
    );
    return () => {
      outgoingAnalyser.current?.stop();
    };
  }, [mic.track]);
  // setup incoming audio analyser
  useEffect(() => {
    if (!stream || stream.getAudioTracks().length === 0) return;
    incomingAnalyser.current = AudioAnalyser.tryCreate(
      stream,
      (spectrum) => {
        spectrumInRef.current = spectrum;
      },
      0.75,
      0.6,
    );
    return () => {
      incomingAnalyser.current?.stop();
    };
  }, [stream]);
  // autoplay stream once available
  useEffect(() => {
    if (video.current && stream) {
      video.current.srcObject = stream;
      video.current
        .play()
        .catch((err) => console.warn("Autoplay failed:", err));
    }
  }, [stream]);
  // stop everything when unmounting
  useEffect(
    () => () => {
      webrtc.current?.close();
      incomingAnalyser.current?.stop();
      outgoingAnalyser.current?.stop();
    },
    [],
  );
  const audioSpectrums = useMemo(
    () => ({
      get in() {
        return spectrumInRef.current;
      },
      get out() {
        return spectrumOutRef.current;
      },
    }),
    [],
  );
  return useMemo(
    () => ({
      videoRef: video,
      stream,
      audioSpectrums,
      mic,
    }),
    [stream, audioSpectrums, mic],
  );
}

function useWebrtcUrl(camera: string) {
  const conn = useConn();
  return useQuery({
    queryKey: ["webrtc-stream", camera, conn?.haVersion],
    enabled: Boolean(conn),
    gcTime: 0,
    retry: false,
    refetchOnReconnect: false,
    refetchOnWindowFocus: false,
    queryFn: async () => {
      if (!conn) {
        throw new Error("Missing connection for WebRTC query");
      }
      const signed: { path: string } | undefined =
        await conn.sendMessagePromise({
          type: "auth/sign_path",
          path: "/api/webrtc/ws",
        });
      if (!signed?.path) {
        throw new Error("Failed to obtain signed path for WebRTC");
      }
      return `https://${HASS_HOST}${signed.path}&url=${camera}`;
    },
  });
}