Getting Started
We assume you have already deployed your backend. If not, please follow this tutorial to deploy it with the Outspeed Python SDK.
Let’s create a Voice Bot application using Next.js to connect to the Outspeed backend.
If you haven’t already set up a Next.js app, you can follow the official setup guide to get started.
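If you prefer a quick start, one way to scaffold an App Router project is create-next-app (the project name below is a placeholder; choose TypeScript and Tailwind CSS when prompted, since the examples use both):
npx create-next-app@latest my-voice-bot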
Install the necessary dependencies
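The components below import from the @outspeed/react package. Assuming npm as your package manager, install it with:
npm install @outspeed/react
The markup also relies on Tailwind CSS utility classes, so if you did not enable Tailwind when creating your Next.js app, add it separately.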
Create the WebRTCTakeInput component
Create a new directory _components in the root app directory of your project and add the following file: WebRTCTakeInput.tsx.
This component lets the user select a microphone and enter the function URL of the WebRTC backend.
import React from "react";
import {
RealtimeFunctionURLInput,
RealtimeAudioInput,
RealtimeFormButton,
} from "@outspeed/react";
import { useSearchParams } from "next/navigation";
export type TWebRTCTakeInputProps = {
onSubmit: (config: {
functionURL: string;
audioDeviceId?: string;
videoDeviceId?: string;
}) => void;
};
export function WebRTCTakeInput(props: TWebRTCTakeInputProps) {
const { onSubmit } = props;
const [audioDeviceId, setAudioDeviceId] = React.useState("");
const [videoDeviceId, setVideoDeviceId] = React.useState("");
const queryParams = useSearchParams();
const initialFunctionURL =
queryParams.get("functionURL") || "http://localhost:8080";
const [functionURL, setFunctionURL] = React.useState(initialFunctionURL);
const [isMediaMissing, setIsMediaMissing] = React.useState(false);
const [isFunctionURLMissing, setIsFunctionURLMissing] = React.useState(false);
function handleOnMediaInputChange(kind: "audio" | "video", value: string) {
setIsMediaMissing(false);
switch (kind) {
case "audio":
setAudioDeviceId(value);
break;
case "video":
setVideoDeviceId(value);
break;
}
}
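// Validate that a media device and the function URL are set before calling onSubmit.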
function handleFormSubmit() {
let isFormValid = true;
if (!audioDeviceId && !videoDeviceId) {
setIsMediaMissing(true);
isFormValid = false;
}
if (!functionURL) {
setIsFunctionURLMissing(true);
isFormValid = false;
}
if (!isFormValid) {
return;
}
try {
const config = {
functionURL,
audioDeviceId,
};
onSubmit(config);
} catch (error) {
console.error("Unable to create config", error);
}
}
return (
<div className="space-y-6 max-w-lg relative z-10">
<div className="font-bold text-3xl mb-8">WebRTC</div>
<RealtimeFunctionURLInput
isError={isFunctionURLMissing}
onChange={(e) => {
setIsFunctionURLMissing(false);
setFunctionURL(e.currentTarget.value);
}}
value={functionURL}
description="Once you've deployed your WebRTC backend application, you'll receive a URL. If you are running your backend locally, use http://localhost:8080."
errorMsg={isFunctionURLMissing ? "Function URL is required." : ""}
/>
<RealtimeAudioInput
isError={isMediaMissing}
value={audioDeviceId}
onChange={(value) => handleOnMediaInputChange("audio", value)}
description="Select the microphone you want to use. If you don't see your microphone, make sure it is plugged in."
errorMsg={
isMediaMissing ? "Either audio or video input is required." : ""
}
/>
<RealtimeFormButton onClick={handleFormSubmit}>Run</RealtimeFormButton>
</div>
);
}
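Note that only the audioDeviceId is included in the submitted config; the videoDeviceId state and the "video" branch of handleOnMediaInputChange are there so the same form can presumably be extended with a video input later.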
Create the VoiceBotComponent component
Inside the _components directory, add the following file: VoiceBotComponent.tsx.
This component helps us interact with the Voice Bot and provides a chat interface.
import { RealtimeAudioVisualizer, RealtimeChat, Track } from "@outspeed/react";
import { DataChannel } from "@outspeed/react";
import React from "react";
import { RealtimeAudio } from "@outspeed/react";
export type TVoiceBotComponentProps = {
remoteTrack: Track | null;
localTrack: Track | null;
remoteAudioTrack: Track | null;
localAudioTrack: Track | null;
onCallEndClick: () => void;
dataChannel?: DataChannel<unknown> | null;
title: string;
};
export function VoiceBotComponent(props: TVoiceBotComponentProps) {
const {
localAudioTrack,
remoteAudioTrack,
remoteTrack,
onCallEndClick,
dataChannel,
title,
} = props;
const [isEnabled, setIsEnabled] = React.useState(true);
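// Pause or resume the local microphone and flip the Mute/Unmute label.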
function handleOnToggle() {
if (!localAudioTrack) return;
if (localAudioTrack.isMute()) {
localAudioTrack.resume();
} else {
localAudioTrack.pause();
}
setIsEnabled((prevState) => !prevState);
}
return (
<div className="flex flex-col flex-1 relative max-w-[calc(100vw-32px)] h-full">
{/* Video and Chat Section */}
<div className="flex-1 flex flex-col sm:flex-row items-stretch py-4 space-y-6 sm:space-y-0 sm:space-x-6">
{/* Realtime Audio Visualizer Section */}
<div className="flex-1 flex flex-col sm:flex-row justify-between items-stretch w-full space-y-6 sm:space-y-0 sm:space-x-6">
{!remoteTrack && (
<div className="flex-1 flex flex-col items-center border border-gray-300 p-4">
<div className="w-1/2 h-full">
<RealtimeAudioVisualizer
track={remoteAudioTrack}
threshold={120}
/>
</div>
<RealtimeAudio track={remoteAudioTrack} />
</div>
)}
</div>
{/* Realtime Chat Section */}
{dataChannel && (
<div
className="flex-1 flex flex-col h-full w-1/2"
style={{ height: `${window.innerHeight - 225}px` }}
>
<RealtimeChat
userLabel="You"
avatarLabel="Outspeed"
heading="Messages"
dataChannel={dataChannel}
noMessage="Your conversation will appear here."
/>
</div>
)}
</div>
{/* Call Section */}
<div className="pb-4 flex">
<div className="flex flex-1 p-4 rounded-md">
<div className="flex-1 justify-start items-center space-x-4 hidden sm:flex"></div>
<div className="flex flex-1 space-x-4 justify-center items-center">
<button
className="rounded-md px-4 py-2 flex items-center justify-center bg-red-500 hover:bg-red-600 text-white transition-colors duration-300 border border-red-700"
onClick={onCallEndClick}
>
<span className="text-sm">Disconnect</span>
</button>
<button
className="rounded-md px-4 py-2 flex items-center justify-center bg-blue-500 hover:bg-blue-600 text-white transition-colors duration-300 border border-blue-700"
onClick={handleOnToggle}
>
<span className="text-sm">{isEnabled ? "Mute" : "Unmute"}</span>
</button>
</div>
<div className="flex-1 justify-end items-center hidden sm:flex">
<span className="font-bold text-muted">{title}</span>
</div>
</div>
</div>
</div>
);
}
Update the Home Page
Inside the page.tsx file in the app directory, add the following code:
"use client";
import Image from "next/image";
import { useRouter } from "next/navigation";
import { WebRTCTakeInput } from "./_components/WebRTCTakeInput";
import { Suspense } from "react";
export default function LandingPage() {
const { push } = useRouter();
function onSubmit(
config: {
functionURL: string;
audioDeviceId?: string;
videoDeviceId?: string;
},
targetURL: string
) {
const { functionURL, audioDeviceId } = config;
const sp = new URLSearchParams();
sp.set("functionURL", functionURL);
sp.set("audioDeviceId", audioDeviceId || "not-set");
push(`${targetURL}?${sp.toString()}`);
}
return (
<>
<div className="mt-2 justify-start p-4">
<a href="https://outspeed.com">
<Image
height={40}
width={200}
alt="logo"
src="/outspeed.svg"
className="h-10"
/>
</a>
</div>
<div className="flex w-dvw flex-col items-center md:items-stretch md:flex-row">
<div className="flex-1 flex w-full justify-center bg-background">
<div className="flex-1 flex flex-col max-w-lg justify-center items-center md:px-10 md:max-w-2xl p-4">
<div className="mb-4 p-4 text-red-500 text-lg border border-red-500 rounded">
Please ensure that the app is running and the Function URL is correct.
</div>
<Suspense fallback={<div>Loading search parameters...</div>}>
<WebRTCTakeInput
onSubmit={(config) => onSubmit(config, "/webrtc")}
/>
</Suspense>
</div>
</div>
</div>
</>
);
}
Here we use the WebRTCTakeInput component to collect the user's function URL and audio device ID. On submit, the handler navigates to the /webrtc route with both values passed as query parameters.
Create the webrtc page
Create a new directory webrtc in the root app directory of your project and add the following file: page.tsx.
We add a /webrtc route to handle the WebRTC connection to the backend and display the Voice Bot UI.
"use client";
import React, { Suspense } from "react";
import { useWebRTC } from "@outspeed/react";
import { ConsoleLogger, createConfig } from "@outspeed/react";
import { useRouter, useSearchParams } from "next/navigation";
import { VoiceBotComponent } from "../_components/VoiceBotComponent";
import { ERealtimeConnectionStatus } from "@outspeed/react";
export default function WebRTCApp() {
return (
<Suspense fallback={<div>Loading search parameters...</div>}>
<WebRTCContent />
</Suspense>
);
}
function WebRTCContent() {
const searchparams = useSearchParams();
const { push } = useRouter();
const {
connectionStatus,
response,
connect,
disconnect,
localAudioTrack,
localVideoTrack,
remoteAudioTrack,
remoteVideoTrack,
dataChannel,
} = useWebRTC({
config: createConfig({
functionURL: searchparams.get("functionURL") || "",
audioDeviceId: searchparams.get("audioDeviceId") || undefined,
audioCodec: "opus/48000/2", // Needed for high quality audio
logger: ConsoleLogger.getLogger(),
}),
});
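// Start the WebRTC connection as soon as the component mounts.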
React.useEffect(() => {
connect();
}, [connect]);
function handleDisconnect() {
disconnect();
push("/?query=webrtc");
}
if (connectionStatus === ERealtimeConnectionStatus.Connecting) {
return (
<div className="h-full flex flex-1 justify-center items-center">
Loading...
</div>
);
}
if (connectionStatus === ERealtimeConnectionStatus.Failed) {
return (
<div className="h-full flex flex-1 justify-center items-center">
<div className="flex items-center space-y-4 flex-col">
<h2 className="text-3xl font-light">
Failed to connect.{" "}
{/* eslint-disable-next-line @typescript-eslint/no-explicit-any */}
{(response?.data as any)?.detail || "Please try again."}
</h2>
<details className="max-w-lg overflow-auto">
<summary>See Response</summary>
<pre className="bg-gray-800 text-gray-100 p-4 rounded-lg overflow-x-auto">
<code className="language-js text-sm">
{JSON.stringify(response, undefined, 2)}
</code>
</pre>
</details>
<button
className="inline-flex max-w-24"
onClick={() => window.location.reload()}
>
Refresh
</button>
</div>
</div>
);
}
return (
<div className="h-full flex flex-1">
<div className="flex-1 flex">
<VoiceBotComponent
title="WebRTC Example"
onCallEndClick={handleDisconnect}
localTrack={localVideoTrack}
remoteTrack={remoteVideoTrack}
localAudioTrack={localAudioTrack}
remoteAudioTrack={remoteAudioTrack}
dataChannel={dataChannel}
/>
</div>
</div>
);
}
Run the app
Run the app using the following command:
npm run dev
Or you can use the pre-built version of the app on Vercel: Outspeed Next.js Example
- Update the functionURL with the URL of your deployed WebRTC backend.
- Select the proper audio device.
- Click on the Run button.
Don’t forget to replace the placeholder functionURL with the actual URL of your WebRTC backend.
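Because WebRTCTakeInput reads an initial functionURL from the query string, you can also prefill the form by opening the app with the parameter appended (the backend URL below is a placeholder; the Next.js dev server runs on port 3000 by default):
http://localhost:3000/?functionURL=https://your-backend-url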
For a more in-depth explanation of the components, proceed to the @outspeed/react documentation.