"use client" import * as React from "react" import { cn } from "@/lib/utils" import { ICameraVideoTrack, ILocalVideoTrack, IMicrophoneAudioTrack } from "agora-rtc-sdk-ng" import { useAppSelector, useAppDispatch, VOICE_OPTIONS, VideoSourceType, useIsCompactLayout } from "@/common" import { ITextItem, EMessageType, IChatItem } from "@/types" import { rtcManager, IUserTracks, IRtcUser } from "@/manager" import { setRoomConnected, addChatItem, setVoiceType, setOptions, } from "@/store/reducers/global" import AgentVoicePresetSelect from "@/components/Agent/VoicePresetSelect" import AgentView from "@/components/Agent/View" import Avatar from "@/components/Agent/AvatarTrulience" import MicrophoneBlock from "@/components/Agent/Microphone" import VideoBlock from "@/components/Agent/Camera" import dynamic from "next/dynamic" import ChatCard from "@/components/Chat/ChatCard" let hasInit: boolean = false export default function RTCCard(props: { className?: string }) { const { className } = props const dispatch = useAppDispatch() const options = useAppSelector((state) => state.global.options) const trulienceSettings = useAppSelector((state) => state.global.trulienceSettings) const { userId, channel } = options const [videoTrack, setVideoTrack] = React.useState() const [audioTrack, setAudioTrack] = React.useState() const [screenTrack, setScreenTrack] = React.useState() const [remoteuser, setRemoteUser] = React.useState() const [videoSourceType, setVideoSourceType] = React.useState(VideoSourceType.CAMERA) const useTrulienceAvatar = trulienceSettings.enabled const avatarInLargeWindow = trulienceSettings.avatarDesktopLargeWindow; const isCompactLayout = useIsCompactLayout(); const DynamicChatCard = dynamic(() => import("@/components/Chat/ChatCard"), { ssr: false, }); React.useEffect(() => { if (!options.channel) { return } if (hasInit) { return } init() return () => { if (hasInit) { destory() } } }, [options.channel]) const init = async () => { console.log("[rtc] init") rtcManager.on("localTracksChanged", onLocalTracksChanged) rtcManager.on("textChanged", onTextChanged) rtcManager.on("remoteUserChanged", onRemoteUserChanged) await rtcManager.createCameraTracks() await rtcManager.createMicrophoneAudioTrack() await rtcManager.join({ channel, userId, }) dispatch( setOptions({ ...options, appId: rtcManager.appId ?? "", token: rtcManager.token ?? 
"", }), ) await rtcManager.publish() dispatch(setRoomConnected(true)) hasInit = true } const destory = async () => { console.log("[rtc] destory") rtcManager.off("textChanged", onTextChanged) rtcManager.off("localTracksChanged", onLocalTracksChanged) rtcManager.off("remoteUserChanged", onRemoteUserChanged) await rtcManager.destroy() dispatch(setRoomConnected(false)) hasInit = false } const onRemoteUserChanged = (user: IRtcUser) => { console.log("[rtc] onRemoteUserChanged", user) if (useTrulienceAvatar) { // trulience SDK will play audio in synch with mouth user.audioTrack?.stop(); } if (user.audioTrack) { setRemoteUser(user) } } const onLocalTracksChanged = (tracks: IUserTracks) => { console.log("[rtc] onLocalTracksChanged", tracks) const { videoTrack, audioTrack, screenTrack } = tracks setVideoTrack(videoTrack) setScreenTrack(screenTrack) if (audioTrack) { setAudioTrack(audioTrack) } } const onTextChanged = (text: IChatItem) => { console.log("[rtc] onTextChanged", text) dispatch( addChatItem(text), ) } const onVoiceChange = (value: any) => { dispatch(setVoiceType(value)) } const onVideoSourceTypeChange = async (value: VideoSourceType) => { await rtcManager.switchVideoSource(value) setVideoSourceType(value) } return (
  return (
    <div className={cn("flex h-full flex-col", className)}>
      {/* Scrollable top region (Avatar or ChatCard) */}
      <div className="min-h-0 flex-1 overflow-y-auto">
        {useTrulienceAvatar ? (
          !avatarInLargeWindow ? (
            <Avatar audioTrack={remoteuser?.audioTrack} />
          ) : (
            !isCompactLayout && <DynamicChatCard className="h-full w-full" />
          )
        ) : (
          <AgentView audioTrack={remoteuser?.audioTrack} />
        )}
      </div>
      {/* Bottom region for microphone and video blocks */}
      <div className="w-full space-y-2 px-2">
        <MicrophoneBlock audioTrack={audioTrack} />
        <VideoBlock
          cameraTrack={videoTrack}
          screenTrack={screenTrack}
          videoSourceType={videoSourceType}
          onVideoSourceChange={onVideoSourceTypeChange}
        />
      </div>
    </div>
  )
}