diff --git a/android/build.gradle b/android/build.gradle index 1740c87e..b83071cf 100644 --- a/android/build.gradle +++ b/android/build.gradle @@ -165,9 +165,9 @@ dependencies { implementation "com.facebook.react:react-native:+" implementation fileTree(include: ['*.jar', '*.aar'], dir: 'libs') /// dependencies start - api 'cn.shengwang.rtc:full-sdk:4.6.2' - implementation 'cn.shengwang.rtc:full-screen-sharing:4.6.2' - api 'io.agora.rtc:iris-rtc:4.6.2-dev.16' + implementation 'io.agora.rtc:full-screen-sharing:4.6.2.70' + api 'io.agora.rtc:agora-special-full:4.6.2.70' + api 'io.agora.rtc:iris-rtc:4.6.2-dev.25' /// dependencies end } diff --git a/examples/expo/ios/Podfile b/examples/expo/ios/Podfile index 6bdacd6a..3d7cbc7a 100644 --- a/examples/expo/ios/Podfile +++ b/examples/expo/ios/Podfile @@ -65,6 +65,6 @@ end target 'ScreenShare' do #dependencies start - pod 'ShengwangRtcEngine_iOS', '4.6.2' + pod 'AgoraVideo_Special_iOS', '4.6.2.70' #dependencies end end diff --git a/examples/expo/ios/Podfile.lock b/examples/expo/ios/Podfile.lock index f2feb1e3..29b1b895 100644 --- a/examples/expo/ios/Podfile.lock +++ b/examples/expo/ios/Podfile.lock @@ -1,5 +1,6 @@ PODS: - - AgoraIrisRTC_iOS (4.6.2-dev.16) + - AgoraIrisRTC_iOS2 (4.6.2-dev.25) + - AgoraVideo_Special_iOS (4.6.2.70) - boost (1.84.0) - DoubleConversion (1.1.6) - EXConstants (17.1.7): @@ -1644,7 +1645,8 @@ PODS: - React-RCTFBReactNativeSpec - ReactCommon/turbomodule/core - react-native-agora (4.5.2-build.2-rc.1): - - AgoraIrisRTC_iOS (= 4.6.2-dev.16) + - AgoraIrisRTC_iOS2 (= 4.6.2-dev.25) + - AgoraVideo_Special_iOS (= 4.6.2.70) - DoubleConversion - glog - hermes-engine @@ -1667,7 +1669,6 @@ PODS: - ReactCodegen - ReactCommon/turbomodule/bridging - ReactCommon/turbomodule/core - - ShengwangRtcEngine_iOS (= 4.6.2) - Yoga - react-native-image-tools (0.8.1): - React @@ -2393,43 +2394,6 @@ PODS: - Yoga - RNVectorIcons (9.2.0): - React-Core - - ShengwangInfra_iOS (1.3.5) - - ShengwangRtcEngine_iOS (4.6.2): - - 
ShengwangRtcEngine_iOS/AIAEC (= 4.6.2) - - ShengwangRtcEngine_iOS/AIAECLL (= 4.6.2) - - ShengwangRtcEngine_iOS/AINS (= 4.6.2) - - ShengwangRtcEngine_iOS/AINSLL (= 4.6.2) - - ShengwangRtcEngine_iOS/AudioBeauty (= 4.6.2) - - ShengwangRtcEngine_iOS/ClearVision (= 4.6.2) - - ShengwangRtcEngine_iOS/ContentInspect (= 4.6.2) - - ShengwangRtcEngine_iOS/FaceCapture (= 4.6.2) - - ShengwangRtcEngine_iOS/FaceDetection (= 4.6.2) - - ShengwangRtcEngine_iOS/LipSync (= 4.6.2) - - ShengwangRtcEngine_iOS/ReplayKit (= 4.6.2) - - ShengwangRtcEngine_iOS/RtcBasic (= 4.6.2) - - ShengwangRtcEngine_iOS/SpatialAudio (= 4.6.2) - - ShengwangRtcEngine_iOS/VideoAv1CodecEnc (= 4.6.2) - - ShengwangRtcEngine_iOS/VideoCodecEnc (= 4.6.2) - - ShengwangRtcEngine_iOS/VirtualBackground (= 4.6.2) - - ShengwangRtcEngine_iOS/VQA (= 4.6.2) - - ShengwangRtcEngine_iOS/AIAEC (4.6.2) - - ShengwangRtcEngine_iOS/AIAECLL (4.6.2) - - ShengwangRtcEngine_iOS/AINS (4.6.2) - - ShengwangRtcEngine_iOS/AINSLL (4.6.2) - - ShengwangRtcEngine_iOS/AudioBeauty (4.6.2) - - ShengwangRtcEngine_iOS/ClearVision (4.6.2) - - ShengwangRtcEngine_iOS/ContentInspect (4.6.2) - - ShengwangRtcEngine_iOS/FaceCapture (4.6.2) - - ShengwangRtcEngine_iOS/FaceDetection (4.6.2) - - ShengwangRtcEngine_iOS/LipSync (4.6.2) - - ShengwangRtcEngine_iOS/ReplayKit (4.6.2) - - ShengwangRtcEngine_iOS/RtcBasic (4.6.2): - - ShengwangInfra_iOS (= 1.3.5) - - ShengwangRtcEngine_iOS/SpatialAudio (4.6.2) - - ShengwangRtcEngine_iOS/VideoAv1CodecEnc (4.6.2) - - ShengwangRtcEngine_iOS/VideoCodecEnc (4.6.2) - - ShengwangRtcEngine_iOS/VirtualBackground (4.6.2) - - ShengwangRtcEngine_iOS/VQA (4.6.2) - SocketRocket (0.7.1) - Yoga (0.0.0) - ZXingObjC/Core (3.6.9) @@ -2439,6 +2403,7 @@ PODS: - ZXingObjC/Core DEPENDENCIES: + - AgoraVideo_Special_iOS (= 4.6.2.70) - boost (from `../node_modules/react-native/third-party-podspecs/boost.podspec`) - DoubleConversion (from `../node_modules/react-native/third-party-podspecs/DoubleConversion.podspec`) - EXConstants (from 
`../node_modules/expo-constants/ios`) @@ -2538,14 +2503,12 @@ DEPENDENCIES: - RNScreens (from `../node_modules/react-native-screens`) - RNSVG (from `../node_modules/react-native-svg`) - RNVectorIcons (from `../node_modules/react-native-vector-icons`) - - ShengwangRtcEngine_iOS (= 4.6.2) - Yoga (from `../node_modules/react-native/ReactCommon/yoga`) SPEC REPOS: trunk: - - AgoraIrisRTC_iOS - - ShengwangInfra_iOS - - ShengwangRtcEngine_iOS + - AgoraIrisRTC_iOS2 + - AgoraVideo_Special_iOS - SocketRocket - ZXingObjC @@ -2749,7 +2712,8 @@ EXTERNAL SOURCES: :path: "../node_modules/react-native/ReactCommon/yoga" SPEC CHECKSUMS: - AgoraIrisRTC_iOS: 5ab14bd0b9e140455acfd26296a76c71eac1625f + AgoraIrisRTC_iOS2: 4cb14ca79fe4b2b3135c5dfc9d00ea01aafe817d + AgoraVideo_Special_iOS: 9bd42c487f936f4d335d5630e2632f0b08e214f5 boost: 7e761d76ca2ce687f7cc98e698152abd03a18f90 DoubleConversion: cb417026b2400c8f53ae97020b2be961b59470cb EXConstants: 9d62a46a36eae6d28cb978efcbc68aef354d1704 @@ -2805,7 +2769,7 @@ SPEC CHECKSUMS: React-logger: e6e6164f1753e46d1b7e2c8f0949cd7937eaf31b React-Mapbuffer: 5b4959cbd91e7e8fae42ab0f4b7c25b86fd139a1 React-microtasksnativemodule: 1695ab137281dd03de967b7bbeb4e392601f6432 - react-native-agora: c8629d665f4799ef38c8bce72cc76752b965fb5f + react-native-agora: 75382d8c1f423531481963b588fed4e2cb199945 react-native-image-tools: 88218449791389bbf550a2c475a3b564c8233c8b react-native-safe-area-context: 6863f9e225b541b481514b0f6d51be0867184c2c react-native-slider: 351d1186b07d686b93dad14ce2b474ca62dea0fc @@ -2847,12 +2811,10 @@ SPEC CHECKSUMS: RNScreens: c2e3cc506212228c607b4785b315205e28acbf0f RNSVG: ab2249cc665e5d0b2d30657a766a86c99a649a65 RNVectorIcons: fcc2f6cb32f5735b586e66d14103a74ce6ad61f8 - ShengwangInfra_iOS: 0ae2d429ec428cdbaebb39ac883ec64c5c20f7bb - ShengwangRtcEngine_iOS: f2f2ae79c5909bb51d138bb4ec62c6761c0ab9a3 SocketRocket: d4aabe649be1e368d1318fdf28a022d714d65748 Yoga: adb397651e1c00672c12e9495babca70777e411e ZXingObjC: 
8898711ab495761b2dbbdec76d90164a6d7e14c5 -PODFILE CHECKSUM: e3c472dd0d11eafdc13405fe4e79ab3503198bfd +PODFILE CHECKSUM: 07bb6d369247a2e09b98312a93b890aaf0c33fbc COCOAPODS: 1.16.2 diff --git a/examples/expo/ios/reactnativeagoraexampleexpo.xcodeproj/project.pbxproj b/examples/expo/ios/reactnativeagoraexampleexpo.xcodeproj/project.pbxproj index fa99f328..a81065e7 100644 --- a/examples/expo/ios/reactnativeagoraexampleexpo.xcodeproj/project.pbxproj +++ b/examples/expo/ios/reactnativeagoraexampleexpo.xcodeproj/project.pbxproj @@ -490,36 +490,35 @@ ); inputPaths = ( "${PODS_ROOT}/Target Support Files/Pods-reactnativeagoraexampleexpo/Pods-reactnativeagoraexampleexpo-frameworks.sh", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraIrisRTC_iOS/AgoraRtcWrapper.framework/AgoraRtcWrapper", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangInfra_iOS/aosl.framework/aosl", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/AIAEC/AgoraAiEchoCancellationExtension.framework/AgoraAiEchoCancellationExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/AIAECLL/AgoraAiEchoCancellationLLExtension.framework/AgoraAiEchoCancellationLLExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/AINS/AgoraAiNoiseSuppressionExtension.framework/AgoraAiNoiseSuppressionExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/AINSLL/AgoraAiNoiseSuppressionLLExtension.framework/AgoraAiNoiseSuppressionLLExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/AudioBeauty/AgoraAudioBeautyExtension.framework/AgoraAudioBeautyExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/ClearVision/AgoraClearVisionExtension.framework/AgoraClearVisionExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/ContentInspect/AgoraContentInspectExtension.framework/AgoraContentInspectExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/FaceCapture/AgoraFaceCaptureExtension.framework/AgoraFaceCaptureExtension", - 
"${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/FaceDetection/AgoraFaceDetectionExtension.framework/AgoraFaceDetectionExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/LipSync/AgoraLipSyncExtension.framework/AgoraLipSyncExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/ReplayKit/AgoraReplayKitExtension.framework/AgoraReplayKitExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/RtcBasic/AgoraRtcKit.framework/AgoraRtcKit", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/RtcBasic/Agorafdkaac.framework/Agorafdkaac", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/RtcBasic/Agoraffmpeg.framework/Agoraffmpeg", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/RtcBasic/AgoraSoundTouch.framework/AgoraSoundTouch", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/RtcBasic/video_dec.framework/video_dec", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/SpatialAudio/AgoraSpatialAudioExtension.framework/AgoraSpatialAudioExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/VQA/AgoraVideoQualityAnalyzerExtension.framework/AgoraVideoQualityAnalyzerExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/VideoAv1CodecEnc/AgoraVideoAv1EncoderExtension.framework/AgoraVideoAv1EncoderExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/VideoCodecEnc/AgoraVideoEncoderExtension.framework/AgoraVideoEncoderExtension", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/VideoCodecEnc/video_enc.framework/video_enc", - "${PODS_XCFRAMEWORKS_BUILD_DIR}/ShengwangRtcEngine_iOS/VirtualBackground/AgoraVideoSegmentationExtension.framework/AgoraVideoSegmentationExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraIrisRTC_iOS2/AgoraRtcWrapper.framework/AgoraRtcWrapper", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraAiEchoCancellationExtension.framework/AgoraAiEchoCancellationExtension", + 
"${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraAiEchoCancellationLLExtension.framework/AgoraAiEchoCancellationLLExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraAiNoiseSuppressionExtension.framework/AgoraAiNoiseSuppressionExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraAiNoiseSuppressionLLExtension.framework/AgoraAiNoiseSuppressionLLExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraAudioBeautyExtension.framework/AgoraAudioBeautyExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraClearVisionExtension.framework/AgoraClearVisionExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraContentInspectExtension.framework/AgoraContentInspectExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraFaceCaptureExtension.framework/AgoraFaceCaptureExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraFaceDetectionExtension.framework/AgoraFaceDetectionExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraLipSyncExtension.framework/AgoraLipSyncExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraReplayKitExtension.framework/AgoraReplayKitExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraRtcKit.framework/AgoraRtcKit", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraSoundTouch.framework/AgoraSoundTouch", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraSpatialAudioExtension.framework/AgoraSpatialAudioExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraVideoAv1EncoderExtension.framework/AgoraVideoAv1EncoderExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraVideoEncoderExtension.framework/AgoraVideoEncoderExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraVideoQualityAnalyzerExtension.framework/AgoraVideoQualityAnalyzerExtension", + 
"${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/AgoraVideoSegmentationExtension.framework/AgoraVideoSegmentationExtension", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/Agorafdkaac.framework/Agorafdkaac", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/Agoraffmpeg.framework/Agoraffmpeg", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/aosl.framework/aosl", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/video_dec.framework/video_dec", + "${PODS_XCFRAMEWORKS_BUILD_DIR}/AgoraVideo_Special_iOS/video_enc.framework/video_enc", "${PODS_XCFRAMEWORKS_BUILD_DIR}/hermes-engine/Pre-built/hermes.framework/hermes", ); name = "[CP] Embed Pods Frameworks"; outputPaths = ( "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraRtcWrapper.framework", - "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/aosl.framework", "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraAiEchoCancellationExtension.framework", "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraAiEchoCancellationLLExtension.framework", "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraAiNoiseSuppressionExtension.framework", @@ -532,16 +531,17 @@ "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraLipSyncExtension.framework", "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraReplayKitExtension.framework", "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraRtcKit.framework", - "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/Agorafdkaac.framework", - "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/Agoraffmpeg.framework", "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraSoundTouch.framework", - "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/video_dec.framework", "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraSpatialAudioExtension.framework", - "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraVideoQualityAnalyzerExtension.framework", "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraVideoAv1EncoderExtension.framework", 
"${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraVideoEncoderExtension.framework", - "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/video_enc.framework", + "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraVideoQualityAnalyzerExtension.framework", "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/AgoraVideoSegmentationExtension.framework", + "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/Agorafdkaac.framework", + "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/Agoraffmpeg.framework", + "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/aosl.framework", + "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/video_dec.framework", + "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/video_enc.framework", "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/hermes.framework", ); runOnlyForDeploymentPostprocessing = 0; diff --git a/examples/legacy/ios/Podfile b/examples/legacy/ios/Podfile index 2d442f69..d0868b69 100644 --- a/examples/legacy/ios/Podfile +++ b/examples/legacy/ios/Podfile @@ -41,6 +41,6 @@ end target 'ScreenShare' do #dependencies start - pod 'ShengwangRtcEngine_iOS', '4.6.2' + pod 'AgoraVideo_Special_iOS', '4.6.2.70' #dependencies end end diff --git a/react-native-agora.podspec b/react-native-agora.podspec index a9d1951c..50c6ed83 100644 --- a/react-native-agora.podspec +++ b/react-native-agora.podspec @@ -41,8 +41,8 @@ Pod::Spec.new do |s| end #dependencies start - s.dependency 'ShengwangRtcEngine_iOS', '4.6.2' - s.dependency 'AgoraIrisRTC_iOS', '4.6.2-dev.16' + s.dependency 'AgoraVideo_Special_iOS', '4.6.2.70' + s.dependency 'AgoraIrisRTC_iOS2', '4.6.2-dev.25' #dependencies end s.libraries = 'stdc++' diff --git a/src/AgoraBase.ts b/src/AgoraBase.ts index 704f20b6..72271400 100644 --- a/src/AgoraBase.ts +++ b/src/AgoraBase.ts @@ -8,23 +8,23 @@ import { } from './AgoraMediaBase'; /** - * The channel profile. + * Channel profile. */ export enum ChannelProfileType { /** - * 0: Communication. Use this profile when there are only two users in the channel. 
+ * 0: Communication profile. We recommend using the live broadcasting profile for better audio and video experience. */ ChannelProfileCommunication = 0, /** - * 1: Live streaming. Use this profile when there are more than two users in the channel. + * 1: (Default) Live broadcasting profile. */ ChannelProfileLiveBroadcasting = 1, /** - * 2: Gaming. + * 2: Gaming profile. Deprecated: Use ChannelProfileLiveBroadcasting instead. */ ChannelProfileGame = 2, /** - * Cloud gaming. The scenario is optimized for latency. Use this profile if the use case requires frequent interactions between users. + * 3: Interactive profile. This profile is optimized for low latency. If your scenario involves frequent interactions, we recommend using this profile. Deprecated: Use ChannelProfileLiveBroadcasting instead. */ ChannelProfileCloudGaming = 3, /** @@ -170,7 +170,7 @@ export enum WarnCodeType { /** * Error codes. * - * An error code indicates that the SDK encountered an unrecoverable error that requires application intervention. For example, an error is returned when the camera fails to open, and the app needs to inform the user that the camera cannot be used. + * Error codes indicate that the SDK encountered an unrecoverable error and requires intervention from the application. For example, an error is returned when the camera fails to open, and the app needs to inform the user that the camera cannot be used. */ export enum ErrorCodeType { /** @@ -178,52 +178,50 @@ export enum ErrorCodeType { */ ErrOk = 0, /** - * 1: General error with no classified reason. Try calling the method again. + * 1: A general error (no specific classification). Please retry the method call. */ ErrFailed = 1, /** - * 2: An invalid parameter is used. For example, the specified channel name includes illegal characters. Reset the parameter. + * 2: Invalid parameter set in the method. For example, the specified channel name contains illegal characters. Please reset the parameter. 
*/ ErrInvalidArgument = 2, /** - * 3: The SDK is not ready. Possible reasons include the following: - * The initialization of IRtcEngine fails. Reinitialize the IRtcEngine. - * No user has joined the channel when the method is called. Check the code logic. - * The user has not left the channel when the rate or complain method is called. Check the code logic. - * The audio module is disabled. - * The program is not complete. + * 3: The SDK is not ready. Possible reasons include: IRtcEngine initialization failed. Please reinitialize IRtcEngine. + * The user has not joined a channel when calling the method. Please check the method call logic. + * The user has not left the channel when calling rate or complain. Please check the method call logic. + * The audio module is not enabled. + * The assembly is incomplete. */ ErrNotReady = 3, /** - * 4: The IRtcEngine does not support the request. Possible reasons include the following: - * The built-in encryption mode is incorrect, or the SDK fails to load the external encryption library. Check the encryption mode setting, or reload the external encryption library. + * 4: The current state of IRtcEngine does not support this operation. Possible reasons include: + * When using built-in encryption, the encryption mode is incorrect, or loading the external encryption library failed. Please check if the encryption enumeration value is correct, or reload the external encryption library. */ ErrNotSupported = 4, /** - * 5: The request is rejected. Possible reasons include the following: - * The IRtcEngine initialization fails. Reinitialize the IRtcEngine. - * The channel name is set as the empty string "" when joining the channel. Reset the channel name. - * When the joinChannelEx method is called to join multiple channels, the specified channel name is already in use. Reset the channel name. + * 5: This method call was rejected. Possible reasons include: IRtcEngine initialization failed. Please reinitialize IRtcEngine. 
+ * The channel name was set to an empty string "" when joining the channel. Please reset the channel name. + * In multi-channel scenarios, the specified channel name already exists when calling joinChannelEx to join the channel. Please reset the channel name. */ ErrRefused = 5, /** - * 6: The buffer size is insufficient to store the returned data. + * 6: The buffer size is insufficient to hold the returned data. */ ErrBufferTooSmall = 6, /** - * 7: A method is called before the initialization of IRtcEngine. Ensure that the IRtcEngine object is initialized before using this method. + * 7: The method is called before IRtcEngine is initialized. Please ensure the IRtcEngine object is created and initialized before calling this method. */ ErrNotInitialized = 7, /** - * 8: Invalid state. + * 8: The current state is invalid. */ ErrInvalidState = 8, /** - * 9: Permission to access is not granted. Check whether your app has access to the audio and video device. + * 9: No permission to operate. Please check whether the user has granted the app access to audio and video devices. */ ErrNoPermission = 9, /** - * 10: A timeout occurs. Some API calls require the SDK to return the execution result. This error occurs if the SDK takes too long (more than 10 seconds) to return the result. + * 10: The method call timed out. Some method calls require the SDK to return a result. If the SDK takes too long (over 10 seconds) to process the event, this error occurs. */ ErrTimedout = 10, /** @@ -243,31 +241,31 @@ export enum ErrorCodeType { */ ErrNetDown = 14, /** - * 17: The request to join the channel is rejected. Possible reasons include the following: - * The user is already in the channel. Agora recommends that you use the onConnectionStateChanged callback to see whether the user is in the channel. Do not call this method to join the channel unless you receive the ConnectionStateDisconnected (1) state. 
- * After calling startEchoTest for the call test, the user tries to join the channel without calling stopEchoTest to end the current test. To join a channel, the call test must be ended by calling stopEchoTest. + * 17: Joining the channel was rejected. Possible reasons include: + * The user is already in the channel. It is recommended to determine whether the user is in the channel through the onConnectionStateChanged callback. Do not call this method again to join the channel unless receiving the ConnectionStateDisconnected (1) state. + * The user attempted to join a channel after calling startEchoTest for a call test without first calling stopEchoTest to end the current test. After starting a call test, you must call stopEchoTest to end the test before joining a channel. */ ErrJoinChannelRejected = 17, /** - * 18: Fails to leave the channel. Possible reasons include the following: - * The user has left the channel before calling the leaveChannel method. Stop calling this method to clear this error. - * The user calls the leaveChannel method to leave the channel before joining the channel. In this case, no extra operation is needed. + * 18: Failed to leave the channel. Possible reasons include: + * The user has already left the channel before calling leaveChannel. Just stop calling this method. + * The user called leaveChannel to exit the channel without having joined it. No further action is needed in this case. */ ErrLeaveChannelRejected = 18, /** - * 19: Resources are already in use. + * 19: The resource is already in use and cannot be reused. */ ErrAlreadyInUse = 19, /** - * 20: The request is abandoned by the SDK, possibly because the request has been sent too frequently. + * 20: The SDK aborted the request, possibly due to too many requests. */ ErrAborted = 20, /** - * 21: The IRtcEngine fails to initialize and has crashed because of specific Windows firewall settings. 
+ * 21: Specific firewall settings on Windows caused IRtcEngine initialization to fail and crash. */ ErrInitNetEngine = 21, /** - * 22: The SDK fails to allocate resources because your app uses too many system resources or system resources are insufficient. + * 22: The SDK failed to allocate resources, possibly due to excessive resource usage by the app or system resource exhaustion. */ ErrResourceLimited = 22, /** @@ -275,73 +273,73 @@ export enum ErrorCodeType { */ ErrFuncIsProhibited = 23, /** - * 101: The specified App ID is invalid. Rejoin the channel with a valid App ID. + * 101: Invalid App ID. Please use a valid App ID to rejoin the channel. */ ErrInvalidAppId = 101, /** - * 102: The specified channel name is invalid. A possible reason is that the parameter's data type is incorrect. Rejoin the channel with a valid channel name. + * 102: Invalid channel name. Possible reason is incorrect data type for the parameter. Please use a valid channel name to rejoin the channel. */ ErrInvalidChannelName = 102, /** - * 103: Fails to get server resources in the specified region. Try another region when initializing IRtcEngine. + * 103: Unable to acquire server resources in the current region. Try specifying another region when initializing IRtcEngine. */ ErrNoServerResources = 103, /** - * 109: The current token has expired. Apply for a new token on the server and call renewToken. Deprecated: This enumerator is deprecated. Use ConnectionChangedTokenExpired (9) in the onConnectionStateChanged callback instead. + * 109: The current Token has expired and is no longer valid. Please request a new Token from your server and call renewToken to update it. Deprecated: This enumeration is deprecated. Use ConnectionChangedTokenExpired (9) in the onConnectionStateChanged callback instead. */ ErrTokenExpired = 109, /** - * 110: Invalid token. Typical reasons include the following: - * App Certificate is enabled in Agora Console, but the code still uses App ID for authentication. 
Once App Certificate is enabled for a project, you must use token-based authentication. - * The uid used to generate the token is not the same as the uid used to join the channel. Deprecated: This enumerator is deprecated. Use ConnectionChangedInvalidToken (8) in the onConnectionStateChanged callback instead. + * Deprecated: This enumeration is deprecated. Use ConnectionChangedInvalidToken (8) in the onConnectionStateChanged callback instead. 110: Invalid Token. Common reasons include: + * App certificate is enabled in the console, but App ID + Token authentication is not used. When the App certificate is enabled, Token authentication must be used. + * The uid field used when generating the Token does not match the uid used when the user joins the channel. */ ErrInvalidToken = 110, /** - * 111: The network connection is interrupted. The SDK triggers this callback when it loses connection with the server for more than four seconds after the connection is established. + * 111: Network connection interrupted. After the SDK establishes a connection with the server, it loses network connectivity for more than 4 seconds. */ ErrConnectionInterrupted = 111, /** - * 112: The network connection is lost. Occurs when the SDK cannot reconnect to Agora's edge server 10 seconds after its connection to the server is interrupted. + * 112: Network connection lost. The network is disconnected and the SDK fails to reconnect to the server within 10 seconds. */ ErrConnectionLost = 112, /** - * 113: The user is not in the channel when calling the sendStreamMessage method. + * 113: The user is not in a channel when calling the sendStreamMessage method. */ ErrNotInChannel = 113, /** - * 114: The data size exceeds 1 KB when calling the sendStreamMessage method. + * 114: The data length exceeds 1 KB when calling sendStreamMessage. */ ErrSizeTooLarge = 114, /** - * 115: The data bitrate exceeds 6 KB/s when calling the sendStreamMessage method. 
+ * 115: The data sending rate exceeds the limit (6 KB/s) when calling sendStreamMessage. */ ErrBitrateLimit = 115, /** - * 116: More than five data streams are created when calling the createDataStream method. + * 116: The number of data streams created exceeds the limit (5) when calling createDataStream. */ ErrTooManyDataStreams = 116, /** - * 117: The data stream transmission times out. + * 117: Data stream sending timed out. */ ErrStreamMessageTimeout = 117, /** - * 119: Switching roles fails, try rejoining the channel. + * 119: Failed to switch user role. Please try rejoining the channel. */ ErrSetClientRoleNotAuthorized = 119, /** - * 120: Media streams decryption fails. The user might use an incorrect password to join the channel. Check the entered password, or tell the user to try rejoining the channel. + * 120: Media stream decryption failed. Possibly due to an incorrect key used when the user joined the channel. Please check the key entered when joining the channel, or guide the user to try rejoining. */ ErrDecryptionFailed = 120, /** - * 121: The user ID is invalid. + * 121: Invalid user ID. */ ErrInvalidUserId = 121, /** - * 122: Data streams decryption fails. The user might use an incorrect password to join the channel. Check the entered password, or tell the user to try rejoining the channel. + * 122: Data stream decryption failed. Possibly due to an incorrect key used when the user joined the channel. Please check the key entered when joining the channel, or guide the user to try rejoining. */ ErrDatastreamDecryptionFailed = 122, /** - * 123: The user is banned from the server. + * 123: This user is banned by the server. */ ErrClientIsBannedByServer = 123, /** @@ -353,7 +351,7 @@ export enum ErrorCodeType { */ ErrLicenseCredentialInvalid = 131, /** - * 134: The user account is invalid, possibly because it contains invalid parameters. + * 134: Invalid user account, possibly due to an invalid parameter. 
*/ ErrInvalidUserAccount = 134, /** @@ -361,7 +359,7 @@ export enum ErrorCodeType { */ ErrModuleNotFound = 157, /** - * 1001: The SDK fails to load the media engine. + * 1001: Failed to load the media engine. */ ErrCertRaw = 157, /** @@ -409,11 +407,11 @@ export enum ErrorCodeType { */ ErrCertRequest = 168, /** - * @ignore + * 200: Unsupported PCM format. */ ErrPcmsendFormat = 200, /** - * @ignore + * 201: Buffer overflow due to PCM sending rate being too fast. */ ErrPcmsendBufferoverflow = 201, /** @@ -449,35 +447,35 @@ export enum ErrorCodeType { */ ErrLoadMediaEngine = 1001, /** - * 1005: A general error occurs (no specified reason). Check whether the audio device is already in use by another app, or try rejoining the channel. + * 1005: Audio device error (unspecified). Please check whether the audio device is occupied by another application, or try rejoining the channel. */ ErrAdmGeneralError = 1005, /** - * 1008: An error occurs when initializing the playback device. Check whether the playback device is already in use by another app, or try rejoining the channel. + * 1008: Failed to initialize the playback device. Please check whether the playback device is occupied by another application, or try rejoining the channel. */ ErrAdmInitPlayout = 1008, /** - * 1009: An error occurs when starting the playback device. Check the playback device. + * 1009: Failed to start the playback device. Please check whether the playback device is functioning properly. */ ErrAdmStartPlayout = 1009, /** - * 1010: An error occurs when stopping the playback device. + * 1010: Failed to stop the playback device. */ ErrAdmStopPlayout = 1010, /** - * 1011: An error occurs when initializing the recording device. Check the recording device, or try rejoining the channel. + * 1011: Failed to initialize the recording device. Please check whether the recording device is functioning properly, or try rejoining the channel. 
*/ ErrAdmInitRecording = 1011, /** - * 1012: An error occurs when starting the recording device. Check the recording device. + * 1012: Failed to start the recording device. Please check whether the recording device is functioning properly. */ ErrAdmStartRecording = 1012, /** - * 1013: An error occurs when stopping the recording device. + * 1013: Failed to stop the recording device. */ ErrAdmStopRecording = 1013, /** - * 1501: Permission to access the camera is not granted. Check whether permission to access the camera permission is granted. + * 1501: No permission to use the camera. Please check whether camera permission is enabled. */ ErrVdmCameraNotAuthorized = 1501, } @@ -513,59 +511,59 @@ export enum LicenseErrorType { } /** - * The operation permissions of the SDK on the audio session. + * Permissions for SDK operations on the Audio Session. */ export enum AudioSessionOperationRestriction { /** - * 0: No restriction, the SDK can change the audio session. + * 0: No restriction. The SDK can modify the Audio Session. */ AudioSessionOperationRestrictionNone = 0, /** - * 1: The SDK cannot change the audio session category. + * 1: The SDK cannot modify the category of the Audio Session. */ AudioSessionOperationRestrictionSetCategory = 1, /** - * 2: The SDK cannot change the audio session category, mode, or categoryOptions. + * 2: The SDK cannot modify the category, mode, or categoryOptions of the Audio Session. */ AudioSessionOperationRestrictionConfigureSession = 1 << 1, /** - * 4: The SDK keeps the audio session active when the user leaves the channel, for example, to play an audio file in the background. + * 4: When leaving the channel, the SDK keeps the Audio Session active, such as for playing audio in the background. */ AudioSessionOperationRestrictionDeactivateSession = 1 << 2, /** - * 128: Completely restricts the operation permissions of the SDK on the audio session; the SDK cannot change the audio session. 
+ * 128: Fully restricts the SDK from operating on the Audio Session. The SDK can no longer make any changes to the Audio Session. */ AudioSessionOperationRestrictionAll = 1 << 7, } /** - * Reasons for a user being offline. + * Reason for user going offline. */ export enum UserOfflineReasonType { /** - * 0: The user quits the call. + * 0: The user left voluntarily. */ UserOfflineQuit = 0, /** - * 1: The SDK times out and the user drops offline because no data packet is received within a certain period of time. If the user quits the call and the message is not passed to the SDK (due to an unreliable channel), the SDK assumes the user dropped offline. + * 1: Timed out due to not receiving packets from the peer for a long time. Since the SDK uses an unreliable channel, it is also possible that the peer left the channel voluntarily, but the local side did not receive the leave message and mistakenly determined it as a timeout. */ UserOfflineDropped = 1, /** - * 2: The user switches the client role from the host to the audience. + * 2: The user's role changed from host to audience. */ UserOfflineBecomeAudience = 2, } /** - * The interface class. + * Interface class. */ export enum InterfaceIdType { /** - * 1: The IAudioDeviceManager interface class. + * 1: IAudioDeviceManager interface class. */ AgoraIidAudioDeviceManager = 1, /** - * 2: The IVideoDeviceManager interface class. + * 2: IVideoDeviceManager interface class. */ AgoraIidVideoDeviceManager = 2, /** @@ -573,7 +571,7 @@ export enum InterfaceIdType { */ AgoraIidParameterEngine = 3, /** - * 4: The IMediaEngine interface class. + * 4: IMediaEngine interface class. */ AgoraIidMediaEngine = 4, /** @@ -589,7 +587,7 @@ export enum InterfaceIdType { */ AgoraIidRtcConnection = 7, /** - * @ignore + * 8: This interface class is deprecated. 
*/ AgoraIidSignalingEngine = 8, /** @@ -597,7 +595,7 @@ */ AgoraIidMediaEngineRegulator = 9, /** - * @ignore + * 11: ILocalSpatialAudioEngine interface class. */ AgoraIidLocalSpatialAudio = 11, /** @@ -609,7 +607,7 @@ */ AgoraIidMetaService = 14, /** - * @ignore + * 15: IMusicContentCenter interface class. */ AgoraIidMusicContentCenter = 15, /** @@ -619,35 +617,35 @@ } /** - * Network quality types. + * Network quality. */ export enum QualityType { /** - * 0: The network quality is unknown. + * 0: Unknown network quality. */ QualityUnknown = 0, /** - * 1: The network quality is excellent. + * 1: Excellent network quality. */ QualityExcellent = 1, /** - * 2: The network quality is quite good, but the bitrate may be slightly lower than excellent. + * 2: Subjectively similar to excellent, but bitrate may be slightly lower. */ QualityGood = 2, /** - * 3: Users can feel the communication is slightly impaired. + * 3: Slightly impaired experience but communication is not affected. */ QualityPoor = 3, /** - * 4: Users cannot communicate smoothly. + * 4: Users can barely communicate, and the call is not smooth. */ QualityBad = 4, /** - * 5: The quality is so bad that users can barely communicate. + * 5: Very poor network quality, communication is nearly impossible. */ QualityVbad = 5, /** - * 6: The network is down and users cannot communicate at all. + * 6: Communication is completely impossible. */ QualityDown = 6, /** @@ -655,7 +653,7 @@ */ QualityUnsupported = 7, /** - * 8: The last-mile network probe test is in progress. + * 8: Network quality detection in progress. */ QualityDetecting = 8, } @@ -675,29 +673,29 @@ } /** - * The clockwise rotation of the video. + * Clockwise video rotation information. */ export enum VideoOrientation { /** - * 0: (Default) No rotation. + * 0: (Default) Rotate 0 degrees clockwise. 
*/ VideoOrientation0 = 0, /** - * 90: 90 degrees. + * 90: Rotate 90 degrees clockwise. */ VideoOrientation90 = 90, /** - * 180: 180 degrees. + * 180: Rotate 180 degrees clockwise. */ VideoOrientation180 = 180, /** - * 270: 270 degrees. + * 270: Rotate 270 degrees clockwise. */ VideoOrientation270 = 270, } /** - * The video frame rate. + * Video frame rate. */ export enum FrameRate { /** @@ -751,11 +749,11 @@ export enum FrameHeight { } /** - * The video frame type. + * Video frame type. */ export enum VideoFrameType { /** - * 0: A black frame. + * 0: Blank frame. */ VideoFrameTypeBlankFrame = 0, /** @@ -767,11 +765,11 @@ export enum VideoFrameType { */ VideoFrameTypeDeltaFrame = 4, /** - * 5: The B frame. + * 5: B frame. */ VideoFrameTypeBFrame = 5, /** - * 6: A discarded frame. + * 6: Droppable frame. */ VideoFrameTypeDroppableFrame = 6, /** @@ -781,13 +779,13 @@ export enum VideoFrameType { } /** - * Video output orientation mode. + * Orientation mode for video encoding. */ export enum OrientationMode { /** - * 0: (Default) The output video always follows the orientation of the captured video. The receiver takes the rotational information passed on from the video encoder. This mode applies to scenarios where video orientation can be adjusted on the receiver. - * If the captured video is in landscape mode, the output video is in landscape mode. - * If the captured video is in portrait mode, the output video is in portrait mode. + * 0: (Default) In this mode, the SDK outputs video with the same orientation as the captured video. The receiving end rotates the video based on the received rotation information. This mode is suitable when the receiver can adjust the video orientation. + * If the captured video is in landscape mode, the output video is also in landscape mode. + * If the captured video is in portrait mode, the output video is also in portrait mode. 
*/ OrientationModeAdaptive = 0, /** @@ -801,27 +799,27 @@ export enum OrientationMode { } /** - * Video degradation preferences when the bandwidth is a constraint. + * Video encoding degradation preference when bandwidth is limited. */ export enum DegradationPreference { /** - * -1: (Default) Automatic mode. The SDK will automatically select MaintainFramerate, MaintainBalanced or MaintainResolution based on the video scenario you set, in order to achieve the best overall quality of experience (QoE). + * -1: (Default) Auto mode. The SDK automatically selects MaintainFramerate, MaintainBalanced, or MaintainResolution based on your video scenario settings to achieve optimal overall quality of experience (QoE). */ MaintainAuto = -1, /** - * 0: Prefers to reduce the video frame rate while maintaining video resolution during video encoding under limited bandwidth. This degradation preference is suitable for scenarios where video quality is prioritized. Deprecated: This enumerator is deprecated. Use other enumerations instead. + * 0: When bandwidth is limited, prioritize reducing video frame rate while maintaining resolution. This degradation preference suits scenarios where video quality is prioritized. Deprecated: This enum is deprecated. Use other enums instead. */ MaintainQuality = 0, /** - * 1: Reduces the video resolution while maintaining the video frame rate during video encoding under limited bandwidth. This degradation preference is suitable for scenarios where smoothness is prioritized and video quality is allowed to be reduced. + * 1: When bandwidth is limited, prioritize reducing video resolution while maintaining frame rate. This degradation preference suits scenarios where smoothness is prioritized and some quality loss is acceptable. */ MaintainFramerate = 1, /** - * 2: Reduces the video frame rate and video resolution simultaneously during video encoding under limited bandwidth. 
The MaintainBalanced has a lower reduction than MaintainQuality and MaintainFramerate, and this preference is suitable for scenarios where both smoothness and video quality are a priority. The resolution of the video sent may change, so remote users need to handle this issue. See onVideoSizeChanged. + * 2: When bandwidth is limited, reduce both video frame rate and resolution. The degradation level of MaintainBalanced is lower than that of MaintainQuality and MaintainFramerate, suitable for scenarios with both smoothness and quality constraints. The resolution of the locally sent video may change. The remote user must be able to handle this. See onVideoSizeChanged. */ MaintainBalanced = 2, /** - * 3: Reduces the video frame rate while maintaining the video resolution during video encoding under limited bandwidth. This degradation preference is suitable for scenarios where video quality is prioritized. + * 3: When bandwidth is limited, prioritize reducing video frame rate while maintaining resolution. This degradation preference suits scenarios where video quality is prioritized. */ MaintainResolution = 3, /** @@ -831,33 +829,33 @@ export enum DegradationPreference { } /** - * The video dimension. + * Video dimensions. */ export class VideoDimensions { /** - * The width (pixels) of the video. + * Video width in pixels. */ width?: number; /** - * The height (pixels) of the video. + * Video height in pixels. */ height?: number; } /** - * The highest frame rate supported by the screen sharing device. + * Maximum frame rate supported by the screen sharing device. */ export enum ScreenCaptureFramerateCapability { /** - * 0: The device supports the frame rate of up to 15 fps. + * 0: Supports up to 15 fps. */ ScreenCaptureFramerateCapability15Fps = 0, /** - * 1: The device supports the frame rate of up to 30 fps. + * 1: Supports up to 30 fps. */ ScreenCaptureFramerateCapability30Fps = 1, /** - * 2: The device supports the frame rate of up to 60 fps. 
+ * 2: Supports up to 60 fps. */ ScreenCaptureFramerateCapability60Fps = 2, } @@ -889,11 +887,11 @@ export enum VideoCodecCapabilityLevel { } /** - * Video codec types. + * Video codec format. */ export enum VideoCodecType { /** - * 0: (Default) Unspecified codec format. The SDK automatically matches the appropriate codec format based on the current video stream's resolution and device performance. + * 0: (Default) No specific codec format. The SDK automatically selects a suitable codec based on the video resolution and device performance. */ VideoCodecNone = 0, /** @@ -909,7 +907,7 @@ export enum VideoCodecType { */ VideoCodecH265 = 3, /** - * 6: Generic. This type is used for transmitting raw video data, such as encrypted video frames. The SDK returns this type of video frames in callbacks, and you need to decode and render the frames yourself. + * 6: Generic. This type is mainly used for transmitting raw video data (e.g., user-encrypted video frames). The video frames are returned via callback and need to be decoded and rendered by the user. */ VideoCodecGeneric = 6, /** @@ -925,13 +923,13 @@ export enum VideoCodecType { */ VideoCodecVp9 = 13, /** - * 20: Generic JPEG. This type consumes minimum computing resources and applies to IoT devices. + * 20: Generic JPEG. Requires less computing power and is suitable for IoT devices with limited resources. */ VideoCodecGenericJpeg = 20, } /** - * The camera focal length types. + * Camera focal length type. */ export enum CameraFocalLengthType { /** @@ -943,11 +941,11 @@ export enum CameraFocalLengthType { */ CameraFocalLengthWideAngle = 1, /** - * 2: Ultra-wide-angle lens. + * 2: Ultra wide-angle lens. */ CameraFocalLengthUltraWide = 2, /** - * 3: (For iOS only) Telephoto lens. + * 3: (iOS only) Telephoto lens. */ CameraFocalLengthTelephoto = 3, } @@ -985,7 +983,7 @@ export class SenderOptions { } /** - * The codec type of audio. + * Audio codec format. 
*/ export enum AudioCodecType { /** @@ -1035,61 +1033,61 @@ export enum AudioCodecType { */ export enum AudioEncodingType { /** - * 0x010101: AAC encoding format, 16000 Hz sampling rate, bass quality. A file with an audio duration of 10 minutes is approximately 1.2 MB after encoding. + * 0x010101: AAC encoding format, 16000 Hz sampling rate, low quality. The encoded file size for 10 minutes of audio is approximately 1.2 MB. */ AudioEncodingTypeAac16000Low = 0x010101, /** - * 0x010102: AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio duration of 10 minutes is approximately 2 MB after encoding. + * 0x010102: AAC encoding format, 16000 Hz sampling rate, medium quality. The encoded file size for 10 minutes of audio is approximately 2 MB. */ AudioEncodingTypeAac16000Medium = 0x010102, /** - * 0x010201: AAC encoding format, 32000 Hz sampling rate, bass quality. A file with an audio duration of 10 minutes is approximately 1.2 MB after encoding. + * 0x010201: AAC encoding format, 32000 Hz sampling rate, low quality. The encoded file size for 10 minutes of audio is approximately 1.2 MB. */ AudioEncodingTypeAac32000Low = 0x010201, /** - * 0x010202: AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio duration of 10 minutes is approximately 2 MB after encoding. + * 0x010202: AAC encoding format, 32000 Hz sampling rate, medium quality. The encoded file size for 10 minutes of audio is approximately 2 MB. */ AudioEncodingTypeAac32000Medium = 0x010202, /** - * 0x010203: AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio duration of 10 minutes is approximately 3.5 MB after encoding. + * 0x010203: AAC encoding format, 32000 Hz sampling rate, high quality. The encoded file size for 10 minutes of audio is approximately 3.5 MB. */ AudioEncodingTypeAac32000High = 0x010203, /** - * 0x010302: AAC encoding format, 48000 Hz sampling rate, medium sound quality. 
A file with an audio duration of 10 minutes is approximately 2 MB after encoding. + * 0x010302: AAC encoding format, 48000 Hz sampling rate, medium quality. The encoded file size for 10 minutes of audio is approximately 2 MB. */ AudioEncodingTypeAac48000Medium = 0x010302, /** - * 0x010303: AAC encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration of 10 minutes is approximately 3.5 MB after encoding. + * 0x010303: AAC encoding format, 48000 Hz sampling rate, high quality. The encoded file size for 10 minutes of audio is approximately 3.5 MB. */ AudioEncodingTypeAac48000High = 0x010303, /** - * 0x020101: OPUS encoding format, 16000 Hz sampling rate, bass quality. A file with an audio duration of 10 minutes is approximately 2 MB after encoding. + * 0x020101: OPUS encoding format, 16000 Hz sampling rate, low quality. The encoded file size for 10 minutes of audio is approximately 2 MB. */ AudioEncodingTypeOpus16000Low = 0x020101, /** - * 0x020102: OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio duration of 10 minutes is approximately 2 MB after encoding. + * 0x020102: OPUS encoding format, 16000 Hz sampling rate, medium quality. The encoded file size for 10 minutes of audio is approximately 2 MB. */ AudioEncodingTypeOpus16000Medium = 0x020102, /** - * 0x020302: OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio duration of 10 minutes is approximately 2 MB after encoding. + * 0x020302: OPUS encoding format, 48000 Hz sampling rate, medium quality. The encoded file size for 10 minutes of audio is approximately 2 MB. */ AudioEncodingTypeOpus48000Medium = 0x020302, /** - * 0x020303: OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration of 10 minutes is approximately 3.5 MB after encoding. + * 0x020303: OPUS encoding format, 48000 Hz sampling rate, high quality. 
The encoded file size for 10 minutes of audio is approximately 3.5 MB. */ AudioEncodingTypeOpus48000High = 0x020303, } /** - * The adaptation mode of the watermark. + * Watermark adaptation mode. */ export enum WatermarkFitMode { /** - * 0: Use the positionInLandscapeMode and positionInPortraitMode values you set in WatermarkOptions. The settings in WatermarkRatio are invalid. + * 0: Uses the values of positionInLandscapeMode and positionInPortraitMode set in WatermarkOptions. The settings in WatermarkRatio are ignored. */ FitModeCoverPosition = 0, /** - * 1: Use the value you set in WatermarkRatio. The settings in positionInLandscapeMode and positionInPortraitMode in WatermarkOptions are invalid. + * 1: Uses the value set in WatermarkRatio. The settings of positionInLandscapeMode and positionInPortraitMode in WatermarkOptions are ignored. */ FitModeUseImageRatio = 1, } @@ -1109,11 +1107,11 @@ export class EncodedAudioFrameAdvancedSettings { } /** - * Audio information after encoding. + * Information about encoded audio. */ export class EncodedAudioFrameInfo { /** - * Audio Codec type: AudioCodecType. + * Audio codec type: AudioCodecType. */ codec?: AudioCodecType; /** @@ -1121,19 +1119,19 @@ export class EncodedAudioFrameInfo { */ sampleRateHz?: number; /** - * The number of audio samples per channel. + * Number of audio samples per channel. */ samplesPerChannel?: number; /** - * The number of audio channels. + * Number of channels. */ numberOfChannels?: number; /** - * This function is currently not supported. + * This feature is not supported yet. */ advancedSettings?: EncodedAudioFrameAdvancedSettings; /** - * The Unix timestamp (ms) for capturing the external encoded video frames. + * Unix timestamp (ms) of the captured external encoded video frame. */ captureTimeMs?: number; } @@ -1179,15 +1177,15 @@ export enum H264PacketizeMode { } /** - * The type of video streams. + * Type of video stream. 
*/ export enum VideoStreamType { /** - * 0: High-quality video stream, that is, a video stream with the highest resolution and bitrate. + * 0: High video stream, i.e., high resolution and high bitrate stream. */ VideoStreamHigh = 0, /** - * 1: Low-quality video stream, that is, a video stream with the lowest resolution and bitrate. + * 1: Low video stream, i.e., low resolution and low bitrate stream. */ VideoStreamLow = 1, /** @@ -1217,25 +1215,25 @@ } /** - * Video subscription options. + * Video subscription settings. */ export class VideoSubscriptionOptions { /** - * The video stream type that you want to subscribe to. The default value is VideoStreamHigh, indicating that the high-quality video streams are subscribed. See VideoStreamType. + * Type of video stream to subscribe to. Default is VideoStreamHigh, i.e., subscribe to the high-quality video stream. See VideoStreamType. */ type?: VideoStreamType; /** - * Whether to subscribe to encoded video frames only: true : Subscribe to the encoded video data (structured data) only; the SDK does not decode or render raw video data. false : (Default) Subscribe to both raw video data and encoded video data. + * Whether to subscribe only to the encoded video stream: true : Subscribe only to encoded video data (structured data), the SDK does not decode or render the video. false : (Default) Subscribe to both raw and encoded video data. */ encodedFrameOnly?: boolean; } /** - * The maximum length of the user account. + * Maximum length of the user account. */ export enum MaxUserAccountLengthType { /** - * The maximum length of the user account is 256 bytes. + * The maximum length of the user account is 255 bytes. */ MaxUserAccountLength = 256, } @@ -1245,35 +1243,36 @@ */ export class EncodedVideoFrameInfo { /** - * The codec type of the local video stream. See VideoCodecType. The default value is VideoCodecH264 (2). + * Video codec type. 
See VideoCodecType. Default value is VideoCodecH264 (2). */ codecType?: VideoCodecType; /** - * Width (pixel) of the video frame. + * Width of the video frame (px). */ width?: number; /** - * Height (pixel) of the video frame. + * Height of the video frame (px). */ height?: number; /** - * The number of video frames per second. When this parameter is not 0, you can use it to calculate the Unix timestamp of externally encoded video frames. + * Frames per second. + * When this parameter is not 0, you can use it to calculate the Unix timestamp of the externally encoded video frame. */ framesPerSecond?: number; /** - * The video frame type. See VideoFrameType. + * Type of the video frame. See VideoFrameType. */ frameType?: VideoFrameType; /** - * The rotation information of the video frame. See VideoOrientation. + * Rotation information of the video frame. See VideoOrientation. */ rotation?: VideoOrientation; /** - * Reserved for future use. + * Reserved parameter. */ trackId?: number; /** - * The Unix timestamp (ms) for capturing the external encoded video frames. + * Unix timestamp (ms) when the externally encoded video frame was captured. */ captureTimeMs?: number; /** @@ -1281,7 +1280,7 @@ export class EncodedVideoFrameInfo { */ decodeTimeMs?: number; /** - * The type of video streams. See VideoStreamType. + * Type of the video stream. See VideoStreamType. */ streamType?: VideoStreamType; /** @@ -1291,19 +1290,19 @@ export class EncodedVideoFrameInfo { } /** - * Compression preference for video encoding. + * Compression preference type for video encoding. */ export enum CompressionPreference { /** - * -1: (Default) Automatic mode. The SDK will automatically select PreferLowLatency or PreferQuality based on the video scenario you set to achieve the best user experience. + * -1: (Default) Automatic mode. The SDK automatically selects PreferLowLatency or PreferQuality based on your video scenario to provide the best user experience. 
*/ PreferCompressionAuto = -1, /** - * 0: Low latency preference. The SDK compresses video frames to reduce latency. This preference is suitable for scenarios where smoothness is prioritized and reduced video quality is acceptable. + * 0: Low latency preference. The SDK compresses video frames to reduce latency. Suitable for scenarios where smoothness is prioritized and some quality loss is acceptable. */ PreferLowLatency = 0, /** - * 1: High quality preference. The SDK compresses video frames while maintaining video quality. This preference is suitable for scenarios where video quality is prioritized. + * 1: High quality preference. The SDK compresses video frames while maintaining video quality. Suitable for scenarios where video quality is prioritized. */ PreferQuality = 1, } @@ -1313,15 +1312,15 @@ export enum CompressionPreference { */ export enum EncodingPreference { /** - * -1: Adaptive preference. The SDK automatically selects the optimal encoding type for encoding based on factors such as platform and device type. + * -1: Adaptive preference. The SDK automatically selects the optimal encoder type based on platform, device type, and other factors. */ PreferAuto = -1, /** - * 0: Software coding preference. The SDK prefers software encoders for video encoding. + * 0: Software encoder preference. The SDK prioritizes using the software encoder for video encoding. */ PreferSoftware = 0, /** - * 1: Hardware encoding preference. The SDK prefers a hardware encoder for video encoding. When the device does not support hardware encoding, the SDK automatically uses software encoding and reports the currently used video encoder type through hwEncoderAccelerating in the onLocalVideoStats callback. + * 1: Hardware encoder preference. The SDK prioritizes using the hardware encoder for video encoding. 
If the device does not support hardware encoding, the SDK automatically switches to software encoding and reports the current encoder type via the hwEncoderAccelerating field in the onLocalVideoStats callback. */ PreferHardware = 1, } @@ -1339,19 +1338,19 @@ export class AdvanceOptions { */ compressionPreference?: CompressionPreference; /** - * Whether to encode and send the Alpha data present in the video frame to the remote end: true : Encode and send Alpha data. false : (Default) Do not encode and send Alpha data. + * When the video frame contains alpha channel data, specifies whether to encode and send the alpha data to the remote end: true : Encode and send alpha data. false : (Default) Do not encode or send alpha data. */ encodeAlpha?: boolean; } /** - * Video mirror mode. + * Mirror mode type. */ export enum VideoMirrorModeType { /** * 0: The SDK determines the mirror mode. - * For the mirror mode of the local video view: If you use a front camera, the SDK enables the mirror mode by default; if you use a rear camera, the SDK disables the mirror mode by default. - * For the remote user: The mirror mode is disabled by default. + * Local view mirror mode: If you use the front camera, the local view mirror mode is enabled by default; if you use the rear camera, the local view mirror mode is disabled by default. + * Remote user view mirror mode: The mirror mode for remote users is disabled by default. */ VideoMirrorModeAuto = 0, /** @@ -1427,111 +1426,114 @@ export enum HdrCapability { } /** - * The bit mask of the codec type. + * Codec capability bit mask. */ export enum CodecCapMask { /** - * (0): The device does not support encoding or decoding. + * (0): Codec not supported. */ CodecCapMaskNone = 0, /** - * (1 << 0): The device supports hardware decoding. + * (1 << 0): Supports hardware decoding. */ CodecCapMaskHwDec = 1 << 0, /** - * (1 << 1): The device supports hardware encoding. + * (1 << 1): Supports hardware encoding. 
*/ CodecCapMaskHwEnc = 1 << 1, /** - * (1 << 2): The device supports software decoding. + * (1 << 2): Supports software decoding. */ CodecCapMaskSwDec = 1 << 2, /** - * (1 << 3): The device supports software ecoding. + * (1 << 3): Supports software encoding. */ CodecCapMaskSwEnc = 1 << 3, } /** - * The level of the codec capability. + * Codec capability levels. */ export class CodecCapLevels { /** - * Hardware decoding capability level, which represents the device's ability to perform hardware decoding on videos of different quality. See VIDEO_CODEC_CAPABILITY_LEVEL. + * Hardware decoding capability level, indicating the device's ability to decode videos of different qualities using hardware. See VIDEO_CODEC_CAPABILITY_LEVEL. */ hwDecodingLevel?: VideoCodecCapabilityLevel; /** - * Software decoding capability level, which represents the device's ability to perform software decoding on videos of different quality. See VIDEO_CODEC_CAPABILITY_LEVEL. + * Software decoding capability level, indicating the device's ability to decode videos of different qualities using software. See VIDEO_CODEC_CAPABILITY_LEVEL. */ swDecodingLevel?: VideoCodecCapabilityLevel; } /** - * The codec capability of the SDK. + * Information about codec capabilities supported by the SDK. */ export class CodecCapInfo { /** - * The video codec types. See VideoCodecType. + * Video codec type. See VideoCodecType. */ codecType?: VideoCodecType; /** - * Bit mask of the codec types in SDK. See CodecCapMask. + * Bit mask of codec types supported by the SDK. See CodecCapMask. */ codecCapMask?: number; /** - * Codec capability of the SDK. See CodecCapLevels. + * @ignore */ codecLevels?: CodecCapLevels; } /** - * Focal length information supported by the camera, including the camera direction and focal length type. + * Focal length information supported by the camera, including camera direction and focal length type. */ export class FocalLengthInfo { /** - * The camera direction. See CameraDirection. 
+ * Camera direction. See CameraDirection. */ cameraDirection?: number; /** - * The focal length type. See CameraFocalLengthType. + * Focal length type. See CameraFocalLengthType. */ focalLengthType?: CameraFocalLengthType; } /** - * Video encoder configurations. + * Configuration for the video encoder. */ export class VideoEncoderConfiguration { /** - * The codec type of the local video stream. See VideoCodecType. + * Video codec type. See VideoCodecType. */ codecType?: VideoCodecType; /** - * The dimensions of the encoded video (px). See VideoDimensions. This parameter measures the video encoding quality in the format of length × width. The default value is 960 × 540. You can set a custom value. + * Resolution (px) of the video encoding. See VideoDimensions. This parameter is used to measure encoding quality, represented as width × height. Default is 960 × 540. You can customize the resolution. */ dimensions?: VideoDimensions; /** - * The frame rate (fps) of the encoding video frame. The default value is 15. See FrameRate. + * Frame rate (fps) of the video encoding. Default is 15. See FrameRate. */ frameRate?: number; /** - * The encoding bitrate (Kbps) of the video. This parameter does not need to be set; keeping the default value STANDARD_BITRATE is sufficient. The SDK automatically matches the most suitable bitrate based on the video resolution and frame rate you have set. For the correspondence between video resolution and frame rate, see. STANDARD_BITRATE (0): (Recommended) Standard bitrate mode. COMPATIBLE_BITRATE (-1): Adaptive bitrate mode. In general, Agora suggests that you do not use this value. + * Bitrate of the video encoding in Kbps. You do not need to set this parameter; keep the default value STANDARD_BITRATE. The SDK automatically matches the optimal bitrate based on your configured resolution and frame rate. 
For the mapping between resolution and frame rate, see [Video Profile](https://doc.shengwang.cn/doc/rtc/rn/basic-features/video-profile#%E8%A7%86%E9%A2%91%E5%B1%9E%E6%80%A7%E5%8F%82%E8%80%83). + * STANDARD_BITRATE (0): (Default) Standard bitrate mode. + * COMPATIBLE_BITRATE (-1): Compatible bitrate mode. In general, we recommend not using this value. */ bitrate?: number; /** - * The minimum encoding bitrate (Kbps) of the video. The SDK automatically adjusts the encoding bitrate to adapt to the network conditions. Using a value greater than the default value forces the video encoder to output high-quality images but may cause more packet loss and sacrifice the smoothness of the video transmission. Unless you have special requirements for image quality, Agora does not recommend changing this value. This parameter only applies to the interactive streaming profile. + * Minimum encoding bitrate in Kbps. + * The SDK adjusts the video encoding bitrate automatically based on network conditions. Setting this parameter higher than the default forces the encoder to output high-quality images, but may cause packet loss and stuttering under poor network conditions. Therefore, unless you have special quality requirements, we recommend not changing this value. (Live streaming only) */ minBitrate?: number; /** - * The orientation mode of the encoded video. See OrientationMode. + * Orientation mode of the video encoding. See OrientationMode. */ orientationMode?: OrientationMode; /** - * Video degradation preference under limited bandwidth. See DegradationPreference. When this parameter is set to MaintainFramerate (1) or MaintainBalanced (2), orientationMode needs to be set to OrientationModeAdaptive (0) at the same time, otherwise the setting will not take effect. + * Video degradation preference when bandwidth is limited. See DegradationPreference. 
When this parameter is set to MaintainFramerate (1) or MaintainBalanced (2), you must also set orientationMode to OrientationModeAdaptive (0), otherwise the setting will not take effect. */ degradationPreference?: DegradationPreference; /** - * Sets the mirror mode of the published local video stream. It only affects the video that the remote user sees. See VideoMirrorModeType. By default, the video is not mirrored. + * Whether to enable mirror mode when sending encoded video. This only affects the video seen by remote users. See VideoMirrorModeType. Mirror mode is disabled by default. */ mirrorMode?: VideoMirrorModeType; /** @@ -1541,53 +1543,58 @@ export class VideoEncoderConfiguration { } /** - * The configurations for the data stream. + * Data stream settings. * - * The following table shows the SDK behaviors under different parameter settings: + * The table below shows the SDK behavior under different parameter settings: syncWithAudio ordered + * SDK Behavior false false + * The SDK immediately triggers the onStreamMessage callback upon receiving the data packet. true false + * If the data packet delay is within the audio delay range, the SDK triggers the onStreamMessage callback synchronized with the audio packet during playback. If the delay exceeds the audio delay, the SDK triggers the callback immediately upon receiving the packet, which may cause desynchronization between audio and data packets. false true + * If the data packet delay is within 5 seconds, the SDK corrects the packet disorder. If the delay exceeds 5 seconds, the SDK discards the packet. true true + * If the data packet delay is within the audio delay range, the SDK corrects the packet disorder. If the delay exceeds the audio delay, the SDK discards the packet. */ export class DataStreamConfig { /** - * Whether to synchronize the data packet with the published audio packet. true : Synchronize the data packet with the audio packet. 
This setting is suitable for special scenarios such as lyrics synchronization. false : Do not synchronize the data packet with the audio packet. This setting is suitable for scenarios where data packets need to arrive at the receiving end immediately. When you set the data packet to synchronize with the audio, then if the data packet delay is within the audio delay, the SDK triggers the onStreamMessage callback when the synchronized audio packet is played out. + * Whether to synchronize with the locally sent audio stream. true : Synchronize the data stream with the audio stream. This setting is suitable for special scenarios such as lyrics synchronization. false : Do not synchronize the data stream with the audio stream. This setting is suitable for scenarios where data packets need to reach the receiver immediately. When synchronized with the audio stream, if the data packet delay is within the audio delay range, the SDK triggers the onStreamMessage callback synchronized with the audio packet during playback. */ syncWithAudio?: boolean; /** - * Whether the SDK guarantees that the receiver receives the data in the sent order. true : Guarantee that the receiver receives the data in the sent order. false : Do not guarantee that the receiver receives the data in the sent order. Do not set this parameter as true if you need the receiver to receive the data packet immediately. + * Whether to ensure the received data is in the same order as sent. true : Ensures the SDK outputs data packets in the order sent by the sender. false : Does not ensure the SDK outputs data packets in the order sent by the sender. If data packets need to reach the receiver immediately, do not set this parameter to true. */ ordered?: boolean; } /** - * The mode in which the video stream is sent. + * Mode for sending video streams. 
*/ export enum SimulcastStreamMode { /** - * -1: By default, do not send the low-quality video stream until a subscription request for the low-quality video stream is received from the receiving end, then automatically start sending low-quality video stream. + * -1: By default, the low-quality stream is not sent until a subscription request for the low-quality stream is received from the receiver, at which point the low-quality stream is sent automatically. */ AutoSimulcastStream = -1, /** - * 0: Never send low-quality video stream. + * 0: Never send the low-quality stream. */ DisableSimulcastStream = 0, /** - * 1: Always send low-quality video stream. + * 1: Always send the low-quality stream. */ EnableSimulcastStream = 1, } /** - * The configuration of the low-quality video stream. + * Configuration for video simulcast stream. */ export class SimulcastStreamConfig { /** - * The video dimension. See VideoDimensions. The default value is 50% of the high-quality video stream. + * Video dimensions. See VideoDimensions. The default value is 50% of the main stream resolution. */ dimensions?: VideoDimensions; /** - * Video bitrate (Kbps). The default value is -1. This parameter does not need to be set. The SDK automatically matches the most suitable bitrate based on the video resolution and frame rate you set. + * Video bitrate (Kbps), default is -1. This parameter does not need to be set; the SDK will automatically match the most appropriate bitrate based on the resolution and frame rate you set. */ kBitrate?: number; /** - * The frame rate (fps) of the local video. The default value is 5. + * Video frame rate (fps). Default is 5. */ framerate?: number; } @@ -1663,15 +1670,15 @@ export class SimulcastConfig { } /** - * The location of the target area relative to the screen or window. If you do not set this parameter, the SDK selects the whole screen or window. + * The position of the target area relative to the entire screen or window. 
If not specified, it represents the entire screen or window. */ export class Rectangle { /** - * The horizontal offset from the top-left corner. + * The horizontal offset of the top-left corner. */ x?: number; /** - * The vertical offset from the top-left corner. + * The vertical offset of the top-left corner. */ y?: number; /** @@ -1685,370 +1692,390 @@ export class Rectangle { } /** - * The position and size of the watermark on the screen. + * Position and size of the watermark on the screen. * * The position and size of the watermark on the screen are determined by xRatio, yRatio, and widthRatio : - * (xRatio, yRatio) refers to the coordinates of the upper left corner of the watermark, which determines the distance from the upper left corner of the watermark to the upper left corner of the screen. - * The widthRatio determines the width of the watermark. + * (xRatio, yRatio) indicates the coordinates of the top-left corner of the watermark, representing the distance from the top-left corner of the screen. widthRatio determines the width of the watermark. */ export class WatermarkRatio { /** - * The x-coordinate of the upper left corner of the watermark. The horizontal position relative to the origin, where the upper left corner of the screen is the origin, and the x-coordinate is the upper left corner of the watermark. The value range is [0.0,1.0], and the default value is 0. + * The x-coordinate of the top-left corner of the watermark. With the top-left corner of the screen as the origin, the x-coordinate represents the horizontal offset of the watermark's top-left corner relative to the origin. Value range: [0.0, 1.0], default is 0. */ xRatio?: number; /** - * The y-coordinate of the upper left corner of the watermark. The vertical position relative to the origin, where the upper left corner of the screen is the origin, and the y-coordinate is the upper left corner of the screen. The value range is [0.0,1.0], and the default value is 0. 
+ * The y-coordinate of the top-left corner of the watermark. With the top-left corner of the screen as the origin, the y-coordinate represents the vertical offset of the watermark's top-left corner relative to the origin. Value range: [0.0, 1.0], default is 0. */ yRatio?: number; /** - * The width of the watermark. The SDK calculates the height of the watermark proportionally according to this parameter value to ensure that the enlarged or reduced watermark image is not distorted. The value range is [0,1], and the default value is 0, which means no watermark is displayed. + * The width of the watermark. The SDK calculates the proportional height of the watermark based on this value to ensure the image is not distorted when scaled. Value range: [0.0, 1.0], default is 0, which means the watermark is not displayed. */ widthRatio?: number; } /** - * Watermark image configurations. + * Configures the watermark image. * - * Configuration options for setting the watermark image to be added. + * Used to configure the settings for the watermark image to be added. */ export class WatermarkOptions { /** - * Whether the watermark is visible in the local preview view: true : (Default) The watermark is visible in the local preview view. false : The watermark is not visible in the local preview view. + * Whether the watermark is visible in the local preview view: true : (default) The watermark is visible in the local preview view. false : The watermark is not visible in the local preview view. */ visibleInPreview?: boolean; /** - * When the adaptation mode of the watermark is FitModeCoverPosition, it is used to set the area of the watermark image in landscape mode. See Rectangle. + * When the watermark fit mode is FitModeCoverPosition, sets the area of the watermark image in landscape mode. See Rectangle. 
*/ positionInLandscapeMode?: Rectangle; /** - * When the adaptation mode of the watermark is FitModeCoverPosition, it is used to set the area of the watermark image in portrait mode. See Rectangle. + * When the watermark fit mode is FitModeCoverPosition, sets the area of the watermark image in portrait mode. See Rectangle. */ positionInPortraitMode?: Rectangle; /** - * When the watermark adaptation mode is FitModeUseImageRatio, this parameter is used to set the watermark coordinates. See WatermarkRatio. + * When the watermark fit mode is FitModeUseImageRatio, this parameter sets the watermark coordinates in scaling mode. See WatermarkRatio. */ watermarkRatio?: WatermarkRatio; /** - * The adaptation mode of the watermark. See WatermarkFitMode. + * The fit mode of the watermark. See WatermarkFitMode. */ mode?: WatermarkFitMode; /** - * @ignore + * The z-order of the watermark image. Default is 0. */ zOrder?: number; } /** - * @ignore + * Watermark source type. + * + * Available since v4.6.2. */ export enum WatermarkSourceType { /** - * @ignore + * (0): The watermark source is an image. */ Image = 0, /** - * @ignore + * (1): The watermark source is a buffer. */ Buffer = 1, /** - * @ignore + * (2): The watermark source is a text literal. (Linux only) */ Literal = 2, /** - * @ignore + * (3): The watermark source is a timestamp. (Linux only) */ Timestamps = 3, } /** - * @ignore + * Used to configure timestamp watermark. + * + * Available since v4.6.2. (Linux only) */ export class WatermarkTimestamp { /** - * @ignore + * Font size of the timestamp. Default is 10. */ fontSize?: number; /** - * @ignore + * Path to the timestamp font file. Default is NULL. The font file must be in .ttf format. If not set, the SDK uses the system default font (if available). If used asynchronously, copy the path to memory that will not be released. */ fontFilePath?: string; /** - * @ignore + * Stroke width of the timestamp. Default is 1. 
*/ strokeWidth?: number; /** - * @ignore + * Format of the timestamp. Default is %F %X. The format follows the C standard library function strftime. See strftime. If used asynchronously, copy the format string to memory that will not be released. */ format?: string; } /** - * @ignore + * Used to configure text watermark. + * + * Available since v4.6.2. (Linux only) */ export class WatermarkLiteral { /** - * @ignore + * Font size of the text. Default is 10. */ fontSize?: number; /** - * @ignore + * Stroke width of the text. Default is 1. */ strokeWidth?: number; /** - * @ignore + * Text content of the watermark. Default is NULL. If used asynchronously, copy the string to memory that will not be released. */ wmLiteral?: string; /** - * @ignore + * Path to the font file. Default is NULL. The font file should be in .ttf format. If not set, the SDK uses the system default font (if available). If used asynchronously, copy the string to memory that will not be released. */ fontFilePath?: string; } /** - * @ignore + * Used to configure the format, size, and pixel buffer of the watermark image. + * + * Available since v4.6.2. */ export class WatermarkBuffer { /** - * @ignore + * Width of the watermark image in pixels. */ width?: number; /** - * @ignore + * Height of the watermark image in pixels. */ height?: number; /** - * @ignore + * Length of the watermark image buffer in bytes. */ length?: number; /** - * @ignore + * Pixel format of the watermark image. See VideoPixelFormat. */ format?: VideoPixelFormat; /** - * @ignore + * Pixel buffer data of the watermark image. */ buffer?: Uint8Array; } /** - * @ignore + * Used to configure watermark information. + * + * Available since v4.6.2. */ export class WatermarkConfig { /** - * @ignore + * Unique identifier of the watermark. It is recommended to use UUID. */ id?: string; /** - * @ignore + * Type of the watermark. See WatermarkSourceType. 
*/ type?: WatermarkSourceType; /** - * @ignore + * Buffer of the watermark. See WatermarkBuffer. */ buffer?: WatermarkBuffer; /** - * @ignore + * Timestamp of the watermark. (Linux only) */ timestamp?: WatermarkTimestamp; /** - * @ignore + * Text content of the watermark. (Linux only) */ literal?: WatermarkLiteral; /** - * @ignore + * URL of the watermark image file. Default is NULL. */ imageUrl?: string; /** - * @ignore + * Configuration options for the watermark. See WatermarkOptions. */ options?: WatermarkOptions; } /** - * @ignore + * Mode for multipath data transmission. + * + * Available since v4.6.2. */ export enum MultipathMode { /** - * @ignore + * (0): Redundant transmission mode. The same data is transmitted redundantly through all available paths. */ Duplicate = 0, /** - * @ignore + * (1): Dynamic transmission mode. The SDK dynamically selects the optimal path for data transmission based on the current network conditions to improve performance. */ Dynamic = 1, } /** - * @ignore + * Network path types used for multipath transmission. + * + * Available since v4.6.2. */ export enum MultipathType { /** - * @ignore + * (0): Local Area Network (LAN) path. */ Lan = 0, /** - * @ignore + * (1): Wi-Fi path. */ Wifi = 1, /** - * @ignore + * (2): Mobile network path. */ Mobile = 2, /** - * @ignore + * (99): Unknown or unspecified network path. */ Unknown = 99, } /** - * @ignore + * Used to obtain statistics for a specific network path. + * + * Available since v4.6.2. */ export class PathStats { /** - * @ignore + * The type of network path. See MultipathType. */ type?: MultipathType; /** - * @ignore + * The transmission bitrate of this path, in Kbps. */ txKBitRate?: number; /** - * @ignore + * The receiving bitrate of this path, in Kbps. */ rxKBitRate?: number; } /** - * @ignore + * Used to aggregate statistics of each network path in multipath transmission. + * + * Available since v4.6.2. 
*/ export class MultipathStats { /** - * @ignore + * Total bytes sent through the LAN path. */ lanTxBytes?: number; /** - * @ignore + * Total bytes received through the LAN path. */ lanRxBytes?: number; /** - * @ignore + * Total bytes sent through the Wi-Fi path. */ wifiTxBytes?: number; /** - * @ignore + * Total bytes received through the Wi-Fi path. */ wifiRxBytes?: number; /** - * @ignore + * Total bytes sent through the mobile network path. */ mobileTxBytes?: number; /** - * @ignore + * Total bytes received through the mobile network path. */ mobileRxBytes?: number; /** - * @ignore + * The number of currently active transmission paths. */ activePathNum?: number; /** - * @ignore + * An array of statistics for each active transmission path. See PathStats. */ pathStats?: PathStats[]; } /** - * Statistics of a call session. + * Call-related statistics. */ export class RtcStats { /** - * Call duration of the local user in seconds, represented by an aggregate value. + * Call duration of the local user (seconds), cumulative value. */ duration?: number; /** - * The number of bytes sent. + * Number of bytes sent. */ txBytes?: number; /** - * The number of bytes received. + * Number of bytes received. */ rxBytes?: number; /** - * The total number of audio bytes sent, represented by an aggregate value. + * Number of audio bytes sent, cumulative value. */ txAudioBytes?: number; /** - * The total number of video bytes sent, represented by an aggregate value. + * Number of video bytes sent, cumulative value. */ txVideoBytes?: number; /** - * The total number of audio bytes received, represented by an aggregate value. + * Number of audio bytes received, cumulative value. */ rxAudioBytes?: number; /** - * The total number of video bytes received, represented by an aggregate value. + * Number of video bytes received, cumulative value. */ rxVideoBytes?: number; /** - * The actual bitrate (Kbps) while sending the local video stream. + * Sending bitrate (Kbps). 
*/ txKBitRate?: number; /** - * The receiving bitrate (Kbps). + * Receiving bitrate (Kbps). */ rxKBitRate?: number; /** - * The bitrate (Kbps) of receiving the audio. + * Audio receiving bitrate (Kbps). */ rxAudioKBitRate?: number; /** - * The bitrate (Kbps) of sending the audio packet. + * Audio sending bitrate (Kbps). */ txAudioKBitRate?: number; /** - * The bitrate (Kbps) of receiving the video. + * Video receiving bitrate (Kbps). */ rxVideoKBitRate?: number; /** - * The bitrate (Kbps) of sending the video. + * Video sending bitrate (Kbps). */ txVideoKBitRate?: number; /** - * The client-to-server delay (milliseconds). + * Client-to-access-server latency (ms). */ lastmileDelay?: number; /** - * The number of users in the channel. + * Number of users in the current channel. */ userCount?: number; /** - * Application CPU usage (%). - * The value of cpuAppUsage is always reported as 0 in the onLeaveChannel callback. + * CPU usage (%) of the current app. + * The cpuAppUsage reported in the onLeaveChannel callback is always 0. */ cpuAppUsage?: number; /** - * The system CPU usage (%). - * The value of cpuTotalUsage is always reported as 0 in the onLeaveChannel callback. - * As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations. + * CPU usage (%) of the current system. + * The cpuTotalUsage reported in the onLeaveChannel callback is always 0. + * Since Android 8.1, due to system limitations, you cannot obtain CPU usage through this property. */ cpuTotalUsage?: number; /** - * The round-trip time delay (ms) from the client to the local router. This property is disabled on devices running iOS 14 or later, and enabled on devices running versions earlier than iOS 14 by default. To enable this property on devices running iOS 14 or later,. On Android, to get gatewayRtt, ensure that you add the android.permission.ACCESS_WIFI_STATE permission after in the AndroidManifest.xml file in your project. 
+ * Round-trip time (ms) from client to local router. This property is enabled by default on devices before iOS 14 and disabled on iOS 14 and later. + * + * To enable this property on iOS 14 and later, [contact technical support](https://ticket.shengwang.cn/). + * On Android, to obtain gatewayRtt, ensure that the android.permission.ACCESS_WIFI_STATE permission is added in your project's AndroidManifest.xml file. */ gatewayRtt?: number; /** - * The memory ratio occupied by the app (%). This value is for reference only. Due to system limitations, you may not get this value. + * Memory usage ratio (%) of the current app. This value is for reference only. It may not be available due to system limitations. */ memoryAppUsageRatio?: number; /** - * The memory occupied by the system (%). This value is for reference only. Due to system limitations, you may not get this value. + * Memory usage ratio (%) of the current system. This value is for reference only. It may not be available due to system limitations. */ memoryTotalUsageRatio?: number; /** - * The memory size occupied by the app (KB). This value is for reference only. Due to system limitations, you may not get this value. + * Memory usage (KB) of the current app. This value is for reference only. It may not be available due to system limitations. */ memoryAppUsageInKbytes?: number; /** - * The duration (ms) between the SDK starts connecting and the connection is established. If the value reported is 0, it means invalid. + * Time from starting connection to successful connection (ms). A value of 0 indicates invalid. */ connectTimeMs?: number; /** @@ -2088,11 +2115,11 @@ export class RtcStats { */ firstVideoKeyFrameRenderedDurationAfterUnmute?: number; /** - * The packet loss rate (%) from the client to the Agora server before applying the anti-packet-loss algorithm. + * Uplink packet loss rate (%) from client to server before anti-packet-loss technique is applied. 
*/ txPacketLossRate?: number; /** - * The packet loss rate (%) from the Agora server to the client before using the anti-packet-loss method. + * Downlink packet loss rate (%) from server to client before anti-packet-loss technique is applied. */ rxPacketLossRate?: number; /** @@ -2102,39 +2129,39 @@ export class RtcStats { } /** - * The user role in the interactive live streaming. + * User roles in live broadcasting. */ export enum ClientRoleType { /** - * 1: Host. A host can both send and receive streams. + * 1: Broadcaster. A broadcaster can both send and receive streams. */ ClientRoleBroadcaster = 1, /** - * 2: (Default) Audience. An audience member can only receive streams. + * 2: (Default) Audience. An audience can only receive streams, not send. */ ClientRoleAudience = 2, } /** - * Quality change of the local video in terms of target frame rate and target bit rate since last count. + * Adaptation of local video quality since last statistics (based on target frame rate and target bitrate). */ export enum QualityAdaptIndication { /** - * 0: The local video quality stays the same. + * 0: Local video quality remains unchanged. */ AdaptNone = 0, /** - * 1: The local video quality improves because the network bandwidth increases. + * 1: Local video quality improves due to increased network bandwidth. */ AdaptUpBandwidth = 1, /** - * 2: The local video quality deteriorates because the network bandwidth decreases. + * 2: Local video quality degrades due to decreased network bandwidth. */ AdaptDownBandwidth = 2, } /** - * The latency level of an audience member in interactive live streaming. This enum takes effect only when the user role is set to ClientRoleAudience . + * Latency level of audience in a live broadcast channel. This enum takes effect only when the user role is set to ClientRoleAudience. 
*/ export enum AudienceLatencyLevelType { /** @@ -2142,141 +2169,143 @@ export enum AudienceLatencyLevelType { */ AudienceLatencyLevelLowLatency = 1, /** - * 2: (Default) Ultra low latency. + * 2: (Default) Ultra-low latency. */ AudienceLatencyLevelUltraLowLatency = 2, } /** - * Setting of user role properties. + * User role property settings. */ export class ClientRoleOptions { /** - * The latency level of an audience member in interactive live streaming. See AudienceLatencyLevelType. + * Audience latency level. See AudienceLatencyLevelType. */ audienceLatencyLevel?: AudienceLatencyLevelType; } /** - * The Quality of Experience (QoE) of the local user when receiving a remote audio stream. + * The subjective experience quality of the local user when receiving remote audio. */ export enum ExperienceQualityType { /** - * 0: The QoE of the local user is good. + * 0: Good subjective experience quality. */ ExperienceQualityGood = 0, /** - * 1: The QoE of the local user is poor. + * 1: Poor subjective experience quality. */ ExperienceQualityBad = 1, } /** - * Reasons why the QoE of the local user when receiving a remote audio stream is poor. + * The reason for poor subjective experience quality of the local user when receiving remote audio. */ export enum ExperiencePoorReason { /** - * 0: No reason, indicating a good QoE of the local user. + * 0: No reason, indicates good subjective experience quality. */ ExperienceReasonNone = 0, /** - * 1: The remote user's network quality is poor. + * 1: Poor network quality of the remote user. */ RemoteNetworkQualityPoor = 1, /** - * 2: The local user's network quality is poor. + * 2: Poor network quality of the local user. */ LocalNetworkQualityPoor = 2, /** - * 4: The local user's Wi-Fi or mobile network signal is weak. + * 4: Weak Wi-Fi or mobile data signal of the local user. */ WirelessSignalPoor = 4, /** - * 8: The local user enables both Wi-Fi and bluetooth, and their signals interfere with each other. 
As a result, audio transmission quality is undermined. + * 8: Wi-Fi and Bluetooth are enabled simultaneously on the local device, causing signal interference and degraded audio transmission quality. */ WifiBluetoothCoexist = 8, } /** - * AI noise suppression modes. + * AI noise reduction mode. */ export enum AudioAinsMode { /** - * 0: (Default) Balance mode. This mode allows for a balanced performance on noice suppression and time delay. + * 0: (Default) Balanced noise reduction mode. Choose this mode if you want a balance between noise suppression and latency. */ AinsModeBalanced = 0, /** - * 1: Aggressive mode. In scenarios where high performance on noise suppression is required, such as live streaming outdoor events, this mode reduces nosie more dramatically, but may sometimes affect the original character of the audio. + * 1: Aggressive noise reduction mode. Suitable for scenarios with high noise suppression requirements, such as outdoor live streaming. This mode reduces noise more significantly but may slightly damage voice quality. */ AinsModeAggressive = 1, /** - * 2: Aggressive mode with low latency. The noise suppression delay of this mode is about only half of that of the balance and aggressive modes. It is suitable for scenarios that have high requirements on noise suppression with low latency, such as sing together online in real time. + * 2: Low-latency aggressive noise reduction mode. This mode has about half the latency of weak and aggressive noise reduction modes, suitable for scenarios requiring both noise reduction and low latency, such as real-time chorus. */ AinsModeUltralowlatency = 2, } /** - * The audio profile. + * Audio encoding properties. */ export enum AudioProfileType { /** - * 0: The default audio profile. - * For the interactive streaming profile: A sample rate of 48 kHz, music encoding, mono, and a bitrate of up to 64 Kbps. 
- * For the communication profile: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18 Kbps. + * 0: Default value. + * In live broadcast: 48 kHz sample rate, music encoding, mono, max bitrate 64 Kbps. + * In communication: 32 kHz sample rate, speech encoding, mono, max bitrate 18 Kbps. */ AudioProfileDefault = 0, /** - * 1: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18 Kbps. + * 1: Sets 32 kHz sample rate, speech encoding, mono, max bitrate 18 Kbps. */ AudioProfileSpeechStandard = 1, /** - * 2: A sample rate of 48 kHz, music encoding, mono, and a bitrate of up to 64 Kbps. + * 2: Sets 48 kHz sample rate, music encoding, mono, max bitrate 64 Kbps. */ AudioProfileMusicStandard = 2, /** - * 3: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 80 Kbps. To implement stereo audio, you also need to call setAdvancedAudioOptions and set audioProcessingChannels to AudioProcessingStereo in AdvancedAudioOptions. + * 3: Sets 48 kHz sample rate, music encoding, stereo, max bitrate 80 Kbps. + * To enable stereo, you also need to call setAdvancedAudioOptions and set audioProcessingChannels to AudioProcessingStereo in AdvancedAudioOptions. */ AudioProfileMusicStandardStereo = 3, /** - * 4: A sample rate of 48 kHz, music encoding, mono, and a bitrate of up to 96 Kbps. + * 4: Sets 48 kHz sample rate, music encoding, mono, max bitrate 96 Kbps. */ AudioProfileMusicHighQuality = 4, /** - * 5: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 128 Kbps. To implement stereo audio, you also need to call setAdvancedAudioOptions and set audioProcessingChannels to AudioProcessingStereo in AdvancedAudioOptions. + * 5: Sets 48 kHz sample rate, music encoding, stereo, max bitrate 128 Kbps. + * To enable stereo, you also need to call setAdvancedAudioOptions and set audioProcessingChannels to AudioProcessingStereo in AdvancedAudioOptions. 
*/ AudioProfileMusicHighQualityStereo = 5, /** - * 6: A sample rate of 16 kHz, audio encoding, mono, and Acoustic Echo Cancellation (AEC) enabled. + * 6: Sets 16 kHz sample rate, speech encoding, mono, applies echo cancellation algorithm AEC. */ AudioProfileIot = 6, /** - * Enumerator boundary. + * Boundary of enum values. */ AudioProfileNum = 7, } /** - * The audio scenarios. + * Audio scenario. */ export enum AudioScenarioType { /** - * 0: (Default) Automatic scenario match, where the SDK chooses the appropriate audio quality according to the user role and audio route. + * 0: (Default) Auto scenario. Automatically matches appropriate audio quality based on user role and audio route. */ AudioScenarioDefault = 0, /** - * 3: High-quality audio scenario, where users mainly play music. For example, instrument tutoring. + * 3: High-quality scenario, suitable for music-centric use cases. For example: instrument practice. */ AudioScenarioGameStreaming = 3, /** - * 5: Chatroom scenario, where users need to frequently switch the user role or mute and unmute the microphone. For example, education scenarios. + * 5: Chatroom scenario, suitable for frequent mic on/off situations. For example: education scenarios. */ AudioScenarioChatroom = 5, /** - * 7: Real-time chorus scenario, where users have good network conditions and require ultra-low latency. + * 7: Chorus scenario. Suitable for real-time chorus with low latency under good network conditions. */ AudioScenarioChorus = 7, /** - * 8: Meeting scenario that mainly contains the human voice. + * 8: Meeting scenario, suitable for multi-person voice-centric meetings. */ AudioScenarioMeeting = 8, /** @@ -2284,104 +2313,91 @@ export enum AudioScenarioType { */ AudioScenarioAiServer = 9, /** - * 10: AI conversation scenario, which is only applicable to scenarios where the user interacts with the conversational AI agent created by. 
+ * 10: AI dialogue scenario, only applicable to interactions with [Agora Conversational AI Engine](https://doc.shengwang.cn/doc/convoai/restful/landing-page). */ AudioScenarioAiClient = 10, /** - * The number of enumerations. + * Number of enum values. */ AudioScenarioNum = 11, } /** - * The format of the video frame. + * Video frame format. */ export class VideoFormat { /** - * The width (px) of the video frame. The default value is 960. + * Width of the video frame (px). Default is 960. */ width?: number; /** - * The height (px) of the video frame. The default value is 540. + * Height of the video frame (px). Default is 540. */ height?: number; /** - * The video frame rate (fps). The default value is 15. + * Frame rate of the video frame. Default is 15. */ fps?: number; } /** - * The content hint for screen sharing. + * Content type of screen sharing. */ export enum VideoContentHint { /** - * (Default) No content hint. + * (Default) No specified content type. */ ContentHintNone = 0, /** - * Motion-intensive content. Choose this option if you prefer smoothness or when you are sharing a video clip, movie, or video game. + * Content type is motion. Recommended when sharing videos, movies, or video games. */ ContentHintMotion = 1, /** - * Motionless content. Choose this option if you prefer sharpness or when you are sharing a picture, PowerPoint slides, or texts. + * Content type is details. Recommended when sharing images or text. */ ContentHintDetails = 2, } /** - * The screen sharing scenario. + * Screen sharing scenarios. */ export enum ScreenScenarioType { /** - * 1: (Default) Document. This scenario prioritizes the video quality of screen sharing and reduces the latency of the shared video for the receiver. If you share documents, slides, and tables, you can set this scenario. + * 1: (Default) Document. In this scenario, the quality of the shared content is prioritized, and the latency for the receiver to see the shared video is reduced. 
You can use this scenario when sharing documents, slides, or spreadsheets. */ ScreenScenarioDocument = 1, /** - * 2: Game. This scenario prioritizes the smoothness of screen sharing. If you share games, you can set this scenario. + * 2: Gaming. In this scenario, the smoothness of the shared content is prioritized. You can use this scenario when sharing games. */ ScreenScenarioGaming = 2, /** - * 3: Video. This scenario prioritizes the smoothness of screen sharing. If you share movies or live videos, you can set this scenario. + * 3: Video. In this scenario, the smoothness of the shared content is prioritized. You can use this scenario when sharing movies or live videos. */ ScreenScenarioVideo = 3, /** - * 4: Remote control. This scenario prioritizes the video quality of screen sharing and reduces the latency of the shared video for the receiver. If you share the device desktop being remotely controlled, you can set this scenario. + * 4: Remote control. In this scenario, the quality of the shared content is prioritized, and the latency for the receiver to see the shared video is reduced. You can use this scenario when sharing the desktop of a remotely controlled device. */ ScreenScenarioRdc = 4, } /** - * The video application scenarios. + * Video application scenario type. */ export enum VideoApplicationScenarioType { /** - * 0: (Default) The general scenario. + * 0: (Default) General scenario. */ ApplicationScenarioGeneral = 0, /** - * ApplicationScenarioMeeting (1) is suitable for meeting scenarios. The SDK automatically enables the following strategies: - * In meeting scenarios where low-quality video streams are required to have a high bitrate, the SDK automatically enables multiple technologies used to deal with network congestions, to enhance the performance of the low-quality streams and to ensure the smooth reception by subscribers. 
- * The SDK monitors the number of subscribers to the high-quality video stream in real time and dynamically adjusts its configuration based on the number of subscribers. - * If nobody subscribers to the high-quality stream, the SDK automatically reduces its bitrate and frame rate to save upstream bandwidth. - * If someone subscribes to the high-quality stream, the SDK resets the high-quality stream to the VideoEncoderConfiguration configuration used in the most recent calling of setVideoEncoderConfiguration. If no configuration has been set by the user previously, the following values are used: - * Resolution: 960 × 540 - * Frame rate: 15 fps - * Bitrate: 1000 Kbps - * The SDK monitors the number of subscribers to the low-quality video stream in real time and dynamically enables or disables it based on the number of subscribers. If the user has called setDualStreamMode to set that never send low-quality video stream (DisableSimulcastStream), the dynamic adjustment of the low-quality stream in meeting scenarios will not take effect. - * If nobody subscribes to the low-quality stream, the SDK automatically disables it to save upstream bandwidth. - * If someone subscribes to the low-quality stream, the SDK enables the low-quality stream and resets it to the SimulcastStreamConfig configuration used in the most recent calling of setDualStreamMode. If no configuration has been set by the user previously, the following values are used: - * Resolution: 480 × 272 - * Frame rate: 15 fps - * Bitrate: 500 Kbps 1: The meeting scenario. + * 1: Meeting scenario. */ ApplicationScenarioMeeting = 1, /** - * ApplicationScenario1v1 (2) This is applicable to the scenario. To meet the requirements for low latency and high-quality video in this scenario, the SDK optimizes its strategies, improving performance in terms of video quality, first frame rendering, latency on mid-to-low-end devices, and smoothness under weak network conditions. 
This enumeration value is only applicable to the broadcaster vs. broadcaster scenario. 2: 1v1 video call scenario. + * 2: 1v1 video call */ ApplicationScenario1v1 = 2, /** - * ApplicationScenarioLiveshow (3) This is applicable to the scenario. In this scenario, fast video rendering and high image quality are crucial. The SDK implements several performance optimizations, including automatically enabling accelerated audio and video frame rendering to minimize first-frame latency (no need to call enableInstantMediaRendering), and B-frame encoding to achieve better image quality and bandwidth efficiency. The SDK also provides enhanced video quality and smooth playback, even in poor network conditions or on lower-end devices. 3. Live show scenario. + * 3: Live show */ ApplicationScenarioLiveshow = 3, } @@ -2409,7 +2425,7 @@ export enum VideoQoePreferenceType { } /** - * The brightness level of the video image captured by the local camera. + * Brightness level of the locally captured video. */ export enum CaptureBrightnessLevelType { /** @@ -2431,29 +2447,29 @@ export enum CaptureBrightnessLevelType { } /** - * Camera stabilization modes. + * Camera stabilization mode. * - * The camera stabilization effect increases in the order of 1 < 2 < 3, and the latency will also increase accordingly. + * Camera stabilization effect increases in the order of 1 < 2 < 3, with corresponding increase in latency. */ export enum CameraStabilizationMode { /** - * -1: (Default) Camera stabilization mode off. + * -1: (Default) Camera stabilization is off. */ CameraStabilizationModeOff = -1, /** - * 0: Automatic camera stabilization. The system automatically selects a stabilization mode based on the status of the camera. However, the latency is relatively high in this mode, so it is recommended not to use this enumeration. + * 0: Camera auto stabilization. The system automatically selects a stabilization mode based on the camera state. 
However, this mode has higher latency, and it is recommended not to use this enumeration. */ CameraStabilizationModeAuto = 0, /** - * 1: (Recommended) Level 1 camera stabilization. + * 1: (Recommended) Camera stabilization level 1. */ CameraStabilizationModeLevel1 = 1, /** - * 2: Level 2 camera stabilization. + * 2: Camera stabilization level 2. */ CameraStabilizationModeLevel2 = 2, /** - * 3: Level 3 camera stabilization. + * 3: Camera stabilization level 3. */ CameraStabilizationModeLevel3 = 3, /** @@ -2463,53 +2479,53 @@ export enum CameraStabilizationMode { } /** - * The state of the local audio. + * Local audio state. */ export enum LocalAudioStreamState { /** - * 0: The local audio is in the initial state. + * 0: Default initial state of local audio. */ LocalAudioStreamStateStopped = 0, /** - * 1: The local audio capturing device starts successfully. + * 1: Local audio capture device started successfully. */ LocalAudioStreamStateRecording = 1, /** - * 2: The first audio frame encodes successfully. + * 2: First frame of local audio encoded successfully. */ LocalAudioStreamStateEncoding = 2, /** - * 3: The local audio fails to start. + * 3: Failed to start local audio. */ LocalAudioStreamStateFailed = 3, } /** - * Reasons for local audio state changes. + * Reason for local audio state change. */ export enum LocalAudioStreamReason { /** - * 0: The local audio is normal. + * 0: Local audio state is normal. */ LocalAudioStreamReasonOk = 0, /** - * 1: No specified reason for the local audio failure. Remind your users to try to rejoin the channel. + * 1: The cause of the local audio error is unclear. It is recommended to prompt the user to try rejoining the channel. */ LocalAudioStreamReasonFailure = 1, /** - * 2: No permission to use the local audio capturing device. Remind your users to grant permission. Deprecated: This enumerator is deprecated. Please use RecordAudio in the onPermissionError callback instead. 
+ * 2: No permission to start the local audio capture device. Prompt the user to enable the permission. Deprecated: This enumeration is deprecated. Use the onPermissionError callback with RecordAudio instead. */ LocalAudioStreamReasonDeviceNoPermission = 2, /** - * 3: The local audio capture device is already in use. Remind your users to check whether another application occupies the microphone. Local audio capture automatically resumes after the microphone is idle for about five seconds. You can also try to rejoin the channel after the microphone is idle. + * 3: The local audio capture device is already in use. Prompt the user to check whether the microphone is occupied by another app. Local audio capture will automatically resume about 5 seconds after the microphone becomes idle. You can also try rejoining the channel after the microphone is idle. */ LocalAudioStreamReasonDeviceBusy = 3, /** - * 4: The local audio capture fails. + * 4: Local audio capture failed. */ LocalAudioStreamReasonRecordFailure = 4, /** - * 5: The local audio encoding fails. + * 5: Local audio encoding failed. */ LocalAudioStreamReasonEncodeFailure = 5, /** @@ -2521,7 +2537,7 @@ export enum LocalAudioStreamReason { */ LocalAudioStreamReasonNoPlayoutDevice = 7, /** - * 8: The local audio capture is interrupted by a system call, smart assistants, or alarm clock. Prompt your users to end the phone call, smart assistants, or alarm clock if the local audio capture is required. + * 8: Local audio capture was interrupted by system phone call, voice assistant, or alarm. To resume local audio capture, ask the user to end the call, voice assistant, or alarm. */ LocalAudioStreamReasonInterrupted = 8, /** @@ -2535,87 +2551,89 @@ export enum LocalAudioStreamReason { } /** - * Local video state types. + * Local video state. */ export enum LocalVideoStreamState { /** - * 0: The local video is in the initial state. + * 0: Default initial state of local video. 
 */ LocalVideoStreamStateStopped = 0, /** - * 1: The local video capturing device starts successfully. + * 1: Local video capture device started successfully. */ LocalVideoStreamStateCapturing = 1, /** - * 2: The first video frame is successfully encoded. + * 2: First frame of local video encoded successfully. */ LocalVideoStreamStateEncoding = 2, /** - * 3: Fails to start the local video. + * 3: Failed to start local video. */ LocalVideoStreamStateFailed = 3, } /** - * @ignore + * Local video event types. + * + * Available since v4.6.1. */ export enum LocalVideoEventType { /** - * @ignore + * (1): The screen capture window is hidden. (Android only) */ LocalVideoEventTypeScreenCaptureWindowHidden = 1, /** - * @ignore + * (2): The screen capture window recovers from being hidden. (Android only) */ LocalVideoEventTypeScreenCaptureWindowRecoverFromHidden = 2, /** - * @ignore + * (3): The screen capture is stopped by the user. (Android only) */ LocalVideoEventTypeScreenCaptureStoppedByUser = 3, /** - * @ignore + * (4): A system internal error occurs during screen capture. (Android only) */ LocalVideoEventTypeScreenCaptureSystemInternalError = 4, } /** - * Reasons for local video state changes. + * Reason for local video state change. */ export enum LocalVideoStreamReason { /** - * 0: The local video is normal. + * 0: Local video is in a normal state. */ LocalVideoStreamReasonOk = 0, /** - * 1: No specified reason for the local video failure. + * 1: Unknown error. */ LocalVideoStreamReasonFailure = 1, /** - * 2: No permission to use the local video capturing device. Prompt the user to grant permissions and rejoin the channel. Deprecated: This enumerator is deprecated. Please use CAMERA in the onPermissionError callback instead. + * 2: No permission to start the local video capture device. Prompt the user to enable device permissions before rejoining the channel. Deprecated: This enumeration is deprecated. Use onPermissionError callback with Camera instead. 
*/ LocalVideoStreamReasonDeviceNoPermission = 2, /** - * 3: The local video capturing device is in use. Prompt the user to check if the camera is being used by another app, or try to rejoin the channel. + * 3: The local video capture device is in use. Prompt the user to check if the camera is occupied by another app or try rejoining the channel. */ LocalVideoStreamReasonDeviceBusy = 3, /** - * 4: The local video capture fails. Prompt the user to check whether the video capture device is working properly, whether the camera is used by another app, or try to rejoin the channel. + * 4: Failed to capture local video. Prompt the user to check whether the video capture device is working properly, whether the camera is occupied by another app, or try rejoining the channel. */ LocalVideoStreamReasonCaptureFailure = 4, /** - * 5: The local video encoding fails. + * 5: Failed to encode local video. */ LocalVideoStreamReasonCodecNotSupport = 5, /** - * 6: (iOS only) The app is in the background. Prompt the user that video capture cannot be performed normally when the app is in the background. + * 6: (iOS only) The app is in the background. Prompt the user that video capture is not available when the app is in the background. */ LocalVideoStreamReasonCaptureInbackground = 6, /** - * 7: (iOS only) The current app window is running in Slide Over, Split View, or Picture in Picture mode, and another app is occupying the camera. Prompt the user that the app cannot capture video properly when it is running in Slide Over, Split View, or Picture in Picture mode and another app is occupying the camera. + * 7: (iOS only) The app is in slide-over, split-view, or picture-in-picture mode and another app is using the camera. Prompt the user that video capture is not available under these conditions. */ LocalVideoStreamReasonCaptureMultipleForegroundApps = 7, /** - * 8: Fails to find a local video capture device. 
Remind the user to check whether the camera is connected to the device properly or the camera is working properly, and then to rejoin the channel. + * 8: No local video capture device found. Check if the camera is properly connected and functioning, or try rejoining the channel. */ LocalVideoStreamReasonDeviceNotFound = 8, /** @@ -2627,13 +2645,13 @@ export enum LocalVideoStreamReason { */ LocalVideoStreamReasonDeviceInvalidId = 10, /** - * 14: (Android only) Video capture is interrupted. Possible reasons include the following: - * The camera is being used by another app. Prompt the user to check if the camera is being used by another app. - * The current app has been switched to the background. You can use foreground services to notify the operating system and ensure that the app can still collect video when it switches to the background. + * 14: (Android only) Video capture is interrupted. Possible reasons: + * The camera is occupied by another app. Prompt the user to check if the camera is occupied. + * The app has been switched to the background. You can use a foreground service notification to inform the OS to continue capturing video in the background. See [Why does audio/video capture fail after screen lock or backgrounding on some Android versions?](https://doc.shengwang.cn/faq/quality-issues/android-background). */ LocalVideoStreamReasonDeviceInterrupt = 14, /** - * 15: (Android only) The video capture device encounters an error. Prompt the user to close and restart the camera to restore functionality. If this operation does not solve the problem, check if the camera has a hardware failure. + * 15: (Android only) Video capture device error. Prompt the user to turn the camera off and on again to restore functionality. If that doesn't work, check for hardware failure. */ LocalVideoStreamReasonDeviceFatalError = 15, /** @@ -2711,37 +2729,37 @@ export enum LocalVideoStreamReason { } /** - * Remote audio states. + * Remote audio stream state. 
*/ export enum RemoteAudioState { /** - * 0: The local audio is in the initial state. The SDK reports this state in the case of RemoteAudioReasonLocalMuted, RemoteAudioReasonRemoteMuted or RemoteAudioReasonRemoteOffline. + * 0: Default initial state of remote audio. This state is reported under RemoteAudioReasonLocalMuted, RemoteAudioReasonRemoteMuted, or RemoteAudioReasonRemoteOffline. */ RemoteAudioStateStopped = 0, /** - * 1: The first remote audio packet is received. + * 1: Local user has received the first packet of remote audio. */ RemoteAudioStateStarting = 1, /** - * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the case of RemoteAudioReasonNetworkRecovery, RemoteAudioReasonLocalUnmuted or RemoteAudioReasonRemoteUnmuted. + * 2: Remote audio stream is decoding and playing normally. This state is reported under RemoteAudioReasonNetworkRecovery, RemoteAudioReasonLocalUnmuted, or RemoteAudioReasonRemoteUnmuted. */ RemoteAudioStateDecoding = 2, /** - * 3: The remote audio is frozen. The SDK reports this state in the case of RemoteAudioReasonNetworkCongestion. + * 3: Remote audio stream is frozen. This state is reported under RemoteAudioReasonNetworkCongestion. */ RemoteAudioStateFrozen = 3, /** - * 4: The remote audio fails to start. The SDK reports this state in the case of RemoteAudioReasonInternal. + * 4: Failed to play remote audio stream. This state is reported under RemoteAudioReasonInternal. */ RemoteAudioStateFailed = 4, } /** - * The reason for the remote audio state change. + * Reasons for remote audio stream state changes. */ export enum RemoteAudioStateReason { /** - * 0: The SDK reports this reason when the audio state changes. + * 0: Reports this reason when the audio state changes. */ RemoteAudioReasonInternal = 0, /** @@ -2749,27 +2767,27 @@ export enum RemoteAudioStateReason { */ RemoteAudioReasonNetworkCongestion = 1, /** - * 2: Network recovery. + * 2: Network recovered. 
*/ RemoteAudioReasonNetworkRecovery = 2, /** - * 3: The local user stops receiving the remote audio stream or disables the audio module. + * 3: Local user stopped receiving remote audio stream or disabled the audio module. */ RemoteAudioReasonLocalMuted = 3, /** - * 4: The local user resumes receiving the remote audio stream or enables the audio module. + * 4: Local user resumed receiving remote audio stream or enabled the audio module. */ RemoteAudioReasonLocalUnmuted = 4, /** - * 5: The remote user stops sending the audio stream or disables the audio module. + * 5: Remote user stopped sending audio stream or disabled the audio module. */ RemoteAudioReasonRemoteMuted = 5, /** - * 6: The remote user resumes sending the audio stream or enables the audio module. + * 6: Remote user resumed sending audio stream or enabled the audio module. */ RemoteAudioReasonRemoteUnmuted = 6, /** - * 7: The remote user leaves the channel. + * 7: Remote user left the channel. */ RemoteAudioReasonRemoteOffline = 7, /** @@ -2783,37 +2801,37 @@ export enum RemoteAudioStateReason { } /** - * The state of the remote video stream. + * Remote video stream state. */ export enum RemoteVideoState { /** - * 0: The remote video is in the initial state. The SDK reports this state in the case of RemoteVideoStateReasonLocalMuted, RemoteVideoStateReasonRemoteMuted, or RemoteVideoStateReasonRemoteOffline. + * 0: The default initial state of the remote video. This state is reported under RemoteVideoStateReasonLocalMuted, RemoteVideoStateReasonRemoteMuted, or RemoteVideoStateReasonRemoteOffline. */ RemoteVideoStateStopped = 0, /** - * 1: The first remote video packet is received. + * 1: The local user has received the first packet of the remote video. */ RemoteVideoStateStarting = 1, /** - * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the case of RemoteVideoStateReasonNetworkRecovery, RemoteVideoStateReasonLocalUnmuted, or RemoteVideoStateReasonRemoteUnmuted. 
+ * 2: The remote video stream is being decoded and playing normally. This state is reported under RemoteVideoStateReasonNetworkRecovery, RemoteVideoStateReasonLocalUnmuted, or RemoteVideoStateReasonRemoteUnmuted. */ RemoteVideoStateDecoding = 2, /** - * 3: The remote video is frozen. The SDK reports this state in the case of RemoteVideoStateReasonNetworkCongestion. + * 3: The remote video stream is frozen. This state is reported under RemoteVideoStateReasonNetworkCongestion. */ RemoteVideoStateFrozen = 3, /** - * 4: The remote video fails to start. The SDK reports this state in the case of RemoteVideoStateReasonInternal. + * 4: Failed to play the remote video stream. This state is reported under RemoteVideoStateReasonInternal. */ RemoteVideoStateFailed = 4, } /** - * The reason for the remote video state change. + * Reasons for remote video stream state changes. */ export enum RemoteVideoStateReason { /** - * 0: The SDK reports this reason when the video state changes. + * 0: Reports this reason when the video state changes. */ RemoteVideoStateReasonInternal = 0, /** @@ -2821,35 +2839,35 @@ export enum RemoteVideoStateReason { */ RemoteVideoStateReasonNetworkCongestion = 1, /** - * 2: Network is recovered. + * 2: Network recovered. */ RemoteVideoStateReasonNetworkRecovery = 2, /** - * 3: The local user stops receiving the remote video stream or disables the video module. + * 3: Local user stopped receiving remote video stream or disabled the video module. */ RemoteVideoStateReasonLocalMuted = 3, /** - * 4: The local user resumes receiving the remote video stream or enables the video module. + * 4: Local user resumed receiving remote video stream or enabled the video module. */ RemoteVideoStateReasonLocalUnmuted = 4, /** - * 5: The remote user stops sending the video stream or disables the video module. + * 5: Remote user stopped sending video stream or disabled the video module. 
*/ RemoteVideoStateReasonRemoteMuted = 5, /** - * 6: The remote user resumes sending the video stream or enables the video module. + * 6: Remote user resumed sending video stream or enabled the video module. */ RemoteVideoStateReasonRemoteUnmuted = 6, /** - * 7: The remote user leaves the channel. + * 7: Remote user left the channel. */ RemoteVideoStateReasonRemoteOffline = 7, /** - * 8: The remote audio-and-video stream falls back to the audio-only stream due to poor network conditions. + * 8: Under poor network conditions, remote audio/video stream falls back to audio only. */ RemoteVideoStateReasonAudioFallback = 8, /** - * 9: The remote audio-only stream switches back to the audio-and-video stream after the network conditions improve. + * 9: When the network improves, remote audio stream recovers to audio/video stream. */ RemoteVideoStateReasonAudioFallbackRecovery = 9, /** @@ -2865,7 +2883,7 @@ export enum RemoteVideoStateReason { */ RemoteVideoStateReasonSdkInBackground = 12, /** - * 13: The local video decoder does not support decoding the remote video stream. + * 13: The local video decoder does not support decoding the received remote video stream. */ RemoteVideoStateReasonCodecNotSupport = 13, } @@ -2957,41 +2975,40 @@ export enum RemoteVideoDownscaleLevel { } /** - * The volume information of users. + * User volume information. */ export class AudioVolumeInfo { /** - * The user ID. - * In the local user's callback, uid is 0. - * In the remote users' callback, uid is the user ID of a remote user whose instantaneous volume is the highest. + * User ID. + * In the local user callback, uid is 0. + * In the remote user callback, uid is the ID of the remote user (up to 3) with the highest instantaneous volume. */ uid?: number; /** - * The volume of the user. The value ranges between 0 (the lowest volume) and 255 (the highest volume). 
If the local user enables audio capturing and calls muteLocalAudioStream and set it as true to mute, the value of volume indicates the volume of locally captured audio signal. + * User volume, range [0,255]. If the user mutes themselves (sets muteLocalAudioStream to true) but enables audio capture, the volume value indicates the volume of the locally captured signal. */ volume?: number; /** - * Voice activity status of the local user. - * 0: The local user is not speaking. - * 1: The local user is speaking. - * The vad parameter does not report the voice activity status of remote users. In a remote user's callback, the value of vad is always 1. - * To use this parameter, you must set reportVad to true when calling enableAudioVolumeIndication. + * Local user's voice activity status. + * 0: No voice detected locally. + * 1: Voice detected locally. + * vad cannot report the voice status of remote users. For remote users, the value of vad is always 1. To use this parameter, set reportVad to true when calling enableAudioVolumeIndication. */ vad?: number; /** - * The voice pitch of the local user. The value ranges between 0.0 and 4000.0. The voicePitch parameter does not report the voice pitch of remote users. In the remote users' callback, the value of voicePitch is always 0.0. + * Local user's voice pitch (Hz). Value range: [0.0,4000.0]. voicePitch cannot report the pitch of remote users. For remote users, the value of voicePitch is always 0.0. */ voicePitch?: number; } /** - * The audio device information. + * Audio device information. * - * This class is for Android only. + * (Android only) */ export class DeviceInfo { /** - * Whether the audio device supports ultra-low-latency capture and playback: true : The device supports ultra-low-latency capture and playback. false : The device does not support ultra-low-latency capture and playback. 
+ * Whether ultra-low-latency audio capture and playback is supported: true : Supported false : Not supported */ isLowLatencyAudioSupported?: boolean; } @@ -3011,7 +3028,7 @@ export class Packet { } /** - * The audio sampling rate of the stream to be pushed to the CDN. + * Sample rate of audio output for streaming. */ export enum AudioSampleRateType { /** @@ -3029,7 +3046,7 @@ export enum AudioSampleRateType { } /** - * The codec type of the output video. + * Codec type for transcoded output video stream. */ export enum VideoCodecTypeForStream { /** @@ -3043,37 +3060,37 @@ export enum VideoCodecTypeForStream { } /** - * Video codec profile types. + * Codec profile for video in relay streaming output. */ export enum VideoCodecProfileType { /** - * 66: Baseline video codec profile; generally used for video calls on mobile phones. + * 66: Baseline profile, typically used in low-end or error-tolerant applications such as video calls, mobile videos, etc. */ VideoCodecProfileBaseline = 66, /** - * 77: Main video codec profile; generally used in mainstream electronics such as MP4 players, portable video players, PSP, and iPads. + * 77: Main profile, typically used in mainstream consumer electronics such as MP4, portable video players, PSP, iPad, etc. */ VideoCodecProfileMain = 77, /** - * 100: (Default) High video codec profile; generally used in high-resolution live streaming or television. + * 100: (Default) High profile, typically used in broadcasting, video disc storage, and HDTV. */ VideoCodecProfileHigh = 100, } /** - * Self-defined audio codec profile. + * Audio codec specification for stream publishing output. Defaults to LC-AAC. */ export enum AudioCodecProfileType { /** - * 0: (Default) LC-AAC. + * 0: (Default) LC-AAC specification. */ AudioCodecProfileLcAac = 0, /** - * 1: HE-AAC. + * 1: HE-AAC specification. */ AudioCodecProfileHeAac = 1, /** - * 2: HE-AAC v2. + * 2: HE-AAC v2 specification. 
*/ AudioCodecProfileHeAacV2 = 2, } @@ -3083,27 +3100,27 @@ export enum AudioCodecProfileType { */ export class LocalAudioStats { /** - * The number of audio channels. + * Number of audio channels. */ numChannels?: number; /** - * The sampling rate (Hz) of sending the local user's audio stream. + * Sampling rate of the sent local audio, in Hz. */ sentSampleRate?: number; /** - * The average bitrate (Kbps) of sending the local user's audio stream. + * Average bitrate of the sent local audio, in Kbps. */ sentBitrate?: number; /** - * The internal payload codec. + * Internal payload type. */ internalCodec?: number; /** - * The packet loss rate (%) from the local client to the Agora server before applying the anti-packet loss strategies. + * Packet loss rate (%) from the local end to the Agora edge server before network resilience. */ txPacketLossRate?: number; /** - * The audio device module delay (ms) when playing or recording audio. + * Delay (ms) of the audio device module during audio playback or recording. */ audioDeviceDelay?: number; /** @@ -3111,105 +3128,105 @@ export class LocalAudioStats { */ audioPlayoutDelay?: number; /** - * The ear monitor delay (ms), which is the delay from microphone input to headphone output. + * Ear monitoring delay (ms), i.e., the delay from microphone input to headphone output. */ earMonitorDelay?: number; /** - * Acoustic echo cancellation (AEC) module estimated delay (ms), which is the signal delay between when audio is played locally before being locally captured. + * Echo cancellation delay (ms), i.e., the delay estimated by the Acoustic Echo Cancellation (AEC) module between audio playback and the signal captured locally. */ aecEstimatedDelay?: number; } /** - * States of the Media Push. + * Stream publishing state. */ export enum RtmpStreamPublishState { /** - * 0: The Media Push has not started or has ended. + * 0: Streaming has not started or has ended. 
*/ RtmpStreamPublishStateIdle = 0, /** - * 1: The streaming server and CDN server are being connected. + * 1: Connecting to the streaming and CDN servers. */ RtmpStreamPublishStateConnecting = 1, /** - * 2: The RTMP or RTMPS streaming publishes. The SDK successfully publishes the RTMP or RTMPS streaming and returns this state. + * 2: Streaming is ongoing. This state is returned after successful publishing. */ RtmpStreamPublishStateRunning = 2, /** - * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the streaming is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this state. - * If the SDK successfully resumes the streaming, RtmpStreamPublishStateRunning (2) returns. - * If the streaming does not resume within 60 seconds or server errors occur, RtmpStreamPublishStateFailure (4) returns. If you feel that 60 seconds is too long, you can also actively try to reconnect. + * 3: Recovering the stream. When the CDN encounters an issue or the stream is briefly interrupted, the SDK automatically attempts to recover the stream and returns this state. + * If recovery is successful, it transitions to RtmpStreamPublishStateRunning(2). + * If the server fails or recovery is unsuccessful within 60 seconds, it transitions to RtmpStreamPublishStateFailure(4). If 60 seconds is too long, you can also manually attempt to reconnect. */ RtmpStreamPublishStateRecovering = 3, /** - * 4: The RTMP or RTMPS streaming fails. After a failure, you can troubleshoot the cause of the error through the returned error code. + * 4: Streaming failed. After failure, you can troubleshoot using the returned error code. */ RtmpStreamPublishStateFailure = 4, /** - * 5: The SDK is disconnecting from the Agora streaming server and CDN. When you call stopRtmpStream to stop the Media Push normally, the SDK reports the Media Push state as RtmpStreamPublishStateDisconnecting and RtmpStreamPublishStateIdle in sequence. 
+ * 5: The SDK is disconnecting from the streaming and CDN servers. When you call stopRtmpStream to end the stream normally, the SDK reports the states RtmpStreamPublishStateDisconnecting and RtmpStreamPublishStateIdle in sequence. */ RtmpStreamPublishStateDisconnecting = 5, } /** - * Reasons for changes in the status of RTMP or RTMPS streaming. + * Reason for stream publishing state change. */ export enum RtmpStreamPublishReason { /** - * 0: The RTMP or RTMPS streaming has not started or has ended. + * 0: Stream published successfully. */ RtmpStreamPublishReasonOk = 0, /** - * 1: Invalid argument used. Check the parameter setting. + * 1: Invalid parameter. Please check whether the input parameters are correct. */ RtmpStreamPublishReasonInvalidArgument = 1, /** - * 2: The RTMP or RTMPS streaming is encrypted and cannot be published. + * 2: The stream is encrypted and cannot be published. */ RtmpStreamPublishReasonEncryptedStreamNotAllowed = 2, /** - * 3: Timeout for the RTMP or RTMPS streaming. + * 3: Stream publishing timed out. */ RtmpStreamPublishReasonConnectionTimeout = 3, /** - * 4: An error occurs in Agora's streaming server. + * 4: An error occurred on the streaming server. */ RtmpStreamPublishReasonInternalServerError = 4, /** - * 5: An error occurs in the CDN server. + * 5: An error occurred on the CDN server. */ RtmpStreamPublishReasonRtmpServerError = 5, /** - * 6: The RTMP or RTMPS streaming publishes too frequently. + * 6: Stream publishing requests are too frequent. */ RtmpStreamPublishReasonTooOften = 6, /** - * 7: The host publishes more than 10 URLs. Delete the unnecessary URLs before adding new ones. + * 7: The number of stream URLs for a single host has reached the limit of 10. Please delete some unused stream URLs before adding new ones. */ RtmpStreamPublishReasonReachLimit = 7, /** - * 8: The host manipulates other hosts' URLs. For example, the host updates or stops other hosts' streams. Check your app logic. 
+ * 8: The host is operating on a stream that does not belong to them, such as updating or stopping another host's stream. Please check your app logic. */ RtmpStreamPublishReasonNotAuthorized = 8, /** - * 9: Agora's server fails to find the RTMP or RTMPS streaming. + * 9: The server did not find the stream. */ RtmpStreamPublishReasonStreamNotFound = 9, /** - * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL format is correct. + * 10: The stream URL format is incorrect. Please verify the format of the stream URL. */ RtmpStreamPublishReasonFormatNotSupported = 10, /** - * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check your application code logic. + * 11: The user is not a broadcaster and cannot use the streaming feature. Please check your application logic. */ RtmpStreamPublishReasonNotBroadcaster = 11, /** - * 13: The updateRtmpTranscoding method is called to update the transcoding configuration in a scenario where there is streaming without transcoding. Check your application code logic. + * 13: Called updateRtmpTranscoding to update transcoding properties without enabling transcoding. Please check your application logic. */ RtmpStreamPublishReasonTranscodingNoMixStream = 13, /** - * 14: Errors occurred in the host's network. + * 14: The host's network encountered an error. */ RtmpStreamPublishReasonNetDown = 14, /** @@ -3217,7 +3234,7 @@ export enum RtmpStreamPublishReason { */ RtmpStreamPublishReasonInvalidAppid = 15, /** - * 16: Your project does not have permission to use streaming services. + * 16: Your project does not have permission to use the streaming service. */ RtmpStreamPublishReasonInvalidPrivilege = 16, /** @@ -3227,23 +3244,23 @@ export enum RtmpStreamPublishReason { } /** - * Events during the Media Push. + * Events that occur during CDN live streaming. 
*/ export enum RtmpStreamingEvent { /** - * 1: An error occurs when you add a background image or a watermark image in the Media Push. + * 1: Failed to add background image or watermark during CDN live streaming. */ RtmpStreamingEventFailedLoadImage = 1, /** - * 2: The streaming URL is already being used for Media Push. If you want to start new streaming, use a new streaming URL. + * 2: The stream URL is already in use. If you want to start a new stream, please use a new stream URL. */ RtmpStreamingEventUrlAlreadyInUse = 2, /** - * 3: The feature is not supported. + * 3: Feature not supported. */ RtmpStreamingEventAdvancedFeatureNotSupport = 3, /** - * 4: Reserved. + * 4: Reserved parameter. */ RtmpStreamingEventRequestTooOften = 4, } @@ -3251,19 +3268,19 @@ export enum RtmpStreamingEvent { /** * Image properties. * - * This class sets the properties of the watermark and background images in the live video. + * Used to set the watermark and background image properties for live video. */ export class RtcImage { /** - * The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter is 1024 bytes. + * The HTTP/HTTPS URL of the image on the live video. The character length must not exceed 1024 bytes. */ url?: string; /** - * The x-coordinate (px) of the image on the video frame (taking the upper left corner of the video frame as the origin). + * The x-coordinate (px) of the image on the video frame, using the top-left corner of the output video frame as the origin. */ x?: number; /** - * The y-coordinate (px) of the image on the video frame (taking the upper left corner of the video frame as the origin). + * The y-coordinate (px) of the image on the video frame, using the top-left corner of the output video frame as the origin. */ y?: number; /** @@ -3275,286 +3292,287 @@ export class RtcImage { */ height?: number; /** - * The layer index of the watermark or background image. 
When you use the watermark array to add a watermark or multiple watermarks, you must pass a value to zOrder in the range [1,255]; otherwise, the SDK reports an error. In other cases, zOrder can optionally be passed in the range [0,255], with 0 being the default value. 0 means the bottom layer and 255 means the top layer. + * The z-order of the watermark or background image. When adding one or more watermarks using an array, you must assign a value to zOrder, with a valid range of [1,255], otherwise the SDK will report an error. In other cases, zOrder is optional, with a valid range of [0,255], where 0 is the default. 0 indicates the bottom layer, and 255 indicates the top layer. */ zOrder?: number; /** - * The transparency of the watermark or background image. The range of the value is [0.0,1.0]: - * 0.0: Completely transparent. - * 1.0: (Default) Opaque. + * The transparency of the watermark or background image. Valid range is [0.0, 1.0]: + * 0.0: Fully transparent. + * 1.0: (Default) Fully opaque. */ alpha?: number; } /** - * The configuration for advanced features of the RTMP or RTMPS streaming with transcoding. + * Advanced feature configuration for live transcoding. * - * If you want to enable the advanced features of streaming with transcoding, contact. + * To use advanced features for live transcoding, please [contact sales](https://www.shengwang.cn/contact-sales/). */ export class LiveStreamAdvancedFeature { /** - * The feature names, including LBHQ (high-quality video with a lower bitrate) and VEO (optimized video encoder). + * Name of the advanced feature for live transcoding, including LBHQ (low-bitrate high-quality video) and VEO (optimized video encoder). */ featureName?: string; /** - * Whether to enable the advanced features of streaming with transcoding: true : Enable the advanced features. false : (Default) Do not enable the advanced features. 
+ * Whether to enable the advanced feature for live transcoding: true : Enable the advanced feature for live transcoding. false : (default) Disable the advanced feature for live transcoding. */ opened?: boolean; } /** - * Connection states. + * Network connection state. */ export enum ConnectionStateType { /** - * 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of the following phases: - * Theinitial state before calling the joinChannel method. - * The app calls the leaveChannel method. + * 1: Network connection is disconnected. This state indicates that the SDK is in: + * The initialization phase before calling joinChannel to join a channel. + * Or the phase after calling leaveChannel to leave a channel. */ ConnectionStateDisconnected = 1, /** - * 2: The SDK is connecting to the Agora edge server. This state indicates that the SDK is establishing a connection with the specified channel after the app calls joinChannel. - * If the SDK successfully joins the channel, it triggers the onConnectionStateChanged callback and the connection state switches to ConnectionStateConnected. - * After the connection is established, the SDK also initializes the media and triggers onJoinChannelSuccess when everything is ready. + * 2: Connecting to the network. This state indicates that the SDK is establishing a connection with the specified channel after calling joinChannel. + * If the channel is joined successfully, the app receives the onConnectionStateChanged callback, indicating the network state has changed to ConnectionStateConnected. + * After the connection is established, the SDK initializes media and triggers the onJoinChannelSuccess callback once everything is ready. */ ConnectionStateConnecting = 2, /** - * 3: The SDK is connected to the Agora edge server. This state also indicates that the user has joined a channel and can now publish or subscribe to a media stream in the channel. 
If the connection to the channel is lost because, for example, if the network is down or switched, the SDK automatically tries to reconnect and triggers onConnectionStateChanged callback, notifying that the current network state becomes ConnectionStateReconnecting. + * 3: Network is connected. This state indicates that the user has joined the channel and can publish or subscribe to media streams. If the connection is interrupted due to network disconnection or switching, the SDK automatically reconnects. The app receives the onConnectionStateChanged callback, indicating the network state has changed to ConnectionStateReconnecting. */ ConnectionStateConnected = 3, /** - * 4: The SDK keeps reconnecting to the Agora edge server. The SDK keeps rejoining the channel after being disconnected from a joined channel because of network issues. - * If the SDK cannot rejoin the channel within 10 seconds, it triggers onConnectionLost, stays in the ConnectionStateReconnecting state, and keeps rejoining the channel. - * If the SDK fails to rejoin the channel 20 minutes after being disconnected from the Agora edge server, the SDK triggers the onConnectionStateChanged callback, switches to the ConnectionStateFailed state, and stops rejoining the channel. + * 4: Reconnecting to the network. This state indicates that the SDK had previously joined a channel but the connection was interrupted due to network issues. The SDK is now trying to reconnect to the channel. + * If the SDK cannot rejoin the channel within 10 seconds, onConnectionLost is triggered. The SDK remains in the ConnectionStateReconnecting state and continues trying to rejoin. + * If the SDK still cannot rejoin the channel within 20 minutes after disconnection, the app receives the onConnectionStateChanged callback, indicating that the network state has changed to ConnectionStateFailed, and the SDK stops trying to reconnect. 
*/ ConnectionStateReconnecting = 4, /** - * 5: The SDK fails to connect to the Agora edge server or join the channel. This state indicates that the SDK stops trying to rejoin the channel. You must call leaveChannel to leave the channel. - * You can call joinChannel to rejoin the channel. - * If the SDK is banned from joining the channel by the Agora edge server through the RESTful API, the SDK triggers the onConnectionStateChanged callback. + * 5: Network connection failed. This state indicates that the SDK has stopped trying to rejoin the channel and you need to call leaveChannel to leave the channel. + * If the user wants to rejoin the channel, they need to call joinChannel again. + * If the SDK is prohibited from joining the channel by the server using RESTful API, the app will receive onConnectionStateChanged. */ ConnectionStateFailed = 5, } /** - * Transcoding configurations of each host. + * Settings for each host participating in the transcoding mix. */ export class TranscodingUser { /** - * The user ID of the host. + * User ID of the host. */ uid?: number; /** - * The x coordinate (pixel) of the host's video on the output video frame (taking the upper left corner of the video frame as the origin). The value range is [0, width], where width is the width set in LiveTranscoding. + * The x-coordinate (px) of the host's video in the output video, using the top-left corner of the output video as the origin. Value range: [0,width], where width is set in LiveTranscoding. */ x?: number; /** - * The y coordinate (pixel) of the host's video on the output video frame (taking the upper left corner of the video frame as the origin). The value range is [0, height], where height is the height set in LiveTranscoding. + * The y-coordinate (px) of the host's video in the output video, using the top-left corner of the output video as the origin. Value range: [0,height], where height is set in LiveTranscoding. */ y?: number; /** - * The width (pixel) of the host's video. 
+ * Width (px) of the host's video. */ width?: number; /** - * The height (pixel) of the host's video. + * Height (px) of the host's video. */ height?: number; /** - * The layer index number of the host's video. The value range is [0, 100]. - * 0: (Default) The host's video is the bottom layer. - * 100: The host's video is the top layer. - * If the value is less than 0 or greater than 100, ErrInvalidArgument error is returned. - * Setting zOrder to 0 is supported. + * If the value is less than 0 or greater than 100, an ErrInvalidArgument error is returned. + * Setting zOrder to 0 is supported. The layer index of the host's video. Value range: [0,100]. + * 0: (Default) Video is at the bottom layer. + * 100: Video is at the top layer. */ zOrder?: number; /** - * The transparency of the host's video. The value range is [0.0,1.0]. - * 0.0: Completely transparent. - * 1.0: (Default) Opaque. + * Transparency of the host's video. Value range: [0.0,1.0]. + * 0.0: Fully transparent. + * 1.0: (Default) Fully opaque. */ alpha?: number; /** - * The audio channel used by the host's audio in the output audio. The default value is 0, and the value range is [0, 5]. 0 : (Recommended) The defaut setting, which supports dual channels at most and depends on the upstream of the host. 1 : The host's audio uses the FL audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. 2 : The host's audio uses the FC audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. 3 : The host's audio uses the FR audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. 4 : The host's audio uses the BL audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. 5 : The host's audio uses the BR audio channel. 
If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. 0xFF or a value greater than 5 : The host's audio is muted, and the Agora server removes the host's audio. If the value is not 0, a special player is required. + * When the value is not 0, a special player is required. The audio channel occupied by the host's audio in the output audio. Default is 0. Value range: [0,5]: 0 : (Recommended) Default audio mixing setting, supports up to stereo, related to the host's upstream audio. 1 : Host audio in the FL channel of the output audio. If the host's upstream audio is multi-channel, the Agora server mixes it into mono first. 2 : Host audio in the FC channel of the output audio. If the host's upstream audio is multi-channel, the Agora server mixes it into mono first. 3 : Host audio in the FR channel of the output audio. If the host's upstream audio is multi-channel, the Agora server mixes it into mono first. 4 : Host audio in the BL channel of the output audio. If the host's upstream audio is multi-channel, the Agora server mixes it into mono first. 5 : Host audio in the BR channel of the output audio. If the host's upstream audio is multi-channel, the Agora server mixes it into mono first. 0xFF or values greater than 5 : The host's audio is muted and removed by the Agora server. */ audioChannel?: number; } /** - * Transcoding configurations for Media Push. + * Transcoding properties for CDN streaming. */ export class LiveTranscoding { /** - * The width of the video in pixels. The default value is 360. - * When pushing video streams to the CDN, the value range of width is [64,1920]. If the value is less than 64, Agora server automatically adjusts it to 64; if the value is greater than 1920, Agora server automatically adjusts it to 1920. - * When pushing audio streams to the CDN, set width and height as 0. + * Total width of the video stream in pixels, default is 360. + * For video streams, the value range is [64,1920]. 
Values below 64 will be adjusted to 64, and values above 1920 will be adjusted to 1920 by the Agora server. + * For audio-only streams, set both width and height to 0. */ width?: number; /** - * The height of the video in pixels. The default value is 640. - * When pushing video streams to the CDN, the value range of height is [64,1080]. If the value is less than 64, Agora server automatically adjusts it to 64; if the value is greater than 1080, Agora server automatically adjusts it to 1080. - * When pushing audio streams to the CDN, set width and height as 0. + * Total height of the video stream in pixels, default is 640. + * For video streams, the value range is [64,1080]. Values below 64 will be adjusted to 64, and values above 1080 will be adjusted to 1080 by the Agora server. + * For audio-only streams, set both width and height to 0. */ height?: number; /** - * The encoding bitrate (Kbps) of the video. This parameter does not need to be set; keeping the default value STANDARD_BITRATE is sufficient. The SDK automatically matches the most suitable bitrate based on the video resolution and frame rate you have set. For the correspondence between video resolution and frame rate, see. + * Video encoding bitrate in Kbps. You do not need to set this parameter. Keep the default value STANDARD_BITRATE, and the SDK will automatically match the optimal bitrate based on the video resolution and frame rate you set. For the relationship between resolution and frame rate, see [Video Profile](https://doc.shengwang.cn/doc/rtc/rn/basic-features/video-profile#%E8%A7%86%E9%A2%91%E5%B1%9E%E6%80%A7%E5%8F%82%E8%80%83). */ videoBitrate?: number; /** - * Frame rate (fps) of the output video stream set for Media Push. The default value is 15. The value range is (0,30]. The Agora server adjusts any value over 30 to 30. + * Frame rate of the output video for CDN streaming. The range is (0,30], in fps. Default is 15 fps. The Agora server will adjust any value above 30 fps to 30 fps. 
*/ videoFramerate?: number; /** - * Deprecated This member is deprecated. Latency mode: true : Low latency with unassured quality. false : (Default) High latency with assured quality. + * Deprecated, not recommended. Low latency mode true : Low latency, no guarantee on video quality. false : (default) High latency, guaranteed video quality. */ lowLatency?: boolean; /** - * GOP (Group of Pictures) in fps of the video frames for Media Push. The default value is 30. + * GOP (Group of Pictures) of the output video for CDN streaming, in frames. Default is 30. */ videoGop?: number; /** - * Video codec profile type for Media Push. Set it as 66, 77, or 100 (default). See VideoCodecProfileType for details. If you set this parameter to any other value, Agora adjusts it to the default value. + * Codec profile of the output video for CDN streaming. Can be set to 66, 77, or 100. See VideoCodecProfileType. If you set this parameter to other values, the Agora server will adjust it to the default value. */ videoCodecProfile?: VideoCodecProfileType; /** - * The background color in RGB hex value. Value only. Do not include a preceeding #. For example, 0xFFB6C1 (light pink). The default value is 0x000000 (black). + * Background color of the output video for CDN streaming, specified as a hexadecimal RGB integer without the # symbol, e.g., 0xFFB6C1 for light pink. Default is 0x000000 (black). */ backgroundColor?: number; /** - * Video codec profile types for Media Push. See VideoCodecTypeForStream. + * Codec type of the output video for CDN streaming. See VideoCodecTypeForStream. */ videoCodecType?: VideoCodecTypeForStream; /** - * The number of users in the Media Push. The value range is [0,17]. + * Number of users in the video mixing layout. Default is 0. Value range: [0,17]. */ userCount?: number; /** - * Manages the user layout configuration in the Media Push. Agora supports a maximum of 17 transcoding users in a Media Push channel. See TranscodingUser. 
+ * Manages users participating in video mixing for CDN streaming. Supports up to 17 users simultaneously. See TranscodingUser. */ transcodingUsers?: TranscodingUser[]; /** - * Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream to the CDN live client. Maximum length: 4096 bytes. For more information on SEI, see SEI-related questions. + * Reserved parameter: custom information sent to the CDN client, used to fill SEI frames in H264/H265 video. Length limit: 4096 bytes. For more about SEI, see [SEI Frame Issues](https://doc.shengwang.cn/faq/quality-issues/sei). */ transcodingExtraInfo?: string; /** - * Deprecated Obsolete and not recommended for use. The metadata sent to the CDN client. + * Metadata sent to the CDN client. Deprecated, not recommended. */ metadata?: string; /** - * The watermark on the live video. The image format needs to be PNG. See RtcImage. You can add one watermark, or add multiple watermarks using an array. This parameter is used with watermarkCount. + * Watermark on the live video. The image format must be PNG. See RtcImage. + * You can add one watermark or use an array to add multiple watermarks. This parameter works with watermarkCount. */ watermark?: RtcImage[]; /** - * The number of watermarks on the live video. The total number of watermarks and background images can range from 0 to 10. This parameter is used with watermark. + * Number of watermarks on the live video. The total number of watermarks and background images must be between 0 and 10. This parameter works with watermark. */ watermarkCount?: number; /** - * The number of background images on the live video. The image format needs to be PNG. See RtcImage. You can add a background image or use an array to add multiple background images. This parameter is used with backgroundImageCount. + * Background image on the live video. The image format must be PNG. See RtcImage. 
+ * You can add one background image or use an array to add multiple background images. This parameter works with backgroundImageCount. */ backgroundImage?: RtcImage[]; /** - * The number of background images on the live video. The total number of watermarks and background images can range from 0 to 10. This parameter is used with backgroundImage. + * Number of background images on the live video. The total number of watermarks and background images must be between 0 and 10. This parameter works with backgroundImage. */ backgroundImageCount?: number; /** - * The audio sampling rate (Hz) of the output media stream. See AudioSampleRateType. + * Audio sample rate (Hz) of the output media stream for CDN streaming. See AudioSampleRateType. */ audioSampleRate?: AudioSampleRateType; /** - * Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the highest value is 128. + * Bitrate of the output audio for CDN streaming, in Kbps. Default is 48, maximum is 128. */ audioBitrate?: number; /** - * The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo) audio channels. Special players are required if you choose 3, 4, or 5. - * 1: (Default) Mono. - * 2: Stereo. - * 3: Three audio channels. - * 4: Four audio channels. - * 5: Five audio channels. + * Number of audio channels in the output audio for CDN streaming. Default is 1. Value range: integer from [1,5]. Recommended values are 1 or 2. Values 3, 4, and 5 require special player support: + * 1: (default) Mono + * 2: Stereo + * 3: Three channels + * 4: Four channels + * 5: Five channels */ audioChannels?: number; /** - * Audio codec profile type for Media Push. See AudioCodecProfileType. + * Codec profile of the output audio for CDN streaming. See AudioCodecProfileType. */ audioCodecProfile?: AudioCodecProfileType; /** - * Advanced features of the Media Push with transcoding. See LiveStreamAdvancedFeature. + * Advanced features for live transcoding. 
See LiveStreamAdvancedFeature. */ advancedFeatures?: LiveStreamAdvancedFeature[]; /** - * The number of enabled advanced features. The default value is 0. + * Number of enabled advanced features. Default is 0. */ advancedFeatureCount?: number; } /** - * The video streams for local video mixing. + * Video stream participating in local composition. */ export class TranscodingVideoStream { /** - * The video source type for local video mixing. See VideoSourceType. + * Type of video source participating in local composition. See VideoSourceType. */ sourceType?: VideoSourceType; /** - * The user ID of the remote user. Use this parameter only when the source type is VideoSourceRemote for local video mixing. + * Remote user ID. Use this parameter only when the video source type is VideoSourceRemote. */ remoteUserUid?: number; /** - * The file path of local images. Use this parameter only when the source type is the image for local video mixing. Examples: + * Use this parameter only when the video source type is an image. Path to the local image. Example paths: * Android: /storage/emulated/0/Pictures/image.png * iOS: /var/mobile/Containers/Data/Application//Documents/image.png */ imageUrl?: string; /** - * (Optional) Media player ID. Use the parameter only when you set sourceType to VideoSourceMediaPlayer. + * (Optional) Media player ID. Required when sourceType is set to VideoSourceMediaPlayer. */ mediaPlayerId?: number; /** - * The relative lateral displacement of the top left corner of the video for local video mixing to the origin (the top left corner of the canvas). + * Horizontal offset of the top-left corner of the video relative to the top-left corner (origin) of the composition canvas. */ x?: number; /** - * The relative longitudinal displacement of the top left corner of the captured video to the origin (the top left corner of the canvas). + * Vertical offset of the top-left corner of the video relative to the top-left corner (origin) of the composition canvas. 
*/ y?: number; /** - * The width (px) of the video for local video mixing on the canvas. + * Width (px) of the video in the composition. */ width?: number; /** - * The height (px) of the video for local video mixing on the canvas. + * Height (px) of the video in the composition. */ height?: number; /** - * The number of the layer to which the video for the local video mixing belongs. The value range is [0, 100]. - * 0: (Default) The layer is at the bottom. - * 100: The layer is at the top. + * Layer index of the video in the composition. Value range: [0,100]. + * 0: (Default) Bottom layer. + * 100: Top layer. */ zOrder?: number; /** - * The transparency of the video for local video mixing. The value range is [0.0, 1.0]. 0.0 indicates that the video is completely transparent, and 1.0 indicates that it is opaque. + * Transparency of the video in the composition. Value range: [0.0,1.0]. 0.0 means fully transparent, 1.0 means fully opaque. */ alpha?: number; /** - * Whether to mirror the video for the local video mixing. true : Mirror the video for the local video mixing. false : (Default) Do not mirror the video for the local video mixing. This parameter only takes effect on video source types that are cameras. + * This parameter only takes effect for camera video sources. Whether to mirror the video in the composition: true : Mirror the video. false : (Default) Do not mirror the video. */ mirror?: boolean; } /** - * The configuration of the video mixing on the local client. + * Local video mixing configuration. */ export class LocalTranscoderConfiguration { /** - * The number of the video streams for the video mixing on the local client. + * Number of video streams to be mixed locally. */ streamCount?: number; /** - * The video streams for local video mixing. See TranscodingVideoStream. + * Video streams to be mixed locally. See TranscodingVideoStream. 
*/ videoInputStreams?: TranscodingVideoStream[]; /** - * The encoding configuration of the mixed video stream after the local video mixing. See VideoEncoderConfiguration. + * Encoding configuration for the mixed video after local mixing. See VideoEncoderConfiguration. */ videoOutputConfiguration?: VideoEncoderConfiguration; /** @@ -3564,27 +3582,27 @@ export class LocalTranscoderConfiguration { } /** - * The error code of the local video mixing failure. + * Local video composition error codes. */ export enum VideoTranscoderError { /** - * 1: The selected video source has not started video capture. You need to create a video track for it and start video capture. + * 1: The specified video source has not started video capture. You need to create a video track and start video capture for it. */ VtErrVideoSourceNotReady = 1, /** - * 2: The video source type is invalid. You need to re-specify the supported video source type. + * 2: Invalid video source type. You need to re-specify a supported video source type. */ VtErrInvalidVideoSourceType = 2, /** - * 3: The image path is invalid. You need to re-specify the correct image path. + * 3: Invalid image path. You need to re-specify a correct image path. */ VtErrInvalidImagePath = 3, /** - * 4: The image format is invalid. Make sure the image format is one of PNG, JPEG, or GIF. + * 4: Invalid image format. Make sure the image format is one of PNG, JPEG, or GIF. */ VtErrUnsupportImageFormat = 4, /** - * 5: The video encoding resolution after video mixing is invalid. + * 5: Invalid video encoding resolution after composition. */ VtErrInvalidLayout = 5, /** @@ -3594,227 +3612,219 @@ export enum VideoTranscoderError { } /** - * The source of the audio streams that are mixed locally. + * Audio source for local audio mixing. */ export class MixedAudioStream { /** - * The type of the audio source. See AudioSourceType. + * Type of the audio source. See AudioSourceType. 
*/ sourceType?: AudioSourceType; /** - * The user ID of the remote user. Set this parameter if the source type of the locally mixed audio steams is AudioSourceRemoteUser. + * Remote user ID. This parameter is required when the audio source type for local audio mixing is AudioSourceRemoteUser. */ remoteUserUid?: number; /** - * The channel name. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters (89 characters in total): - * All lowercase English letters: a to z. - * All uppercase English letters: A to Z. - * All numeric characters: 0 to 9. - * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," Set this parameter if the source type of the locally mixed audio streams is AudioSourceRemoteChannel or AudioSourceRemoteUser. + * @ignore */ channelId?: string; /** - * The audio track ID. Set this parameter to the custom audio track ID returned in createCustomAudioTrack. Set this parameter if the source type of the locally mixed audio steams is AudioSourceCustom. + * Audio track ID. Set this to the custom audio track ID returned by the createCustomAudioTrack method. This parameter is required when the audio source type for local audio mixing is AudioSourceCustom. */ trackId?: number; } /** - * The configurations for mixing the local audio. + * Local audio mixing configuration. */ export class LocalAudioMixerConfiguration { /** - * The number of the audio streams that are mixed locally. + * Number of audio streams to be mixed locally. */ streamCount?: number; /** - * The source of the audio streams that are mixed locally. See MixedAudioStream. + * Audio sources to be mixed locally. See MixedAudioStream. 
*/ audioInputStreams?: MixedAudioStream[]; /** - * Whether the mxied audio stream uses the timestamp of the audio frames captured by the local microphone. true : (Default) Yes. Set to this value if you want all locally captured audio streams synchronized. false : No. The SDK uses the timestamp of the audio frames at the time when they are mixed. + * Whether the mixed audio stream uses the timestamp of audio frames captured by the local microphone: true : (Default) Uses the timestamp of audio frames captured by the local microphone. Set this value if you want all locally captured audio streams to stay synchronized. false : Does not use the timestamp of audio frames captured by the local microphone. The SDK uses the timestamp when the mixed audio frame is constructed. */ syncWithLocalMic?: boolean; } /** - * Configurations of the last-mile network test. + * Last mile network probe configuration. */ export class LastmileProbeConfig { /** - * Sets whether to test the uplink network. Some users, for example, the audience members in a LIVE_BROADCASTING channel, do not need such a test. true : Test the uplink network. false : Do not test the uplink network. + * Whether to probe the uplink network. Some users, such as audience members in a live broadcast channel, do not need network probing: true : Probe uplink network. false : Do not probe uplink network. */ probeUplink?: boolean; /** - * Sets whether to test the downlink network: true : Test the downlink network. false : Do not test the downlink network. + * Whether to probe the downlink network: true : Probe downlink network. false : Do not probe downlink network. */ probeDownlink?: boolean; /** - * The expected maximum uplink bitrate (bps) of the local user. The value range is [100000, 5000000]. Agora recommends referring to setVideoEncoderConfiguration to set the value. + * Expected maximum sending bitrate in bps, ranging from [100000,5000000]. 
It is recommended to refer to the bitrate value in setVideoEncoderConfiguration when setting this parameter. */ expectedUplinkBitrate?: number; /** - * The expected maximum downlink bitrate (bps) of the local user. The value range is [100000,5000000]. + * Expected maximum receiving bitrate in bps, ranging from [100000,5000000]. */ expectedDownlinkBitrate?: number; } /** - * The status of the last-mile probe test. + * Status of last mile quality probe result. */ export enum LastmileProbeResultState { /** - * 1: The last-mile network probe test is complete. + * 1: Indicates the last mile quality probe result is complete. */ LastmileProbeResultComplete = 1, /** - * 2: The last-mile network probe test is incomplete because the bandwidth estimation is not available due to limited test resources. One possible reason is that testing resources are temporarily limited. + * 2: Indicates the last mile quality probe did not perform bandwidth estimation, so the result is incomplete. One possible reason is limited testing resources. */ LastmileProbeResultIncompleteNoBwe = 2, /** - * 3: The last-mile network probe test is not carried out. Probably due to poor network conditions. + * 3: Last mile quality probe was not performed. One possible reason is network disconnection. */ LastmileProbeResultUnavailable = 3, } /** - * Results of the uplink or downlink last-mile network test. + * Last mile network quality probe result for uplink or downlink. */ export class LastmileProbeOneWayResult { /** - * The packet loss rate (%). + * Packet loss rate. */ packetLossRate?: number; /** - * The network jitter (ms). + * Network jitter (ms). */ jitter?: number; /** - * The estimated available bandwidth (bps). + * Estimated available network bandwidth (bps). */ availableBandwidth?: number; } /** - * Results of the uplink and downlink last-mile network tests. + * Last mile network quality probe result for both uplink and downlink. 
*/ export class LastmileProbeResult { /** - * The status of the last-mile network tests. See LastmileProbeResultState. + * Status of the last mile probe result. See LastmileProbeResultState. */ state?: LastmileProbeResultState; /** - * Results of the uplink last-mile network test. See LastmileProbeOneWayResult. + * Uplink network quality report. See LastmileProbeOneWayResult. */ uplinkReport?: LastmileProbeOneWayResult; /** - * Results of the downlink last-mile network test. See LastmileProbeOneWayResult. + * Downlink network quality report. See LastmileProbeOneWayResult. */ downlinkReport?: LastmileProbeOneWayResult; /** - * The round-trip time (ms). + * Round-trip time (ms). */ rtt?: number; } /** - * Reasons causing the change of the connection state. + * The reason for the change in network connection state. */ export enum ConnectionChangedReasonType { /** - * 0: The SDK is connecting to the Agora edge server. + * 0: Connecting to the network. */ ConnectionChangedConnecting = 0, /** - * 1: The SDK has joined the channel successfully. + * 1: Successfully joined the channel. */ ConnectionChangedJoinSuccess = 1, /** - * 2: The connection between the SDK and the Agora edge server is interrupted. + * 2: Network connection interrupted. */ ConnectionChangedInterrupted = 2, /** - * 3: The connection between the SDK and the Agora edge server is banned by the Agora edge server. For example, when a user is kicked out of the channel, this status will be returned. + * 3: Network connection is banned by the server. For example, this status is returned when the user is kicked out of the channel. */ ConnectionChangedBannedByServer = 3, /** - * 4: The SDK fails to join the channel. When the SDK fails to join the channel for more than 20 minutes, this code will be returned and the SDK stops reconnecting to the channel. You need to prompt the user to try to switch to another network and rejoin the channel. + * 4: Failed to join the channel. 
If the SDK fails to join the channel after trying for 20 minutes, this status is returned and the SDK stops trying to reconnect. Prompt the user to switch networks and try joining the channel again. */ ConnectionChangedJoinFailed = 4, /** - * 5: The SDK has left the channel. + * 5: Left the channel. */ ConnectionChangedLeaveChannel = 5, /** - * 6: The App ID is invalid. You need to rejoin the channel with a valid APP ID and make sure the App ID you are using is consistent with the one generated in the Agora Console. + * 6: Invalid App ID. Use a valid App ID to rejoin the channel and ensure the App ID matches the one generated in the Agora Console. */ ConnectionChangedInvalidAppId = 6, /** - * 7: Invalid channel name. Rejoin the channel with a valid channel name. A valid channel name is a string of up to 64 bytes in length. Supported characters (89 characters in total): - * All lowercase English letters: a to z. - * All uppercase English letters: A to Z. - * All numeric characters: 0 to 9. - * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," + * 7: Invalid channel name. Use a valid channel name to rejoin the channel. A valid channel name is a string within 64 bytes. The supported character set includes 89 characters: all lowercase English letters (a to z), all uppercase English letters (A to Z), all digits (0 to 9), and the punctuation characters "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", ",". */ ConnectionChangedInvalidChannelName = 7, /** - * 8: Invalid token. Possible reasons are as follows: - * The App Certificate for the project is enabled in Agora Console, but you do not pass in a token when joining a channel. - * The uid specified when calling joinChannel to join the channel is inconsistent with the uid passed in when generating the token. - * The generated token and the token used to join the channel are not consistent. Ensure the following: - * When your project enables App Certificate, you need to pass in a token to join a channel. - * The user ID specified when generating the token is consistent with the user ID used when joining the channel. 
- * The generated token is the same as the token passed in to join the channel. + * 8: Invalid Token. Possible reasons: + * Your project has App Certificate enabled, but you joined the channel without using a Token. + * The user ID specified in joinChannel does not match the one used to generate the Token. + * The generated Token does not match the one used to join the channel. Ensure that: + * When App Certificate is enabled, use a Token to join the channel. + * The user ID used to generate the Token matches the one used to join the channel. + * The generated Token matches the one used to join the channel. */ ConnectionChangedInvalidToken = 8, /** - * 9: The token currently being used has expired. You need to generate a new token on your server and rejoin the channel with the new token. + * 9: The current Token has expired. Generate a new Token on your server and use it to rejoin the channel. */ ConnectionChangedTokenExpired = 9, /** - * 10: The connection is rejected by server. Possible reasons are as follows: - * The user is already in the channel and still calls a method, for example, joinChannel, to join the channel. Stop calling this method to clear this error. - * The user tries to join a channel while a test call is in progress. The user needs to join the channel after the call test ends. + * 10: This user is banned by the server. Possible reasons: + * The user has already joined the channel and calls the join channel API again, such as joinChannel, which returns this status. Stop calling this method. + * The user tries to join a channel during a call test. Wait until the call test ends before joining the channel. */ ConnectionChangedRejectedByServer = 10, /** - * 11: The connection state changed to reconnecting because the SDK has set a proxy server. + * 11: SDK attempts to reconnect due to proxy server settings. */ ConnectionChangedSettingProxyServer = 11, /** - * 12: The connection state changed because the token is renewed. 
+ * 12: Network connection state changed due to Token renewal. */ ConnectionChangedRenewToken = 12, /** - * 13: Client IP address changed. If you receive this code multiple times, You need to prompt the user to switch networks and try joining the channel again. + * 13: Client IP address changed. If this status code is received multiple times, prompt the user to switch networks and try rejoining the channel. */ ConnectionChangedClientIpAddressChanged = 13, /** - * 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The SDK tries to reconnect to the server automatically. + * 14: Connection keep-alive timeout between SDK and server, entering auto-reconnect state. */ ConnectionChangedKeepAliveTimeout = 14, /** - * 15: The user has rejoined the channel successfully. + * 15: Successfully rejoined the channel. */ ConnectionChangedRejoinSuccess = 15, /** - * 16: The connection between the SDK and the server is lost. + * 16: SDK lost connection with the server. */ ConnectionChangedLost = 16, /** - * 17: The connection state changes due to the echo test. + * 17: Connection state changed due to echo test. */ ConnectionChangedEchoTest = 17, /** - * 18: The local IP address was changed by the user. + * 18: Local IP address changed by the user. */ ConnectionChangedClientIpAddressChangedByUser = 18, /** - * 19: The user joined the same channel from different devices with the same UID. + * 19: The same UID joined the same channel from different devices. */ ConnectionChangedSameUidLogin = 19, /** - * 20: The number of hosts in the channel has reached the upper limit. + * 20: The number of broadcasters in the channel has reached the limit. */ ConnectionChangedTooManyBroadcasters = 20, /** @@ -3836,71 +3846,71 @@ export enum ConnectionChangedReasonType { } /** - * The reason for a user role switch failure. + * Reasons for client role change failure. 
*/ export enum ClientRoleChangeFailedReason { /** - * 1: The number of hosts in the channel exceeds the limit. This enumerator is reported only when the support for 128 users is enabled. The maximum number of hosts is based on the actual number of hosts configured when you enable the 128-user feature. + * 1: The number of broadcasters in the channel has reached the limit. This enum is reported only when the 128-user feature is enabled. The limit depends on the actual configuration when enabling the 128-user feature. */ ClientRoleChangeFailedTooManyBroadcasters = 1, /** - * 2: The request is rejected by the Agora server. Agora recommends you prompt the user to try to switch their user role again. + * 2: The request is rejected by the server. Prompt the user to retry changing the role. */ ClientRoleChangeFailedNotAuthorized = 2, /** - * 3: The request is timed out. Agora recommends you prompt the user to check the network connection and try to switch their user role again. Deprecated: This enumerator is deprecated since v4.4.0 and is not recommended for use. + * 3: Request timed out. Prompt the user to check the network connection and retry. Deprecated: This enum value is deprecated since v4.4.0 and not recommended for use. */ ClientRoleChangeFailedRequestTimeOut = 3, /** - * 4: The SDK is disconnected from the Agora edge server. You can troubleshoot the failure through the reason reported by onConnectionStateChanged. Deprecated: This enumerator is deprecated since v4.4.0 and is not recommended for use. + * 4: Network connection failed. You can troubleshoot the cause based on the reason reported in onConnectionStateChanged. Deprecated: This enum value is deprecated since v4.4.0 and not recommended for use. */ ClientRoleChangeFailedConnectionFailed = 4, } /** - * Network type. + * Network connection type. */ export enum NetworkType { /** - * -1: The network type is unknown. + * -1: Unknown network connection type. 
*/ NetworkTypeUnknown = -1, /** - * 0: The SDK disconnects from the network. + * 0: Network connection is disconnected. */ NetworkTypeDisconnected = 0, /** - * 1: The network type is LAN. + * 1: LAN network type. */ NetworkTypeLan = 1, /** - * 2: The network type is Wi-Fi (including hotspots). + * 2: Wi-Fi network type (including hotspot). */ NetworkTypeWifi = 2, /** - * 3: The network type is mobile 2G. + * 3: 2G mobile network type. */ NetworkTypeMobile2g = 3, /** - * 4: The network type is mobile 3G. + * 4: 3G mobile network type. */ NetworkTypeMobile3g = 4, /** - * 5: The network type is mobile 4G. + * 5: 4G mobile network type. */ NetworkTypeMobile4g = 5, /** - * 6: The network type is mobile 5G. + * 6: 5G mobile network type. */ NetworkTypeMobile5g = 6, } /** - * Setting mode of the view. + * View setup mode. */ export enum VideoViewSetupMode { /** - * 0: (Default) Clear all added views and replace with a new view. + * 0: (Default) Clears all added views and replaces them with a new view. */ VideoViewSetupReplace = 0, /** @@ -3908,55 +3918,55 @@ export enum VideoViewSetupMode { */ VideoViewSetupAdd = 1, /** - * 2: Deletes a view. When you no longer need to use a certain view, it is recommended to delete the view by setting setupMode to VideoViewSetupRemove, otherwise it may lead to leak of rendering resources. + * 2: Removes a view. When you no longer need a view, it is recommended to set setupMode to VideoViewSetupRemove to remove the view in time, otherwise it may cause rendering resource leaks. */ VideoViewSetupRemove = 2, } /** - * Attributes of the video canvas object. + * Properties of the video canvas object. */ export class VideoCanvas { /** - * User ID that publishes the video source. + * For Android and iOS platforms, when the video source is a transcoded video stream (VideoSourceTranscoded), this parameter indicates the user ID of the publisher of the transcoded video stream. The local user's uid defaults to 0. 
If you want to use a custom uid to render the local view, you also need to pass sourceType. */ uid?: number; /** - * The ID of the user who publishes a specific sub-video stream within the mixed video stream. + * User ID of the publisher of a sub-video stream in the transcoded video. */ subviewUid?: number; /** - * The video display window. In one VideoCanvas, you can only choose to set either view or surfaceTexture. If both are set, only the settings in view take effect. + * Video display window. In a single VideoCanvas, only one of view or surfaceTexture can be set. If both are set, only the view setting takes effect. */ view?: any; /** - * The background color of the video canvas in RGBA format. The default value is 0x00000000, which represents black. + * Background color of the video canvas in RGBA format. Default is 0x00000000, which represents black. */ backgroundColor?: number; /** - * The rendering mode of the video. See RenderModeType. + * Video render mode. See RenderModeType. */ renderMode?: RenderModeType; /** - * The mirror mode of the view. See VideoMirrorModeType. - * For the mirror mode of the local video view: If you use a front camera, the SDK enables the mirror mode by default; if you use a rear camera, the SDK disables the mirror mode by default. - * For the remote user: The mirror mode is disabled by default. + * View mirror mode. See VideoMirrorModeType. + * Local view mirror mode: If you use the front camera, local view mirror mode is enabled by default; if you use the rear camera, it is disabled by default. + * Remote user view mirror mode: Disabled by default. */ mirrorMode?: VideoMirrorModeType; /** - * Setting mode of the view. See VideoViewSetupMode. + * View setup mode. See VideoViewSetupMode. */ setupMode?: VideoViewSetupMode; /** - * The type of the video source. See VideoSourceType. + * Type of video source. See VideoSourceType. */ sourceType?: VideoSourceType; /** - * The ID of the media player. 
You can get the Device ID by calling getMediaPlayerId. + * Media player ID. Obtainable via getMediaPlayerId. */ mediaPlayerId?: number; /** - * (Optional) Display area of the video frame, see Rectangle. width and height represent the video pixel width and height of the area. The default value is null (width or height is 0), which means that the actual resolution of the video frame is displayed. + * (Optional) Display area of the video frame. See Rectangle. width and height indicate the pixel width and height of the video in this area. Default is empty (width or height is 0), meaning the video frame is displayed at its actual resolution. */ cropArea?: Rectangle; /** @@ -3964,223 +3974,227 @@ export class VideoCanvas { */ enableAlphaMask?: boolean; /** - * The observation position of the video frame in the video link. See VideoModulePosition. + * Position of the video frame in the video pipeline. See VideoModulePosition. */ position?: VideoModulePosition; } /** - * The contrast level. + * Brightness contrast level. */ export enum LighteningContrastLevel { /** - * 0: Low contrast level. + * 0: Low contrast. */ LighteningContrastLow = 0, /** - * 1: (Default) Normal contrast level. + * 1: Normal contrast. */ LighteningContrastNormal = 1, /** - * 2: High contrast level. + * 2: High contrast. */ LighteningContrastHigh = 2, } /** - * Image enhancement options. + * Beauty options. */ export class BeautyOptions { /** - * The contrast level, used with the lighteningLevel parameter. The larger the value, the greater the contrast between light and dark. See LighteningContrastLevel. + * Contrast level, usually used together with lighteningLevel. The higher the value, the greater the contrast between light and dark. See LighteningContrastLevel. */ lighteningContrastLevel?: LighteningContrastLevel; /** - * The brightening level, in the range [0.0,1.0], where 0.0 means the original brightening. The default value is 0.0. The higher the value, the greater the degree of brightening. 
+ * Whitening level, value range [0.0,1.0], where 0.0 indicates original brightness. Default is 0.0. The higher the value, the greater the whitening effect. */ lighteningLevel?: number; /** - * The smoothness level, in the range [0.0,1.0], where 0.0 means the original smoothness. The default value is 0.0. The greater the value, the greater the smoothness level. + * Smoothing level, value range [0.0,1.0], where 0.0 indicates original smoothness. Default is 0.0. The higher the value, the greater the smoothing effect. */ smoothnessLevel?: number; /** - * The redness level, in the range [0.0,1.0], where 0.0 means the original redness. The default value is 0.0. The larger the value, the greater the redness level. + * Redness level, value range [0.0,1.0], where 0.0 indicates original redness. Default is 0.0. The higher the value, the greater the redness effect. */ rednessLevel?: number; /** - * The sharpness level, in the range [0.0,1.0], where 0.0 means the original sharpness. The default value is 0.0. The larger the value, the greater the sharpness level. + * Sharpness level, value range [0.0,1.0], where 0.0 indicates original sharpness. Default is 0.0. The higher the value, the greater the sharpness effect. */ sharpnessLevel?: number; } /** - * @ignore + * Selects the specific facial area to be adjusted. + * + * Available since v4.4.0. */ export enum FaceShapeArea { /** - * @ignore + * (-1): Default value, indicates an invalid area, the facial shaping effect is not applied. */ FaceShapeAreaNone = -1, /** - * @ignore + * (100): Head area, used to achieve a smaller head effect. Value range: [0, 100], default is 50. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaHeadscale = 100, /** - * @ignore + * (101): Forehead area, used to adjust the hairline height. Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. 
*/ FaceShapeAreaForehead = 101, /** - * @ignore + * (102): Face contour area, used to achieve a slimming face effect. Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaFacecontour = 102, /** - * @ignore + * (103): Face length area, used to elongate the face. Value range: [-100, 100], default is 0. The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaFacelength = 103, /** - * @ignore + * (104): Face width area, used to achieve a narrower face effect. Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaFacewidth = 104, /** - * @ignore + * (105): Cheekbone area, used to adjust the width of the cheekbones. Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaCheekbone = 105, /** - * @ignore + * (106): Cheek area, used to adjust the width of the cheeks. Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaCheek = 106, /** - * @ignore + * (107): Mandible area, used to adjust the width of the jawbone. Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaMandible = 107, /** - * @ignore + * (108): Chin area, used to adjust the length of the chin. Value range: [-100, 100], default is 0. The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaChin = 108, /** - * @ignore + * (200): Eye area, used to achieve a bigger eye effect. Value range: [0, 100], default is 50. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaEyescale = 200, /** - * @ignore + * (201): Eye distance area, used to adjust the distance between the eyes. Value range: [-100, 100], default is 0. 
The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaEyedistance = 201, /** - * @ignore + * (202): Eye position area, used to adjust the overall position of the eyes. Value range: [-100, 100], default is 0. The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaEyeposition = 202, /** - * @ignore + * (203): Lower eyelid area, used to adjust the shape of the lower eyelid. Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaLowereyelid = 203, /** - * @ignore + * (204): Pupil area, used to adjust the size of the pupils. Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaEyepupils = 204, /** - * @ignore + * (205): Inner eye corner area, used to adjust the shape of the inner eye corners. Value range: [-100, 100], default is 0. The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaEyeinnercorner = 205, /** - * @ignore + * (206): Outer eye corner area, used to adjust the shape of the outer eye corners. Value range: [-100, 100], default is 0. The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaEyeoutercorner = 206, /** - * @ignore + * (300): Nose length area, used to elongate the nose. Value range: [-100, 100], default is 0. The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaNoselength = 300, /** - * @ignore + * (301): Nose width area, used to achieve a slimmer nose effect. Value range: [0, 100], default is 0. The higher the value, the more noticeable the slimming effect. 
*/ FaceShapeAreaNosewidth = 301, /** - * @ignore + * (302): Nose wing area, used to adjust the width of the nose wings. Value range: [0, 100], default is 10. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaNosewing = 302, /** - * @ignore + * (303): Nose root area, used to adjust the height of the nose root. Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaNoseroot = 303, /** - * @ignore + * (304): Nose bridge area, used to adjust the height of the nose bridge. Value range: [0, 100], default is 50. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaNosebridge = 304, /** - * @ignore + * (305): Nose tip area, used to adjust the shape of the nose tip. Value range: [0, 100], default is 50. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaNosetip = 305, /** - * @ignore + * (306): Overall nose area, used to uniformly adjust the shape of the nose. Value range: [-100, 100], default is 50. The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaNosegeneral = 306, /** - * @ignore + * (400): Mouth area, used to achieve a larger mouth effect. Value range: [-100, 100], default is 20. The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaMouthscale = 400, /** - * @ignore + * (401): Mouth position area, used to adjust the overall position of the mouth. Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaMouthposition = 401, /** - * @ignore + * (402): Mouth smile area, used to adjust the degree of mouth corner lift. Value range: [0, 1], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaMouthsmile = 402, /** - * @ignore + * (403): Lip shape area, used to adjust the shape of the lips. 
Value range: [0, 100], default is 0. The higher the value, the more noticeable the adjustment. */ FaceShapeAreaMouthlip = 403, /** - * @ignore + * (500): Eyebrow position area, used to adjust the overall position of the eyebrows. Value range: [-100, 100], default is 0. The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaEyebrowposition = 500, /** - * @ignore + * (501): Eyebrow thickness area, used to adjust the thickness of the eyebrows. Value range: [-100, 100], default is 0. The greater the absolute value, the more noticeable the adjustment. Negative values indicate the opposite direction. */ FaceShapeAreaEyebrowthickness = 501, } /** - * @ignore + * Face shape area options. */ export class FaceShapeAreaOptions { /** - * @ignore + * Facial area for beautification. See FaceShapeArea. */ shapeArea?: FaceShapeArea; /** - * @ignore + * Intensity of the effect. The definition of intensity (including direction, range, default value, etc.) varies by area. See FaceShapeArea. */ shapeIntensity?: number; } /** - * @ignore + * Face shaping makeup effect style options. + * + * Available since v4.4.0. */ export enum FaceShapeBeautyStyle { /** - * @ignore + * (0): (Default) Female style makeup effect. */ FaceShapeBeautyStyleFemale = 0, /** - * @ignore + * (1): Male style makeup effect. */ FaceShapeBeautyStyleMale = 1, /** - * @ignore + * (2): Natural style makeup effect, makes minimal adjustments to facial features only. */ FaceShapeBeautyStyleNatural = 2, } /** - * @ignore + * Face shaping style options. */ export class FaceShapeBeautyOptions { /** - * @ignore + * Face shaping style. See FaceShapeBeautyStyle. */ shapeStyle?: FaceShapeBeautyStyle; /** - * @ignore + * Intensity of the face shaping style, ranging from [0,100], with a default value of 0.0, meaning no face shaping effect. The higher the value, the more noticeable the changes to the modified areas. 
*/ styleIntensity?: number; } @@ -4190,124 +4204,123 @@ export class FaceShapeBeautyOptions { */ export class FilterEffectOptions { /** - * The absolute path to the local cube map texture file, which can be used to customize the filter effect. The specified .cude file should strictly follow the Cube LUT Format Specification; otherwise, the filter options do not take effect. The following is a sample of the .cude file: - * LUT_3D_SIZE 32 + * Local absolute path to the 3D cube map file used to implement custom filter effects. The referenced .cube file must strictly follow the Cube LUT (Lookup Table) specification, otherwise the filter effect will not take effect. Example of a .cube file: LUT_3D_SIZE 32 * 0.0039215689 0 0.0039215682 * 0.0086021447 0.0037950677 0 * ... * 0.0728652592 0.0039215689 0 - * The identifier LUT_3D_SIZE on the first line of the cube map file represents the size of the three-dimensional lookup table. The LUT size for filter effect can only be set to 32. - * The SDK provides a built-in built_in_whiten_filter.cube file. You can pass the absolute path of this file to get the whitening filter effect. + * The LUT_3D_SIZE identifier in the first line of the cube map file indicates the size of the 3D lookup table. Currently, only LUT size 32 is supported. + * The SDK provides a built-in built_in_whiten_filter.cube file. Passing the absolute path of this file enables a whitening filter effect. */ path?: string; /** - * The intensity of the filter effect, with a range value of [0.0,1.0], in which 0.0 represents no filter effect. The default value is 0.5. The higher the value, the stronger the filter effect. + * Strength of the filter effect, ranging from [0.0,1.0], where 0.0 means no filter effect. The default value is 0.5. The higher the value, the stronger the filter effect. */ strength?: number; } /** - * The low-light enhancement mode. + * Low-light enhancement mode. */ export enum LowLightEnhanceMode { /** - * 0: (Default) Automatic mode. 
The SDK automatically enables or disables the low-light enhancement feature according to the ambient light to compensate for the lighting level or prevent overexposure, as necessary. + * 0: (Default) Auto mode. The SDK automatically enables or disables low-light enhancement based on ambient brightness to provide proper lighting and avoid overexposure. */ LowLightEnhanceAuto = 0, /** - * 1: Manual mode. Users need to enable or disable the low-light enhancement feature manually. + * 1: Manual mode. You need to manually enable or disable low-light enhancement. */ LowLightEnhanceManual = 1, } /** - * The low-light enhancement level. + * Low-light enhancement level. */ export enum LowLightEnhanceLevel { /** - * 0: (Default) Promotes video quality during low-light enhancement. It processes the brightness, details, and noise of the video image. The performance consumption is moderate, the processing speed is moderate, and the overall video quality is optimal. + * 0: (Default) High-quality low-light enhancement. Optimizes brightness, detail, and noise in the video image with moderate performance consumption and processing speed, offering the best overall image quality. */ LowLightEnhanceLevelHighQuality = 0, /** - * 1: Promotes performance during low-light enhancement. It processes the brightness and details of the video image. The processing speed is faster. + * 1: Performance-first low-light enhancement. Optimizes brightness and detail with lower performance consumption and faster processing speed. */ LowLightEnhanceLevelFast = 1, } /** - * The low-light enhancement options. + * Low-light enhancement options. */ export class LowlightEnhanceOptions { /** - * The low-light enhancement mode. See LowLightEnhanceMode. + * Mode of low-light enhancement. See LowLightEnhanceMode. */ mode?: LowLightEnhanceMode; /** - * The low-light enhancement level. See LowLightEnhanceLevel. + * Level of low-light enhancement. See LowLightEnhanceLevel. 
*/ level?: LowLightEnhanceLevel; } /** - * Video noise reduction mode. + * Video denoising mode. */ export enum VideoDenoiserMode { /** - * 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise reduction feature according to the ambient light. + * 0: (Default) Auto mode. The SDK automatically enables or disables video denoising based on ambient brightness. */ VideoDenoiserAuto = 0, /** - * 1: Manual mode. Users need to enable or disable the video noise reduction feature manually. + * 1: Manual mode. You need to manually enable or disable video denoising. */ VideoDenoiserManual = 1, } /** - * Video noise reduction level. + * Video denoising level. */ export enum VideoDenoiserLevel { /** - * 0: (Default) Promotes video quality during video noise reduction. balances performance consumption and video noise reduction quality. The performance consumption is moderate, the video noise reduction speed is moderate, and the overall video quality is optimal. + * 0: (Default) Denoising with priority on video quality. Balances performance consumption and denoising effect. Moderate performance usage, moderate speed, optimal overall quality. */ VideoDenoiserLevelHighQuality = 0, /** - * 1: Promotes reducing performance consumption during video noise reduction. It prioritizes reducing performance consumption over video noise reduction quality. The performance consumption is lower, and the video noise reduction speed is faster. To avoid a noticeable shadowing effect (shadows trailing behind moving objects) in the processed video, Agora recommends that you use this setting when the camera is fixed. + * 1: Denoising with priority on performance. Focuses on saving performance over denoising effect. Low performance consumption, fast speed. To avoid noticeable ghosting, it is recommended to use this setting when the camera is stationary. */ VideoDenoiserLevelFast = 1, } /** - * Video noise reduction options. + * Video denoising options. 
*/ export class VideoDenoiserOptions { /** - * Video noise reduction mode. + * Video denoising mode. */ mode?: VideoDenoiserMode; /** - * Video noise reduction level. + * Video denoising level. */ level?: VideoDenoiserLevel; } /** - * The color enhancement options. + * Color enhancement options. */ export class ColorEnhanceOptions { /** - * The level of color enhancement. The value range is [0.0, 1.0]. 0.0 is the default value, which means no color enhancement is applied to the video. The higher the value, the higher the level of color enhancement. The default value is 0.5. + * Color enhancement strength. Value range is [0.0,1.0]. 0.0 means no color enhancement is applied to the video. The larger the value, the stronger the enhancement. Default value is 0.5. */ strengthLevel?: number; /** - * The level of skin tone protection. The value range is [0.0, 1.0]. 0.0 means no skin tone protection. The higher the value, the higher the level of skin tone protection. The default value is 1.0. - * When the level of color enhancement is higher, the portrait skin tone can be significantly distorted, so you need to set the level of skin tone protection. - * When the level of skin tone protection is higher, the color enhancement effect can be slightly reduced. Therefore, to get the best color enhancement effect, Agora recommends that you adjust strengthLevel and skinProtectLevel to get the most appropriate values. + * Skin tone protection level. Value range is [0.0,1.0]. 0.0 means no skin tone protection. The larger the value, the stronger the protection. Default value is 1.0. + * When the color enhancement strength is high, facial skin tones may appear distorted. You need to set the skin tone protection level. + * Higher skin tone protection levels may slightly reduce the color enhancement effect. Therefore, to achieve the best color enhancement effect, it is recommended to dynamically adjust strengthLevel and skinProtectLevel for optimal results. 
*/ skinProtectLevel?: number; } /** - * The custom background. + * Custom background. */ export enum BackgroundSourceType { /** @@ -4315,115 +4328,115 @@ export enum BackgroundSourceType { */ BackgroundNone = 0, /** - * 1: (Default) The background image is a solid color. + * 1: (Default) Solid color background. */ BackgroundColor = 1, /** - * 2: The background is an image in PNG or JPG format. + * 2: Background image in PNG or JPG format. */ BackgroundImg = 2, /** - * 3: The background is a blurred version of the original background. + * 3: Blurred background. */ BackgroundBlur = 3, /** - * 4: The background is a local video in MP4, AVI, MKV, FLV, or other supported formats. + * 4: Local video background in formats such as MP4, AVI, MKV, FLV. */ BackgroundVideo = 4, } /** - * The degree of blurring applied to the custom background image. + * Degree of background blur for custom background image. */ export enum BackgroundBlurDegree { /** - * 1: The degree of blurring applied to the custom background image is low. The user can almost see the background clearly. + * 1: Low blur level for custom background image. Users can almost clearly see the background. */ BlurDegreeLow = 1, /** - * 2: The degree of blurring applied to the custom background image is medium. It is difficult for the user to recognize details in the background. + * 2: Medium blur level for custom background image. Users have difficulty seeing the background clearly. */ BlurDegreeMedium = 2, /** - * 3: (Default) The degree of blurring applied to the custom background image is high. The user can barely see any distinguishing features in the background. + * 3: (Default) High blur level for custom background image. Users can barely see the background. */ BlurDegreeHigh = 3, } /** - * The custom background. + * Custom background. */ export class VirtualBackgroundSource { /** - * The custom background. See BackgroundSourceType. + * Custom background. See BackgroundSourceType. 
*/ background_source_type?: BackgroundSourceType; /** - * The type of the custom background image. The color of the custom background image. The format is a hexadecimal integer defined by RGB, without the # sign, such as 0xFFB6C1 for light pink. The default value is 0xFFFFFF, which signifies white. The value range is [0x000000, 0xffffff]. If the value is invalid, the SDK replaces the original background image with a white background image. This parameter is only applicable to custom backgrounds of the following types: BackgroundColor : The background image is a solid-colored image of the color passed in by the parameter. BackgroundImg : If the image in source has a transparent background, the transparent background will be filled with the color passed in by the parameter. + * Color of the custom background image. Format is a hexadecimal integer in RGB without the # symbol, e.g., 0xFFB6C1 represents light pink. Default is 0xFFFFFF (white). Valid range is [0x000000, 0xffffff]. If the value is invalid, the SDK replaces the background with white. This parameter takes effect only when the custom background is one of the following types: BackgroundColor : The background is a solid color image of the specified color. BackgroundImg : If the image in source has a transparent background, the transparent area is filled with the specified color. */ color?: number; /** - * The local absolute path of the custom background image. Supports PNG, JPG, MP4, AVI, MKV, and FLV formats. If the path is invalid, the SDK will use either the original background image or the solid color image specified by color. This parameter takes effect only when the type of the custom background image is BackgroundImg or BackgroundVideo. + * Absolute local path of the custom background. Supports PNG, JPG, MP4, AVI, MKV, and FLV formats. If the path is invalid, the SDK uses the original background or the solid color specified by color. 
This parameter takes effect only when the custom background type is BackgroundImg or BackgroundVideo. */ source?: string; /** - * The degree of blurring applied to the custom background image. See BackgroundBlurDegree. This parameter takes effect only when the type of the custom background image is BackgroundBlur. + * Blur level of the custom background. See BackgroundBlurDegree. This parameter takes effect only when the custom background type is BackgroundBlur. */ blur_degree?: BackgroundBlurDegree; } /** - * The type of algorithms to user for background processing. + * Algorithm for background processing. */ export enum SegModelType { /** - * 1: (Default) Use the algorithm suitable for all scenarios. + * 1: (Default) Background processing algorithm suitable for all scenarios. */ SegModelAi = 1, /** - * 2: Use the algorithm designed specifically for scenarios with a green screen background. + * 2: Background processing algorithm (green screen only). */ SegModelGreen = 2, } /** - * @ignore + * Screen color type. */ export enum ScreenColorType { /** - * @ignore + * (0): Automatically selects the screen color. */ ScreenColorAuto = 0, /** - * @ignore + * (1): Green screen color. */ ScreenColorGreen = 1, /** - * @ignore + * (2): Blue screen color. */ ScreenColorBlue = 2, } /** - * Processing properties for background images. + * Background image processing properties. */ export class SegmentationProperty { /** - * The type of algorithms to user for background processing. See SegModelType. + * The algorithm used for background processing. See SegModelType. */ modelType?: SegModelType; /** - * The accuracy range for recognizing background colors in the image. The value range is [0,1], and the default value is 0.5. The larger the value, the wider the range of identifiable shades of pure color. When the value of this parameter is too large, the edge of the portrait and the pure color in the portrait range are also detected. 
Agora recommends that you dynamically adjust the value of this parameter according to the actual effect. This parameter only takes effect when modelType is set to SegModelGreen. + * The precision range for recognizing background colors in the image. Value range is [0,1], default is 0.5. A higher value indicates a wider range of recognizable solid colors. If the value is too high, edges of the portrait and solid colors within the portrait may also be recognized. It is recommended to adjust this value dynamically based on actual effects. This parameter takes effect only when modelType is set to SegModelGreen. */ greenCapacity?: number; /** - * @ignore + * Type of screen color. See ScreenColorType. */ screenColorType?: ScreenColorType; } /** - * The type of the audio track. + * Type of custom audio capture track. */ export enum AudioTrackType { /** @@ -4431,65 +4444,65 @@ export enum AudioTrackType { */ AudioTrackInvalid = -1, /** - * 0: Mixable audio tracks. This type of audio track supports mixing with other audio streams (such as audio streams captured by microphone) and playing locally or publishing to channels after mixing. The latency of mixable audio tracks is higher than that of direct audio tracks. + * 0: Mixable audio track. Supports mixing with other audio streams (e.g., microphone audio) before local playback or publishing to the channel. Has higher latency compared to non-mixable audio tracks. */ AudioTrackMixable = 0, /** - * 1: Direct audio tracks. This type of audio track will replace the audio streams captured by the microphone and does not support mixing with other audio streams. The latency of direct audio tracks is lower than that of mixable audio tracks. If AudioTrackDirect is specified for this parameter, you must set publishMicrophoneTrack to false in ChannelMediaOptions when calling joinChannel to join the channel; otherwise, joining the channel fails and returns the error code -2. + * 1: Non-mixable audio track. 
Replaces microphone capture and does not support mixing with other audio streams. Has lower latency compared to mixable audio tracks. If you specify AudioTrackDirect, you must set publishMicrophoneTrack in ChannelMediaOptions to false when calling joinChannel, otherwise joining the channel will fail and return error code -2. */ AudioTrackDirect = 1, } /** - * The configuration of custom audio tracks. + * Configuration options for custom audio tracks. */ export class AudioTrackConfig { /** - * Whether to enable the local audio-playback device: true : (Default) Enable the local audio-playback device. false : Do not enable the local audio-playback device. + * Whether to enable local audio playback: true : (Default) Enable local audio playback. false : Disable local audio playback. */ enableLocalPlayback?: boolean; /** - * Whether to enable audio processing module: true : Enable the audio processing module to apply the Automatic Echo Cancellation (AEC), Automatic Noise Suppression (ANS), and Automatic Gain Control (AGC) effects. false : (Default) Do not enable the audio processing module. This parameter only takes effect on AudioTrackDirect in custom audio capturing. + * This parameter only takes effect for custom audio capture tracks of type AudioTrackDirect. Whether to enable the audio processing module: true : Enable the audio processing module, which applies echo cancellation (AEC), noise suppression (ANS), and automatic gain control (AGC). false : (Default) Disable the audio processing module. */ enableAudioProcessing?: boolean; } /** - * The options for SDK preset voice beautifier effects. + * Preset voice beautifier options. */ export enum VoiceBeautifierPreset { /** - * Turn off voice beautifier effects and use the original voice. + * Original voice, i.e., disables voice beautifier effects. */ VoiceBeautifierOff = 0x00000000, /** - * A more magnetic voice. 
Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may experience vocal distortion. + * Magnetic (male). This setting is only effective for male voices. Do not use it for female voices, or audio distortion may occur. */ ChatBeautifierMagnetic = 0x01010100, /** - * A fresher voice. Agora recommends using this enumerator to process a female-sounding voice; otherwise, you may experience vocal distortion. + * Fresh (female). This setting is only effective for female voices. Do not use it for male voices, or audio distortion may occur. */ ChatBeautifierFresh = 0x01010200, /** - * A more vital voice. Agora recommends using this enumerator to process a female-sounding voice; otherwise, you may experience vocal distortion. + * Energetic (female). This setting is only effective for female voices. Do not use it for male voices, or audio distortion may occur. */ ChatBeautifierVitality = 0x01010300, /** - * Singing beautifier effect. - * If you call setVoiceBeautifierPreset (SingingBeautifier), you can beautify a male-sounding voice and add a reverberation effect that sounds like singing in a small room. Agora recommends using this enumerator to process a male-sounding voice; otherwise, you might experience vocal distortion. - * If you call setVoiceBeautifierParameters (SingingBeautifier, param1, param2), you can beautify a male or female-sounding voice and add a reverberation effect. + * Singing beautifier. + * If you call setVoiceBeautifierPreset (SingingBeautifier), you can beautify male voices and add a small room reverb effect. Do not use for female voices, or audio distortion may occur. + * If you call setVoiceBeautifierParameters (SingingBeautifier, param1, param2), you can beautify both male and female voices and add reverb effects. */ SingingBeautifier = 0x01020100, /** - * A more vigorous voice. + * Vigorous. */ TimbreTransformationVigorous = 0x01030100, /** - * A deep voice. + * Deep. 
*/ TimbreTransformationDeep = 0x01030200, /** - * A mellower voice. + * Mellow. */ TimbreTransformationMellow = 0x01030300, /** @@ -4497,25 +4510,25 @@ export enum VoiceBeautifierPreset { */ TimbreTransformationFalsetto = 0x01030400, /** - * A fuller voice. + * Full. */ TimbreTransformationFull = 0x01030500, /** - * A clearer voice. + * Clear. */ TimbreTransformationClear = 0x01030600, /** - * A more resounding voice. + * Resounding. */ TimbreTransformationResounding = 0x01030700, /** - * A more ringing voice. + * Ringing. */ TimbreTransformationRinging = 0x01030800, /** - * A ultra-high quality voice, which makes the audio clearer and restores more details. - * To achieve better audio effect quality, Agora recommends that you set the profile of to AudioProfileMusicHighQuality (4) or AudioProfileMusicHighQualityStereo (5) and scenario to AudioScenarioGameStreaming (3) before calling setVoiceBeautifierPreset. - * If you have an audio capturing device that can already restore audio details to a high degree, Agora recommends that you do not enable ultra-high quality; otherwise, the SDK may over-restore audio details, and you may not hear the anticipated voice effect. + * Ultra-high quality voice, which makes the audio clearer and more detailed. + * For better results, it is recommended to set the profile parameter of setAudioProfile2 to AudioProfileMusicHighQuality (4) or AudioProfileMusicHighQualityStereo (5), and the scenario parameter to AudioScenarioGameStreaming (3), before calling setVoiceBeautifierPreset. + * If the user's audio capture device can highly restore audio details, it is recommended not to enable ultra-high quality voice, otherwise the SDK may over-restore audio details and fail to achieve the expected effect. */ UltraHighQualityVoice = 0x01040100, } @@ -4523,117 +4536,121 @@ export enum VoiceBeautifierPreset { /** * Preset audio effects. 
 * - * To get better audio effects, Agora recommends calling setAudioProfile and setting the profile parameter as recommended below before using the preset audio effects. + * To achieve better audio effects, it is recommended to call setAudioProfile and set the profile parameter as described below before using the preset audio effects: + * For the following presets, set profile to AudioProfileMusicHighQualityStereo or AudioProfileMusicStandardStereo : + * RoomAcousticsVirtualStereo + * RoomAcoustics3dVoice + * RoomAcousticsVirtualSurroundSound For other preset effects (except AudioEffectOff), set profile to AudioProfileMusicHighQuality or AudioProfileMusicHighQualityStereo. */ export enum AudioEffectPreset { /** - * Turn off voice effects, that is, use the original voice. + * Original sound, i.e., disables voice effects. */ AudioEffectOff = 0x00000000, /** - * The voice effect typical of a KTV venue. + * KTV. */ RoomAcousticsKtv = 0x02010100, /** - * The voice effect typical of a concert hall. + * Concert. */ RoomAcousticsVocalConcert = 0x02010200, /** - * The voice effect typical of a recording studio. + * Studio. */ RoomAcousticsStudio = 0x02010300, /** - * The voice effect typical of a vintage phonograph. + * Phonograph. */ RoomAcousticsPhonograph = 0x02010400, /** - * The virtual stereo effect, which renders monophonic audio as stereo audio. + * Virtual stereo, where the SDK renders mono audio into stereo effect. */ RoomAcousticsVirtualStereo = 0x02010500, /** - * A more spatial voice effect. + * Spacious. */ RoomAcousticsSpacial = 0x02010600, /** - * A more ethereal voice effect. + * Ethereal. */ RoomAcousticsEthereal = 0x02010700, /** - * A 3D voice effect that makes the voice appear to be moving around the user. The default cycle period is 10 seconds. After setting this effect, you can call setAudioEffectParameters to modify the movement period. If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear the anticipated voice effect. + * 3D voice, where the SDK renders audio to surround the user. The default surround cycle is 10 seconds. 
After setting this effect, you can also call setAudioEffectParameters to modify the surround cycle. To hear the expected effect after enabling 3D voice, users need to use audio playback devices that support stereo. */ RoomAcoustics3dVoice = 0x02010800, /** - * Virtual surround sound, that is, the SDK generates a simulated surround sound field on the basis of stereo channels, thereby creating a surround sound effect. If the virtual surround sound is enabled, users need to use stereo audio playback devices to hear the anticipated audio effect. + * Virtual surround sound, where the SDK simulates a surround sound field based on stereo to create a surround effect. To hear the expected effect after enabling virtual surround sound, users need to use audio playback devices that support stereo. */ RoomAcousticsVirtualSurroundSound = 0x02010900, /** - * The audio effect of chorus. Agora recommends using this effect in chorus scenarios to enhance the sense of depth and dimension in the vocals. + * Chorus. Agora recommends using this in chorus scenarios to enhance the spatial stereo effect of vocals. */ RoomAcousticsChorus = 0x02010d00, /** - * A middle-aged man's voice. Agora recommends using this preset to process a male-sounding voice; otherwise, you may not hear the anticipated voice effect. + * Uncle. Recommended for processing male voices; otherwise, the effect may not meet expectations. */ VoiceChangerEffectUncle = 0x02020100, /** - * An older man's voice. Agora recommends using this preset to process a male-sounding voice; otherwise, you may not hear the anticipated voice effect. + * Elderly male. Recommended for processing male voices; otherwise, the effect may not meet expectations. */ VoiceChangerEffectOldman = 0x02020200, /** - * A boy's voice. Agora recommends using this preset to process a male-sounding voice; otherwise, you may not hear the anticipated voice effect. + * Boy. Recommended for processing male voices; otherwise, the effect may not meet expectations. 
*/ VoiceChangerEffectBoy = 0x02020300, /** - * A young woman's voice. Agora recommends using this preset to process a female-sounding voice; otherwise, you may not hear the anticipated voice effect. + * Young woman. Recommended for processing female voices; otherwise, the effect may not meet expectations. */ VoiceChangerEffectSister = 0x02020400, /** - * A girl's voice. Agora recommends using this preset to process a female-sounding voice; otherwise, you may not hear the anticipated voice effect. + * Girl. Recommended for processing female voices; otherwise, the effect may not meet expectations. */ VoiceChangerEffectGirl = 0x02020500, /** - * The voice of Pig King, a character in Journey to the West who has a voice like a growling bear. + * Pigsy. */ VoiceChangerEffectPigking = 0x02020600, /** - * The Hulk's voice. + * Hulk. */ VoiceChangerEffectHulk = 0x02020700, /** - * The voice effect typical of R&B music. + * R&B. */ StyleTransformationRnb = 0x02030100, /** - * The voice effect typical of popular music. + * Pop. */ StyleTransformationPopular = 0x02030200, /** - * A pitch correction effect that corrects the user's pitch based on the pitch of the natural C major scale. After setting this voice effect, you can call setAudioEffectParameters to adjust the basic mode of tuning and the pitch of the main tone. + * Electronic music, where the SDK corrects the actual pitch of the audio based on the natural major scale with C as the tonic. After setting this effect, you can also call setAudioEffectParameters to adjust the base scale and tonic pitch. */ PitchCorrection = 0x02040100, } /** - * The options for SDK preset voice conversion effects. + * Preset voice conversion options. */ export enum VoiceConversionPreset { /** - * Turn off voice conversion effects and use the original voice. + * Original voice, i.e., disables voice conversion effects. */ VoiceConversionOff = 0x00000000, /** - * A gender-neutral voice. 
To avoid audio distortion, ensure that you use this enumerator to process a female-sounding voice. + * Neutral. To avoid audio distortion, make sure to apply this effect only to female voices. */ VoiceChangerNeutral = 0x03010100, /** - * A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a female-sounding voice. + * Sweet. To avoid audio distortion, make sure to apply this effect only to female voices. */ VoiceChangerSweet = 0x03010200, /** - * A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a male-sounding voice. + * Steady. To avoid audio distortion, make sure to apply this effect only to male voices. */ VoiceChangerSolid = 0x03010300, /** - * A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a male-sounding voice. + * Deep. To avoid audio distortion, make sure to apply this effect only to male voices. */ VoiceChangerBass = 0x03010400, /** @@ -4687,21 +4704,21 @@ export enum VoiceConversionPreset { */ export enum HeadphoneEqualizerPreset { /** - * The headphone equalizer is disabled, and the original audio is heard. + * Disable headphone equalizer and listen to the original audio. */ HeadphoneEqualizerOff = 0x00000000, /** - * An equalizer is used for headphones. + * Use the equalizer for over-ear headphones. */ HeadphoneEqualizerOverear = 0x04000001, /** - * An equalizer is used for in-ear headphones. + * Use the equalizer for in-ear headphones. */ HeadphoneEqualizerInear = 0x04000002, } /** - * Voice AI tuner sound types. + * AI tuner voice effect types. */ export enum VoiceAiTunerType { /** @@ -4713,55 +4730,55 @@ export enum VoiceAiTunerType { */ VoiceAiTunerFreshMale = 1, /** - * 2: Elegant female voice. A deep and charming female voice. + * 2: Mature female voice. A deep and charming female voice. */ VoiceAiTunerElegantFemale = 2, /** - * 3: Sweet female voice. A high-pitched and cute female voice. + * 3: Cute female voice. 
A high-pitched and adorable female voice. */ VoiceAiTunerSweetFemale = 3, /** - * 4: Warm male singing. A warm and melodious male voice. + * 4: Warm male singing voice. A warm and melodious male voice. */ VoiceAiTunerWarmMaleSinging = 4, /** - * 5: Gentle female singing. A soft and delicate female voice. + * 5: Gentle female singing voice. A soft and delicate female voice. */ VoiceAiTunerGentleFemaleSinging = 5, /** - * 6: Husky male singing. A unique husky male voice. + * 6: Husky mature male singing voice. A unique hoarse male voice. */ VoiceAiTunerHuskyMaleSinging = 6, /** - * 7: Warm elegant female singing. A warm and mature female voice. + * 7: Warm mature female singing voice. A warm and mature female voice. */ VoiceAiTunerWarmElegantFemaleSinging = 7, /** - * 8: Powerful male singing. A strong and powerful male voice. + * 8: Powerful male singing voice. A strong and forceful male voice. */ VoiceAiTunerPowerfulMaleSinging = 8, /** - * 9: Dreamy female singing. A dreamy and soft female voice. + * 9: Dreamy female singing voice. A dreamy and soft female voice. */ VoiceAiTunerDreamyFemaleSinging = 9, } /** - * The audio configuration for the shared screen stream. + * Audio configuration for the shared screen stream. * - * Only available where captureAudio is true. + * (Android only) Applies only when captureAudio is set to true. */ export class ScreenAudioParameters { /** - * Audio sample rate (Hz). The default value is 16000. + * Audio sample rate (Hz). Default is 16000. */ sampleRate?: number; /** - * The number of audio channels. The default value is 2, which means stereo. + * Number of channels. Default is 2, indicating stereo. */ channels?: number; /** - * The volume of the captured system audio. The value range is [0, 100]. The default value is 100. + * Captured system volume. Value range: [0, 100]. Default is 100. 
*/ captureSignalVolume?: number; /** @@ -4829,19 +4846,19 @@ export class ScreenCaptureParameters { */ export enum AudioRecordingQualityType { /** - * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes of recording. + * 0: Low quality. Sample rate is 32 kHz, file size for 10 minutes of recording is approximately 1.2 MB. */ AudioRecordingQualityLow = 0, /** - * 1: Medium quality. The sample rate is 32 kHz, and the file size is around 2 MB after 10 minutes of recording. + * 1: Medium quality. Sample rate is 32 kHz, file size for 10 minutes of recording is approximately 2 MB. */ AudioRecordingQualityMedium = 1, /** - * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 minutes of recording. + * 2: High quality. Sample rate is 32 kHz, file size for 10 minutes of recording is approximately 3.75 MB. */ AudioRecordingQualityHigh = 2, /** - * 3: Ultra high quality. The sample rate is 32 kHz, and the file size is around 7.5 MB after 10 minutes of recording. + * 3: Ultra-high quality. Sample rate is 32 kHz, file size for 10 minutes of recording is approximately 7.5 MB. */ AudioRecordingQualityUltraHigh = 3, } @@ -4851,59 +4868,59 @@ export enum AudioRecordingQualityType { */ export enum AudioFileRecordingType { /** - * 1: Only records the audio of the local user. + * 1: Record only the local user's audio. */ AudioFileRecordingMic = 1, /** - * 2: Only records the audio of all remote users. + * 2: Record only the audio of all remote users. */ AudioFileRecordingPlayback = 2, /** - * 3: Records the mixed audio of the local and all remote users. + * 3: Record the mixed audio of the local and all remote users. */ AudioFileRecordingMixed = 3, } /** - * Audio profile. + * Audio encoding content. */ export enum AudioEncodedFrameObserverPosition { /** - * 1: Only records the audio of the local user. + * 1: Encode only the local user's audio. 
 */ AudioEncodedFrameObserverPositionRecord = 1, /** - * 2: Only records the audio of all remote users. + * 2: Encode only the audio of all remote users. */ AudioEncodedFrameObserverPositionPlayback = 2, /** - * 3: Records the mixed audio of the local and all remote users. + * 3: Encode the mixed audio of the local and all remote users. */ AudioEncodedFrameObserverPositionMixed = 3, } /** - * Recording configurations. + * Recording configuration. */ export class AudioRecordingConfiguration { /** - * The absolute path (including the filename extensions) of the recording file. For example: C:\music\audio.aac. Ensure that the directory for the log files exists and is writable. + * The absolute path where the recording file is saved locally, including the file name and format. For example: C:\music\audio.aac. Make sure the specified path exists and is writable. */ filePath?: string; /** - * Whether to encode the audio data: true : Encode audio data in AAC. false : (Default) Do not encode audio data, but save the recorded audio data directly. + * Specifies whether to encode the audio data: true : Encode the audio data using AAC. false : (Default) Do not encode the audio data, save the raw recorded audio data. */ encode?: boolean; /** - * Recording sample rate (Hz). + * Recording sample rate (Hz). If you set this parameter to 44100 or 48000, to ensure recording quality, it is recommended to record WAV files or AAC files with quality set to AudioRecordingQualityMedium or AudioRecordingQualityHigh. * 16000 - * (Default) 32000 + * 32000 (Default) * 44100 - * 48000 If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC files with quality set as AudioRecordingQualityMedium or AudioRecordingQualityHigh for better recording quality. + * 48000 */ sampleRate?: number; /** - * The recording content. See AudioFileRecordingType. + * Recording content. See AudioFileRecordingType. 
*/ fileRecordingType?: AudioFileRecordingType; /** @@ -4911,21 +4928,21 @@ export class AudioRecordingConfiguration { */ quality?: AudioRecordingQualityType; /** - * The audio channel of recording: The parameter supports the following values: + * The actual recorded audio channel depends on the captured audio channel: + * If the captured audio is mono and recordingChannel is set to 2, the recorded audio will be stereo copied from mono data, not true stereo. + * If the captured audio is stereo and recordingChannel is set to 1, the recorded audio will be mono mixed from stereo data. In addition, the integration solution may also affect the final recorded audio channel. If you want to record stereo, please [contact technical support](https://ticket.shengwang.cn/) for assistance. Audio recording channel. The following values are supported: * 1: (Default) Mono. - * 2: Stereo. The actual recorded audio channel is related to the audio channel that you capture. - * If the captured audio is mono and recordingChannel is 2, the recorded audio is the dual-channel data that is copied from mono data, not stereo. - * If the captured audio is dual channel and recordingChannel is 1, the recorded audio is the mono data that is mixed by dual-channel data. The integration scheme also affects the final recorded audio channel. If you need to record in stereo, contact. + * 2: Stereo. */ recordingChannel?: number; } /** - * Observer settings for the encoded audio. + * Observer settings for encoded audio. */ export class AudioEncodedFrameObserverConfig { /** - * Audio observer position. See AudioEncodedFrameObserverPosition. + * Audio encoding content. See AudioEncodedFrameObserverPosition. */ postionType?: AudioEncodedFrameObserverPosition; /** @@ -4935,20 +4952,17 @@ export class AudioEncodedFrameObserverConfig { } /** - * The encoded audio observer. + * Observer for encoded audio. */ export interface IAudioEncodedFrameObserver { /** - * Gets the encoded audio data of the local user. 
+ * Retrieves the encoded audio data of the local user. * - * After calling registerAudioEncodedFrameObserver and setting the encoded audio as AudioEncodedFrameObserverPositionRecord, you can get the encoded audio data of the local user from this callback. + * After calling registerAudioEncodedFrameObserver and setting the audio encoding content to AudioEncodedFrameObserverPositionRecord, you can use this callback to get the encoded audio data of the local user. * - * @param channels The number of channels. - * 1: Mono. - * 2: Stereo. If the channel uses stereo, the data is interleaved. - * @param frameBuffer The audio buffer. - * @param length The data length (byte). - * @param audioEncodedFrameInfo Audio information after encoding. See EncodedAudioFrameInfo. + * @param frameBuffer Audio buffer. + * @param length Length of the audio data in bytes. + * @param audioEncodedFrameInfo Information about the encoded audio. See EncodedAudioFrameInfo. */ onRecordAudioEncodedFrame?( frameBuffer: Uint8Array, @@ -4957,18 +4971,13 @@ export interface IAudioEncodedFrameObserver { ): void; /** - * Gets the encoded audio data of all remote users. + * Retrieves the encoded audio data of all remote users. * - * After calling registerAudioEncodedFrameObserver and setting the encoded audio as AudioEncodedFrameObserverPositionPlayback, you can get encoded audio data of all remote users through this callback. + * After calling registerAudioEncodedFrameObserver and setting the audio encoding content to AudioEncodedFrameObserverPositionPlayback, you can use this callback to get the encoded audio data of all remote users. * - * @param samplesPerSec Recording sample rate (Hz). - * @param channels The number of channels. - * 1: Mono. - * 2: Stereo. If the channel uses stereo, the data is interleaved. - * @param samplesPerChannel The number of samples per channel in the audio frame. - * @param frameBuffer The audio buffer. - * @param length The data length (byte). 
- * @param audioEncodedFrameInfo Audio information after encoding. See EncodedAudioFrameInfo. + * @param frameBuffer Audio buffer. + * @param length Length of the audio data in bytes. + * @param audioEncodedFrameInfo Information about the encoded audio. See EncodedAudioFrameInfo. */ onPlaybackAudioEncodedFrame?( frameBuffer: Uint8Array, @@ -4977,18 +4986,13 @@ export interface IAudioEncodedFrameObserver { ): void; /** - * Gets the mixed and encoded audio data of the local and all remote users. + * Retrieves the encoded audio data after mixing local and all remote users' audio. * - * After calling registerAudioEncodedFrameObserver and setting the audio profile as AudioEncodedFrameObserverPositionMixed, you can get the mixed and encoded audio data of the local and all remote users through this callback. + * After calling registerAudioEncodedFrameObserver and setting the audio encoding content to AudioEncodedFrameObserverPositionMixed, you can use this callback to get the encoded audio data after mixing local and all remote users' audio. * - * @param samplesPerSec Recording sample rate (Hz). - * @param channels The number of channels. - * 1: Mono. - * 2: Stereo. If the channel uses stereo, the data is interleaved. - * @param samplesPerChannel The number of samples per channel in the audio frame. - * @param frameBuffer The audio buffer. - * @param length The data length (byte). - * @param audioEncodedFrameInfo Audio information after encoding. See EncodedAudioFrameInfo. + * @param frameBuffer Audio buffer. + * @param length Length of the audio data in bytes. + * @param audioEncodedFrameInfo Information about the encoded audio. See EncodedAudioFrameInfo. */ onMixedAudioEncodedFrame?( frameBuffer: Uint8Array, @@ -4998,7 +5002,7 @@ export interface IAudioEncodedFrameObserver { } /** - * The region for connection, which is the region where the server the SDK connects to is located. + * Access region, i.e., the region where the SDK connects to the server. 
*/ export enum AreaCode { /** @@ -5006,15 +5010,15 @@ export enum AreaCode { */ AreaCodeCn = 0x00000001, /** - * North America. + * North America region. */ AreaCodeNa = 0x00000002, /** - * Europe. + * Europe region. */ AreaCodeEu = 0x00000004, /** - * Asia, excluding Mainland China. + * Asia region excluding China. */ AreaCodeAs = 0x00000008, /** @@ -5070,47 +5074,49 @@ export enum AreaCodeEx { } /** - * The error code of the channel media relay. + * Error codes for channel media relay failures. */ export enum ChannelMediaRelayError { /** - * 0: No error. + * 0: Everything works fine. */ RelayOk = 0, /** - * 1: An error occurs in the server response. + * 1: Server response error. */ RelayErrorServerErrorResponse = 1, /** - * 2: No server response. This error may be caused by poor network connections. If this error occurs when initiating a channel media relay, you can try again later; if this error occurs during channel media relay, you can call leaveChannel to leave the channel. This error can also occur if the channel media relay service is not enabled in the project. You can contact to enable the service. + * 2: No response from the server. + * This error may be caused by poor network conditions. If this error is reported when initiating channel media relay, you can retry later; if it occurs during the relay process, you can call the leaveChannel method to leave the channel. + * This error may also occur if the current App ID has not enabled the channel media relay feature. You can [contact technical support](https://ticket.shengwang.cn/) to request enabling channel media relay. */ RelayErrorServerNoResponse = 2, /** - * 3: The SDK fails to access the service, probably due to limited resources of the server. + * 3: SDK fails to get the service, possibly due to limited server resources. */ RelayErrorNoResourceAvailable = 3, /** - * 4: Fails to send the relay request. + * 4: Failed to initiate channel media relay request. 
*/ RelayErrorFailedJoinSrc = 4, /** - * 5: Fails to accept the relay request. + * 5: Failed to accept channel media relay request. */ RelayErrorFailedJoinDest = 5, /** - * 6: The server fails to receive the media stream. + * 6: Server fails to receive media stream from the source channel. */ RelayErrorFailedPacketReceivedFromSrc = 6, /** - * 7: The server fails to send the media stream. + * 7: Server fails to send media stream to the destination channel. */ RelayErrorFailedPacketSentToDest = 7, /** - * 8: The SDK disconnects from the server due to poor network connections. You can call leaveChannel to leave the channel. + * 8: SDK loses connection with the server due to poor network quality. You can call the leaveChannel method to leave the current channel. */ RelayErrorServerConnectionLost = 8, /** - * 9: An internal error occurs in the server. + * 9: Internal server error. */ RelayErrorInternalError = 9, /** @@ -5124,23 +5130,23 @@ export enum ChannelMediaRelayError { } /** - * The state code of the channel media relay. + * State codes for channel media relay. */ export enum ChannelMediaRelayState { /** - * 0: The initial state. After you successfully stop the channel media relay by calling stopChannelMediaRelay, the onChannelMediaRelayStateChanged callback returns this state. + * 0: Idle state. After successfully calling stopChannelMediaRelay to stop the relay, onChannelMediaRelayStateChanged will report this state. */ RelayStateIdle = 0, /** - * 1: The SDK tries to relay the media stream to the destination channel. + * 1: SDK is attempting to relay across channels. */ RelayStateConnecting = 1, /** - * 2: The SDK successfully relays the media stream to the destination channel. + * 2: The host in the source channel has successfully joined the destination channel. */ RelayStateRunning = 2, /** - * 3: An error occurs. See code in onChannelMediaRelayStateChanged for the error code. + * 3: An error occurred. 
See the code parameter in onChannelMediaRelayStateChanged for details. */ RelayStateFailure = 3, } @@ -5150,47 +5156,47 @@ export enum ChannelMediaRelayState { */ export class ChannelMediaInfo { /** - * The user ID. + * User ID. */ uid?: number; /** - * The channel name. + * Channel name. */ channelName?: string; /** - * The token that enables the user to join the channel. + * Token used to join the channel. */ token?: string; } /** - * Configuration of cross channel media relay. + * Cross-channel media relay configuration information. */ export class ChannelMediaRelayConfiguration { /** - * The information of the source channel. See ChannelMediaInfo. It contains the following members: channelName : The name of the source channel. The default value is null, which means the SDK applies the name of the current channel. token : The token for joining the source channel. This token is generated with the channelName and uid you set in srcInfo. - * If you have not enabled the App Certificate, set this parameter as the default value null, which means the SDK applies the App ID. - * If you have enabled the App Certificate, you must use the token generated with the channelName and uid, and the uid must be set as 0. uid : The unique user ID to identify the relay stream in the source channel. Agora recommends leaving the default value of 0 unchanged. + * Source channel information ChannelMediaInfo, including the following members: channelName : Name of the source channel. The default value is null, which means the SDK fills in the current channel name. token : The token used to join the source channel. It is generated based on the channelName and uid you set in srcInfo. + * If App Certificate is not enabled, you can set this parameter to the default value null, which means the SDK fills in the App ID. + * If App Certificate is enabled, you must provide a token generated using the channelName and uid, and the uid must be 0. 
uid : The UID that identifies the media stream being relayed in the source channel. The default value is 0. Do not modify it. */ srcInfo?: ChannelMediaInfo; /** - * The information of the target channel ChannelMediaInfo. It contains the following members: channelName : The name of the target channel. token : The token for joining the target channel. It is generated with the channelName and uid you set in destInfos. - * If you have not enabled the App Certificate, set this parameter as the default value null, which means the SDK applies the App ID. - * If you have enabled the App Certificate, you must use the token generated with the channelName and uid. If the token of any target channel expires, the whole media relay stops; hence Agora recommends that you specify the same expiration time for the tokens of all the target channels. uid : The unique user ID to identify the relay stream in the target channel. The value ranges from 0 to (2 32 -1). To avoid user ID conflicts, this user ID must be different from any other user ID in the target channel. The default value is 0, which means the SDK generates a random UID. + * Since token expiration in any destination channel will cause all cross-channel streaming to stop, it is recommended that you set the same expiration duration for tokens in all destination channels. Destination channel information ChannelMediaInfo, including the following members: channelName : Name of the destination channel. token : The token used to join the destination channel. It is generated based on the channelName and uid you set in destInfos. + * If App Certificate is not enabled, you can set this parameter to the default value null, which means the SDK fills in the App ID. + * If App Certificate is enabled, you must provide a token generated using the channelName and uid. uid : The UID that identifies the media stream being relayed in the destination channel. The value range is [0, 2^32-1]. 
Make sure it is different from all UIDs in the destination channel. The default value is 0, which means the SDK randomly assigns a UID. */ destInfos?: ChannelMediaInfo[]; /** - * The number of target channels. The default value is 0, and the value range is from 0 to 6. Ensure that the value of this parameter corresponds to the number of ChannelMediaInfo structs you define in destInfo. + * Number of destination channels. The default value is 0. Value range is [0,6]. This parameter should match the number of ChannelMediaInfo objects defined in destInfos. */ destCount?: number; } /** - * The uplink network information. + * Uplink network information. */ export class UplinkNetworkInfo { /** - * The target video encoder bitrate (bps). + * Target bitrate (bps) of the video encoder. */ video_encoder_target_bitrate_bps?: number; } @@ -5244,9 +5250,9 @@ export class DownlinkNetworkInfo { } /** - * The built-in encryption mode. + * Built-in encryption modes. * - * Agora recommends using Aes128Gcm2 or Aes256Gcm2 encrypted mode. These two modes support the use of salt for higher security. + * It is recommended to use the Aes128Gcm2 or Aes256Gcm2 encryption modes. These modes support salt, offering higher security. */ export enum EncryptionMode { /** @@ -5274,51 +5280,51 @@ export enum EncryptionMode { */ Aes256Gcm = 6, /** - * 7: (Default) 128-bit AES encryption, GCM mode. This encryption mode requires the setting of salt (encryptionKdfSalt). + * 7: (Default) 128-bit AES encryption, GCM mode. This encryption mode requires setting a salt (encryptionKdfSalt). */ Aes128Gcm2 = 7, /** - * 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt (encryptionKdfSalt). + * 8: 256-bit AES encryption, GCM mode. This encryption mode requires setting a salt (encryptionKdfSalt). */ Aes256Gcm2 = 8, /** - * Enumerator boundary. + * Enumeration boundary value. */ ModeEnd = 9, } /** - * Built-in encryption configurations. 
+ * Configures the built-in encryption mode and key. */ export class EncryptionConfig { /** - * The built-in encryption mode. See EncryptionMode. Agora recommends using Aes128Gcm2 or Aes256Gcm2 encrypted mode. These two modes support the use of salt for higher security. + * Built-in encryption mode. See EncryptionMode. It is recommended to use the Aes128Gcm2 or Aes256Gcm2 encryption modes. These modes support salt and offer better security. */ encryptionMode?: EncryptionMode; /** - * Encryption key in string type with unlimited length. Agora recommends using a 32-byte key. If you do not set an encryption key or set it as null, you cannot use the built-in encryption, and the SDK returns -2. + * Built-in encryption key, of type string, with no length limit. A 32-byte key is recommended. If this parameter is not specified or is set to null, built-in encryption cannot be enabled, and the SDK returns error code -2. */ encryptionKey?: string; /** - * Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server side. See Media Stream Encryption for details. This parameter takes effect only in Aes128Gcm2 or Aes256Gcm2 encrypted mode. In this case, ensure that this parameter is not 0. + * Salt, 32 bytes in length. It is recommended to generate the salt on the server side using OpenSSL. This parameter takes effect only when the encryption mode is Aes128Gcm2 or Aes256Gcm2. In this case, make sure the value of this parameter is not all 0. */ encryptionKdfSalt?: number[]; /** - * Whether to enable data stream encryption: true : Enable data stream encryption. false : (Default) Disable data stream encryption. + * Whether to enable data stream encryption: true : Enable data stream encryption. false : (default) Disable data stream encryption. */ datastreamEncryptionEnabled?: boolean; } /** - * Encryption error type. + * Built-in encryption error types. */ export enum EncryptionErrorType { /** - * 0: Internal reason. + * 0: Internal error. 
*/ EncryptionErrorInternalFailure = 0, /** - * 1: Media stream decryption error. Ensure that the receiver and the sender use the same encryption mode and key. + * 1: Media stream decryption error. Make sure the encryption mode or key used by the sender and receiver is the same. */ EncryptionErrorDecryptionFailure = 1, /** @@ -5326,7 +5332,7 @@ export enum EncryptionErrorType { */ EncryptionErrorEncryptionFailure = 2, /** - * 3: Data stream decryption error. Ensure that the receiver and the sender use the same encryption mode and key. + * 3: Data stream decryption error. Make sure the encryption mode or key used by the sender and receiver is the same. */ EncryptionErrorDatastreamDecryptionFailure = 3, /** @@ -5354,177 +5360,179 @@ export enum UploadErrorReason { } /** - * @ignore + * Error codes after calling renewToken. + * + * Since Available since v4.6.0. */ export enum RenewTokenErrorCode { /** - * @ignore + * (0): Token updated successfully. */ RenewTokenSuccess = 0, /** - * @ignore + * (1): Token update failed due to an unknown server error. It is recommended to check the parameters used to generate the Token, regenerate the Token, and retry renewToken. */ RenewTokenFailure = 1, /** - * @ignore + * (2): Token update failed because the provided Token has expired. It is recommended to generate a new Token with a longer expiration time and retry renewToken. */ RenewTokenTokenExpired = 2, /** - * @ignore + * (3): Token update failed because the provided Token is invalid. Common causes include: the project has enabled App Certificate in the Agora Console but did not use a Token when joining the channel; the uid specified in joinChannel does not match the one used to generate the Token; the channel name specified in joinChannel does not match the one used to generate the Token. It is recommended to check the Token generation process, regenerate the Token, and retry renewToken. 
*/ RenewTokenInvalidToken = 3, /** - * @ignore + * (4): Token update failed because the channel name in the Token does not match the current channel. It is recommended to check the channel name, regenerate the Token, and retry renewToken. */ RenewTokenInvalidChannelName = 4, /** - * @ignore + * (5): Token update failed because the App ID in the Token does not match the current App ID. It is recommended to check the App ID, regenerate the Token, and retry renewToken. */ RenewTokenInconsistentAppid = 5, /** - * @ignore + * (6): The previous Token update request was canceled due to a new request being initiated. */ RenewTokenCanceledByNewRequest = 6, } /** - * The type of the device permission. + * Device permission types. */ export enum PermissionType { /** - * 0: Permission for the audio capture device. + * 0: Permission for audio capture device. */ RecordAudio = 0, /** - * 1: Permission for the camera. + * 1: Camera permission. */ Camera = 1, /** - * (For Android only) 2: Permission for screen sharing. + * (Android only) 2: Screen sharing permission. */ ScreenCapture = 2, } /** - * The subscribing state. + * Subscription state. */ export enum StreamSubscribeState { /** - * 0: The initial publishing state after joining the channel. + * 0: Initial subscription state after joining the channel. */ SubStateIdle = 0, /** - * 1: Fails to subscribe to the remote stream. Possible reasons: - * The remote user: - * Calls muteLocalAudioStream (true) or muteLocalVideoStream (true) to stop sending local media stream. - * Calls disableAudio or disableVideo to disable the local audio or video module. - * Calls enableLocalAudio (false) or enableLocalVideo (false) to disable local audio or video capture. - * The role of the remote user is audience. - * The local user calls the following methods to stop receiving remote streams: - * Call muteRemoteAudioStream (true) or muteAllRemoteAudioStreams (true) to stop receiving the remote audio stream. 
- * Call muteRemoteVideoStream (true) or muteAllRemoteVideoStreams (true) to stop receiving the remote video stream. + * 1: Subscription failed. Possible reasons: + * Remote user: + * Called muteLocalAudioStream(true) or muteLocalVideoStream(true) to stop sending local media streams. + * Called disableAudio or disableVideo to disable the local audio or video module. + * Called enableLocalAudio(false) or enableLocalVideo(false) to disable local audio or video capture. + * User role is audience. + * Local user called the following methods to stop receiving remote media streams: + * Called muteRemoteAudioStream(true) or muteAllRemoteAudioStreams(true) to stop receiving remote audio streams. + * Called muteRemoteVideoStream(true) or muteAllRemoteVideoStreams(true) to stop receiving remote video streams. */ SubStateNoSubscribed = 1, /** - * 2: Subscribing. + * 2: Subscribing in progress. */ SubStateSubscribing = 2, /** - * 3: The remote stream is received, and the subscription is successful. + * 3: Remote stream received, subscription successful. */ SubStateSubscribed = 3, } /** - * The publishing state. + * Publishing state. */ export enum StreamPublishState { /** - * 0: The initial publishing state after joining the channel. + * 0: Initial publishing state after joining the channel. */ PubStateIdle = 0, /** - * 1: Fails to publish the local stream. Possible reasons: - * The local user calls muteLocalAudioStream (true) or muteLocalVideoStream (true) to stop sending local media streams. - * The local user calls disableAudio or disableVideo to disable the local audio or video module. - * The local user calls enableLocalAudio (false) or enableLocalVideo (false) to disable the local audio or video capture. - * The role of the local user is audience. + * 1: Publishing failed. Possible reasons: + * The local user called muteLocalAudioStream(true) or muteLocalVideoStream(true) to stop sending local media streams. 
+ * The local user called disableAudio or disableVideo to disable the local audio or video module. + * The local user called enableLocalAudio(false) or enableLocalVideo(false) to disable local audio or video capture. + * The local user's role is audience. */ PubStateNoPublished = 1, /** - * 2: Publishing. + * 2: Publishing in progress. */ PubStatePublishing = 2, /** - * 3: Publishes successfully. + * 3: Publishing succeeded. */ PubStatePublished = 3, } /** - * The configuration of the audio and video call loop test. + * Configuration for audio and video loopback testing. */ export class EchoTestConfiguration { /** - * The view used to render the local user's video. This parameter is only applicable to scenarios testing video devices, that is, when enableVideo is true. + * The view used to render the local user's video. This parameter is only applicable when testing video devices. Make sure enableVideo is set to true. */ view?: any; /** - * Whether to enable the audio device for the loop test: true : (Default) Enable the audio device. To test the audio device, set this parameter as true. false : Disable the audio device. + * Whether to enable audio devices: true : (Default) Enable audio devices. Set to true to test audio devices. false : Disable audio devices. */ enableAudio?: boolean; /** - * Whether to enable the video device for the loop test. Currently, video device loop test is not supported. Please set this parameter to false. + * Whether to enable video devices. Video device testing is not supported currently. Set this parameter to false. */ enableVideo?: boolean; /** - * The token used to secure the audio and video call loop test. 
If you do not enable App Certificate in Agora Console, you do not need to pass a value in this parameter; if you have enabled App Certificate in Agora Console, you must pass a token in this parameter; the uid used when you generate the token must be 0xFFFFFFFF, and the channel name used must be the channel name that identifies each audio and video call loop tested. For server-side token generation, see. + * Token used to secure the audio and video loopback test. If you have not enabled App Certificate in the console, you do not need to provide this parameter. If you have enabled App Certificate, you must provide a Token, and the uid used to generate the Token must be 0xFFFFFFFF, and the channel name must uniquely identify each loopback test. For how to generate a Token on the server, see [Token Authentication](https://doc.shengwang.cn/doc/rtc/rn/basic-features/token-authentication). */ token?: string; /** - * The channel name that identifies each audio and video call loop. To ensure proper loop test functionality, the channel name passed in to identify each loop test cannot be the same when users of the same project (App ID) perform audio and video call loop tests on different devices. + * The channel name that identifies each audio and video loopback test. To ensure proper loopback testing, users under the same project (App ID) must use different channel names on different devices. */ channelId?: string; /** - * Set the time interval or delay for returning the results of the audio and video loop test. The value range is [2,10], in seconds, with the default value being 2 seconds. - * For audio loop tests, the test results will be returned according to the time interval you set. - * For video loop tests, the video will be displayed in a short time, after which the delay will gradually increase until it reaches the delay you set. + * Set the interval or delay for returning loopback test results. Value range: [2,10] seconds. Default is 2 seconds. 
+ * For audio loopback tests, results are returned based on the interval you set. + * For video loopback tests, the video appears briefly, then the delay gradually increases until it reaches the set interval. */ intervalInSeconds?: number; } /** - * The information of the user. + * User information. */ export class UserInfo { /** - * The user ID. + * User ID. */ uid?: number; /** - * User account. The maximum data length is MaxUserAccountLengthType. + * User account. Length is limited by MaxUserAccountLengthType. */ userAccount?: string; } /** - * The audio filter types of in-ear monitoring. + * Ear monitoring audio filter type. */ export enum EarMonitoringFilterType { /** - * 1<<0: No audio filter added to in-ear monitoring. + * 1<<0: Do not add audio filters in ear monitoring. */ EarMonitoringFilterNone = 1 << 0, /** - * 1<<1: Add vocal effects audio filter to in-ear monitoring. If you implement functions such as voice beautifier and audio effect, users can hear the voice after adding these effects. + * 1<<1: Add vocal effect audio filters in ear monitoring. If you implement features such as voice beautification or sound effects, users can hear the processed sound in ear monitoring. */ EarMonitoringFilterBuiltInAudioFilters = 1 << 1, /** - * 1<<2: Add noise suppression audio filter to in-ear monitoring. + * 1<<2: Add noise suppression audio filters in ear monitoring. */ EarMonitoringFilterNoiseSuppression = 1 << 2, /** - * 1<<15: Reuse the audio filter that has been processed on the sending end for in-ear monitoring. This enumerator reduces CPU usage while increasing in-ear monitoring latency, which is suitable for latency-tolerant scenarios requiring low CPU consumption. + * 1<<15: Reuse audio filters that have already been applied on the sending side. Reusing audio filters reduces CPU usage for ear monitoring, but increases latency. Suitable for scenarios where reducing CPU consumption is more important than minimizing ear monitoring latency. 
*/ EarMonitoringFilterReusePostProcessingFilter = 1 << 15, } @@ -5560,107 +5568,106 @@ export enum ThreadPriorityType { } /** - * The video configuration for the shared screen stream. + * Video encoding configuration for the shared screen stream. */ export class ScreenVideoParameters { /** - * The video encoding dimension. The default value is 1280 × 720. + * Video encoding resolution. Default is 1280 × 720. */ dimensions?: VideoDimensions; /** - * The video encoding frame rate (fps). The default value is 15. + * Video encoding frame rate (fps). Default is 15. */ frameRate?: number; /** - * The video encoding bitrate (Kbps). + * Video encoding bitrate (Kbps). */ bitrate?: number; /** - * The content hint for screen sharing. + * Content type of the screen sharing video. */ contentHint?: VideoContentHint; } /** - * Screen sharing configurations. + * Parameter configuration for screen sharing. */ export class ScreenCaptureParameters2 { /** - * Determines whether to capture system audio during screen sharing: true : Capture system audio. false : (Default) Do not capture system audio. - * Due to system limitations, capturing system audio is only applicable to Android API level 29 and later (that is, Android 10 and later). - * To improve the success rate of capturing system audio during screen sharing, ensure that you have called the setAudioScenario method and set the audio scenario to AudioScenarioGameStreaming. + * Due to system limitations, capturing system audio is only supported on Android API level 29 and above, i.e., Android 10 and above. + * To improve the success rate of capturing system audio during screen sharing, make sure you call the setAudioScenario method and set the audio scenario to AudioScenarioGameStreaming. Whether to capture system audio during screen sharing: true : Capture system audio. false : (Default) Do not capture system audio. */ captureAudio?: boolean; /** - * The audio configuration for the shared screen stream. See ScreenAudioParameters. 
This parameter only takes effect when captureAudio is true. + * Audio configuration for the shared screen stream. See ScreenAudioParameters. This parameter takes effect only when captureAudio is set to true. */ audioParams?: ScreenAudioParameters; /** - * Whether to capture the screen when screen sharing: true : (Default) Capture the screen. false : Do not capture the screen. Due to system limitations, the capture screen is only applicable to Android API level 21 and above, that is, Android 5 and above. + * Due to system limitations, screen capture is only supported on Android API level 21 and above, i.e., Android 5 and above. Whether to capture the screen during screen sharing: true : (Default) Capture the screen. false : Do not capture the screen. */ captureVideo?: boolean; /** - * The video configuration for the shared screen stream. See ScreenVideoParameters. This parameter only takes effect when captureVideo is true. + * Video encoding configuration for the shared screen stream. See ScreenVideoParameters. This parameter takes effect only when captureVideo is set to true. */ videoParams?: ScreenVideoParameters; } /** - * The rendering state of the media frame. + * The rendering state of media frames. */ export enum MediaTraceEvent { /** - * 0: The video frame has been rendered. + * 0: Video frame rendered. */ MediaTraceEventVideoRendered = 0, /** - * 1: The video frame has been decoded. + * 1: Video frame decoded. */ MediaTraceEventVideoDecoded = 1, } /** - * Indicators during video frame rendering progress. + * Metric information during the video frame rendering process. */ export class VideoRenderingTracingInfo { /** - * The time interval (ms) from startMediaRenderingTracing to SDK triggering the onVideoRenderingTracingResult callback. Agora recommends you call startMediaRenderingTracing before joining a channel. + * Time interval (ms) from calling startMediaRenderingTracing to triggering the onVideoRenderingTracingResult callback. 
It is recommended to call startMediaRenderingTracing before joining the channel. */ elapsedTime?: number; /** - * The time interval (ms) from startMediaRenderingTracing to joinChannel. A negative number indicates that startMediaRenderingTracing is called after calling joinChannel. + * Time interval (ms) from calling startMediaRenderingTracing to calling joinChannel. A negative value indicates startMediaRenderingTracing was called after joinChannel. */ start2JoinChannel?: number; /** - * The time interval (ms) from or joinChannel to successfully joining the channel. + * Time interval (ms) from calling joinChannel1 or joinChannel to successfully joining the channel. */ join2JoinSuccess?: number; /** - * If the local user calls startMediaRenderingTracing before successfully joining the channel, this value is the time interval (ms) from the local user successfully joining the channel to the remote user joining the channel. - * If the local user calls startMediaRenderingTracing after successfully joining the channel, the value is the time interval (ms) from startMediaRenderingTracing to when the remote user joins the channel. - * If the local user calls startMediaRenderingTracing after the remote user joins the channel, the value is 0 and meaningless. - * In order to reduce the time of rendering the first frame for remote users, Agora recommends that the local user joins the channel when the remote user is in the channel to reduce this value. + * If startMediaRenderingTracing is called after the remote user joins the channel, this value is 0 and has no reference value. + * To improve the rendering speed of the remote user, it is recommended that the local user joins the channel after the remote user, to reduce this value. + * If startMediaRenderingTracing is called before the local user joins the channel, this value is the time interval (ms) from the local user successfully joining the channel to the remote user joining. 
+ * If startMediaRenderingTracing is called after the local user joins the channel, this value is the time interval (ms) from calling startMediaRenderingTracing to the remote user joining. */ joinSuccess2RemoteJoined?: number; /** - * If the local user calls startMediaRenderingTracing before the remote user joins the channel, this value is the time interval (ms) from when the remote user joins the channel to when the local user sets the remote view. - * If the local user calls startMediaRenderingTracing after the remote user joins the channel, this value is the time interval (ms) from calling startMediaRenderingTracing to setting the remote view. - * If the local user calls startMediaRenderingTracing after setting the remote view, the value is 0 and has no effect. - * In order to reduce the time of rendering the first frame for remote users, Agora recommends that the local user sets the remote view before the remote user joins the channel, or sets the remote view immediately after the remote user joins the channel to reduce this value. + * If startMediaRenderingTracing is called after setting the remote view, this value is 0 and has no reference value. + * To improve the rendering speed of the remote user, it is recommended to set the remote view before the remote user joins the channel, or immediately after the remote user joins, to reduce this value. + * If startMediaRenderingTracing is called before the remote user joins the channel, this value is the time interval (ms) from the remote user joining to the local user setting the remote view. + * If startMediaRenderingTracing is called after the remote user joins the channel, this value is the time interval (ms) from calling startMediaRenderingTracing to setting the remote view. 
*/ remoteJoined2SetView?: number; /** - * If the local user calls startMediaRenderingTracing before the remote user joins the channel, this value is the time interval (ms) from the remote user joining the channel to subscribing to the remote video stream. - * If the local user calls startMediaRenderingTracing after the remote user joins the channel, this value is the time interval (ms) from startMediaRenderingTracing to subscribing to the remote video stream. - * If the local user calls startMediaRenderingTracing after subscribing to the remote video stream, the value is 0 and has no effect. - * In order to reduce the time of rendering the first frame for remote users, Agora recommends that after the remote user joins the channel, the local user immediately subscribes to the remote video stream to reduce this value. + * If startMediaRenderingTracing is called after subscribing to the remote video stream, this value is 0 and has no reference value. + * To improve the rendering speed of the remote user, it is recommended to subscribe to the remote video stream immediately after the remote user joins the channel, to reduce this value. + * If startMediaRenderingTracing is called before the remote user joins the channel, this value is the time interval (ms) from the remote user joining to subscribing to the remote video stream. + * If startMediaRenderingTracing is called after the remote user joins the channel, this value is the time interval (ms) from calling startMediaRenderingTracing to subscribing to the remote video stream. */ remoteJoined2UnmuteVideo?: number; /** - * If the local user calls startMediaRenderingTracing before the remote user joins the channel, this value is the time interval (ms) from when the remote user joins the channel to when the local user receives the remote video stream. 
- * If the local user calls startMediaRenderingTracing after the remote user joins the channel, this value is the time interval (ms) from startMediaRenderingTracing to receiving the remote video stream. - * If the local user calls startMediaRenderingTracing after receiving the remote video stream, the value is 0 and has no effect. - * In order to reduce the time of rendering the first frame for remote users, Agora recommends that the remote user publishes video streams immediately after joining the channel, and the local user immediately subscribes to remote video streams to reduce this value. + * If startMediaRenderingTracing is called after receiving the remote video stream, this value is 0 and has no reference value. + * To improve the rendering speed of the remote user, it is recommended that the remote user publishes the video stream immediately after joining the channel, and the local user subscribes to the remote stream immediately, to reduce this value. + * If startMediaRenderingTracing is called before the remote user joins the channel, this value is the time interval (ms) from the remote user joining to the local user receiving the first remote data packet. + * If startMediaRenderingTracing is called after the remote user joins the channel, this value is the time interval (ms) from calling startMediaRenderingTracing to receiving the first remote data packet. */ remoteJoined2PacketReceived?: number; } @@ -5694,67 +5701,67 @@ export enum LocalProxyMode { } /** - * @ignore + * Configuration information of the log server. */ export class LogUploadServerInfo { /** - * @ignore + * Domain name of the log server. */ serverDomain?: string; /** - * @ignore + * Storage path of the log on the server. */ serverPath?: string; /** - * @ignore + * Port of the log server. */ serverPort?: number; /** - * @ignore + * Whether the log server uses HTTPS protocol: true : Uses HTTPS protocol. false : Uses HTTP protocol. 
*/ serverHttps?: boolean; } /** - * @ignore + * Advanced options for Local Access Point. */ export class AdvancedConfigInfo { /** - * @ignore + * Custom log upload server. By default, the SDK uploads logs to the Agora log server. You can use this parameter to modify the log upload server. See LogUploadServerInfo. */ logUploadServer?: LogUploadServerInfo; } /** - * @ignore + * Local Access Point configuration. */ export class LocalAccessPointConfiguration { /** - * @ignore + * Internal IP address list of the Local Access Point. Either ipList or domainList must be provided. */ ipList?: string[]; /** - * @ignore + * Number of internal IP addresses of the Local Access Point. This value must match the number of IP addresses you provide. */ ipListSize?: number; /** - * @ignore + * Domain name list of the Local Access Point. The SDK resolves the IP addresses of the Local Access Point from the provided domain names. The domain resolution timeout is 10 seconds. Either ipList or domainList must be provided. If you specify both IP addresses and domain names, the SDK merges and deduplicates the resolved IPs and the specified IPs, then randomly connects to one IP for load balancing. */ domainList?: string[]; /** - * @ignore + * Number of domain names for the Local Access Point. This value must match the number of domain names you provide. */ domainListSize?: number; /** - * @ignore + * Domain name for internal certificate verification. If left empty, the SDK uses the default verification domain secure-edge.local. */ verifyDomainName?: string; /** - * @ignore + * Connection mode. See LocalProxyMode. */ mode?: LocalProxyMode; /** - * @ignore + * Advanced options for the Local Access Point. See AdvancedConfigInfo. */ advancedConfig?: AdvancedConfigInfo; /** @@ -5840,96 +5847,95 @@ export enum RdtState { } /** - * The spatial audio parameters. + * Spatial audio parameters. 
*/ export class SpatialAudioParams { /** - * The azimuth angle of the remote user or media player relative to the local user. The value range is [0,360], and the unit is degrees, The values are as follows: - * 0: (Default) 0 degrees, which means directly in front on the horizontal plane. - * 90: 90 degrees, which means directly to the left on the horizontal plane. - * 180: 180 degrees, which means directly behind on the horizontal plane. - * 270: 270 degrees, which means directly to the right on the horizontal plane. - * 360: 360 degrees, which means directly in front on the horizontal plane. + * The horizontal angle of the remote user or media player relative to the local user. Value range: [0,360] degrees. Where: + * 0: (default) 0 degrees, directly in front on the horizontal plane. + * 90: 90 degrees, directly to the left. + * 180: 180 degrees, directly behind. + * 270: 270 degrees, directly to the right. + * 360: 360 degrees, same as 0 degrees. */ speaker_azimuth?: number; /** - * The elevation angle of the remote user or media player relative to the local user. The value range is [-90,90], and the unit is degrees, The values are as follows: - * 0: (Default) 0 degrees, which means that the horizontal plane is not rotated. - * -90: -90 degrees, which means that the horizontal plane is rotated 90 degrees downwards. - * 90: 90 degrees, which means that the horizontal plane is rotated 90 degrees upwards. + * The elevation angle of the remote user or media player relative to the local user. Value range: [-90,90] degrees. Where: + * 0: (default) 0 degrees, no vertical rotation. + * -90: -90 degrees, rotated 90 degrees downward. + * 90: 90 degrees, rotated 90 degrees upward. */ speaker_elevation?: number; /** - * The distance of the remote user or media player relative to the local user. The value range is [1,50], and the unit is meters. The default value is 1 meter. + * The distance of the remote user or media player relative to the local user. 
Value range: [1,50] meters. Default is 1 meter. */ speaker_distance?: number; /** - * The orientation of the remote user or media player relative to the local user. The value range is [0,180], and the unit is degrees, The values are as follows: - * 0: (Default) 0 degrees, which means that the sound source and listener face the same direction. - * 180: 180 degrees, which means that the sound source and listener face each other. + * The orientation of the remote user or media player relative to the local user. Value range: [0,180] degrees. Where: + * 0: (default) 0 degrees, both source and listener face the same direction. + * 180: 180 degrees, source and listener face each other. */ speaker_orientation?: number; /** - * Whether to enable audio blurring: true : Enable audio blurring. false : (Default) Disable audio blurring. + * Whether to enable sound blur processing: true : Enable blur. false : (default) Disable blur. */ enable_blur?: boolean; /** - * Whether to enable air absorption, that is, to simulate the sound attenuation effect of sound transmitting in the air; under a certain transmission distance, the attenuation speed of high-frequency sound is fast, and the attenuation speed of low-frequency sound is slow. true : (Default) Enable air absorption. Make sure that the value of speaker_attenuation is not 0; otherwise, this setting does not take effect. false : Disable air absorption. + * Whether to enable air absorption, simulating the attenuation of sound timbre as it travels through air: at certain distances, high frequencies attenuate faster than low frequencies. true : (default) Enable air absorption. Make sure speaker_attenuation is not 0, otherwise this setting has no effect. false : Disable air absorption. */ enable_air_absorb?: boolean; /** - * The sound attenuation coefficient of the remote user or media player. The value range is [0,1]. 
The values are as follows: - * 0: Broadcast mode, where the volume and timbre are not attenuated with distance, and the volume and timbre heard by local users do not change regardless of distance. - * (0,0.5): Weak attenuation mode, where the volume and timbre only have a weak attenuation during the propagation, and the sound can travel farther than that in a real environment. enable_air_absorb needs to be enabled at the same time. - * 0.5: (Default) Simulates the attenuation of the volume in the real environment; the effect is equivalent to not setting the speaker_attenuation parameter. - * (0.5,1]: Strong attenuation mode, where volume and timbre attenuate rapidly during the propagation. enable_air_absorb needs to be enabled at the same time. + * Sound attenuation coefficient for the remote user or media player, value range [0,1]. Where: + * 0: Broadcast mode, volume and timbre do not attenuate with distance; the local user hears no change regardless of distance. + * (0,0.5): Weak attenuation mode; volume and timbre (requires enable_air_absorb) attenuate slightly, allowing sound to travel farther than in real environments. + * 0.5: (default) Simulates real-world volume attenuation, equivalent to not setting speaker_attenuation. + * (0.5,1]: Strong attenuation mode; volume and timbre (requires enable_air_absorb) attenuate rapidly. */ speaker_attenuation?: number; /** - * Whether to enable the Doppler effect: When there is a relative displacement between the sound source and the receiver of the sound source, the tone heard by the receiver changes. true : Enable the Doppler effect. false : (Default) Disable the Doppler effect. - * This parameter is suitable for scenarios where the sound source is moving at high speed (for example, racing games). It is not recommended for common audio and video interactive scenarios (for example, voice chat, co-streaming, or online KTV). 
- * When this parameter is enabled, Agora recommends that you set a regular period (such as 30 ms), and then call the updatePlayerPositionInfo, updateSelfPosition, and updateRemotePosition methods to continuously update the relative distance between the sound source and the receiver. The following factors can cause the Doppler effect to be unpredictable or the sound to be jittery: the period of updating the distance is too long, the updating period is irregular, or the distance information is lost due to network packet loss or delay. + * This parameter is suitable for scenarios with fast-moving sound sources (e.g., racing games). It is not recommended for typical audio/video interaction scenarios (voice chat, co-hosting, online KTV). + * When enabled, it is recommended to set a regular update interval (e.g., 30 ms) and continuously call updatePlayerPositionInfo, updateSelfPosition, and updateRemotePosition to update the relative distance between the source and receiver. The Doppler effect may not work as expected or may cause jitter if: the update interval is too long, the updates are irregular, or packet loss/delay causes distance info loss. Whether to enable Doppler effect: when there is relative movement between the sound source and the receiver, the pitch heard by the receiver changes. true : Enable Doppler effect. false : (default) Disable Doppler effect. */ enable_doppler?: boolean; } /** - * Layout information of a specific sub-video stream within the mixed stream. + * Layout information of a sub-video stream in a composite video. */ export class VideoLayout { /** - * The channel name to which the sub-video stream belongs. + * Channel name to which the sub-video stream belongs. */ channelId?: string; /** - * User ID who published this sub-video stream. + * User ID that publishes the sub-video stream. */ uid?: number; /** - * Reserved for future use. + * Reserved parameter. 
*/ strUid?: string; /** - * X-coordinate (px) of the sub-video stream on the mixing canvas. The relative lateral displacement of the top left corner of the video for video mixing to the origin (the top left corner of the canvas). + * The x-coordinate (px) of the sub-video on the composite canvas. It represents the horizontal offset of the top-left corner of the sub-video relative to the top-left corner (origin) of the canvas. */ x?: number; /** - * Y-coordinate (px) of the sub-video stream on the mixing canvas. The relative longitudinal displacement of the top left corner of the captured video to the origin (the top left corner of the canvas). + * The y-coordinate (px) of the sub-video on the composite canvas. It represents the vertical offset of the top-left corner of the sub-video relative to the top-left corner (origin) of the canvas. */ y?: number; /** - * Width (px) of the sub-video stream. + * Width of the sub-video stream (px). */ width?: number; /** - * Heitht (px) of the sub-video stream. + * Height of the sub-video stream (px). */ height?: number; /** - * Status of the sub-video stream on the video mixing canvas. - * 0: Normal. The sub-video stream has been rendered onto the mixing canvas. - * 1: Placeholder image. The sub-video stream has no video frames and is displayed as a placeholder on the mixing canvas. - * 2: Black image. The sub-video stream is replaced by a black image. + * State of the sub-video stream on the composite canvas. + * 0: Normal. The video stream has been rendered on the canvas. + * 1: Placeholder. The video stream has no video content and is displayed as a placeholder. + * 2: Black image. The video stream is replaced by a black image. 
*/ videoState?: number; } diff --git a/src/AgoraMediaBase.ts b/src/AgoraMediaBase.ts index 5ab1da1a..bf5e29eb 100644 --- a/src/AgoraMediaBase.ts +++ b/src/AgoraMediaBase.ts @@ -2,53 +2,53 @@ import './extension/AgoraMediaBaseExtension'; import { EncodedVideoFrameInfo } from './AgoraBase'; /** - * The context information of the extension. + * Plugin context information. */ export class ExtensionContext { /** - * Whether the uid in ExtensionContext is valid: true : The uid is valid. false : The uid is invalid. + * Whether the uid reported in ExtensionContext is valid: true : uid is valid. false : uid is invalid. */ isValid?: boolean; /** - * The user ID. 0 represents a local user, while greater than 0 represents a remote user. + * User ID. 0 represents the local user, values greater than 0 represent remote users. */ uid?: number; /** - * The name of the extension provider. + * Name of the plugin provider. */ providerName?: string; /** - * The name of the extension. + * Name of the plugin. */ extensionName?: string; } /** - * The type of the video source. + * Type of video source. */ export enum VideoSourceType { /** - * 0: (Default) The primary camera. + * 0: (Default) The video source is the first camera. */ VideoSourceCameraPrimary = 0, /** - * 0: (Default) The primary camera. + * 0: (Default) The video source is the first camera. */ VideoSourceCamera = 0, /** - * 1: The secondary camera. + * 1: The video source is the second camera. */ VideoSourceCameraSecondary = 1, /** - * 2: The primary screen. + * 2: The video source is the first screen. */ VideoSourceScreenPrimary = 2, /** - * 2: The primary screen. + * 2: The video source is the first screen. */ VideoSourceScreen = 2, /** - * 3: The secondary screen. + * 3: The video source is the second screen. */ VideoSourceScreenSecondary = 3, /** @@ -56,35 +56,35 @@ export enum VideoSourceType { */ VideoSourceCustom = 4, /** - * 5: The media player. + * 5: The video source is a media player. 
*/ VideoSourceMediaPlayer = 5, /** - * 6: One PNG image. + * 6: The video source is a PNG image. */ VideoSourceRtcImagePng = 6, /** - * 7: One JPEG image. + * 7: The video source is a JPEG image. */ VideoSourceRtcImageJpeg = 7, /** - * 8: One GIF image. + * 8: The video source is a GIF image. */ VideoSourceRtcImageGif = 8, /** - * 9: One remote video acquired by the network. + * 9: The video source is a remote video fetched from the network. */ VideoSourceRemote = 9, /** - * 10: One transcoded video source. + * 10: A transcoded video source. */ VideoSourceTranscoded = 10, /** - * 11: (For Android only) The third camera. + * 11: (Android only) The video source is the third camera. */ VideoSourceCameraThird = 11, /** - * 12: (For Android only) The fourth camera. + * 12: (Android only) The video source is the fourth camera. */ VideoSourceCameraFourth = 12, /** @@ -96,17 +96,17 @@ export enum VideoSourceType { */ VideoSourceScreenFourth = 14, /** - * @ignore + * 15: The video source is video processed by a speech-driven plugin. */ VideoSourceSpeechDriven = 15, /** - * 100: An unknown video source. + * 100: Unknown video source. */ VideoSourceUnknown = 100, } /** - * The audio source type. + * Audio source type. */ export enum AudioSourceType { /** @@ -114,7 +114,7 @@ export enum AudioSourceType { */ AudioSourceMicrophone = 0, /** - * 1: Custom audio stream. + * 1: Custom captured audio stream. */ AudioSourceCustom = 1, /** @@ -134,45 +134,45 @@ export enum AudioSourceType { */ AudioSourceRemoteUser = 5, /** - * 6: Mixed audio streams from all users in the current channel. + * 6: Mixed audio stream from all users in the current channel. */ AudioSourceRemoteChannel = 6, /** - * 100: An unknown audio source. + * 100: Unknown audio source. */ AudioSourceUnknown = 100, } /** - * The type of the audio route. + * Type of audio route. */ export enum AudioRoute { /** - * -1: The default audio route. + * -1: Use the default audio route. 
*/ RouteDefault = -1, /** - * 0: Audio output routing is a headset with microphone. + * 0: Audio route is a headset with microphone. */ RouteHeadset = 0, /** - * 1: The audio route is an earpiece. + * 1: Audio route is the earpiece. */ RouteEarpiece = 1, /** - * 2: The audio route is a headset without a microphone. + * 2: Audio route is a headset without microphone. */ RouteHeadsetnomic = 2, /** - * 3: The audio route is the speaker that comes with the device. + * 3: Audio route is the built-in speaker of the device. */ RouteSpeakerphone = 3, /** - * 4: The audio route is an external speaker. (iOS only) + * 4: Audio route is an external speaker. (iOS only) */ RouteLoudspeaker = 4, /** - * 5: The audio route is a Bluetooth device using the HFP protocol. + * 5: Audio route is a Bluetooth device using the HFP protocol. */ RouteBluetoothDeviceHfp = 5, /** @@ -192,7 +192,7 @@ export enum AudioRoute { */ RouteAirplay = 9, /** - * 10: The audio route is a Bluetooth device using the A2DP protocol. + * 10: Audio route is a Bluetooth device using the A2DP protocol. */ RouteBluetoothDeviceA2dp = 10, } @@ -226,21 +226,21 @@ export class AudioParameters { } /** - * The use mode of the audio data. + * Usage modes for audio data. */ export enum RawAudioFrameOpModeType { /** - * 0: Read-only mode, For example, when users acquire the data with the Agora SDK, then start the media push. + * 0: (Default) Read-only mode. For example, if you collect data using the SDK and perform CDN streaming yourself, you can choose this mode. */ RawAudioFrameOpModeReadOnly = 0, /** - * 2: Read and write mode, For example, when users have their own audio-effect processing module and perform some voice preprocessing, such as a voice change. + * 2: Read-write mode. For example, if you have your own audio effects processing module and want to pre-process the data as needed (such as voice changing), you can choose this mode. */ RawAudioFrameOpModeReadWrite = 2, } /** - * Media source type. 
+ * The type of media source. */ export enum MediaSourceType { /** @@ -248,15 +248,15 @@ export enum MediaSourceType { */ AudioPlayoutSource = 0, /** - * 1: Audio capturing device. + * 1: Audio recording device. */ AudioRecordingSource = 1, /** - * 2: The primary camera. + * 2: Primary camera. */ PrimaryCameraSource = 2, /** - * 3: A secondary camera. + * 3: Secondary camera. */ SecondaryCameraSource = 3, /** @@ -268,7 +268,7 @@ export enum MediaSourceType { */ SecondaryScreenSource = 5, /** - * 6: Custom video source. + * 6: Custom video capture source. */ CustomVideoSource = 6, /** @@ -296,7 +296,7 @@ export enum MediaSourceType { */ TranscodedVideoSource = 12, /** - * @ignore + * 13: Video source processed by speech-driven plugin. */ SpeechDrivenVideoSource = 13, /** @@ -334,23 +334,23 @@ export class AudioEncodedFrameInfo { } /** - * The parameters of the audio frame in PCM format. + * Information of external PCM format audio frame. */ export class AudioPcmFrame { /** - * The timestamp (ms) of the audio frame. + * Timestamp of the audio frame (ms). */ capture_timestamp?: number; /** - * The number of samples per channel in the audio frame. + * Number of samples per channel. */ samples_per_channel_?: number; /** - * Audio sample rate (Hz). + * Audio sampling rate (Hz). */ sample_rate_hz_?: number; /** - * The number of audio channels. + * Number of audio channels. */ num_channels_?: number; /** @@ -358,11 +358,11 @@ export class AudioPcmFrame { */ audio_track_number_?: number; /** - * The number of bytes per sample. + * Number of bytes per audio sample. */ bytes_per_sample?: BytesPerSample; /** - * The audio frame. + * Audio frame data. */ data_?: number[]; /** @@ -372,7 +372,7 @@ export class AudioPcmFrame { } /** - * The channel mode. + * Channel mode. */ export enum AudioDualMonoMode { /** @@ -380,29 +380,29 @@ export enum AudioDualMonoMode { */ AudioDualMonoStereo = 0, /** - * 1: Left channel mode. 
This mode replaces the audio of the right channel with the audio of the left channel, which means the user can only hear the audio of the left channel. + * 1: Left channel mode. This mode replaces the right channel audio with the left channel audio, so the user hears only the left channel. */ AudioDualMonoL = 1, /** - * 2: Right channel mode. This mode replaces the audio of the left channel with the audio of the right channel, which means the user can only hear the audio of the right channel. + * 2: Right channel mode. This mode replaces the left channel audio with the right channel audio, so the user hears only the right channel. */ AudioDualMonoR = 2, /** - * 3: Mixed channel mode. This mode mixes the audio of the left channel and the right channel, which means the user can hear the audio of the left channel and the right channel at the same time. + * 3: Mixed mode. This mode mixes the left and right channels, so the user hears both channels simultaneously. */ AudioDualMonoMix = 3, } /** - * The video pixel format. + * Video pixel format. */ export enum VideoPixelFormat { /** - * 0: Raw video pixel format. + * 0: Original video pixel format. */ VideoPixelDefault = 0, /** - * 1: The format is I420. + * 1: I420 format. */ VideoPixelI420 = 1, /** @@ -414,7 +414,7 @@ export enum VideoPixelFormat { */ VideoPixelNv21 = 3, /** - * 4: The format is RGBA. + * 4: RGBA format. */ VideoPixelRgba = 4, /** @@ -446,7 +446,7 @@ export enum VideoPixelFormat { */ VideoCvpixelP010 = 15, /** - * 16: The format is I422. + * 16: I422 format. */ VideoPixelI422 = 16, /** @@ -460,15 +460,15 @@ export enum VideoPixelFormat { } /** - * Video display modes. + * Video display mode. */ export enum RenderModeType { /** - * 1: Hidden mode. The priority is to fill the window. Any excess video that does not match the window size will be cropped. + * 1: The video is scaled proportionally. Priority is given to filling the view. 
Any excess part of the video that does not fit due to aspect ratio differences will be cropped. */ RenderModeHidden = 1, /** - * 2: Fit mode. The priority is to ensure that all video content is displayed. Any areas of the window that are not filled due to the mismatch between video size and window size will be filled with black. + * 2: The video is scaled proportionally. Priority is given to displaying the entire video content. Any area not filled due to aspect ratio differences will be filled with black. */ RenderModeFit = 2, /** @@ -804,11 +804,11 @@ export class Hdr10MetadataInfo { } /** - * The relative position of alphaBuffer and video frames. + * The relative position of alphaBuffer and the video frame. */ export enum AlphaStitchMode { /** - * 0: (Default) Only video frame, that is, alphaBuffer is not stitched with the video frame. + * 0: (Default) Video frame only, i.e., alphaBuffer is not stitched with the video frame. */ NoAlphaStitch = 0, /** @@ -844,77 +844,77 @@ export enum EglContextType { } /** - * The video buffer type. + * Video buffer type. */ export enum VideoBufferType { /** - * 1: The video buffer in the format of raw data. + * 1: Type is raw data. */ VideoBufferRawData = 1, /** - * 2: The video buffer in the format of raw data. + * 2: Type is raw data. */ VideoBufferArray = 2, /** - * 3: The video buffer in the format of Texture. + * 3: Type is Texture. */ VideoBufferTexture = 3, } /** - * The external video frame. + * External video frame. */ export class ExternalVideoFrame { /** - * The video type. See VideoBufferType. + * Video type. See VideoBufferType. */ type?: VideoBufferType; /** - * The pixel format. See VideoPixelFormat. + * Pixel format. See VideoPixelFormat. */ format?: VideoPixelFormat; /** - * Video frame buffer. + * Video buffer. */ buffer?: Uint8Array; /** - * Line spacing of the incoming video frame, which must be in pixels instead of bytes. For textures, it is the width of the texture. 
+ * Stride of the input video frame, in pixels (not bytes). For Texture, this value indicates the width of the Texture. */ stride?: number; /** - * Height of the incoming video frame. + * Height of the input video frame. */ height?: number; /** - * Raw data related parameter. The number of pixels trimmed from the left. The default value is 0. + * This parameter applies only to raw video data. */ cropLeft?: number; /** - * Raw data related parameter. The number of pixels trimmed from the top. The default value is 0. + * This parameter applies only to raw video data. */ cropTop?: number; /** - * Raw data related parameter. The number of pixels trimmed from the right. The default value is 0. + * This parameter applies only to raw video data. */ cropRight?: number; /** - * Raw data related parameter. The number of pixels trimmed from the bottom. The default value is 0. + * This parameter applies only to raw video data. */ cropBottom?: number; /** - * Raw data related parameter. The clockwise rotation of the video frame. You can set the rotation angle as 0, 90, 180, or 270. The default value is 0. + * Field related to raw data. Specifies whether to rotate the input video group clockwise. Options: 0, 90, 180, 270. Default is 0. */ rotation?: number; /** - * Timestamp (ms) of the incoming video frame. An incorrect timestamp results in frame loss or unsynchronized audio and video. + * Timestamp of the input video frame, in milliseconds. Incorrect timestamps may result in frame drops or audio-video desynchronization. */ timestamp?: number; /** - * This parameter only applies to video data in Texture format. Texture ID of the video frame. + * This parameter applies only to video data in Texture format. Indicates the Texture ID of the video frame. */ eglType?: EglContextType; /** - * This parameter only applies to video data in Texture format. Incoming 4 × 4 transformational matrix. The typical value is a unit matrix. 
+ * This parameter applies only to video data in Texture format. A 4x4 transformation matrix input, typically an identity matrix. */ textureId?: number; /** @@ -922,29 +922,30 @@ export class ExternalVideoFrame { */ fenceObject?: number; /** - * This parameter only applies to video data in Texture format. Incoming 4 × 4 transformational matrix. The typical value is a unit matrix. + * This parameter applies only to video data in Texture format. A 4x4 transformation matrix input, typically an identity matrix. */ matrix?: number[]; /** - * This parameter only applies to video data in Texture format. The MetaData buffer. The default value is NULL. + * This parameter applies only to video data in Texture format. Indicates the data buffer of MetaData. Default value is NULL. */ metadataBuffer?: Uint8Array; /** - * This parameter only applies to video data in Texture format. The MetaData size. The default value is 0. + * This parameter applies only to video data in Texture format. Indicates the size of MetaData. Default value is 0. */ metadataSize?: number; /** - * The alpha channel data output by using portrait segmentation algorithm. This data matches the size of the video frame, with each pixel value ranging from [0,255], where 0 represents the background and 255 represents the foreground (portrait). By setting this parameter, you can render the video background into various effects, such as transparent, solid color, image, video, etc. In custom video rendering scenarios, ensure that both the video frame and alphaBuffer are of the Full Range type; other types may cause abnormal alpha data rendering. + * Alpha channel data output by the portrait segmentation algorithm. This data matches the size of the video frame. Each pixel value ranges from [0,255], where 0 represents background and 255 represents foreground (portrait). + * You can use this parameter to render the video background with various effects, such as transparency, solid color, image, or video. 
In custom video rendering scenarios, ensure that both the input video frame and alphaBuffer are of Full Range type; other types may result in abnormal Alpha data rendering. */ alphaBuffer?: Uint8Array; /** - * This parameter only applies to video data in BGRA or RGBA format. Whether to extract the alpha channel data from the video frame and automatically fill it into alphaBuffer : true :Extract and fill the alpha channel data. false : (Default) Do not extract and fill the Alpha channel data. For video data in BGRA or RGBA format, you can set the Alpha channel data in either of the following ways: - * Automatically by setting this parameter to true. - * Manually through the alphaBuffer parameter. + * For video data in BGRA or RGBA format, you can choose either of the following methods to set the Alpha channel data: + * Automatically fill by setting this parameter to true. + * Set via the alphaBuffer parameter. This parameter applies only to video data in BGRA or RGBA format. Specifies whether to extract the Alpha channel data from the video frame and automatically fill it into alphaBuffer : true : Extract and fill the Alpha channel data. false : (default) Do not extract or fill the Alpha channel data. */ fillAlphaBuffer?: boolean; /** - * When the video frame contains alpha channel data, it represents the relative position of alphaBuffer and the video frame. See AlphaStitchMode. + * When the video frame contains Alpha channel data, sets the relative position of alphaBuffer and the video frame. See AlphaStitchMode. */ alphaStitchMode?: AlphaStitchMode; /** @@ -960,39 +961,39 @@ export class ExternalVideoFrame { */ hdr10MetadataInfo?: Hdr10MetadataInfo; /** - * By default, the color space properties of video frames will apply the Full Range and BT.709 standard configurations. + * Color space properties of the video frame. By default, Full Range and BT.709 standard configurations are applied. 
You can customize settings based on business requirements such as custom capture or rendering. See [VideoColorSpace](https://developer.mozilla.org/en-US/docs/Web/API/VideoColorSpace). */ colorSpace?: ColorSpace; } /** - * Configurations of the video frame. + * Properties of a video frame. * - * Note that the buffer provides a pointer to a pointer. This interface cannot modify the pointer of the buffer, but it can modify the content of the buffer. + * The buffer is a pointer to a pointer. This interface cannot modify the pointer of the buffer, only its contents. */ export class VideoFrame { /** - * The pixel format. See VideoPixelFormat. + * Pixel format. See VideoPixelFormat. */ type?: VideoPixelFormat; /** - * The width of the video, in pixels. + * Video pixel width. */ width?: number; /** - * The height of the video, in pixels. + * Video pixel height. */ height?: number; /** - * For YUV data, the line span of the Y buffer; for RGBA data, the total data length. When dealing with video data, it is necessary to process the offset between each line of pixel data based on this parameter, otherwise it may result in image distortion. + * For YUV data, the stride of the Y buffer; for RGBA data, the total data length. When processing video data, use this parameter to handle the offset between rows of pixel data. Otherwise, image distortion may occur. */ yStride?: number; /** - * For YUV data, the line span of the U buffer; for RGBA data, the value is 0. When dealing with video data, it is necessary to process the offset between each line of pixel data based on this parameter, otherwise it may result in image distortion. + * For YUV data, the stride of the U buffer; for RGBA data, the value is 0. When processing video data, use this parameter to handle the offset between rows of pixel data. Otherwise, image distortion may occur. */ uStride?: number; /** - * For YUV data, the line span of the V buffer; for RGBA data, the value is 0. 
When dealing with video data, it is necessary to process the offset between each line of pixel data based on this parameter, otherwise it may result in image distortion. + * For YUV data, the stride of the V buffer; for RGBA data, the value is 0. When processing video data, use this parameter to handle the offset between rows of pixel data. Otherwise, image distortion may occur. */ vStride?: number; /** @@ -1000,49 +1001,50 @@ export class VideoFrame { */ yBuffer?: Uint8Array; /** - * For YUV data, the pointer to the U buffer; for RGBA data, the value is 0. + * For YUV data, the pointer to the U buffer; for RGBA data, the value is empty. */ uBuffer?: Uint8Array; /** - * For YUV data, the pointer to the V buffer; for RGBA data, the value is 0. + * For YUV data, the pointer to the V buffer; for RGBA data, the value is empty. */ vBuffer?: Uint8Array; /** - * The clockwise rotation of the video frame before rendering. Supported values include 0, 90, 180, and 270 degrees. + * Clockwise rotation angle to apply before rendering the video. Supported values: 0, 90, 180, and 270 degrees. */ rotation?: number; /** - * The Unix timestamp (ms) when the video frame is rendered. This timestamp can be used to guide the rendering of the video frame. This parameter is required. + * Unix timestamp (ms) when the video frame is rendered. This timestamp is required and guides the rendering of the video frame. */ renderTimeMs?: number; /** - * Reserved for future use. + * Reserved parameter. */ avsync_type?: number; /** - * This parameter only applies to video data in Texture format. The MetaData buffer. The default value is NULL. + * Applicable only to Texture format video data. Metadata buffer. Default is NULL. */ metadata_buffer?: Uint8Array; /** - * This parameter only applies to video data in Texture format. The MetaData size. The default value is 0. + * Applicable only to Texture format video data. Metadata size. Default is 0. 
*/ metadata_size?: number; /** - * This parameter only applies to video data in Texture format. Texture ID. + * Applicable only to Texture format video data. Texture ID. */ textureId?: number; /** - * This parameter only applies to video data in Texture format. Incoming 4 × 4 transformational matrix. The typical value is a unit matrix. + * Applicable only to Texture format video data. A 4x4 transformation matrix input. Typical value is an identity matrix. */ matrix?: number[]; /** - * The alpha channel data output by using portrait segmentation algorithm. This data matches the size of the video frame, with each pixel value ranging from [0,255], where 0 represents the background and 255 represents the foreground (portrait). By setting this parameter, you can render the video background into various effects, such as transparent, solid color, image, video, etc. - * In custom video rendering scenarios, ensure that both the video frame and alphaBuffer are of the Full Range type; other types may cause abnormal alpha data rendering. - * Make sure that alphaBuffer is exactly the same size as the video frame (width × height), otherwise it may cause the app to crash. + * Alpha channel data output by portrait segmentation algorithm. This data matches the video frame dimensions. Each pixel value ranges from [0, 255], where 0 represents background and 255 represents foreground (portrait). + * You can use this parameter to render various background effects such as transparent, solid color, image, or video. + * In custom video rendering scenarios, ensure both the video frame and alphaBuffer are Full Range type; other types may cause rendering issues. + * Make sure alphaBuffer matches the video frame dimensions (width × height) exactly, otherwise the app may crash. */ alphaBuffer?: Uint8Array; /** - * When the video frame contains alpha channel data, it represents the relative position of alphaBuffer and the video frame. See AlphaStitchMode. 
+ * When the video frame includes alpha channel data, sets the relative position of alphaBuffer and the video frame. See AlphaStitchMode. */ alphaStitchMode?: AlphaStitchMode; /** @@ -1050,7 +1052,7 @@ export class VideoFrame { */ pixelBuffer?: Uint8Array; /** - * The meta information in the video frame. To use this parameter, contact. + * Metadata in the video frame. Contact [technical support](https://ticket.shengwang.cn/) to use this parameter. */ metaInfo?: IVideoFrameMetaInfo; /** @@ -1058,7 +1060,7 @@ export class VideoFrame { */ hdr10MetadataInfo?: Hdr10MetadataInfo; /** - * By default, the color space properties of video frames will apply the Full Range and BT.709 standard configurations. + * Color space attributes of the video frame. By default, Full Range and BT.709 standard configurations are applied. You can customize this according to custom capture or rendering needs. See [VideoColorSpace](https://developer.mozilla.org/en-US/docs/Web/API/VideoColorSpace). */ colorSpace?: ColorSpace; } @@ -1082,25 +1084,25 @@ export enum MediaPlayerSourceType { } /** - * The frame position of the video observer. + * Video observation position. */ export enum VideoModulePosition { /** - * 1: The location of the locally collected video data after preprocessing corresponds to the onCaptureVideoFrame callback. The observed video here has the effect of video pre-processing, which can be verified by enabling image enhancement, virtual background, or watermark. + * 1: The position after local video is captured and pre-processed, corresponding to the onCaptureVideoFrame callback. The video observed here includes pre-processing effects, which can be verified by enabling beauty effects, virtual background, or watermark. */ PositionPostCapturer = 1 << 0, /** - * 2: The pre-renderer position, which corresponds to the video data in the onRenderVideoFrame callback. + * 2: The position before rendering the received remote video, corresponding to the onRenderVideoFrame callback. 
*/ PositionPreRenderer = 1 << 1, /** - * 4: The pre-encoder position, which corresponds to the video data in the onPreEncodeVideoFrame callback. The observed video here has the effects of video pre-processing and encoding pre-processing. - * To verify the pre-processing effects of the video, you can enable image enhancement, virtual background, or watermark. - * To verify the pre-encoding processing effect, you can set a lower frame rate (for example, 5 fps). + * 4: The position before local video encoding, corresponding to the onPreEncodeVideoFrame callback. The video observed here includes both pre-processing and pre-encoding processing effects: + * For pre-processing effects, you can verify by enabling beauty effects, virtual background, or watermark. + * For pre-encoding effects, you can verify by setting a lower frame rate (e.g., 5 fps). */ PositionPreEncoder = 1 << 2, /** - * 8: The position after local video capture and before pre-processing. The observed video here does not have pre-processing effects, which can be verified by enabling image enhancement, virtual background, or watermarks. + * 8: The position after local video is captured but before pre-processing. The video observed here does not include pre-processing effects and can be verified by enabling beauty effects, virtual background, or setting a watermark. */ PositionPostCapturerOrigin = 1 << 3, } @@ -1124,11 +1126,11 @@ export enum ContentInspectResult { } /** - * The type of video content moderation module. + * Type of video content inspection module. */ export enum ContentInspectType { /** - * 0: (Default) This module has no actual function. Do not set type to this value. + * 0: (Default) This module has no actual functionality. Do not set type to this value. */ ContentInspectInvalid = 0, /** @@ -1136,83 +1138,85 @@ export enum ContentInspectType { */ ContentInspectModeration = 1, /** - * 2: Video screenshot and upload via Agora self-developed extension. 
SDK takes screenshots of the video stream in the channel and uploads them. + * 2: Use Agora self-developed plugin for screenshot upload. The SDK takes screenshots of the video stream and uploads them. */ ContentInspectSupervision = 2, /** - * 3: Video screenshot and upload via extensions from Agora Extensions Marketplace. SDK uses video moderation extensions from Agora Extensions Marketplace to take screenshots of the video stream in the channel and uploads them. + * 3: Use cloud marketplace plugin for screenshot upload. The SDK uses the cloud marketplace video moderation plugin to take screenshots of the video stream and upload them. */ ContentInspectImageModeration = 3, } /** - * ContentInspectModule class, a structure used to configure the frequency of video screenshot and upload. + * ContentInspectModule struct used to configure the frequency of local screenshot uploads. */ export class ContentInspectModule { /** - * Types of functional module. See ContentInspectType. + * Type of function module. See ContentInspectType. */ type?: ContentInspectType; /** - * The frequency (s) of video screenshot and upload. The value should be set as larger than 0. The default value is 0, the SDK does not take screenshots. Agora recommends that you set the value as 10; you can also adjust it according to your business needs. + * Interval for local screenshot uploads in seconds. The value must be greater than 0. Default is 0, which means no screenshot upload. Recommended value is 10 seconds, but you can adjust it based on your business needs. */ interval?: number; /** - * @ignore + * Position of the video observer. See VideoModulePosition. */ position?: VideoModulePosition; } /** - * Screenshot and upload configuration. + * Local screenshot upload configuration. */ export class ContentInspectConfig { /** - * Additional information on the video content (maximum length: 1024 Bytes). The SDK sends the screenshots and additional information on the video content to the Agora server. 
Once the video screenshot and upload process is completed, the Agora server sends the additional information and the callback notification to your server. + * Additional information, with a maximum length of 1024 bytes. + * The SDK uploads this information along with the screenshot to the Agora server. After the screenshot is complete, the Agora server sends the additional information back to your server in the callback notification. */ extraInfo?: string; /** - * (Optional) Server configuration related to uploading video screenshots via extensions from Agora Extensions Marketplace. This parameter only takes effect when type in ContentInspectModule is set to ContentInspectImageModeration. If you want to use it, contact. + * (Optional) Server configuration for video moderation services on the cloud marketplace. This parameter only takes effect when the type in ContentInspectModule is set to ContentInspectImageModeration. To use this feature, please [contact technical support](https://ticket.shengwang.cn/). */ serverConfig?: string; /** - * Functional module. See ContentInspectModule. A maximum of 32 ContentInspectModule instances can be configured, and the value range of MAX_CONTENT_INSPECT_MODULE_COUNT is an integer in [1,32]. A function module can only be configured with one instance at most. Currently only the video screenshot and upload function is supported. + * Function modules. See ContentInspectModule. + * Up to 32 ContentInspectModule instances are supported. The value range of MAX_CONTENT_INSPECT_MODULE_COUNT is an integer in [1,32]. Only one instance can be configured per function module. Currently, only screenshot upload is supported. */ modules?: ContentInspectModule[]; /** - * The number of functional modules, that is,the number of configured ContentInspectModule instances, must be the same as the number of instances configured in modules. The maximum number is 32. 
+ * Number of function modules, i.e., the number of ContentInspectModule instances configured. Must match the number of instances in modules. Maximum value is 32. */ moduleCount?: number; } /** - * The snapshot configuration. + * Video snapshot settings. */ export class SnapshotConfig { /** - * The local path (including filename extensions) of the snapshot. For example: + * Make sure the directory exists and is writable. Local path to save the snapshot, including file name and format, for example: * iOS: /App Sandbox/Library/Caches/example.jpg - * Android: /storage/emulated/0/Android/data//files/example.jpg Ensure that the path you specify exists and is writable. + * Android: /storage/emulated/0/Android/data//files/example.jpg */ filePath?: string; /** - * The position of the snapshot video frame in the video pipeline. See VideoModulePosition. + * The position of the video frame in the video pipeline for the snapshot. See VideoModulePosition. */ position?: VideoModulePosition; } /** - * This class is used to get raw PCM audio. + * This class is used to obtain raw PCM audio data. * - * You can inherit this class and implement the onFrame callback to get raw PCM audio. + * You can inherit this class and implement the onFrame callback to get PCM audio data. */ export interface IAudioPcmFrameSink { /** - * Occurs each time the player receives an audio frame. + * Callback when an audio frame is received. * - * After registering the audio frame observer, the callback occurs every time the player receives an audio frame, reporting the detailed information of the audio frame. + * After registering the audio data observer, this callback is triggered each time an audio frame is received to report audio frame information. * - * @param frame The audio frame information. See AudioPcmFrame. + * @param frame Audio frame information. See AudioPcmFrame. 
*/ onFrame?(frame: AudioPcmFrame): void; } @@ -1232,37 +1236,39 @@ export enum AudioFrameType { */ export class AudioFrame { /** - * The type of the audio frame. See AudioFrameType. + * Audio frame type. See AudioFrameType. */ type?: AudioFrameType; /** - * The number of samples per channel in the audio frame. + * Number of samples per channel. */ samplesPerChannel?: number; /** - * The number of bytes per sample. For PCM, this parameter is generally set to 16 bits (2 bytes). + * Number of bytes per sample. For PCM, typically 16 bits, i.e., 2 bytes. */ bytesPerSample?: BytesPerSample; /** - * The number of audio channels (the data are interleaved if it is stereo). - * 1: Mono. - * 2: Stereo. + * Number of channels (for stereo, data is interleaved). + * 1: Mono + * 2: Stereo */ channels?: number; /** - * The number of samples per channel in the audio frame. + * Number of samples per second per channel. */ samplesPerSec?: number; /** - * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data buffer is interleaved. The size of the data buffer is as follows: buffer = samples × channels × bytesPerSample. + * Audio data buffer (for stereo, data is interleaved). + * Buffer size buffer = samples × channels × bytesPerSample. */ buffer?: Uint8Array; /** - * The timestamp (ms) of the external audio frame. You can use this timestamp to restore the order of the captured audio frame, and synchronize audio and video frames in video scenarios, including scenarios where external video sources are used. + * Render timestamp of the external audio frame. + * You can use this timestamp to restore the order of audio frames; in scenarios with video (including those using external video sources), this parameter can be used to achieve audio-video synchronization. */ renderTimeMs?: number; /** - * Reserved for future use. + * Reserved parameter. */ avsync_type?: number; /** @@ -1312,44 +1318,47 @@ export enum AudioFramePosition { /** * Audio data format. 
* - * The SDK sets the audio data format in the following callbacks according to AudioParams. onRecordAudioFrame onPlaybackAudioFrame onMixedAudioFrame - * The SDK calculates the sampling interval through the samplesPerCall, sampleRate, and channel parameters in AudioParams, and triggers the onRecordAudioFrame, onPlaybackAudioFrame, onMixedAudioFrame, and onEarMonitoringAudioFrame callbacks according to the sampling interval. Sample interval (sec) = samplePerCall /(sampleRate × channel). - * Ensure that the sample interval ≥ 0.01 (s). + * The SDK sets the audio data format in the following callbacks based on AudioParams : onRecordAudioFrame onPlaybackAudioFrame onMixedAudioFrame + * The SDK calculates the sampling interval using the samplesPerCall, sampleRate, and channel parameters in AudioParams, and triggers the onRecordAudioFrame, onPlaybackAudioFrame, onMixedAudioFrame, and onEarMonitoringAudioFrame callbacks accordingly. + * Sampling interval = samplesPerCall / (sampleRate × channel). + * Ensure the sampling interval is not less than 0.01 (s). */ export class AudioParams { /** - * The audio sample rate (Hz), which can be set as one of the following values: - * 8000. - * (Default) 16000. - * 32000. + * Sampling rate of the data in Hz. Valid values: + * 8000 + * 16000 (default) + * 32000 * 44100 * 48000 */ sample_rate?: number; /** - * The number of audio channels, which can be set as either of the following values: - * 1: (Default) Mono. - * 2: Stereo. + * Number of audio channels. Valid values: + * 1: Mono (default) + * 2: Stereo */ channels?: number; /** - * The use mode of the audio data. See RawAudioFrameOpModeType. + * Usage mode of the data. See RawAudioFrameOpModeType. */ mode?: RawAudioFrameOpModeType; /** - * The number of samples, such as 1024 for the media push. + * Number of samples per call, typically 1024 in scenarios like CDN streaming. */ samples_per_call?: number; } /** - * The audio frame observer. + * Audio observer. 
+ * + * You can call registerAudioFrameObserver to register or unregister the IAudioFrameObserverBase audio observer. */ export interface IAudioFrameObserverBase { /** - * Gets the captured audio frame. + * Receives the raw audio data of the recording. * - * To ensure that the data format of captured audio frame is as expected, Agora recommends that you set the audio data format as follows: After calling setRecordingAudioFrameParameters to set the audio data format, call registerAudioFrameObserver to register the audio observer object, the SDK will calculate the sampling interval according to the parameters set in this method, and triggers the onRecordAudioFrame callback according to the sampling interval. + * To ensure the recorded audio data format meets expectations, you can configure it using the following methods: Call setRecordingAudioFrameParameters to set the audio format, and then call registerAudioFrameObserver to register the audio frame observer. The SDK calculates the sampling interval based on the parameters of this method and triggers the onRecordAudioFrame callback accordingly. * * @param channelId The channel ID. * @param audioFrame The raw audio data. See AudioFrame. @@ -1357,9 +1366,9 @@ export interface IAudioFrameObserverBase { onRecordAudioFrame?(channelId: string, audioFrame: AudioFrame): void; /** - * Gets the raw audio frame for playback. + * Receives the raw audio data of the playback. * - * To ensure that the data format of audio frame for playback is as expected, Agora recommends that you set the audio data format as follows: After calling setPlaybackAudioFrameParameters to set the audio data format and registerAudioFrameObserver to register the audio frame observer object, the SDK calculates the sampling interval according to the parameters set in the methods, and triggers the onPlaybackAudioFrame callback according to the sampling interval. 
+ * To ensure the playback audio data format meets expectations, you can configure it using the following methods: Call setPlaybackAudioFrameParameters to set the audio format, and then call registerAudioFrameObserver to register the audio frame observer. The SDK calculates the sampling interval based on the parameters of this method and triggers the onPlaybackAudioFrame callback accordingly. * * @param channelId The channel ID. * @param audioFrame The raw audio data. See AudioFrame. @@ -1367,19 +1376,19 @@ export interface IAudioFrameObserverBase { onPlaybackAudioFrame?(channelId: string, audioFrame: AudioFrame): void; /** - * Retrieves the mixed captured and playback audio frame. + * Retrieves the data after audio mixing of capture and playback. * - * To ensure that the data format of mixed captured and playback audio frame meets the expectations, Agora recommends that you set the data format as follows: After calling setMixedAudioFrameParameters to set the audio data format and registerAudioFrameObserver to register the audio frame observer object, the SDK calculates the sampling interval according to the parameters set in the methods, and triggers the onMixedAudioFrame callback according to the sampling interval. + * To ensure that the audio data format after capture and playback mixing meets expectations, you can set the audio data format using the following methods: call setMixedAudioFrameParameters to set the audio data format, then call registerAudioFrameObserver to register the audio observer object. The SDK will calculate the sampling interval based on the parameters in this method and trigger the onMixedAudioFrame callback accordingly. * - * @param channelId The channel ID. - * @param audioFrame The raw audio data. See AudioFrame. + * @param channelId Channel ID. + * @param audioFrame Raw audio data. See AudioFrame. */ onMixedAudioFrame?(channelId: string, audioFrame: AudioFrame): void; /** - * Gets the in-ear monitoring audio frame. 
+ * Receives the raw audio data of the ear monitoring. * - * In order to ensure that the obtained in-ear audio data meets the expectations, Agora recommends that you set the in-ear monitoring-ear audio data format as follows: After calling setEarMonitoringAudioFrameParameters to set the audio data format and registerAudioFrameObserver to register the audio frame observer object, the SDK calculates the sampling interval according to the parameters set in the methods, and triggers the onEarMonitoringAudioFrame callback according to the sampling interval. + * To ensure the ear monitoring audio data format meets expectations, you can configure it using the following methods: Call setEarMonitoringAudioFrameParameters to set the audio format, and then call registerAudioFrameObserver to register the audio frame observer. The SDK calculates the sampling interval based on the parameters of this method and triggers the onEarMonitoringAudioFrame callback accordingly. * * @param audioFrame The raw audio data. See AudioFrame. */ @@ -1387,16 +1396,18 @@ export interface IAudioFrameObserverBase { } /** - * The audio frame observer. + * Audio observer. + * + * You can call registerAudioFrameObserver to register or unregister the IAudioFrameObserver audio observer. */ export interface IAudioFrameObserver extends IAudioFrameObserverBase { /** - * Retrieves the audio frame before mixing of subscribed remote users. + * Receives the audio of the subscribed remote user before mixing. * - * Due to framework limitations, this callback does not support sending processed audio data back to the SDK. + * Due to framework limitations, this callback does not support sending the processed audio data back to the SDK. * * @param channelId The channel ID. - * @param uid The ID of subscribed remote users. + * @param uid The ID of the subscribed remote user. * @param audioFrame The raw audio data. See AudioFrame. 
*/ onPlaybackAudioFrameBeforeMixing?( @@ -1407,25 +1418,25 @@ export interface IAudioFrameObserver extends IAudioFrameObserverBase { } /** - * The audio spectrum data. + * Audio spectrum data. */ export class AudioSpectrumData { /** - * The audio spectrum data. Agora divides the audio frequency into 256 frequency domains, and reports the energy value of each frequency domain through this parameter. The value range of each energy type is [-300, 1] and the unit is dBFS. + * Audio spectrum data. Agora divides the audio frequency into 256 frequency bands and reports the energy value of each band through this parameter. The value range of each energy value is [-300,1], in dBFS. */ audioSpectrumData?: number[]; /** - * The audio spectrum data length is 256. + * The length of the audio spectrum data is 256. */ dataLength?: number; } /** - * Audio spectrum information of the remote user. + * Audio spectrum information of a remote user. */ export class UserAudioSpectrumInfo { /** - * @ignore + * Remote user ID. */ uid?: number; /** @@ -1435,24 +1446,24 @@ export class UserAudioSpectrumInfo { } /** - * The audio spectrum observer. + * Audio spectrum observer. */ export interface IAudioSpectrumObserver { /** - * Gets the statistics of a local audio spectrum. + * Receives the local audio spectrum. * - * After successfully calling registerAudioSpectrumObserver to implement the onLocalAudioSpectrum callback in IAudioSpectrumObserver and calling enableAudioSpectrumMonitor to enable audio spectrum monitoring, the SDK triggers this callback as the time interval you set to report the received remote audio data spectrum before encoding. + * After successfully calling registerAudioSpectrumObserver, implementing the onLocalAudioSpectrum callback of IAudioSpectrumObserver, and enabling audio spectrum monitoring via enableAudioSpectrumMonitor, the SDK triggers this callback at the set interval to report the pre-encoded local audio spectrum data. 
* - * @param data The audio spectrum data of the local user. See AudioSpectrumData. + * @param data The local user's audio spectrum data. See AudioSpectrumData. */ onLocalAudioSpectrum?(data: AudioSpectrumData): void; /** - * Gets the remote audio spectrum. + * Receives the remote audio spectrum. * - * After successfully calling registerAudioSpectrumObserver to implement the onRemoteAudioSpectrum callback in the IAudioSpectrumObserver and calling enableAudioSpectrumMonitor to enable audio spectrum monitoring, the SDK will trigger the callback as the time interval you set to report the received remote audio data spectrum. + * After successfully calling registerAudioSpectrumObserver, implementing the onRemoteAudioSpectrum callback of IAudioSpectrumObserver, and enabling audio spectrum monitoring via enableAudioSpectrumMonitor, the SDK triggers this callback at the set interval to report the received remote audio spectrum data. * - * @param spectrums The audio spectrum information of the remote user. See UserAudioSpectrumInfo. The number of arrays is the number of remote users monitored by the SDK. If the array is null, it means that no audio spectrum of remote users is detected. + * @param spectrums The audio spectrum information of remote users. See UserAudioSpectrumInfo. The array size equals the number of remote users detected by the SDK. An empty array indicates no remote audio spectrum was detected. * @param spectrumNumber The number of remote users. */ onRemoteAudioSpectrum?( @@ -1462,18 +1473,19 @@ export interface IAudioSpectrumObserver { } /** - * Receives encoded video images. + * Class for receiving encoded video frames. */ export interface IVideoEncodedFrameObserver { /** - * Reports that the receiver has received the to-be-decoded video frame sent by the remote end. + * Reports that the receiver has received a remote encoded video frame. 
* - * If you call the setRemoteVideoSubscriptionOptions method and set encodedFrameOnly to true, the SDK triggers this callback locally to report the received encoded video frame information. + * When you call the setRemoteVideoSubscriptionOptions method and set encodedFrameOnly to true, the SDK triggers this callback locally to report the received encoded video frame information. * - * @param uid The user ID of the remote user. - * @param imageBuffer The encoded video image buffer. - * @param length The data length of the video image. - * @param videoEncodedFrameInfo For the information of the encoded video frame, see EncodedVideoFrameInfo. + * @param channelId Channel name. + * @param uid Remote user ID. + * @param imageBuffer Video image buffer. + * @param length Data length of the video image. + * @param videoEncodedFrameInfo Information about the encoded video frame. See EncodedVideoFrameInfo. */ onEncodedVideoFrameReceived?( channelId: string, @@ -1485,30 +1497,38 @@ export interface IVideoEncodedFrameObserver { } /** - * The process mode of the video frame: + * Video frame processing mode. */ export enum VideoFrameProcessMode { /** - * Read-only mode. In this mode, you do not modify the video frame. The video frame observer is a renderer. + * Read-only mode. + * In read-only mode, you do not modify the video frame, and the video observer acts as a renderer. */ ProcessModeReadOnly = 0, /** - * Read and write mode. In this mode, you modify the video frame. The video frame observer is a video filter. + * Read-write mode. + * In read-write mode, you modify the video frame, and the video observer acts as a video filter. */ ProcessModeReadWrite = 1, } /** - * The IVideoFrameObserver class. + * Video observer. + * + * You can call registerVideoFrameObserver to register or unregister the IVideoFrameObserver video observer. */ export interface IVideoFrameObserver { /** - * Occurs each time the SDK receives a video frame captured by local devices. 
+ * Gets video data captured by the local device. * - * You can get raw video data collected by the local device through this callback. + * You can obtain the raw video data captured by the local device in the callback. + * If the video data you obtain is in RGBA format, the SDK does not support processing the Alpha channel value. + * When modifying parameters in videoFrame, ensure the modified parameters match the actual video frame in the buffer. Otherwise, unexpected rotation, distortion, or other issues may occur in the local preview or remote video. + * It is recommended to implement this callback using the C++ API. + * Due to framework limitations, this callback does not support sending the processed video data back to the SDK. * - * @param sourceType Video source types, including cameras, screens, or media player. See VideoSourceType. - * @param videoFrame The video frame. See VideoFrame. The default value of the video frame data format obtained through this callback is as follows: + * @param sourceType Video source type, which can be: camera, screen, or media player. See VideoSourceType. + * @param videoFrame Video frame data. See VideoFrame. The default format of the video frame data obtained through this callback is: * Android: I420 * iOS: I420 */ @@ -1518,16 +1538,17 @@ export interface IVideoFrameObserver { ): void; /** - * Occurs each time the SDK receives a video frame before encoding. + * Retrieves local video data before encoding. * - * After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data before encoding and then process the data according to your particular scenarios. After processing, you can send the processed video data back to the SDK in this callback. - * It is recommended that you ensure the modified parameters in videoFrame are consistent with the actual situation of the video frames in the video frame buffer. 
Otherwise, it may cause unexpected rotation, distortion, and other issues in the local preview and remote video display. - * It's recommended that you implement this callback through the C++ API. - * Due to framework limitations, this callback does not support sending processed video data back to the SDK. - * The video data that this callback gets has been preprocessed, with its content cropped and rotated, and the image enhanced. + * After successfully registering the video data observer, the SDK triggers this callback for each captured video frame. You can use this callback to retrieve the video data before encoding and process it as needed. + * After processing, you can pass the processed video data back to the SDK in this callback. + * It is recommended to implement this callback using the C++ API. + * Due to framework limitations, this callback does not support sending the processed video data back to the SDK. + * The video data obtained here has been pre-processed, such as cropping, rotation, and beautification. + * When modifying parameters in videoFrame, make sure the modified parameters match the actual video frame in the buffer. Otherwise, unexpected issues such as incorrect rotation or distortion may occur in the local preview or remote video. * - * @param sourceType The type of the video source. See VideoSourceType. - * @param videoFrame The video frame. See VideoFrame. The default value of the video frame data format obtained through this callback is as follows: + * @param sourceType Type of video source. See VideoSourceType. + * @param videoFrame Video frame data. See VideoFrame. The default video frame data format obtained through this callback is: * Android: I420 * iOS: I420 */ @@ -1542,17 +1563,17 @@ export interface IVideoFrameObserver { onMediaPlayerVideoFrame?(videoFrame: VideoFrame, mediaPlayerId: number): void; /** - * Occurs each time the SDK receives a video frame sent by the remote user. + * Retrieves video data sent by the remote user. 
 * - * After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data sent from the remote end before rendering, and then process it according to the particular scenarios. - * It is recommended that you ensure the modified parameters in videoFrame are consistent with the actual situation of the video frames in the video frame buffer. Otherwise, it may cause unexpected rotation, distortion, and other issues in the local preview and remote video display. - * If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel. - * It's recommended that you implement this callback through the C++ API. - * Due to framework limitations, this callback does not support sending processed video data back to the SDK. + * After successfully registering the video data observer, the SDK triggers this callback for each received video frame. You can use this callback to retrieve the video data sent by the remote user before rendering and process it as needed. + * If the video data type is RGBA, the SDK does not support processing the Alpha channel. + * It is recommended to implement this callback using the C++ API. + * Due to framework limitations, this callback does not support sending the processed video data back to the SDK. + * When modifying parameters in videoFrame, make sure the modified parameters match the actual video frame in the buffer. Otherwise, unexpected issues such as incorrect rotation or distortion may occur in the local preview or remote video. * - * @param channelId The channel ID. - * @param remoteUid The user ID of the remote user who sends the current video frame. - * @param videoFrame The video frame. See VideoFrame. The default value of the video frame data format obtained through this callback is as follows: + * @param channelId Channel ID. + * @param remoteUid ID of the remote user who sent the video frame. 
+ * @param videoFrame Video frame data. See VideoFrame. The default video frame data format obtained through this callback is: * Android: I420 * iOS: I420 */ @@ -1569,15 +1590,15 @@ export interface IVideoFrameObserver { } /** - * The external video frame encoding type. + * Encoding type of external video frames. */ export enum ExternalVideoSourceType { /** - * 0: The video frame is not encoded. + * 0: Unencoded video frame. */ VideoFrame = 0, /** - * 1: The video frame is encoded. + * 1: Encoded video frame. */ EncodedVideoFrame = 1, } @@ -1593,47 +1614,47 @@ export enum MediaRecorderContainerFormat { } /** - * The recording content. + * @ignore */ export enum MediaRecorderStreamType { /** - * 1: Only audio. + * @ignore */ StreamTypeAudio = 0x01, /** - * 2: Only video. + * @ignore */ StreamTypeVideo = 0x02, /** - * 3: (Default) Audio and video. + * @ignore */ StreamTypeBoth = 0x01 | 0x02, } /** - * The current recording state. + * Current recording state. */ export enum RecorderState { /** - * -1: An error occurs during the recording. See RecorderReasonCode for the reason. + * -1: Audio/video stream recording error. See RecorderReasonCode. */ RecorderStateError = -1, /** - * 2: The audio and video recording starts. + * 2: Audio/video stream recording started. */ RecorderStateStart = 2, /** - * 3: The audio and video recording stops. + * 3: Audio/video stream recording stopped. */ RecorderStateStop = 3, } /** - * The reason for the state change. + * Reasons for recording state errors. */ export enum RecorderReasonCode { /** - * 0: No error. + * 0: Everything is normal. */ RecorderReasonNone = 0, /** @@ -1705,27 +1726,25 @@ export class MediaRecorderConfiguration { } /** - * Facial information observer. + * Face information observer. * - * You can call registerFaceInfoObserver to register one IFaceInfoObserver observer. + * You can call registerFaceInfoObserver to register the IFaceInfoObserver observer. 
 */ export interface IFaceInfoObserver { /** - * Occurs when the facial information processed by speech driven extension is received. + * Reports face information processed by the speech driven extension. * - * @param outFaceInfo Output parameter, the JSON string of the facial information processed by the voice driver plugin, including the following fields: - * faces: Object sequence. The collection of facial information, with each face corresponding to an object. - * blendshapes: Object. The collection of face capture coefficients, named according to ARkit standards, with each key-value pair representing a blendshape coefficient. The blendshape coefficient is a floating point number with a range of [0.0, 1.0]. - * rotation: Object sequence. The rotation of the head, which includes the following three key-value pairs, with values as floating point numbers ranging from -180.0 to 180.0: - * pitch: Head pitch angle. A positve value means looking down, while a negative value means looking up. - * yaw: Head yaw angle. A positve value means turning left, while a negative value means turning right. - * roll: Head roll angle. A positve value means tilting to the right, while a negative value means tilting to the left. - * timestamp: String. The timestamp of the output result, in milliseconds. 
Here is an example of JSON: - * { "faces":[{ "blendshapes":{ "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0, "eyeSquintLeft":0.0, "eyeWideLeft":0.0, "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0, "eyeLookOutRight":0.0, "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0, "jawLeft":0.0, "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0, "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, "mouthFrownLeft":0.0, "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, "mouthStretchLeft":0.0, "mouthStretchRight":0.0, "mouthRollLower":0.0, "mouthRollUpper":0.0, "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0, "mouthPressRight":0.0, "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, "mouthUpperUpRight":0.0, "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, "browOuterUpLeft":0.0, "browOuterUpRight":0.0, "cheekPuff":0.0, "cheekSquintLeft":0.0, "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0, "tongueOut":0.0 }, "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5}, - * }], "timestamp":"654879876546" } + * @param outFaceInfo Output parameter. A JSON string of face information processed by the voice driver extension, containing the following fields: + * faces: Array of objects. Contains detected face information, with each object representing one face. + * blendshapes: Object. Blend shape coefficients conforming to the ARKit standard. Each key-value pair represents a blendshape coefficient as a float in the range [0.0, 1.0]. + * rotation: Array of objects. Head rotation angles, including the following key-value pairs with float values in the range [-180.0, 180.0]: + * pitch: Head tilt angle. Positive when looking down, negative when looking up. + * yaw: Horizontal head rotation. 
Positive when turning left, negative when turning right. + * roll: Vertical head rotation. Positive when tilting right, negative when tilting left. + * timestamp: String. Timestamp of the output result in milliseconds. Example JSON: { "faces":[{ "blendshapes":{ "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0, "eyeSquintLeft":0.0, "eyeWideLeft":0.0, "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0, "eyeLookOutRight":0.0, "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0, "jawLeft":0.0, "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0, "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, "mouthFrownLeft":0.0, "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, "mouthStretchLeft":0.0, "mouthStretchRight":0.0, "mouthRollLower":0.0, "mouthRollUpper":0.0, "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0, "mouthPressRight":0.0, "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, "mouthUpperUpRight":0.0, "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, "browOuterUpLeft":0.0, "browOuterUpRight":0.0, "cheekPuff":0.0, "cheekSquintLeft":0.0, "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0, "tongueOut":0.0 }, "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5}, }], "timestamp":"654879876546" } * * @returns - * true : Facial information JSON parsing successful. false : Facial information JSON parsing failed. + * true : Face info JSON parsed successfully. false : Failed to parse face info JSON. */ onFaceInfo?(outFaceInfo: string): void; } diff --git a/src/AgoraMediaPlayerTypes.ts b/src/AgoraMediaPlayerTypes.ts index 512536cb..ffbc620c 100644 --- a/src/AgoraMediaPlayerTypes.ts +++ b/src/AgoraMediaPlayerTypes.ts @@ -1,39 +1,39 @@ import './extension/AgoraMediaPlayerTypesExtension'; /** - * The playback state. 
+ * The state of the media player. */ export enum MediaPlayerState { /** - * 0: The default state. The media player returns this state code before you open the media resource or after you stop the playback. + * 0: Default state. The player returns this state code before you open a media file and after playback ends. */ PlayerStateIdle = 0, /** - * 1: Opening the media resource. + * 1: Opening the media file. */ PlayerStateOpening = 1, /** - * 2: Opens the media resource successfully. + * 2: Media file opened successfully. */ PlayerStateOpenCompleted = 2, /** - * 3: The media resource is playing. + * 3: Playing. */ PlayerStatePlaying = 3, /** - * 4: Pauses the playback. + * 4: Playback paused. */ PlayerStatePaused = 4, /** - * 5: The playback is complete. + * 5: Playback completed. */ PlayerStatePlaybackCompleted = 5, /** - * 6: The loop is complete. + * 6: All loops of playback completed. */ PlayerStatePlaybackAllLoopsCompleted = 6, /** - * 7: The playback stops. + * 7: Playback stopped. */ PlayerStateStopped = 7, /** @@ -65,13 +65,13 @@ export enum MediaPlayerState { */ PlayerStateSetTrackInternal = 56, /** - * 100: The media player fails to play the media resource. + * 100: Playback failed. */ PlayerStateFailed = 100, } /** - * Reasons for the changes in the media player status. + * Reason for media player state change. */ export enum MediaPlayerReason { /** @@ -91,19 +91,19 @@ export enum MediaPlayerReason { */ PlayerReasonNoResource = -3, /** - * -4: Invalid media resource. + * -4: Invalid resource. */ PlayerReasonInvalidMediaSource = -4, /** - * -5: The media stream type is unknown. + * -5: Unknown media stream type. */ PlayerReasonUnknownStreamType = -5, /** - * -6: The object is not initialized. + * -6: Object not initialized. */ PlayerReasonObjNotInitialized = -6, /** - * -7: The codec is not supported. + * -7: Codec not supported. 
*/ PlayerReasonCodecNotSupported = -7, /** @@ -111,31 +111,31 @@ export enum MediaPlayerReason { */ PlayerReasonVideoRenderFailed = -8, /** - * -9: An error with the internal state of the player occurs. + * -9: Invalid internal player state. */ PlayerReasonInvalidState = -9, /** - * -10: The URL of the media resource cannot be found. + * -10: URL not found. */ PlayerReasonUrlNotFound = -10, /** - * -11: Invalid connection between the player and the Agora Server. + * -11: Invalid connection between the player and Agora server. */ PlayerReasonInvalidConnectionState = -11, /** - * -12: The playback buffer is insufficient. + * -12: Insufficient data in the playback buffer. */ PlayerReasonSrcBufferUnderflow = -12, /** - * -13: The playback is interrupted. + * -13: Playback was interrupted abnormally and ended. */ PlayerReasonInterrupted = -13, /** - * -14: The SDK does not support the method being called. + * -14: Unsupported API call by the SDK. */ PlayerReasonNotSupported = -14, /** - * -15: The authentication information of the media resource is expired. + * -15: Authentication information for the media resource network path has expired. */ PlayerReasonTokenExpired = -15, /** @@ -143,29 +143,29 @@ export enum MediaPlayerReason { */ PlayerReasonIpExpired = -16, /** - * -17: An unknown error. + * -17: Unknown error. */ PlayerReasonUnknown = -17, } /** - * The type of the media stream. + * The type of media stream. */ export enum MediaStreamType { /** - * 0: The type is unknown. + * 0: Unknown type. */ StreamTypeUnknown = 0, /** - * 1: The video stream. + * 1: Video stream. */ StreamTypeVideo = 1, /** - * 2: The audio stream. + * 2: Audio stream. */ StreamTypeAudio = 2, /** - * 3: The subtitle stream. + * 3: Subtitle stream. */ StreamTypeSubtitle = 3, } @@ -175,43 +175,43 @@ export enum MediaStreamType { */ export enum MediaPlayerEvent { /** - * 0: The player begins to seek to a new playback position. + * 0: Seek started. 
*/ PlayerEventSeekBegin = 0, /** - * 1: The player finishes seeking to a new playback position. + * 1: Seek completed. */ PlayerEventSeekComplete = 1, /** - * 2: An error occurs when seeking to a new playback position. + * 2: Seek error. */ PlayerEventSeekError = 2, /** - * 5: The audio track used by the player has been changed. + * 5: Current audio track changed. */ PlayerEventAudioTrackChanged = 5, /** - * 6: The currently buffered data is not enough to support playback. + * 6: Current buffer is insufficient for playback. */ PlayerEventBufferLow = 6, /** - * 7: The currently buffered data is just enough to support playback. + * 7: Current buffer is just enough for playback. */ PlayerEventBufferRecover = 7, /** - * 8: The audio or video playback freezes. + * 8: Audio or video stutter occurred. */ PlayerEventFreezeStart = 8, /** - * 9: The audio or video playback resumes without freezing. + * 9: Audio and video stutter stopped. */ PlayerEventFreezeStop = 9, /** - * 10: The player starts switching the media resource. + * 10: Media resource switching started. */ PlayerEventSwitchBegin = 10, /** - * 11: Media resource switching is complete. + * 11: Media resource switching completed. */ PlayerEventSwitchComplete = 11, /** @@ -219,15 +219,15 @@ export enum MediaPlayerEvent { */ PlayerEventSwitchError = 12, /** - * 13: The first video frame is rendered. + * 13: First video frame displayed. */ PlayerEventFirstDisplayed = 13, /** - * 14: The cached media files reach the limit in number. + * 14: Reached the maximum number of cacheable files. */ PlayerEventReachCacheFileMaxCount = 14, /** - * 15: The cached media files reach the limit in aggregate storage space. + * 15: Reached the maximum size of cacheable files. */ PlayerEventReachCacheFileMaxSize = 15, /** @@ -249,25 +249,25 @@ export enum MediaPlayerEvent { } /** - * Events that occur when media resources are preloaded. + * Events that occur when preloading media resources. 
*/ export enum PlayerPreloadEvent { /** - * 0: Starts preloading media resources. + * 0: Start preloading media resources. */ PlayerPreloadEventBegin = 0, /** - * 1: Preloading media resources is complete. + * 1: Preloading of media resources completed. */ PlayerPreloadEventComplete = 1, /** - * 2: An error occurs when preloading media resources. + * 2: Error occurred while preloading media resources. */ PlayerPreloadEventError = 2, } /** - * The detailed information of the media stream. + * All information about the player media stream. */ export class PlayerStreamInfo { /** @@ -279,7 +279,7 @@ export class PlayerStreamInfo { */ streamType?: MediaStreamType; /** - * The codec of the media stream. + * The codec specification of the media stream. */ codecName?: string; /** @@ -287,107 +287,107 @@ export class PlayerStreamInfo { */ language?: string; /** - * This parameter only takes effect for video streams, and indicates the video frame rate (fps). + * Applies to video streams only. Indicates the video frame rate (fps). */ videoFrameRate?: number; /** - * This parameter only takes effect for video streams, and indicates the video bitrate (bps). + * Applies to video streams only. Indicates the video bitrate (bps). */ videoBitRate?: number; /** - * This parameter only takes effect for video streams, and indicates the video width (pixel). + * Applies to video streams only. Indicates the video width (px). */ videoWidth?: number; /** - * This parameter only takes effect for video streams, and indicates the video height (pixel). + * Applies to video streams only. Indicates the video height (px). */ videoHeight?: number; /** - * This parameter only takes effect for video streams, and indicates the video rotation angle. + * Applies to video streams only. Indicates the rotation angle. */ videoRotation?: number; /** - * This parameter only takes effect for audio streams, and indicates the audio sample rate (Hz). + * Applies to audio streams only. 
Indicates the audio sample rate (Hz). */ audioSampleRate?: number; /** - * This parameter only takes effect for audio streams, and indicates the audio channel number. + * Applies to audio streams only. Indicates the number of audio channels. */ audioChannels?: number; /** - * This parameter only takes effect for audio streams, and indicates the bit number of each audio sample. + * Applies to audio streams only. Indicates the number of bits per audio sample (bit). */ audioBitsPerSample?: number; /** - * The total duration (ms) of the media stream. + * The duration of the media stream (milliseconds). */ duration?: number; } /** - * Information about the video bitrate of the media resource being played. + * Video bitrate information during media playback. */ export class SrcInfo { /** - * The video bitrate (Kbps) of the media resource being played. + * Video bitrate (Kbps) during media playback. */ bitrateInKbps?: number; /** - * The name of the media resource. + * Name of the media resource. */ name?: string; } /** - * The type of media metadata. + * Media metadata type. */ export enum MediaPlayerMetadataType { /** - * 0: The type is unknown. + * 0: Unknown type. */ PlayerMetadataTypeUnknown = 0, /** - * 1: The type is SEI. + * 1: SEI (Supplemental Enhancement Information) type. */ PlayerMetadataTypeSei = 1, } /** - * Statistics about the media files being cached. + * Statistics of cached files. */ export class CacheStatistics { /** - * The size (bytes) of the media file being played. + * Size of the media file played this time, in bytes. */ fileSize?: number; /** - * The size (bytes) of the media file that you want to cache. + * Size of the cached data of the media file played this time, in bytes. */ cacheSize?: number; /** - * The size (bytes) of the media file that has been downloaded. + * Size of the downloaded media file played this time, in bytes. */ downloadSize?: number; } /** - * The information of the media file being played. 
+ * Information about the currently playing media resource. */ export class PlayerPlaybackStats { /** - * The frame rate (fps) of the video. + * Video frame rate, in fps. */ videoFps?: number; /** - * The bitrate (kbps) of the video. + * Video bitrate, in kbps. */ videoBitrateInKbps?: number; /** - * The bitrate (kbps) of the audio. + * Audio bitrate, in kbps. */ audioBitrateInKbps?: number; /** - * The total bitrate (kbps) of the media stream. + * Total bitrate of the media stream, in kbps. */ totalBitrateInKbps?: number; } @@ -401,15 +401,15 @@ export class PlayerUpdatedInfo { */ internalPlayerUuid?: string; /** - * The ID of a deivce. + * Device ID that identifies a device. */ deviceId?: string; /** - * Height (pixel) of the video. + * Video height (pixels). */ videoHeight?: number; /** - * Width (pixel) of the video. + * Video width (pixels). */ videoWidth?: number; /** @@ -417,52 +417,51 @@ export class PlayerUpdatedInfo { */ audioSampleRate?: number; /** - * The number of audio channels. + * Number of audio channels. */ audioChannels?: number; /** - * The number of bits per audio sample point. + * Number of bits per audio sample (bit). */ audioBitsPerSample?: number; } /** - * Information related to the media file to be played and the playback scenario configurations. + * Information and playback settings for the media file to be played. */ export class MediaSource { /** - * The URL of the media file to be played. + * URL of the media resource to be played. */ url?: string; /** - * The URI (Uniform Resource Identifier) of the media file. + * URI (Uniform Resource Identifier) of the media file, used to identify the media file. */ uri?: string; /** - * The starting position (ms) for playback. The default value is 0. + * Start playback position in milliseconds. Default is 0. */ startPos?: number; /** - * Whether to enable autoplay once the media file is opened: true : (Default) Yes. false : No. 
If autoplay is disabled, you need to call the play method to play a media file after it is opened. + * If you disable auto-play, call the play method after opening the media file to start playback. Whether to enable auto-play after opening the media file: true : (Default) Enable auto-play. false : Disable auto-play. */ autoPlay?: boolean; /** - * Whether to cache the media file when it is being played: true : Enables caching. false : (Default) Disables caching. - * Agora only supports caching on-demand audio and video streams that are not transmitted in HLS protocol. - * If you need to enable caching, pass in a value to uri; otherwise, caching is based on the url of the media file. - * If you enable this function, the Media Player caches part of the media file being played on your local device, and you can play the cached media file without internet connection. The statistics about the media file being cached are updated every second after the media file is played. See CacheStatistics. + * The SDK currently only supports caching for on-demand streams, not for on-demand streams transmitted via HLS. + * Set a value for uri before enabling caching; otherwise, the player uses the media file's url as the cache index. + * When real-time caching is enabled, the player preloads part of the media file to local storage. When you play the file again, the player loads data from the cache to save bandwidth. The statistics of the cached media file update every second after playback starts. See CacheStatistics. Whether to enable real-time caching for this playback: true : Enable real-time caching. false : (Default) Disable real-time caching. */ enableCache?: boolean; /** - * Whether to allow the selection of different audio tracks when playing this media file: true : Allow to select different audio tracks. false : (Default) Do not allow to select different audio tracks. 
If you need to set different audio tracks for local playback and publishing to the channel, you need to set this parameter to true, and then call the selectMultiAudioTrack method to select the audio track. + * Whether to allow selecting different audio tracks for this playback: true : Allow selecting different audio tracks. false : (Default) Do not allow selecting different audio tracks. If you need to set different audio tracks for local playback and publishing to remote, set this parameter to true and then call the selectMultiAudioTrack method to set the audio track. */ enableMultiAudioTrack?: boolean; /** - * Whether the media resource to be opened is a live stream or on-demand video distributed through Media Broadcast service: true : The media resource to be played is a live or on-demand video distributed through Media Broadcast service. false : (Default) The media resource is not a live stream or on-demand video distributed through Media Broadcast service. If you need to open a live stream or on-demand video distributed through Broadcast Streaming service, pass in the URL of the media resource to url, and set isAgoraSource as true; otherwise, you don't need to set the isAgoraSource parameter. + * If the media resource you want to open is a live or on-demand stream distributed via Agora's CDN, pass the stream URL to url and set isAgoraSource to true. Otherwise, you do not need to set isAgoraSource. Whether the opened media resource is a live or on-demand stream distributed via Agora's CDN: true : The media resource is distributed via Agora's CDN. false : (Default) The media resource is not distributed via Agora's CDN. */ isAgoraSource?: boolean; /** - * Whether the media resource to be opened is a live stream: true : The media resource is a live stream. false : (Default) The media resource is not a live stream. 
If the media resource you want to open is a live stream, Agora recommends that you set this parameter as true so that the live stream can be loaded more quickly. If the media resource you open is not a live stream, but you set isLiveSource as true, the media resource is not to be loaded more quickly. + * Only when the media resource is a live stream, setting isLiveSource to true can speed up the opening of the media resource. Whether the opened media resource is a live stream: true : Live stream. false : (Default) Not a live stream. If the media resource is a live stream, it is recommended to set this parameter to true to speed up the opening of the live stream. */ isLiveSource?: boolean; } diff --git a/src/AgoraRtcRenderView.tsx b/src/AgoraRtcRenderView.tsx index 347f3516..b121e23c 100644 --- a/src/AgoraRtcRenderView.tsx +++ b/src/AgoraRtcRenderView.tsx @@ -7,43 +7,43 @@ import AgoraRtcSurfaceViewNativeComponent from './specs/AgoraRtcSurfaceViewNativ import AgoraRtcTextureViewNativeComponent from './specs/AgoraRtcTextureViewNativeComponent'; /** - * A common property for RtcSurfaceView and RtcTextureView. + * Common properties of RtcSurfaceView and RtcTextureView. */ export interface RtcRendererViewProps extends ViewProps { /** - * The local video view and settings. See VideoCanvas. + * Local video display properties. See VideoCanvas. */ canvas: VideoCanvas; /** - * The connection infomation. See RtcConnection. + * Connection information. See RtcConnection. */ connection?: RtcConnection; } /** - * Properties of the RtcSurfaceView. + * Properties of RtcSurfaceView. */ export interface RtcSurfaceViewProps extends RtcRendererViewProps { /** - * Controls whether to place the surface of the RtcSurfaceView on top of the window: true : Place it on top of the window. false : Do not place it on top of another RtcSurfaceView in the window. + * Whether to place the surface layer of the RtcSurfaceView above the window: true : Place above the window. 
false : Do not place above the window. */ zOrderOnTop?: boolean; /** - * Controls whether to place the surface of the RtcSurfaceView on top of another RtcSurfaceView in the window (but still behind the window): true : Place it on top of another RtcSurfaceView in the window. false : Do not place it on top of another RtcSurfaceView in the window. + * Whether to place the surface layer of the RtcSurfaceView above another RtcSurfaceView in the window (but still below the window): true : Place above another RtcSurfaceView in the window. false : Do not place above another RtcSurfaceView in the window. */ zOrderMediaOverlay?: boolean; } /** - * The RtcSurfaceView class. + * RtcSurfaceView class. * - * This class is used for rendering. - * Android: This class corresponds to the native SurfaceView. - * iOS: This class corresponds to the native UIView. To ensure the rendering of the image, before calling this component, you should proceed based on whether you are joining a channel: - * Not joining a channel: First call startPreview, and then call enableVideo. - * Joining a channel: First ensure capture is enabled, and then call enableVideo. + * This class is used for rendering: + * Android: Corresponds to the native SurfaceView of the Android system. + * iOS: Corresponds to the native UIView of the iOS system. To ensure rendering works, perform the following operations before calling this component depending on whether you have joined a channel: + * If not joined: Call startPreview first, then call enableVideo. + * If joined: Start capture first, then call enableVideo. Related references: RtcSurfaceViewProps RtcRendererViewProps */ export class RtcSurfaceView extends IAgoraRtcRenderView { /** @@ -55,11 +55,12 @@ export class RtcSurfaceView extends IAgoraRtcRenderView { } /** - * The RtcTextureView class. + * RtcTextureView class. * - * This class is used for rendering and corresponds to the Android native TextureView. This class is only available for the Android platform. 
To ensure the rendering of the image, before calling this component, you should proceed based on whether you are joining a channel: - * Not joining a channel: First call startPreview, and then call enableVideo. - * Joining a channel: First ensure capture is enabled, and then call enableVideo. + * This class is used for rendering. Corresponds to the native TextureView of the Android system. + * To ensure rendering works, perform the following operations before calling this component depending on whether you have joined a channel: + * If not joined: Call startPreview first, then call enableVideo. + * If joined: Start capture first, then call enableVideo. Related reference: RtcRendererViewProps The RtcTextureView class is for Android only and not supported on iOS. */ export class RtcTextureView extends IAgoraRtcRenderView { /** diff --git a/src/IAgoraLog.ts b/src/IAgoraLog.ts index daeef436..b82ebd57 100644 --- a/src/IAgoraLog.ts +++ b/src/IAgoraLog.ts @@ -1,27 +1,27 @@ import './extension/IAgoraLogExtension'; /** - * The output log level of the SDK. + * Log output level. */ export enum LogLevel { /** - * 0: Do not output any log information. + * 0: No log output. */ LogLevelNone = 0x0000, /** - * 0x0001: (Default) Output FATAL, ERROR, WARN, and INFO level log information. We recommend setting your log filter to this level. + * 0x0001: (Default) Outputs logs at FATAL, ERROR, WARN, and INFO levels. It is recommended to set the log level to this. */ LogLevelInfo = 0x0001, /** - * 0x0002: Output FATAL, ERROR, and WARN level log information. + * 0x0002: Outputs logs at FATAL, ERROR, and WARN levels only. */ LogLevelWarn = 0x0002, /** - * 0x0004: Output FATAL and ERROR level log information. + * 0x0004: Outputs logs at FATAL and ERROR levels only. */ LogLevelError = 0x0004, /** - * 0x0008: Output FATAL level log information. + * 0x0008: Outputs logs at FATAL level only. 
*/ LogLevelFatal = 0x0008, /** @@ -35,31 +35,31 @@ export enum LogLevel { } /** - * The output log level of the SDK. + * Log filter level. */ export enum LogFilterType { /** - * 0: Do not output any log information. + * 0: No log output. */ LogFilterOff = 0, /** - * 0x080f: Output all log information. Set your log filter to this level if you want to get the most complete log file. + * 0x080f: Outputs all API log information. Set the log level to this if you want the most complete logs. */ LogFilterDebug = 0x080f, /** - * 0x000f: Output LogFilterCritical, LogFilterError, LogFilterWarn, and LogFilterInfo level log information. We recommend setting your log filter to this level. + * 0x000f: Outputs logs at LogFilterCritical, LogFilterError, LogFilterWarn, and LogFilterInfo levels. It is recommended to set the log level to this. */ LogFilterInfo = 0x000f, /** - * 0x000e: Output LogFilterCritical, LogFilterError, and LogFilterWarn level log information. + * 0x000e: Outputs logs at LogFilterCritical, LogFilterError, and LogFilterWarn levels. */ LogFilterWarn = 0x000e, /** - * 0x000c: Output LogFilterCritical and LogFilterError level log information. + * 0x000c: Outputs logs at LogFilterCritical and LogFilterError levels. */ LogFilterError = 0x000c, /** - * 0x0008: Output LogFilterCritical level log information. + * 0x0008: Outputs logs at LogFilterCritical level only. */ LogFilterCritical = 0x0008, /** @@ -69,21 +69,23 @@ export enum LogFilterType { } /** - * Configuration of Agora SDK log files. + * Configuration of the SDK log file. */ export class LogConfig { /** - * The complete path of the log files. Agora recommends using the default log directory. If you need to modify the default directory, ensure that the directory you specify exists and is writable. The default log directory is: - * Android: /storage/emulated/0/Android/data//files/agorasdk.log. - * iOS: App Sandbox/Library/caches/agorasdk.log. + * Full path of the log file. 
Agora recommends using the default log path. If you need to change the default path, make sure the specified path exists and is writable. + * Default paths: + * Android: /storage/emulated/0/Android/data/<package_name>/files/agorasdk.log + * iOS: App Sandbox/Library/caches/agorasdk.log */ filePath?: string; /** - * The size (KB) of an agorasdk.log file. The value range is [128,20480]. The default value is 2,048 KB. If you set fileSizeInKByte smaller than 128 KB, the SDK automatically adjusts it to 128 KB; if you set fileSizeInKByte greater than 20,480 KB, the SDK automatically adjusts it to 20,480 KB. + * Size of a single agorasdk.log log file in KB. The value range is [128, 20480], and the default is 2,048 KB. If you set fileSizeInKByte to less than 128 KB, the SDK automatically adjusts it to 128 KB; if you set it to more than 20,480 KB, the SDK adjusts it to 20,480 KB. */ fileSizeInKB?: number; /** - * The output level of the SDK log file. See LogLevel. For example, if you set the log level to WARN, the SDK outputs the logs within levels FATAL, ERROR, and WARN. + * Log output level of the SDK. See LogLevel. + * For example, if you choose the WARN level, you will see all logs at FATAL, ERROR, and WARN levels. */ level?: LogLevel; } diff --git a/src/IAgoraMediaEngine.ts b/src/IAgoraMediaEngine.ts index a6dd410b..3032d91b 100644 --- a/src/IAgoraMediaEngine.ts +++ b/src/IAgoraMediaEngine.ts @@ -16,7 +16,7 @@ import { } from './AgoraMediaBase'; /** - * The channel mode. + * Channel mode. */ export enum AudioMixingDualMonoMode { /** @@ -24,117 +24,122 @@ export enum AudioMixingDualMonoMode { */ AudioMixingDualMonoAuto = 0, /** - * 1: Left channel mode. This mode replaces the audio of the right channel with the audio of the left channel, which means the user can only hear the audio of the left channel. + * 1: Left channel mode. This mode replaces the right channel audio with the left channel audio, so the user only hears the left channel audio. 
*/ AudioMixingDualMonoL = 1, /** - * 2: Right channel mode. This mode replaces the audio of the left channel with the audio of the right channel, which means the user can only hear the audio of the right channel. + * 2: Right channel mode. This mode replaces the left channel audio with the right channel audio, so the user only hears the right channel audio. */ AudioMixingDualMonoR = 2, /** - * 3: Mixed channel mode. This mode mixes the audio of the left channel and the right channel, which means the user can hear the audio of the left channel and the right channel at the same time. + * 3: Mixed mode. This mode overlays the left and right channel data, so the user hears both left and right channel audio simultaneously. */ AudioMixingDualMonoMix = 3, } /** - * The IMediaEngine class. + * IMediaEngine class. */ export abstract class IMediaEngine { /** - * Registers an audio frame observer object. + * Registers an audio frame observer. * - * Call this method to register an audio frame observer object (register a callback). When you need the SDK to trigger the onMixedAudioFrame, onRecordAudioFrame, onPlaybackAudioFrame, onPlaybackAudioFrameBeforeMixing or onEarMonitoringAudioFrame callback, you need to use this method to register the callbacks. + * This method registers an audio frame observer, i.e., registers callbacks. You need to call this method to register callbacks when the SDK needs to trigger onMixedAudioFrame, onRecordAudioFrame, onPlaybackAudioFrame, onPlaybackAudioFrameBeforeMixing, and onEarMonitoringAudioFrame. * - * @param observer The observer instance. See IAudioFrameObserver. Agora recommends calling this method after receiving onLeaveChannel to release the audio observer object. + * @param observer Instance of the interface object. See IAudioFrameObserver. It is recommended to call this after receiving onLeaveChannel to release the audio frame observer. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract registerAudioFrameObserver(observer: IAudioFrameObserver): number; /** - * Registers a raw video frame observer object. + * Registers a raw video frame observer. * - * If you want to observe raw video frames (such as YUV or RGBA format), Agora recommends that you implement one IVideoFrameObserver class with this method. When calling this method to register a video observer, you can register callbacks in the IVideoFrameObserver class as needed. After you successfully register the video frame observer, the SDK triggers the registered callbacks each time a video frame is received. + * If you want to observe raw video frames (such as YUV or RGBA format), Agora recommends registering an IVideoFrameObserver class using this method. + * When registering the video observer via this method, you can choose to register callbacks from the IVideoFrameObserver class as needed. After successful registration, the SDK triggers the registered callbacks when each video frame is captured. When handling the callback, you need to consider changes in the width and height parameters of the video frame, as the observed video frames may vary in the following scenarios: + * Resolution may decrease stepwise when network conditions are poor. + * When the user manually adjusts the resolution, the resolution reported in the callback also changes. * - * @param observer The observer instance. See IVideoFrameObserver. + * @param observer Instance of the interface object. See IVideoFrameObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract registerVideoFrameObserver(observer: IVideoFrameObserver): number; /** - * Registers a receiver object for the encoded video image. 
+ * Registers a video frame observer for encoded video images. * - * If you only want to observe encoded video frames (such as H.264 format) without decoding and rendering the video, Agora recommends that you implement one IVideoEncodedFrameObserver class through this method. Call this method before joining a channel. + * If you only want to observe encoded video frames (e.g., H.264 format) without decoding and rendering them, Agora recommends registering an IVideoEncodedFrameObserver class using this method. This method must be called before joining a channel. * - * @param observer The video frame observer object. See IVideoEncodedFrameObserver. + * @param observer Video frame observer. See IVideoEncodedFrameObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract registerVideoEncodedFrameObserver( observer: IVideoEncodedFrameObserver ): number; /** - * Registers a facial information observer. + * Registers a face information observer. * - * You can call this method to register the onFaceInfo callback to receive the facial information processed by Agora speech driven extension. When calling this method to register a facial information observer, you can register callbacks in the IFaceInfoObserver class as needed. After successfully registering the facial information observer, the SDK triggers the callback you have registered when it captures the facial information converted by the speech driven extension. - * Call this method before joining a channel. - * Before calling this method, you need to make sure that the speech driven extension has been enabled by calling enableExtension. + * You can call this method to register the onFaceInfo callback and receive face information processed by the Agora voice driver extension. 
When registering the face info observer, you can register callbacks in the IFaceInfoObserver class as needed. After successful registration, the SDK triggers the registered callback when face information processed by the voice driver extension is captured. + * This method must be called before joining a channel. + * Before calling this method, make sure you have called enableExtension to enable the voice driver extension. * - * @param observer Facial information observer, see IFaceInfoObserver. + * @param observer Face information observer. See IFaceInfoObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract registerFaceInfoObserver(observer: IFaceInfoObserver): number; /** - * Pushes the external audio frame. + * Pushes external audio frames. * - * Call this method to push external audio frames through the audio track. + * Call this method to push external audio frames through an audio track. * - * @param frame The external audio frame. See AudioFrame. - * @param trackId The audio track ID. If you want to publish a custom external audio source, set this parameter to the ID of the corresponding custom audio track you want to publish. + * @param frame External audio frame. See AudioFrame. + * @param trackId Audio track ID. If you want to publish a custom external audio source, set this parameter to the custom audio track ID you want to publish. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract pushAudioFrame(frame: AudioFrame, trackId?: number): number; /** - * Pulls the remote audio data. + * Pulls remote audio data. * - * After a successful call of this method, the app pulls the decoded and mixed audio data for playback. 
+ * After calling this method, the app actively pulls the decoded and mixed remote audio data for playback. This method and the onPlaybackAudioFrame callback can both be used to obtain remote mixed audio playback data. After enabling external audio rendering by calling setExternalAudioSink, the app will no longer receive data from the onPlaybackAudioFrame callback. Therefore, choose between this method and the onPlaybackAudioFrame callback based on your actual business needs. The two have different processing mechanisms, detailed as follows: + * After calling this method, the app actively pulls audio data. By setting the audio data, the SDK can adjust the buffer to help the app handle latency, effectively avoiding audio playback jitter. + * After registering the onPlaybackAudioFrame callback, the SDK pushes audio data to the app through the callback. When the app handles audio frame latency, it may cause audio playback jitter. This method is only used to pull remote mixed audio playback data. To obtain raw captured audio data or raw audio playback data of each individual stream before mixing, you can register the corresponding callback by calling registerAudioFrameObserver. * * @returns - * The AudioFrame instance, if the method call succeeds. - * An error code, if the call fails,. + * On success, returns an AudioFrame object. + * On failure, returns an error code. */ abstract pullAudioFrame(frame: AudioFrame): number; /** - * Configures the external video source. + * Sets the external video source. * - * After calling this method to enable an external video source, you can call pushVideoFrame to push external video data to the SDK. + * After enabling an external video source by calling this method, you can call pushVideoFrame to push external video data to the SDK. Switching video sources dynamically within the channel is not supported. 
If you have enabled an external video source and joined a channel, to switch to an internal video source, you must first leave the channel, then call this method to disable the external video source, and rejoin the channel. * - * @param enabled Whether to use the external video source: true : Use the external video source. The SDK prepares to accept the external video frame. false : (Default) Do not use the external video source. - * @param useTexture Whether to use the external video frame in the Texture format. true : Use the external video frame in the Texture format. false : (Default) Do not use the external video frame in the Texture format. + * @param enabled Whether to enable the external video source: true : Enable the external video source. The SDK prepares to receive external video frames. false : (default) Do not enable the external video source. + * @param useTexture Whether to use external video frames in Texture format: true : Use external video frames in Texture format. false : Do not use external video frames in Texture format. * @param sourceType Whether the external video frame is encoded. See ExternalVideoSourceType. - * @param encodedVideoOption Video encoding options. This parameter needs to be set if sourceType is EncodedVideoFrame. To set this parameter, contact. + * @param encodedVideoOption Video encoding options. If sourceType is EncodedVideoFrame, this parameter must be set. You can [contact technical support](https://ticket.shengwang.cn/) to learn how to set this parameter. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setExternalVideoSource( enabled: boolean, @@ -149,19 +154,19 @@ export abstract class IMediaEngine { abstract setExternalRemoteEglContext(eglContext: any): number; /** - * Sets the external audio source parameters. 
+ * Sets external audio capture parameters. * - * Deprecated: This method is deprecated, use createCustomAudioTrack instead. + * Deprecated: This method is obsolete. Use createCustomAudioTrack instead. * - * @param enabled Whether to enable the external audio source: true : Enable the external audio source. false : (Default) Disable the external audio source. - * @param sampleRate The sample rate (Hz) of the external audio source which can be set as 8000, 16000, 32000, 44100, or 48000. - * @param channels The number of channels of the external audio source, which can be set as 1 (Mono) or 2 (Stereo). - * @param localPlayback Whether to play the external audio source: true : Play the external audio source. false : (Default) Do not play the external source. - * @param publish Whether to publish audio to the remote users: true : (Default) Publish audio to the remote users. false : Do not publish audio to the remote users. + * @param enabled Whether to enable the use of external audio sources: true : Enable external audio source. false : (Default) Disable external audio source. + * @param sampleRate Sampling rate (Hz) of the external audio source. Can be set to 8000, 16000, 32000, 44100, or 48000. + * @param channels Number of channels of the external audio source. Can be set to 1 (mono) or 2 (stereo). + * @param localPlayback Whether to play the external audio source locally: true : Play locally. false : (Default) Do not play locally. + * @param publish Whether to publish the audio to the remote end: true : (Default) Publish to remote. false : Do not publish to remote. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setExternalAudioSource( enabled: boolean, @@ -172,19 +177,19 @@ export abstract class IMediaEngine { ): number; /** - * Creates a custom audio track. + * Creates a custom audio capture track. 
* - * Call this method before joining a channel. To publish a custom audio source, see the following steps: - * Call this method to create a custom audio track and get the audio track ID. - * Call joinChannel to join the channel. In ChannelMediaOptions, set publishCustomAudioTrackId to the audio track ID that you want to publish, and set publishCustomAudioTrack to true. - * Call pushAudioFrame and specify trackId as the audio track ID set in step 2. You can then publish the corresponding custom audio source in the channel. + * To publish custom captured audio in a channel, refer to the following steps: + * Call this method to create an audio track and obtain the audio track ID. + * When calling joinChannel to join a channel, set publishCustomAudioTrackId in ChannelMediaOptions to the audio track ID you want to publish, and set publishCustomAudioTrack to true. + * Call pushAudioFrame and specify the trackId as the audio track ID specified in step 2 to publish the corresponding custom audio source in the channel. You need to call this method before joining a channel. * - * @param trackType The type of the custom audio track. See AudioTrackType. If AudioTrackDirect is specified for this parameter, you must set publishMicrophoneTrack to false in ChannelMediaOptions when calling joinChannel to join the channel; otherwise, joining the channel fails and returns the error code -2. - * @param config The configuration of the custom audio track. See AudioTrackConfig. + * @param trackType Custom audio track type. See AudioTrackType. If AudioTrackDirect is specified, you must set publishMicrophoneTrack to false in ChannelMediaOptions when calling joinChannel, otherwise joining the channel will fail and return error code -2. + * @param config Custom audio track configuration. See AudioTrackConfig. * * @returns - * If the method call is successful, the audio track ID is returned as the unique identifier of the audio track. - * If the method call fails, 0xffffffff is returned. 
+ * On success, returns the audio track ID as the unique identifier of the audio track. + * On failure, returns 0xffffffff. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract createCustomAudioTrack( trackType: AudioTrackType, @@ -194,28 +199,28 @@ export abstract class IMediaEngine { /** * Destroys the specified audio track. * - * @param trackId The custom audio track ID returned in createCustomAudioTrack. + * @param trackId Custom audio track ID returned by the createCustomAudioTrack method. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract destroyCustomAudioTrack(trackId: number): number; /** - * Sets the external audio sink. + * Sets the external audio rendering. * - * After enabling the external audio sink, you can call pullAudioFrame to pull remote audio frames. The app can process the remote audio and play it with the audio effects that you want. + * After calling this method to enable external audio rendering, you can call pullAudioFrame to pull remote audio data. The app can process the pulled raw audio data before rendering to achieve the desired audio effect. After calling this method to enable external audio rendering, the app will no longer receive data from the onPlaybackAudioFrame callback. * - * @param enabled Whether to enable or disable the external audio sink: true : Enables the external audio sink. false : (Default) Disables the external audio sink. - * @param sampleRate The sample rate (Hz) of the external audio sink, which can be set as 16000, 32000, 44100, or 48000. - * @param channels The number of audio channels of the external audio sink: - * 1: Mono. - * 2: Stereo. + * @param enabled Sets whether to enable external audio rendering: true : Enable external audio rendering. 
false : (Default) Disable external audio rendering. + * @param sampleRate The sample rate (Hz) for external audio rendering. Can be set to 16000, 32000, 44100, or 48000. + * @param channels The number of channels for external audio rendering: + * 1: Mono + * 2: Stereo * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setExternalAudioSink( enabled: boolean, @@ -232,21 +237,21 @@ export abstract class IMediaEngine { ): number; /** - * Pushes the external raw video frame to the SDK through video tracks. + * Publishes an external raw video frame to the channel through a custom video track. * - * To publish a custom video source, see the following steps: - * Call createCustomVideoTrack to create a video track and get the video track ID. If you only need to push one custom video source to the channel, you can directly call the setExternalVideoSource method and the SDK will automatically create a video track with the videoTrackId set to 0. - * Call joinChannel to join the channel. In ChannelMediaOptions, set customVideoTrackId to the video track ID that you want to publish, and set publishCustomVideoTrack to true. - * Call this method and specify videoTrackId as the video track ID set in step 2. You can then publish the corresponding custom video source in the channel. After calling this method, even if you stop pushing external video frames to the SDK, the custom video stream will still be counted as the video duration usage and incur charges. Agora recommends that you take appropriate measures based on the actual situation to avoid such video billing. - * If you no longer need to capture external video data, you can call destroyCustomVideoTrack to destroy the custom video track. 
- * If you only want to use the external video data for local preview and not publish it in the channel, you can call muteLocalVideoStream to cancel sending video stream or call updateChannelMediaOptions to set publishCustomVideoTrack to false. + * When you need to publish a custom captured video in the channel, refer to the following steps: + * Call the createCustomVideoTrack method to create a video track and obtain the video track ID. + * When calling joinChannel to join the channel, set customVideoTrackId in ChannelMediaOptions to the video track ID you want to publish, and set publishCustomVideoTrack to true. + * Call this method and specify videoTrackId as the video track ID specified in step 2 to publish the corresponding custom video source in the channel. After calling this method, even if you stop pushing external video frames to the SDK, the custom captured video stream will still be counted in video duration usage and incur charges. Agora recommends taking appropriate measures based on your actual situation to avoid such charges: + * If you no longer need to capture external video data, call destroyCustomVideoTrack to destroy the custom captured video track. + * If you only want to use the captured external video data for local preview and not publish it in the channel, call muteLocalVideoStream to stop sending the video stream, or call updateChannelMediaOptions and set publishCustomVideoTrack to false. * - * @param frame The external raw video frame to be pushed. See ExternalVideoFrame. - * @param videoTrackId The video track ID returned by calling the createCustomVideoTrack method. If you only need to push one custom video source, set videoTrackId to 0. + * @param frame The video frame to be pushed. See ExternalVideoFrame. + * @param videoTrackId The video track ID returned by the createCustomVideoTrack method. If you only need to push one external video stream, set videoTrackId to 0. * * @returns - * 0: Success. - * < 0: Failure. 
+ * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract pushVideoFrame( frame: ExternalVideoFrame, @@ -269,48 +274,48 @@ export abstract class IMediaEngine { abstract release(): void; /** - * Unregisters an audio frame observer. + * Unregisters the audio frame observer. * - * @param observer The audio frame observer, reporting the reception of each audio frame. See IAudioFrameObserver. + * @param observer The audio frame observer that monitors the reception of each audio frame. See IAudioFrameObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. */ abstract unregisterAudioFrameObserver(observer: IAudioFrameObserver): number; /** * Unregisters the video frame observer. * - * @param observer The video observer, reporting the reception of each video frame. See IVideoFrameObserver. + * @param observer Video frame observer that observes each received video frame. See IVideoFrameObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unregisterVideoFrameObserver(observer: IVideoFrameObserver): number; /** - * Unregisters a receiver object for the encoded video frame. + * Unregisters the video frame observer for encoded video images. * - * @param observer The video observer, reporting the reception of each video frame. See IVideoEncodedFrameObserver. + * @param observer Video frame observer that observes each received video frame. See IVideoEncodedFrameObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unregisterVideoEncodedFrameObserver( observer: IVideoEncodedFrameObserver ): number; /** - * Unregisters a facial information observer. + * Unregisters the face information observer. * - * @param observer Facial information observer, see IFaceInfoObserver. + * @param observer Face information observer. See IFaceInfoObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unregisterFaceInfoObserver(observer: IFaceInfoObserver): number; } diff --git a/src/IAgoraMediaPlayer.ts b/src/IAgoraMediaPlayer.ts index fa025315..497e5dda 100644 --- a/src/IAgoraMediaPlayer.ts +++ b/src/IAgoraMediaPlayer.ts @@ -16,40 +16,40 @@ import { import { IMediaPlayerSourceObserver } from './IAgoraMediaPlayerSource'; /** - * This class provides media player functions and supports multiple instances. + * Class that provides media player functionality and supports multiple instances. */ export abstract class IMediaPlayer { /** - * Gets the ID of the media player. + * Gets the media player ID. * * @returns - * Success. The ID of the media player. - * < 0: Failure. + * On success, returns the media player ID. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getMediaPlayerId(): number; /** - * Opens the media resource. + * Opens a media resource. * - * @param url The path of the media file. Both local path and online path are supported. - * @param startPos The starting position (ms) for playback. Default value is 0. + * @param url The path to the media file. Supports both local and online files. + * @param startPos The starting playback position in milliseconds. Default is 0. * * @returns * 0: Success. 
- * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract open(url: string, startPos: number): number; /** - * Opens a media file and configures the playback scenarios. + * Opens a media resource and configures playback settings. * - * This method supports opening media files of different sources, including a custom media source, and allows you to configure the playback scenarios. + * This method allows you to open different types of media resources, including custom media files, and configure playback settings. This method is asynchronous. To play the media file, call the play method after receiving the onPlayerSourceStateChanged callback with the state PlayerStateOpenCompleted. * - * @param source Media resources. See MediaSource. + * @param source The media resource. See MediaSource. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract openWithMediaSource(source: MediaSource): number; @@ -58,155 +58,158 @@ export abstract class IMediaPlayer { * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract play(): number; /** - * Pauses the playback. + * Pauses playback. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract pause(): number; /** - * Stops playing the media track. + * Stops playback. * - * After calling this method to stop playback, if you want to play again, you need to call open or openWithMediaSource to open the media resource. 
+ * After calling this method to stop playback, you need to call open or openWithMediaSource again to reopen the media resource if you want to play it again. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stop(): number; /** - * Resumes playing the media file. + * Resumes playback after pause. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract resume(): number; /** - * Seeks to a new playback position. + * Seeks to the specified playback position in the media file. * - * If you call seek after the playback has completed (upon receiving callback onPlayerSourceStateChanged reporting playback status as PlayerStatePlaybackCompleted or PlayerStatePlaybackAllLoopsCompleted), the SDK will play the media file from the specified position. At this point, you will receive callback onPlayerSourceStateChanged reporting playback status as PlayerStatePlaying. - * If you call seek while the playback is paused, upon successful call of this method, the SDK will seek to the specified position. To resume playback, call resume or play . + * If you call seek after playback has completed (as indicated by the onPlayerSourceStateChanged callback reporting the playback state as PlayerStatePlaybackCompleted or PlayerStatePlaybackAllLoopsCompleted), the SDK automatically starts playback from the specified position upon a successful call. You will receive an onPlayerSourceStateChanged callback reporting the playback state as PlayerStatePlaying. + * If you call seek while playback is paused, the SDK seeks to the specified position upon success. To resume playback, call resume or play. * - * @param newPos The new playback position (ms). + * @param newPos The target position in milliseconds. 
* * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract seek(newPos: number): number; /** - * Sets the pitch of the current media resource. + * Adjusts the pitch of the currently playing media resource. * - * Call this method after calling open. + * You need to call this method after calling open. * - * @param pitch Sets the pitch of the local music file by the chromatic scale. The default value is 0, which means keeping the original pitch. The value ranges from -12 to 12, and the pitch value between consecutive values is a chromatic value. The greater the absolute value of this parameter, the higher or lower the pitch of the local music file. + * @param pitch Adjusts the pitch of the local music file in semitone steps. The default value is 0, meaning no pitch adjustment. The value range is [-12, 12], where each adjacent value represents a semitone difference. The greater the absolute value, the more the pitch is raised or lowered. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Code](https://doc.shengwang.cn/api-ref/rtc/rn/error-code) for details and resolution suggestions. */ abstract setAudioPitch(pitch: number): number; /** - * Gets the duration of the media resource. + * Gets the total duration of the media file. * * @returns - * The total duration (ms) of the media file. + * The total duration of the media file in milliseconds. */ abstract getDuration(): number; /** - * Gets current local playback progress. + * Gets the current playback position. * * @returns - * Returns the current playback progress (ms) if the call succeeds. + * On success, returns the current playback position in milliseconds. * < 0: Failure. See MediaPlayerReason. */ abstract getPlayPosition(): number; /** - * Gets the number of the media streams in the media resource. 
+ * Gets the number of media streams in the current media file. * - * Call this method after you call open and receive the onPlayerSourceStateChanged callback reporting the state PlayerStateOpenCompleted. + * Call this method after calling open and receiving the onPlayerSourceStateChanged callback reporting the playback state as PlayerStateOpenCompleted. * * @returns - * The number of the media streams in the media resource if the method call succeeds. + * On success, returns the number of media streams in the media file. * < 0: Failure. See MediaPlayerReason. */ abstract getStreamCount(): number; /** - * Gets the detailed information of the media stream. + * Retrieves media stream information by stream index. * - * @param index The index of the media stream. This parameter must be less than the return value of getStreamCount. + * @param index The media stream index. The value must be less than the return value of getStreamCount. * * @returns - * If the call succeeds, returns the detailed information of the media stream. See PlayerStreamInfo. null is returned, if the method call fails. + * If the method call succeeds, returns the media stream information. See PlayerStreamInfo. + * If the method call fails, returns null. */ abstract getStreamInfo(index: number): PlayerStreamInfo; /** - * Sets the loop playback. + * Sets loop playback. * - * If you want to loop, call this method and set the number of the loops. When the loop finishes, the SDK triggers onPlayerSourceStateChanged and reports the playback state as PlayerStatePlaybackAllLoopsCompleted. + * If you want to enable loop playback, call this method and set the number of loops. + * When loop playback ends, the SDK triggers the onPlayerSourceStateChanged callback to report the playback state as PlayerStatePlaybackAllLoopsCompleted. * - * @param loopCount The number of times the audio effect loops: - * ≥0: Number of times for playing. 
For example, setting it to 0 means no loop playback, playing only once; setting it to 1 means loop playback once, playing a total of twice. - * -1: Play the audio file in an infinite loop. + * @param loopCount The number of times to loop playback. + * ≥0: Number of loops. For example, 0 means no looping and plays once; 1 means loops once and plays twice in total. + * -1: Loop playback indefinitely. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Code](https://doc.shengwang.cn/api-ref/rtc/rn/error-code) for details and resolution suggestions. */ abstract setLoopCount(loopCount: number): number; /** - * Sets the channel mode of the current audio file. + * Sets the playback speed of the current audio file. * - * Call this method after calling open. + * You need to call this method after open. * - * @param speed The playback speed. Agora recommends that you set this to a value between 30 and 400, defined as follows: - * 30: 0.3 times the original speed. - * 100: The original speed. - * 400: 4 times the original speed. + * @param speed Playback speed. The recommended range is [30, 400], where: + * 30: 0.3x speed. + * 100: Normal speed. + * 400: 4x speed. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Code](https://doc.shengwang.cn/api-ref/rtc/rn/error-code) for details and resolution suggestions. */ abstract setPlaybackSpeed(speed: number): number; /** - * Selects the audio track used during playback. + * Specifies the audio track to play for the current audio file. * - * After getting the track index of the audio file, you can call this method to specify any track to play. For example, if different tracks of a multi-track file store songs in different languages, you can call this method to set the playback language. You need to call this method after calling getStreamInfo to get the audio stream index value. 
+ * After obtaining the audio track index of the audio file, you can call this method to specify any track for playback. For example, if different tracks in a multi-track file store songs in different languages, you can call this method to set the playback language. You need to call this method after calling getStreamInfo to obtain the audio stream index. * * @param index The index of the audio track. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Code](https://doc.shengwang.cn/api-ref/rtc/rn/error-code) for details and resolution suggestions. */ abstract selectAudioTrack(index: number): number; /** - * Selects the audio tracks that you want to play on your local device and publish to the channel respectively. + * Selects the audio tracks for local playback and remote transmission. * - * You can call this method to determine the audio track to be played on your local device and published to the channel. Before calling this method, you need to open the media file with the openWithMediaSource method and set enableMultiAudioTrack in MediaSource as true. + * You can call this method to separately set the audio tracks for local playback and remote transmission. + * Before calling this method, you must open the media file using openWithMediaSource and set enableMultiAudioTrack to true via MediaSource. * - * @param playoutTrackIndex The index of audio tracks for local playback. You can obtain the index through getStreamInfo. - * @param publishTrackIndex The index of audio tracks to be published in the channel. You can obtain the index through getStreamInfo. + * @param playoutTrackIndex The index of the audio track for local playback. You can obtain the index value using getStreamInfo. + * @param publishTrackIndex The index of the audio track to send to the remote end. You can obtain the index value using getStreamInfo. * * @returns * 0: Success. 
@@ -233,137 +236,137 @@ export abstract class IMediaPlayer { abstract setExternalSubtitle(url: string): number; /** - * Gets current playback state. + * Gets the current state of the media player. * * @returns - * The current playback state. See MediaPlayerState. + * The current state of the media player. See MediaPlayerState. */ abstract getState(): MediaPlayerState; /** - * Sets whether to mute the media file. + * Sets whether to mute. * - * @param muted Whether to mute the media file: true : Mute the media file. false : (Default) Unmute the media file. + * @param muted Mute option. true : Mute. false : (Default) Do not mute. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract mute(muted: boolean): number; /** - * Reports whether the media resource is muted. + * Checks whether the currently playing media file is muted. * * @returns - * true : Reports whether the media resource is muted. false : Reports whether the media resource is muted. + * true : The currently playing media file is muted. false : The currently playing media file is not muted. */ abstract getMute(): boolean; /** * Adjusts the local playback volume. * - * @param volume The local playback volume, which ranges from 0 to 100: + * @param volume Local playback volume. The range is from 0 to 100: * 0: Mute. - * 100: (Default) The original volume. + * 100: (Default) Original volume of the media file. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustPlayoutVolume(volume: number): number; /** - * Gets the local playback volume. + * Gets the current local playback volume. * * @returns - * The local playback volume, which ranges from 0 to 100. 
+ * Returns the current local playback volume, ranging from 0 to 100: * 0: Mute. - * 100: (Default) The original volume. + * 100: (Default) Original playback volume of the media file. */ abstract getPlayoutVolume(): number; /** - * Adjusts the volume of the media file for publishing. + * Adjusts the volume heard by remote users. * - * After connected to the Agora server, you can call this method to adjust the volume of the media file heard by the remote user. + * After connecting to the Agora server, you can call this method to adjust the volume of the media file heard by remote users. * - * @param volume The volume, which ranges from 0 to 400: + * @param volume Signal volume. The range is from 0 to 400: * 0: Mute. - * 100: (Default) The original volume. - * 400: Four times the original volume (amplifying the audio signals by four times). + * 100: (Default) Original volume of the media file. + * 400: Four times the original volume (with built-in overflow protection). * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustPublishSignalVolume(volume: number): number; /** - * Gets the volume of the media file for publishing. + * Gets the volume heard by remote users. * * @returns - * ≥ 0: The remote playback volume. - * < 0: Failure. + * ≥ 0: Remote playback volume of the media file. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getPublishSignalVolume(): number; /** - * Sets the view. + * Sets the rendering view for the player. * - * @param view The render view. + * @param view Rendering view. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract setView(view: any): number; /** - * Sets the render mode of the media player. + * Sets the rendering mode of the player view. * - * @param renderMode Sets the render mode of the view. See RenderModeType. + * @param renderMode The rendering mode of the player view. See RenderModeType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRenderMode(renderMode: RenderModeType): number; /** - * Registers a media player observer. + * Registers a player source observer. * - * @param observer The player observer, listening for events during the playback. See IMediaPlayerSourceObserver. + * @param observer The player source observer that reports events during playback. See IMediaPlayerSourceObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract registerPlayerSourceObserver( observer: IMediaPlayerSourceObserver ): number; /** - * Releases a media player observer. + * Unregisters the player source observer. * - * @param observer The player observer, listening for events during the playback. See IMediaPlayerSourceObserver. + * @param observer The player source observer that reports events during playback. See IMediaPlayerSourceObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unregisterPlayerSourceObserver( observer: IMediaPlayerSourceObserver ): number; /** - * Registers an audio frame observer object. + * Registers an audio frame observer. * - * @param observer The audio frame observer, reporting the reception of each audio frame. See IAudioPcmFrameSink. - * @param mode The use mode of the audio frame. 
See RawAudioFrameOpModeType. + * @param observer The audio frame observer that monitors the reception of each audio frame. See IAudioPcmFrameSink. + * @param mode The usage mode of the audio frame. See RawAudioFrameOpModeType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract registerAudioFrameObserver( observer: IAudioPcmFrameSink, @@ -371,26 +374,26 @@ export abstract class IMediaPlayer { ): number; /** - * Unregisters an audio frame observer. + * Unregisters the audio frame observer. * - * @param observer The audio observer. See IAudioPcmFrameSink. + * @param observer The audio frame observer. See IAudioPcmFrameSink. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unregisterAudioFrameObserver(observer: IAudioPcmFrameSink): number; /** - * Registers a video frame observer object. + * Registers a video frame observer. * - * You need to implement the IMediaPlayerVideoFrameObserver class in this method and register callbacks according to your scenarios. After you successfully register the video frame observer, the SDK triggers the registered callbacks each time a video frame is received. + * You need to implement an IMediaPlayerVideoFrameObserver class in this method and register the callbacks of this class as needed. After successfully registering the video frame observer, the SDK triggers the registered callback each time a video frame is captured. * - * @param observer The video observer, reporting the reception of each video frame. See IMediaPlayerVideoFrameObserver. + * @param observer The video frame observer that monitors the reception of each video frame. See IMediaPlayerVideoFrameObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract registerVideoFrameObserver( observer: IMediaPlayerVideoFrameObserver @@ -399,11 +402,11 @@ export abstract class IMediaPlayer { /** * Unregisters the video frame observer. * - * @param observer The video observer, reporting the reception of each video frame. See IMediaPlayerVideoFrameObserver. + * @param observer The video frame observer that monitors the reception of each video frame. See IMediaPlayerVideoFrameObserver. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unregisterVideoFrameObserver( observer: IMediaPlayerVideoFrameObserver @@ -425,17 +428,17 @@ export abstract class IMediaPlayer { ): number; /** - * Sets the channel mode of the current audio file. + * Sets the channel mode for the current audio file. * - * In a stereo music file, the left and right channels can store different audio data. According to your needs, you can set the channel mode to original mode, left channel mode, right channel mode, or mixed channel mode. For example, in the KTV scenario, the left channel of the music file stores the musical accompaniment, and the right channel stores the singing voice. If you only need to listen to the accompaniment, call this method to set the channel mode of the music file to left channel mode; if you need to listen to the accompaniment and the singing voice at the same time, call this method to set the channel mode to mixed channel mode. - * Call this method after calling open. - * This method only applies to stereo audio files. + * In stereo audio files, the left and right channels can store different audio data. Depending on your needs, you can set the channel mode to original, left channel, right channel, or mixed mode. 
For example, in a KTV scenario, the left channel may store the accompaniment and the right channel the original vocals. If you only want to hear the accompaniment, call this method to set the channel mode to left channel; if you want to hear both, set it to mixed mode. + * You need to call this method after calling open. + * This method is applicable only to stereo audio files. * - * @param mode The channel mode. See AudioDualMonoMode. + * @param mode Channel mode. See AudioDualMonoMode. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Code](https://doc.shengwang.cn/api-ref/rtc/rn/error-code) for details and resolution suggestions. */ abstract setAudioDualMonoMode(mode: AudioDualMonoMode): number; @@ -488,72 +491,74 @@ export abstract class IMediaPlayer { abstract switchAgoraCDNSrc(src: string, syncPts?: boolean): number; /** - * Switches the media resource being played. + * Switches the media resource. * - * You can call this method to switch the media resource to be played according to the current network status. For example: - * When the network is poor, the media resource to be played is switched to a media resource address with a lower bitrate. - * When the network is good, the media resource to be played is switched to a media resource address with a higher bitrate. After calling this method, if you receive the onPlayerEvent callback report the PlayerEventSwitchComplete event, the switching is successful. If the switching fails, the SDK will automatically retry 3 times. If it still fails, you will receive the onPlayerEvent callback reporting the PlayerEventSwitchError event indicating an error occurred during media resource switching. - * Ensure that you call this method after open. - * To ensure normal playback, pay attention to the following when calling this method: - * Do not call this method when playback is paused. - * Do not call the seek method during switching. 
- * Before switching the media resource, make sure that the playback position does not exceed the total duration of the media resource to be switched. + * You can call this method to switch the bitrate of the media resource being played based on the current network conditions. For example: + * When the network is poor, switch to a lower bitrate media resource. + * When the network is good, switch to a higher bitrate media resource. After calling this method, if you receive the onPlayerEvent callback with the event PlayerEventSwitchComplete, the switch is successful. If the switch fails, the SDK automatically retries 3 times. If it still fails, you will receive the onPlayerEvent callback with the event PlayerEventSwitchError, indicating an error occurred during the switch. + * Make sure to call this method after open. + * To ensure normal playback, note the following when calling this method: + * Do not call this method while playback is paused. + * Do not call seek during bitrate switching. + * Ensure the playback position before switching is not greater than the total duration of the target media resource. * - * @param src The URL of the media resource. - * @param syncPts Whether to synchronize the playback position (ms) before and after the switch: true : Synchronize the playback position before and after the switch. false : (Default) Do not synchronize the playback position before and after the switch. + * @param src The network path of the media resource. + * @param syncPts Whether to synchronize the starting playback position before and after the switch: true : Synchronize. false : (Default) Do not synchronize. */ abstract switchSrc(src: string, syncPts?: boolean): number; /** * Preloads a media resource. * - * You can call this method to preload a media resource into the playlist. If you need to preload multiple media resources, you can call this method multiple times. 
After calling this method, if you receive the PlayerPreloadEventComplete event in the onPreloadEvent callback, the preload is successful; If you receive the PlayerPreloadEventError event in the onPreloadEvent callback, the preload fails. If the preload is successful and you want to play the media resource, call playPreloadedSrc; if you want to clear the playlist, call stop. - * Before calling this method, ensure that you have called open or openWithMediaSource to open the media resource successfully. - * Agora does not support preloading duplicate media resources to the playlist. However, you can preload the media resources that are being played to the playlist again. + * You can call this method to preload a media resource into the playlist. To preload multiple media resources, call this method multiple times. + * After calling this method, if you receive the onPreloadEvent callback with PlayerPreloadEventComplete, the preload is successful. If you receive PlayerPreloadEventError, the preload has failed. + * After a successful preload, call playPreloadedSrc to play the media resource, or stop to clear the playlist. + * Before calling this method, make sure you have successfully opened the media resource using open or openWithMediaSource. + * The SDK does not support preloading duplicate media resources into the playlist, but it does support preloading a media resource that is currently playing. * - * @param src The URL of the media resource. - * @param startPos The starting position (ms) for playing after the media resource is preloaded to the playlist. When preloading a live stream, set this parameter to 0. + * @param src The network path of the media resource. + * @param startPos The starting position (in milliseconds) when playback begins after preloading into the playlist. Set this parameter to 0 when preloading a live stream. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract preloadSrc(src: string, startPos: number): number; /** - * Plays preloaded media resources. + * Plays a preloaded media resource. * - * After calling the preloadSrc method to preload the media resource into the playlist, you can call this method to play the preloaded media resource. After calling this method, if you receive the onPlayerSourceStateChanged callback which reports the PlayerStatePlaying state, the playback is successful. If you want to change the preloaded media resource to be played, you can call this method again and specify the URL of the new media resource that you want to preload. If you want to replay the media resource, you need to call preloadSrc to preload the media resource to the playlist again before playing. If you want to clear the playlist, call the stop method. If you call this method when playback is paused, this method does not take effect until playback is resumed. + * After calling the preloadSrc method to preload a media resource into the playlist, you can call this method to play the preloaded media resource. If you receive the onPlayerSourceStateChanged callback reporting PlayerStatePlaying, it indicates successful playback. + * If you want to switch to another preloaded media resource, you can call this method again with a new media resource path. If you want to replay a media resource, you need to call preloadSrc again to preload it into the playlist before playback. To clear the playlist, call stop. If you call this method while playback is paused, it will take effect only after playback resumes. * - * @param src The URL of the media resource in the playlist must be consistent with the src set by the preloadSrc method; otherwise, the media resource cannot be played. + * @param src The URL of the media resource in the playlist. 
It must match the src set by the preloadSrc method, otherwise playback will fail. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract playPreloadedSrc(src: string): number; /** - * Unloads media resources that are preloaded. - * - * This method cannot release the media resource being played. + * Releases preloaded media resources. * - * @param src The URL of the media resource. + * @param src The network path of the media resource. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Code](https://doc.shengwang.cn/api-ref/rtc/rn/error-code) for details and resolution suggestions. */ abstract unloadSrc(src: string): number; /** - * Enables or disables the spatial audio effect for the media player. + * Enables or disables spatial audio for the media player. * - * After successfully setting the spatial audio effect parameters of the media player, the SDK enables the spatial audio effect for the media player, and the local user can hear the media resources with a sense of space. If you need to disable the spatial audio effect for the media player, set the params parameter to null. + * After successfully setting the spatial audio parameters for the media player, the SDK enables spatial audio for the media player, allowing the local user to hear spatial audio effects from the media resource. + * To disable spatial audio for the media player, set the params parameter to null. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setSpatialAudioParams(params: SpatialAudioParams): number; @@ -570,40 +575,42 @@ export abstract class IMediaPlayer { /** * Sets media player options. * - * The media player supports setting options through key and value. 
The difference between this method and setPlayerOptionInString is that the value parameter of this method is of type Int, while the value of setPlayerOptionInString is of type String. These two methods cannot be used together. + * The media player supports setting options via key and value. + * The difference between this method and setPlayerOptionInString is that the value in this method is of type Int, while in setPlayerOptionInString it is of type String. The two are not interchangeable. * - * @param key The key of the option. - * @param value The value of the key. + * @param key The key value. + * @param value The value. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Code](https://doc.shengwang.cn/api-ref/rtc/rn/error-code) for details and resolution suggestions. */ abstract setPlayerOptionInInt(key: string, value: number): number; /** * Sets media player options. * - * The media player supports setting options through key and value. The difference between this method and setPlayerOptionInInt is that the value parameter of this method is of type String, while the value of setPlayerOptionInInt is of type String. These two methods cannot be used together. + * The media player allows you to set options using key and value. + * The difference between this method and setPlayerOptionInInt is that this method uses a String type for value, while setPlayerOptionInInt uses an Int type. The two cannot be used interchangeably. * - * @param key The key of the option. - * @param value The value of the key. + * @param key Key value. + * @param value Value. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setPlayerOptionInString(key: string, value: string): number; } /** - * This class provides methods to manage cached media files. 
+ * This class provides methods to manage cached media files in the media player. */ export abstract class IMediaPlayerCacheManager { /** * Deletes all cached media files in the media player. * - * The cached media file currently being played will not be deleted. + * This method does not delete cached media files that are currently playing. * * @returns * 0: Success. @@ -612,9 +619,9 @@ export abstract class IMediaPlayerCacheManager { abstract removeAllCaches(): number; /** - * Deletes a cached media file that is the least recently used. + * Deletes the least recently used cached media file in the media player. * - * You can call this method to delete a cached media file when the storage space for the cached files is about to reach its limit. After you call this method, the SDK deletes the cached media file that is least used. The cached media file currently being played will not be deleted. + * When cached media files occupy too much space, you can call this method to clean up cache files. After calling this method, the SDK deletes the least recently used cached media file. When you call this method to delete cached media files, cached media files that are currently playing will not be deleted. * * @returns * 0: Success. @@ -623,11 +630,11 @@ export abstract class IMediaPlayerCacheManager { abstract removeOldCache(): number; /** - * Deletes a cached media file. + * Deletes the specified cached media file. * - * The cached media file currently being played will not be deleted. + * This method does not delete cached media files that are currently playing. * - * @param uri The URI (Uniform Resource Identifier) of the media file to be deleted. + * @param uri The URI (Uniform Resource Identifier) of the cache file to be deleted, used to identify the media file. * * @returns * 0: Success. 
@@ -636,11 +643,11 @@ export abstract class IMediaPlayerCacheManager { abstract removeCacheByUri(uri: string): number; /** - * Sets the storage path for the media files that you want to cache. + * Sets the storage path for media files to be cached. * - * Make sure IRtcEngine is initialized before you call this method. + * This method must be called after initializing IRtcEngine. * - * @param path The absolute path of the media files to be cached. Ensure that the directory for the media files exists and is writable. + * @param path The absolute path where the cache files are stored. Make sure the specified directory exists and is writable. * * @returns * 0: Success. @@ -649,9 +656,9 @@ export abstract class IMediaPlayerCacheManager { abstract setCacheDir(path: string): number; /** - * Sets the maximum number of media files that can be cached. + * Sets the upper limit on the number of cached media files. * - * @param count The maximum number of media files that can be cached. The default value is 1,000. + * @param count The upper limit on the number of media files that can be cached. The default value is 1000. * * @returns * 0: Success. @@ -660,9 +667,9 @@ export abstract class IMediaPlayerCacheManager { abstract setMaxCacheFileCount(count: number): number; /** - * Sets the maximum size of the aggregate storage space for cached media files. + * Sets the upper limit of the total cache size for cached media files. * - * @param cacheSize The maximum size (bytes) of the aggregate storage space for cached media files. The default value is 1 GB. + * @param cacheSize The upper limit of the total cache size for cached media files, in bytes. The default is 1 GB. * * @returns * 0: Success. @@ -671,11 +678,11 @@ export abstract class IMediaPlayerCacheManager { abstract setMaxCacheFileSize(cacheSize: number): number; /** - * Sets whether to delete cached media files automatically. + * Sets whether to enable automatic removal of cache files. 
* - * If you enable this function to remove cached media files automatically, when the cached media files exceed either the number or size limit you set, the SDK automatically deletes the least recently used cache file. + * After enabling automatic removal of cache files, when the number or total size of cached media files in the player exceeds the set limit, the SDK will automatically remove the least recently used cache file. * - * @param enable Whether to enable the SDK to delete cached media files automatically: true : Delete cached media files automatically. false : (Default) Do not delete cached media files automatically. + * @param enable Whether to automatically remove cache files: true : Enable automatic removal of cache files. false : (Default) Disable automatic removal of cache files. * * @returns * 0: Success. @@ -684,58 +691,61 @@ export abstract class IMediaPlayerCacheManager { abstract enableAutoRemoveCache(enable: boolean): number; /** - * Gets the storage path of the cached media files. + * Gets the storage path of the cache file. * - * If you have not called the setCacheDir method to set the storage path for the media files to be cached before calling this method, you get the default storage path used by the SDK. + * If you have not called the setCacheDir method to customize the storage path of the cache file before calling this method, it returns the SDK's default cache file storage path. * - * @param length An input parameter; the maximum length of the cache file storage path string. + * @param length Input parameter. The maximum length of the cache file storage path string. * * @returns - * The call succeeds, and the SDK returns the storage path of the cached media files. + * The storage path of the cache file, if the method call succeeds. * < 0: Failure. See MediaPlayerReason. */ abstract getCacheDir(length: number): string; /** - * Gets the maximum number of media files that can be cached. 
+ * Gets the upper limit of the number of cache files set. * - * By default, the maximum number of media files that can be cached is 1,000. + * The default upper limit of the number of cache files in the SDK is 1000. * * @returns - * > 0: The call succeeds and returns the maximum number of media files that can be cached. + * > 0: Success. Returns the upper limit of the number of cache files. * < 0: Failure. See MediaPlayerReason. */ abstract getMaxCacheFileCount(): number; /** - * Gets the maximum size of the aggregate storage space for cached media files. + * Gets the upper limit of the total cache size of cache files set. * - * By default, the maximum size of the aggregate storage space for cached media files is 1 GB. You can call the setMaxCacheFileSize method to set the limit according to your scenarios. + * The default upper limit of the total cache size in the SDK is 1GB. You can call the setMaxCacheFileSize method to customize the upper limit of the total cache size. * * @returns - * > 0: The call succeeds and returns the maximum size (in bytes) of the aggregate storage space for cached media files. + * > 0: Success. Returns the upper limit of the total cache size of cache files in bytes. * < 0: Failure. See MediaPlayerReason. */ abstract getMaxCacheFileSize(): number; /** - * Gets the number of media files that are cached. + * Gets the total number of currently cached media files. * * @returns - * ≥ 0: The call succeeds and returns the number of media files that are cached. + * ≥ 0: Success. Returns the total number of currently cached media files. * < 0: Failure. See MediaPlayerReason. */ abstract getCacheFileCount(): number; } /** - * The video frame observer for the media player. + * Video data observer for media player. + * + * You can call registerVideoFrameObserver to register or unregister the IMediaPlayerVideoFrameObserver observer. */ export interface IMediaPlayerVideoFrameObserver { /** - * Occurs each time the player receives a video frame. 
+ * Callback when a video frame is received. * - * After registering the video frame observer, the callback occurs every time the player receives a video frame, reporting the detailed information of the video frame. + * After registering the video observer, this callback is triggered every time a video frame is received to report video frame information. + * You are advised to implement this callback using the C++ API. * * @param frame Video frame information. See VideoFrame. */ diff --git a/src/IAgoraMediaPlayerSource.ts b/src/IAgoraMediaPlayerSource.ts index 44e7e4f5..d70ec056 100644 --- a/src/IAgoraMediaPlayerSource.ts +++ b/src/IAgoraMediaPlayerSource.ts @@ -11,16 +11,16 @@ import { } from './AgoraMediaPlayerTypes'; /** - * Provides callbacks for media players. + * Provides callbacks for the media player. */ export interface IMediaPlayerSourceObserver { /** - * Reports the changes of playback state. + * Reports player state changes. * - * When the state of the media player changes, the SDK triggers this callback to report the current playback state. + * When the player state changes, the SDK triggers this callback to report the new playback state. * - * @param state The playback state. See MediaPlayerState. - * @param reason The reason for the changes in the media player status. See MediaPlayerReason. + * @param state New playback state. See MediaPlayerState. + * @param reason Reason for the player state change. See MediaPlayerReason. */ onPlayerSourceStateChanged?( state: MediaPlayerState, @@ -28,22 +28,22 @@ export interface IMediaPlayerSourceObserver { ): void; /** - * Reports the playback progress of the media file. + * Reports the playback progress of the current media resource. * - * When playing media files, the SDK triggers this callback every two second to report current playback progress. + * While playing a media file, the SDK automatically triggers this callback every second to report the current playback progress. 
* - * @param positionMs The playback position (ms) of media files. - * @param timeStampMs The NTP timestamp (ms) of the current playback progress. + * @param positionMs Current playback progress in ms. + * @param timestampMs NTP timestamp of the current playback progress in ms. */ onPositionChanged?(positionMs: number, timestampMs: number): void; /** - * Reports the player events. + * Reports player events. * - * After calling the seek method, the SDK triggers the callback to report the results of the seek operation. + * After calling seek to locate playback, the SDK triggers this callback to report the result of the seek operation. * - * @param eventCode The player event. See MediaPlayerEvent. - * @param elapsedTime The time (ms) when the event occurs. + * @param eventCode Player event. See MediaPlayerEvent. + * @param elapsedTime Time of the event (in milliseconds). * @param message Information about the event. */ onPlayerEvent?( @@ -53,31 +53,31 @@ export interface IMediaPlayerSourceObserver { ): void; /** - * Occurs when the media metadata is received. + * Reports received media metadata. * - * The callback occurs when the player receives the media metadata and reports the detailed information of the media metadata. + * After parsing the media metadata, the SDK triggers this callback to report the data type and content of the metadata. * - * @param data The detailed data of the media metadata. - * @param length The data length (bytes). + * @param data Specific data in a user-defined format. + * @param length Data length in bytes. */ onMetaData?(data: Uint8Array, length: number): void; /** - * Reports the playback duration that the buffered data can support. + * Reports the playable duration of the current buffered data. * - * When playing online media resources, the SDK triggers this callback every two seconds to report the playback duration that the currently buffered data can support. 
- * When the playback duration supported by the buffered data is less than the threshold (0 by default), the SDK returns PlayerEventBufferLow (6). - * When the playback duration supported by the buffered data is greater than the threshold (0 by default), the SDK returns PlayerEventBufferRecover (7). + * While playing online media resources, the SDK triggers this callback every second to report the duration that the current buffered data can support for playback. + * When the playable duration of the buffered data is less than the threshold (default is 0), PlayerEventBufferLow (6) is returned. + * When the playable duration is greater than the threshold (default is 0), PlayerEventBufferRecover (7) is returned. * - * @param playCachedBuffer The playback duration (ms) that the buffered data can support. + * @param playCachedBuffer The duration (in milliseconds) that the current buffered data can support for playback. */ onPlayBufferUpdated?(playCachedBuffer: number): void; /** - * Reports the events of preloaded media resources. + * Reports events during media resource preloading. * - * @param src The URL of the media resource. - * @param event Events that occur when media resources are preloaded. See PlayerPreloadEvent. + * @param src Path of the media resource. + * @param event Event that occurred during media resource preloading. See PlayerPreloadEvent. */ onPreloadEvent?(src: string, event: PlayerPreloadEvent): void; @@ -92,46 +92,46 @@ export interface IMediaPlayerSourceObserver { onAgoraCDNTokenWillExpire?(): void; /** - * Occurs when the video bitrate of the media resource changes. + * Callback when video bitrate of media resource changes. * - * @param from Information about the video bitrate of the media resource being played. See SrcInfo. - * @param to Information about the changed video bitrate of media resource being played. See SrcInfo. + * @param from Information about the video bitrate of the media resource before the change. See SrcInfo. 
+ * @param to Information about the video bitrate of the media resource after the change. See SrcInfo. */ onPlayerSrcInfoChanged?(from: SrcInfo, to: SrcInfo): void; /** - * Occurs when information related to the media player changes. + * Callback when media player information changes. * - * When the information about the media player changes, the SDK triggers this callback. You can use this callback for troubleshooting. + * When media player-related information changes, the SDK triggers this callback. You can use it for troubleshooting and diagnostics. * - * @param info Information related to the media player. See PlayerUpdatedInfo. + * @param info Media player-related information. See PlayerUpdatedInfo. */ onPlayerInfoUpdated?(info: PlayerUpdatedInfo): void; /** - * Reports the statistics of the media file being cached. + * Reports information about the currently cached media resources. * - * After you call the openWithMediaSource method and set enableCache as true, the SDK triggers this callback once per second to report the statistics of the media file being cached. + * After calling the openWithMediaSource method and setting the enableCache member to true, the SDK triggers this callback once per second after the media file is opened, reporting statistical data of the currently cached media files. * - * @param stats The statistics of the media file being cached. See CacheStatistics. + * @param stats Information about the media resources in the cache. See CacheStatistics. */ onPlayerCacheStats?(stats: CacheStatistics): void; /** - * Reports the statistics of the media file being played. + * Reports information about the currently playing media resource. * - * The SDK triggers this callback once per second to report the statistics of the media file being played. + * After the media resource starts playing, the SDK triggers this callback once per second to report information about the media resource. * - * @param stats The statistics of the media file. 
See PlayerPlaybackStats. + * @param stats Information about the media resource. See PlayerPlaybackStats. */ onPlayerPlaybackStats?(stats: PlayerPlaybackStats): void; /** - * Reports the volume of the media player. + * Audio volume indication callback for the media player. * - * The SDK triggers this callback every 200 milliseconds to report the current volume of the media player. + * The SDK triggers this callback every 200 ms to report the current volume of the media player. * - * @param volume The volume of the media player. The value ranges from 0 to 255. + * @param volume The current volume of the player, ranging from [0,255]. */ onAudioVolumeIndication?(volume: number): void; } diff --git a/src/IAgoraMusicContentCenter.ts b/src/IAgoraMusicContentCenter.ts index fe58a6f0..0c6f725e 100644 --- a/src/IAgoraMusicContentCenter.ts +++ b/src/IAgoraMusicContentCenter.ts @@ -151,15 +151,15 @@ export class MvProperty { } /** - * The climax parts of the music. + * @ignore */ export class ClimaxSegment { /** - * The time (ms) when the climax part begins. + * @ignore */ startTimeMs?: number; /** - * The time (ms) when the climax part ends. + * @ignore */ endTimeMs?: number; } diff --git a/src/IAgoraPip.ts b/src/IAgoraPip.ts index 48b8c3ff..2deb9f99 100644 --- a/src/IAgoraPip.ts +++ b/src/IAgoraPip.ts @@ -1,142 +1,193 @@ import { RtcRendererViewProps } from './AgoraRtcRenderView'; /** - * @ignore + * Layout configuration for picture-in-picture video streams. + * + * Available since v4.6.2. This class defines how multiple video streams are arranged in a flowing layout, from left to right and top to bottom. */ export class AgoraPipContentViewLayout { /** - * @ignore + * Padding around the entire layout in pixels. Used to create space between the layout edge and video streams. If null, no padding is applied. */ padding?: number; /** - * @ignore + * Horizontal and vertical spacing between video streams in pixels. 
Used to create consistent spacing between adjacent video streams. If null, video streams are placed directly adjacent. */ spacing?: number; /** - * @ignore + * Maximum number of rows allowed in the layout. Once the maximum is reached, no new rows are created even if more video streams exist. If null, rows are created as needed to accommodate all streams. Must be greater than 0 or null. */ row?: number; /** - * @ignore + * Maximum number of video streams per row. Once the maximum is reached, a new row starts. If null, video streams flow to fill the available width. Must be greater than 0 or null. */ column?: number; } /** - * @ignore + * Configuration options for Agora picture-in-picture mode. + * + * Available since v4.6.2. This class provides platform-specific options to configure picture-in-picture behavior on Android and iOS. */ export class AgoraPipOptions { /** - * @ignore + * Whether to automatically enter picture-in-picture mode. */ autoEnterEnabled?: boolean; /** - * @ignore + * Horizontal aspect ratio of the picture-in-picture window. + * + * (Android only) */ aspectRatioX?: number; /** - * @ignore + * Vertical aspect ratio of the picture-in-picture window. + * + * (Android only) */ aspectRatioY?: number; /** - * @ignore + * Left coordinate of the source rectangle hint. + * + * Used to specify the initial position of the picture-in-picture window. + * (Android only) */ sourceRectHintLeft?: number; /** - * @ignore + * Top coordinate of the source rectangle hint. + * + * Used to specify the initial position of the picture-in-picture window. + * (Android only) */ sourceRectHintTop?: number; /** - * @ignore + * Right coordinate of the source rectangle hint. + * + * Used to specify the initial position of the picture-in-picture window. + * (Android only) */ sourceRectHintRight?: number; /** - * @ignore + * Bottom coordinate of the source rectangle hint. + * + * Used to specify the initial position of the picture-in-picture window. 
+ * (Android only) */ sourceRectHintBottom?: number; /** - * @ignore + * Whether to enable seamless resizing of the picture-in-picture window. + * + * When enabled, the window resizes smoothly. + * Default is false. + * (Android only) */ seamlessResizeEnabled?: boolean; /** - * @ignore + * Whether to use an external state monitor. + * + * When enabled, a dedicated thread is created to monitor the state of the picture-in-picture window. Use externalStateMonitorInterval to configure the monitoring frequency. + * Default is true. + * (Android only) */ useExternalStateMonitor?: boolean; /** - * @ignore + * Interval for external state monitoring, in milliseconds. + * + * Takes effect only when useExternalStateMonitor is true. + * Default is 100ms. + * (Android only) */ externalStateMonitorInterval?: number; /** - * @ignore + * Video transcoding configuration. + * + * Takes effect only when contentView is set to 0. When the SDK manages the views, all video streams are placed in the root view of the picture-in-picture window. + * (iOS only) */ videoStreams?: RtcRendererViewProps[]; /** - * @ignore + * Layout configuration for picture-in-picture video streams. + * + * Takes effect only when contentView is set to 0. + * (iOS only) */ contentViewLayout?: AgoraPipContentViewLayout; /** - * @ignore + * sourceContentView determines the source frame and restore target for picture-in-picture animation. Pass 0 to use the app's root view. For best animation experience, set this to the view containing video content. The system uses this view for enter/exit animations and as the restore target when returning to the app or stopping picture-in-picture. */ sourceContentView?: number; /** - * @ignore + * contentView determines which view will be displayed in the picture-in-picture window. If 0 is passed, the picture-in-picture controller automatically manages and displays all video streams. 
If a specific view ID is passed, you are responsible for managing the content displayed in the picture-in-picture window. */ contentView?: number; /** - * @ignore + * Preferred width of the picture-in-picture content. + * + * (iOS only) */ preferredContentWidth?: number; /** - * @ignore + * Preferred height of the picture-in-picture content. + * + * (iOS only) */ preferredContentHeight?: number; /** - * @ignore + * Control style of the picture-in-picture window. + * Available styles: + * 0: Show all system controls (default) + * 1: Hide forward and back buttons + * 2: Hide play/pause button and progress bar (recommended) + * 3: Hide all system controls, including close and restore buttons (iOS only) */ controlStyle?: number; } /** - * @ignore + * Represents the current state of Picture-in-Picture mode. + * + * Available since v4.6.2. */ export enum AgoraPipState { /** - * @ignore + * 0: Picture-in-Picture mode has started successfully. */ pipStateStarted = 0, /** - * @ignore + * 1: Picture-in-Picture mode has stopped. */ pipStateStopped = 1, /** - * @ignore + * 2: Failed to start Picture-in-Picture mode or encountered an error. */ pipStateFailed = 2, } /** - * @ignore + * Observer for picture-in-picture state changes. + * + * Available since v4.6.2. Implement this class to receive notifications of picture-in-picture state transitions and potential errors. */ export interface AgoraPipStateChangedObserver { /** @@ -146,60 +197,99 @@ export interface AgoraPipStateChangedObserver { } /** - * @ignore + * Controller interface for managing picture-in-picture functionality. + * + * Available since v4.6.2. This abstract class defines methods required to control picture-in-picture mode, including setup, state management, and lifecycle operations. */ export abstract class AgoraPip { /** - * @ignore + * Releases resources related to picture-in-picture. + * + * Available since v4.6.2. 
 */ abstract release(): void; /** - * @ignore + * Registers a picture-in-picture state change observer. + * + * @param observer Picture-in-picture state change observer. See AgoraPipStateChangedObserver. */ abstract registerPipStateChangedObserver( observer: AgoraPipStateChangedObserver ): void; /** - * @ignore + * Unregisters the picture-in-picture state change observer. + * + * @param observer The picture-in-picture state change observer. See AgoraPipStateChangedObserver. */ abstract unregisterPipStateChangedObserver( observer: AgoraPipStateChangedObserver ): void; /** - * @ignore + * Checks whether the current device supports picture-in-picture mode. + * + * Available since v4.6.2. + * + * @returns + * true : The current device supports picture-in-picture mode. false : The current device does not support picture-in-picture mode. */ abstract pipIsSupported(): boolean; /** - * @ignore + * Checks whether auto-entering picture-in-picture mode is supported. + * + * Available since v4.6.2. + * + * @returns + * true : Auto-entering picture-in-picture mode is supported. false : Auto-entering picture-in-picture mode is not supported. */ abstract pipIsAutoEnterSupported(): boolean; /** - * @ignore + * Checks whether picture-in-picture mode is activated. + * + * Available since v4.6.2. + * + * @returns + * true : Picture-in-picture mode is activated. false : Picture-in-picture mode is not activated. */ abstract isPipActivated(): boolean; /** - * @ignore + * Configures picture-in-picture mode. + * + * Available since v4.6.2. + * + * @param options Picture-in-picture configuration options. See AgoraPipOptions. + * + * @returns + * true : The method call succeeds. false : The method call fails. */ abstract pipSetup(options: AgoraPipOptions): boolean; /** - * @ignore + * Starts picture-in-picture mode. + * + * Available since v4.6.2. + * + * @returns + * true : The method call succeeds. false : The method call fails. 
 */ abstract pipStart(): boolean; /** - * @ignore + * Stops picture-in-picture mode. + * + * Available since v4.6.2. */ abstract pipStop(): void; /** - * @ignore + * Releases resources related to picture-in-picture. + * + * Available since v4.6.2. */ abstract pipDispose(): void; } diff --git a/src/IAgoraRhythmPlayer.ts b/src/IAgoraRhythmPlayer.ts index 0176a393..e8b376ca 100644 --- a/src/IAgoraRhythmPlayer.ts +++ b/src/IAgoraRhythmPlayer.ts @@ -5,63 +5,63 @@ import './extension/IAgoraRhythmPlayerExtension'; */ export enum RhythmPlayerStateType { /** - * 810: The virtual metronome is not enabled or disabled already. + * 810: The virtual metronome is not started or has been stopped. */ RhythmPlayerStateIdle = 810, /** - * 811: Opening the beat files. + * 811: Opening the rhythm audio file. */ RhythmPlayerStateOpening = 811, /** - * 812: Decoding the beat files. + * 812: Decoding the rhythm audio file. */ RhythmPlayerStateDecoding = 812, /** - * 813: The beat files are playing. + * 813: Playing the rhythm audio file. */ RhythmPlayerStatePlaying = 813, /** - * 814: Failed to start virtual metronome. You can use the reported errorCode to troubleshoot the cause of the error, or you can try to start the virtual metronome again. + * 814: Failed to start the virtual metronome. You can troubleshoot using the reported error code errorCode or try starting the virtual metronome again. */ RhythmPlayerStateFailed = 814, } /** - * Virtual Metronome error message. + * Error information for the virtual metronome. */ export enum RhythmPlayerReason { /** - * 0: The beat files are played normally without errors. + * 0: The metronome audio file is playing normally, no error. */ RhythmPlayerReasonOk = 0, /** - * 1: A general error; no specific reason. + * 1: General error with no specific reason. */ RhythmPlayerReasonFailed = 1, /** - * 801: There is an error when opening the beat files. + * 801: Failed to open the metronome audio file. 
*/ RhythmPlayerReasonCanNotOpen = 801, /** - * 802: There is an error when playing the beat files. + * 802: Failed to play the metronome audio file. */ RhythmPlayerReasonCanNotPlay = 802, /** - * 803: The duration of the beat file exceeds the limit. The maximum duration is 1.2 seconds. + * 803: The duration of the metronome audio file exceeds the limit. The maximum duration is 1.2 seconds. */ RhythmPlayerReasonFileOverDurationLimit = 803, } /** - * The metronome configuration. + * Virtual metronome configuration. */ export class AgoraRhythmPlayerConfig { /** - * The number of beats per measure, which ranges from 1 to 9. The default value is 4, which means that each measure contains one downbeat and three upbeats. + * Number of beats per measure, range [1,9]. Default is 4, which includes 1 strong beat and 3 weak beats per measure. */ beatsPerMeasure?: number; /** - * The beat speed (beats/minute), which ranges from 60 to 360. The default value is 60, which means that the metronome plays 60 beats in one minute. + * Tempo (beats per minute), range [60,360]. Default is 60, i.e., 60 beats per minute. */ beatsPerMinute?: number; } diff --git a/src/IAgoraRtcEngine.ts b/src/IAgoraRtcEngine.ts index 2be583a0..46f74197 100644 --- a/src/IAgoraRtcEngine.ts +++ b/src/IAgoraRtcEngine.ts @@ -143,7 +143,7 @@ import { ILocalSpatialAudioEngine } from './IAgoraSpatialAudio'; import { IAudioDeviceManager } from './IAudioDeviceManager'; /** - * Media device types. + * Device type. */ export enum MediaDeviceType { /** @@ -155,7 +155,7 @@ export enum MediaDeviceType { */ AudioPlayoutDevice = 0, /** - * 1: Audio capturing device. + * 1: Audio recording device. */ AudioRecordingDevice = 1, /** @@ -163,11 +163,11 @@ export enum MediaDeviceType { */ VideoRenderDevice = 2, /** - * 3: Video capturing device. + * 3: Video capture device. */ VideoCaptureDevice = 3, /** - * 4: Audio playback device for an app. + * 4: Audio application playback device. 
*/ AudioApplicationPlayoutDevice = 4, /** @@ -175,59 +175,66 @@ export enum MediaDeviceType { */ AudioVirtualPlayoutDevice = 5, /** - * 6: Virtual audio capturing device (virtual sound card). + * 6: Virtual audio recording device (virtual sound card). */ AudioVirtualRecordingDevice = 6, } /** - * The playback state of the music file. + * Music file playback state. */ export enum AudioMixingStateType { /** - * 710: The music file is playing. + * 710: Music file is playing normally. */ AudioMixingStatePlaying = 710, /** - * 711: The music file pauses playing. + * 711: Music file playback paused. */ AudioMixingStatePaused = 711, /** - * 713: The music file stops playing. The possible reasons include: AudioMixingReasonAllLoopsCompleted (723) AudioMixingReasonStoppedByUser (724) + * 713: Music file playback stopped. + * This state may be caused by the following reasons: + * AudioMixingReasonAllLoopsCompleted(723) + * AudioMixingReasonStoppedByUser(724) */ AudioMixingStateStopped = 713, /** - * 714: An error occurs during the playback of the audio mixing file. The possible reasons include: AudioMixingReasonCanNotOpen (701) AudioMixingReasonTooFrequentCall (702) AudioMixingReasonInterruptedEof (703) + * 714: Music file playback error. + * This state may be caused by the following reasons: + * AudioMixingReasonCanNotOpen(701) + * AudioMixingReasonTooFrequentCall(702) + * AudioMixingReasonInterruptedEof(703) */ AudioMixingStateFailed = 714, } /** - * The reason why the playback state of the music file changes. Reported in the onAudioMixingStateChanged callback. + * Reason for music file playback state change. Reported in the onAudioMixingStateChanged callback. */ export enum AudioMixingReasonType { /** - * 701: The SDK cannot open the music file. For example, the local music file does not exist, the SDK does not support the file format, or the the SDK cannot access the music file URL. + * 701: Failed to open music file. 
For example, the local music file does not exist, the file format is not supported, or the online music file URL is inaccessible. */ AudioMixingReasonCanNotOpen = 701, /** - * 702: The SDK opens the music file too frequently. If you need to call startAudioMixing multiple times, ensure that the call interval is more than 500 ms. + * 702: Music file opened too frequently. If you need to call startAudioMixing multiple times, ensure the interval between calls is more than 500 ms. */ AudioMixingReasonTooFrequentCall = 702, /** - * 703: The music file playback is interrupted. + * 703: Music file playback interrupted. */ AudioMixingReasonInterruptedEof = 703, /** - * 721: The music file completes a loop playback. + * 721: One loop of the music file playback completed. */ AudioMixingReasonOneLoopCompleted = 721, /** - * 723: The music file completes all loop playback. + * 723: All loops of the music file playback completed. */ AudioMixingReasonAllLoopsCompleted = 723, /** - * 724: Successfully call stopAudioMixing to stop playing the music file. + * 724: Successfully called stopAudioMixing to stop music file playback. */ AudioMixingReasonStoppedByUser = 724, /** @@ -235,7 +242,7 @@ export enum AudioMixingReasonType { */ AudioMixingReasonResumedByUser = 726, /** - * 0: The SDK opens music file successfully. + * 0: Music file opened successfully. */ AudioMixingReasonOk = 0, } @@ -291,73 +298,73 @@ export enum InjectStreamStatus { } /** - * The midrange frequency for audio equalization. + * Center frequency of the voice equalization band. */ export enum AudioEqualizationBandFrequency { /** - * 0: 31 Hz. + * 0: 31 Hz */ AudioEqualizationBand31 = 0, /** - * 1: 62 Hz. + * 1: 62 Hz */ AudioEqualizationBand62 = 1, /** - * 2: 125 Hz. + * 2: 125 Hz */ AudioEqualizationBand125 = 2, /** - * 3: 250 Hz. + * 3: 250 Hz */ AudioEqualizationBand250 = 3, /** - * 4: 500 Hz. + * 4: 500 Hz */ AudioEqualizationBand500 = 4, /** - * 5: 1 kHz. 
+ * 5: 1 kHz */ AudioEqualizationBand1k = 5, /** - * 6: 2 kHz. + * 6: 2 kHz */ AudioEqualizationBand2k = 6, /** - * 7: 4 kHz. + * 7: 4 kHz */ AudioEqualizationBand4k = 7, /** - * 8: 8 kHz. + * 8: 8 kHz */ AudioEqualizationBand8k = 8, /** - * 9: 16 kHz. + * 9: 16 kHz */ AudioEqualizationBand16k = 9, } /** - * Audio reverberation types. + * Audio reverb type. */ export enum AudioReverbType { /** - * 0: The level of the dry signal (dB). The value is between -20 and 10. + * 0: Dry signal level, i.e., the original sound intensity. Range [-20,10], unit: dB. */ AudioReverbDryLevel = 0, /** - * 1: The level of the early reflection signal (wet signal) (dB). The value is between -20 and 10. + * 1: Wet signal level, i.e., early reflection signal strength. Range [-20,10], unit: dB. */ AudioReverbWetLevel = 1, /** - * 2: The room size of the reflection. The value is between 0 and 100. + * 2: Room size for the desired reverb effect. Generally, the larger the room, the stronger the reverb. Range [0,100], unit: dB. */ AudioReverbRoomSize = 2, /** - * 3: The length of the initial delay of the wet signal (ms). The value is between 0 and 200. + * 3: Initial delay length of the wet signal. Range [0,200], unit: ms. */ AudioReverbWetDelay = 3, /** - * 4: The reverberation strength. The value is between 0 and 100. + * 4: Intensity of the sustained reverb. Range [0,100]. */ AudioReverbStrength = 4, } @@ -419,7 +426,7 @@ export enum PriorityType { } /** - * The statistics of the local video stream. + * Statistics of the local video stream. */ export class LocalVideoStats { /** @@ -427,83 +434,83 @@ export class LocalVideoStats { */ uid?: number; /** - * The actual bitrate (Kbps) while sending the local video stream. This value does not include the bitrate for resending the video after packet loss. + * Actual sending bitrate (Kbps) Does not include the bitrate of retransmitted video after packet loss. 
*/ sentBitrate?: number; /** - * The actual frame rate (fps) while sending the local video stream. This value does not include the frame rate for resending the video after packet loss. + * Actual sending frame rate (fps). Does not include the frame rate of retransmitted video after packet loss. */ sentFrameRate?: number; /** - * The frame rate (fps) for capturing the local video stream. + * Frame rate of the local video capture (fps). */ captureFrameRate?: number; /** - * The width (px) for capturing the local video stream. + * Width of the local video capture (px). */ captureFrameWidth?: number; /** - * The height (px) for capturing the local video stream. + * Height of the local video capture (px). */ captureFrameHeight?: number; /** - * The frame rate (fps) adjusted by the built-in video capture adapter (regulator) of the SDK for capturing the local video stream. The regulator adjusts the frame rate of the video captured by the camera according to the video encoding configuration. + * Frame rate (fps) of the video captured by the camera after being adjusted by the SDK's built-in video capture adapter (regulator). The regulator adjusts the camera capture frame rate based on the video encoding configuration. */ regulatedCaptureFrameRate?: number; /** - * The width (px) adjusted by the built-in video capture adapter (regulator) of the SDK for capturing the local video stream. The regulator adjusts the height and width of the video captured by the camera according to the video encoding configuration. + * Width (px) of the video captured by the camera after being adjusted by the SDK's built-in video capture adapter (regulator). The regulator adjusts the camera capture resolution based on the video encoding configuration. */ regulatedCaptureFrameWidth?: number; /** - * The height (px) adjusted by the built-in video capture adapter (regulator) of the SDK for capturing the local video stream. 
The regulator adjusts the height and width of the video captured by the camera according to the video encoding configuration. + * Height (px) of the video captured by the camera after being adjusted by the SDK's built-in video capture adapter (regulator). The regulator adjusts the camera capture resolution based on the video encoding configuration. */ regulatedCaptureFrameHeight?: number; /** - * The output frame rate (fps) of the local video encoder. + * Output frame rate of the local video encoder in fps. */ encoderOutputFrameRate?: number; /** - * The width of the encoded video (px). + * Width of the encoded video (px). */ encodedFrameWidth?: number; /** - * The height of the encoded video (px). + * Height of the encoded video (px). */ encodedFrameHeight?: number; /** - * The output frame rate (fps) of the local video renderer. + * Output frame rate of the local video renderer in fps. */ rendererOutputFrameRate?: number; /** - * The target bitrate (Kbps) of the current encoder. This is an estimate made by the SDK based on the current network conditions. + * Target encoding bitrate (Kbps) of the current encoder, estimated by the SDK based on the current network conditions. */ targetBitrate?: number; /** - * The target frame rate (fps) of the current encoder. + * Target encoding frame rate (fps) of the current encoder. */ targetFrameRate?: number; /** - * The quality adaptation of the local video stream in the reported interval (based on the target frame rate and target bitrate). See QualityAdaptIndication. + * Adaptation status of local video quality (based on target frame rate and target bitrate) during the statistics interval. See QualityAdaptIndication. */ qualityAdaptIndication?: QualityAdaptIndication; /** - * The bitrate (Kbps) while encoding the local video stream. This value does not include the bitrate for resending the video after packet loss. + * Video encoding bitrate (Kbps). Does not include the bitrate of retransmitted video after packet loss. 
*/ encodedBitrate?: number; /** - * The number of the sent video frames, represented by an aggregate value. + * Number of video frames sent, cumulative value. */ encodedFrameCount?: number; /** - * The codec type of the local video. See VideoCodecType. + * Video codec type. See VideoCodecType. */ codecType?: VideoCodecType; /** - * The video packet loss rate (%) from the local client to the Agora server before applying the anti-packet loss strategies. + * Video packet loss rate (%) from the local end to the Agora edge server before anti-weak network measures. */ txPacketLossRate?: number; /** - * The brightness level of the video image captured by the local camera. See CaptureBrightnessLevelType. + * Brightness level of the locally captured video. See CaptureBrightnessLevelType. */ captureBrightnessLevel?: CaptureBrightnessLevelType; /** @@ -511,9 +518,9 @@ export class LocalVideoStats { */ dualStreamEnabled?: boolean; /** - * The local video encoding acceleration type. - * 0: Software encoding is applied without acceleration. - * 1: Hardware encoding is applied for acceleration. + * Local video encoding acceleration type. + * 0: Software encoding, no acceleration. + * 1: Hardware encoding acceleration. */ hwEncoderAccelerating?: number; /** @@ -531,47 +538,47 @@ export class LocalVideoStats { */ export class RemoteAudioStats { /** - * The user ID of the remote user. + * User ID of the remote user. */ uid?: number; /** - * The quality of the audio stream sent by the user. See QualityType. + * Audio stream quality sent by the remote user. See QualityType. */ quality?: number; /** - * The network delay (ms) from the sender to the receiver. + * Network delay from the audio sender to the receiver (ms). */ networkTransportDelay?: number; /** - * The network delay (ms) from the audio receiver to the jitter buffer. When the receiving end is an audience member and audienceLatencyLevel of ClientRoleOptions is 1, this parameter does not take effect. 
+ * Network delay from the receiver to the jitter buffer (ms). This parameter is not effective when the receiver is an audience member and audienceLatencyLevel in ClientRoleOptions is 1. */ jitterBufferDelay?: number; /** - * The frame loss rate (%) of the remote audio stream in the reported interval. + * Audio frame loss rate (%) of the remote stream during the reporting interval. */ audioLossRate?: number; /** - * The number of audio channels. + * Number of audio channels. */ numChannels?: number; /** - * The sampling rate of the received audio stream in the reported interval. + * Sample rate of the remote audio stream received during the reporting interval. */ receivedSampleRate?: number; /** - * The average bitrate (Kbps) of the received audio stream in the reported interval. + * Average bitrate (Kbps) of the remote audio stream received during the reporting interval. */ receivedBitrate?: number; /** - * The total freeze time (ms) of the remote audio stream after the remote user joins the channel. In a session, audio freeze occurs when the audio frame loss rate reaches 4%. + * Total duration (ms) of audio freeze experienced by the remote user after joining the channel. An audio freeze is counted when the audio frame loss rate exceeds 4% during the call. */ totalFrozenTime?: number; /** - * The total audio freeze time as a percentage (%) of the total time when the audio is available. The audio is considered available when the remote user neither stops sending the audio stream nor disables the audio module after joining the channel. + * Percentage (%) of total frozen time over the total effective duration of the audio. The effective duration refers to the time after the remote user joins the channel during which the audio is neither stopped nor disabled. */ frozenRate?: number; /** - * The quality of the remote audio stream in the reported interval. The quality is determined by the Agora real-time audio MOS (Mean Opinion Score) measurement method. 
The return value range is [0, 500]. Dividing the return value by 100 gets the MOS score, which ranges from 0 to 5. The higher the score, the better the audio quality. The subjective perception of audio quality corresponding to the Agora real-time audio MOS scores is as follows: MOS score Perception of audio quality Greater than 4 Excellent. The audio sounds clear and smooth. From 3.5 to 4 Good. The audio has some perceptible impairment but still sounds clear. From 3 to 3.5 Fair. The audio freezes occasionally and requires attentive listening. From 2.5 to 3 Poor. The audio sounds choppy and requires considerable effort to understand. From 2 to 2.5 Bad. The audio has occasional noise. Consecutive audio dropouts occur, resulting in some information loss. The users can communicate only with difficulty. Less than 2 Very bad. The audio has persistent noise. Consecutive audio dropouts are frequent, resulting in severe information loss. Communication is nearly impossible. + * Quality score of the remote audio stream received during the reporting interval, evaluated using Agora's real-time audio MOS (Mean Opinion Score) method. The return value ranges from [0, 500]. Divide the value by 100 to get the MOS score, which ranges from [0, 5]. Higher scores indicate better audio quality. MOS Score Audio Quality Greater than 4 Excellent audio quality, clear and smooth. 3.5 - 4 Good audio quality, occasional artifacts, still clear. 3 - 3.5 Average audio quality, occasional stutters, not very smooth, requires some effort to understand. 2.5 - 3 Poor audio quality, frequent stutters, requires concentration to understand. 2 - 2.5 Very poor audio quality, occasional noise, partial loss of meaning, difficult to communicate. Less than 2 Extremely poor audio quality, frequent noise, significant loss of meaning, communication impossible. 
*/ mosValue?: number; /** @@ -591,19 +598,20 @@ export class RemoteAudioStats { */ frozenTimeByCustom?: number; /** - * The total active time (ms) between the start of the audio call and the callback of the remote user. The active time refers to the total duration of the remote user without the mute state. + * Effective duration (ms) from the start of the audio call to this callback. + * Effective duration refers to the total time excluding when the remote user is muted. */ totalActiveTime?: number; /** - * The total duration (ms) of the remote audio stream. + * Total duration (ms) the remote audio stream was published. */ publishDuration?: number; /** - * The Quality of Experience (QoE) of the local user when receiving a remote audio stream. See ExperienceQualityType. + * Subjective experience quality of the local user when receiving remote audio. See ExperienceQualityType. */ qoeQuality?: number; /** - * Reasons why the QoE of the local user when receiving a remote audio stream is poor. See ExperiencePoorReason. + * Reason for poor subjective experience quality of the local user when receiving remote audio. See ExperiencePoorReason. */ qualityChangedReason?: number; /** @@ -611,7 +619,7 @@ export class RemoteAudioStats { */ rxAudioBytes?: number; /** - * End-to-end audio delay (in milliseconds), which refers to the time from when the audio is captured by the remote user to when it is played by the local user. + * End-to-end audio delay (ms), i.e., the total time from when the remote user captures the audio to when the local user starts playback. */ e2eDelay?: number; } @@ -621,27 +629,27 @@ export class RemoteAudioStats { */ export class RemoteVideoStats { /** - * The user ID of the remote user sending the video stream. + * User ID identifying which user's video stream it is. 
*/ uid?: number; /** - * Deprecated: In scenarios where audio and video are synchronized, you can get the video delay data from networkTransportDelay and jitterBufferDelay in RemoteAudioStats. The video delay (ms). + * Delay (ms). Deprecated: In audio-video scenarios with A/V sync, you can refer to the networkTransportDelay and jitterBufferDelay members in RemoteAudioStats for video delay data. */ delay?: number; /** - * End-to-end video latency (ms). That is, the time elapsed from the video capturing on the remote user's end to the receiving and rendering of the video on the local user's end. + * End-to-end video delay (ms). That is, the total time from when the remote user captures the video to when the local user receives and renders it. */ e2eDelay?: number; /** - * The width (pixels) of the video. + * Width of the video stream (pixels). */ width?: number; /** - * The height (pixels) of the video. + * Height of the video stream (pixels). */ height?: number; /** - * The bitrate (Kbps) of the remote video received since the last count. + * Bitrate (Kbps) received since the last report. */ receivedBitrate?: number; /** @@ -649,47 +657,54 @@ export class RemoteVideoStats { */ decoderInputFrameRate?: number; /** - * The frame rate (fps) of decoding the remote video. + * Output frame rate of the remote video decoder, in fps. */ decoderOutputFrameRate?: number; /** - * The frame rate (fps) of rendering the remote video. + * Output frame rate of the remote video renderer, in fps. */ rendererOutputFrameRate?: number; /** - * The packet loss rate (%) of the remote video. + * Remote video frame loss rate (%). */ frameLossRate?: number; /** - * The packet loss rate (%) of the remote video after using the anti-packet-loss technology. + * Remote video packet loss rate (%) after applying anti-packet-loss techniques. */ packetLossRate?: number; /** - * The type of the video stream. See VideoStreamType. + * Video stream type: high or low stream. See VideoStreamType. 
*/ rxStreamType?: VideoStreamType; /** - * The total freeze time (ms) of the remote video stream after the remote user joins the channel. In a video session where the frame rate is set to no less than 5 fps, video freeze occurs when the time interval between two adjacent renderable video frames is more than 500 ms. + * Total duration (ms) of video freeze experienced by the remote user after joining the channel. During the call, if the video frame rate is set to no less than 5 fps and the interval between two consecutive rendered frames exceeds 500 ms, it is counted as a video freeze. */ totalFrozenTime?: number; /** - * The total video freeze time as a percentage (%) of the total time the video is available. The video is considered available as long as that the remote user neither stops sending the video stream nor disables the video module after joining the channel. + * Percentage (%) of total frozen time over the total effective video duration after the remote user joins the channel. The effective duration refers to the time when the video is neither stopped nor disabled. */ frozenRate?: number; /** - * The amount of time (ms) that the audio is ahead of the video. If this value is negative, the audio is lagging behind the video. + * Time (ms) by which audio leads video. If the value is negative, it indicates that audio lags behind video. */ avSyncTimeMs?: number; /** - * The total active time (ms) of the video. As long as the remote user or host neither stops sending the video stream nor disables the video module after joining the channel, the video is available. + * Effective video duration (ms). + * The total effective video duration is the time after the remote user or host joins the channel without stopping the video stream or disabling the video module. */ totalActiveTime?: number; /** - * The total duration (ms) of the remote video stream. + * Total duration (ms) the remote video stream was published. 
 */ publishDuration?: number; /** - * @ignore + * Quality of the remote audio stream during the reporting interval. This quality is measured using Agora's real-time audio MOS (Mean Opinion Score) method. The return value ranges from [0, 500]; divide by 100 to get the MOS score, which ranges from 0 to 5. Higher scores indicate better audio quality. The subjective audio quality corresponding to Agora's real-time audio MOS score is as follows: + * Greater than 4: Excellent audio quality, clear and smooth. + * 3.5 - 4: Good audio quality, occasional artifacts, still clear. + * 3 - 3.5: Average audio quality, occasional stutters, not very smooth, requires some effort to understand. + * 2.5 - 3: Poor audio quality, frequent stutters, requires concentration to understand. + * 2 - 2.5: Very poor audio quality, occasional noise, partial loss of meaning, difficult to communicate. + * Less than 2: Extremely poor audio quality, frequent noise, significant loss of meaning, communication impossible. */ mosValue?: number; /** @@ -809,17 +824,17 @@ export class InjectStreamConfig { } /** - * Lifecycle of the CDN live video stream. + * Lifecycle of server-side transcoding streaming. * - * Deprecated + * Deprecated */ export enum RtmpStreamLifeCycleType { /** - * Bind to the channel lifecycle. If all hosts leave the channel, the CDN live streaming stops after 30 seconds. + * Bound to the channel lifecycle. When all hosts leave the channel, server-side transcoding streaming stops after 30 seconds. */ RtmpStreamLifeCycleBind2channel = 1, /** - * Bind to the owner of the RTMP stream. If the owner leaves the channel, the CDN live streaming stops immediately. + * Bound to the lifecycle of the host who started the server-side transcoding streaming. When this host leaves, the streaming stops immediately. */ RtmpStreamLifeCycleBind2owner = 2, } @@ -883,51 +898,51 @@ export class PublisherConfiguration { } /** - * The camera direction. + * Camera direction. 
*/ export enum CameraDirection { /** - * 0: The rear camera. + * 0: Rear camera. */ CameraRear = 0, /** - * 1: (Default) The front camera. + * 1: (Default) Front camera. */ CameraFront = 1, } /** - * The cloud proxy type. + * Cloud proxy type. */ export enum CloudProxyType { /** - * 0: The automatic mode. The SDK has this mode enabled by default. In this mode, the SDK attempts a direct connection to SD-RTN™ and automatically switches to TCP/TLS 443 if the attempt fails. + * 0: Automatic mode. This is the default mode. In this mode, the SDK first tries to connect via SD-RTN™. If it fails, it automatically switches to TLS 443. */ NoneProxy = 0, /** - * 1: The cloud proxy for the UDP protocol, that is, the Force UDP cloud proxy mode. In this mode, the SDK always transmits data over UDP. + * 1: UDP cloud proxy, i.e., Force UDP mode. In this mode, the SDK always transmits data via UDP. */ UdpProxy = 1, /** - * 2: The cloud proxy for the TCP (encryption) protocol, that is, the Force TCP cloud proxy mode. In this mode, the SDK always transmits data over TCP/TLS 443. + * 2: TCP (encrypted) cloud proxy, i.e., Force TCP mode. In this mode, the SDK always transmits data via TLS 443. */ TcpProxy = 2, } /** - * The camera capturer preference. + * Camera capture configuration. */ export class CameraCapturerConfiguration { /** - * (Optional) The camera direction. See CameraDirection. + * (Optional) Camera direction. See CameraDirection. */ cameraDirection?: CameraDirection; /** - * (Optional) The camera focal length type. See CameraFocalLengthType. - * To set the focal length type of the camera, it is only supported to specify the camera through cameraDirection, and not supported to specify it through cameraId. 
- * For iOS devices equipped with multi-lens rear cameras, such as those featuring dual-camera (wide-angle and ultra-wide-angle) or triple-camera (wide-angle, ultra-wide-angle, and telephoto), you can use one of the following methods to capture video with an ultra-wide-angle perspective: - * Method one: Set this parameter to CameraFocalLengthUltraWide (2) (ultra-wide lens). - * Method two: Set this parameter to CameraFocalLengthDefault (0) (standard lens), then call setCameraZoomFactor to set the camera's zoom factor to a value less than 1.0, with the minimum setting being 0.5. The difference is that the size of the ultra-wide angle in method one is not adjustable, whereas method two supports adjusting the camera's zoom factor freely. + * (Optional) Camera focal length type. See CameraFocalLengthType. + * To set the camera focal length type, only cameraDirection is supported. cameraId is not supported. + * Some iOS devices have rear cameras composed of multiple lenses, such as dual (wide and ultra-wide) or triple (wide, ultra-wide, and telephoto) cameras. For such composite lenses with ultra-wide capability, you can achieve ultra-wide capture in either of the following ways: + * Option 1: Set this parameter to CameraFocalLengthUltraWide (2) (ultra-wide lens). + * Option 2: Set this parameter to CameraFocalLengthDefault (0) (standard lens), then call setCameraZoomFactor to set the camera zoom factor to a value less than 1.0, with a minimum of 0.5. The difference is that Option 1 provides a fixed ultra-wide angle, while Option 2 allows flexible adjustment of the zoom factor. */ cameraFocalLengthType?: CameraFocalLengthType; /** @@ -935,19 +950,19 @@ export class CameraCapturerConfiguration { */ deviceId?: string; /** - * (Optional) The camera ID. The default value is the camera ID of the front camera. You can get the camera ID through the Android native system API, see and for details. + * (Optional) Camera ID. Defaults to the ID of the front-facing camera. 
You can obtain the camera ID using the Android native system API. See [Camera.open()](https://developer.android.google.cn/reference/android/hardware/Camera#open(int)) and [CameraManager.getCameraIdList()](https://developer.android.google.cn/reference/android/hardware/camera2/CameraManager?hl=en#getCameraIdList). * This parameter is for Android only. - * This parameter and cameraDirection are mutually exclusive in specifying the camera; you can choose one based on your needs. The differences are as follows: - * Specifying the camera via cameraDirection is more straightforward. You only need to indicate the camera direction (front or rear), without specifying a specific camera ID; the SDK will retrieve and confirm the actual camera ID through Android native system APIs. - * Specifying via cameraId allows for more precise identification of a particular camera. For devices with multiple cameras, where cameraDirection cannot recognize or access all available cameras, it is recommended to use cameraId to specify the desired camera ID directly. + * Both this parameter and cameraDirection are used to specify the camera and are mutually exclusive. You can choose either based on your needs. The differences are as follows: + * Using cameraDirection is simpler. You only need to specify the camera direction (front or rear), without specifying the exact camera ID. The SDK will use system APIs to retrieve and determine the actual camera ID. + * Using cameraId allows you to specify a particular camera more precisely. On devices with multiple cameras, cameraDirection may not be able to identify or access all available cameras. In such cases, it is recommended to use cameraId to directly specify the desired camera ID. */ cameraId?: string; /** - * (Optional) Whether to follow the video aspect ratio set in setVideoEncoderConfiguration : true : (Default) Follow the set video aspect ratio. 
The SDK crops the captured video according to the set video aspect ratio and synchronously changes the local preview screen and the video frame in onCaptureVideoFrame and onPreEncodeVideoFrame. false : Do not follow the system default audio playback device. The SDK does not change the aspect ratio of the captured video frame. + * (Optional) Whether to follow the video aspect ratio set in setVideoEncoderConfiguration : true : (Default) Follow. The SDK crops the captured video to match the configured aspect ratio, and synchronously updates the local preview, onCaptureVideoFrame, and onPreEncodeVideoFrame. false : Do not follow. The SDK does not change the aspect ratio of the captured video frame. */ followEncodeDimensionRatio?: boolean; /** - * (Optional) The format of the video frame. See VideoFormat. + * (Optional) Video frame format. See VideoFormat. */ format?: VideoFormat; } @@ -1095,25 +1110,25 @@ export class ScreenCaptureSourceInfo { } /** - * The advanced options for audio. + * Advanced options for audio. */ export class AdvancedAudioOptions { /** - * The number of channels for audio preprocessing. See AudioProcessingChannels. + * Number of channels for audio preprocessing. See AudioProcessingChannels. */ audioProcessingChannels?: number; } /** - * Image configurations. + * Settings options for placeholder images. */ export class ImageTrackOptions { /** - * The image URL. Supported formats of images include JPEG, JPG, PNG and GIF. This method supports adding an image from the local absolute or relative file path. On the Android platform, adding images from /assets/ is not supported. + * URL of the placeholder image. Currently supports JPEG, JPG, PNG, and GIF formats. You can add a placeholder image from a local absolute or relative path. On Android, adding placeholder images from /assets/ is not supported. */ imageUrl?: string; /** - * The frame rate of the video streams being published. The value range is [1,30]. The default value is 1. 
+ * Video frame rate, ranging from [1,30]. Default is 1. */ fps?: number; /** @@ -1123,37 +1138,37 @@ export class ImageTrackOptions { } /** - * The channel media options. + * Channel media configuration options. * - * Agora supports publishing multiple audio streams and one video stream at the same time and in the same RtcConnection. For example, publishMicrophoneTrack, publishCustomAudioTrack, and publishMediaPlayerAudioTrack can be set as true at the same time, but only one of publishCameraTrack, publishScreenCaptureVideo, publishCustomVideoTrack, or publishEncodedVideoTrack can be set as true. Agora recommends that you set member parameter values yourself according to your business scenario, otherwise the SDK will automatically assign values to member parameters. + * Publishing multiple audio streams and one video stream at the same time is supported in the same RtcConnection. For example, publishMicrophoneTrack, publishCustomAudioTrack, and publishMediaPlayerAudioTrack can be set to true at the same time, but only one of publishCameraTrack, publishScreenCaptureVideo, publishCustomVideoTrack, and publishEncodedVideoTrack can be set to true. It is recommended that you configure the member parameters based on your business scenario. Otherwise, the SDK will automatically assign values to the member parameters. */ export class ChannelMediaOptions { /** - * Whether to publish the video captured by the camera: true : Publish the video captured by the camera. false : Do not publish the video captured by the camera. + * Sets whether to publish the video captured by the camera: true : Publish the video captured by the camera. false : Do not publish the video captured by the camera. */ publishCameraTrack?: boolean; /** - * Whether to publish the video captured by the second camera: true : Publish the video captured by the second camera. false : Do not publish the video captured by the second camera. + * Sets whether to publish the video captured by the secondary camera: true : Publish the video captured by the secondary camera. false : Do not publish the video captured by the secondary camera. 
*/ publishSecondaryCameraTrack?: boolean; /** - * Whether to publish the video captured by the third camera: true : Publish the video captured by the third camera. false : Do not publish the video captured by the third camera. This parameter is for Android only. + * This parameter is only applicable on Android. Sets whether to publish the video captured by the third camera: true : Publish the video captured by the third camera. false : Do not publish the video captured by the third camera. */ publishThirdCameraTrack?: boolean; /** - * Whether to publish the video captured by the fourth camera: true : Publish the video captured by the fourth camera. false : Do not publish the video captured by the fourth camera. This parameter is for Android only. + * This parameter is only applicable on Android. Sets whether to publish the video captured by the fourth camera: true : Publish the video captured by the fourth camera. false : Do not publish the video captured by the fourth camera. */ publishFourthCameraTrack?: boolean; /** - * Whether to publish the audio captured by the microphone: true : Publish the audio captured by the microphone. false : Do not publish the audio captured by the microphone. + * Sets whether to publish the audio captured by the microphone: true : Publish the audio captured by the microphone. false : Do not publish the audio captured by the microphone. */ publishMicrophoneTrack?: boolean; /** - * Whether to publish the audio captured from the screen: true : Publish the audio captured from the screen. false : Publish the audio captured from the screen. + * Sets whether to publish the audio captured from the screen: true : Publish the screen-captured audio. false : Do not publish the screen-captured audio. */ publishScreenCaptureAudio?: boolean; /** - * Whether to publish the video captured from the screen: true : Publish the video captured from the screen. false : Do not publish the video captured from the screen. 
+ * Sets whether to publish the video captured from the screen: true : Publish the screen-captured video. false : Do not publish the screen-captured video. */ publishScreenCaptureVideo?: boolean; /** @@ -1161,7 +1176,7 @@ export class ChannelMediaOptions { */ publishScreenTrack?: boolean; /** - * Whether to publish the video captured from the second screen: true : Publish the video captured from the second screen. false : Do not publish the video captured from the second screen. + * Sets whether to publish the video captured from the secondary screen: true : Publish the video captured from the secondary screen. false : Do not publish the video captured from the secondary screen. */ publishSecondaryScreenTrack?: boolean; /** @@ -1173,51 +1188,51 @@ export class ChannelMediaOptions { */ publishFourthScreenTrack?: boolean; /** - * Whether to publish the audio captured from a custom source: true : Publish the audio captured from the custom source. false : Do not publish the captured audio from a custom source. + * Sets whether to publish custom-captured audio: true : Publish the custom-captured audio. false : Do not publish the custom-captured audio. */ publishCustomAudioTrack?: boolean; /** - * The ID of the custom audio track to be published. The default value is 0. You can obtain the custom audio track ID through the createCustomAudioTrack method. + * The ID of the custom audio track to be published. The default value is 0. You can get the custom audio track ID using the createCustomAudioTrack method. */ publishCustomAudioTrackId?: number; /** - * Whether to publish the video captured from a custom source: true : Publish the video captured from the custom source. false : Do not publish the captured video from a custom source. + * Sets whether to publish custom-captured video: true : Publish the custom-captured video. false : Do not publish the custom-captured video. 
*/ publishCustomVideoTrack?: boolean; /** - * Whether to publish the encoded video: true : Publish the encoded video. false : Do not publish the encoded video. + * Sets whether to publish the encoded video: true : Publish the encoded video. false : Do not publish the encoded video. */ publishEncodedVideoTrack?: boolean; /** - * Whether to publish the audio from the media player: true : Publish the audio from the media player. false : Do not publish the audio from the media player. + * Sets whether to publish the audio from the media player: true : Publish the media player audio. false : Do not publish the media player audio. */ publishMediaPlayerAudioTrack?: boolean; /** - * Whether to publish the video from the media player: true : Publish the video from the media player. false : Do not publish the video from the media player. + * Sets whether to publish the video from the media player: true : Publish the media player video. false : Do not publish the media player video. */ publishMediaPlayerVideoTrack?: boolean; /** - * Whether to publish the local transcoded video: true : Publish the local transcoded video. false : Do not publish the local transcoded video. + * Sets whether to publish the local transcoded video: true : Publish the local transcoded video. false : Do not publish the local transcoded video. */ publishTranscodedVideoTrack?: boolean; /** - * Whether to publish the mixed audio track: true : Publish the mixed audio track. false : Do not publish the mixed audio track. + * Sets whether to publish the local audio mixing: true : Publish the local audio mixing. false : Do not publish the local audio mixing. */ publishMixedAudioTrack?: boolean; /** - * @ignore + * Sets whether to publish the video processed by the voice sync plugin: true : Publish the video processed by the voice sync plugin. false : (Default) Do not publish the video processed by the voice sync plugin. 
*/ publishLipSyncTrack?: boolean; /** - * Whether to automatically subscribe to all remote audio streams when the user joins a channel: true : Subscribe to all remote audio streams. false : Do not automatically subscribe to any remote audio streams. + * Sets whether to automatically subscribe to all audio streams: true : Automatically subscribe to all audio streams. false : Do not automatically subscribe to any audio streams. */ autoSubscribeAudio?: boolean; /** - * Whether to automatically subscribe to all remote video streams when the user joins the channel: true : Subscribe to all remote video streams. false : Do not automatically subscribe to any remote video streams. + * Sets whether to automatically subscribe to all video streams: true : Automatically subscribe to all video streams. false : Do not automatically subscribe to any video streams. */ autoSubscribeVideo?: boolean; /** - * Whether to enable audio capturing or playback: true : Enable audio capturing or playback. false : Do not enable audio capturing or playback. If you need to publish the audio streams captured by your microphone, ensure this parameter is set as true. + * If you need to publish the audio stream captured by the microphone, make sure this parameter is set to true. Sets whether to enable audio recording or playback: true : Enable audio recording or playback. false : Do not enable audio recording or playback. */ enableAudioRecordingOrPlayout?: boolean; /** @@ -1225,23 +1240,24 @@ export class ChannelMediaOptions { */ publishMediaPlayerId?: number; /** - * The user role. See ClientRoleType. + * User role. See ClientRoleType. */ clientRoleType?: ClientRoleType; /** - * The latency level of an audience member in interactive live streaming. See AudienceLatencyLevelType. + * Audience latency level. See AudienceLatencyLevelType. */ audienceLatencyLevel?: AudienceLatencyLevelType; /** - * The default video-stream type. See VideoStreamType. 
+ * Default video stream type to subscribe to: VideoStreamType. */ defaultVideoStreamType?: VideoStreamType; /** - * The channel profile. See ChannelProfileType. + * Channel usage scenario. See ChannelProfileType. */ channelProfile?: ChannelProfileType; /** - * Delay (in milliseconds) for sending audio frames. You can use this parameter to set the delay of the audio frames that need to be sent, to ensure audio and video synchronization. To switch off the delay, set the value to 0. + * Delay (in milliseconds) for sending audio frames. You can use this parameter to set the delay for sending audio frames to ensure audio-video sync. + * To disable the delay, set this parameter to 0. */ audioDelayMs?: number; /** @@ -1249,9 +1265,9 @@ export class ChannelMediaOptions { */ mediaPlayerAudioDelayMs?: number; /** - * (Optional) The token generated on your server for authentication. - * This parameter takes effect only when calling updateChannelMediaOptions or updateChannelMediaOptionsEx. - * Ensure that the App ID, channel name, and user name used for creating the token are the same as those used by the initialize method for initializing the RTC engine, and those used by the joinChannel and joinChannelEx methods for joining the channel. + * (Optional) A dynamic key generated on the server for authentication. See [Token Authentication](https://doc.shengwang.cn/doc/rtc/rn/basic-features/token-authentication). + * This parameter only takes effect when calling updateChannelMediaOptions or updateChannelMediaOptionsEx. + * Make sure the App ID, channel name, and user name used to generate the token are consistent with those used in the initialize method to initialize the engine and in the joinChannel or joinChannelEx method to join the channel. */ token?: string; /** @@ -1259,21 +1275,20 @@ export class ChannelMediaOptions { */ enableBuiltInMediaEncryption?: boolean; /** - * Whether to publish the sound of a metronome to remote users: true : Publish processed audio frames. 
Both the local user and remote users can hear the metronome. false : Do not publish the sound of the metronome. Only the local user can hear the metronome. + * Sets whether to publish the virtual metronome sound to remote users: true : Publish. Both local and remote users can hear the metronome. false : Do not publish. Only the local user can hear the metronome. */ publishRhythmPlayerTrack?: boolean; /** - * Whether to enable interactive mode: true : Enable interactive mode. Once this mode is enabled and the user role is set as audience, the user can receive remote video streams with low latency. false :Do not enable interactive mode. If this mode is disabled, the user receives the remote video streams in default settings. - * This parameter only applies to co-streaming scenarios. The cohosts need to call the joinChannelEx method to join the other host's channel as an audience member, and set isInteractiveAudience to true. - * This parameter takes effect only when the user role is ClientRoleAudience. + * This parameter is used for cross-room co-hosting scenarios. The co-host needs to call the joinChannelEx method to join the other room as an audience member and set isInteractiveAudience to true. + * This parameter takes effect only when the user role is ClientRoleAudience. Whether to enable interactive audience mode: true : Enable interactive audience mode. Once enabled, the local user, as an interactive audience member, receives low-latency and smooth remote video. false : Do not enable interactive audience mode. The local user, as a regular audience member, receives remote video with default settings. */ isInteractiveAudience?: boolean; /** - * The video track ID returned by calling the createCustomVideoTrack method. The default value is 0. + * The video track ID returned by the createCustomVideoTrack method. The default value is 0. 
*/ customVideoTrackId?: number; /** - * Whether the audio stream being published is filtered according to the volume algorithm: true : The audio stream is filtered. If the audio stream filter is not enabled, this setting does not takes effect. false : The audio stream is not filtered. If you need to enable this function, contact. + * To enable this feature, please [contact sales](https://www.shengwang.cn/contact-sales/). Sets whether the current audio stream participates in stream selection based on volume algorithm. true : Participate in volume-based stream selection. If the volume-based stream selection feature is not enabled, this parameter has no effect. false : Do not participate in volume-based stream selection. */ isAudioFilterable?: boolean; /** @@ -1281,45 +1296,49 @@ export class ChannelMediaOptions { */ parameters?: string; /** - * @ignore + * Permissions and system requirements: + * Android: Android 7.0 or higher (API level 24 or above), requires ACCESS_NETWORK_STATE and CHANGE_NETWORK_STATE permissions. + * iOS: iOS 12.0 or higher. + * macOS: 10.14 or higher. + * Windows: Windows Vista or higher. Whether to enable multipath transmission: true : Enable multipath transmission. false : Disable multipath transmission. */ enableMultipath?: boolean; /** - * @ignore + * Uplink transmission mode. See MultipathMode. When using this parameter, make sure enableMultipath is set to true. */ uplinkMultipathMode?: MultipathMode; /** - * @ignore + * Downlink transmission mode. See MultipathMode. When using this parameter, make sure enableMultipath is set to true. */ downlinkMultipathMode?: MultipathMode; /** - * @ignore + * Preferred transmission path type. See MultipathType. When using this parameter, make sure enableMultipath is set to true. */ preferMultipathType?: MultipathType; } /** - * The cloud proxy type. + * Proxy type. */ export enum ProxyType { /** - * 0: Reserved for future use. + * 0: Reserved parameter, not supported yet. 
*/ NoneProxyType = 0, /** - * 1: The cloud proxy for the UDP protocol, that is, the Force UDP cloud proxy mode. In this mode, the SDK always transmits data over UDP. + * 1: Cloud proxy using UDP protocol, i.e., Force UDP cloud proxy mode. In this mode, the SDK always transmits data via UDP protocol. */ UdpProxyType = 1, /** - * 2: The cloud proxy for the TCP (encryption) protocol, that is, the Force TCP cloud proxy mode. In this mode, the SDK always transmits data over TCP/TLS 443. + * 2: Cloud proxy using TCP (encrypted) protocol, i.e., Force TCP cloud proxy mode. In this mode, the SDK always transmits data via TLS 443. */ TcpProxyType = 2, /** - * 3: Reserved for future use. + * 3: Reserved parameter, not supported yet. */ LocalProxyType = 3, /** - * 4: Automatic mode. In this mode, the SDK attempts a direct connection to SD-RTN™ and automatically switches to TCP/TLS 443 if the attempt fails. + * 4: Auto mode. In this mode, the SDK first tries to connect to SD-RTN™. If the connection fails, it automatically switches to TLS 443. */ TcpProxyAutoFallbackType = 4, /** @@ -1333,73 +1352,73 @@ export enum ProxyType { } /** - * The type of the advanced feature. + * @ignore */ export enum FeatureType { /** - * 1: Virtual background. + * @ignore */ VideoVirtualBackground = 1, /** - * 2: Image enhancement. + * @ignore */ VideoBeautyEffect = 2, } /** - * The options for leaving a channel. + * Options for leaving the channel. */ export class LeaveChannelOptions { /** - * Whether to stop playing and mixing the music file when a user leaves the channel. true : (Default) Stop playing and mixing the music file. false : Do not stop playing and mixing the music file. + * Whether to stop playing music files and audio mixing when leaving the channel: true : (default) Stop playing music files and audio mixing. false : Do not stop playing music files and audio mixing. */ stopAudioMixing?: boolean; /** - * Whether to stop playing all audio effects when a user leaves the channel. 
true : (Default) Stop playing all audio effects. false : Do not stop playing any audio effect. + * Whether to stop playing sound effects when leaving the channel: true : (default) Stop playing sound effects. false : Do not stop playing sound effects. */ stopAllEffect?: boolean; /** - * Whether to stop microphone recording when a user leaves the channel. true : (Default) Stop microphone recording. false : Do not stop microphone recording. + * Whether to stop microphone capture when leaving the channel: true : (default) Stop microphone capture. false : Do not stop microphone capture. */ stopMicrophoneRecording?: boolean; } /** - * The SDK uses the IRtcEngineEventHandler interface to send event notifications to your app. Your app can get those notifications through methods that inherit this interface. + * The IRtcEngineEventHandler interface class is used by the SDK to send event notifications to the App. The App obtains SDK event notifications by inheriting methods of this interface class. * - * All methods in this interface have default (empty) implementation. You can choose to inherit events related to your app scenario. - * In the callbacks, avoid implementing time-consuming tasks or calling APIs that may cause thread blocking (such as sendMessage). Otherwise, the SDK may not work properly. - * The SDK no longer catches exceptions in the code logic that developers implement themselves in IRtcEngineEventHandler class. You need to handle this exception yourself, otherwise the app may crash when the exception occurs. + * All methods of this interface class have default (empty) implementations. The App can choose to inherit only the events it cares about. + * In the callback methods, the App should not perform time-consuming operations or call APIs that may cause blocking (such as sendMessage), otherwise it may affect the operation of the SDK. 
+ * The SDK no longer catches exceptions in the code logic implemented by the developer in the callbacks of the IRtcEngineEventHandler class. You need to handle such exceptions yourself, otherwise the App may crash when exceptions occur. */ export interface IRtcEngineEventHandler { /** - * Occurs when a user joins a channel. + * Occurs when a user joins a channel successfully. * - * This callback notifies the application that a user joins a specified channel. + * This callback indicates that the client successfully joined the specified channel. * * @param connection The connection information. See RtcConnection. - * @param elapsed The time elapsed (ms) from the local user calling joinChannel until the SDK triggers this callback. + * @param elapsed The time elapsed (ms) from calling joinChannel until this event occurs. */ onJoinChannelSuccess?(connection: RtcConnection, elapsed: number): void; /** - * Occurs when a user rejoins the channel. + * Occurs when the user successfully rejoins the channel. * - * @param connection The connection information. See RtcConnection. - * @param elapsed Time elapsed (ms) from the local user calling joinChannel until the SDK triggers this callback. + * @param connection Connection information. See RtcConnection. + * @param elapsed Time elapsed (ms) from calling joinChannel to the triggering of this callback. */ onRejoinChannelSuccess?(connection: RtcConnection, elapsed: number): void; /** - * Reports the proxy connection state. + * Callback for proxy connection status. * - * You can use this callback to listen for the state of the SDK connecting to a proxy. For example, when a user calls setCloudProxy and joins a channel successfully, the SDK triggers this callback to report the user ID, the proxy type connected, and the time elapsed fromthe user calling joinChannel until this callback is triggered. + * You can use this callback to listen for the SDK's proxy connection status. 
For example, when a user calls setCloudProxy to set a proxy and successfully joins a channel, the SDK triggers this callback to report the user ID, type of connected proxy, and the time elapsed from calling joinChannel to triggering this callback. * - * @param channel The channel name. - * @param uid The user ID. - * @param proxyType The proxy type connected. See ProxyType. - * @param localProxyIp Reserved for future use. - * @param elapsed The time elapsed (ms) from the user calling joinChannel until this callback is triggered. + * @param channel Channel name. + * @param uid User ID + * @param proxyType Type of connected proxy. See ProxyType. + * @param localProxyIp Reserved parameter, not supported yet. + * @param elapsed Time elapsed (in milliseconds) from calling joinChannel to the SDK triggering this callback. */ onProxyConnected?( channel: string, @@ -1410,25 +1429,25 @@ export interface IRtcEngineEventHandler { ): void; /** - * Reports an error during SDK runtime. + * Occurs when an error is reported. * - * This callback indicates that an error (concerning network or media) occurs during SDK runtime. In most cases, the SDK cannot fix the issue and resume running. The SDK requires the app to take action or informs the user about the issue. + * This callback indicates that a network or media-related error occurred during SDK runtime. In most cases, errors reported by the SDK mean it cannot recover automatically and requires app intervention or user notification. * - * @param err Error code. See ErrorCodeType. - * @param msg The error message. + * @param err The error code. See ErrorCodeType. + * @param msg The error description. */ onError?(err: ErrorCodeType, msg: string): void; /** - * Reports the statistics of the audio stream sent by each remote user. + * Reports the audio quality of a remote user. * - * Deprecated: Use onRemoteAudioStats instead. 
The SDK triggers this callback once every two seconds to report the audio quality of each remote user who is sending an audio stream. If a channel has multiple users sending audio streams, the SDK triggers this callback as many times. + * Deprecated: Use onRemoteAudioStats instead. This callback reports the audio quality of a remote user during a call. It is triggered every 2 seconds for each remote user/host. If there are multiple remote users/hosts, the callback is triggered multiple times every 2 seconds. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The user ID of the remote user sending the audio stream. - * @param quality Audio quality of the user. See QualityType. - * @param delay The network delay (ms) from the sender to the receiver, including the delay caused by audio sampling pre-processing, network transmission, and network jitter buffering. - * @param lost The packet loss rate (%) of the audio packet sent from the remote user to the receiver. + * @param connection Connection information. See RtcConnection. + * @param remoteUid The user ID of the sender of the audio stream. + * @param quality The audio quality. See QualityType. + * @param delay The delay (ms) from the sender to the receiver, including pre-processing, network transmission, and jitter buffer delay. + * @param lost The packet loss rate (%) from the sender to the receiver. */ onAudioQuality?( connection: RtcConnection, @@ -1439,26 +1458,28 @@ export interface IRtcEngineEventHandler { ): void; /** - * Reports the last mile network probe result. + * Callback for the last mile uplink and downlink quality probe result before a call. * - * The SDK triggers this callback within 30 seconds after the app calls startLastmileProbeTest. + * After calling startLastmileProbeTest, the SDK returns this callback within approximately 30 seconds. * - * @param result The uplink and downlink last-mile network probe test result. See LastmileProbeResult. 
+ * @param result Last mile uplink and downlink quality probe result. See LastmileProbeResult. */ onLastmileProbeResult?(result: LastmileProbeResult): void; /** - * Reports the volume information of users. + * Occurs when the SDK reports the volume of users. * - * By default, this callback is disabled. You can enable it by calling enableAudioVolumeIndication. Once this callback is enabled and users send streams in the channel, the SDK triggers the onAudioVolumeIndication callback according to the time interval set in enableAudioVolumeIndication. The SDK triggers two independent onAudioVolumeIndication callbacks simultaneously, which separately report the volume information of the local user who sends a stream and the remote users (up to three) whose instantaneous volume is the highest. Once this callback is enabled, if the local user calls the muteLocalAudioStream method to mute, the SDK continues to report the volume indication of the local user. If a remote user whose volume is one of the three highest in the channel stops publishing the audio stream for 20 seconds, the callback excludes this user's information; if all remote users stop publishing audio streams for 20 seconds, the SDK stops triggering the callback for remote users. + * This callback is disabled by default. You can enable it by calling enableAudioVolumeIndication. Once enabled, as long as there are users publishing streams in the channel, the SDK triggers the onAudioVolumeIndication callback at the time interval set in enableAudioVolumeIndication after joining the channel. Two onAudioVolumeIndication callbacks are triggered each time: one reports the volume information of the local publishing user, and the other reports the volume information of the remote users (up to 3) with the highest instantaneous volume. After this feature is enabled, if a user mutes themselves (by calling muteLocalAudioStream), the SDK continues to report the local user's volume indication callback. 
+ * If a remote user with the highest instantaneous volume mutes themselves, they will no longer be included in the remote volume indication callback after 20 seconds. If all remote users mute themselves, the SDK stops reporting remote volume indication callbacks after 20 seconds. * - * @param connection The connection information. See RtcConnection. - * @param speakers The volume information of the users. See AudioVolumeInfo. An empty speakers array in the callback indicates that no remote user is in the channel or is sending a stream. - * @param speakerNumber The total number of users. - * In the callback for the local user, if the local user is sending streams, the value of speakerNumber is 1. - * In the callback for remote users, the value range of speakerNumber is [0,3]. If the number of remote users who send streams is greater than or equal to three, the value of speakerNumber is 3. - * @param totalVolume The volume of the speaker. The value range is [0,255]. - * In the callback for the local user, totalVolume is the volume of the local user who sends a stream. In the callback for remote users, totalVolume is the sum of the volume of all remote users (up to three) whose instantaneous volume is the highest. + * @param connection Connection information. See RtcConnection. + * @param speakers User volume information. See the AudioVolumeInfo array. If speakers is empty, it means no remote users are publishing streams or there are no remote users. + * @param speakerNumber Number of users. + * In the local user's callback, as long as the local user is publishing, speakerNumber is always 1. + * In the remote users' callback, the value range of speakerNumber is [0,3]. If there are more than 3 remote publishing users, speakerNumber is 3 in this callback. + * @param totalVolume Total mixed volume, range [0,255]. + * In the local user's callback, totalVolume is the volume of the local publishing user. 
+ * In the remote users' callback, totalVolume is the total mixed volume of the remote users (up to 3) with the highest instantaneous volume. */ onAudioVolumeIndication?( connection: RtcConnection, @@ -1468,20 +1489,20 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when a user leaves a channel. + * Occurs when a user leaves the channel. * - * You can obtain information such as the total duration of a call, and the data traffic that the SDK transmits and receives. + * You can use this callback to get information such as the total call duration and the amount of data sent and received by the SDK during the call. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. * @param stats Call statistics. See RtcStats. */ onLeaveChannel?(connection: RtcConnection, stats: RtcStats): void; /** - * Reports the statistics about the current call. + * Reports the statistics of the current call. * - * @param connection The connection information. See RtcConnection. - * @param stats Statistics of the RTC engine. See RtcStats. + * @param connection Connection information. See RtcConnection. + * @param stats RTC engine statistics. See RtcStats. */ onRtcStats?(connection: RtcConnection, stats: RtcStats): void; @@ -1495,31 +1516,31 @@ export interface IRtcEngineEventHandler { ): void; /** - * Reports the playback progress of a music file. + * Reports the playback progress of the music file. * - * After you called the startAudioMixing method to play a music file, the SDK triggers this callback every two seconds to report the playback progress. + * After you call the startAudioMixing method to play a music file, the SDK triggers this callback every second to report the current playback progress. * - * @param position The playback progress (ms). + * @param position Current playback progress of the music file in ms. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ onAudioMixingPositionChanged?(position: number): void; /** - * Occurs when the playback of the local music file finishes. + * Occurs when the local music file playback ends. * - * Deprecated: Use onAudioMixingStateChanged instead. After you call startAudioMixing to play a local music file, this callback occurs when the playback finishes. If the call of startAudioMixing fails, the error code WARN_AUDIO_MIXING_OPEN_ERROR is returned. + * Deprecated: Use onAudioMixingStateChanged instead. This callback is triggered when playback of the local music file started by startAudioMixing ends. If startAudioMixing fails, it returns the error code WARN_AUDIO_MIXING_OPEN_ERROR. */ onAudioMixingFinished?(): void; /** - * Occurs when the playback of the local music file finishes. + * Callback when the local audio effect file finishes playing. * - * This callback occurs when the local audio effect file finishes playing. + * This callback is triggered when the audio effect finishes playing. * - * @param soundId The ID of the audio effect. The unique ID of each audio effect file. + * @param soundId The ID of the specified audio effect. Each audio effect has a unique ID. */ onAudioEffectFinished?(soundId: number): void; @@ -1533,14 +1554,16 @@ export interface IRtcEngineEventHandler { ): void; /** - * Reports the last mile network quality of each user in the channel. + * Callback for the last mile uplink and downlink network quality report of each user during a call. * - * This callback reports the last mile network conditions of each user in the channel. Last mile refers to the connection between the local device and Agora's edge server. The SDK triggers this callback once every two seconds. If a channel includes multiple users, the SDK triggers this callback as many times. 
This callback provides feedback on network quality through sending and receiving broadcast packets within the channel. Excessive broadcast packets can lead to broadcast storms. To prevent broadcast storms from causing a large amount of data transmission within the channel, this callback supports feedback on the network quality of up to 4 remote hosts simultaneously by default. txQuality is Unknown when the user is not sending a stream; rxQuality is Unknown when the user is not receiving a stream. + * This callback describes the last mile network status of each user during a call, where the last mile refers to the network status from the device to the Agora edge server. + * This callback is triggered every 2 seconds. If there are multiple remote users, it will be triggered multiple times every 2 seconds. + * This callback reports network quality through broadcast packets in the channel. Excessive broadcast packets may cause a broadcast storm. To prevent a broadcast storm from causing large data transmission in the channel, this callback supports reporting the network quality of up to 4 remote hosts simultaneously by default. When the user does not send streams, txQuality is Unknown; when the user does not receive streams, rxQuality is Unknown. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The user ID. The network quality of the user with this user ID is reported. If the uid is 0, the local network quality is reported. - * @param txQuality Uplink network quality rating of the user in terms of the transmission bit rate, packet loss rate, average RTT (Round-Trip Time) and jitter of the uplink network. This parameter is a quality rating helping you understand how well the current uplink network conditions can support the selected video encoder configuration. 
For example, a 1000 Kbps uplink network may be adequate for video frames with a resolution of 640 × 480 and a frame rate of 15 fps in the LIVE_BROADCASTING profile, but might be inadequate for resolutions higher than 1280 × 720. QualityUnknown (0): The quality is unknown. QualityExcellent (1): The quality is excellent. QualityGood (2): The network quality seems excellent, but the bitrate can be slightly lower than excellent. QualityPoor (3): Users can feel the communication is slightly impaired. QualityBad (4): Users cannot communicate smoothly. QualityVbad (5): The quality is so bad that users can barely communicate. QualityDown (6): The network is down, and users cannot communicate at all. QualityDetecting (8): The last-mile probe test is in progress. - * @param rxQuality Downlink network quality rating of the user in terms of packet loss rate, average RTT, and jitter of the downlink network. QualityUnknown (0): The quality is unknown. QualityExcellent (1): The quality is excellent. QualityGood (2): The network quality seems excellent, but the bitrate can be slightly lower than excellent. QualityPoor (3): Users can feel the communication is slightly impaired. QualityBad (4): Users cannot communicate smoothly. QualityVbad (5): The quality is so bad that users can barely communicate. QualityDown (6): The network is down, and users cannot communicate at all. QualityDetecting (8): The last-mile probe test is in progress. + * @param connection Connection information. See RtcConnection. + * @param remoteUid User ID. Indicates the network quality of the user with this ID reported by the callback. If the uid is 0, it returns the network quality of the local user. + * @param txQuality The user's uplink network quality, calculated based on the sending bitrate, uplink packet loss rate, average round-trip time, and network jitter. This value represents the current uplink network quality and helps determine whether the current video encoding settings can be supported. 
For example, if the uplink bitrate is 1000 Kbps, it can support a resolution of 640 × 480 and frame rate of 15 fps in a live broadcast scenario, but may struggle to support 1280 × 720 resolution. + * @param rxQuality The user's downlink network quality, calculated based on the downlink packet loss rate, average round-trip time, and network jitter. */ onNetworkQuality?( connection: RtcConnection, @@ -1555,32 +1578,33 @@ export interface IRtcEngineEventHandler { onIntraRequestReceived?(connection: RtcConnection): void; /** - * Occurs when the uplink network information changes. + * Callback when uplink network information changes. * - * The SDK triggers this callback when the uplink network information changes. This callback only applies to scenarios where you push externally encoded video data in H.264 format to the SDK. + * The SDK triggers this callback only when uplink network information changes. This callback is applicable only in scenarios where H.264 format external encoded video data is pushed to the SDK. * - * @param info The uplink network information. See UplinkNetworkInfo. + * @param info Uplink network information. See UplinkNetworkInfo. */ onUplinkNetworkInfoUpdated?(info: UplinkNetworkInfo): void; /** - * Reports the last-mile network quality of the local user. + * Callback for the last mile network quality report. * - * This callback reports the last-mile network conditions of the local user before the user joins the channel. Last mile refers to the connection between the local device and Agora's edge server. Before the user joins the channel, this callback is triggered by the SDK once startLastmileProbeTest is called and reports the last-mile network conditions of the local user. + * This callback describes the result of the last mile network probe for the local user before joining a channel. Last mile refers to the network status from the device to the Agora edge server. 
+ * Before joining a channel, after calling startLastmileProbeTest, the SDK triggers this callback to report the result of the local user's last mile network probe. * - * @param quality The last-mile network quality. QualityUnknown (0): The quality is unknown. QualityExcellent (1): The quality is excellent. QualityGood (2): The network quality seems excellent, but the bitrate can be slightly lower than excellent. QualityPoor (3): Users can feel the communication is slightly impaired. QualityBad (4): Users cannot communicate smoothly. QualityVbad (5): The quality is so bad that users can barely communicate. QualityDown (6): The network is down, and users cannot communicate at all. QualityDetecting (8): The last-mile probe test is in progress. See QualityType. + * @param quality Last mile network quality. See QualityType. */ onLastmileQuality?(quality: QualityType): void; /** - * Occurs when the first local video frame is displayed on the local video view. + * Callback when the first local video frame is rendered. * - * The SDK triggers this callback when the first local video frame is displayed on the local video view. + * This callback is triggered when the first local video frame is displayed in the local view. * - * @param source The type of the video source. See VideoSourceType. - * @param width The width (px) of the first local video frame. - * @param height The height (px) of the first local video frame. - * @param elapsed The time elapsed (ms) from the local user calling joinChannel to join the channel to when the SDK triggers this callback. If startPreviewWithoutSourceType / startPreview is called before joining the channel, this parameter indicates the time elapsed from calling startPreviewWithoutSourceType or startPreview to when this event occurred. + * @param source The type of video source. See VideoSourceType. + * @param width Width (px) of the locally rendered video. + * @param height Height (px) of the locally rendered video. 
+ * @param elapsed Time elapsed in milliseconds from calling joinChannel to this event. If startPreviewWithoutSourceType / startPreview was called before joining the channel, this parameter indicates the time from calling startPreviewWithoutSourceType or startPreview to this event. */ onFirstLocalVideoFrame?( source: VideoSourceType, @@ -1590,15 +1614,15 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the first video frame is published. + * Callback when the first local video frame is published. * - * The SDK triggers this callback under one of the following circumstances: - * The local client enables the video module and calls joinChannel to join the channel successfully. - * The local client calls muteLocalVideoStream (true) and muteLocalVideoStream (false) in sequence. - * The local client calls disableVideo and enableVideo in sequence. + * The SDK triggers this callback in the following scenarios: + * After successfully joining a channel with the local video module enabled. + * After calling muteLocalVideoStream(true) and then muteLocalVideoStream(false). + * After calling disableVideo and then enableVideo. * - * @param connection The connection information. See RtcConnection. - * @param elapsed Time elapsed (ms) from the local user calling joinChannel until this callback is triggered. + * @param connection Connection information. See RtcConnection. + * @param elapsed Time interval in milliseconds from calling joinChannel to triggering this callback. */ onFirstLocalVideoFramePublished?( connection: RtcConnection, @@ -1606,20 +1630,20 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the first remote video frame is received and decoded. + * Callback when the first remote video frame is received and decoded. * - * The SDK triggers this callback under one of the following circumstances: - * The remote user joins the channel and sends the video stream. 
- * The remote user stops sending the video stream and re-sends it after 15 seconds. Reasons for such an interruption include: + * The SDK triggers this callback in the following scenarios: + * When the remote user sends video after joining the channel for the first time. + * When the remote user sends video again after going offline and coming back online. Possible reasons for interruption include: * The remote user leaves the channel. - * The remote user drops offline. - * The remote user calls disableVideo to disable video. + * The remote user is disconnected. + * The remote user calls disableVideo to turn off the video module. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The user ID of the remote user sending the video stream. - * @param width The width (px) of the video stream. - * @param height The height (px) of the video stream. - * @param elapsed The time elapsed (ms) from the local user calling joinChannel until the SDK triggers this callback. + * @param connection Connection information. See RtcConnection. + * @param remoteUid User ID specifying which user's video stream it is. + * @param width Width (px) of the video stream. + * @param height Height (px) of the video stream. + * @param elapsed Delay in milliseconds from calling joinChannel locally to triggering this callback. */ onFirstRemoteVideoDecoded?( connection: RtcConnection, @@ -1630,14 +1654,14 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the video size or rotation of a specified user changes. + * Occurs when the size or rotation of the local or remote video changes. * - * @param connection The connection information. See RtcConnection. - * @param sourceType The type of the video source. See VideoSourceType. - * @param uid The ID of the user whose video size or rotation changes. (The uid for the local user is 0. The video is the local user's video preview). - * @param width The width (pixels) of the video stream. 
- * @param height The height (pixels) of the video stream. - * @param rotation The rotation information. The value range is [0,360). On the iOS platform, the parameter value is always 0. + * @param connection Connection information. See RtcConnection. + * @param sourceType Type of video source. See VideoSourceType. + * @param uid User ID whose video size or rotation has changed (the uid of the local user is 0, indicating this is a local video preview). + * @param width Width of the video stream (pixels). + * @param height Height of the video stream (pixels). + * @param rotation Rotation information, range [0, 360). On iOS, this value is always 0. */ onVideoSizeChanged?( connection: RtcConnection, @@ -1649,18 +1673,25 @@ export interface IRtcEngineEventHandler { ): void; /** - * @ignore + * Callback triggered when a local video event occurs. + * + * Since Available since v4.6.1. You can use this callback to get the reason for the local video event. + * + * @param source Type of video source. See VideoSourceType. + * @param event Type of local video event. See LocalVideoEventType. */ onLocalVideoEvent?(source: VideoSourceType, event: LocalVideoEventType): void; /** - * Occurs when the local video stream state changes. + * Callback when the local video state changes. * - * When the status of the local video changes, the SDK triggers this callback to report the current local video state and the reason for the state change. + * This callback is triggered by the SDK when the state of the local video changes, reporting the current state and the reason for the change. + * Frame duplication detection only applies to video frames with resolution greater than 200 × 200, frame rate ≥ 10 fps, and bitrate less than 20 Kbps. + * If an exception occurs during video capture, you can usually troubleshoot the issue using the reason parameter in this callback. 
However, on some devices, when capture issues occur (e.g., freezing), Android may not throw any error callbacks, so the SDK cannot report the reason for the local video state change. In this case, you can determine whether there are no captured frames by checking if this callback reports state as LocalVideoStreamStateCapturing or LocalVideoStreamStateEncoding, and the captureFrameRate in the onLocalVideoStats callback is 0. * - * @param source The type of the video source. See VideoSourceType. - * @param state The state of the local video, see LocalVideoStreamState. - * @param reason The reasons for changes in local video state. See LocalVideoStreamReason. + * @param source Type of video source. See VideoSourceType. + * @param state Local video state. See LocalVideoStreamState. + * @param reason Reason for the local video state change. See LocalVideoStreamReason. */ onLocalVideoStateChanged?( source: VideoSourceType, @@ -1669,15 +1700,15 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the remote video stream state changes. + * Callback when the remote video state changes. * - * This callback does not work properly when the number of users (in the communication profile) or hosts (in the live streaming channel) in a channel exceeds 32. + * When the number of users (in communication) or hosts (in live broadcast) in the channel exceeds 32, this callback may be inaccurate. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The ID of the remote user whose video state changes. - * @param state The state of the remote video. See RemoteVideoState. - * @param reason The reason for the remote video state change. See RemoteVideoStateReason. - * @param elapsed Time elapsed (ms) from the local user calling the joinChannel method until the SDK triggers this callback. + * @param connection Connection information. See RtcConnection. + * @param remoteUid Remote user ID whose video state changed. 
+ * @param state Remote video stream state. See RemoteVideoState. + * @param reason Specific reason for the remote video stream state change. See RemoteVideoStateReason. + * @param elapsed Time elapsed (in ms) from the local user calling joinChannel to this event. */ onRemoteVideoStateChanged?( connection: RtcConnection, @@ -1688,15 +1719,15 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the renderer receives the first frame of the remote video. + * Callback when the renderer receives the first remote video frame. * - * This callback is only triggered when the video frame is rendered by the SDK; it will not be triggered if the user employs custom video rendering.You need to implement this independently using methods outside the SDK. + * This callback is only triggered when SDK rendering is used; if custom video rendering is used, this callback will not be triggered and you need to implement it outside the SDK. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The user ID of the remote user sending the video stream. - * @param width The width (px) of the video stream. - * @param height The height (px) of the video stream. - * @param elapsed The time elapsed (ms) from the local user calling joinChannel until the SDK triggers this callback. + * @param connection Connection information. See RtcConnection. + * @param remoteUid User ID specifying which user's video stream it is. + * @param width Width (px) of the video stream. + * @param height Height (px) of the video stream. + * @param elapsed Time elapsed in milliseconds from calling joinChannel locally to this event. */ onFirstRemoteVideoFrame?( connection: RtcConnection, @@ -1707,14 +1738,14 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when a remote user (in the communication profile)/ host (in the live streaming profile) joins the channel. 
+ * Occurs when a remote user (in Communication) or host (in Live Broadcast) joins the current channel. * - * In a communication channel, this callback indicates that a remote user joins the channel. The SDK also triggers this callback to report the existing users in the channel when a user joins the channel. - * In a live-broadcast channel, this callback indicates that a host joins the channel. The SDK also triggers this callback to report the existing hosts in the channel when a host joins the channel. Agora recommends limiting the number of co-hosts to 32, with a maximum of 17 video hosts. + * In the Communication profile, this callback indicates that a remote user has joined the channel. If other users are already in the channel when the user joins, the new user also receives callbacks for those existing users. + * In the Live Broadcast profile, this callback indicates that a host has joined the channel. If other hosts are already in the channel, the new host also receives callbacks for those existing hosts. It is recommended to limit the number of hosts in a call to 32 (no more than 17 with video). * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The ID of the user or host who joins the channel. - * @param elapsed Time delay (ms) from the local user calling joinChannel until this callback is triggered. + * @param connection Connection information. See RtcConnection. + * @param remoteUid The ID of the remote user/host who just joined the channel. + * @param elapsed The time delay (ms) from the local user calling joinChannel to the triggering of this callback. */ onUserJoined?( connection: RtcConnection, @@ -1723,15 +1754,15 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when a remote user (in the communication profile)/ host (in the live streaming profile) leaves the channel. + * Callback when a remote user (in communication) or host (in live streaming) leaves the current channel. 
* - * There are generally two reasons for users to become offline: - * Leave the channel: When a user/host leaves the channel, the user/host sends a goodbye message. - * Drop offline: When no data packet of the user or host is received for a certain period of time (20 seconds for the communication profile, and more for the live broadcast profile), the SDK assumes that the user/host drops offline. A poor network connection may lead to false detections. It is recommended to use the Agora RTM SDK for reliable offline detection. + * Users may leave the channel for the following reasons: + * Normal leave: The remote user or host sends a 'goodbye'-like message and actively leaves the channel. + * Timeout disconnection: If no data packet is received from the other party within a certain period (20 seconds in communication, slightly delayed in live streaming), the user is considered disconnected. In poor network conditions, false reports may occur. It is recommended to use the RTM SDK for reliable disconnection detection. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The ID of the user who leaves the channel or goes offline. - * @param reason Reasons why a remote user (in the communication profile) or host (in the live streaming profile) goes offline. See UserOfflineReasonType. + * @param connection Connection information. See RtcConnection. + * @param remoteUid ID of the remote user or host who went offline. + * @param reason Reason why the remote user (in communication) or host (in live streaming) went offline. See UserOfflineReasonType. */ onUserOffline?( connection: RtcConnection, @@ -1740,13 +1771,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when a remote user (in the communication profile) or a host (in the live streaming profile) stops/resumes sending the audio stream. 
+ * Occurs when a remote user (in the Communication profile) or host (in the Live Broadcast profile) stops or resumes sending audio streams. * - * The SDK triggers this callback when the remote user stops or resumes sending the audio stream by calling the muteLocalAudioStream method. This callback does not work properly when the number of users (in the communication profile) or hosts (in the live streaming channel) in a channel exceeds 32. + * This callback is triggered when a remote user calls muteLocalAudioStream to stop or resume sending audio. This callback may be inaccurate when the number of users (in the Communication profile) or hosts (in the Live Broadcast profile) in the channel exceeds 32. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The user ID. - * @param muted Whether the remote user's audio stream is muted: true : User's audio stream is muted. false : User's audio stream is unmuted. + * @param connection Connection information. See RtcConnection. + * @param remoteUid User ID. + * @param muted Whether the user is muted: true : The user has muted the audio. false : The user has unmuted the audio. */ onUserMuteAudio?( connection: RtcConnection, @@ -1757,11 +1788,11 @@ export interface IRtcEngineEventHandler { /** * Occurs when a remote user stops or resumes publishing the video stream. * - * When a remote user calls muteLocalVideoStream to stop or resume publishing the video stream, the SDK triggers this callback to report to the local user the state of the streams published by the remote user. This callback can be inaccurate when the number of users (in the communication profile) or hosts (in the live streaming profile) in a channel exceeds 32. + * This callback is triggered when a remote user calls muteLocalVideoStream to stop or resume publishing the video stream. The SDK reports the remote user's stream publishing status to the local user. 
When the number of users (in communication scenario) or hosts (in live streaming scenario) in the channel exceeds 32, this callback may be inaccurate. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The user ID of the remote user. - * @param muted Whether the remote user stops publishing the video stream: true : The remote user stops publishing the video stream. false : The remote user resumes publishing the video stream. + * @param connection Connection information. See RtcConnection. + * @param remoteUid Remote user ID. + * @param muted Whether the remote user stops publishing the video stream: true : Stops publishing the video stream. false : Publishes the video stream. */ onUserMuteVideo?( connection: RtcConnection, @@ -1772,11 +1803,12 @@ export interface IRtcEngineEventHandler { /** * Occurs when a remote user enables or disables the video module. * - * Once the video module is disabled, the user can only use a voice call. The user cannot send or receive any video. The SDK triggers this callback when a remote user enables or disables the video module by calling the enableVideo or disableVideo method. + * Disabling the video function means the user can only make voice calls, cannot display or send their own video, and cannot receive or display others' video. + * This callback is triggered when a remote user calls the enableVideo or disableVideo method to enable or disable the video module. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The user ID of the remote user. - * @param enabled true : The video module is enabled. false : The video module is disabled. + * @param connection Connection information. See RtcConnection. + * @param remoteUid The user ID indicating whose video stream is affected. + * @param enabled true : The user has enabled the video function. false : The user has disabled the video function. 
*/ onUserEnableVideo?( connection: RtcConnection, @@ -1794,13 +1826,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when a specific remote user enables/disables the local video capturing function. + * Callback when a remote user enables/disables local video capture. * - * Deprecated: This callback is deprecated, use the following enumerations in the onRemoteVideoStateChanged callback: RemoteVideoStateStopped (0) and RemoteVideoStateReasonRemoteMuted (5). RemoteVideoStateDecoding (2) and RemoteVideoStateReasonRemoteUnmuted (6). The SDK triggers this callback when the remote user resumes or stops capturing the video stream by calling the enableLocalVideo method. + * Deprecated Deprecated: This callback has been deprecated. Use the following enumerations in the onRemoteVideoStateChanged callback instead: RemoteVideoStateStopped (0) and RemoteVideoStateReasonRemoteMuted (5). RemoteVideoStateDecoding (2) and RemoteVideoStateReasonRemoteUnmuted (6). This callback is triggered when a remote user calls the enableLocalVideo method to enable or disable video capture. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The user ID of the remote user. - * @param enabled Whether the specified remote user enables/disables local video capturing: true : The video module is enabled. Other users in the channel can see the video of this remote user. false : The video module is disabled. Other users in the channel can no longer receive the video stream from this remote user, while this remote user can still receive the video streams from other users. + * @param connection Connection information. See RtcConnection. + * @param remoteUid User ID indicating whose video stream it is. + * @param enabled Whether the remote user has enabled video capture: true : The user has enabled video. Other users can receive this user's video stream. false : The user has disabled video. 
The user can still receive video streams from others, but others cannot receive this user's video stream. */ onUserEnableLocalVideo?( connection: RtcConnection, @@ -1809,32 +1841,32 @@ export interface IRtcEngineEventHandler { ): void; /** - * Reports the transport-layer statistics of each remote audio stream. + * Reports the statistics of the audio stream sent by each remote user during a call. * - * The SDK triggers this callback once every two seconds for each remote user who is sending audio streams. If a channel includes multiple remote users, the SDK triggers this callback as many times. + * This callback is triggered every 2 seconds for each remote user/host who is sending an audio stream. If multiple remote users/hosts are sending audio streams, this callback is triggered multiple times every 2 seconds. * - * @param connection The connection information. See RtcConnection. - * @param stats The statistics of the received remote audio streams. See RemoteAudioStats. + * @param connection Connection information. See RtcConnection. + * @param stats The statistics of the received remote audio stream. See RemoteAudioStats. */ onRemoteAudioStats?(connection: RtcConnection, stats: RemoteAudioStats): void; /** - * Reports the statistics of the local audio stream. + * Reports the statistics of the local audio stream during a call. * - * The SDK triggers this callback once every two seconds. + * The SDK triggers this callback every 2 seconds. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. * @param stats Local audio statistics. See LocalAudioStats. */ onLocalAudioStats?(connection: RtcConnection, stats: LocalAudioStats): void; /** - * Reports the statistics of the local video stream. + * Callback for local video stream statistics. * - * The SDK triggers this callback once every two seconds to report the statistics of the local video stream. 
+ * This callback describes statistics of the video stream sent by the local device. It is triggered every 2 seconds. * - * @param connection The connection information. See RtcConnection. - * @param stats The statistics of the local video stream. See LocalVideoStats. + * @param connection Connection information. See RtcConnection. + * @param stats Local video stream statistics. See LocalVideoStats. */ onLocalVideoStats?( connection: RtcConnection, @@ -1843,31 +1875,31 @@ export interface IRtcEngineEventHandler { ): void; /** - * Reports the statistics of the video stream sent by each remote users. + * Callback for remote video stream statistics during a call. * - * Reports the statistics of the video stream from the remote users. The SDK triggers this callback once every two seconds for each remote user. If a channel has multiple users/hosts sending video streams, the SDK triggers this callback as many times. + * This callback describes end-to-end video stream statistics of remote users during a call. It is triggered every 2 seconds for each remote user/host. If there are multiple remote users/hosts, this callback is triggered multiple times every 2 seconds. * - * @param connection The connection information. See RtcConnection. - * @param stats Statistics of the remote video stream. See RemoteVideoStats. + * @param connection Connection information. See RtcConnection. + * @param stats Remote video statistics. See RemoteVideoStats. */ onRemoteVideoStats?(connection: RtcConnection, stats: RemoteVideoStats): void; /** - * Occurs when the camera turns on and is ready to capture the video. + * Callback when the camera is ready. * - * Deprecated: Use LocalVideoStreamStateCapturing (1) in onLocalVideoStateChanged instead. This callback indicates that the camera has been successfully turned on and you can start to capture video. + * Deprecated Deprecated: Use onLocalVideoStateChanged with LocalVideoStreamStateCapturing(1) instead. 
This callback indicates that the camera has been successfully opened and video capture can begin. */ onCameraReady?(): void; /** - * Occurs when the camera focus area changes. + * Callback when the camera focus area changes. * - * The SDK triggers this callback when the local user changes the camera focus position by calling setCameraFocusPositionInPreview. This callback is for Android and iOS only. + * This callback is triggered when the local user calls the setCameraFocusPositionInPreview method to change the focus position. This callback is applicable to Android and iOS only. * - * @param x The x-coordinate of the changed camera focus area. - * @param y The y-coordinate of the changed camera focus area. - * @param width The width of the changed camera focus area. - * @param height The height of the changed camera focus area. + * @param x The x-coordinate of the changed focus area. + * @param y The y-coordinate of the changed focus area. + * @param width The width of the changed focus area. + * @param height The height of the changed focus area. */ onCameraFocusAreaChanged?( x: number, @@ -1877,13 +1909,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the camera exposure area changes. + * Callback when the camera exposure area changes. * - * The SDK triggers this callback when the local user changes the camera exposure position by calling setCameraExposurePosition. This callback is for Android and iOS only. + * This callback is triggered when the local user calls the setCameraExposurePosition method to change the exposure position. This callback is applicable to Android and iOS only. * - * @param x The x coordinate of the changed camera exposure area. - * @param y The y coordinate of the changed camera exposure area. - * @param width The width of the changed camera exposure area. + * @param x The x-coordinate of the changed exposure area. + * @param y The y-coordinate of the changed exposure area. 
+ * @param width The width of the changed exposure area. * @param height The height of the changed exposure area. */ onCameraExposureAreaChanged?( @@ -1894,20 +1926,20 @@ export interface IRtcEngineEventHandler { ): void; /** - * Reports the face detection result of the local user. + * Reports local face detection results. * - * Once you enable face detection by calling enableFaceDetection (true), you can get the following information on the local user in real-time: - * The width and height of the local video. - * The position of the human face in the local view. - * The distance between the human face and the screen. This value is based on the fitting calculation of the local video size and the position of the human face. - * When it is detected that the face in front of the camera disappears, the callback will be triggered immediately. When no human face is detected, the frequency of this callback to be triggered wil be decreased to reduce power consumption on the local device. - * The SDK stops triggering this callback when a human face is in close proximity to the screen. + * After calling enableFaceDetection(true) to enable local face detection, you can use this callback to get the following face detection information in real time: + * Size of the captured image + * Position of the face in the view + * Distance of the face from the device screen The distance of the face from the screen is estimated by the SDK based on the image size and face position in the view. + * When the face in front of the camera disappears, this callback is triggered immediately; when no face is detected, the callback frequency is reduced to save power. + * This callback is not triggered when the face is too close to the screen. * - * @param imageWidth The width (px) of the video image captured by the local camera. - * @param imageHeight The height (px) of the video image captured by the local camera. - * @param vecRectangle The information of the detected human face. See Rectangle. 
- * @param vecDistance The distance between the human face and the device screen (cm). - * @param numFaces The number of faces detected. If the value is 0, it means that no human face is detected. + * @param imageWidth Width (px) of the captured image. + * @param imageHeight Height (px) of the captured image. + * @param vecRectangle Detected face information. See Rectangle. + * @param vecDistance Distance between the face and the device screen (cm). + * @param numFaces Number of faces detected. If 0, no face is detected. */ onFacePositionChanged?( imageWidth: number, @@ -1918,18 +1950,18 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the video stops playing. + * Occurs when the video function is stopped. * - * Deprecated: Use LocalVideoStreamStateStopped (0) in the onLocalVideoStateChanged callback instead. The application can use this callback to change the configuration of the view (for example, displaying other pictures in the view) after the video stops playing. + * Deprecated Deprecated: Use the onLocalVideoStateChanged callback with LocalVideoStreamStateStopped (0) instead. If the app needs to perform other operations on the view after stopping the video (such as displaying other content), you can do so in this callback. */ onVideoStopped?(): void; /** * Occurs when the playback state of the music file changes. * - * This callback occurs when the playback state of the music file changes, and reports the current state and error code. + * This callback is triggered when the playback state of the music file changes and reports the current playback state and error code. * - * @param state The playback state of the music file. See AudioMixingStateType. + * @param state Playback state of the music file. See AudioMixingStateType. * @param reason Error code. See AudioMixingReasonType. 
*/ onAudioMixingStateChanged?( @@ -1938,12 +1970,12 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the state of virtual metronome changes. + * Occurs when the state of the virtual metronome changes. * - * When the state of the virtual metronome changes, the SDK triggers this callback to report the current state of the virtual metronome. This callback indicates the state of the local audio stream and enables you to troubleshoot issues when audio exceptions occur. + * Deprecated Deprecated since v4.6.2. This callback is triggered when the state of the virtual metronome changes. When the virtual metronome encounters an issue, this callback helps you understand its current state and the reason for the failure, which facilitates troubleshooting. * - * @param state For the current virtual metronome status, see RhythmPlayerStateType. - * @param reason For the error codes and error messages related to virtual metronome errors, see RhythmPlayerReason. + * @param state The current state of the virtual metronome. See RhythmPlayerStateType. + * @param reason The error code and message when the virtual metronome encounters an error. See RhythmPlayerReason. */ onRhythmPlayerStateChanged?( state: RhythmPlayerStateType, @@ -1951,45 +1983,43 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the SDK cannot reconnect to Agora's edge server 10 seconds after its connection to the server is interrupted. + * Callback when the network connection is lost and the SDK cannot reconnect to the server within 10 seconds. * - * The SDK triggers this callback when it cannot connect to the server 10 seconds after calling the joinChannel method, regardless of whether it is in the channel. If the SDK fails to rejoin the channel 20 minutes after being disconnected from Agora's edge server, the SDK stops rejoining the channel. 
+ * After calling joinChannel, this callback is triggered if the SDK fails to connect to the server within 10 seconds, regardless of whether the channel is joined successfully. If the SDK fails to rejoin the channel within 20 minutes after disconnection, it will stop trying to reconnect. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. */ onConnectionLost?(connection: RtcConnection): void; /** - * Occurs when the connection between the SDK and the server is interrupted. + * Callback when the network connection is interrupted. * - * Deprecated: Use onConnectionStateChanged instead. The SDK triggers this callback when it loses connection with the server for more than four seconds after the connection is established. After triggering this callback, the SDK tries to reconnect to the server. You can use this callback to implement pop-up reminders. The differences between this callback and onConnectionLost are as follow: - * The SDK triggers the onConnectionInterrupted callback when it loses connection with the server for more than four seconds after it successfully joins the channel. - * The SDK triggers the onConnectionLost callback when it loses connection with the server for more than 10 seconds, whether or not it joins the channel. If the SDK fails to rejoin the channel 20 minutes after being disconnected from Agora's edge server, the SDK stops rejoining the channel. + * Deprecated Deprecated: Use onConnectionStateChanged instead. This callback is triggered when the SDK loses connection to the server for more than 4 seconds after a connection has been established. After the event is triggered, the SDK will attempt to reconnect to the server, so this event can be used for UI prompts. 
The difference between this callback and onConnectionLost is: onConnectionInterrupted is always triggered after successfully joining a channel and when the SDK has just lost connection to the server for more than 4 seconds. onConnectionLost is triggered regardless of whether the channel is joined successfully, as long as the SDK cannot connect to the server within 10 seconds. If the SDK fails to rejoin the channel within 20 minutes after disconnection, it will stop trying to reconnect. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. */ onConnectionInterrupted?(connection: RtcConnection): void; /** - * Occurs when the connection is banned by the Agora server. + * Callback when the network connection is banned by the server. * - * Deprecated: Use onConnectionStateChanged instead. + * Deprecated Deprecated: Use onConnectionStateChanged instead. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. */ onConnectionBanned?(connection: RtcConnection): void; /** - * Occurs when the local user receives the data stream from the remote user. + * Callback when receiving a data stream message from a remote user. * - * The SDK triggers this callback when the local user receives the stream message that the remote user sends by calling the sendStreamMessage method. + * This callback indicates that the local user has received a stream message sent by a remote user using the sendStreamMessage method. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The ID of the remote user sending the message. - * @param streamId The stream ID of the received message. - * @param data The data received. - * @param length The data length (byte). - * @param sentTs The time when the data stream is sent. + * @param connection Connection information. See RtcConnection. 
+ * @param remoteUid User ID of the sender. + * @param streamId Stream ID of the received message. + * @param data Received data. + * @param length Length of the data in bytes. + * @param sentTs Timestamp when the data stream was sent. */ onStreamMessage?( connection: RtcConnection, @@ -2001,16 +2031,16 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the local user does not receive the data stream from the remote user. + * Callback when an error occurs while receiving a data stream message from a remote user. * - * The SDK triggers this callback when the local user fails to receive the stream message that the remote user sends by calling the sendStreamMessage method. + * This callback indicates that the local user failed to receive a stream message sent by a remote user using the sendStreamMessage method. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The ID of the remote user sending the message. - * @param streamId The stream ID of the received message. - * @param code Error code. - * @param missed The number of lost messages. - * @param cached Number of incoming cached messages when the data stream is interrupted. + * @param connection Connection information. See RtcConnection. + * @param remoteUid User ID of the sender. + * @param streamId Stream ID of the received message. + * @param code Error code. See ErrorCodeType. + * @param missed Number of missed messages. + * @param cached Number of messages cached after the data stream was interrupted. */ onStreamMessageError?( connection: RtcConnection, @@ -2052,28 +2082,29 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the token expires. + * Occurs when the token has expired. * - * The SDK triggers this callback if the token expires. 
When receiving this callback, you need to generate a new token on your token server and you can renew your token through one of the following ways: - * In scenarios involving one channel: + * During an audio or video call, if the token becomes invalid, the SDK triggers this callback to report that the token has expired. + * When you receive this callback, you need to generate a new token on your server and update it using one of the following methods: + * Single-channel scenario: * Call renewToken to pass in the new token. - * Call leaveChannel to leave the current channel and then pass in the new token when you call joinChannel to join a channel. - * In scenarios involving mutiple channels: Call updateChannelMediaOptionsEx to pass in the new token. + * Call leaveChannel to leave the current channel, then call joinChannel with the new token to rejoin the channel. + * Multi-channel scenario: Call updateChannelMediaOptionsEx with the new token. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. */ onRequestToken?(connection: RtcConnection): void; /** - * Occurs when the token expires in 30 seconds. + * Occurs when the token is about to expire in 30 seconds. * - * When receiving this callback, you need to generate a new token on your token server and you can renew your token through one of the following ways: - * In scenarios involving one channel: + * When you receive this callback, you need to generate a new token on your server and update it using one of the following methods: + * Single-channel scenario: * Call renewToken to pass in the new token. - * Call leaveChannel to leave the current channel and then pass in the new token when you call joinChannel to join a channel. - * In scenarios involving mutiple channels: Call updateChannelMediaOptionsEx to pass in the new token. 
+ * Call leaveChannel to leave the current channel, then call joinChannel with the new token to rejoin the channel. + * Multi-channel scenario: Call updateChannelMediaOptionsEx with the new token. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. * @param token The token that is about to expire. */ onTokenPrivilegeWillExpire?(connection: RtcConnection, token: string): void; @@ -2087,15 +2118,15 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the first audio frame is published. + * Occurs when the first local audio frame is published. * - * The SDK triggers this callback under one of the following circumstances: - * The local client enables the audio module and calls joinChannel successfully. - * The local client calls muteLocalAudioStream (true) and muteLocalAudioStream (false) in sequence. - * The local client calls disableAudio and enableAudio in sequence. + * The SDK triggers this callback in the following scenarios: + * After successfully joining a channel by calling joinChannel with local audio enabled. + * After calling muteLocalAudioStream(true) and then muteLocalAudioStream(false). + * After calling disableAudio and then enableAudio. * - * @param connection The connection information. See RtcConnection. - * @param elapsed Time elapsed (ms) from the local user calling joinChannel until the SDK triggers this callback. + * @param connection Connection information. See RtcConnection. + * @param elapsed Time elapsed (ms) from calling joinChannel until this callback is triggered. */ onFirstLocalAudioFramePublished?( connection: RtcConnection, @@ -2103,19 +2134,19 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the SDK decodes the first remote audio frame for playback. + * Occurs when the first remote audio frame is decoded. * - * Deprecated: Use onRemoteAudioStateChanged instead. 
The SDK triggers this callback under one of the following circumstances: - * The remote user joins the channel and sends the audio stream for the first time. - * The remote user's audio is offline and then goes online to re-send audio. It means the local user cannot receive audio in 15 seconds. Reasons for such an interruption include: - * The remote user leaves channel. - * The remote user drops offline. - * The remote user calls muteLocalAudioStream to stop sending the audio stream. - * The remote user calls disableAudio to disable audio. + * Deprecated: Use onRemoteAudioStateChanged instead. The SDK triggers this callback in the following scenarios: + * When the remote user sends audio after joining the channel for the first time. + * When the remote user goes offline and then comes back online to send audio. Offline means no audio packet is received locally within 15 seconds, which may be due to: + * The remote user leaving the channel + * The remote user disconnecting + * The remote user calling muteLocalAudioStream to stop sending audio + * The remote user calling disableAudio to disable audio * - * @param connection The connection information. See RtcConnection. - * @param uid The user ID of the remote user. - * @param elapsed The time elapsed (ms) from the local user calling joinChannel until the SDK triggers this callback. + * @param connection Connection information. See RtcConnection. + * @param uid Remote user ID. + * @param elapsed Time elapsed (ms) from the local user calling joinChannel until this callback is triggered. */ onFirstRemoteAudioDecoded?( connection: RtcConnection, @@ -2124,13 +2155,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the SDK receives the first audio frame from a specific remote user. + * Occurs when the first remote audio frame is received. * - * Deprecated: Use onRemoteAudioStateChanged instead. + * Deprecated: Use onRemoteAudioStateChanged instead. 
* - * @param connection The connection information. See RtcConnection. - * @param userId The user ID of the remote user. - * @param elapsed The time elapsed (ms) from the local user calling joinChannel until the SDK triggers this callback. + * @param connection Connection information. See RtcConnection. + * @param userId User ID of the remote user who sent the audio frame. + * @param elapsed Time elapsed (ms) from the local user calling joinChannel until this callback is triggered. */ onFirstRemoteAudioFrame?( connection: RtcConnection, @@ -2139,13 +2170,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the local audio stream state changes. + * Occurs when the local audio state changes. * - * When the state of the local audio stream changes (including the state of the audio capture and encoding), the SDK triggers this callback to report the current state. This callback indicates the state of the local audio stream, and allows you to troubleshoot issues when audio exceptions occur. When the state is LocalAudioStreamStateFailed (3), you can view the error information in the error parameter. + * This callback is triggered when the local audio state changes, including changes in microphone capture and audio encoding. When local audio experiences issues, this callback helps you understand the current audio state and the cause of the issue, making it easier to troubleshoot. When the state is LocalAudioStreamStateFailed (3), you can check the error information in the error parameter. * - * @param connection The connection information. See RtcConnection. - * @param state The state of the local audio. See LocalAudioStreamState. - * @param reason Reasons for local audio state changes. See LocalAudioStreamReason. + * @param connection Connection information. See RtcConnection. + * @param state Current local audio state. See LocalAudioStreamState. + * @param reason Reason for the local audio state change. See LocalAudioStreamReason. 
*/ onLocalAudioStateChanged?( connection: RtcConnection, @@ -2154,15 +2185,15 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the remote audio state changes. + * Occurs when the state of the remote audio stream changes. * - * When the audio state of a remote user (in a voice/video call channel) or host (in a live streaming channel) changes, the SDK triggers this callback to report the current state of the remote audio stream. This callback does not work properly when the number of users (in the communication profile) or hosts (in the live streaming channel) in a channel exceeds 32. + * Occurs when the audio state of a remote user (in the Communication profile) or host (in the Live Broadcast profile) changes. The SDK reports the current state of the remote audio stream to the local user through this callback. This callback may be inaccurate when the number of users (in the Communication profile) or hosts (in the Live Broadcast profile) in the channel exceeds 32. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The ID of the remote user whose audio state changes. - * @param state The state of the remote audio. See RemoteAudioState. - * @param reason The reason of the remote audio state change. See RemoteAudioStateReason. - * @param elapsed Time elapsed (ms) from the local user calling the joinChannel method until the SDK triggers this callback. + * @param connection Connection information. See RtcConnection. + * @param remoteUid The ID of the remote user whose audio state has changed. + * @param state The state of the remote audio stream. See RemoteAudioState. + * @param reason The reason for the remote audio state change. See RemoteAudioStateReason. + * @param elapsed The time elapsed (ms) from the local user calling joinChannel until this event occurs. 
*/ onRemoteAudioStateChanged?( connection: RtcConnection, @@ -2175,12 +2206,13 @@ export interface IRtcEngineEventHandler { /** * Occurs when the most active remote speaker is detected. * - * After a successful call of enableAudioVolumeIndication, the SDK continuously detects which remote user has the loudest volume. During the current period, the remote user whose volume is detected as the loudest for the most times, is the most active user. When the number of users is no less than two and an active remote speaker exists, the SDK triggers this callback and reports the uid of the most active remote speaker. - * If the most active remote speaker is always the same user, the SDK triggers the onActiveSpeaker callback only once. - * If the most active remote speaker changes to another user, the SDK triggers this callback again and reports the uid of the new active remote speaker. + * After successfully calling enableAudioVolumeIndication, the SDK continuously monitors the remote user with the highest volume and counts the number of times the user is identified as the loudest. The remote user with the highest count during a given period is considered the most active speaker. + * When there are two or more users in the channel and there is an active remote user, the SDK triggers this callback and reports the uid of the most active remote speaker. + * If the most active speaker remains the same, the SDK does not trigger the onActiveSpeaker callback again. + * If the most active speaker changes, the SDK triggers this callback again and reports the new uid. * - * @param connection The connection information. See RtcConnection. - * @param uid The user ID of the most active speaker. + * @param connection Connection information. See RtcConnection. + * @param uid The ID of the most active remote speaker. 
*/ onActiveSpeaker?(connection: RtcConnection, uid: number): void; @@ -2190,21 +2222,21 @@ export interface IRtcEngineEventHandler { onContentInspectResult?(result: ContentInspectResult): void; /** - * Reports the result of taking a video snapshot. + * Callback for snapshot result. * - * After a successful takeSnapshot method call, the SDK triggers this callback to report whether the snapshot is successfully taken as well as the details for the snapshot taken. + * After calling takeSnapshot successfully, the SDK triggers this callback to report whether the snapshot was successful and provide details. * - * @param connection The connection information. See RtcConnection. - * @param uid The user ID. One uid of 0 indicates the local user. - * @param filePath The local path of the snapshot. - * @param width The width (px) of the snapshot. - * @param height The height (px) of the snapshot. - * @param errCode The message that confirms success or gives the reason why the snapshot is not successfully taken: - * 0: Success. - * < 0: Failure: - * -1: The SDK fails to write data to a file or encode a JPEG image. - * -2: The SDK does not find the video stream of the specified user within one second after the takeSnapshot method call succeeds. The possible reasons are: local capture stops, remote end stops publishing, or video data processing is blocked. - * -3: Calling the takeSnapshot method too frequently. + * @param connection Connection information. See RtcConnection. + * @param uid User ID. If uid is 0, it indicates the local user. + * @param filePath Local path where the snapshot is saved. + * @param width Image width (px). + * @param height Image height (px). + * @param errCode Indicates success or reason for failure. + * 0: Snapshot succeeded. + * < 0: Snapshot failed. + * -1: Failed to write file or JPEG encoding failed. + * -2: No video frame from the specified user within 1 second after calling takeSnapshot. 
Possible causes: local capture stopped, remote user stopped publishing, or video processing is blocked. + * -3: takeSnapshot is called too frequently. */ onSnapshotTaken?( connection: RtcConnection, @@ -2216,12 +2248,14 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the user role or the audience latency level changes. + * Occurs when the user role or audience latency level is switched. + * + * This callback is not triggered if you call setClientRole and set the user role to BROADCASTER before joining a channel. * * @param connection The connection information. See RtcConnection. - * @param oldRole Role that the user switches from: ClientRoleType. - * @param newRole Role that the user switches to: ClientRoleType. - * @param newRoleOptions Properties of the role that the user switches to. See ClientRoleOptions. + * @param oldRole The previous role: ClientRoleType. + * @param newRole The new role: ClientRoleType. + * @param newRoleOptions The properties of the new role. See ClientRoleOptions. */ onClientRoleChanged?( connection: RtcConnection, @@ -2231,13 +2265,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when switching a user role fails. + * Occurs when the user role switch fails. * - * This callback informs you about the reason for failing to switching and your current user role. + * When the user role switch fails, you can use this callback to learn the reason for the failure and the current user role. * * @param connection The connection information. See RtcConnection. - * @param reason The reason for a user role switch failure. See ClientRoleChangeFailedReason. - * @param currentRole Current user role. See ClientRoleType. + * @param reason The reason for the user role switch failure. See ClientRoleChangeFailedReason. + * @param currentRole The current user role. See ClientRoleType. 
*/ onClientRoleChangeFailed?( connection: RtcConnection, @@ -2255,13 +2289,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the state of Media Push changes. + * Callback when the RTMP streaming state changes. * - * When the state of Media Push changes, the SDK triggers this callback and reports the URL address and the current state of the Media Push. This callback indicates the state of the Media Push. When exceptions occur, you can troubleshoot issues by referring to the detailed error descriptions in the error code parameter. + * When the RTMP streaming state changes, the SDK triggers this callback and provides the URL where the change occurred and the current streaming state. This callback helps streaming users understand the current streaming status. If an error occurs during streaming, you can use the returned error code to identify the cause and troubleshoot accordingly. * - * @param url The URL address where the state of the Media Push changes. - * @param state The current state of the Media Push. See RtmpStreamPublishState. - * @param reason Reasons for the changes in the Media Push status. See RtmpStreamPublishReason. + * @param url The URL where the streaming state changed. + * @param state The current streaming state. See RtmpStreamPublishState. + * @param reason The reason for the change in streaming state. See RtmpStreamPublishReason. */ onRtmpStreamingStateChanged?( url: string, @@ -2270,41 +2304,41 @@ export interface IRtcEngineEventHandler { ): void; /** - * Reports events during the Media Push. + * Callback for CDN streaming events. * - * @param url The URL for Media Push. - * @param eventCode The event code of Media Push. See RtmpStreamingEvent. + * @param url The CDN streaming URL. + * @param eventCode The CDN streaming event code. See RtmpStreamingEvent. */ onRtmpStreamingEvent?(url: string, eventCode: RtmpStreamingEvent): void; /** - * Occurs when the publisher's transcoding is updated. 
+ * Callback when the RTMP transcoding settings are updated. * - * When the LiveTranscoding class in the startRtmpStreamWithTranscoding method updates, the SDK triggers the onTranscodingUpdated callback to report the update information. If you call the startRtmpStreamWithTranscoding method to set the LiveTranscoding class for the first time, the SDK does not trigger this callback. + * When the LiveTranscoding parameters in the startRtmpStreamWithTranscoding method are updated, the onTranscodingUpdated callback is triggered to notify the host of the update. This callback is not triggered the first time you set the transcoding parameters LiveTranscoding using the startRtmpStreamWithTranscoding method. */ onTranscodingUpdated?(): void; /** - * Occurs when the local audio route changes. + * Callback when the audio route changes. * - * @param routing The current audio routing. - * -1: The default audio route. - * 0: The audio route is a headset with a microphone. - * 1: The audio route is an earpiece. - * 2: The audio route is a headset without a microphone. - * 3: The audio route is the speaker that comes with the device. - * 4: The audio route is an external speaker. (For iOS and macOS only) - * (5): The audio route is a Bluetooth headset. + * @param routing The current audio route: + * -1: Default audio route. + * 0: Headset with microphone. + * 1: Earpiece. + * 2: Headset without microphone. + * 3: Built-in speaker. + * 4: External speaker. (iOS and macOS only) + * 5: Bluetooth headset. */ onAudioRoutingChanged?(routing: number): void; /** - * Occurs when the state of the media stream relay changes. + * Occurs when the state of media stream relay across channels changes. * - * The SDK returns the state of the current media relay with any error message. + * Occurs when the state of media stream relay across channels changes. The SDK triggers this callback to report the current relay state and error information. * - * @param state The state code. 
See ChannelMediaRelayState. - * @param code The error code of the channel media relay. See ChannelMediaRelayError. + * @param state The state of media stream relay across channels. See ChannelMediaRelayState. + * @param code The error code of the media stream relay across channels. See ChannelMediaRelayError. */ onChannelMediaRelayStateChanged?( state: ChannelMediaRelayState, @@ -2312,14 +2346,14 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the remote media stream falls back to the audio-only stream due to poor network conditions or switches back to the video stream after the network conditions improve. + * Callback when the subscribed stream falls back to audio-only or recovers to audio and video. * - * If you call setRemoteSubscribeFallbackOption and set option to StreamFallbackOptionAudioOnly, the SDK triggers this callback in the following situations: - * The downstream network condition is poor, and the subscribed video stream is downgraded to audio-only stream. - * The downstream network condition has improved, and the subscribed stream has been restored to video stream. Once the remote media stream switches to the low-quality video stream due to weak network conditions, you can monitor the stream switch between a high-quality and low-quality stream in the onRemoteVideoStats callback. + * When you call setRemoteSubscribeFallbackOption and set option to StreamFallbackOptionAudioOnly, this callback is triggered in the following situations: + * The downlink network condition is poor, and the subscribed audio and video stream falls back to audio-only. + * The downlink network condition improves, and the subscribed audio-only stream recovers to audio and video. When the subscribed stream falls back to a lower-quality video stream due to poor network conditions, you can monitor the switch between high and low video streams through the onRemoteVideoStats callback. * - * @param uid The user ID of the remote user. 
- * @param isFallbackOrRecover true : The subscribed media stream falls back to audio-only due to poor network conditions. false : The subscribed media stream switches back to the video stream after the network conditions improve. + * @param uid Remote user's user ID. + * @param isFallbackOrRecover true : Due to poor network conditions, the subscribed stream has fallen back to audio-only. false : Due to improved network conditions, the subscribed stream has recovered to audio and video. */ onRemoteSubscribeFallbackToAudioOnly?( uid: number, @@ -2327,15 +2361,17 @@ export interface IRtcEngineEventHandler { ): void; /** - * Reports the transport-layer statistics of each remote audio stream. + * Reports the transport statistics of the audio stream sent by each remote user during a call. * - * Deprecated: Use onRemoteAudioStats instead. This callback reports the transport-layer statistics, such as the packet loss rate and network time delay after the local user receives an audio packet from a remote user. During a call, when the user receives the audio packet sent by the remote user, the callback is triggered every 2 seconds. + * Deprecated: + * Use onRemoteAudioStats instead. + * This callback reports the end-to-end network statistics of the remote user during a call, calculated based on audio packets. It reflects the current network status using objective data such as packet loss and network delay. During a call, when a user receives audio packets sent by a remote user/host, this callback is triggered every 2 seconds. * - * @param connection The connection information. See RtcConnection. - * @param remoteUid The ID of the remote user sending the audio streams. - * @param delay The network delay (ms) from the remote user to the receiver. - * @param lost The packet loss rate (%) of the audio packet sent from the remote user to the receiver. - * @param rxKBitrate The bitrate of the received audio (Kbps). + * @param connection Connection information. See RtcConnection. 
+ * @param remoteUid The user ID that identifies which user/host's audio packet it is. + * @param delay The delay (ms) from the sender to the receiver of the audio packet. + * @param lost The packet loss rate (%) from sender to receiver of the audio packet. + * @param rxKBitrate The received bitrate (Kbps) of the remote audio packet. */ onRemoteAudioTransportStats?( connection: RtcConnection, @@ -2346,15 +2382,16 @@ export interface IRtcEngineEventHandler { ): void; /** - * Reports the transport-layer statistics of each remote video stream. + * Callback for remote video stream transport statistics during a call. * - * Deprecated: This callback is deprecated. Use onRemoteVideoStats instead. This callback reports the transport-layer statistics, such as the packet loss rate and network time delay after the local user receives a video packet from a remote user. During a call, when the user receives the video packet sent by the remote user/host, the callback is triggered every 2 seconds. + * Deprecated: This callback has been deprecated. Use onRemoteVideoStats instead. This callback describes end-to-end network statistics of remote users during a call based on video packets. It uses objective data such as packet loss and network delay to reflect the current network status. + * During a call, once the user receives video data packets from a remote user/host, this callback is triggered every 2 seconds. 
+ * @param delay Delay (ms) from sender to receiver for the video packet. + * @param lost Packet loss rate (%) from sender to receiver for the video packet. + * @param rxKBitRate Receiving bitrate (Kbps) of the remote video packet. */ onRemoteVideoTransportStats?( connection: RtcConnection, @@ -2365,13 +2402,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the network connection state changes. + * Callback when the network connection state changes. * - * When the network connection state changes, the SDK triggers this callback and reports the current connection state and the reason for the change. + * This callback is triggered when the network connection state changes, informing you of the current state and the reason for the change. * - * @param connection The connection information. See RtcConnection. - * @param state The current connection state. See ConnectionStateType. - * @param reason The reason for a connection state change. See ConnectionChangedReasonType. + * @param connection Connection information. See RtcConnection. + * @param state Current network connection state. See ConnectionStateType. + * @param reason Reason for the current connection state change. See ConnectionChangedReasonType. */ onConnectionStateChanged?( connection: RtcConnection, @@ -2380,22 +2417,22 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the local network type changes. + * Callback when the local network type changes. * - * This callback occurs when the connection state of the local user changes. You can get the connection state and reason for the state change in this callback. When the network connection is interrupted, this callback indicates whether the interruption is caused by a network type change or poor network conditions. + * When the local network connection type changes, the SDK triggers this callback and specifies the current network connection type in the callback. 
You can use this callback to get the network type being used. When the connection is interrupted, this callback can help identify whether the interruption is due to a network switch or poor network conditions. * - * @param connection The connection information. See RtcConnection. - * @param type The type of the local network connection. See NetworkType. + * @param connection Connection information. See RtcConnection. + * @param type Local network connection type. See NetworkType. */ onNetworkTypeChanged?(connection: RtcConnection, type: NetworkType): void; /** - * Reports the built-in encryption errors. + * Callback when an error occurs with built-in encryption. * - * When encryption is enabled by calling enableEncryption, the SDK triggers this callback if an error occurs in encryption or decryption on the sender or the receiver side. + * After calling enableEncryption to enable encryption, if an error occurs in encryption or decryption on the sender or receiver side, the SDK triggers this callback. * - * @param connection The connection information. See RtcConnection. - * @param errorType Details about the error type. See EncryptionErrorType. + * @param connection Connection information. See RtcConnection. + * @param errorType Error type. See EncryptionErrorType. */ onEncryptionError?( connection: RtcConnection, @@ -2403,36 +2440,38 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the SDK cannot get the device permission. + * Callback when failing to obtain device permissions. * - * When the SDK fails to get the device permission, the SDK triggers this callback to report which device permission cannot be got. + * When the SDK fails to obtain device permissions, it triggers this callback to report which device permission could not be obtained. * - * @param permissionType The type of the device permission. See PermissionType. + * @param permissionType Device permission type. See PermissionType. 
*/ onPermissionError?(permissionType: PermissionType): void; /** - * @ignore + * Callback when permission is granted. + * + * @param permissionType Type of permission. See PermissionType. */ onPermissionGranted?(permissionType: PermissionType): void; /** - * Occurs when the local user registers a user account. + * Occurs when the local user successfully registers a User Account. * - * After the local user successfully calls registerLocalUserAccount to register the user account or calls joinChannelWithUserAccount to join a channel, the SDK triggers the callback and informs the local user's UID and User Account. + * After the local user successfully calls registerLocalUserAccount to register a User Account or calls joinChannelWithUserAccount to join a channel, the SDK triggers this callback and reports the local user's UID and User Account. * * @param uid The ID of the local user. - * @param userAccount The user account of the local user. + * @param userAccount The User Account of the local user. */ onLocalUserRegistered?(uid: number, userAccount: string): void; /** - * Occurs when the SDK gets the user ID and user account of the remote user. + * Occurs when the remote user's information is updated. * - * After a remote user joins the channel, the SDK gets the UID and user account of the remote user, caches them in a mapping table object, and triggers this callback on the local client. + * After a remote user joins the channel, the SDK obtains the user's UID and User Account, caches a mapping table containing the UID and User Account, and triggers this callback locally. * - * @param uid The user ID of the remote user. - * @param info The UserInfo object that contains the user ID and user account of the remote user. See UserInfo for details. + * @param uid The ID of the remote user. + * @param info The UserInfo object that identifies the user's information, including the UID and User Account. See the UserInfo class. 
*/ onUserInfoUpdated?(uid: number, info: UserInfo): void; @@ -2446,14 +2485,14 @@ export interface IRtcEngineEventHandler { ): void; /** - * Video frame rendering event callback. + * Callback for video frame rendering events. * - * After calling the startMediaRenderingTracing method or joining a channel, the SDK triggers this callback to report the events of video frame rendering and the indicators during the rendering process. Developers can optimize the indicators to improve the efficiency of the first video frame rendering. + * After calling the startMediaRenderingTracing method or joining a channel, the SDK triggers this callback to report video frame rendering events and metrics during the rendering process. Developers can optimize based on these metrics to improve rendering efficiency. * - * @param connection The connection information. See RtcConnection. - * @param uid The user ID. - * @param currentEvent The current video frame rendering event. See MediaTraceEvent. - * @param tracingInfo The indicators during the video frame rendering process. Developers need to reduce the value of indicators as much as possible in order to improve the efficiency of the first video frame rendering. See VideoRenderingTracingInfo. + * @param connection Connection information. See RtcConnection. + * @param uid User ID. + * @param currentEvent Current video frame rendering event. See MediaTraceEvent. + * @param tracingInfo Metrics during the video frame rendering process. Developers should minimize the metric values to improve rendering efficiency. See VideoRenderingTracingInfo. */ onVideoRenderingTracingResult?( connection: RtcConnection, @@ -2463,12 +2502,12 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when there's an error during the local video mixing. + * Callback when a local video compositing error occurs. 
* - * When you fail to call startLocalVideoTranscoder or updateLocalTranscoderConfiguration, the SDK triggers this callback to report the reason. + * This callback is triggered by the SDK when startLocalVideoTranscoder or updateLocalTranscoderConfiguration fails, reporting the reason for the compositing failure. * - * @param stream The video streams that cannot be mixed during video mixing. See TranscodingVideoStream. - * @param error The reason for local video mixing error. See VideoTranscoderError. + * @param stream The video stream that failed to composite. See TranscodingVideoStream. + * @param error The reason for the local video compositing error. See VideoTranscoderError. */ onLocalVideoTranscoderError?( stream: TranscodingVideoStream, @@ -2486,13 +2525,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the audio subscribing state changes. + * Callback when the audio subscription state changes. * - * @param channel The channel name. - * @param uid The user ID of the remote user. - * @param oldState The previous subscribing status. See StreamSubscribeState. - * @param newState The current subscribing status. See StreamSubscribeState. - * @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state. + * @param channel Channel name. + * @param uid Remote user ID. + * @param oldState Previous subscription state. See StreamSubscribeState. + * @param newState Current subscription state. See StreamSubscribeState. + * @param elapseSinceLastState Time interval between two state changes (ms). */ onAudioSubscribeStateChanged?( channel: string, @@ -2503,13 +2542,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the video subscribing state changes. + * Callback when the video subscription state changes. * - * @param channel The channel name. - * @param uid The user ID of the remote user. - * @param oldState The previous subscribing status. See StreamSubscribeState. 
- * @param newState The current subscribing status. See StreamSubscribeState. - * @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state. + * @param channel Channel name. + * @param uid Remote user ID. + * @param oldState Previous subscription state. See StreamSubscribeState. + * @param newState Current subscription state. See StreamSubscribeState. + * @param elapseSinceLastState Time interval between two state changes (ms). */ onVideoSubscribeStateChanged?( channel: string, @@ -2520,12 +2559,12 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the audio publishing state changes. + * Callback for audio publish state changes. * - * @param channel The channel name. - * @param oldState The previous publishing state. See StreamPublishState. - * @param newState The current publishing stat. See StreamPublishState. - * @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state. + * @param channel Channel name. + * @param oldState Previous publish state. See StreamPublishState. + * @param newState Current publish state. See StreamPublishState. + * @param elapseSinceLastState Time interval between state changes (ms). */ onAudioPublishStateChanged?( channel: string, @@ -2537,11 +2576,11 @@ export interface IRtcEngineEventHandler { /** * Occurs when the video publishing state changes. * - * @param source The type of the video source. See VideoSourceType. - * @param channel The channel name. - * @param oldState The previous publishing state. See StreamPublishState. - * @param newState The current publishing stat. See StreamPublishState. - * @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state. + * @param source Type of video source. See VideoSourceType. + * @param channel Channel name. + * @param oldState Previous publishing state. See StreamPublishState. + * @param newState Current publishing state. See StreamPublishState. 
+ * @param elapseSinceLastState Time interval between the two state changes (ms). */ onVideoPublishStateChanged?( source: VideoSourceType, @@ -2552,16 +2591,16 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the local user receives a mixed video stream carrying layout information. + * Callback for received mixed video stream with layout information. * - * When the local user receives a mixed video stream sent by the video mixing server for the first time, or when there is a change in the layout information of the mixed stream, the SDK triggers this callback, reporting the layout information of each sub-video stream within the mixed video stream. + * When the mixed video stream is received from the mixing server for the first time, or when the layout information of the mixed stream changes, the SDK triggers this callback to report the layout information of each sub-stream in the mixed video stream. * - * @param connection The connection information. See RtcConnection. - * @param uid User ID who published this mixed video stream. + * @param connection Connection information. See RtcConnection. + * @param uid User ID of the publisher of the mixed video stream. * @param width Width (px) of the mixed video stream. - * @param height Heitht (px) of the mixed video stream. - * @param layoutCount The number of layout information in the mixed video stream. - * @param layoutlist Layout information of a specific sub-video stream within the mixed stream. See VideoLayout. + * @param height Height (px) of the mixed video stream. + * @param layoutCount Number of layout entries in the mixed video stream. + * @param layoutlist Detailed layout information of a mixed video stream. See VideoLayout. */ onTranscodedStreamLayoutInfo?( connection: RtcConnection, @@ -2583,13 +2622,13 @@ export interface IRtcEngineEventHandler { ): void; /** - * The event callback of the extension. + * Plugin event callback. 
* - * To listen for events while the extension is running, you need to register this callback. + * To listen for plugin events, you need to register this callback. * - * @param context The context information of the extension, see ExtensionContext. - * @param key The key of the extension. - * @param value The value of the extension key. + * @param context Plugin context information. See ExtensionContext. + * @param key The key of the plugin property. + * @param value The value corresponding to the plugin property key. */ onExtensionEventWithContext?( context: ExtensionContext, @@ -2598,31 +2637,31 @@ export interface IRtcEngineEventHandler { ): void; /** - * Occurs when the extension is enabled. + * Callback when the extension is successfully enabled. * - * The callback is triggered after the extension is successfully enabled. + * This callback is triggered after the extension is successfully enabled. * - * @param context The context information of the extension, see ExtensionContext. + * @param context Extension context information. See ExtensionContext. */ onExtensionStartedWithContext?(context: ExtensionContext): void; /** - * Occurs when the extension is disabled. + * Callback when the extension is disabled. * - * The callback is triggered after the extension is successfully disabled. + * This callback is triggered after the extension is successfully disabled. * - * @param context The context information of the extension, see ExtensionContext. + * @param context Extension context information. See ExtensionContext. */ onExtensionStoppedWithContext?(context: ExtensionContext): void; /** - * Occurs when the extension runs incorrectly. + * Plugin error callback. * - * In case of extension enabling failure or runtime errors, the extension triggers this callback and reports the error code along with the reasons. + * When plugin enabling fails or the plugin encounters a runtime error, the plugin triggers this callback and reports the error code and reason. 
* - * @param context The context information of the extension, see ExtensionContext. - * @param error Error code. For details, see the extension documentation provided by the extension provider. - * @param message Reason. For details, see the extension documentation provided by the extension provider. + * @param context Plugin context information. See ExtensionContext. + * @param error Error code. See the plugin documentation provided by the plugin provider. + * @param message Error reason. See the plugin documentation provided by the plugin provider. */ onExtensionErrorWithContext?( context: ExtensionContext, @@ -2636,12 +2675,21 @@ export interface IRtcEngineEventHandler { onSetRtmFlagResult?(connection: RtcConnection, code: number): void; /** - * @ignore + * Callback for multipath transmission statistics. + * + * Since Added since v4.6.2. + * + * @param stats Multipath transmission statistics. See MultipathStats. */ onMultipathStats?(connection: RtcConnection, stats: MultipathStats): void; /** - * @ignore + * Callback for the result of calling the renewToken method. + * + * Since Added since v4.6.2. This callback is triggered after you call the renewToken method to update the token, to notify the update result. + * + * @param token The updated token. + * @param code Error code. See RenewTokenErrorCode. */ onRenewTokenResult?( connection: RtcConnection, @@ -2651,7 +2699,7 @@ export interface IRtcEngineEventHandler { } /** - * Video device management methods. + * @ignore */ export abstract class IVideoDeviceManager { /** @@ -2699,53 +2747,87 @@ export abstract class IVideoDeviceManager { } /** - * @ignore + * Video effect node types. + * + * Since Available since v4.6.2. */ export enum VideoEffectNodeId { /** - * @ignore + * (1): Beauty effect node. */ Beauty = 1 << 0, /** - * @ignore + * (2): Style makeup effect node. */ StyleMakeup = 1 << 1, /** - * @ignore + * (4): Filter effect node. 
*/ Filter = 1 << 2, } /** - * @ignore + * Action types performed on video effect nodes. + * + * Since Available since v4.6.2. */ export enum VideoEffectAction { /** - * @ignore + * (1): Save the current parameters of the video effect. */ Save = 1, /** - * @ignore + * (2): Reset the video effect to default parameters. */ Reset = 2, } /** - * @ignore + * Used to manage and configure video effects such as beautification, styled makeup, and filters. + * + * Since Available since v4.6.2. */ export abstract class IVideoEffectObject { /** - * @ignore + * Adds or updates the effect of the specified video effect node and template. + * + * Since Added since v4.6.2. Priority rules: + * Style makeup nodes take precedence over filter effect nodes. + * To apply filter effects, you must first remove the style makeup effect node. + * + * @param nodeId The unique identifier or combination of identifiers of the video effect node. See VideoEffectNodeId. + * @param templateName The name of the effect template. If set to NULL or an empty string, the SDK loads the default configuration from the resource package. + * + * @returns + * 0: Success. + * < 0: Failure. */ abstract addOrUpdateVideoEffect(nodeId: number, templateName: string): number; /** - * @ignore + * Removes the video effect with the specified node ID. + * + * Since Added since v4.6.2. + * + * @param nodeId The unique identifier of the video effect node to be removed. See VideoEffectNodeId. + * + * @returns + * 0: Success. + * < 0: Failure. */ abstract removeVideoEffect(nodeId: number): number; /** - * @ignore + * Performs an action on the specified video effect node. + * + * Since Added since v4.6.2. + * + * @param nodeId The unique identifier of the video effect node. + * @param actionId The action to perform. See VideoEffectAction. + * + * @returns + * 0: Success. + * < 0: Failure. 
   */
  abstract performVideoEffectAction(
    nodeId: number,
@@ -2753,7 +2835,17 @@ export abstract class IVideoEffectObject {
  ): number;

  /**
-   * @ignore
+   * Sets a float parameter for the video effect.
+   *
+   * Since Available since v4.6.2.
+   *
+   * @param option The category of the parameter option.
+   * @param key The key name of the parameter.
+   * @param param The float value to set.
+   *
+   * @returns
+   * 0: Success.
+   * < 0: Failure.
   */
  abstract setVideoEffectFloatParam(
    option: string,
    key: string,
    param: number,
  ): number;

  /**
-   * @ignore
+   * Sets an integer parameter for the video effect.
+   *
+   * Since Available since v4.6.2.
+   *
+   * @param option The category of the parameter option.
+   * @param key The key name of the parameter.
+   * @param param The integer value to set.
+   *
+   * @returns
+   * 0: Success.
+   * < 0: Failure.
   */
  abstract setVideoEffectIntParam(
    option: string,
    key: string,
    param: number,
  ): number;

  /**
-   * @ignore
+   * Sets a boolean parameter for the video effect.
+   *
+   * Since Available since v4.6.2.
+   *
+   * @param option The category of the parameter option.
+   * @param key The key name of the parameter.
+   * @param param The boolean value to set: true : Enable the option. false : Disable the option.
+   *
+   * @returns
+   * 0: Success.
+   * < 0: Failure.
   */
  abstract setVideoEffectBoolParam(
    option: string,
    key: string,
    param: boolean,
  ): number;

  /**
-   * @ignore
+   * Gets the value of the specified float parameter in the video effect.
+   *
+   * Since Available since v4.6.2.
+   *
+   * @param option The category of the parameter option.
+   * @param key The key name of the parameter.
+   *
+   * @returns
+   * If the parameter exists, returns the corresponding float value.
+   * If the parameter does not exist or an error occurs, returns 0.0f.
*/ abstract getVideoEffectFloatParam(option: string, key: string): number; /** - * @ignore + * Gets the integer parameter in the video effect. + * + * Since Available since v4.6.2. + * + * @param option The category of the parameter option. + * @param key The key name of the parameter. + * + * @returns + * If the parameter exists, returns the corresponding integer value. + * If the parameter does not exist or an error occurs, returns 0. */ abstract getVideoEffectIntParam(option: string, key: string): number; /** - * @ignore + * Gets the boolean parameter in the video effect. + * + * Since Available since v4.6.2. + * + * @param option The category of the parameter option. + * @param key The key name of the parameter. + * + * @returns + * true : The parameter is enabled. false : The parameter is not enabled or does not exist. */ abstract getVideoEffectBoolParam(option: string, key: string): boolean; } /** - * Configurations for the RtcEngineContext instance. + * Defines RtcEngineContext. */ export class RtcEngineContext { /** - * The App ID issued by Agora for your project. Only users in apps with the same App ID can join the same channel and communicate with each other. An App ID can only be used to create one IRtcEngine instance. To change your App ID, call release to destroy the current IRtcEngine instance, and then create a new one. + * The App ID issued by Agora to the App developer. Only apps using the same App ID can join the same channel for communication or live streaming. An App ID can only be used to create one IRtcEngine. To change the App ID, you must first call release to destroy the current IRtcEngine and then recreate it. */ appId?: string; /** @@ -2812,25 +2950,17 @@ export class RtcEngineContext { */ license?: string; /** - * The audio scenarios. Under different audio scenarios, the device uses different volume types. See AudioScenarioType. + * The audio scenario. Different audio scenarios use different volume types. + * See AudioScenarioType. 
*/ audioScenario?: AudioScenarioType; /** - * The region for connection. This is an advanced feature and applies to scenarios that have regional restrictions. The area codes support bitwise operation. + * The region for connecting to the server. This is an advanced setting for scenarios with access security requirements. Supported regions are listed in AreaCode. Bitwise operations are supported for region codes. */ areaCode?: number; /** - * The SDK log files are: agorasdk.log, agorasdk.1.log, agorasdk.2.log, agorasdk.3.log, and agorasdk.4.log. - * The API call log files are: agoraapi.log, agoraapi.1.log, agoraapi.2.log, agoraapi.3.log, and agoraapi.4.log. - * The default size of each SDK log file and API log file is 2,048 KB. These log files are encoded in UTF-8. - * The SDK writes the latest logs in agorasdk.log or agoraapi.log. - * When agorasdk.log is full, the SDK processes the log files in the following order: - * Delete the agorasdk.4.log file (if any). - * Rename agorasdk.3.log to agorasdk.4.log. - * Rename agorasdk.2.log to agorasdk.3.log. - * Rename agorasdk.1.log to agorasdk.2.log. - * Create a new agorasdk.log file. - * The overwrite rules for the agoraapi.log file are the same as for agorasdk.log. Sets the log file size. See LogConfig. By default, the SDK generates five SDK log files and five API call log files with the following rules: + * Sets the log files output by the SDK. See LogConfig. + * By default, the SDK generates 5 SDK log files and 5 API call log files, with the following rules: */ logConfig?: LogConfig; /** @@ -2842,25 +2972,25 @@ export class RtcEngineContext { */ useExternalEglContext?: boolean; /** - * Whether to enable domain name restriction: true : Enables the domain name restriction. This value is suitable for scenarios where IoT devices use IoT cards for network access. The SDK will only connect to servers in the domain name or IP whitelist that has been reported to the operator. 
false : (Default) Disables the domain name restriction. This value is suitable for most common scenarios. + * Whether to enable domain restriction: true : Enables domain restriction. This setting is applicable when IoT devices access the network using IoT SIM cards. The SDK connects only to servers on the domain or IP whitelist reported to the carrier. false : (Default) Disables domain restriction. This setting is suitable for most common scenarios. */ domainLimit?: boolean; /** - * Whether to automatically register the Agora extensions when initializing IRtcEngine : true : (Default) Automatically register the Agora extensions when initializing IRtcEngine. false : Do not register the Agora extensions when initializing IRtcEngine. You need to call enableExtension to register the Agora extensions. + * Whether to automatically register Agora extensions when initializing IRtcEngine : true : (Default) Automatically registers Agora extensions when initializing IRtcEngine. false : Does not register Agora extensions when initializing IRtcEngine. You need to call enableExtension to register the Agora extensions. */ autoRegisterAgoraExtensions?: boolean; } /** - * Metadata type of the observer. We only support video metadata for now. + * The Metadata type of the observer. Currently only video-type Metadata is supported. */ export enum MetadataType { /** - * -1: The type of metadata is unknown. + * -1: Unknown Metadata type. */ UnknownMetadata = -1, /** - * 0: The type of metadata is video. + * 0: Metadata type is video. */ VideoMetadata = 0, } @@ -2888,135 +3018,141 @@ export enum MaxMetadataSizeType { */ export class Metadata { /** - * The channel name. + * Channel name. */ channelId?: string; /** - * The user ID. - * For the recipient: The ID of the remote user who sent the Metadata. - * For the sender: Ignore it. + * User ID. + * For receivers: The ID of the remote user who sent this Metadata. + * For senders: Ignore this field. 
*/ uid?: number; /** - * The buffer size of the sent or received Metadata. + * Buffer size of the received or sent Metadata. */ size?: number; /** - * The buffer address of the received Metadata. + * Buffer address of the received Metadata. */ buffer?: Uint8Array; /** - * The timestamp (ms) of when the Metadata is sent. + * Timestamp of the sent Metadata, in milliseconds. */ timeStampMs?: number; } /** - * The metadata observer. + * Metadata observer. */ export interface IMetadataObserver { /** - * Occurs when the local user receives the metadata. + * The receiver has received metadata. * - * @param metadata The metadata received. See Metadata. + * @param metadata The received metadata. See Metadata. */ onMetadataReceived?(metadata: Metadata): void; } /** - * Reasons for the changes in CDN streaming status. + * Reason for CDN streaming state change. + * + * Deprecated Deprecated since v4.6.2. */ export enum DirectCdnStreamingReason { /** - * 0: No error. + * 0: Streaming state is normal. */ DirectCdnStreamingReasonOk = 0, /** - * 1: A general error; no specific reason. You can try to push the media stream again. + * 1: General error with no specific reason. You can try restarting the stream. */ DirectCdnStreamingReasonFailed = 1, /** - * 2: An error occurs when pushing audio streams. For example, the local audio capture device is not working properly, is occupied by another process, or does not get the permission required. + * 2: Error in audio streaming. For example, the local audio capture device is not working properly, is occupied by another process, or lacks permission. */ DirectCdnStreamingReasonAudioPublication = 2, /** - * 3: An error occurs when pushing video streams. For example, the local video capture device is not working properly, is occupied by another process, or does not get the permission required. + * 3: Error in video streaming. 
For example, the local video capture device is not working properly, is occupied by another process, or lacks permission. */ DirectCdnStreamingReasonVideoPublication = 3, /** - * 4: Fails to connect to the CDN. + * 4: Failed to connect to CDN. */ DirectCdnStreamingReasonNetConnect = 4, /** - * 5: The URL is already being used. Use a new URL for streaming. + * 5: The URL is already used for streaming. Please use a new URL. */ DirectCdnStreamingReasonBadName = 5, } /** - * The current CDN streaming state. + * Current CDN streaming state. + * + * Deprecated Deprecated since v4.6.2. */ export enum DirectCdnStreamingState { /** - * 0: The initial state before the CDN streaming starts. + * 0: Initial state, streaming has not started yet. */ DirectCdnStreamingStateIdle = 0, /** - * 1: Streams are being pushed to the CDN. The SDK returns this value when you call the startDirectCdnStreaming method to push streams to the CDN. + * 1: Streaming in progress. When you call startDirectCdnStreaming and the streaming starts successfully, the SDK returns this value. */ DirectCdnStreamingStateRunning = 1, /** - * 2: Stops pushing streams to the CDN. The SDK returns this value when you call the stopDirectCdnStreaming method to stop pushing streams to the CDN. + * 2: Streaming has ended normally. When you call stopDirectCdnStreaming to stop streaming manually, the SDK returns this value. */ DirectCdnStreamingStateStopped = 2, /** - * 3: Fails to push streams to the CDN. You can troubleshoot the issue with the information reported by the onDirectCdnStreamingStateChanged callback, and then push streams to the CDN again. + * 3: Streaming failed. You can troubleshoot the issue using the information reported in the onDirectCdnStreamingStateChanged callback, then restart streaming. */ DirectCdnStreamingStateFailed = 3, /** - * 4: Tries to reconnect the Agora server to the CDN. 
The SDK attempts to reconnect a maximum of 10 times; if the connection is not restored, the streaming state becomes DirectCdnStreamingStateFailed. + * 4: Attempting to reconnect to the Agora server and CDN. The SDK tries to reconnect up to 10 times. If reconnection still fails, the streaming state changes to DirectCdnStreamingStateFailed. */ DirectCdnStreamingStateRecovering = 4, } /** - * The statistics of the current CDN streaming. + * Current CDN streaming statistics. + * + * Deprecated Deprecated since v4.6.2. */ export class DirectCdnStreamingStats { /** - * The width (px) of the video frame. + * Video width (px). */ videoWidth?: number; /** - * The height (px) of the video frame. + * Video height (px). */ videoHeight?: number; /** - * The frame rate (fps) of the current video frame. + * Current video frame rate (fps). */ fps?: number; /** - * The bitrate (bps) of the current video frame. + * Current video bitrate (bps). */ videoBitrate?: number; /** - * The bitrate (bps) of the current audio frame. + * Current audio bitrate (bps). */ audioBitrate?: number; } /** - * The IDirectCdnStreamingEventHandler interface class is used by the SDK to send event notifications of CDN streaming to your app. Your app can get those notifications through methods that inherit this interface class. + * The IDirectCdnStreamingEventHandler interface class is used by the SDK to send CDN streaming event notifications to the App. The App obtains SDK event notifications by inheriting methods of this interface class. */ export interface IDirectCdnStreamingEventHandler { /** - * Occurs when the CDN streaming state changes. + * Callback when the CDN streaming state changes. * - * When the host directly pushes streams to the CDN, if the streaming state changes, the SDK triggers this callback to report the changed streaming state, error codes, and other information. You can troubleshoot issues by referring to this callback. 
+ * After the host starts direct CDN streaming, when the streaming state changes, the SDK triggers this callback to report the new state, error code, and message. You can use this information to troubleshoot. * - * @param state The current CDN streaming state. See DirectCdnStreamingState. - * @param reason Reasons for changes in the status of CDN streaming. See DirectCdnStreamingReason. - * @param message The information about the changed streaming state. + * @param state The current streaming state. See DirectCdnStreamingState. + * @param reason The reason for the change in streaming state. See DirectCdnStreamingReason. + * @param message The message corresponding to the state change. */ onDirectCdnStreamingStateChanged?( state: DirectCdnStreamingState, @@ -3025,33 +3161,35 @@ export interface IDirectCdnStreamingEventHandler { ): void; /** - * Reports the CDN streaming statistics. + * Callback for CDN streaming statistics. * - * When the host directly pushes media streams to the CDN, the SDK triggers this callback every one second. + * During the process of pushing streams directly to CDN by the host, the SDK triggers this callback every second. * - * @param stats The statistics of the current CDN streaming. See DirectCdnStreamingStats. + * @param stats Current streaming statistics. See DirectCdnStreamingStats. */ onDirectCdnStreamingStats?(stats: DirectCdnStreamingStats): void; } /** - * The media setting options for the host. + * Media options for the host. + * + * Deprecated Deprecated since v4.6.2. */ export class DirectCdnStreamingMediaOptions { /** - * Sets whether to publish the video captured by the camera: true : Publish the video captured by the camera. false : (Default) Do not publish the video captured by the camera. + * Whether to publish video captured by the camera. true : Publish camera video. false : (Default) Do not publish camera video. 
*/ publishCameraTrack?: boolean; /** - * Sets whether to publish the audio captured by the microphone: true : Publish the audio captured by the microphone. false : (Default) Do not publish the audio captured by the microphone. + * Whether to publish audio captured by the microphone. true : Publish microphone audio. false : (Default) Do not publish microphone audio. */ publishMicrophoneTrack?: boolean; /** - * Sets whether to publish the captured audio from a custom source: true : Publish the captured audio from a custom source. false : (Default) Do not publish the captured audio from the custom source. + * Whether to publish custom captured audio. true : Publish custom audio. false : (Default) Do not publish custom audio. */ publishCustomAudioTrack?: boolean; /** - * Sets whether to publish the captured video from a custom source: true : Publish the captured video from a custom source. false : (Default) Do not publish the captured video from the custom source. + * Whether to publish custom captured video. true : Publish custom video. false : (Default) Do not publish custom video. */ publishCustomVideoTrack?: boolean; /** @@ -3063,7 +3201,7 @@ export class DirectCdnStreamingMediaOptions { */ publishMediaPlayerId?: number; /** - * The video track ID returned by calling the createCustomVideoTrack method. The default value is 0. + * The video track ID returned by the createCustomVideoTrack method. Default is 0. */ customVideoTrackId?: number; } @@ -3091,24 +3229,28 @@ export class ExtensionInfo { } /** - * The basic interface of the Agora SDK that implements the core functions of real-time communication. + * The base interface class of the RTC SDK that implements the main functions of real-time audio and video. * - * IRtcEngine provides the main methods that your app can call. Before calling other APIs, you must call createAgoraRtcEngine to create an IRtcEngine object. + * IRtcEngine provides the main methods for the App to call. 
+ * Before calling other APIs, you must first call createAgoraRtcEngine to create an IRtcEngine object. */ export abstract class IRtcEngine { /** - * All called methods provided by the IRtcEngine class are executed asynchronously. Agora recommends calling these methods in the same thread. + * Creates and initializes IRtcEngine. * - * @param context Configurations for the IRtcEngine instance. See RtcEngineContext. + * All interface functions of the IRtcEngine class, unless otherwise specified, are asynchronous. It is recommended to call them on the same thread. + * The SDK only supports creating one IRtcEngine instance per App. + * + * @param context Configuration for the IRtcEngine instance. See RtcEngineContext. * * @returns - * 0: Success. - * < 0: Failure. - * -1: A general error occurs (no specified reason). - * -2: The parameter is invalid. - * -7: The SDK is not initialized. - * -22: The resource request failed. The SDK fails to allocate resources because your app consumes too much system resource or the system resources are insufficient. - * -101: The App ID is invalid. + * 0: The method call succeeds. + * < 0: The method call fails. + * -1: General error (not specifically classified). + * -2: An invalid parameter is set. + * -7: The SDK is not initialized. + * -22: Resource allocation failed. This error is returned when the App uses too many resources or system resources are exhausted. + * -101: Invalid App ID. */ abstract initialize(context: RtcEngineContext): number; @@ -3131,45 +3273,51 @@ export abstract class IRtcEngine { abstract getErrorDescription(code: number): string; /** - * Queries the video codec capabilities of the SDK. + * Queries the video codec capabilities supported by the SDK. * * @returns - * If the call is successful, an object containing the following attributes is returned: codecInfo : The CodecCapInfo array, indicating the video codec capabillity of the device. size : The size of the CodecCapInfo array. 
- * If the call timeouts, please modify the call logic and do not invoke the method in the main thread. + * If the call succeeds, returns an object with the following properties: codecInfo : An array of CodecCapInfo, representing the SDK's video encoding capabilities. size : The size of the CodecCapInfo array. + * If the call times out, modify your logic to avoid calling this method on the main thread. */ abstract queryCodecCapability(): { codecInfo: CodecCapInfo[]; size: number }; /** - * Queries device score. + * Queries the device score level. * * @returns - * > 0: The method call succeeeds, the value is the current device's score, the range is [0,100], the larger the value, the stronger the device capability. Most devices are rated between 60 and 100. + * > 0: Success. The value is the current device score, ranging from [0,100]. A higher value indicates stronger device capability. Most device scores range from 60 to 100. * < 0: Failure. */ abstract queryDeviceScore(): number; /** - * Preloads a channel with token, channelId, and uid. + * Preloads a channel using token, channelId, and uid. * - * When audience members need to switch between different channels frequently, calling the method can help shortening the time of joining a channel, thus reducing the time it takes for audience members to hear and see the host. If you join a preloaded channel, leave it and want to rejoin the same channel, you do not need to call this method unless the token for preloading the channel expires. Failing to preload a channel does not mean that you can't join a channel, nor will it increase the time of joining a channel. + * Calling this method reduces the time it takes for a viewer to join a channel when frequently switching channels, thereby shortening the time to hear the host's first audio frame and see the first video frame, improving the viewer's video experience. 
+ * If the channel has already been preloaded successfully, and the viewer leaves and rejoins the channel, as long as the Token used during preload is still valid, there is no need to preload again. If preload fails, it does not affect normal channel joining or increase the join time. + * When calling this method, ensure the user role is set to audience and the audio scenario is not AudioScenarioChorus, otherwise preload will not take effect. + * Ensure the channel name, user ID, and Token passed during preload match those used when joining the channel, otherwise preload will not take effect. + * A single IRtcEngine instance supports up to 20 preloaded channels. If exceeded, only the latest 20 preloaded channels are effective. * - * @param token The token generated on your server for authentication. When the token for preloading channels expires, you can update the token based on the number of channels you preload. - * When preloading one channel, calling this method to pass in the new token. - * When preloading more than one channels: - * If you use a wildcard token for all preloaded channels, call updatePreloadChannelToken to update the token. When generating a wildcard token, ensure the user ID is not set as 0. - * If you use different tokens to preload different channels, call this method to pass in your user ID, channel name and the new token. - * @param channelId The channel name that you want to preload. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters (89 characters in total): - * All lowercase English letters: a to z. - * All uppercase English letters: A to Z. - * All numeric characters: 0 to 9. 
- * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * @param uid The user ID. This parameter is used to identify the user in the channel for real-time audio and video interaction. You need to set and manage user IDs yourself, and ensure that each user ID in the same channel is unique. This parameter is a 32-bit unsigned integer. The value range is 1 to 2 32 -1. If the user ID is not assigned (or set to 0), the SDK assigns a random user ID and onJoinChannelSuccess returns it in the callback. Your application must record and maintain the returned user ID, because the SDK does not do so. + * @param token A dynamic key generated on your server for authentication. See [Token Authentication](https://doc.shengwang.cn/doc/rtc/rn/basic-features/token-authentication). + * When the Token expires, depending on the number of preloaded channels, you can provide a new Token in different ways: + * For one preloaded channel: call this method again with the new Token. + * For multiple preloaded channels: + * If using a wildcard Token, call updatePreloadChannelToken to update the Token for all preloaded channels. When generating a wildcard Token, the user ID must not be 0. See [Using Wildcard Token](https://doc.shengwang.cn/doc/rtc/rn/best-practice/wildcard-token). + * If using different Tokens: call this method with the user ID, channel name, and the updated Token. + * @param channelId The name of the channel to preload. This parameter identifies the channel for real-time audio and video interaction. Users with the same App ID and channel name join the same channel. + * This parameter must be a string no longer than 64 bytes. Supported character set (89 characters total): + * 26 lowercase letters a~z + * 26 uppercase letters A~Z + * 10 digits 0~9 + * "!" "#" "$" "%" "&" "(" ")" "+" "-" ":" ";" "<" "=" "." ">" "?" "@" "[" "]" "^" "_" "{" "}" "|" "~" "," + * @param uid User ID. 
This parameter identifies the user in the real-time audio and video channel. You must set and manage the user ID yourself and ensure uniqueness within the channel. This parameter is a 32-bit unsigned integer. Recommended range: 1 to 2^32-1. If not specified (i.e., set to 0), the SDK automatically assigns one and returns it in the onJoinChannelSuccess callback. The application must store and manage this value; the SDK does not manage it. * * @returns - * 0: Success. - * < 0: Failure. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. - * -102: The channel name is invalid. You need to pass in a valid channel name and join the channel again. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -7: IRtcEngine is not initialized. Initialize IRtcEngine before calling this method. + * -102: Invalid channel name. Provide a valid channel name and rejoin the channel. */ abstract preloadChannel( token: string, @@ -3178,33 +3326,39 @@ export abstract class IRtcEngine { ): number; /** - * Preloads a channel with token, channelId, and userAccount. - * - * When audience members need to switch between different channels frequently, calling the method can help shortening the time of joining a channel, thus reducing the time it takes for audience members to hear and see the host. If you join a preloaded channel, leave it and want to rejoin the same channel, you do not need to call this method unless the token for preloading the channel expires. Failing to preload a channel does not mean that you can't join a channel, nor will it increase the time of joining a channel. - * - * @param token The token generated on your server for authentication. When the token for preloading channels expires, you can update the token based on the number of channels you preload. 
- * When preloading one channel, calling this method to pass in the new token. - * When preloading more than one channels: - * If you use a wildcard token for all preloaded channels, call updatePreloadChannelToken to update the token. When generating a wildcard token, ensure the user ID is not set as 0. - * If you use different tokens to preload different channels, call this method to pass in your user ID, channel name and the new token. - * @param channelId The channel name that you want to preload. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters (89 characters in total): - * All lowercase English letters: a to z. - * All uppercase English letters: A to Z. - * All numeric characters: 0 to 9. + * Preloads a channel using token, channelId, and userAccount. + * + * Calling this method can reduce the time it takes for a viewer to join a channel when frequently switching channels, thereby shortening the time to hear the first audio frame and see the first video frame from the host, and improving the video experience for viewers. + * If the channel has already been successfully preloaded, and the viewer leaves and rejoins the channel, as long as the token used during preloading is still valid, there is no need to preload it again. If preloading fails, it does not affect the subsequent normal channel join process, nor does it increase the time to join the channel. + * When calling this method, make sure the user role is set to audience and the audio scenario is not set to AudioScenarioChorus, otherwise preloading will not take effect. + * Make sure the channelId, userAccount, and token passed during preloading are the same as those used when joining the channel later; otherwise, preloading will not take effect. 
+ * Currently, one IRtcEngine instance supports preloading up to 20 channels. If this limit is exceeded, only the latest 20 preloaded channels take effect. + * + * @param token A dynamic key generated on your server for authentication. See [Use Token Authentication](https://doc.shengwang.cn/doc/rtc/rn/basic-features/token-authentication). + * When the token expires, depending on the number of preloaded channels, you can pass a new token in different ways: + * For a single preloaded channel: call this method to pass the new token. + * For multiple preloaded channels: + * If you use a wildcard token, call updatePreloadChannelToken to update the token for all preloaded channels. When generating a wildcard token, the user ID must not be 0. See [Use Wildcard Token](https://doc.shengwang.cn/doc/rtc/rn/best-practice/wildcard-token). + * If you use different tokens: call this method and pass your user ID, corresponding channel name, and the updated token. + * @param channelId The name of the channel to preload. This parameter identifies the channel for real-time audio and video interaction. Under the same App ID, users with the same channel name will join the same channel for interaction. + * This parameter must be a string within 64 bytes. The supported character set includes 89 characters: + * 26 lowercase English letters a~z + * 26 uppercase English letters A~Z + * 10 digits 0~9 * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * @param userAccount The user account. This parameter is used to identify the user in the channel for real-time audio and video engagement. You need to set and manage user accounts yourself and ensure that each user account in the same channel is unique. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported characters are as follows(89 in total): - * The 26 lowercase English letters: a to z. 
- * The 26 uppercase English letters: A to Z. - * All numeric characters: 0 to 9. - * Space + * @param userAccount The user's User Account. This parameter identifies the user in the real-time audio and video interaction channel. You need to set and manage the User Account yourself and ensure that each user in the same channel has a unique User Account. This parameter is required, must not exceed 255 bytes, and cannot be null. The supported character set includes 89 characters: + * 26 lowercase English letters a-z + * 26 uppercase English letters A-Z + * 10 digits 0-9 + * space * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," * * @returns - * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. For example, the User Account is empty. You need to pass in a valid parameter and join the channel again. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. - * -102: The channel name is invalid. You need to pass in a valid channel name and join the channel again. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: Invalid parameter. For example, the User Account is empty. You need to provide valid parameters and rejoin the channel. + * -7: The IRtcEngine object is not initialized. You need to initialize the IRtcEngine object successfully before calling this method. + * -102: Invalid channel name. You need to provide a valid channel name and rejoin the channel. */ abstract preloadChannelWithUserAccount( token: string, @@ -3213,47 +3367,50 @@ export abstract class IRtcEngine { ): number; /** - * Updates the wildcard token for preloading channels. + * Updates the wildcard token for the preloaded channel. 
* - * You need to maintain the life cycle of the wildcard token by yourself. When the token expires, you need to generate a new wildcard token and then call this method to pass in the new token. + * You need to manage the lifecycle of the wildcard token yourself. When the wildcard token expires, you need to generate a new one on your server and pass it in through this method. * * @param token The new token. * * @returns * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. For example, the token is invalid. You need to pass in a valid parameter and join the channel again. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: The passed parameter is invalid. For example, an illegal token is used. You need to provide valid parameters and rejoin the channel. + * -7: The IRtcEngine object is not initialized. You need to initialize the IRtcEngine object before calling this method. */ abstract updatePreloadChannelToken(token: string): number; /** - * Joins a channel with media options. + * Sets media options and joins a channel. * - * This method supports setting the media options when joining a channel, such as whether to publish audio and video streams within the channel. or whether to automatically subscribe to the audio and video streams of all remote users when joining a channel. By default, the user subscribes to the audio and video streams of all the other users in the channel, giving rise to usage and billings. To stop subscribing to other streams, set the options parameter or call the corresponding mute methods. + * This method allows you to set media options when joining a channel, such as whether to publish audio and video streams in the channel. 
It also determines whether to automatically subscribe to all remote audio and video streams in the channel when joining. By default, the user subscribes to all other users' audio and video streams in the channel, which results in usage and affects billing. If you want to unsubscribe, you can do so by setting the options parameter or using the corresponding mute methods. + * This method only supports joining one channel at a time. + * Apps with different App IDs cannot communicate with each other. + * Before joining a channel, make sure the App ID used to generate the Token is the same as the one used in the initialize method to initialize the engine, otherwise joining the channel with the Token will fail. * - * @param token The token generated on your server for authentication. - * (Recommended) If your project has enabled the security mode (using APP ID and Token for authentication), this parameter is required. - * If you have only enabled the testing mode (using APP ID for authentication), this parameter is optional. You will automatically exit the channel 24 hours after successfully joining in. - * If you need to join different channels at the same time or switch between channels, Agora recommends using a wildcard token so that you don't need to apply for a new token every time joining a channel. - * @param channelId The channel name. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters (89 characters in total): - * All lowercase English letters: a to z. - * All uppercase English letters: A to Z. - * All numeric characters: 0 to 9. + * @param token A dynamic key generated on your server for authentication. See [Use Token Authentication](https://doc.shengwang.cn/doc/rtc/rn/basic-features/token-authentication). 
+ * (Recommended) If your project enables the security mode, i.e., uses APP ID + Token for authentication, this parameter is required. + * If your project only enables debug mode, i.e., uses only the APP ID for authentication, you can join the channel without a Token. The user will automatically leave the channel 24 hours after successfully joining. + * If you need to join multiple channels simultaneously or switch channels frequently, Agora recommends using a wildcard Token to avoid requesting a new Token from the server each time. See [Use Wildcard Token](https://doc.shengwang.cn/doc/rtc/rn/best-practice/wildcard-token). + * @param channelId Channel name. This parameter identifies the channel for real-time audio and video interaction. Users with the same App ID and channel name will join the same channel. This parameter is a string of up to 64 bytes. Supported character set (89 characters total): + * 26 lowercase English letters a~z + * 26 uppercase English letters A~Z + * 10 digits 0~9 * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * @param uid The user ID. This parameter is used to identify the user in the channel for real-time audio and video interaction. You need to set and manage user IDs yourself, and ensure that each user ID in the same channel is unique. This parameter is a 32-bit unsigned integer. The value range is 1 to 2 32 -1. If the user ID is not assigned (or set to 0), the SDK assigns a random user ID and onJoinChannelSuccess returns it in the callback. Your application must record and maintain the returned user ID, because the SDK does not do so. - * @param options The channel media options. See ChannelMediaOptions. + * @param uid User ID. This parameter identifies the user in the real-time audio and video interaction channel. You must set and manage the user ID yourself and ensure it is unique within the same channel. This parameter is a 32-bit unsigned integer. 
Recommended range: 1 to 2^32-1. If not specified (i.e., set to 0), the SDK automatically assigns one and returns it in the onJoinChannelSuccess callback. The application must remember and maintain this value; the SDK does not maintain it. + * @param options Channel media options. See ChannelMediaOptions. * * @returns - * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. For example, the token is invalid, the uid parameter is not set to an integer, or the value of a member in ChannelMediaOptions is invalid. You need to pass in a valid parameter and join the channel again. - * -3: Fails to initialize the IRtcEngine object. You need to reinitialize the IRtcEngine object. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. - * -8: The internal state of the IRtcEngine object is wrong. The typical cause is that after calling startEchoTest to start a call loop test, you call this method to join the channel without calling stopEchoTest to stop the test. You need to call stopEchoTest before calling this method. - * -17: The request to join the channel is rejected. The typical cause is that the user is already in the channel. Agora recommends that you use the onConnectionStateChanged callback to see whether the user is in the channel. Do not call this method to join the channel unless you receive the ConnectionStateDisconnected (1) state. - * -102: The channel name is invalid. You need to pass in a valid channel name in channelId to rejoin the channel. - * -121: The user ID is invalid. You need to pass in a valid user ID in uid to rejoin the channel. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: Invalid parameter. For example, an invalid Token is used, uid is not an integer, or a ChannelMediaOptions member is invalid. 
You need to provide valid parameters and rejoin the channel. + * -3: IRtcEngine object initialization failed. You need to reinitialize the IRtcEngine object. + * -7: IRtcEngine object is not initialized. You must initialize the IRtcEngine object before calling this method. + * -8: Internal state error of the IRtcEngine object. Possible reason: startEchoTest was called to start an echo test, but stopEchoTest was not called before calling this method. You must call stopEchoTest before this method. + * -17: Join channel request is rejected. Possible reason: the user is already in the channel. Use the onConnectionStateChanged callback to check if the user is in the channel. Do not call this method again unless you receive the ConnectionStateDisconnected (1) state. + * -102: Invalid channel name. You must provide a valid channel name in channelId and rejoin the channel. + * -121: Invalid user ID. You must provide a valid user ID in uid and rejoin the channel. */ abstract joinChannel( token: string, @@ -3263,79 +3420,83 @@ export abstract class IRtcEngine { ): number; /** - * Updates the channel media options after joining the channel. + * Updates the channel media options after joining a channel. * * @param options The channel media options. See ChannelMediaOptions. * * @returns * 0: Success. - * < 0: Failure. - * -2: The value of a member in ChannelMediaOptions is invalid. For example, the token or the user ID is invalid. You need to fill in a valid parameter. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. - * -8: The internal state of the IRtcEngine object is wrong. The possible reason is that the user is not in the channel. Agora recommends that you use the onConnectionStateChanged callback to see whether the user is in the channel. If you receive the ConnectionStateDisconnected (1) or ConnectionStateFailed (5) state, the user is not in the channel. 
You need to call joinChannel to join a channel before calling this method. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: The value of a ChannelMediaOptions member is invalid. For example, an illegal token is used or an invalid user role is set. You need to provide valid parameters. + * -7: The IRtcEngine object is not initialized. You need to initialize the IRtcEngine object before calling this method. + * -8: The internal state of the IRtcEngine object is incorrect. A possible reason is that the user is not in a channel. It is recommended to determine whether the user is in a channel through the onConnectionStateChanged callback. If you receive ConnectionStateDisconnected (1) or ConnectionStateFailed (5), it means the user is not in a channel. You need to call joinChannel before calling this method. */ abstract updateChannelMediaOptions(options: ChannelMediaOptions): number; /** * Sets channel options and leaves the channel. * - * After calling this method, the SDK terminates the audio and video interaction, leaves the current channel, and releases all resources related to the session. After joining the channel, you must call this method to end the call; otherwise, the next call cannot be started. If you have called joinChannelEx to join multiple channels, calling this method will leave all the channels you joined. This method call is asynchronous. When this method returns, it does not necessarily mean that the user has left the channel. + * After calling this method, the SDK stops all audio and video interactions, leaves the current channel, and releases all session-related resources. + * You must call this method after successfully joining a channel to end the call; otherwise, you cannot start a new call. If you have joined multiple channels using joinChannelEx, calling this method leaves all joined channels. This method is asynchronous. 
When the call returns, the channel has not actually been left. + * If you call release immediately after this method, the SDK will not trigger the onLeaveChannel callback. * - * @param options The options for leaving the channel. See LeaveChannelOptions. + * @param options Options for leaving the channel. See LeaveChannelOptions. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract leaveChannel(options?: LeaveChannelOptions): number; /** * Renews the token. * - * This method is used to update the token. After successfully calling this method, the SDK will trigger the callback. A token will expire after a certain period of time, at which point the SDK will be unable to establish a connection with the server. + * This method is used to renew the token. The token will expire after a certain period, after which the SDK will not be able to connect to the server. * - * @param token The new token. + * @param token The newly generated token. * * @returns - * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. For example, the token is empty. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. - * 110: Invalid token. Ensure the following: - * The user ID specified when generating the token is consistent with the user ID used when joining the channel. - * The generated token is the same as the token passed in to join the channel. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: Invalid parameter. For example, the token is empty. + * -7: The IRtcEngine object is not initialized. 
You need to initialize the IRtcEngine object successfully before calling this method. + * -110: Invalid token. Make sure: + * The user ID specified when generating the token matches the one used to join the channel, + * The generated token matches the one used to join the channel. */ abstract renewToken(token: string): number; /** * Sets the channel profile. * - * You can call this method to set the channel profile. The SDK adopts different optimization strategies for different channel profiles. For example, in a live streaming scenario, the SDK prioritizes video quality. After initializing the SDK, the default channel profile is the live streaming profile. In different channel scenarios, the default audio routing of the SDK is different. See setDefaultAudioRouteToSpeakerphone. + * You can call this method to set the channel profile. The SDK uses different optimization strategies for different scenarios. For example, in the live streaming scenario, it prioritizes video quality. The default channel profile after SDK initialization is live streaming. Under different channel profiles, the SDK uses different default audio routes. See the description in setDefaultAudioRouteToSpeakerphone. + * To ensure real-time audio and video quality, all users in the same channel must use the same channel profile. * * @param profile The channel profile. See ChannelProfileType. * * @returns - * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: Invalid parameter. * -7: The SDK is not initialized. */ abstract setChannelProfile(profile: ChannelProfileType): number; /** - * Sets the user role and the audience latency level in a live streaming scenario. + * Sets the user role and audience latency level in a live streaming scenario. 
* - * By default,the SDK sets the user role as audience. You can call this method to set the user role as host. The user role (roles) determines the users' permissions at the SDK level, including whether they can publish audio and video streams in a channel. + * By default, the SDK sets the user role to audience. You can call this method to set the user role to broadcaster. The user role (role) determines the user's permissions at the SDK level, such as whether they can publish streams. When the user role is set to broadcaster, the audience latency level only supports AudienceLatencyLevelUltraLowLatency (ultra-low latency). + * If you call this method before joining the channel and set role to BROADCASTER, the local onClientRoleChanged callback is not triggered. * - * @param role The user role. See ClientRoleType. If you set the user role as an audience member, you cannot publish audio and video streams in the channel. If you want to publish media streams in a channel during live streaming, ensure you set the user role as broadcaster. - * @param options The detailed options of a user, including the user level. See ClientRoleOptions. + * @param role The user role. See ClientRoleType. Users with the audience role cannot publish audio or video streams in the channel. When publishing in a live streaming scenario, ensure the user role is switched to broadcaster. + * @param options User-specific settings, including user level. See ClientRoleOptions. * * @returns - * 0: Success. - * < 0: Failure. - * -1: A general error occurs (no specified reason). - * -2: The parameter is invalid. - * -5: The request is rejected. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -1: General error (not categorized). + * -2: Invalid parameter. + * -5: The call was rejected. * -7: The SDK is not initialized. 
*/ abstract setClientRole( @@ -3344,53 +3505,57 @@ export abstract class IRtcEngine { ): number; /** - * Starts an audio device loopback test. + * Starts an audio and video call loopback test. * - * To test whether the user's local sending and receiving streams are normal, you can call this method to perform an audio and video call loop test, which tests whether the audio and video devices and the user's upstream and downstream networks are working properly. After starting the test, the user needs to make a sound or face the camera. The audio or video is output after about two seconds. If the audio playback is normal, the audio device and the user's upstream and downstream networks are working properly; if the video playback is normal, the video device and the user's upstream and downstream networks are working properly. + * To test whether local audio/video sending and receiving are functioning properly, you can call this method to start an audio and video call loopback test, which checks whether the system's audio/video devices and the user's uplink/downlink network are working correctly. + * After the test starts, the user should speak or face the camera. The audio or video will play back after about 2 seconds. If audio plays back normally, it means the system audio devices and network are working. If video plays back normally, it means the system video devices and network are working. + * When calling this method in a channel, ensure no audio/video streams are being published. + * After calling this method, you must call stopEchoTest to end the test. Otherwise, the user cannot perform another loopback test or join a channel. + * In live streaming scenarios, only the host can call this method. * - * @param config The configuration of the audio and video call loop test. See EchoTestConfiguration. + * @param config Configuration for the audio and video call loopback test. See EchoTestConfiguration. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract startEchoTest(config: EchoTestConfiguration): number; /** - * Stops the audio call test. + * Stops the audio call loopback test. * - * After calling startEchoTest, you must call this method to end the test; otherwise, the user cannot perform the next audio and video call loop test and cannot join the channel. + * After calling startEchoTest, you must call this method to end the test. Otherwise, the user will not be able to perform the next audio/video call loopback test or join a channel. * * @returns * 0: Success. - * < 0: Failure. - * -5(ERR_REFUSED): Failed to stop the echo test. The echo test may not be running. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -5(ERR_REFUSED): Failed to stop the test. The test may not be running. */ abstract stopEchoTest(): number; /** * Enables or disables multi-camera capture. * - * In scenarios where there are existing cameras to capture video, Agora recommends that you use the following steps to capture and publish video with multiple cameras: - * Call this method to enable multi-channel camera capture. - * Call startPreview to start the local video preview. - * Call startCameraCapture, and set sourceType to start video capture with the second camera. - * Call joinChannelEx, and set publishSecondaryCameraTrack to true to publish the video stream captured by the second camera in the channel. If you want to disable multi-channel camera capture, use the following steps: + * In scenarios where video is already being captured by a camera, Agora recommends the following steps to implement multi-camera capture and video publishing: + * Call this method to enable multi-camera capture. + * Call startPreview to start local video preview. 
+ * Call startCameraCapture and set sourceType to specify the second camera to start capturing. + * Call joinChannelEx and set publishSecondaryCameraTrack to true to publish the video stream from the second camera in the channel. To disable multi-camera capture, refer to the following steps: * Call stopCameraCapture. - * Call this method with enabled set to false. You can call this method before and after startPreview to enable multi-camera capture: - * If it is enabled before startPreview, the local video preview shows the image captured by the two cameras at the same time. - * If it is enabled after startPreview, the SDK stops the current camera capture first, and then enables the primary camera and the second camera. The local video preview appears black for a short time, and then automatically returns to normal. This method applies to iOS only. When using this function, ensure that the system version is 13.0 or later. The minimum iOS device types that support multi-camera capture are as follows: + * Call this method and set enabled to false. This method is for iOS only. When using multi-camera video capture, ensure the system version is 13.0 or above. The minimum supported iOS device models for multi-camera capture are: * iPhone XR * iPhone XS * iPhone XS Max - * iPad Pro 3rd generation and later + * iPad Pro (3rd generation and later) You can call this method before or after startPreview to enable multi-camera capture: + * If called before startPreview, the local video preview will display the images captured by both cameras. + * If called after startPreview, the SDK will first stop the current camera capture, then start both the original and second cameras. The local video preview will briefly go black and then automatically recover. * - * @param enabled Whether to enable multi-camera video capture mode: true : Enable multi-camera capture mode; the SDK uses multiple cameras to capture video. 
false : Disable multi-camera capture mode; the SDK uses a single camera to capture video. + * @param enabled Whether to enable multi-camera video capture mode: true : Enable multi-camera capture mode. The SDK uses multiple cameras to capture video. false : Disable multi-camera capture mode. The SDK uses only a single camera to capture video. * @param config Capture configuration for the second camera. See CameraCapturerConfiguration. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableMultiCamera( enabled: boolean, @@ -3400,103 +3565,113 @@ export abstract class IRtcEngine { /** * Enables the video module. * - * The video module is disabled by default, call this method to enable it. If you need to disable the video module later, you need to call disableVideo. + * The video module is disabled by default and needs to be enabled by calling this method. To disable the video module later, call the disableVideo method. + * This method sets the internal engine to the enabled state and remains effective after leaving the channel. + * Calling this method resets the entire engine and has a relatively slow response time. Depending on your needs, you can use the following methods to independently control specific video module functions: enableLocalVideo : Whether to start camera capture and create a local video stream. muteLocalVideoStream : Whether to publish the local video stream. muteRemoteVideoStream : Whether to receive and play remote video streams. muteAllRemoteVideoStreams : Whether to receive and play all remote video streams. + * When called in a channel, this method resets the settings of enableLocalVideo, muteRemoteVideoStream, and muteAllRemoteVideoStreams, so use with caution. * * @returns - * 0: Success. - * < 0: Failure. + * 0: Method call succeeds. + * < 0: Method call fails. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableVideo(): number; /** * Disables the video module. * - * This method is used to disable the video module. + * This method disables the video module. + * This method sets the internal engine to a disabled state and remains effective after leaving the channel. + * Calling this method resets the entire engine and may take longer to respond. You can use the following methods to control specific video module features as needed: enableLocalVideo : Whether to enable camera capture and create a local video stream. muteLocalVideoStream : Whether to publish the local video stream. muteRemoteVideoStream : Whether to receive and play the remote video stream. muteAllRemoteVideoStreams : Whether to receive and play all remote video streams. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract disableVideo(): number; /** - * Enables the local video preview and specifies the video source for the preview. + * Starts video preview and specifies the video source for preview. * - * This method is used to start local video preview and specify the video source that appears in the preview screen. + * This method starts local video preview and specifies the video source to appear in the preview. + * Local preview enables mirror mode by default. + * After leaving the channel, the local preview remains active. You need to call stopPreview to stop the local preview. * - * @param sourceType The type of the video source. See VideoSourceType. + * @param sourceType The type of video source. See VideoSourceType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract startPreview(sourceType?: VideoSourceType): number; /** - * Stops the local video preview. + * Stops video preview. * - * @param sourceType The type of the video source. See VideoSourceType. + * @param sourceType The type of video source. See VideoSourceType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopPreview(sourceType?: VideoSourceType): number; /** - * Starts the last mile network probe test. + * Starts a last-mile network probe test before a call. * - * This method starts the last-mile network probe test before joining a channel to get the uplink and downlink last mile network statistics, including the bandwidth, packet loss, jitter, and round-trip time (RTT). + * Starts a last-mile network probe test before a call to provide feedback on uplink and downlink bandwidth, packet loss, jitter, and round-trip time. * - * @param config The configurations of the last-mile network probe test. See LastmileProbeConfig. + * @param config Configuration for the last-mile network probe. See LastmileProbeConfig. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract startLastmileProbeTest(config: LastmileProbeConfig): number; /** - * Stops the last mile network probe test. + * Stops the last mile network probe test before a call. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopLastmileProbeTest(): number; /** - * Sets the video encoder configuration. + * Sets video encoding properties. * - * Sets the encoder configuration for the local video. 
Each configuration profile corresponds to a set of video parameters, including the resolution, frame rate, and bitrate. + * Sets the encoding properties for local video. Each video encoding configuration corresponds to a series of video-related parameter settings, including resolution, frame rate, and bitrate. + * The config parameter of this method sets the maximum values achievable under ideal network conditions. If the network is poor, the video engine will not use this config to render local video and will automatically downgrade to suitable video parameters. * - * @param config Video profile. See VideoEncoderConfiguration. + * @param config Video encoding parameter configuration. See VideoEncoderConfiguration. * * @returns - * 0: Success. - * < 0: Failure. + * 0: Method call succeeds. + * < 0: Method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setVideoEncoderConfiguration( config: VideoEncoderConfiguration ): number; /** - * Sets the image enhancement options. + * Sets beauty effect options. * - * Enables or disables image enhancement, and sets the options. + * Enables the local beauty effect and sets the beauty effect options. + * This method depends on the video enhancement dynamic library libagora_clear_vision_extension.dll. Removing this library will prevent the feature from working properly. + * This feature has high performance requirements. When calling this method, the SDK automatically checks the current device capability. * - * @param enabled Whether to enable the image enhancement function: true : Enable the image enhancement function. false : (Default) Disable the image enhancement function. - * @param options The image enhancement options. See BeautyOptions. - * @param type The type of the media source to which the filter effect is applied. See MediaSourceType. 
In this method, this parameter supports only the following two settings: - * Use the default value PrimaryCameraSource if you use camera to capture local video. - * Set this parameter to CustomVideoSource if you use custom video source. + * @param enabled Whether to enable the beauty effect: true : Enable the beauty effect. false : (default) Disable the beauty effect. + * @param options Beauty options. See BeautyOptions for details. + * @param type The media source type to apply the effect to. See MediaSourceType. This method only supports the following two settings: + * For local video captured by the camera, keep the default value PrimaryCameraSource. + * For custom captured video, set this parameter to CustomVideoSource. * * @returns * 0: Success. - * < 0: Failure. - * -4: The current device does not support this feature. Possible reasons include: - * The current device capabilities do not meet the requirements for image enhancement. Agora recommends you replace it with a high-performance device. - * The current device version is lower than Android 5.0 and does not support this feature. Agora recommends you replace the device or upgrade the operating system. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -4: The current device does not support this feature. Possible reasons: + * The device does not meet the performance requirements for beauty effects. Consider using a higher-performance device. + * The device runs a version lower than Android 5.0, which does not support this operation. Consider upgrading the OS or using a different device. */ abstract setBeautyEffectOptions( enabled: boolean, @@ -3505,7 +3680,25 @@ export abstract class IRtcEngine { ): number; /** - * @ignore + * Sets face shaping effect options and specifies the media source. 
+ * + * Call this method to apply preset parameters for facial modifications such as face slimming, eye enlargement, and nose slimming in one go, and to adjust the overall intensity of the effects. Face shaping is a value-added service. For billing details, see [Billing Strategy](https://doc.shengwang.cn/doc/rtc/android/billing/billing-strategy). + * On Android, this method is only supported on Android 4.4 and above. + * This method depends on the video enhancement dynamic library libagora_clear_vision_extension.dll. Removing this library will cause the feature to fail. + * This feature has high performance requirements. When calling this method, the SDK automatically checks the capabilities of the current device. + * + * @param enabled Whether to enable face shaping effects: true : Enable face shaping. false : (Default) Disable face shaping. + * @param options Face shaping style options. See FaceShapeBeautyOptions. + * @param type The media source type to which the effect is applied. See MediaSourceType. In this method, only the following two settings are supported: + * When using the camera to capture local video, keep the default value PrimaryCameraSource. + * To use custom captured video, set this parameter to CustomVideoSource. + * + * @returns + * 0: Success. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -4: The current device does not support this feature. Possible reasons include: + * The device does not meet the performance requirements for beauty effects. Consider using a higher-performance device. + * The device runs a version lower than Android 4.4, which is not supported. Consider upgrading the OS or changing the device. */ abstract setFaceShapeBeautyOptions( enabled: boolean, @@ -3514,7 +3707,24 @@ export abstract class IRtcEngine { ): number; /** - * @ignore + * Sets face shape area options and specifies the media source. 
+ * + * If the preset face shaping effects implemented in the setFaceShapeBeautyOptions method do not meet your expectations, you can use this method to set face shape area options to fine-tune individual facial features for more refined face shaping effects. Face shaping is a value-added service. For billing details, see [Billing Strategy](https://doc.shengwang.cn/doc/rtc/android/billing/billing-strategy). + * On Android, this method is only supported on Android 4.4 and above. + * This method depends on the video enhancement dynamic library libagora_clear_vision_extension.dll. Removing this library will cause the feature to fail. + * This feature has high performance requirements. When calling this method, the SDK automatically checks the capabilities of the current device. + * + * @param options Face shape area options. See FaceShapeAreaOptions. + * @param type The media source type to which the effect is applied. See MediaSourceType. In this method, only the following two settings are supported: + * When using the camera to capture local video, keep the default value PrimaryCameraSource. + * To use custom captured video, set this parameter to CustomVideoSource. + * + * @returns + * 0: Success. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -4: The current device does not support this feature. Possible reasons include: + * The device does not meet the performance requirements for beauty effects. Consider using a higher-performance device. + * The device runs a version lower than Android 4.4, which is not supported. Consider upgrading the OS or changing the device. */ abstract setFaceShapeAreaOptions( options: FaceShapeAreaOptions, @@ -3522,14 +3732,35 @@ export abstract class IRtcEngine { ): number; /** - * @ignore + * Gets face beauty effect options. + * + * Call this method to get the current parameter settings of the face beauty effect. 
+ * + * @param type The media source type to apply the effect to. See MediaSourceType. This method only supports the following two settings: + * For local video captured by the camera, keep the default value PrimaryCameraSource. + * For custom captured video, set this parameter to CustomVideoSource. + * + * @returns + * If the method call succeeds, returns a FaceShapeBeautyOptions object. + * If the method call fails, returns null. */ abstract getFaceShapeBeautyOptions( type?: MediaSourceType ): FaceShapeBeautyOptions; /** - * @ignore + * Gets face shaping area options. + * + * Call this method to get the current parameter settings of a face shaping area. + * + * @param shapeArea The face shaping area. See FaceShapeArea. + * @param type The media source type to apply the effect to. See MediaSourceType. This method only supports the following two settings: + * For local video captured by the camera, keep the default value PrimaryCameraSource. + * For custom captured video, set this parameter to CustomVideoSource. + * + * @returns + * If the method call succeeds, returns a FaceShapeAreaOptions object. + * If the method call fails, returns null. */ abstract getFaceShapeAreaOptions( shapeArea: FaceShapeArea, @@ -3537,17 +3768,20 @@ export abstract class IRtcEngine { ): FaceShapeAreaOptions; /** - * Sets the filter effect options and specifies the media source. + * Sets filter effect options and specifies the media source. * - * @param enabled Whether to enable the filter effect: true : Yes. false : (Default) No. - * @param options The filter effect options. See FilterEffectOptions. - * @param type The type of the media source to which the filter effect is applied. See MediaSourceType. In this method, this parameter supports only the following two settings: - * Use the default value PrimaryCameraSource if you use camera to capture local video. - * Set this parameter to CustomVideoSource if you use custom video source. 
+ * This method depends on the video enhancement dynamic library libagora_clear_vision_extension.dll. Removing this library will cause the feature to fail. + * This feature has high performance requirements. When calling this method, the SDK automatically checks the capabilities of the current device. + * + * @param enabled Whether to enable filter effects: true : Enable filter effects. false : (Default) Disable filter effects. + * @param options Filter options. See FilterEffectOptions. + * @param type The media source type to which the effect is applied. See MediaSourceType. In this method, only the following two settings are supported: + * When using the camera to capture local video, keep the default value PrimaryCameraSource. + * To use custom captured video, set this parameter to CustomVideoSource. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setFilterEffectOptions( enabled: boolean, @@ -3556,7 +3790,16 @@ export abstract class IRtcEngine { ): number; /** - * @ignore + * Creates an IVideoEffectObject video effect object. + * + * Available since v4.6.2. + * + * @param bundlePath The path to the video effect resource bundle. + * @param type The media source type. See MediaSourceType. + * + * @returns + * The IVideoEffectObject object, if the method call succeeds. See IVideoEffectObject. + * An empty pointer, if the method call fails. */ abstract createVideoEffectObject( bundlePath: string, @@ -3564,26 +3807,39 @@ export abstract class IRtcEngine { ): IVideoEffectObject; /** - * @ignore + * Destroys the video effect object. + * + * Available since v4.6.2. + * + * @param videoEffectObject The video effect object to destroy. See IVideoEffectObject. + * + * @returns + * 0: Success. + * < 0: Failure.
*/ abstract destroyVideoEffectObject( videoEffectObject: IVideoEffectObject ): number; /** - * Sets low-light enhancement. + * Enables low-light enhancement. * - * You can call this method to enable the color enhancement feature and set the options of the color enhancement effect. + * You can call this method to enable low-light enhancement and configure its effect. + * This method depends on the video enhancement dynamic library libagora_clear_vision_extension.dll. Removing this library will cause the feature to fail. + * Low-light enhancement has certain performance requirements. If the device overheats or experiences issues after enabling this feature, consider lowering the enhancement level or disabling the feature. + * To achieve high-quality low-light enhancement (LowLightEnhanceLevelHighQuality), you must first call setVideoDenoiserOptions to enable video denoising. The mapping is as follows: + * When low-light enhancement is in auto mode (LowLightEnhanceAuto), video denoising must be set to high quality (VideoDenoiserLevelHighQuality) and auto mode (VideoDenoiserAuto). + * When low-light enhancement is in manual mode (LowLightEnhanceManual), video denoising must be set to high quality (VideoDenoiserLevelHighQuality) and manual mode (VideoDenoiserManual). * * @param enabled Whether to enable low-light enhancement: true : Enable low-light enhancement. false : (Default) Disable low-light enhancement. - * @param options The low-light enhancement options. See LowlightEnhanceOptions. - * @param type The type of the media source to which the filter effect is applied. See MediaSourceType. In this method, this parameter supports only the following two settings: - * Use the default value PrimaryCameraSource if you use camera to capture local video. - * Set this parameter to CustomVideoSource if you use custom video source. + * @param options Low-light enhancement options to configure the effect. See LowlightEnhanceOptions. 
+ * @param type The media source type to which the effect is applied. See MediaSourceType. In this method, only the following two settings are supported: + * When using the camera to capture local video, keep the default value PrimaryCameraSource. + * To use custom captured video, set this parameter to CustomVideoSource. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setLowlightEnhanceOptions( enabled: boolean, @@ -3592,19 +3848,21 @@ export abstract class IRtcEngine { ): number; /** - * Sets video noise reduction. + * Enables video denoising. * - * You can call this method to enable the video noise reduction feature and set the options of the video noise reduction effect. If the noise reduction implemented by this method does not meet your needs, Agora recommends that you call the setBeautyEffectOptions method to enable the beauty and skin smoothing function to achieve better video noise reduction effects. The recommended BeautyOptions settings for intense noise reduction effect are as follows: lighteningContrastLevel LighteningContrastNormal lighteningLevel : 0.0 smoothnessLevel : 0.5 rednessLevel : 0.0 sharpnessLevel : 0.1 + * You can call this method to enable video denoising and configure its effect. If the denoising intensity provided by this method does not meet your needs, Agora recommends using the setBeautyEffectOptions method to enable the skin smoothing feature for better denoising. Recommended BeautyOptions settings for strong denoising: lighteningContrastLevel : LighteningContrastNormal lighteningLevel : 0.0 smoothnessLevel : 0.5 rednessLevel : 0.0 sharpnessLevel : 0.1 + * This method depends on the video enhancement dynamic library libagora_clear_vision_extension.dll. Removing this library will cause the feature to fail. + * Video denoising has certain performance requirements. 
If the device overheats or experiences issues after enabling this feature, consider lowering the denoising level or disabling the feature. * - * @param enabled Whether to enable video noise reduction: true : Enable video noise reduction. false : (Default) Disable video noise reduction. - * @param options The video noise reduction options. See VideoDenoiserOptions. - * @param type The type of the media source to which the filter effect is applied. See MediaSourceType. In this method, this parameter supports only the following two settings: - * Use the default value PrimaryCameraSource if you use camera to capture local video. - * Set this parameter to CustomVideoSource if you use custom video source. + * @param enabled Whether to enable video denoising: true : Enable video denoising. false : (Default) Disable video denoising. + * @param options Video denoising options to configure the effect. See VideoDenoiserOptions. + * @param type The media source type to which the effect is applied. See MediaSourceType. In this method, only the following two settings are supported: + * When using the camera to capture local video, keep the default value PrimaryCameraSource. + * To use custom captured video, set this parameter to CustomVideoSource. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setVideoDenoiserOptions( enabled: boolean, @@ -3613,22 +3871,23 @@ export abstract class IRtcEngine { ): number; /** - * Sets color enhancement. + * Sets color enhancement options. * - * The video images captured by the camera can have color distortion. The color enhancement feature intelligently adjusts video characteristics such as saturation and contrast to enhance the video color richness and color reproduction, making the video more vivid. 
You can call this method to enable the color enhancement feature and set the options of the color enhancement effect. - * Call this method after calling enableVideo. - * The color enhancement feature has certain performance requirements on devices. With color enhancement turned on, Agora recommends that you change the color enhancement level to one that consumes less performance or turn off color enhancement if your device is experiencing severe heat problems. - * This method relies on the image enhancement dynamic library libagora_clear_vision_extension.dll. If the dynamic library is deleted, the function cannot be enabled normally. + * Video captured by the camera may have color distortion. The color enhancement feature intelligently adjusts video characteristics such as saturation and contrast to improve color richness and accuracy, resulting in more vivid video. + * You can call this method to enable the color enhancement feature and set its effect. + * Call this method after enableVideo. + * Color enhancement requires certain device performance. If the device overheats or encounters issues after enabling it, consider lowering the enhancement level or disabling the feature. + * This method depends on the video enhancement dynamic library libagora_clear_vision_extension.dll. Removing this library will prevent the feature from working properly. * - * @param enabled Whether to enable color enhancement: true Enable color enhancement. false : (Default) Disable color enhancement. - * @param options The color enhancement options. See ColorEnhanceOptions. - * @param type The type of the media source to which the filter effect is applied. See MediaSourceType. In this method, this parameter supports only the following two settings: - * Use the default value PrimaryCameraSource if you use camera to capture local video. - * Set this parameter to CustomVideoSource if you use custom video source. 
+ * @param enabled Whether to enable the color enhancement feature: true : Enable color enhancement. false : (default) Disable color enhancement. + * @param options Color enhancement options used to set the enhancement effect. See ColorEnhanceOptions. + * @param type The media source type to apply the effect to. See MediaSourceType. This method only supports the following two settings: + * For local video captured by the camera, keep the default value PrimaryCameraSource. + * For custom captured video, set this parameter to CustomVideoSource. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setColorEnhanceOptions( enabled: boolean, @@ -3637,38 +3896,39 @@ export abstract class IRtcEngine { ): number; /** - * Enables/Disables the virtual background. - * - * The virtual background feature enables the local user to replace their original background with a static image, dynamic video, blurred background, or portrait-background segmentation to achieve picture-in-picture effect. Once the virtual background feature is enabled, all users in the channel can see the custom background. Call this method after calling enableVideo or startPreview. - * Using a video as a your virtual background will lead to continuous increase in memory usage, which may cause issues such as app crashes. Therefore,it is recommended to reduce the resolution and frame rate of the video when using it. - * This feature has high requirements on device performance. When calling this method, the SDK automatically checks the capabilities of the current device. 
Agora recommends you use virtual background on devices with the following processors: - * Snapdragon 700 series 750G and later - * Snapdragon 800 series 835 and later - * Dimensity 700 series 720 and later - * Kirin 800 series 810 and later - * Kirin 900 series 980 and later - * Devices with an A9 chip and better, as follows: - * iPhone 6S and later - * iPad Air 3rd generation and later - * iPad 5th generation and later - * iPad Pro 1st generation and later - * iPad mini 5th generation and later - * Agora recommends that you use this feature in scenarios that meet the following conditions: - * A high-definition camera device is used, and the environment is uniformly lit. - * There are few objects in the captured video. Portraits are half-length and unobstructed. Ensure that the background is a solid color that is different from the color of the user's clothing. - * This method relies on the virtual background dynamic library libagora_segmentation_extension.dll. If the dynamic library is deleted, the function cannot be enabled normally. + * Enables/disables the virtual background. + * + * The virtual background feature allows replacing the local user's original background with a static image, dynamic video, blur effect, or separating the portrait from the background to create a picture-in-picture effect. Once enabled successfully, all users in the channel can see the customized background. + * Call this method after enableVideo or startPreview. + * Using a video as the virtual background increases memory usage over time, which may cause the app to crash. To avoid this, reduce the resolution and frame rate of the video. + * This feature requires high device performance. The SDK automatically checks the device capability when calling this method. 
Recommended devices include: + * Snapdragon 700 series 750G and above + * Snapdragon 800 series 835 and above + * Dimensity 700 series 720 and above + * Kirin 800 series 810 and above + * Kirin 900 series 980 and above + * Devices with A9 chip and above: + * iPhone 6S and above + * iPad Air 3rd generation and above + * iPad 5th generation and above + * iPad Pro 1st generation and above + * iPad mini 5th generation and above + * Recommended usage scenarios: + * Use a high-definition camera and ensure even lighting. + * Few objects in the frame, half-body portrait with minimal occlusion, and a background color distinct from clothing. + * This method depends on the virtual background dynamic library libagora_segmentation_extension.dll. Deleting this library will prevent the feature from working. * * @param enabled Whether to enable virtual background: true : Enable virtual background. false : Disable virtual background. - * @param backgroundSource The custom background. See VirtualBackgroundSource. To adapt the resolution of the custom background image to that of the video captured by the SDK, the SDK scales and crops the custom background image while ensuring that the content of the custom background image is not distorted. - * @param segproperty Processing properties for background images. See SegmentationProperty. - * @param type The type of the media source to which the filter effect is applied. See MediaSourceType. In this method, this parameter supports only the following two settings: - * Use the default value PrimaryCameraSource if you use camera to capture local video. - * Set this parameter to CustomVideoSource if you use custom video source. + * @param backgroundSource Custom background. See VirtualBackgroundSource. To adapt the resolution of the custom background image to the SDK's video capture resolution, the SDK scales and crops the image without distortion. + * @param segproperty Processing properties of the background image. See SegmentationProperty. 
+ * @param type Media source type for applying the effect. See MediaSourceType. This parameter only supports the following settings: + * For video captured by the camera, use the default PrimaryCameraSource. + * For custom captured video, set this parameter to CustomVideoSource. * * @returns * 0: Success. - * < 0: Failure. - * -4: The device capabilities do not meet the requirements for the virtual background feature. Agora recommends you try it on devices with higher performance. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -4: Device capability does not meet the requirements for using the virtual background. Consider using a higher-performance device. */ abstract enableVirtualBackground( enabled: boolean, @@ -3688,31 +3948,32 @@ export abstract class IRtcEngine { abstract setupLocalVideo(canvas: VideoCanvas): number; /** - * Sets video application scenarios. + * Sets the video application scenario. * - * After successfully calling this method, the SDK will automatically enable the best practice strategies and adjust key performance metrics based on the specified scenario, to optimize the video experience. Call this method before joining a channel. + * After successfully calling this method to set the video application scenario, the SDK applies best practice strategies based on the specified scenario, automatically adjusting key performance indicators to optimize video experience quality. This method must be called before joining a channel. * - * @param scenarioType The type of video application scenario. See VideoApplicationScenarioType. ApplicationScenarioMeeting (1) is suitable for meeting scenarios. 
The SDK automatically enables the following strategies: - * In meeting scenarios where low-quality video streams are required to have a high bitrate, the SDK automatically enables multiple technologies used to deal with network congestions, to enhance the performance of the low-quality streams and to ensure the smooth reception by subscribers. - * The SDK monitors the number of subscribers to the high-quality video stream in real time and dynamically adjusts its configuration based on the number of subscribers. - * If nobody subscribers to the high-quality stream, the SDK automatically reduces its bitrate and frame rate to save upstream bandwidth. - * If someone subscribes to the high-quality stream, the SDK resets the high-quality stream to the VideoEncoderConfiguration configuration used in the most recent calling of setVideoEncoderConfiguration. If no configuration has been set by the user previously, the following values are used: + * @param scenarioType Video application scenario. See VideoApplicationScenarioType. ApplicationScenarioMeeting (1) is suitable for meeting scenarios. If the user has called setDualStreamMode to set the low stream to always not send (DisableSimulcastStream), the meeting scenario has no effect on dynamic switching of the low stream. + * This enum value only applies to broadcaster vs broadcaster scenarios. The SDK enables the following strategies for this scenario: + * For high bitrate requirements of low streams in meeting scenarios, multiple weak network resistance technologies are automatically enabled to improve the low stream's resistance and ensure smoothness when subscribing to multiple streams. + * Real-time monitoring of the number of subscribers to the high stream, dynamically adjusting high stream configuration: + * When no one subscribes to the high stream, bitrate and frame rate are automatically reduced to save upstream bandwidth and consumption. 
+ * When someone subscribes to the high stream, it resets to the VideoEncoderConfiguration set by the user's most recent call to setVideoEncoderConfiguration. If not previously set, the following values are used: * Resolution: 960 × 540 * Frame rate: 15 fps * Bitrate: 1000 Kbps - * The SDK monitors the number of subscribers to the low-quality video stream in real time and dynamically enables or disables it based on the number of subscribers. If the user has called setDualStreamMode to set that never send low-quality video stream (DisableSimulcastStream), the dynamic adjustment of the low-quality stream in meeting scenarios will not take effect. - * If nobody subscribes to the low-quality stream, the SDK automatically disables it to save upstream bandwidth. - * If someone subscribes to the low-quality stream, the SDK enables the low-quality stream and resets it to the SimulcastStreamConfig configuration used in the most recent calling of setDualStreamMode. If no configuration has been set by the user previously, the following values are used: + * Real-time monitoring of the number of subscribers to the low stream, dynamically enabling or disabling the low stream: + * When no one subscribes to the low stream, it is automatically disabled to save upstream bandwidth and consumption. + * When someone subscribes to the low stream, it is enabled and reset to the SimulcastStreamConfig set by the user's most recent call to setDualStreamMode. If not previously set, the following values are used: * Resolution: 480 × 272 * Frame rate: 15 fps - * Bitrate: 500 Kbps ApplicationScenario1v1 (2) This is applicable to the scenario. To meet the requirements for low latency and high-quality video in this scenario, the SDK optimizes its strategies, improving performance in terms of video quality, first frame rendering, latency on mid-to-low-end devices, and smoothness under weak network conditions. This enumeration value is only applicable to the broadcaster vs. broadcaster scenario. 
ApplicationScenarioLiveshow (3) This is applicable to the scenario. In this scenario, fast video rendering and high image quality are crucial. The SDK implements several performance optimizations, including automatically enabling accelerated audio and video frame rendering to minimize first-frame latency (no need to call enableInstantMediaRendering), and B-frame encoding to achieve better image quality and bandwidth efficiency. The SDK also provides enhanced video quality and smooth playback, even in poor network conditions or on lower-end devices. + * Bitrate: 500 Kbps ApplicationScenario1v1 (2) is suitable for 1v1 video call scenarios. The SDK optimizes strategies for low latency and high video quality, improving performance in image quality, first frame rendering, latency on mid-to-low-end devices, and smoothness under weak networks. ApplicationScenarioLiveshow (3) is suitable for showroom live streaming scenarios. For this scenario's high requirements on first frame rendering time and image clarity, the SDK applies optimizations such as enabling audio/video frame accelerated rendering by default to enhance first frame experience (no need to call enableInstantMediaRendering), and enabling B-frames by default to ensure high image quality and transmission efficiency. It also enhances video quality and smoothness under weak networks and on low-end devices. * * @returns - * 0: Success. - * < 0: Failure. - * -1: A general error occurs (no specified reason). - * -4: Video application scenarios are not supported. Possible reasons include that you use the Voice SDK instead of the Video SDK. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. + * 0: Method call succeeds. + * < 0: Method call fails. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -1: General error (not specifically categorized). + * -4: Setting video scenario is not supported. Possible reason: using an audio-only SDK. + * -7: IRtcEngine object is not initialized. You need to initialize the IRtcEngine object successfully before calling this method. */ abstract setVideoScenario(scenarioType: VideoApplicationScenarioType): number; @@ -3724,34 +3985,39 @@ export abstract class IRtcEngine { /** * Enables the audio module. * - * The audio module is enabled by default After calling disableAudio to disable the audio module, you can call this method to re-enable it. + * The audio module is enabled by default. If you have disabled it using disableAudio, you can call this method to re-enable it. + * Calling this method resets the entire engine and has a slower response time. You can use the following methods to control specific audio module functions as needed: enableLocalAudio : Whether to enable microphone capture and create a local audio stream. muteLocalAudioStream : Whether to publish the local audio stream. muteRemoteAudioStream : Whether to receive and play the remote audio stream. muteAllRemoteAudioStreams : Whether to receive and play all remote audio streams. + * When called in a channel, this method resets the settings of enableLocalAudio, muteRemoteAudioStream, and muteAllRemoteAudioStreams. Use with caution. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableAudio(): number; /** * Disables the audio module. * - * The audio module is enabled by default, and you can call this method to disable the audio module. + * The audio module is enabled by default. You can call this method to disable it. 
This method resets the entire engine and has a slower response time. Therefore, Agora recommends the following methods to control the audio module: enableLocalAudio : Whether to enable microphone capture and create a local audio stream. muteLocalAudioStream : Whether to publish the local audio stream. muteRemoteAudioStream : Whether to receive and play the remote audio stream. muteAllRemoteAudioStreams : Whether to receive and play all remote audio streams. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract disableAudio(): number; /** - * Sets the audio profile and audio scenario. + * Sets the audio encoding profile and scenario. * - * @param profile The audio profile, including the sampling rate, bitrate, encoding mode, and the number of channels. See AudioProfileType. - * @param scenario The audio scenarios. Under different audio scenarios, the device uses different volume types. See AudioScenarioType. + * Due to iOS system limitations, some audio routes cannot be recognized in the communication volume mode. Therefore, if you need to use an external sound card, we recommend setting the audio scenario to the high-quality scenario AudioScenarioGameStreaming (3). In this scenario, the SDK switches to media volume to avoid the issue. + * + * @param profile The audio encoding profile, including sample rate, bitrate, encoding mode, and the number of channels. See AudioProfileType. + * @param scenario The audio scenario. The volume type of the device varies depending on the audio scenario. + * See AudioScenarioType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. 
*/ abstract setAudioProfile( profile: AudioProfileType, @@ -3761,150 +4027,160 @@ export abstract class IRtcEngine { /** * Sets the audio scenario. * - * @param scenario The audio scenarios. Under different audio scenarios, the device uses different volume types. See AudioScenarioType. + * Due to iOS system limitations, some audio routes cannot be recognized in the communication volume mode. Therefore, if you need to use an external sound card, we recommend setting the audio scenario to the high-quality scenario AudioScenarioGameStreaming (3). In this scenario, the SDK switches to media volume to avoid the issue. + * + * @param scenario The audio scenario. The volume type of the device varies depending on the audio scenario. + * See AudioScenarioType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. */ abstract setAudioScenario(scenario: AudioScenarioType): number; /** * Enables or disables the local audio capture. * - * The audio function is enabled by default when users joining a channel. This method disables or re-enables the local audio function to stop or restart local audio capturing. The difference between this method and muteLocalAudioStream are as follows: enableLocalAudio : Disables or re-enables the local audio capturing and processing. If you disable or re-enable local audio capturing using the enableLocalAudio method, the local user might hear a pause in the remote audio playback. muteLocalAudioStream : Sends or stops sending the local audio streams without affecting the audio capture status. + * When a user joins a channel, the audio function is enabled by default. You can call this method to disable or re-enable the local audio function, that is, to stop or resume local audio capture. 
+ * The difference between this method and muteLocalAudioStream is: enableLocalAudio : Enables or disables local audio capture and processing. When you disable or enable local capture using enableLocalAudio, there will be a brief interruption in playing remote audio locally. muteLocalAudioStream : Stops or resumes sending the local audio stream without affecting the state of audio capture. * - * @param enabled true : (Default) Re-enable the local audio function, that is, to start the local audio capturing device (for example, the microphone). false : Disable the local audio function, that is, to stop local audio capturing. + * @param enabled true : Re-enables the local audio function, that is, starts local audio capture (default); false : Disables the local audio function, that is, stops local audio capture. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableLocalAudio(enabled: boolean): number; /** * Stops or resumes publishing the local audio stream. * - * This method is used to control whether to publish the locally captured audio stream. If you call this method to stop publishing locally captured audio streams, the audio capturing device will still work and won't be affected. + * This method controls whether to publish the locally captured audio stream. Not publishing the local audio stream does not disable the audio capturing device, so it does not affect the audio capture status. * - * @param mute Whether to stop publishing the local audio stream: true : Stops publishing the local audio stream. false : (Default) Resumes publishing the local audio stream. + * @param mute Whether to stop publishing the local audio stream. true : Stop publishing. false : (Default) Publish. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract muteLocalAudioStream(mute: boolean): number; /** - * Stops or resumes subscribing to the audio streams of all remote users. + * Stops or resumes subscribing to all remote users' audio streams. * - * After successfully calling this method, the local user stops or resumes subscribing to the audio streams of all remote users, including all subsequent users. By default, the SDK subscribes to the audio streams of all remote users when joining a channel. To modify this behavior, you can set autoSubscribeAudio to false when calling joinChannel to join the channel, which will cancel the subscription to the audio streams of all users upon joining the channel. + * After successfully calling this method, the local user stops or resumes subscribing to all remote users' audio streams, including those who join the channel after the method is called. By default, the SDK subscribes to all remote users' audio streams upon joining the channel. To change this behavior, set autoSubscribeAudio to false when calling joinChannel. + * If enableAudio or disableAudio is called after this method, the latter will take effect. * - * @param mute Whether to stop subscribing to the audio streams of all remote users: true : Stops subscribing to the audio streams of all remote users. false : (Default) Subscribes to the audio streams of all remote users by default. + * @param mute Whether to stop subscribing to all remote users' audio streams: true : Stop subscribing to all remote users' audio streams. false : (Default) Subscribe to all remote users' audio streams. * * @returns - * 0: Success. - * < 0: Failure. + * 0: Method call succeeds. + * < 0: Method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract muteAllRemoteAudioStreams(mute: boolean): number; /** - * Stops or resumes subscribing to the audio stream of a specified user. + * Stops or resumes subscribing to the specified remote user's audio stream. * * @param uid The user ID of the specified user. - * @param mute Whether to subscribe to the specified remote user's audio stream. true : Stop subscribing to the audio stream of the specified user. false : (Default) Subscribe to the audio stream of the specified user. + * @param mute Whether to stop subscribing to the specified remote user's audio stream. true : Stop subscribing to the specified user's audio stream. false : (Default) Subscribe to the specified user's audio stream. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract muteRemoteAudioStream(uid: number, mute: boolean): number; /** * Stops or resumes publishing the local video stream. * - * This method is used to control whether to publish the locally captured video stream. If you call this method to stop publishing locally captured video streams, the video capturing device will still work and won't be affected. Compared to enableLocalVideo (false), which can also cancel the publishing of local video stream by turning off the local video stream capture, this method responds faster. + * This method controls whether to publish the locally captured video stream. Not publishing the local video stream does not disable the video capturing device, so it does not affect the video capture status. + * Compared to calling enableLocalVideo(false) to disable video capture and thus stop publishing the local video stream, this method responds faster. * - * @param mute Whether to stop publishing the local video stream. true : Stop publishing the local video stream. false : (Default) Publish the local video stream. 
+ * @param mute Whether to stop sending the local video stream. true : Stop sending the local video stream. false : (Default) Send the local video stream. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract muteLocalVideoStream(mute: boolean): number; /** - * Enables/Disables the local video capture. + * Enables or disables local video capture. * - * This method disables or re-enables the local video capture, and does not affect receiving the remote video stream. After calling enableVideo, the local video capture is enabled by default. If you call enableLocalVideo (false) to disable local video capture within the channel, it also simultaneously stops publishing the video stream within the channel. If you want to restart video catpure, you can call enableLocalVideo (true) and then call updateChannelMediaOptions to set the options parameter to publish the locally captured video stream in the channel. After the local video capturer is successfully disabled or re-enabled, the SDK triggers the onRemoteVideoStateChanged callback on the remote client. - * You can call this method either before or after joining a channel. However, if you call it before joining, the settings will only take effect once you have joined the channel. - * This method enables the internal engine and is valid after leaving the channel. + * This method disables or re-enables local video capture without affecting the reception of remote video. + * After calling enableVideo, local video capture is enabled by default. + * If you call enableLocalVideo(false) in a channel, it stops local video capture and also stops publishing the video stream in the channel. To re-enable it, call enableLocalVideo(true), then call updateChannelMediaOptions and set the options parameter to publish the locally captured video stream to the channel. 
+ * After successfully enabling or disabling local video capture, the remote side triggers the onRemoteVideoStateChanged callback. + * This method can be called before or after joining a channel, but the settings take effect only after joining the channel. + * This method sets the internal engine to the enabled state and remains effective after leaving the channel. * - * @param enabled Whether to enable the local video capture. true : (Default) Enable the local video capture. false : Disable the local video capture. Once the local video is disabled, the remote users cannot receive the video stream of the local user, while the local user can still receive the video streams of remote users. When set to false, this method does not require a local camera. + * @param enabled Whether to enable local video capture. true : (Default) Enables local video capture. false : Disables local video capture. After disabling, remote users will not receive the local user's video stream, but the local user can still receive remote video streams. When set to false, this method does not require a local camera. * * @returns - * 0: Success. - * < 0: Failure. + * 0: Method call succeeds. + * < 0: Method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableLocalVideo(enabled: boolean): number; /** - * Stops or resumes subscribing to the video streams of all remote users. + * Stops or resumes subscribing to all remote users' video streams. * - * After successfully calling this method, the local user stops or resumes subscribing to the video streams of all remote users, including all subsequent users. By default, the SDK subscribes to the video streams of all remote users when joining a channel. 
To modify this behavior, you can set autoSubscribeVideo to false when calling joinChannel to join the channel, which will cancel the subscription to the video streams of all users upon joining the channel. + * After successfully calling this method, the local user stops or resumes subscribing to all remote users' video streams, including those who join the channel after the method is called. By default, the SDK subscribes to all remote users' video streams upon joining the channel. To change this behavior, set autoSubscribeVideo to false when calling joinChannel. + * If enableVideo or disableVideo is called after this method, the latter will take effect. * - * @param mute Whether to stop subscribing to the video streams of all remote users. true : Stop subscribing to the video streams of all remote users. false : (Default) Subscribe to the video streams of all remote users by default. + * @param mute Whether to stop subscribing to all remote users' video streams. true : Stop subscribing to all users' video streams. false : (Default) Subscribe to all users' video streams. * * @returns - * 0: Success. - * < 0: Failure. + * 0: Method call succeeds. + * < 0: Method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract muteAllRemoteVideoStreams(mute: boolean): number; /** * Sets the default video stream type to subscribe to. * - * The SDK will dynamically adjust the size of the corresponding video stream based on the size of the video window to save bandwidth and computing resources. The default aspect ratio of the low-quality video stream is the same as that of the high-quality video stream. According to the current aspect ratio of the high-quality video stream, the system will automatically allocate the resolution, frame rate, and bitrate of the low-quality video stream. 
Depending on the default behavior of the sender and the specific settings when calling setDualStreamMode, the scenarios for the receiver calling this method are as follows: - * The SDK enables low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side by default, meaning only the high-quality video stream is transmitted. Only the receiver with the role of the host can call this method to initiate a low-quality video stream request. Once the sender receives the request, it starts automatically sending the low-quality video stream. At this point, all users in the channel can call this method to switch to low-quality video stream subscription mode. - * If the sender calls setDualStreamMode and sets mode to DisableSimulcastStream (never send low-quality video stream), then calling this method will have no effect. - * If the sender calls setDualStreamMode and sets mode to EnableSimulcastStream (always send low-quality video stream), both the host and audience receivers can call this method to switch to low-quality video stream subscription mode. + * Depending on the sender's default behavior and the setDualStreamMode settings, the receiver's call to this method results in the following: + * By default, the SDK enables adaptive low-quality stream mode (AutoSimulcastStream) on the sender side, meaning the sender only sends the high-quality stream. Only receivers with host role can call this method to request the low-quality stream. Once the sender receives the request, it starts sending the low-quality stream. At this point, all users in the channel can call this method to switch to low-quality stream subscription mode. + * If the sender calls setDualStreamMode and sets mode to DisableSimulcastStream (never send low-quality stream), this method has no effect. 
+ * If the sender calls setDualStreamMode and sets mode to EnableSimulcastStream (always send low-quality stream), both host and audience receivers can call this method to switch to low-quality stream subscription mode. When receiving the low-quality stream, the SDK dynamically adjusts the video stream size based on the size of the video window to save bandwidth and computing resources. The aspect ratio of the low-quality stream is the same as that of the high-quality stream. Based on the current high-quality stream's aspect ratio, the system automatically assigns the resolution, frame rate, and bitrate for the low-quality stream. If you call both this method and setRemoteVideoStreamType, the SDK uses the settings from setRemoteVideoStreamType. * - * @param streamType The default video-stream type. See VideoStreamType. + * @param streamType The default video stream type to subscribe to: VideoStreamType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteDefaultVideoStreamType(streamType: VideoStreamType): number; /** - * Stops or resumes subscribing to the video stream of a specified user. + * Stops or resumes subscribing to the video stream of a specified remote user. * - * @param uid The user ID of the specified user. - * @param mute Whether to subscribe to the specified remote user's video stream. true : Stop subscribing to the video streams of the specified user. false : (Default) Subscribe to the video stream of the specified user. + * @param uid The user ID of the specified remote user. + * @param mute Whether to stop subscribing to the video stream of the specified remote user. true : Stop subscribing to the video stream of the specified user. false : (Default) Subscribe to the video stream of the specified user. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Code](https://doc.shengwang.cn/api-ref/rtc/rn/error-code) for details and resolution suggestions. */ abstract muteRemoteVideoStream(uid: number, mute: boolean): number; /** * Sets the video stream type to subscribe to. * - * Depending on the default behavior of the sender and the specific settings when calling setDualStreamMode, the scenarios for the receiver calling this method are as follows: - * The SDK enables low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side by default, meaning only the high-quality video stream is transmitted. Only the receiver with the role of the host can call this method to initiate a low-quality video stream request. Once the sender receives the request, it starts automatically sending the low-quality video stream. At this point, all users in the channel can call this method to switch to low-quality video stream subscription mode. - * If the sender calls setDualStreamMode and sets mode to DisableSimulcastStream (never send low-quality video stream), then calling this method will have no effect. - * If the sender calls setDualStreamMode and sets mode to EnableSimulcastStream (always send low-quality video stream), both the host and audience receivers can call this method to switch to low-quality video stream subscription mode. The SDK will dynamically adjust the size of the corresponding video stream based on the size of the video window to save bandwidth and computing resources. The default aspect ratio of the low-quality video stream is the same as that of the high-quality video stream. According to the current aspect ratio of the high-quality video stream, the system will automatically allocate the resolution, frame rate, and bitrate of the low-quality video stream. 
+ * Depending on the sender's default behavior and the setDualStreamMode settings, the receiver's call to this method results in the following: + * By default, the SDK enables adaptive low-quality stream mode (AutoSimulcastStream) on the sender side, meaning the sender only sends the high-quality stream. Only receivers with host role can call this method to request the low-quality stream. Once the sender receives the request, it starts sending the low-quality stream. At this point, all users in the channel can call this method to switch to low-quality stream subscription mode. + * If the sender calls setDualStreamMode and sets mode to DisableSimulcastStream (never send low-quality stream), this method has no effect. + * If the sender calls setDualStreamMode and sets mode to EnableSimulcastStream (always send low-quality stream), both host and audience receivers can call this method to switch to low-quality stream subscription mode. When receiving the low-quality stream, the SDK dynamically adjusts the video stream size based on the size of the video window to save bandwidth and computing resources. The aspect ratio of the low-quality stream is the same as that of the high-quality stream. Based on the current high-quality stream's aspect ratio, the system automatically assigns the resolution, frame rate, and bitrate for the low-quality stream. * You can call this method either before or after joining a channel. - * If you call both this method and setRemoteDefaultVideoStreamType, the setting of this method takes effect. + * If you call both this method and setRemoteDefaultVideoStreamType, the SDK uses the settings from this method. * * @param uid The user ID. - * @param streamType The video stream type, see VideoStreamType. + * @param streamType The video stream type: VideoStreamType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Code](https://doc.shengwang.cn/api-ref/rtc/rn/error-code) for details and resolution suggestions. 
*/ abstract setRemoteVideoStreamType( uid: number, @@ -3912,19 +4188,19 @@ export abstract class IRtcEngine { ): number; /** - * Options for subscribing to remote video streams. + * Sets the subscription options for remote video streams. * - * When a remote user has enabled dual-stream mode, you can call this method to choose the option for subscribing to the video streams sent by the remote user. The default subscription behavior of the SDK for remote video streams depends on the type of registered video observer: - * If the IVideoFrameObserver observer is registered, the default is to subscribe to both raw data and encoded data. - * If the IVideoEncodedFrameObserver observer is registered, the default is to subscribe only to the encoded data. - * If both types of observers are registered, the default behavior follows the last registered video observer. For example, if the last registered observer is the IVideoFrameObserver observer, the default is to subscribe to both raw data and encoded data. If you want to modify the default behavior, or set different subscription options for different uids, you can call this method to set it. + * When the remote user sends dual streams, you can call this method to set the subscription options for the remote video stream. The SDK's default subscription behavior for remote video streams depends on the type of registered video observer: + * If IVideoFrameObserver is registered, both raw and encoded data are subscribed by default. + * If IVideoEncodedFrameObserver is registered, only encoded data is subscribed by default. + * If both observers are registered, the default behavior follows the later registered observer. For example, if IVideoFrameObserver is registered later, both raw and encoded data are subscribed by default. If you want to change the default behavior above or set different subscription options for different uids, you can call this method. * - * @param uid The user ID of the remote user. 
- * @param options The video subscription options. See VideoSubscriptionOptions. + * @param uid Remote user ID. + * @param options Subscription settings for the video stream. See VideoSubscriptionOptions. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteVideoSubscriptionOptions( uid: number, @@ -3932,20 +4208,21 @@ export abstract class IRtcEngine { ): number; /** - * Sets the blocklist of subscriptions for audio streams. + * Sets the audio subscription blocklist. * - * You can call this method to specify the audio streams of a user that you do not want to subscribe to. - * You can call this method either before or after joining a channel. - * The blocklist is not affected by the setting in muteRemoteAudioStream, muteAllRemoteAudioStreams, and autoSubscribeAudio in ChannelMediaOptions. - * Once the blocklist of subscriptions is set, it is effective even if you leave the current channel and rejoin the channel. - * If a user is added in the allowlist and blocklist at the same time, only the blocklist takes effect. + * You can call this method to specify the audio streams you do not want to subscribe to. + * This method can be called before or after joining a channel. + * The audio subscription blocklist is not affected by muteRemoteAudioStream, muteAllRemoteAudioStreams, or the autoSubscribeAudio setting in ChannelMediaOptions. + * After setting the blocklist, if you leave and rejoin the channel, the blocklist remains effective. + * If a user is included in both the audio subscription allowlist and blocklist, only the blocklist takes effect. * - * @param uidList The user ID list of users that you do not want to subscribe to. If you want to specify the audio streams of a user that you do not want to subscribe to, add the user ID in this list. 
If you want to remove a user from the blocklist, you need to call the setSubscribeAudioBlocklist method to update the user ID list; this means you only add the uid of users that you do not want to subscribe to in the new user ID list. - * @param uidNumber The number of users in the user ID list. + * @param uidList List of user IDs in the subscription blocklist. + * If you want to exclude a specific user's audio stream from being subscribed to, add that user's ID to this list. To remove a user from the blocklist, call setSubscribeAudioBlocklist again with an updated list that excludes the uid of the user you want to remove. + * @param uidNumber Number of users in the blocklist. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setSubscribeAudioBlocklist( uidList: number[], @@ -3953,20 +4230,21 @@ export abstract class IRtcEngine { ): number; /** - * Sets the allowlist of subscriptions for audio streams. + * Sets the audio subscription allowlist. * - * You can call this method to specify the audio streams of a user that you want to subscribe to. - * If a user is added in the allowlist and blocklist at the same time, only the blocklist takes effect. - * You can call this method either before or after joining a channel. - * The allowlist is not affected by the setting in muteRemoteAudioStream, muteAllRemoteAudioStreams and autoSubscribeAudio in ChannelMediaOptions. - * Once the allowlist of subscriptions is set, it is effective even if you leave the current channel and rejoin the channel. + * You can call this method to specify the audio streams you want to subscribe to. + * This method can be called before or after joining a channel. + * The audio subscription allowlist is not affected by muteRemoteAudioStream, muteAllRemoteAudioStreams, or the autoSubscribeAudio setting in ChannelMediaOptions. 
+ * After setting the allowlist, if you leave and rejoin the channel, the allowlist remains effective. + * If a user is included in both the audio subscription allowlist and blocklist, only the blocklist takes effect. * - * @param uidList The user ID list of users that you want to subscribe to. If you want to specify the audio streams of a user for subscription, add the user ID in this list. If you want to remove a user from the allowlist, you need to call the setSubscribeAudioAllowlist method to update the user ID list; this means you only add the uid of users that you want to subscribe to in the new user ID list. - * @param uidNumber The number of users in the user ID list. + * @param uidList List of user IDs in the audio subscription allowlist. + * If you want to subscribe to a specific user's audio stream, add that user's ID to this list. To remove a user from the allowlist, call setSubscribeAudioAllowlist again with an updated list that excludes the uid of the user you want to remove. + * @param uidNumber Number of users in the allowlist. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setSubscribeAudioAllowlist( uidList: number[], @@ -3974,20 +4252,21 @@ export abstract class IRtcEngine { ): number; /** - * Sets the blocklist of subscriptions for video streams. + * Sets the video subscription blocklist. * - * You can call this method to specify the video streams of a user that you do not want to subscribe to. - * If a user is added in the allowlist and blocklist at the same time, only the blocklist takes effect. - * Once the blocklist of subscriptions is set, it is effective even if you leave the current channel and rejoin the channel. + * You can call this method to specify the video streams you do not want to subscribe to. * You can call this method either before or after joining a channel. 
- * The blocklist is not affected by the setting in muteRemoteVideoStream, muteAllRemoteVideoStreams and autoSubscribeAudio in ChannelMediaOptions. + * The video subscription blocklist is not affected by muteRemoteVideoStream, muteAllRemoteVideoStreams, or autoSubscribeVideo in ChannelMediaOptions. + * After setting the blocklist, if you leave and rejoin the channel, the blocklist remains effective. + * If a user is in both the video subscription blocklist and allowlist, only the blocklist takes effect. * - * @param uidList The user ID list of users that you do not want to subscribe to. If you want to specify the video streams of a user that you do not want to subscribe to, add the user ID of that user in this list. If you want to remove a user from the blocklist, you need to call the setSubscribeVideoBlocklist method to update the user ID list; this means you only add the uid of users that you do not want to subscribe to in the new user ID list. - * @param uidNumber The number of users in the user ID list. + * @param uidList User ID list of the video subscription blocklist. + * If you want to exclude a specific user's video stream from being subscribed to, add that user's ID to this list. To remove a user from the blocklist, you need to call the setSubscribeVideoBlocklist method again and update the user ID list to exclude the uid of the user you want to remove. + * @param uidNumber The number of users in the blocklist. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setSubscribeVideoBlocklist( uidList: number[], @@ -3995,20 +4274,21 @@ export abstract class IRtcEngine { ): number; /** - * Sets the allowlist of subscriptions for video streams. + * Sets the video subscription allowlist. * - * You can call this method to specify the video streams of a user that you want to subscribe to. 
- * If a user is added in the allowlist and blocklist at the same time, only the blocklist takes effect. - * Once the allowlist of subscriptions is set, it is effective even if you leave the current channel and rejoin the channel. + * You can call this method to specify the video streams you want to subscribe to. * You can call this method either before or after joining a channel. - * The allowlist is not affected by the setting in muteRemoteVideoStream, muteAllRemoteVideoStreams and autoSubscribeAudio in ChannelMediaOptions. + * The video subscription allowlist is not affected by muteRemoteVideoStream, muteAllRemoteVideoStreams, or autoSubscribeVideo in ChannelMediaOptions. + * After setting the allowlist, if you leave and rejoin the channel, the allowlist remains effective. + * If a user is in both the video subscription blocklist and allowlist, only the blocklist takes effect. * - * @param uidList The user ID list of users that you want to subscribe to. If you want to specify the video streams of a user for subscription, add the user ID of that user in this list. If you want to remove a user from the allowlist, you need to call the setSubscribeVideoAllowlist method to update the user ID list; this means you only add the uid of users that you want to subscribe to in the new user ID list. - * @param uidNumber The number of users in the user ID list. + * @param uidList User ID list of the video subscription allowlist. + * If you want to subscribe only to a specific user's video stream, add that user's ID to this list. To remove a user from the allowlist, you need to call the setSubscribeVideoAllowlist method again and update the video subscription allowlist to exclude the uid of the user you want to remove. + * @param uidNumber The number of users in the allowlist. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract setSubscribeVideoAllowlist( uidList: number[], @@ -4016,19 +4296,19 @@ export abstract class IRtcEngine { ): number; /** - * Enables the reporting of users' volume indication. + * Enables audio volume indication. * - * This method enables the SDK to regularly report the volume information to the app of the local user who sends a stream and remote users (three users at most) whose instantaneous volumes are the highest. + * This method allows the SDK to periodically report the volume information of the local user who is sending a stream and up to 3 remote users with the highest instantaneous volume to the app. * - * @param interval Sets the time interval between two consecutive volume indications: - * ≤ 0: Disables the volume indication. - * > 0: Time interval (ms) between two consecutive volume indications. Ensure this parameter is set to a value greater than 10, otherwise you will not receive the onAudioVolumeIndication callback. Agora recommends that this value is set as greater than 100. - * @param smooth The smoothing factor that sets the sensitivity of the audio volume indicator. The value ranges between 0 and 10. The recommended value is 3. The greater the value, the more sensitive the indicator. - * @param reportVad true : Enables the voice activity detection of the local user. Once it is enabled, the vad parameter of the onAudioVolumeIndication callback reports the voice activity status of the local user. false : (Default) Disables the voice activity detection of the local user. Once it is disabled, the vad parameter of the onAudioVolumeIndication callback does not report the voice activity status of the local user, except for the scenario where the engine automatically detects the voice activity of the local user. + * @param interval Sets the time interval for volume indication: + * ≤ 0: Disables the volume indication feature. + * > 0: The time interval (ms) for returning volume indications. It is recommended to set it to more than 100 ms. 
If it is less than 10 ms, the onAudioVolumeIndication callback may not be received. + * @param smooth Smoothness factor that specifies the sensitivity of the volume indication. The range is [0,10], and the recommended value is 3. The larger the value, the more sensitive the fluctuation; the smaller the value, the smoother the fluctuation. + * @param reportVad true : Enables local voice activity detection. When enabled, the vad parameter in the onAudioVolumeIndication callback reports whether voice is detected locally. false : (Default) Disables local voice activity detection. Unless the engine automatically performs local voice detection, the vad parameter in the onAudioVolumeIndication callback does not report local voice detection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableAudioVolumeIndication( interval: number, @@ -4037,28 +4317,20 @@ export abstract class IRtcEngine { ): number; /** - * Starts audio recording on the client and sets recording configurations. - * - * The Agora SDK allows recording during a call. After successfully calling this method, you can record the audio of users in the channel and get an audio recording file. Supported formats of audio files are as follows: - * WAV: High-fidelity files with typically larger file sizes. For example, if the sample rate is 32,000 Hz, the file size for 10-minute recording is approximately 73 MB. - * AAC: Low-fidelity files with typically smaller file sizes. For example, if the sample rate is 32,000 Hz and the recording quality is AudioRecordingQualityMedium, the file size for 10-minute recording is approximately 2 MB. Once the user leaves the channel, the recording automatically stops. - * - * @param config Recording configurations. See AudioRecordingConfiguration. - * - * @returns - * 0: Success. - * < 0: Failure. 
+ * @ignore */ abstract startAudioRecording(config: AudioRecordingConfiguration): number; /** - * Registers an encoded audio observer. + * Registers an audio encoded frame observer. * * Call this method after joining a channel. - * You can call this method or startAudioRecording to set the recording type and quality of audio files, but Agora does not recommend using this method and startAudioRecording at the same time. Only the method called later will take effect. + * Since this method and startAudioRecording both set audio content and quality, it is not recommended to use this method together with startAudioRecording. Otherwise, only the method called later will take effect. + * + * @param config Configuration for the encoded audio observer. See AudioEncodedFrameObserverConfig. * * @returns - * One IAudioEncodedFrameObserver object. + * An IAudioEncodedFrameObserver object. */ abstract registerAudioEncodedFrameObserver( config: AudioEncodedFrameObserverConfig, @@ -4066,33 +4338,23 @@ export abstract class IRtcEngine { ): number; /** - * Stops the audio recording on the client. - * - * @returns - * 0: Success. - * < 0: Failure. + * @ignore */ abstract stopAudioRecording(): number; /** - * Creates a media player object. - * - * Before calling any APIs in the IMediaPlayer class, you need to call this method to create an instance of the media player. If you need to create multiple instances, you can call this method multiple times. - * - * @returns - * An IMediaPlayer object, if the method call succeeds. - * An empty pointer, if the method call fails. + * @ignore */ abstract createMediaPlayer(): IMediaPlayer; /** - * Destroys the media player instance. + * Destroys the media player. * - * @param mediaPlayer IMediaPlayer object. + * @param mediaPlayer The IMediaPlayer object. * * @returns - * ≥ 0: Success. Returns the ID of media player instance. - * < 0: Failure. + * ≥ 0: Success. Returns the media player ID. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract destroyMediaPlayer(mediaPlayer: IMediaPlayer): number; @@ -4107,28 +4369,33 @@ export abstract class IRtcEngine { abstract destroyMediaRecorder(mediaRecorder: IMediaRecorder): number; /** - * Starts playing the music file. + * Starts playing a music file. * - * For the audio file formats supported by this method, see What formats of audio files does the Agora RTC SDK support. If the local music file does not exist, the SDK does not support the file format, or the the SDK cannot access the music file URL, the SDK reports AudioMixingReasonCanNotOpen. + * For supported audio file formats, see [What audio formats does the RTC SDK support](https://doc.shengwang.cn/faq/general-product-inquiry/audio-format). If the local music file does not exist, the file format is not supported, or the online music file URL is inaccessible, the SDK reports AudioMixingReasonCanNotOpen. + * Using this method to play short sound effect files may result in failure. If you need to play sound effects, use playEffect instead. + * If you need to call this method multiple times, make sure the interval between calls is greater than 500 ms. + * When calling this method on Android, note the following: + * Make sure to use a device running Android 4.2 or later with API Level 16 or higher. + * If playing an online music file, avoid using a redirect URL. Redirect URLs may not open on some devices. + * If calling this method on an emulator, ensure the music file is located in the /sdcard/ directory and is in MP3 format. * * @param filePath File path: - * Android: The file path, which needs to be accurate to the file name and suffix. Agora supports URL addresses, absolute paths, or file paths that start with /assets/. You might encounter permission issues if you use an absolute path to access a local file, so Agora recommends using a URI address instead. 
For example: content://com.android.providers.media.documents/document/audio%3A14441 - * iOS: The absolute path or URL address (including the suffixes of the filename) of the audio effect file. For example: /var/mobile/Containers/Data/audio.mp4. - * @param loopback Whether to only play music files on the local client: true : Only play music files on the local client so that only the local user can hear the music. false : Publish music files to remote clients so that both the local user and remote users can hear the music. - * @param cycle The number of times the music file plays. - * > 0: The number of times for playback. For example, 1 represents playing 1 time. - * -1: Play the audio file in an infinite loop. - * @param startPos The playback position (ms) of the music file. + * Android: File path, must include the file name and extension. Supports URL addresses for online files, URI addresses for local files, absolute paths, or paths starting with /assets/. Accessing local files via absolute path may cause permission issues. Using URI addresses is recommended. For example: content://com.android.providers.media.documents/document/audio%3A14441. + * iOS: Absolute path or URL of the audio file, must include the file name and extension. For example: /var/mobile/Containers/Data/audio.mp4. + * @param loopback Whether to play the music file only locally: true : Play the music file locally only. Only the local user can hear the music. false : Publish the locally played music file to remote users. Both local and remote users can hear the music. + * @param cycle Number of times to play the music file. + * > 0: Number of times to play. For example, 1 means play once. + * -1: Play in an infinite loop. + * @param startPos Playback position of the music file in milliseconds. * * @returns * 0: Success. - * < 0: Failure. - * -1: A general error occurs (no specified reason). - * -2: The parameter is invalid. - * -3: The SDK is not ready. - * The audio module is disabled. 
- * The program is not complete. - * The initialization of IRtcEngine fails. Reinitialize the IRtcEngine. + * < 0: Failure: + * -1: General error (uncategorized). + * -2: Invalid parameter. + * -3: SDK not ready: + * Check if the audio module is enabled. + * Check the integrity of the assembly. If IRtcEngine initialization failed, reinitialize IRtcEngine. */ abstract startAudioMixing( filePath: string, @@ -4140,235 +4407,237 @@ export abstract class IRtcEngine { /** * Stops playing the music file. * - * After calling startAudioMixing to play a music file, you can call this method to stop the playing. If you only need to pause the playback, call pauseAudioMixing. + * After calling the startAudioMixing method to play a music file, you can call this method to stop playback. If you only need to pause playback, call pauseAudioMixing instead. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopAudioMixing(): number; /** - * Pauses playing and mixing the music file. + * Pauses the playback of a music file. * - * After calling startAudioMixing to play a music file, you can call this method to pause the playing. If you need to stop the playback, call stopAudioMixing. + * After you call the startAudioMixing method to play a music file, call this method to pause the playback. To stop the playback, call stopAudioMixing. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract pauseAudioMixing(): number; /** - * Resumes playing and mixing the music file. + * Resumes the playback of a music file. * - * After calling pauseAudioMixing to pause the playback, you can call this method to resume the playback. 
+ * After you call pauseAudioMixing to pause the playback of a music file, call this method to resume playback. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract resumeAudioMixing(): number; /** - * Selects the audio track used during playback. + * Specifies the audio track to play in the current music file. * - * After getting the track index of the audio file, you can call this method to specify any track to play. For example, if different tracks of a multi-track file store songs in different languages, you can call this method to set the playback language. - * For the supported formats of audio files, see. - * You need to call this method after calling startAudioMixing and receiving the onAudioMixingStateChanged (AudioMixingStatePlaying) callback. + * After retrieving the number of audio tracks in a music file, you can call this method to specify any track for playback. For example, if different tracks in a multi-track file contain songs in different languages, you can use this method to set the playback language. + * For supported audio file formats, see [What audio file formats does the RTC SDK support?](https://doc.shengwang.cn/faq/general-product-inquiry/audio-format). + * You must call this method after calling startAudioMixing and receiving the onAudioMixingStateChanged(AudioMixingStatePlaying) callback. * - * @param index The audio track you want to specify. The value should be greater than 0 and less than that of returned by getAudioTrackCount. + * @param index The specified audio track to play. The value must be greater than or equal to 0 and less than the return value of getAudioTrackCount. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract selectAudioTrack(index: number): number; /** - * Gets the index of audio tracks of the current music file. + * Gets the audio track index of the current music file. * - * You need to call this method after calling startAudioMixing and receiving the onAudioMixingStateChanged (AudioMixingStatePlaying) callback. + * You need to call this method after calling startAudioMixing and receiving the onAudioMixingStateChanged(AudioMixingStatePlaying) callback. * * @returns - * The SDK returns the index of the audio tracks if the method call succeeds. - * < 0: Failure. + * Returns the audio track index of the current music file if the method call succeeds. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getAudioTrackCount(): number; /** - * Adjusts the volume during audio mixing. + * Adjusts the playback volume of the music file. * - * This method adjusts the audio mixing volume on both the local client and remote clients. This method does not affect the volume of the audio file set in the playEffect method. + * This method adjusts the playback volume of the mixed music file on both local and remote sides. Calling this method does not affect the playback volume of sound effects set in the playEffect method. * - * @param volume Audio mixing volume. The value ranges between 0 and 100. The default value is 100, which means the original volume. + * @param volume The volume range of the music file is 0~100. 100 (default) is the original file volume. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustAudioMixingVolume(volume: number): number; /** - * Adjusts the volume of audio mixing for publishing. + * Adjusts the remote playback volume of the music file. 
* - * This method adjusts the volume of audio mixing for publishing (sending to other users). + * This method adjusts the playback volume of the mixed music file on the remote side. * - * @param volume The volume of audio mixing for local playback. The value ranges between 0 and 100 (default). 100 represents the original volume. + * @param volume The volume of the music file. The range is [0,100]. 100 (default) is the original volume. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustAudioMixingPublishVolume(volume: number): number; /** - * Retrieves the audio mixing volume for publishing. + * Gets the remote playback volume of the music file. * - * This method helps troubleshoot audio volume‑related issues. You need to call this method after calling startAudioMixing and receiving the onAudioMixingStateChanged (AudioMixingStatePlaying) callback. + * This API helps developers troubleshoot volume-related issues. You need to call this method after calling startAudioMixing and receiving the onAudioMixingStateChanged(AudioMixingStatePlaying) callback. * * @returns - * ≥ 0: The audio mixing volume, if this method call succeeds. The value range is [0,100]. - * < 0: Failure. + * ≥ 0: Success. Returns the volume value, range is [0,100]. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getAudioMixingPublishVolume(): number; /** - * Adjusts the volume of audio mixing for local playback. + * Adjusts the local playback volume of the music file. * - * @param volume The volume of audio mixing for local playback. The value ranges between 0 and 100 (default). 100 represents the original volume. + * @param volume The volume of the music file. The range is [0,100]. 100 (default) is the original volume. 
* * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustAudioMixingPlayoutVolume(volume: number): number; /** - * Retrieves the audio mixing volume for local playback. + * Gets the local playback volume of the music file. * - * You can call this method to get the local playback volume of the mixed audio file, which helps in troubleshooting volume‑related issues. + * You can call this method to get the local playback volume of the mixed music file, which helps troubleshoot volume-related issues. * * @returns - * ≥ 0: The audio mixing volume, if this method call succeeds. The value range is [0,100]. - * < 0: Failure. + * ≥ 0: Success. Returns the volume value, range is [0,100]. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getAudioMixingPlayoutVolume(): number; /** - * Retrieves the duration (ms) of the music file. + * Gets the total duration of the music file. * - * Retrieves the total duration (ms) of the audio. + * This method gets the total duration of the music file, in milliseconds. * * @returns - * ≥ 0: The audio mixing duration, if this method call succeeds. - * < 0: Failure. + * ≥ 0: Success. Returns the duration of the music file. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getAudioMixingDuration(): number; /** - * Retrieves the playback position (ms) of the music file. + * Gets the playback progress of the music file. * - * Retrieves the playback position (ms) of the audio. You need to call this method after calling startAudioMixing and receiving the onAudioMixingStateChanged (AudioMixingStatePlaying) callback. 
- * If you need to call getAudioMixingCurrentPosition multiple times, ensure that the time interval between calling this method is more than 500 ms. + * This method gets the current playback progress of the music file, in milliseconds. + * You need to call this method after calling startAudioMixing and receiving the onAudioMixingStateChanged(AudioMixingStatePlaying) callback. + * If you need to call getAudioMixingCurrentPosition multiple times, make sure the interval between calls is greater than 500 ms. * * @returns - * ≥ 0: The current playback position (ms) of the audio mixing, if this method call succeeds. 0 represents that the current music file does not start playing. - * < 0: Failure. + * ≥ 0: Success. Returns the current playback position of the music file (ms). 0 means the music file has not started playing. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getAudioMixingCurrentPosition(): number; /** - * Sets the audio mixing position. + * Sets the playback position of the music file. * - * Call this method to set the playback position of the music file to a different starting position (the default plays from the beginning). + * This method sets the playback position of an audio file, allowing you to play from a specific point instead of from the beginning. * - * @param pos Integer. The playback position (ms). + * @param pos An integer. The position in the progress bar, in milliseconds. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setAudioMixingPosition(pos: number): number; /** * Sets the channel mode of the current audio file. * - * In a stereo music file, the left and right channels can store different audio data. 
According to your needs, you can set the channel mode to original mode, left channel mode, right channel mode, or mixed channel mode. + * In stereo audio files, the left and right channels can store different audio data. Depending on your needs, you can set the channel mode to original, left channel, right channel, or mixed mode. This method applies to stereo audio files only. * * @param mode The channel mode. See AudioMixingDualMonoMode. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setAudioMixingDualMonoMode(mode: AudioMixingDualMonoMode): number; /** - * Sets the pitch of the local music file. + * Adjusts the pitch of the music file played locally. * - * When a local music file is mixed with a local human voice, call this method to set the pitch of the local music file only. + * When mixing local vocals with a music file, you can call this method to adjust only the pitch of the music file. * - * @param pitch Sets the pitch of the local music file by the chromatic scale. The default value is 0, which means keeping the original pitch. The value ranges from -12 to 12, and the pitch value between consecutive values is a chromatic value. The greater the absolute value of this parameter, the higher or lower the pitch of the local music file. + * @param pitch Adjusts the pitch of the music file played locally in semitone steps. The default value is 0, which means no pitch adjustment. The valid range is [-12,12]. Each adjacent value differs by one semitone. The greater the absolute value, the more the pitch is raised or lowered. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract setAudioMixingPitch(pitch: number): number; /** - * Sets the playback speed of the current audio file. + * Sets the playback speed of the current music file. * - * Ensure you call this method after calling startAudioMixing receiving the onAudioMixingStateChanged callback reporting the state as AudioMixingStatePlaying. + * You must call this method after calling startAudioMixing and receiving the onAudioMixingStateChanged callback reporting the playback state as AudioMixingStatePlaying. * - * @param speed The playback speed. Agora recommends that you set this to a value between 50 and 400, defined as follows: - * 50: Half the original speed. - * 100: The original speed. - * 400: 4 times the original speed. + * @param speed The playback speed of the music file. The recommended range is [50,400], where: + * 50: 0.5x speed. + * 100: Original speed. + * 400: 4x speed. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setAudioMixingPlaybackSpeed(speed: number): number; /** - * Retrieves the volume of the audio effects. + * Gets the playback volume of the sound effect file. * - * The volume is an integer ranging from 0 to 100. The default value is 100, which means the original volume. Call this method after playEffect. + * Volume range is 0~100. 100 (default) is the original file volume. You must call this method after playEffect. * * @returns - * Volume of the audio effects, if this method call succeeds. - * < 0: Failure. + * Volume of the sound effect file. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getEffectsVolume(): number; /** - * Sets the volume of the audio effects. + * Sets the playback volume of audio effect files. * - * @param volume The playback volume. The value range is [0, 100]. 
The default value is 100, which represents the original volume. + * @param volume Playback volume. The range is [0,100]. The default value is 100, which means the original volume. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setEffectsVolume(volume: number): number; /** - * Preloads a specified audio effect file into the memory. + * Loads the audio effect file into memory. * - * Ensure the size of all preloaded files does not exceed the limit. For the audio file formats supported by this method, see What formats of audio files does the Agora RTC SDK support. + * To ensure smooth communication, pay attention to the size of the audio effect files you preload. + * Supported audio formats for preloading are listed in [Supported Audio Formats](https://docs.agora.io/en/help/general-product-inquiry/audio_format). * - * @param soundId The audio effect ID. The ID of each audio effect file is unique. + * @param soundId The ID of the audio effect. Each audio effect has a unique ID. * @param filePath File path: - * Android: The file path, which needs to be accurate to the file name and suffix. Agora supports URL addresses, absolute paths, or file paths that start with /assets/. You might encounter permission issues if you use an absolute path to access a local file, so Agora recommends using a URI address instead. For example: content://com.android.providers.media.documents/document/audio%3A14441 - * iOS: The absolute path or URL address (including the suffixes of the filename) of the audio effect file. For example: /var/mobile/Containers/Data/audio.mp4. - * @param startPos The playback position (ms) of the audio effect file. + * Android: The file path must include the file name and extension. Supports URL of online files, URI of local files, absolute path, or paths starting with /assets/. 
Accessing local files via absolute path may require permissions. It is recommended to use URI to access local files. For example: content://com.android.providers.media.documents/document/audio%3A14441. + * iOS: The absolute path or URL of the audio file. Must include the file name and extension. For example: /var/mobile/Containers/Data/audio.mp4. + * @param startPos The start position for loading the audio effect file, in milliseconds. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract preloadEffect( soundId: number, @@ -4379,25 +4648,25 @@ export abstract class IRtcEngine { /** * Plays the specified local or online audio effect file. * - * To play multiple audio effect files at the same time, call this method multiple times with different soundId and filePath. To achieve the optimal user experience, Agora recommends that you do not playing more than three audio files at the same time. + * You can call this method multiple times with different soundId and filePath to play multiple audio effect files simultaneously. For optimal user experience, it is recommended not to play more than 3 audio effects at the same time. If you need to play online audio effect files, Agora recommends caching them to the local device first, preloading them into memory using preloadEffect, and then calling this method to play them. Otherwise, playback may fail or be silent due to timeout or failure in loading the online file. * - * @param soundId The audio effect ID. The ID of each audio effect file is unique. If you have preloaded an audio effect into memory by calling preloadEffect, ensure that the value of this parameter is the same as that of soundId in preloadEffect. - * @param filePath The file path. The SDK supports URLs and absolute path of local files. 
The absolute path needs to be accurate to the file name and extension. Supported audio formats include MP3, AAC, M4A, MP4, WAV, and 3GP. If you have preloaded an audio effect into memory by calling preloadEffect, ensure that the value of this parameter is the same as that of filePath in preloadEffect. + * @param soundId The ID of the audio effect. Each audio effect has a unique ID. If you have preloaded the audio effect using preloadEffect, make sure this parameter matches the soundId set in preloadEffect. + * @param filePath The path of the file to play. Supports URL of online files and absolute path of local files. Must include the file name and extension. Supported audio formats include MP3, AAC, M4A, MP4, WAV, 3GP, etc. If you have preloaded the audio effect using preloadEffect, make sure this parameter matches the filePath set in preloadEffect. * @param loopCount The number of times the audio effect loops. - * ≥ 0: The number of playback times. For example, 1 means looping one time, which means playing the audio effect two times in total. - * -1: Play the audio file in an infinite loop. - * @param pitch The pitch of the audio effect. The value range is 0.5 to 2.0. The default value is 1.0, which means the original pitch. The lower the value, the lower the pitch. - * @param pan The spatial position of the audio effect. The value ranges between -1.0 and 1.0: - * -1.0: The audio effect is heard on the left of the user. - * 0.0: The audio effect is heard in front of the user. - * 1.0: The audio effect is heard on the right of the user. - * @param gain The volume of the audio effect. The value range is 0.0 to 100.0. The default value is 100.0, which means the original volume. The smaller the value, the lower the volume. - * @param publish Whether to publish the audio effect to the remote users: true : Publish the audio effect to the remote users. Both the local user and remote users can hear the audio effect. 
false : Do not publish the audio effect to the remote users. Only the local user can hear the audio effect. - * @param startPos The playback position (ms) of the audio effect file. + * ≥ 0: Number of loops. For example, 1 means loop once, i.e., play twice in total. + * -1: Loop indefinitely. + * @param pitch The pitch of the audio effect. The range is [0.5,2.0]. The default value is 1.0, which represents the original pitch. The smaller the value, the lower the pitch. + * @param pan The spatial position of the audio effect. The range is [-1.0,1.0], for example: + * -1.0: The audio effect appears on the left + * 0.0: The audio effect appears in the center + * 1.0: The audio effect appears on the right + * @param gain The volume of the audio effect. The range is [0.0,100.0]. The default value is 100.0, which represents the original volume. The smaller the value, the lower the volume. + * @param publish Whether to publish the audio effect to remote users: true : Publishes the audio effect to remote users. Both local and remote users can hear it. false : Does not publish the audio effect to remote users. Only local users can hear it. + * @param startPos The playback position of the audio effect file in milliseconds. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract playEffect( soundId: number, @@ -4413,23 +4682,23 @@ export abstract class IRtcEngine { /** * Plays all audio effect files. * - * After calling preloadEffect multiple times to preload multiple audio effects into the memory, you can call this method to play all the specified audio effects for all users in the channel. + * After calling preloadEffect multiple times to preload multiple audio effect files, you can call this method to play all preloaded audio effect files. 
* * @param loopCount The number of times the audio effect loops: - * -1: Play the audio effect files in an indefinite loop until you call stopEffect or stopAllEffects. - * 0: Play the audio effect once. - * 1: Play the audio effect twice. - * @param pitch The pitch of the audio effect. The value ranges between 0.5 and 2.0. The default value is 1.0 (original pitch). The lower the value, the lower the pitch. - * @param pan The spatial position of the audio effect. The value ranges between -1.0 and 1.0: - * -1.0: The audio effect shows on the left. - * 0: The audio effect shows ahead. - * 1.0: The audio effect shows on the right. - * @param gain The volume of the audio effect. The value range is [0, 100]. The default value is 100 (original volume). The smaller the value, the lower the volume. - * @param publish Whether to publish the audio effect to the remote users: true : Publish the audio effect to the remote users. Both the local user and remote users can hear the audio effect. false : (Default) Do not publish the audio effect to the remote users. Only the local user can hear the audio effect. + * -1: Loops indefinitely until stopEffect or stopAllEffects is called. + * 0: Plays the audio effect once. + * 1: Plays the audio effect twice. + * @param pitch The pitch of the audio effect. The range is [0.5,2.0]. The default value is 1.0, which represents the original pitch. The smaller the value, the lower the pitch. + * @param pan The spatial position of the audio effect. The range is [-1.0,1.0]: + * -1.0: The audio effect appears on the left. + * 0: The audio effect appears in the center. + * 1.0: The audio effect appears on the right. + * @param gain The volume of the audio effect. The range is [0,100]. 100 is the default value, representing the original volume. The smaller the value, the lower the volume. + * @param publish Whether to publish the audio effect to remote users: true : Publishes the audio effect to remote users. 
Both local and remote users can hear it. false : (Default) Does not publish the audio effect to remote users. Only local users can hear it. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract playAllEffects( loopCount: number, @@ -4440,189 +4709,190 @@ export abstract class IRtcEngine { ): number; /** - * Gets the volume of a specified audio effect file. + * Gets the playback volume of the specified audio effect file. * * @param soundId The ID of the audio effect file. * * @returns - * ≥ 0: Returns the volume of the specified audio effect, if the method call is successful. The value ranges between 0 and 100. 100 represents the original volume. - * < 0: Failure. + * ≥ 0: The method call succeeds and returns the playback volume. The volume range is [0,100], where 100 is the original volume. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getVolumeOfEffect(soundId: number): number; /** - * Gets the volume of a specified audio effect file. + * Sets the playback volume of the specified audio effect file. * - * @param soundId The ID of the audio effect. The unique ID of each audio effect file. - * @param volume The playback volume. The value range is [0, 100]. The default value is 100, which represents the original volume. + * @param soundId The ID of the specified audio effect. Each audio effect has a unique ID. + * @param volume Playback volume. The range is [0,100]. The default value is 100, which means the original volume. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract setVolumeOfEffect(soundId: number, volume: number): number; /** - * Pauses a specified audio effect file. + * Pauses playback of the specified audio effect file. * - * @param soundId The audio effect ID. The ID of each audio effect file is unique. + * @param soundId The ID of the audio effect. Each audio effect has a unique ID. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract pauseEffect(soundId: number): number; /** - * Pauses all audio effects. + * Pauses playback of all audio effect files. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract pauseAllEffects(): number; /** - * Resumes playing a specified audio effect. + * Resumes playing the specified audio effect file. * - * @param soundId The audio effect ID. The ID of each audio effect file is unique. + * @param soundId The ID of the audio effect. Each audio effect has a unique ID. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract resumeEffect(soundId: number): number; /** - * Resumes playing all audio effect files. + * Resumes playback of all audio effect files. * - * After you call pauseAllEffects to pause the playback, you can call this method to resume the playback. + * After calling pauseAllEffects to pause all audio effect files, you can call this method to resume playback. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract resumeAllEffects(): number; /** - * Stops playing a specified audio effect. + * Stops playing the specified audio effect file. * - * When you no longer need to play the audio effect, you can call this method to stop the playback. If you only need to pause the playback, call pauseEffect. + * When you no longer need to play a specific audio effect file, you can call this method to stop playback. If you only want to pause playback, call pauseEffect. * - * @param soundId The ID of the audio effect. Each audio effect has a unique ID. + * @param soundId The ID of the specified audio effect file. Each audio effect file has a unique ID. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopEffect(soundId: number): number; /** - * Stops playing all audio effects. + * Stops playing all audio effect files. * - * When you no longer need to play the audio effect, you can call this method to stop the playback. If you only need to pause the playback, call pauseAllEffects. + * When you no longer need to play audio effect files, you can call this method to stop playback. If you only want to pause playback, call pauseAllEffects. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopAllEffects(): number; /** - * Releases a specified preloaded audio effect from the memory. + * Releases a preloaded audio effect file from memory. * - * After loading the audio effect file into memory using preloadEffect, if you need to release the audio effect file, call this method. 
+ * After calling preloadEffect to load an audio effect file into memory, call this method to release the file. * - * @param soundId The ID of the audio effect. Each audio effect has a unique ID. + * @param soundId The ID of the specified audio effect file. Each audio effect file has a unique ID. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unloadEffect(soundId: number): number; /** - * Releases a specified preloaded audio effect from the memory. + * Releases all preloaded audio effect files from memory. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unloadAllEffects(): number; /** - * Retrieves the duration of the audio effect file. + * Gets the total duration of the specified sound effect file. * - * Call this method after joining a channel. + * You must call this method after joining a channel. * * @param filePath File path: - * Android: The file path, which needs to be accurate to the file name and suffix. Agora supports URL addresses, absolute paths, or file paths that start with /assets/. You might encounter permission issues if you use an absolute path to access a local file, so Agora recommends using a URI address instead. For example: content://com.android.providers.media.documents/document/audio%3A14441 - * iOS: The absolute path or URL address (including the suffixes of the filename) of the audio effect file. For example: /var/mobile/Containers/Data/audio.mp4. + * Android: File path, must include the file name and extension. Supports URL addresses for online files, URI addresses for local files, absolute paths, or paths starting with /assets/. Accessing local files via absolute path may cause permission issues. Using URI addresses is recommended. 
For example: content://com.android.providers.media.documents/document/audio%3A14441. + * iOS: Absolute path or URL of the audio file, must include the file name and extension. For example: /var/mobile/Containers/Data/audio.mp4. * * @returns - * The total duration (ms) of the specified audio effect file, if the method call succeeds. - * < 0: Failure. + * If the method call succeeds, returns the duration of the specified sound effect file (in milliseconds). + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getEffectDuration(filePath: string): number; /** - * Sets the playback position of an audio effect file. + * Sets the playback position of the specified audio effect file. * - * After a successful setting, the local audio effect file starts playing at the specified position. Call this method after playEffect. + * After the setting is successful, the local audio effect file starts playing from the specified position. You need to call this method after playEffect. * - * @param soundId The audio effect ID. The ID of each audio effect file is unique. - * @param pos The playback position (ms) of the audio effect file. + * @param soundId The ID of the audio effect. Each audio effect has a unique ID. + * @param pos The playback position of the audio effect file, in milliseconds. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setEffectPosition(soundId: number, pos: number): number; /** - * Retrieves the playback position of the audio effect file. + * Gets the playback progress of the specified sound effect file. * - * Call this method after playEffect. + * You must call this method after playEffect. * - * @param soundId The audio effect ID. The ID of each audio effect file is unique. 
+ * @param soundId ID of the sound effect. Each sound effect has a unique ID. * * @returns - * The playback position (ms) of the specified audio effect file, if the method call succeeds. - * < 0: Failure. + * If the method call succeeds, returns the playback progress of the specified sound effect file (in milliseconds). + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getEffectCurrentPosition(soundId: number): number; /** - * Enables or disables stereo panning for remote users. + * Enables/disables stereo sound for remote users. * - * Ensure that you call this method before joining a channel to enable stereo panning for remote users so that the local user can track the position of a remote user by calling setRemoteVoicePosition. + * To use setRemoteVoicePosition for spatial audio positioning, make sure to call this method before joining a channel to enable stereo sound for remote users. * - * @param enabled Whether to enable stereo panning for remote users: true : Enable stereo panning. false : Disable stereo panning. + * @param enabled Whether to enable stereo sound for remote users: true : Enable stereo sound. false : Disable stereo sound. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableSoundPositionIndication(enabled: boolean): number; /** - * Sets the 2D position (the position on the horizontal plane) of the remote user's voice. + * Sets the 2D position of a remote user's voice, i.e., horizontal position. * - * This method sets the 2D position and volume of a remote user, so that the local user can easily hear and identify the remote user's position. 
When the local user calls this method to set the voice position of a remote user, the voice difference between the left and right channels allows the local user to track the real-time position of the remote user, creating a sense of space. This method applies to massive multiplayer online games, such as Battle Royale games. - * For this method to work, enable stereo panning for remote users by calling the enableSoundPositionIndication method before joining a channel. - * For the best voice positioning, Agora recommends using a wired headset. - * Call this method after joining a channel. + * Sets the 2D position and volume of a remote user's voice to help the local user locate the sound source. + * By calling this method, you can set the position where the remote user's voice appears. The difference between the left and right channels creates a sense of direction, allowing the user to determine the real-time position of the remote user. In multiplayer online games such as battle royale, this method can effectively enhance the sense of direction of game characters and simulate a real environment. + * Before using this method, you must call enableSoundPositionIndication before joining the channel to enable stereo sound for remote users. + * For the best audio experience, it is recommended to use wired headphones when using this method. + * This method must be called after joining a channel. * - * @param uid The user ID of the remote user. - * @param pan The voice position of the remote user. The value ranges from -1.0 to 1.0: - * 0.0: (Default) The remote voice comes from the front. - * -1.0: The remote voice comes from the left. - * 1.0: The remote voice comes from the right. - * @param gain The volume of the remote user. The value ranges from 0.0 to 100.0. The default value is 100.0 (the original volume of the remote user). The smaller the value, the lower the volume. 
+ * @param uid The ID of the remote user + * @param pan Sets the 2D position of the remote user's voice. The range is [-1.0, 1.0]: + * (Default) 0.0: Voice appears in the center. + * -1.0: Voice appears on the left. + * 1.0: Voice appears on the right. + * @param gain Sets the volume of the remote user's voice. The range is [0.0, 100.0], with a default of 100.0, representing the original volume. The smaller the value, the lower the volume. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteVoicePosition( uid: number, @@ -4631,28 +4901,30 @@ export abstract class IRtcEngine { ): number; /** - * Enables or disables the spatial audio effect. + * Enables or disables spatial audio. * - * After enabling the spatial audio effect, you can call setRemoteUserSpatialAudioParams to set the spatial audio effect parameters of the remote user. - * You can call this method either before or after joining a channel. - * This method relies on the spatial audio dynamic library libagora_spatial_audio_extension.dll. If the dynamic library is deleted, the function cannot be enabled normally. + * After enabling spatial audio, you can call setRemoteUserSpatialAudioParams to set spatial audio parameters for remote users. + * This method can be called before or after joining a channel. + * This method depends on the spatial audio dynamic library libagora_spatial_audio_extension.dll. Removing this library will prevent the feature from working properly. * - * @param enabled Whether to enable the spatial audio effect: true : Enable the spatial audio effect. false : Disable the spatial audio effect. + * @param enabled Whether to enable spatial audio: true : Enable spatial audio. false : Disable spatial audio. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableSpatialAudio(enabled: boolean): number; /** - * Sets the spatial audio effect parameters of the remote user. + * Sets the spatial audio parameters for a remote user. * - * Call this method after enableSpatialAudio. After successfully setting the spatial audio effect parameters of the remote user, the local user can hear the remote user with a sense of space. + * You need to call this method after enableSpatialAudio. After successfully setting the spatial audio parameters for the remote user, the local user will hear spatial audio effects from the remote user. + * + * @param uid User ID. Must be the same as the user ID used when joining the channel. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteUserSpatialAudioParams( uid: number, @@ -4660,69 +4932,81 @@ export abstract class IRtcEngine { ): number; /** - * Sets a preset voice beautifier effect. + * Sets a predefined voice beautifier effect. * - * Call this method to set a preset voice beautifier effect for the local user who sends an audio stream. After setting a voice beautifier effect, all users in the channel can hear the effect. You can set different voice beautifier effects for different scenarios. + * Call this method to set a predefined voice beautifier effect for the local user who sends the stream. After the effect is set, all users in the channel can hear it. You can choose different beautifier effects for different scenarios. + * Do not set the profile parameter in setAudioProfile to AudioProfileSpeechStandard (1) or AudioProfileIot (6), or this method will not take effect. + * This method works best for voice processing. It is not recommended to use it for audio data that contains music. 
+ * After calling setVoiceBeautifierPreset, it is not recommended to call the following methods, or the effect set by setVoiceBeautifierPreset will be overridden: setAudioEffectPreset setAudioEffectParameters setLocalVoicePitch setLocalVoiceEqualization setLocalVoiceReverb setVoiceBeautifierParameters setVoiceConversionPreset + * This method depends on the voice beautifier dynamic library libagora_audio_beauty_extension.dll. If the library is deleted, this feature will not work properly. * - * @param preset The preset voice beautifier effect options: VoiceBeautifierPreset. + * @param preset Predefined voice beautifier effect option. See VoiceBeautifierPreset. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setVoiceBeautifierPreset(preset: VoiceBeautifierPreset): number; /** - * Sets an SDK preset audio effect. + * Sets the SDK's preset voice effects. * - * Call this method to set an SDK preset audio effect for the local user who sends an audio stream. This audio effect does not change the gender characteristics of the original voice. After setting an audio effect, all users in the channel can hear the effect. + * Call this method to set the SDK's preset voice effects for the local user who is sending the stream. This does not change the gender characteristics of the original voice. After setting the effect, all users in the channel can hear it. + * Do not set the profile parameter of setAudioProfile to AudioProfileSpeechStandard (1) or AudioProfileIot (6), otherwise this method will not take effect. + * If you call setAudioEffectPreset and set an enum other than RoomAcoustics3dVoice or PitchCorrection, do not call setAudioEffectParameters, or the effect set by setAudioEffectPreset will be overridden. 
+ * After calling setAudioEffectPreset, it is not recommended to call the following methods, or the effect set by setAudioEffectPreset will be overridden: setVoiceBeautifierPreset setLocalVoicePitch setLocalVoiceEqualization setLocalVoiceReverb setVoiceBeautifierParameters setVoiceConversionPreset + * This method depends on the beautifier dynamic library libagora_audio_beauty_extension.dll. Deleting this library will cause the feature to fail to start properly. * - * @param preset The options for SDK preset audio effects. See AudioEffectPreset. + * @param preset Preset audio effect option. See AudioEffectPreset. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setAudioEffectPreset(preset: AudioEffectPreset): number; /** - * Sets a preset voice beautifier effect. + * Sets a predefined voice conversion effect. * - * Call this method to set a preset voice changing effect for the local user who publishes an audio stream in a channel. After setting the voice changing effect, all users in the channel can hear the effect. You can set different voice changing effects for the user depending on different scenarios. + * Call this method to set a predefined voice conversion effect for the local user who sends the stream. After the effect is set, all users in the channel can hear it. You can choose different voice conversion effects for different scenarios. + * Do not set the profile parameter in setAudioProfile to AudioProfileSpeechStandard (1) or AudioProfileIot (6), or this method will not take effect. + * This method works best for voice processing. It is not recommended to use it for audio data that contains music. 
+ * After calling setVoiceConversionPreset, it is not recommended to call the following methods, or the effect set by setVoiceConversionPreset will be overridden: setAudioEffectPreset setAudioEffectParameters setVoiceBeautifierPreset setVoiceBeautifierParameters setLocalVoicePitch setLocalVoiceFormant setLocalVoiceEqualization setLocalVoiceReverb + * This method depends on the voice beautifier dynamic library libagora_audio_beauty_extension.dll. If the library is deleted, this feature will not work properly. * - * @param preset The options for the preset voice beautifier effects: VoiceConversionPreset. + * @param preset Predefined voice conversion effect option: VoiceConversionPreset. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setVoiceConversionPreset(preset: VoiceConversionPreset): number; /** - * Sets parameters for SDK preset audio effects. + * Sets parameters for SDK preset voice effects. * - * To achieve better vocal effects, it is recommended that you call the following APIs before calling this method: - * Call setAudioScenario to set the audio scenario to high-quality audio scenario, namely AudioScenarioGameStreaming (3). - * Call setAudioProfile to set the profile parameter to AudioProfileMusicHighQuality (4) or AudioProfileMusicHighQualityStereo (5). Call this method to set the following parameters for the local user who sends an audio stream: - * 3D voice effect: Sets the cycle period of the 3D voice effect. - * Pitch correction effect: Sets the basic mode and tonic pitch of the pitch correction effect. Different songs have different modes and tonic pitches. Agora recommends bounding this method with interface elements to enable users to adjust the pitch correction interactively. After setting the audio parameters, all users in the channel can hear the effect. 
- * Do not set the profile parameter in setAudioProfile to AudioProfileSpeechStandard (1) or AudioProfileIot (6), or the method does not take effect. - * You can call this method either before or after joining a channel. - * This method has the best effect on human voice processing, and Agora does not recommend calling this method to process audio data containing music. - * After calling setAudioEffectParameters, Agora does not recommend you to call the following methods, otherwise the effect set by setAudioEffectParameters will be overwritten: setAudioEffectPreset setVoiceBeautifierPreset setLocalVoicePitch setLocalVoiceEqualization setLocalVoiceReverb setVoiceBeautifierParameters setVoiceConversionPreset - * This method relies on the voice beautifier dynamic library libagora_audio_beauty_extension.dll. If the dynamic library is deleted, the function cannot be enabled normally. + * You can call this method to configure the following for local stream users: + * 3D voice effect: Set the surround cycle for the 3D voice effect. + * Pitch correction effect: Set the base scale and tonic pitch. To allow users to adjust pitch correction effects easily, it is recommended to bind the base scale and tonic pitch settings to your application's UI elements. After the settings are applied, all users in the channel can hear the effect. To achieve better voice effects, it is recommended to: + * Call setAudioScenario to set the audio scenario to high-quality, i.e., AudioScenarioGameStreaming (3). + * Call setAudioProfile to set profile to AudioProfileMusicHighQuality (4) or AudioProfileMusicHighQualityStereo (5). + * This method can be called before or after joining the channel. + * Do not set the profile parameter of setAudioProfile to AudioProfileSpeechStandard (1) or AudioProfileIot (6), otherwise this method will not take effect. + * This method works best for voice processing and is not recommended for audio data containing music. 
+ * After calling setAudioEffectParameters, avoid calling the following methods as they will override the effects set by setAudioEffectParameters : setAudioEffectPreset setVoiceBeautifierPreset setLocalVoicePitch setLocalVoiceEqualization setLocalVoiceReverb setVoiceBeautifierParameters setVoiceConversionPreset + * This method depends on the voice beautifier dynamic library libagora_audio_beauty_extension.dll. Removing the dynamic library will cause the feature to fail. * - * @param preset The options for SDK preset audio effects: RoomAcoustics3dVoice, 3D voice effect: - * You need to set the profile parameter in setAudioProfile to AudioProfileMusicStandardStereo (3) or AudioProfileMusicHighQualityStereo (5) before setting this enumerator; otherwise, the enumerator setting does not take effect. - * If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear the anticipated voice effect. PitchCorrection, Pitch correction effect: - * @param param1 If you set preset to RoomAcoustics3dVoice, param1 sets the cycle period of the 3D voice effect. The value range is [1,60] and the unit is seconds. The default value is 10, indicating that the voice moves around you every 10 seconds. - * If you set preset to PitchCorrection, param1 indicates the basic mode of the pitch correction effect: 1 : (Default) Natural major scale. 2 : Natural minor scale. 3 : Japanese pentatonic scale. - * @param param2 If you set preset to RoomAcoustics3dVoice , you need to set param2 to 0. - * If you set preset to PitchCorrection, param2 indicates the tonic pitch of the pitch correction effect: 1 : A 2 : A# 3 : B 4 : (Default) C 5 : C# 6 : D 7 : D# 8 : E 9 : F 10 : F# 11 : G 12 : G# + * @param preset SDK preset audio effects. The following are supported: RoomAcoustics3dVoice : 3D voice effect. 
+ * Before using this enum, you need to set the profile parameter of setAudioProfile to AudioProfileMusicStandardStereo (3) or AudioProfileMusicHighQualityStereo (5), otherwise the enum setting is invalid. + * After enabling 3D voice, users must use audio playback devices that support stereo to hear the expected effect. PitchCorrection : Pitch correction effect. + * @param param1 If preset is set to RoomAcoustics3dVoice, then param1 represents the surround cycle of the 3D voice effect. Value range: [1,60] seconds. Default is 10, which means the voice surrounds 360 degrees in 10 seconds. + * If preset is set to PitchCorrection, then param1 represents the base scale: 1 : (Default) Major natural scale. 2 : Minor natural scale. 3 : Japanese pentatonic scale. + * @param param2 If preset is set to RoomAcoustics3dVoice, set param2 to 0. + * If preset is set to PitchCorrection, then param2 represents the tonic pitch: 1 : A 2 : A# 3 : B 4 : (Default) C 5 : C# 6 : D 7 : D# 8 : E 9 : F 10 : F# 11 : G 12 : G# * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setAudioEffectParameters( preset: AudioEffectPreset, param1: number, param2: number, @@ -4731,24 +5015,25 @@ export abstract class IRtcEngine { ): number; /** - * Sets parameters for the preset voice beautifier effects. + * Sets parameters for preset voice beautifier effects. * - * To achieve better vocal effects, it is recommended that you call the following APIs before calling this method: - * Call setAudioScenario to set the audio scenario to high-quality audio scenario, namely AudioScenarioGameStreaming (3). - * Call setAudioProfile to set the profile parameter to AudioProfileMusicHighQuality (4) or AudioProfileMusicHighQualityStereo (5). Call this method to set a gender characteristic and a reverberation effect for the singing beautifier effect. 
This method sets parameters for the local user who sends an audio stream. After setting the audio parameters, all users in the channel can hear the effect. - * Do not set the profile parameter in setAudioProfile to AudioProfileSpeechStandard (1) or AudioProfileIot (6), or the method does not take effect. - * You can call this method either before or after joining a channel. - * This method has the best effect on human voice processing, and Agora does not recommend calling this method to process audio data containing music. - * After calling setVoiceBeautifierParameters, Agora does not recommend calling the following methods, otherwise the effect set by setVoiceBeautifierParameters will be overwritten: setAudioEffectPreset setAudioEffectParameters setVoiceBeautifierPreset setLocalVoicePitch setLocalVoiceEqualization setLocalVoiceReverb setVoiceConversionPreset - * This method relies on the voice beautifier dynamic library libagora_audio_beauty_extension.dll. If the dynamic library is deleted, the function cannot be enabled normally. + * Call this method to set the gender characteristics and reverb effects of the singing beautifier. This method applies to the local user who is sending the stream. After setting, all users in the channel can hear the effect. + * To achieve better voice effects, it is recommended to perform the following operations before calling this method: + * Call setAudioScenario to set the audio scenario to high-quality mode, i.e., AudioScenarioGameStreaming (3). + * Call setAudioProfile to set the profile to AudioProfileMusicHighQuality (4) or AudioProfileMusicHighQualityStereo (5). + * This method can be called before or after joining a channel. + * Do not set the profile parameter of setAudioProfile to AudioProfileSpeechStandard (1) or AudioProfileIot (6), otherwise this method will not take effect. + * This method is optimized for voice and is not recommended for audio data containing music. 
+ * After calling setVoiceBeautifierParameters, it is not recommended to call the following methods, or the effect set by setVoiceBeautifierParameters will be overridden: setAudioEffectPreset setAudioEffectParameters setVoiceBeautifierPreset setLocalVoicePitch setLocalVoiceEqualization setLocalVoiceReverb setVoiceConversionPreset + * This method depends on the beautifier dynamic library libagora_audio_beauty_extension.dll. Deleting this library will cause the feature to fail to start properly. * - * @param preset The option for the preset audio effect: SINGING_BEAUTIFIER : The singing beautifier effect. - * @param param1 The gender characteristics options for the singing voice: 1 : A male-sounding voice. 2 : A female-sounding voice. - * @param param2 The reverberation effect options for the singing voice: 1 : The reverberation effect sounds like singing in a small room. 2 : The reverberation effect sounds like singing in a large room. 3 : The reverberation effect sounds like singing in a hall. + * @param preset Preset effect: SINGING_BEAUTIFIER : Singing beautifier. + * @param param1 Gender characteristics of the singing voice: 1 : Male voice. 2 : Female voice. + * @param param2 Reverb effect of the singing voice: 1 : Small room reverb. 2 : Large room reverb. 3 : Hall reverb. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setVoiceBeautifierParameters( preset: VoiceBeautifierPreset, @@ -4766,38 +5051,38 @@ export abstract class IRtcEngine { ): number; /** - * Changes the voice pitch of the local speaker. + * Sets the local voice pitch. * - * @param pitch The local voice pitch. The value range is [0.5,2.0]. The lower the value, the lower the pitch. The default value is 1.0 (no change to the pitch). + * @param pitch Voice frequency. Can be set within the range [0.5, 2.0]. The smaller the value, the lower the pitch. 
The default is 1.0, meaning no pitch change. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setLocalVoicePitch(pitch: number): number; /** - * Sets the formant ratio to change the timbre of human voice. + * Sets the formant ratio to change the voice timbre. * - * Formant ratio affects the timbre of voice. The smaller the value, the deeper the sound will be, and the larger, the sharper. After you set the formant ratio, all users in the channel can hear the changed voice. If you want to change the timbre and pitch of voice at the same time, Agora recommends using this method together with setLocalVoicePitch. + * Formant ratio is a parameter that affects the timbre of the voice. A smaller value results in a deeper voice, while a larger value results in a sharper voice. After setting the formant ratio, all users in the channel can hear the effect. If you want to change both timbre and pitch, Agora recommends using it together with setLocalVoicePitch. * - * @param formantRatio The formant ratio. The value range is [-1.0, 1.0]. The default value is 0.0, which means do not change the timbre of the voice. Agora recommends setting this value within the range of [-0.4, 0.6]. Otherwise, the voice may be seriously distorted. + * @param formantRatio Formant ratio. The value range is [-1.0, 1.0]. The default is 0.0, which means no change to the original timbre. Agora recommends a value range of [-0.4, 0.6]. Effects outside this range may sound suboptimal. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setLocalVoiceFormant(formantRatio: number): number; /** * Sets the local voice equalization effect. * - * @param bandFrequency The band frequency. 
The value ranges between 0 and 9; representing the respective 10-band center frequencies of the voice effects, including 31, 62, 125, 250, 500, 1k, 2k, 4k, 8k, and 16k Hz. See AudioEqualizationBandFrequency. - * @param bandGain The gain of each band in dB. The value ranges between -15 and 15. The default value is 0. + * @param bandFrequency Index of the frequency band. The value range is [0,9], representing 10 frequency bands. The corresponding center frequencies are [31, 62, 125, 250, 500, 1k, 2k, 4k, 8k, 16k] Hz. See AudioEqualizationBandFrequency. + * @param bandGain Gain of each band in dB. The value range is [-15,15], and the default is 0. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setLocalVoiceEqualization( bandFrequency: AudioEqualizationBandFrequency, @@ -4805,16 +5090,16 @@ export abstract class IRtcEngine { ): number; /** - * Sets the local voice reverberation. + * Sets local voice reverb effects. * - * The SDK provides an easier-to-use method, setAudioEffectPreset, to directly implement preset reverb effects for such as pop, R&B, and KTV. You can call this method either before or after joining a channel. + * The SDK provides a simpler method setAudioEffectPreset to directly achieve preset reverb effects such as Pop, R&B, and KTV. This method can be called before or after joining a channel. * - * @param reverbKey The reverberation key. Agora provides five reverberation keys, see AudioReverbType. - * @param value The value of the reverberation key. + * @param reverbKey Reverb effect key. There are 5 reverb effect keys in total. See AudioReverbType. + * @param value Value corresponding to each reverb effect key. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract setLocalVoiceReverb( reverbKey: AudioReverbType, @@ -4822,108 +5107,110 @@ export abstract class IRtcEngine { ): number; /** - * Sets the preset headphone equalization effect. + * Sets a preset headphone equalizer effect. * - * This method is mainly used in spatial audio effect scenarios. You can select the preset headphone equalizer to listen to the audio to achieve the expected audio experience. If the headphones you use already have a good equalization effect, you may not get a significant improvement when you call this method, and could even diminish the experience. + * This method is mainly used in spatial audio scenarios. You can select a preset headphone equalizer to listen to audio and achieve the desired audio experience. If the headphones you are using already have good equalization, calling this method may not significantly improve the experience and may even degrade it. * - * @param preset The preset headphone equalization effect. See HeadphoneEqualizerPreset. + * @param preset Preset headphone equalizer effect. See HeadphoneEqualizerPreset. * * @returns * 0: Success. - * < 0: Failure. - * -1: A general error occurs (no specified reason). + * < 0: Failure + * -1: General error (not specifically classified). */ abstract setHeadphoneEQPreset(preset: HeadphoneEqualizerPreset): number; /** - * Sets the low- and high-frequency parameters of the headphone equalizer. + * Sets the low and high frequency parameters of the headphone equalizer. * - * In a spatial audio effect scenario, if the preset headphone equalization effect is not achieved after calling the setHeadphoneEQPreset method, you can further adjust the headphone equalization effect by calling this method. + * In spatial audio scenarios, if the expected effect is not achieved after calling setHeadphoneEQPreset to use a preset headphone equalizer effect, you can call this method to further adjust the headphone equalizer. 
* - * @param lowGain The low-frequency parameters of the headphone equalizer. The value range is [-10,10]. The larger the value, the deeper the sound. - * @param highGain The high-frequency parameters of the headphone equalizer. The value range is [-10,10]. The larger the value, the sharper the sound. + * @param lowGain Low frequency parameter of the headphone equalizer. Range: [-10, 10]. The higher the value, the deeper the sound. + * @param highGain High frequency parameter of the headphone equalizer. Range: [-10, 10]. The higher the value, the sharper the sound. * * @returns * 0: Success. - * < 0: Failure. - * -1: A general error occurs (no specified reason). + * < 0: Failure + * -1: General error (not specifically classified). */ abstract setHeadphoneEQParameters(lowGain: number, highGain: number): number; /** - * Enables or disables the voice AI tuner. + * Enables or disables the AI tuner feature. * - * The voice AI tuner supports enhancing sound quality and adjusting tone style. + * The AI tuner feature enhances voice quality and adjusts voice tone style. * - * @param enabled Whether to enable the voice AI tuner: true : Enables the voice AI tuner. false : (Default) Disable the voice AI tuner. - * @param type Voice AI tuner sound types, see VoiceAiTunerType. + * @param enabled Whether to enable the AI tuner feature: true : Enable the AI tuner feature. false : (Default) Disable the AI tuner feature. + * @param type AI tuner effect type. See VoiceAiTunerType. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableVoiceAITuner(enabled: boolean, type: VoiceAiTunerType): number; /** * Sets the log file. * - * Deprecated: This method is deprecated. Set the log file path by configuring the context parameter when calling initialize. Specifies an SDK output log file. 
The log file records all log data for the SDK’s operation. + * Deprecated Deprecated: This method is deprecated. Please set the log file path via the context parameter when calling initialize. Sets the SDK's output log file. All logs generated during SDK runtime will be written to this file. The app must ensure that the specified directory exists and is writable. * - * @param filePath The complete path of the log files. These log files are encoded in UTF-8. + * @param filePath Full path of the log file. The log file is UTF-8 encoded. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. */ abstract setLogFile(filePath: string): number; /** - * Sets the log output level of the SDK. + * Sets the log output level. * - * Deprecated: Use logConfig in initialize instead. This method sets the output log level of the SDK. You can use one or a combination of the log filter levels. The log level follows the sequence of LogFilterOff, LogFilterCritical, LogFilterError, LogFilterWarn, LogFilterInfo, and LogFilterDebug. Choose a level to see the logs preceding that level. If, for example, you set the log level to LogFilterWarn, you see the logs within levels LogFilterCritical, LogFilterError and LogFilterWarn. + * Deprecated Deprecated: Use logConfig in initialize instead. This method sets the log output level of the SDK. Different output levels can be used individually or in combination. The log levels in order are: LogFilterOff, LogFilterCritical, LogFilterError, LogFilterWarn, LogFilterInfo, and LogFilterDebug. + * When you select a level, you will see logs for that level and all levels before it. + * For example, if you select LogFilterWarn, you will see logs for LogFilterCritical, LogFilterError, and LogFilterWarn. * - * @param filter The output log level of the SDK. See LogFilterType. + * @param filter Log filter level. See LogFilterType. 
* * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setLogFilter(filter: LogFilterType): number; /** - * Sets the output log level of the SDK. + * Sets the log output level of the SDK. * - * Deprecated: This method is deprecated. Set the log file level by configuring the context parameter when calling initialize. Choose a level to see the logs preceding that level. + * Deprecated Deprecated: This method is deprecated. Set the log output level via the context parameter when calling initialize. When you select a level, you will see log information for that level. * - * @param level The log level. See LogLevel. + * @param level Log level. See LogLevel. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setLogLevel(level: LogLevel): number; /** - * Sets the log file size. + * Sets the size of the SDK log output file. * - * Deprecated: Use the logConfig parameter in initialize instead. By default, the SDK generates five SDK log files and five API call log files with the following rules: - * The SDK log files are: agorasdk.log, agorasdk.1.log, agorasdk.2.log, agorasdk.3.log, and agorasdk.4.log. - * The API call log files are: agoraapi.log, agoraapi.1.log, agoraapi.2.log, agoraapi.3.log, and agoraapi.4.log. - * The default size of each SDK log file and API log file is 2,048 KB. These log files are encoded in UTF-8. - * The SDK writes the latest logs in agorasdk.log or agoraapi.log. - * When agorasdk.log is full, the SDK processes the log files in the following order: - * Delete the agorasdk.4.log file (if any). + * Deprecated Deprecated: This method is deprecated. Use the logConfig parameter in initialize instead to set the log file size. 
By default, the SDK generates 5 SDK log files and 5 API call log files, as follows: + * SDK log file names: agorasdk.log, agorasdk.1.log, agorasdk.2.log, agorasdk.3.log, agorasdk.4.log. + * API call log file names: agoraapi.log, agoraapi.1.log, agoraapi.2.log, agoraapi.3.log, agoraapi.4.log. + * Each SDK log file has a default size of 2,048 KB; API call log files also default to 2,048 KB. All log files are UTF-8 encoded. + * The latest logs are always written to agorasdk.log and agoraapi.log. + * When agorasdk.log is full, the SDK performs the following operations in order: + * Delete the agorasdk.4.log file (if it exists). * Rename agorasdk.3.log to agorasdk.4.log. * Rename agorasdk.2.log to agorasdk.3.log. * Rename agorasdk.1.log to agorasdk.2.log. * Create a new agorasdk.log file. - * The overwrite rules for the agoraapi.log file are the same as for agorasdk.log. This method is used to set the size of the agorasdk.log file only and does not effect the agoraapi.log file. + * The rollover rules for agoraapi.log are the same as for agorasdk.log. This method only sets the size of the agorasdk.log file and does not affect agoraapi.log. * - * @param fileSizeInKBytes The size (KB) of an agorasdk.log file. The value range is [128,20480]. The default value is 2,048 KB. If you set fileSizeInKByte smaller than 128 KB, the SDK automatically adjusts it to 128 KB; if you set fileSizeInKByte greater than 20,480 KB, the SDK automatically adjusts it to 20,480 KB. + * @param fileSizeInKBytes The size of a single agorasdk.log log file in KB. The valid range is [128,20480], and the default is 2,048 KB. If you set fileSizeInKByte to less than 128 KB, the SDK automatically adjusts it to 128 KB. If you set it to more than 20,480 KB, the SDK automatically adjusts it to 20,480 KB. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract setLogFileSize(fileSizeInKBytes: number): number; @@ -4938,16 +5225,16 @@ export abstract class IRtcEngine { abstract writeLog(level: LogLevel, fmt: string): number; /** - * Updates the display mode of the local video view. + * Updates the local view display mode. * - * After initializing the local video view, you can call this method to update its rendering and mirror modes. It affects only the video view that the local user sees and does not impact the publishing of the local video. + * After initializing the local user view, you can call this method to update the rendering and mirror mode of the local user view. This method only affects the video image seen by the local user and does not affect the publishing of the local video. * - * @param renderMode The local video display mode. See RenderModeType. - * @param mirrorMode The mirror mode of the local video view. See VideoMirrorModeType. If you use a front camera, the SDK enables the mirror mode by default; if you use a rear camera, the SDK disables the mirror mode by default. + * @param renderMode The display mode of the local view. See RenderModeType. + * @param mirrorMode The mirror mode of the local view. See VideoMirrorModeType. If you use the front camera, the mirror mode of the local user view is enabled by default; if you use the rear camera, the mirror mode is disabled by default. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setLocalRenderMode( renderMode: RenderModeType, @@ -4955,18 +5242,18 @@ export abstract class IRtcEngine { ): number; /** - * Updates the display mode of the video view of a remote user. + * Updates the remote view display mode. * - * After initializing the video view of a remote user, you can call this method to update its rendering and mirror modes. This method affects only the video view that the local user sees. 
- * During a call, you can call this method as many times as necessary to update the display mode of the video view of a remote user. + * After initializing the remote user view, you can call this method to update the rendering and mirror mode of the remote user view as displayed locally. This method only affects the video image seen by the local user. + * You can call this method multiple times during a call to update the display mode of the remote user view. * - * @param uid The user ID of the remote user. - * @param renderMode The rendering mode of the remote user view. + * @param uid Remote user ID. + * @param renderMode The rendering mode of the remote user view. See RenderModeType. * @param mirrorMode The mirror mode of the remote user view. See VideoMirrorModeType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteRenderMode( uid: number, @@ -4975,14 +5262,14 @@ export abstract class IRtcEngine { ): number; /** - * Sets the maximum frame rate for rendering local video. + * Sets the maximum frame rate for local video rendering. * - * @param sourceType The type of the video source. See VideoSourceType. - * @param targetFps The capture frame rate (fps) of the local video. Sopported values are: 1, 7, 10, 15, 24, 30, 60. Set this parameter to a value lower than the actual video frame rate; otherwise, the settings do not take effect. + * @param sourceType The type of video source. See VideoSourceType. + * @param targetFps Maximum rendering frame rate (fps). Supported values: 1, 7, 10, 15, 24, 30, 60. Set this parameter to a value lower than the actual frame rate of the video, otherwise the setting will not take effect. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
 */ abstract setLocalRenderTargetFps( sourceType: VideoSourceType, @@ -4990,45 +5277,45 @@ export abstract class IRtcEngine { ): number; /** - * Sets the maximum frame rate for rendering remote video. + * Sets the maximum frame rate for remote video rendering. * - * @param targetFps The capture frame rate (fps) of the local video. Sopported values are: 1, 7, 10, 15, 24, 30, 60. Set this parameter to a value lower than the actual video frame rate; otherwise, the settings do not take effect. + * @param targetFps Maximum rendering frame rate (fps). Supported values: 1, 7, 10, 15, 24, 30, 60. Set this parameter to a value lower than the actual frame rate of the video, otherwise the setting will not take effect. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteRenderTargetFps(targetFps: number): number; /** * Sets the local video mirror mode. * - * Deprecated: This method is deprecated. Use setLocalRenderMode instead. + * Deprecated: This method is deprecated. Use setLocalRenderMode instead. * - * @param mirrorMode The local video mirror mode. See VideoMirrorModeType. + * @param mirrorMode The mirror mode of the local video. See VideoMirrorModeType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setLocalVideoMirrorMode(mirrorMode: VideoMirrorModeType): number; /** - * Sets the dual-stream mode on the sender side and the low-quality video stream. + * Enables or disables the dual-stream mode and sets the low-quality video stream on the sender side. * - * Deprecated: This method is deprecated as of v4.2.0. Use setDualStreamMode instead. You can call this method to enable or disable the dual-stream mode on the publisher side. 
Dual streams are a pairing of a high-quality video stream and a low-quality video stream: - * High-quality video stream: High bitrate, high resolution. - * Low-quality video stream: Low bitrate, low resolution. After you enable dual-stream mode, you can call setRemoteVideoStreamType to choose to receive either the high-quality video stream or the low-quality video stream on the subscriber side. - * This method is applicable to all types of streams from the sender, including but not limited to video streams collected from cameras, screen sharing streams, and custom-collected video streams. - * If you need to enable dual video streams in a multi-channel scenario, you can call the enableDualStreamModeEx method. - * You can call this method either before or after joining a channel. + * Deprecated: This method is deprecated since v4.2.0. Use setDualStreamMode instead. You can call this method on the sender side to enable or disable dual-stream mode. Dual-stream refers to high-quality and low-quality video streams: + * High-quality stream: High resolution and high frame rate video stream. + * Low-quality stream: Low resolution and low frame rate video stream. After enabling dual-stream mode, you can call setRemoteVideoStreamType on the receiver side to choose to receive either the high-quality or low-quality video stream. + * This method applies to all types of streams sent by the sender, including but not limited to camera-captured video, screen sharing, and custom video streams. + * To enable dual-stream mode in multi-channel scenarios, call enableDualStreamModeEx. + * This method can be called before or after joining a channel. * * @param enabled Whether to enable dual-stream mode: true : Enable dual-stream mode. false : (Default) Disable dual-stream mode. - * @param streamConfig The configuration of the low-quality video stream. See SimulcastStreamConfig. When setting mode to DisableSimulcastStream, setting streamConfig will not take effect. 
+ * @param streamConfig Configuration for the low-quality video stream. See SimulcastStreamConfig. When mode is set to DisableSimulcastStream, setting streamConfig has no effect. * * @returns - * 0: Success. - * < 0: Failure. + * 0: Method call succeeds. + * < 0: Method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableDualStreamMode( enabled: boolean, @@ -5036,21 +5323,21 @@ export abstract class IRtcEngine { ): number; /** - * Sets dual-stream mode configuration on the sender side. + * Sets the dual-stream mode and configures the low-quality video stream on the sender side. * - * The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side, which means the sender does not actively send low-quality video stream. The receiving end with the role of the host can initiate a low-quality video stream request by calling setRemoteVideoStreamType, and upon receiving the request, the sending end automatically starts sending low-quality stream. - * If you want to modify this behavior, you can call this method and set mode to DisableSimulcastStream (never send low-quality video streams) or EnableSimulcastStream (always send low-quality video streams). - * If you want to restore the default behavior after making changes, you can call this method again with mode set to AutoSimulcastStream. The difference and connection between this method and enableDualStreamMode is as follows: - * When calling this method and setting mode to DisableSimulcastStream, it has the same effect as calling enableDualStreamMode and setting enabled to false. - * When calling this method and setting mode to EnableSimulcastStream, it has the same effect as calling enableDualStreamMode and setting enabled to true. - * Both methods can be called before and after joining a channel. 
If both methods are used, the settings in the method called later takes precedence. + * By default, the SDK enables the adaptive low-quality stream mode (AutoSimulcastStream) on the sender side, meaning the sender does not actively send the low-quality stream. A receiver with host role can call setRemoteVideoStreamType to request the low-quality stream, and the sender starts sending it automatically upon receiving the request. + * To change this behavior, call this method and set mode to DisableSimulcastStream (never send low-quality stream) or EnableSimulcastStream (always send low-quality stream). + * To revert to the default behavior after making changes, call this method again and set mode to AutoSimulcastStream. The differences and similarities between this method and enableDualStreamMode are as follows: + * Calling this method with mode set to DisableSimulcastStream is equivalent to calling enableDualStreamMode with enabled set to false. + * Calling this method with mode set to EnableSimulcastStream is equivalent to calling enableDualStreamMode with enabled set to true. + * Both methods can be called before or after joining a channel. If both are used, the settings from the later call take precedence. * - * @param mode The mode in which the video stream is sent. See SimulcastStreamMode. - * @param streamConfig The configuration of the low-quality video stream. See SimulcastStreamConfig. When setting mode to DisableSimulcastStream, setting streamConfig will not take effect. + * @param mode The mode for sending video streams. See SimulcastStreamMode. + * @param streamConfig Configuration for the low-quality video stream. See SimulcastStreamConfig. When mode is set to DisableSimulcastStream, streamConfig has no effect. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract setDualStreamMode( mode: SimulcastStreamMode, @@ -5063,16 +5350,17 @@ export abstract class IRtcEngine { abstract setSimulcastConfig(simulcastConfig: SimulcastConfig): number; /** - * Sets whether to enable the local playback of external audio source. + * Sets whether to play external audio sources locally. * - * Ensure you have called the createCustomAudioTrack method to create a custom audio track before calling this method. After calling this method to enable the local playback of external audio source, if you need to stop local playback, you can call this method again and set enabled to false. You can call adjustCustomAudioPlayoutVolume to adjust the local playback volume of the custom audio track. + * After calling this method to enable local playback of externally captured audio sources, you can call this method again and set enabled to false to stop local playback. + * You can call adjustCustomAudioPlayoutVolume to adjust the local playback volume of the custom audio capture track. Before calling this method, make sure you have already called the createCustomAudioTrack method to create a custom audio capture track. * - * @param trackId The audio track ID. Set this parameter to the custom audio track ID returned in createCustomAudioTrack. - * @param enabled Whether to play the external audio source: true : Play the external audio source. false : (Default) Do not play the external source. + * @param trackId Audio track ID. Set this parameter to the custom audio track ID returned by the createCustomAudioTrack method. + * @param enabled Whether to play the external audio source locally: true : Play locally. false : (Default) Do not play locally. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract enableCustomAudioLocalPlayback( trackId: number, @@ -5080,20 +5368,20 @@ export abstract class IRtcEngine { ): number; /** - * Sets the format of the captured raw audio data. + * Sets the data format of the recorded raw audio. * - * The SDK calculates the sampling interval based on the samplesPerCall, sampleRate and channel parameters set in this method. Sample interval (sec) = samplePerCall /(sampleRate × channel). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the onRecordAudioFrame callback according to the sampling interval. + * The SDK calculates the sampling interval using the samplesPerCall, sampleRate, and channel parameters in this method. The formula is: sampling interval = samplesPerCall / (sampleRate × channel). Ensure that the sampling interval is no less than 0.01 seconds. The SDK triggers the onRecordAudioFrame callback based on this interval. * - * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000, 32000, 44100, or 48000 Hz. - * @param channel The number of audio channels. You can set the value as 1 or 2. + * @param sampleRate The sample rate (Hz) of the audio data. You can set it to 8000, 16000, 32000, 44100, or 48000. + * @param channel The number of audio channels. You can set it to 1 or 2: * 1: Mono. * 2: Stereo. - * @param mode The use mode of the audio frame. See RawAudioFrameOpModeType. - * @param samplesPerCall The number of data samples, such as 1024 for the Media Push. + * @param mode The operation mode of the audio frame. See RawAudioFrameOpModeType. + * @param samplesPerCall The number of audio samples per call. Typically set to 1024 in scenarios such as CDN streaming. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. 
*/ abstract setRecordingAudioFrameParameters( sampleRate: number, @@ -5103,20 +5391,20 @@ export abstract class IRtcEngine { ): number; /** - * Sets the format of the raw audio playback data. + * Sets the data format of the playback raw audio. * - * The SDK calculates the sampling interval based on the samplesPerCall, sampleRate and channel parameters set in this method. Sample interval (sec) = samplePerCall /(sampleRate × channel). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the onPlaybackAudioFrame callback according to the sampling interval. + * The SDK calculates the sampling interval using the samplesPerCall, sampleRate, and channel parameters in this method. The formula is: sampling interval = samplesPerCall / (sampleRate × channel). Ensure that the sampling interval is no less than 0.01 seconds. The SDK triggers the onPlaybackAudioFrame callback based on this interval. * - * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000, 32000, 44100, or 48000 Hz. - * @param channel The number of audio channels. You can set the value as 1 or 2. + * @param sampleRate The sample rate (Hz) of the audio data. You can set it to 8000, 16000, 24000, 32000, 44100, or 48000. + * @param channel The number of audio channels. You can set it to 1 or 2: * 1: Mono. * 2: Stereo. - * @param mode The use mode of the audio frame. See RawAudioFrameOpModeType. - * @param samplesPerCall The number of data samples, such as 1024 for the Media Push. + * @param mode The operation mode of the audio frame. See RawAudioFrameOpModeType. + * @param samplesPerCall The number of audio samples per call. Typically set to 1024 in scenarios such as CDN streaming. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. 
*/ abstract setPlaybackAudioFrameParameters( sampleRate: number, @@ -5126,19 +5414,19 @@ export abstract class IRtcEngine { ): number; /** - * Sets the format of the raw audio data after mixing for audio capture and playback. + * Sets the raw audio data format after audio capture and playback mixing. * - * The SDK calculates the sampling interval based on the samplesPerCall, sampleRate and channel parameters set in this method. Sample interval (sec) = samplePerCall /(sampleRate × channel). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the onMixedAudioFrame callback according to the sampling interval. + * The SDK calculates the sampling interval using the samplesPerCall, sampleRate, and channel parameters in this method. The formula is: sampling interval = samplesPerCall / (sampleRate × channel). Ensure the interval is no less than 0.01 seconds. The SDK triggers the onMixedAudioFrame callback based on this interval. * - * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000, 32000, 44100, or 48000 Hz. - * @param channel The number of audio channels. You can set the value as 1 or 2. + * @param sampleRate The sample rate (Hz) of the audio data, can be set to 8000, 16000, 32000, 44100, or 48000. + * @param channel The number of audio channels, can be set to 1 or 2: * 1: Mono. * 2: Stereo. - * @param samplesPerCall The number of data samples, such as 1024 for the Media Push. + * @param samplesPerCall The number of audio samples, typically 1024 in scenarios like CDN streaming. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setMixedAudioFrameParameters( sampleRate: number, @@ -5147,22 +5435,22 @@ export abstract class IRtcEngine { ): number; /** - * Sets the format of the in-ear monitoring raw audio data. + * Sets the audio data format for in-ear monitoring. 
* - * This method is used to set the in-ear monitoring audio data format reported by the onEarMonitoringAudioFrame callback. - * Before calling this method, you need to call enableInEarMonitoring, and set includeAudioFilters to EarMonitoringFilterBuiltInAudioFilters or EarMonitoringFilterNoiseSuppression. - * The SDK calculates the sampling interval based on the samplesPerCall, sampleRate and channel parameters set in this method. Sample interval (sec) = samplePerCall /(sampleRate × channel). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the onEarMonitoringAudioFrame callback according to the sampling interval. + * This method sets the audio data format for the onEarMonitoringAudioFrame callback. + * Before calling this method, you need to call enableInEarMonitoring and set includeAudioFilters to EarMonitoringFilterBuiltInAudioFilters or EarMonitoringFilterNoiseSuppression. + * The SDK calculates the sampling interval using the samplesPerCall, sampleRate, and channel parameters in this method. The formula is: sampling interval = samplesPerCall / (sampleRate × channel). Ensure the interval is no less than 0.01 seconds. The SDK triggers the onEarMonitoringAudioFrame callback based on this interval. * - * @param sampleRate The sample rate of the audio data reported in the onEarMonitoringAudioFrame callback, which can be set as 8,000, 16,000, 32,000, 44,100, or 48,000 Hz. - * @param channel The number of audio channels reported in the onEarMonitoringAudioFrame callback. + * @param sampleRate The sample rate (Hz) of the audio reported in onEarMonitoringAudioFrame, can be set to 8000, 16000, 32000, 44100, or 48000. + * @param channel The number of audio channels reported in onEarMonitoringAudioFrame, can be set to 1 or 2: * 1: Mono. * 2: Stereo. - * @param mode The use mode of the audio frame. See RawAudioFrameOpModeType. 
- * @param samplesPerCall The number of data samples reported in the onEarMonitoringAudioFrame callback, such as 1,024 for the Media Push. + * @param mode The usage mode of the audio frame. See RawAudioFrameOpModeType. + * @param samplesPerCall The number of audio samples reported in onEarMonitoringAudioFrame, typically 1024 in scenarios like CDN streaming. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setEarMonitoringAudioFrameParameters( sampleRate: number, @@ -5172,18 +5460,19 @@ export abstract class IRtcEngine { ): number; /** - * Sets the format of the raw audio playback data before mixing. + * Sets the raw audio playback data format before mixing. * - * The SDK triggers the onPlaybackAudioFrameBeforeMixing callback according to the sampling interval. + * The SDK triggers the onPlaybackAudioFrameBeforeMixing callback based on this sampling interval. * - * @param channel The number of audio channels. You can set the value as 1 or 2. + * @param sampleRate The sample rate (Hz) of the audio data, can be set to 8000, 16000, 32000, 44100, or 48000. + * @param channel The number of audio channels, can be set to 1 or 2: * 1: Mono. * 2: Stereo. - * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000, 32000, 44100, or 48000 Hz. + * @param samplesPerCall Sets the number of audio samples returned in the onPlaybackAudioFrameBeforeMixing callback. In RTMP streaming scenarios, it is recommended to set this to 1024. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract setPlaybackAudioFrameBeforeMixingParameters( sampleRate: number, @@ -5192,39 +5481,39 @@ export abstract class IRtcEngine { ): number; /** - * Turns on audio spectrum monitoring. + * Enables audio spectrum monitoring. * - * If you want to obtain the audio spectrum data of local or remote users, you can register the audio spectrum observer and enable audio spectrum monitoring. You can call this method either before or after joining a channel. + * If you want to obtain the audio spectrum data of local or remote users, register an audio spectrum observer and enable audio spectrum monitoring. This method can be called before or after joining a channel. * - * @param intervalInMS The interval (in milliseconds) at which the SDK triggers the onLocalAudioSpectrum and onRemoteAudioSpectrum callbacks. The default value is 100. Do not set this parameter to a value less than 10, otherwise calling this method would fail. + * @param intervalInMS The interval (ms) at which the SDK triggers the onLocalAudioSpectrum and onRemoteAudioSpectrum callbacks. Default is 100 ms. The value must not be less than 10 ms, otherwise the method call will fail. * * @returns * 0: Success. - * < 0: Failure. - * -2: Invalid parameters. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: Invalid parameter settings. */ abstract enableAudioSpectrumMonitor(intervalInMS?: number): number; /** * Disables audio spectrum monitoring. * - * After calling enableAudioSpectrumMonitor, if you want to disable audio spectrum monitoring, you can call this method. You can call this method either before or after joining a channel. + * Call this method to disable audio spectrum monitoring after calling enableAudioSpectrumMonitor. This method can be called before or after joining a channel. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract disableAudioSpectrumMonitor(): number; /** * Registers an audio spectrum observer. * - * After successfully registering the audio spectrum observer and calling enableAudioSpectrumMonitor to enable the audio spectrum monitoring, the SDK reports the callback that you implement in the IAudioSpectrumObserver class according to the time interval you set. You can call this method either before or after joining a channel. + * After successfully registering an audio spectrum observer and calling enableAudioSpectrumMonitor to enable audio spectrum monitoring, the SDK reports callbacks implemented in the IAudioSpectrumObserver class at the interval you set. This method can be called before or after joining a channel. * * @param observer The audio spectrum observer. See IAudioSpectrumObserver. * * @returns - * One IAudioSpectrumObserver object. + * The IAudioSpectrumObserver object. */ abstract registerAudioSpectrumObserver( observer: IAudioSpectrumObserver @@ -5233,90 +5522,91 @@ export abstract class IRtcEngine { /** * Unregisters the audio spectrum observer. * - * After calling registerAudioSpectrumObserver, if you want to disable audio spectrum monitoring, you can call this method. You can call this method either before or after joining a channel. + * Call this method to unregister the audio spectrum observer after calling registerAudioSpectrumObserver. This method can be called before or after joining a channel. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unregisterAudioSpectrumObserver( observer: IAudioSpectrumObserver ): number; /** - * Adjusts the capturing signal volume. + * Adjusts the recording signal volume. 
 * - * If you only need to mute the audio signal, Agora recommends that you use muteRecordingSignal instead. + * If you only want to mute the audio signal, we recommend using muteRecordingSignal. * - * @param volume The volume of the user. The value range is [0,400]. + * @param volume The volume. The value range is [0,400]. * 0: Mute. - * 100: (Default) The original volume. - * 400: Four times the original volume (amplifying the audio signals by four times). + * 100: (Default) Original volume. + * 400: Four times the original volume with overflow protection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustRecordingSignalVolume(volume: number): number; /** * Whether to mute the recording signal. * - * If you have already called adjustRecordingSignalVolume to adjust the recording signal volume, when you call this method and set it to true, the SDK behaves as follows: - * Records the adjusted volume. - * Mutes the recording signal. When you call this method again and set it to false, the recording signal volume will be restored to the volume recorded by the SDK before muting. + * If you have already called adjustRecordingSignalVolume to adjust the volume of the audio capture signal, then calling this method with true will cause the SDK to: + * Record the adjusted volume. + * Mute the audio capture signal. When you call this method again with false, the recording signal will be restored to the volume recorded by the SDK before muting. * - * @param mute true : Mute the recording signal. false : (Default) Do not mute the recording signal. + * @param mute true : Mute. false : (Default) Original volume. * * @returns - * 0: Success. - * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract muteRecordingSignal(mute: boolean): number; /** - * Adjusts the playback signal volume of all remote users. + * Adjusts the signal volume of all remote users for local playback. * - * This method is used to adjust the signal volume of all remote users mixed and played locally. If you need to adjust the signal volume of a specified remote user played locally, it is recommended that you call adjustUserPlaybackSignalVolume instead. + * This method adjusts the signal volume of all remote users after mixing for local playback. If you need to adjust the signal volume of a specific remote user for local playback, it is recommended to call adjustUserPlaybackSignalVolume. * - * @param volume The volume of the user. The value range is [0,400]. + * @param volume Volume, range is [0,400]. * 0: Mute. - * 100: (Default) The original volume. - * 400: Four times the original volume (amplifying the audio signals by four times). + * 100: (Default) Original volume. + * 400: 4 times the original volume, with built-in overflow protection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustPlaybackSignalVolume(volume: number): number; /** - * Adjusts the playback signal volume of a specified remote user. + * Adjusts the playback volume of a specified remote user locally. * - * You can call this method to adjust the playback volume of a specified remote user. To adjust the playback volume of different remote users, call the method as many times, once for each remote user. + * You can call this method during a call to adjust the playback volume of a specified remote user locally. To adjust the playback volume of multiple users locally, call this method multiple times. 
* - * @param uid The user ID of the remote user. - * @param volume The volume of the user. The value range is [0,400]. + * @param uid Remote user ID. + * @param volume Volume. The range is [0,400]. * 0: Mute. - * 100: (Default) The original volume. - * 400: Four times the original volume (amplifying the audio signals by four times). + * 100: (Default) Original volume. + * 400: Four times the original volume with built-in overflow protection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustUserPlaybackSignalVolume(uid: number, volume: number): number; /** - * Sets the fallback option for the subscribed video stream based on the network conditions. + * Sets the fallback option for subscribed audio and video streams under poor network conditions. * - * An unstable network affects the audio and video quality in a video call or interactive live video streaming. If option is set as StreamFallbackOptionVideoStreamLow or StreamFallbackOptionAudioOnly, the SDK automatically switches the video from a high-quality stream to a low-quality stream or disables the video when the downlink network conditions cannot support both audio and video to guarantee the quality of the audio. Meanwhile, the SDK continuously monitors network quality and resumes subscribing to audio and video streams when the network quality improves. When the subscribed video stream falls back to an audio-only stream, or recovers from an audio-only stream to an audio-video stream, the SDK triggers the onRemoteSubscribeFallbackToAudioOnly callback. + * Under poor network conditions, the quality of real-time audio and video may degrade. You can call this method and set option to StreamFallbackOptionVideoStreamLow or StreamFallbackOptionAudioOnly. 
When the downlink network is weak and audio/video quality is severely affected, the SDK will switch the video stream to a lower stream or disable the video stream to ensure audio quality. The SDK continuously monitors network quality and resumes audio and video subscription when conditions improve. + * When the subscribed stream falls back to audio or recovers to audio and video, the SDK triggers the onRemoteSubscribeFallbackToAudioOnly callback. * - * @param option Fallback options for the subscribed stream. See STREAM_FALLBACK_OPTIONS. + * @param option Fallback option for the subscribed stream. See STREAM_FALLBACK_OPTIONS. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteSubscribeFallbackOption( option: StreamFallbackOptions @@ -5332,17 +5622,20 @@ export abstract class IRtcEngine { ): number; /** - * Enables or disables extensions. + * Enables/disables an extension. + * + * To enable multiple extensions, call this method multiple times. + * After this method is called successfully, no other extensions can be loaded. * * @param provider The name of the extension provider. * @param extension The name of the extension. * @param enable Whether to enable the extension: true : Enable the extension. false : Disable the extension. - * @param type Source type of the extension. See MediaSourceType. + * @param type The media source type of the extension. See MediaSourceType. * * @returns * 0: Success. - * < 0: Failure. - * -3: The extension library is not loaded. Agora recommends that you check the storage location or the name of the dynamic library. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -3: The extension dynamic library is not loaded. 
Agora recommends checking whether the library is placed in the expected location or whether the library name is correct. */ abstract enableExtension( provider: string, @@ -5352,19 +5645,19 @@ export abstract class IRtcEngine { ): number; /** - * Sets the properties of the extension. + * Sets a plugin property. * - * After enabling the extension, you can call this method to set the properties of the extension. + * After enabling a plugin, you can call this method to set its properties. To set properties for multiple plugins, call this method multiple times. * - * @param provider The name of the extension provider. - * @param extension The name of the extension. - * @param key The key of the extension. - * @param value The value of the extension key. - * @param type Source type of the extension. See MediaSourceType. + * @param provider The name of the plugin provider. + * @param extension The name of the plugin. + * @param key The key of the plugin property. + * @param value The value corresponding to the plugin property key. + * @param type The media source type of the plugin. See MediaSourceType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setExtensionProperty( provider: string, @@ -5375,17 +5668,17 @@ export abstract class IRtcEngine { ): number; /** - * Gets detailed information on the extensions. + * Gets detailed information about the plugin. * - * @param provider The name of the extension provider. - * @param extension The name of the extension. - * @param key The key of the extension. - * @param bufLen Maximum length of the JSON string indicating the extension property. The maximum value is 512 bytes. - * @param type Source type of the extension. See MediaSourceType. + * @param provider The name of the plugin provider. + * @param extension The name of the plugin. 
+ * @param key The key of the plugin property. + * @param bufLen The maximum length of the plugin property JSON string. Maximum value is 512 bytes. + * @param type The media source type of the plugin. See MediaSourceType. * * @returns - * The extension information, if the method call succeeds. - * An empty string, if the method call fails. + * If the method call succeeds, returns the plugin information. + * If the method call fails, returns an empty string. */ abstract getExtensionProperty( provider: string, @@ -5416,15 +5709,15 @@ export abstract class IRtcEngine { /** * Enables in-ear monitoring. * - * This method enables or disables in-ear monitoring. + * Enables or disables in-ear monitoring. Users must wear headphones (wired or Bluetooth) to hear the in-ear monitoring. * - * @param enabled Enables or disables in-ear monitoring. true : Enables in-ear monitoring. false : (Default) Disables in-ear monitoring. - * @param includeAudioFilters The audio filter types of in-ear monitoring. See EarMonitoringFilterType. + * @param enabled Whether to enable in-ear monitoring: true : Enable in-ear monitoring. false : (Default) Disable in-ear monitoring. + * @param includeAudioFilters The type of audio filter for in-ear monitoring. See EarMonitoringFilterType. * * @returns * 0: Success. - * < 0: Failure. - * - 8: Make sure the current audio routing is Bluetooth or headset. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -8: Make sure the current audio route is set to Bluetooth or headphones. */ abstract enableInEarMonitoring( enabled: boolean, @@ -5432,31 +5725,32 @@ export abstract class IRtcEngine { ): number; /** - * Sets the volume of the in-ear monitor. + * Sets the in-ear monitoring volume. * - * @param volume The volume of the user. The value range is [0,400]. + * @param volume Volume, range: [0,400]. * 0: Mute. - * 100: (Default) The original volume. 
- * 400: Four times the original volume (amplifying the audio signals by four times). + * 100: (Default) Original volume. + * 400: 4 times the original volume, with built-in overflow protection. * * @returns - * 0: Success. - * < 0: Failure. - * -2: Invalid parameter settings, such as in-ear monitoring volume exceeding the valid range (< 0 or > 400). + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: Invalid parameter setting, such as in-ear monitoring volume out of range (< 0 or > 400). */ abstract setInEarMonitoringVolume(volume: number): number; /** - * Loads an extension. + * Loads a plugin. * - * This method is used to add extensions external to the SDK (such as those from Extensions Marketplace and SDK extensions) to the SDK. + * This method adds external SDK plugins (such as marketplace plugins and SDK extension plugins) to the SDK. To load multiple plugins, call this method multiple times. + * This method is for Android only. * - * @param path The extension library path and name. For example: /library/libagora_segmentation_extension.dll. - * @param unloadAfterUse Whether to uninstall the current extension when you no longer using it: true : Uninstall the extension when the IRtcEngine is destroyed. false : (Rcommended) Do not uninstall the extension until the process terminates. + * @param path The path and name of the plugin dynamic library. For example: /library/libagora_segmentation_extension.dll. + * @param unloadAfterUse Whether to automatically unload the plugin after use: true : Automatically unloads the plugin when IRtcEngine is destroyed. false : Does not automatically unload the plugin until the process exits (recommended). * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract loadExtensionProvider( path: string, @@ -5464,17 +5758,17 @@ export abstract class IRtcEngine { ): number; /** - * Sets the properties of the extension provider. + * Sets a property for the plugin provider. * - * You can call this method to set the attributes of the extension provider and initialize the relevant parameters according to the type of the provider. + * You can call this method to set properties for the plugin provider and initialize related parameters based on the provider type. To set properties for multiple plugin providers, call this method multiple times. * - * @param provider The name of the extension provider. - * @param key The key of the extension. - * @param value The value of the extension key. + * @param provider The name of the plugin provider. + * @param key The key of the plugin property. + * @param value The value corresponding to the plugin property key. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setExtensionProviderProperty( provider: string, @@ -5485,11 +5779,13 @@ export abstract class IRtcEngine { /** * Registers an extension. * - * For extensions external to the SDK (such as those from Extensions Marketplace and SDK Extensions), you need to load them before calling this method. Extensions internal to the SDK (those included in the full SDK package) are automatically loaded and registered after the initialization of IRtcEngine. + * For external SDK extensions (such as marketplace plugins and SDK extension plugins), after loading the plugin, you need to call this method to register it. Internal SDK plugins (included in the SDK package) are automatically loaded and registered after initializing IRtcEngine, so you don't need to call this method. 
+ * To register multiple plugins, call this method multiple times. + * The order in which different plugins process data in the SDK is determined by the order in which they are registered. That is, plugins registered earlier process data first. * - * @param provider The name of the extension provider. - * @param extension The name of the extension. - * @param type Source type of the extension. See MediaSourceType. + * @param provider The name of the plugin provider. + * @param extension The name of the plugin. + * @param type The media source type of the plugin. See MediaSourceType. */ abstract registerExtension( provider: string, @@ -5500,11 +5796,14 @@ export abstract class IRtcEngine { /** * Sets the camera capture configuration. * - * @param config The camera capture configuration. See CameraCapturerConfiguration. In this method, you do not need to set the deviceId parameter. + * Before adjusting the camera's focal length configuration, it is recommended to call queryCameraFocalLengthCapability to query the device's supported focal length capabilities, and configure accordingly. + * Due to limitations on some Android devices, even if you configure the focal length type based on the result of queryCameraFocalLengthCapability, the setting may not take effect. + * + * @param config Camera capture configuration. See CameraCapturerConfiguration. You do not need to set the deviceId parameter in this method. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. */ abstract setCameraCapturerConfiguration( config: CameraCapturerConfiguration @@ -5513,14 +5812,14 @@ export abstract class IRtcEngine { /** * Creates a custom video track. * - * To publish a custom video source, see the following steps: - * Call this method to create a video track and get the video track ID. 
- * Call joinChannel to join the channel. In ChannelMediaOptions, set customVideoTrackId to the video track ID that you want to publish, and set publishCustomVideoTrack to true. - * Call pushVideoFrame and specify videoTrackId as the video track ID set in step 2. You can then publish the corresponding custom video source in the channel. + * When you need to publish a custom captured video in the channel, refer to the following steps: + * Call this method to create a video track and obtain the video track ID. + * When calling joinChannel to join the channel, set customVideoTrackId in ChannelMediaOptions to the video track ID you want to publish, and set publishCustomVideoTrack to true. + * Call pushVideoFrame and specify videoTrackId as the video track ID specified in step 2 to publish the corresponding custom video source in the channel. * * @returns - * If the method call is successful, the video track ID is returned as the unique identifier of the video track. - * If the method call fails, 0xffffffff is returned. + * If the method call succeeds, returns the video track ID as the unique identifier of the video track. + * If the method call fails, returns 0xffffffff. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract createCustomVideoTrack(): number; @@ -5532,11 +5831,11 @@ export abstract class IRtcEngine { /** * Destroys the specified video track. * - * @param videoTrackId The video track ID returned by calling the createCustomVideoTrack method. + * @param videoTrackId The video track ID returned by the createCustomVideoTrack method. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract destroyCustomVideoTrack(videoTrackId: number): number; @@ -5548,11 +5847,11 @@ export abstract class IRtcEngine { /** * Switches between front and rear cameras. * - * You can call this method to dynamically switch cameras based on the actual camera availability during the app's runtime, without having to restart the video stream or reconfigure the video source. + * You can call this method during the app's runtime to dynamically switch between cameras based on the actual availability, without restarting the video stream or reconfiguring the video source. This method only switches the camera for the first video stream captured by the camera, that is, the video source set to VideoSourceCamera (0) when calling startCameraCapture. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract switchCamera(): number; @@ -5567,8 +5866,8 @@ export abstract class IRtcEngine { /** * Checks whether the device camera supports face detection. * - * This method must be called after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). - * This method is for Android and iOS only. + * This method is only applicable to Android and iOS. + * You must call this method after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). * * @returns * true : The device camera supports face detection. false : The device camera does not support face detection. @@ -5576,84 +5875,84 @@ export abstract class IRtcEngine { abstract isCameraFaceDetectSupported(): boolean; /** - * Checks whether the device supports camera flash. + * Checks whether the device supports keeping the flashlight on. 
* - * This method must be called after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). - * The app enables the front camera by default. If your front camera does not support flash, this method returns false. If you want to check whether the rear camera supports the flash function, call switchCamera before this method. - * On iPads with system version 15, even if isCameraTorchSupported returns true, you might fail to successfully enable the flash by calling setCameraTorchOn due to system issues. + * You must call this method after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * In general, the app uses the front camera by default. If the front camera does not support keeping the flashlight on, this method returns false. To check whether the rear camera supports this feature, use switchCamera to switch the camera first, then call this method. + * On iPads with system version 15, even if isCameraTorchSupported returns true, due to system limitations, you may still fail to turn on the flashlight using setCameraTorchOn. * * @returns - * true : The device supports camera flash. false : The device does not support camera flash. + * true : The device supports keeping the flashlight on. false : The device does not support keeping the flashlight on. */ abstract isCameraTorchSupported(): boolean; /** - * Check whether the device supports the manual focus function. + * Checks whether the device supports manual focus. * - * This method must be called after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * You must call this method after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). * * @returns - * true : The device supports the manual focus function. 
false : The device does not support the manual focus function. + * true : The device supports manual focus. false : The device does not support manual focus. */ abstract isCameraFocusSupported(): boolean; /** - * Checks whether the device supports the face auto-focus function. + * Checks whether the device supports face auto-focus. * * This method must be called after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). * * @returns - * true : The device supports the face auto-focus function. false : The device does not support the face auto-focus function. + * true : The device supports face auto-focus. false : The device does not support face auto-focus. */ abstract isCameraAutoFocusFaceModeSupported(): boolean; /** - * Sets the camera zoom factor. + * Sets the zoom factor of the camera. * - * For iOS devices equipped with multi-lens rear cameras, such as those featuring dual-camera (wide-angle and ultra-wide-angle) or triple-camera (wide-angle, ultra-wide-angle, and telephoto), you can call setCameraCapturerConfiguration first to set the cameraFocalLengthType as CameraFocalLengthDefault (0) (standard lens). Then, adjust the camera zoom factor to a value less than 1.0. This configuration allows you to capture video with an ultra-wide-angle perspective. - * You must call this method after enableVideo. The setting result will take effect after the camera is successfully turned on, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * Some iOS devices have rear cameras composed of multiple lenses, such as dual cameras (wide-angle and ultra-wide-angle) or triple cameras (wide-angle, ultra-wide-angle, and telephoto). 
For such composite lenses with ultra-wide-angle capabilities, you can call setCameraCapturerConfiguration and set cameraFocalLengthType to CameraFocalLengthDefault (0) (standard lens), then call this method to set the camera zoom factor to a value less than 1.0 to achieve an ultra-wide-angle shooting effect. + * You must call this method after enableVideo. The setting takes effect after the camera is successfully turned on, that is, when the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). * - * @param factor Camera zoom factor. For devices that do not support ultra-wide-angle, the value ranges from 1.0 to the maximum zoom factor; for devices that support ultra-wide-angle, the value ranges from 0.5 to the maximum zoom factor. You can get the maximum zoom factor supported by the device by calling the getCameraMaxZoomFactor method. + * @param factor The zoom factor of the camera. For devices that do not support ultra-wide-angle, the range is from 1.0 to the maximum zoom factor; for devices that support ultra-wide-angle, the range is from 0.5 to the maximum zoom factor. You can use getCameraMaxZoomFactor to get the maximum zoom factor supported by the device. * * @returns - * The camera zoom factor value, if successful. - * < 0: if the method if failed. + * If the method call succeeds: returns the set factor value. + * If the method call fails: returns a value < 0. */ abstract setCameraZoomFactor(factor: number): number; /** - * Enables or disables face detection for the local user. + * Enables/disables local face detection. * - * @param enabled Whether to enable face detection for the local user: true : Enable face detection. false : (Default) Disable face detection. + * @param enabled Whether to enable face detection: true : Enable face detection. false : (Default) Disable face detection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableFaceDetection(enabled: boolean): number; /** - * Gets the maximum zoom ratio supported by the camera. + * Gets the maximum zoom factor supported by the camera. * * This method must be called after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). * * @returns - * The maximum zoom ratio supported by the camera. + * The maximum zoom factor supported by the device camera. */ abstract getCameraMaxZoomFactor(): number; /** - * Sets the camera manual focus position. + * Sets the manual focus position and triggers focusing. * - * You must call this method after enableVideo. The setting result will take effect after the camera is successfully turned on, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). - * After a successful method call, the SDK triggers the onCameraFocusAreaChanged callback. + * You must call this method after enableVideo. The setting takes effect after the camera is successfully turned on, that is, when the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * After this method is successfully called, the local client triggers the onCameraFocusAreaChanged callback. * - * @param positionX The horizontal coordinate of the touchpoint in the view. - * @param positionY The vertical coordinate of the touchpoint in the view. + * @param positionX The X coordinate of the touch point relative to the view. + * @param positionY The Y coordinate of the touch point relative to the view. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
 */ abstract setCameraFocusPositionInPreview( positionX: number, @@ -5661,28 +5960,28 @@ export abstract class IRtcEngine { ): number; /** - * Enables the camera flash. + * Sets whether to turn on the flashlight. * - * You must call this method after enableVideo. The setting result will take effect after the camera is successfully turned on, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * You must call this method after enableVideo. The setting takes effect after the camera is successfully turned on, that is, when the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). * - * @param isOn Whether to turn on the camera flash: true : Turn on the flash. false : (Default) Turn off the flash. + * @param isOn Whether to turn on the flashlight: true : Turn on the flashlight. false : (Default) Turn off the flashlight. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setCameraTorchOn(isOn: boolean): number; /** - * Enables the camera auto-face focus function. + * Enables or disables face auto focus. * - * By default, the SDK disables face autofocus on Android and enables face autofocus on iOS. To set face autofocus, call this method. + * By default, the SDK disables face auto focus on Android and enables it on iOS. To configure face auto focus manually, call this method. * - * @param enabled Whether to enable face autofocus: true : Enable the camera auto-face focus function. false : Disable face auto-focus. + * @param enabled Whether to enable face auto focus: true : Enable face auto focus. false : Disable face auto focus. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. */ abstract setCameraAutoFocusFaceModeEnabled(enabled: boolean): number; @@ -5697,17 +5996,17 @@ export abstract class IRtcEngine { abstract isCameraExposurePositionSupported(): boolean; /** - * Sets the camera exposure position. + * Sets the manual exposure position. * - * You must call this method after enableVideo. The setting result will take effect after the camera is successfully turned on, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). - * After a successful method call, the SDK triggers the onCameraExposureAreaChanged callback. + * You must call this method after enableVideo. The setting takes effect after the camera is successfully turned on, that is, when the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * After this method is successfully called, the local client triggers the onCameraExposureAreaChanged callback. * - * @param positionXinView The horizontal coordinate of the touchpoint in the view. - * @param positionYinView The vertical coordinate of the touchpoint in the view. + * @param positionXinView The X coordinate of the touch point relative to the view. + * @param positionYinView The Y coordinate of the touch point relative to the view. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setCameraExposurePosition( positionXinView: number, @@ -5715,98 +6014,100 @@ export abstract class IRtcEngine { ): number; /** - * Queries whether the current camera supports adjusting exposure value. + * Checks whether the current camera supports exposure adjustment. 
 * - * This method must be called after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). - * Before calling setCameraExposureFactor, Agora recoomends that you call this method to query whether the current camera supports adjusting the exposure value. - * By calling this method, you adjust the exposure value of the currently active camera, that is, the camera specified when calling setCameraCapturerConfiguration. + * You must call this method after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * It is recommended to call this method before using setCameraExposureFactor to adjust the exposure factor, to check whether the current camera supports exposure adjustment. + * This method checks whether the currently used camera supports exposure adjustment, that is, the camera specified by setCameraCapturerConfiguration. * * @returns - * true : Success. false : Failure. + * true : The current camera supports exposure adjustment. false : The current camera does not support exposure adjustment. */ abstract isCameraExposureSupported(): boolean; /** - * Sets the camera exposure value. + * Sets the exposure factor of the current camera. * - * Insufficient or excessive lighting in the shooting environment can affect the image quality of video capture. To achieve optimal video quality, you can use this method to adjust the camera's exposure value. - * You must call this method after enableVideo. The setting result will take effect after the camera is successfully turned on, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). 
- * Before calling this method, Agora recommends calling isCameraExposureSupported to check whether the current camera supports adjusting the exposure value. - * By calling this method, you adjust the exposure value of the currently active camera, that is, the camera specified when calling setCameraCapturerConfiguration. + * When the lighting in the shooting environment is insufficient or too bright, it can affect the quality of the captured video. To achieve better video effects, you can use this method to adjust the exposure factor of the camera. + * You must call this method after enableVideo. The setting takes effect after the camera is successfully turned on, that is, when the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * It is recommended that you call isCameraExposureSupported before using this method to check whether the current camera supports adjusting the exposure factor. + * When you call this method, it sets the exposure factor for the currently used camera, which is the one specified in setCameraCapturerConfiguration. * - * @param factor The camera exposure value. The default value is 0, which means using the default exposure of the camera. The larger the value, the greater the exposure. When the video image is overexposed, you can reduce the exposure value; when the video image is underexposed and the dark details are lost, you can increase the exposure value. If the exposure value you specified is beyond the range supported by the device, the SDK will automatically adjust it to the actual supported range of the device. On Android, the value range is [-20.0, 20.0]. On iOS, the value range is [-8.0, 8.0]. + * @param factor The exposure factor of the camera. The default value is 0, which means using the camera's default exposure. The larger the value, the greater the exposure. 
If the video image is overexposed, you can lower the exposure factor; if the video image is underexposed and dark details are lost, you can increase the exposure factor. If the specified exposure factor exceeds the supported range of the device, the SDK automatically adjusts it to the supported range. + * On Android, the range is [-20.0,20.0]; on iOS, the range is [-8.0,8.0]. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setCameraExposureFactor(factor: number): number; /** * Checks whether the device supports auto exposure. * - * This method must be called after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). - * This method applies to iOS only. - * * @returns * true : The device supports auto exposure. false : The device does not support auto exposure. */ abstract isCameraAutoExposureFaceModeSupported(): boolean; /** - * Sets whether to enable auto exposure. - * - * You must call this method after enableVideo. The setting result will take effect after the camera is successfully turned on, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). - * This method applies to iOS only. + * Enables or disables auto exposure. * * @param enabled Whether to enable auto exposure: true : Enable auto exposure. false : Disable auto exposure. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. */ abstract setCameraAutoExposureFaceModeEnabled(enabled: boolean): number; /** * Sets the camera stabilization mode. * - * This method applies to iOS only. 
The camera stabilization mode is off by default. You need to call this method to turn it on and set the appropriate stabilization mode. + * Camera stabilization is disabled by default. You need to call this method to enable and set an appropriate stabilization mode. This method is for iOS only. + * Camera stabilization only takes effect when the video resolution is greater than 1280 × 720. + * The higher the stabilization level, the smaller the camera's field of view and the greater the camera delay. To ensure user experience, we recommend setting the mode parameter to CameraStabilizationModeLevel1. * * @param mode Camera stabilization mode. See CameraStabilizationMode. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setCameraStabilizationMode(mode: CameraStabilizationMode): number; /** - * Sets the default audio playback route. + * Sets the default audio route. * - * Most mobile phones have two audio routes: an earpiece at the top, and a speakerphone at the bottom. The earpiece plays at a lower volume, and the speakerphone at a higher volume. When setting the default audio route, you determine whether audio playback comes through the earpiece or speakerphone when no external audio device is connected. In different scenarios, the default audio routing of the system is also different. See the following: - * Voice call: Earpiece. - * Audio broadcast: Speakerphone. - * Video call: Speakerphone. - * Video broadcast: Speakerphone. You can call this method to change the default audio route. After calling this method to set the default audio route, the actual audio route of the system will change with the connection of external audio devices (wired headphones or Bluetooth headphones). 
+ * Mobile devices typically have two audio routes: the earpiece at the top, which plays sound at a lower volume, and the speaker at the bottom, which plays sound at a higher volume. Setting the default audio route means specifying whether the system uses the earpiece or speaker to play audio when no external device is connected. + * The system defaults vary by scenario: + * Voice call: Earpiece + * Voice live streaming: Speaker + * Video call: Speaker + * Video live streaming: Speaker Calling this API allows you to change the default audio route above. After setting the default audio route using this method, the actual audio route may change when external audio devices (wired or Bluetooth headsets) are connected. See [Audio Route](https://doc.shengwang.cn/doc/rtc/android/advanced-features/audio-route). * - * @param defaultToSpeaker Whether to set the speakerphone as the default audio route: true : Set the speakerphone as the default audio route. false : Set the earpiece as the default audio route. + * @param defaultToSpeaker Whether to use the speaker as the default audio route: true : Set the default audio route to speaker. false : Set the default audio route to earpiece. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setDefaultAudioRouteToSpeakerphone( defaultToSpeaker: boolean ): number; /** - * Enables/Disables the audio route to the speakerphone. + * Enables or disables speakerphone playback. * - * @param speakerOn Sets whether to enable the speakerphone or earpiece: true : Enable device state monitoring. The audio route is the speakerphone. false : Disable device state monitoring. The audio route is the earpiece. + * For default audio routes in different scenarios, see [Audio Route](https://doc.shengwang.cn/doc/rtc/android/advanced-features/audio-route). 
+ * This method only sets the audio route used by the user in the current channel and does not affect the SDK's default audio route. If the user leaves the current channel and joins a new one, the SDK's default audio route will still be used. + * If the user uses external audio playback devices such as Bluetooth or wired headsets, this method has no effect, and audio will only be played through the external device. If multiple external devices are connected, audio will be played through the most recently connected device. + * + * @param speakerOn Whether to enable speakerphone playback: true : Enable. Audio route is speaker. false : Disable. Audio route is earpiece. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setEnableSpeakerphone(speakerOn: boolean): number; @@ -5814,50 +6115,59 @@ export abstract class IRtcEngine { * Checks whether the speakerphone is enabled. * * @returns - * true : The speakerphone is enabled, and the audio plays from the speakerphone. false : The speakerphone is not enabled, and the audio plays from devices other than the speakerphone. For example, the headset or earpiece. + * true : The speakerphone is enabled, and audio is routed to the speaker. false : The speakerphone is not enabled, and audio is routed to a non-speaker device (earpiece, headset, etc.). */ abstract isSpeakerphoneEnabled(): boolean; /** - * Selects the audio playback route in communication audio mode. + * Selects the audio route in communication volume mode. * - * This method is used to switch the audio route from Bluetooth headphones to earpiece, wired headphones or speakers in communication audio mode (). This method is for Android only. 
+ * This method is used to switch the audio route from a Bluetooth headset to the earpiece, wired headset, or speaker in communication volume mode ([MODE_IN_COMMUNICATION](https://developer.android.google.cn/reference/kotlin/android/media/AudioManager?hl=en#mode_in_communication)). This method is for Android only. + * When used together with setEnableSpeakerphone, it may cause conflicts. Agora recommends using setRouteInCommunicationMode alone. * - * @param route The audio playback route you want to use: - * -1: The default audio route. - * 0: Headphones with microphone. - * 1: Handset. - * 2: Headphones without microphone. - * 3: Device's built-in speaker. - * 4: (Not supported yet) External speakers. - * 5: Bluetooth headphones. + * @param route The desired audio route: + * -1: The system default audio route. + * 0: Headset with microphone. + * 1: Earpiece. + * 2: Headset without microphone. + * 3: Built-in speaker. + * 4: (Not supported) External speaker. + * 5: Bluetooth headset. * 6: USB device. * * @returns - * Without practical meaning. + * No practical meaning. */ abstract setRouteInCommunicationMode(route: number): number; /** - * Checks if the camera supports portrait center stage. + * Checks whether the camera supports Center Stage. * - * This method applies to iOS only. Before calling enableCameraCenterStage to enable portrait center stage, it is recommended to call this method to check if the current device supports the feature. + * Before calling enableCameraCenterStage to enable the Center Stage feature, you are advised to call this method to check whether the current device supports Center Stage. This method is only available on iOS. * * @returns - * true : The current camera supports the portrait center stage. false : The current camera supports the portrait center stage. + * true : The current camera supports Center Stage. false : The current camera does not support Center Stage. 
*/ abstract isCameraCenterStageSupported(): boolean; /** - * Enables or disables portrait center stage. + * Enables or disables the Center Stage feature. * - * The portrait center stage feature is off by default. You need to call this method to turn it on. If you need to disable this feature, you need to call this method again and set enabled to false. This method applies to iOS only. + * Center Stage is disabled by default. You need to call this method to enable it. To disable the feature, call this method again and set enabled to false. This method is for iOS only. + * Because this feature requires high device performance, you need to use it on the following or higher-end devices: + * iPad: + * 12.9-inch iPad Pro (5th generation) + * 11-inch iPad Pro (3rd generation) + * iPad (9th generation) + * iPad mini (6th generation) + * iPad Air (5th generation) + * 2020 M1 MacBook Pro 13" + iPhone 11 (using iPhone as an external camera for MacBook) Agora recommends calling isCameraCenterStageSupported to check whether the current device supports Center Stage before enabling this feature. * - * @param enabled Whether to enable the portrait center stage: true : Enable portrait center stage. false : Disable portrait center stage. + * @param enabled Whether to enable the Center Stage feature: true : Enable Center Stage. false : Disable Center Stage. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableCameraCenterStage(enabled: boolean): number; @@ -5871,17 +6181,18 @@ export abstract class IRtcEngine { ): ScreenCaptureSourceInfo[]; /** - * Sets the operational permission of the SDK on the audio session. + * Sets the SDK’s operation permissions on the Audio Session. * - * The SDK and the app can both configure the audio session by default. 
If you need to only use the app to configure the audio session, this method restricts the operational permission of the SDK on the audio session. You can call this method either before or after joining a channel. Once you call this method to restrict the operational permission of the SDK on the audio session, the restriction takes effect when the SDK needs to change the audio session. - * This method is only available for iOS. - * This method does not restrict the operational permission of the app on the audio session. + * By default, both the SDK and the app have permission to operate the Audio Session. If you want only the app to operate the Audio Session, you can call this method to restrict the SDK’s permission. + * You can call this method before or after joining a channel. Once this method is called to restrict the SDK’s operation permission, the restriction takes effect when the SDK attempts to change the Audio Session. + * This method applies only to the iOS platform. + * This method does not restrict the app’s permission to operate the Audio Session. * - * @param restriction The operational permission of the SDK on the audio session. See AudioSessionOperationRestriction. This parameter is in bit mask format, and each bit corresponds to a permission. + * @param restriction The SDK’s operation permission on the Audio Session. See AudioSessionOperationRestriction. This parameter is a bit mask, and each bit corresponds to a permission. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. */ abstract setAudioSessionOperationRestriction( restriction: AudioSessionOperationRestriction @@ -5908,13 +6219,13 @@ export abstract class IRtcEngine { /** * Gets the audio device information. * - * After calling this method, you can get whether the audio device supports ultra-low-latency capture and playback. 
- * You can call this method either before or after joining a channel. + * After calling this method, you can get whether the audio device supports ultra-low latency capture and playback. + * This method can be called before or after joining a channel. * * @returns - * The DeviceInfo object that identifies the audio device information. - * Not null: Success. - * Null: Failure. + * A DeviceInfo object containing the audio device information. + * Non-null: The method call succeeds. + * Null: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getAudioDeviceInfo(): DeviceInfo; @@ -5928,45 +6239,45 @@ export abstract class IRtcEngine { ): number; /** - * Sets the content hint for screen sharing. + * Sets the content type of screen sharing. * - * A content hint suggests the type of the content being shared, so that the SDK applies different optimization algorithms to different types of content. If you don't call this method, the default content hint is ContentHintNone. You can call this method either before or after you start screen sharing. + * The SDK optimizes the sharing experience using different algorithms based on the content type. If you do not call this method, the SDK defaults the screen sharing content type to ContentHintNone, meaning no specific content type. This method can be called before or after starting screen sharing. * - * @param contentHint The content hint for screen sharing. See VideoContentHint. + * @param contentHint The content type of screen sharing. See VideoContentHint. * * @returns - * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. - * -8: The screen sharing state is invalid. Probably because you have shared other screens or windows. Try calling stopScreenCapture to stop the current sharing and start sharing the screen again. + * 0: The method call succeeds. + * < 0: The method call fails. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: Invalid parameter. + * -8: Invalid screen sharing state. This may occur if you are already sharing another screen or window. Try calling stopScreenCapture to stop the current sharing, then restart screen sharing. */ abstract setScreenCaptureContentHint(contentHint: VideoContentHint): number; /** - * Updates the screen capturing region. + * Updates the screen capture region. * - * Call this method after starting screen sharing or window sharing. + * Call this method after screen sharing or window sharing is enabled. * * @returns * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. - * -8: The screen sharing state is invalid. Probably because you have shared other screens or windows. Try calling stopScreenCapture to stop the current sharing and start sharing the screen again. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: The parameter passed in is invalid. + * -8: The screen sharing state is invalid. This may be because you are already sharing another screen or window. Try calling stopScreenCapture to stop the current sharing and start screen sharing again. */ abstract updateScreenCaptureRegion(regionRect: Rectangle): number; /** - * Updates the screen capturing parameters. + * Updates the parameter configuration for screen capture. * - * Call this method after starting screen sharing or window sharing. + * Call this method after screen sharing or window sharing is enabled. * - * @param captureParams The screen sharing encoding parameters. The video properties of the screen sharing stream only need to be set through this parameter, and are unrelated to setVideoEncoderConfiguration. + * @param captureParams Encoding parameter configuration for screen sharing. See ScreenCaptureParameters2. 
The video properties of the screen sharing stream only need to be set through this parameter and are not related to setVideoEncoderConfiguration. * * @returns * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. - * -8: The screen sharing state is invalid. Probably because you have shared other screens or windows. Try calling stopScreenCapture to stop the current sharing and start sharing the screen again. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: The parameter passed in is invalid. + * -8: The screen sharing state is invalid. This may be because you are already sharing another screen or window. Try calling stopScreenCapture to stop the current sharing and start screen sharing again. */ abstract updateScreenCaptureParameters( captureParams: ScreenCaptureParameters @@ -5975,56 +6286,66 @@ export abstract class IRtcEngine { /** * Starts screen capture. * - * The billing for the screen sharing stream is based on the dimensions in ScreenVideoParameters : - * When you do not pass in a value, Agora bills you at 1280 × 720. - * When you pass in a value, Agora bills you at that value. - * - * @param captureParams The screen sharing encoding parameters. See ScreenCaptureParameters2. - * - * @returns - * 0: Success. - * < 0: Failure. - * -2 (iOS platform): Empty parameter. - * -2 (Android platform): The system version is too low. Ensure that the Android API level is not lower than 21. - * -3 (Android platform): Unable to capture system audio. Ensure that the Android API level is not lower than 29. + * The billing standard for screen sharing streams is based on the dimensions value in ScreenVideoParameters : + * If not specified, billing is based on 1280 × 720. + * If specified, billing is based on the value you provide. + * On iOS, screen sharing is only supported on iOS 12.0 and later. 
+ * On iOS, if you use custom audio capture instead of SDK audio capture, to prevent screen sharing from stopping when the app goes to the background, it is recommended to implement a keep-alive mechanism. + * On iOS, this feature requires high device performance. It is recommended to use it on iPhone X or later. + * On iOS, this method depends on the screen sharing dynamic library AgoraReplayKitExtension.xcframework. Removing this library will cause screen sharing to fail. + * On Android, if the user does not grant screen capture permission to the app, the SDK triggers the onPermissionError(2) callback. + * On Android 9 and later, to prevent the system from killing the app when it goes to the background, it is recommended to add the foreground service permission android.permission.FOREGROUND_SERVICE in /app/Manifests/AndroidManifest.xml. + * Due to Android performance limitations, screen sharing is not supported on Android TV. + * Due to Android system limitations, when using Huawei phones for screen sharing, to avoid crashes, do not change the video encoding resolution during sharing. + * Due to Android system limitations, some Xiaomi phones do not support capturing system audio during screen sharing. + * To improve the success rate of capturing system audio during screen sharing, it is recommended to set the audio scenario to AudioScenarioGameStreaming using the setAudioScenario method before joining the channel. + * + * @param captureParams The configuration for screen sharing encoding parameters. See ScreenCaptureParameters2. + * + * @returns + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2 (iOS): Parameter is null. + * -2 (Android): System version too low. Make sure the Android API level is at least 21. + * -3 (Android): Cannot capture system audio. Make sure the Android API level is at least 29. 
*/ abstract startScreenCapture(captureParams: ScreenCaptureParameters2): number; /** - * Updates the screen capturing parameters. + * Updates the parameter configuration for screen capture. * - * If the system audio is not captured when screen sharing is enabled, and then you want to update the parameter configuration and publish the system audio, you can refer to the following steps: - * Call this method, and set captureAudio to true. - * Call updateChannelMediaOptions, and set publishScreenCaptureAudio to true to publish the audio captured by the screen. - * This method is for Android and iOS only. - * On the iOS platform, screen sharing is only available on iOS 12.0 and later. + * If system audio is not captured when screen sharing is enabled and you want to update the parameter configuration to publish system audio, follow these steps: + * Call this method and set captureAudio to true. + * Call updateChannelMediaOptions and set publishScreenCaptureAudio to true to publish the audio captured from the screen. + * This method is applicable to Android and iOS only. + * On iOS, screen sharing is supported on iOS 12.0 and later. * - * @param captureParams The screen sharing encoding parameters. See ScreenCaptureParameters2. + * @param captureParams Encoding parameter configuration for screen sharing. See ScreenCaptureParameters2. * * @returns * 0: Success. * < 0: Failure. - * -2: The parameter is invalid. - * -8: The screen sharing state is invalid. Probably because you have shared other screens or windows. Try calling stopScreenCapture to stop the current sharing and start sharing the screen again. + * -2: The parameter passed in is invalid. + * -8: The screen sharing state is invalid. This may be because you are already sharing another screen or window. Try calling stopScreenCapture to stop the current sharing and start screen sharing again. 
*/ abstract updateScreenCapture(captureParams: ScreenCaptureParameters2): number; /** - * Queries the highest frame rate supported by the device during screen sharing. + * Queries the maximum frame rate supported by the device for screen sharing. * * @returns - * The highest frame rate supported by the device, if the method is called successfully. See ScreenCaptureFramerateCapability. - * < 0: Failure. + * If the method call succeeds, returns the maximum frame rate supported by the device. See ScreenCaptureFramerateCapability. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract queryScreenCaptureCapability(): number; /** - * Queries the focal length capability supported by the camera. + * Queries the focal length capabilities supported by the camera. * - * If you want to enable the wide-angle or ultra-wide-angle mode for camera capture, it is recommended to start by calling this method to check whether the device supports the required focal length capability. Then, adjust the camera's focal length configuration based on the query result by calling setCameraCapturerConfiguration, ensuring the best camera capture performance. + * To enable wide-angle or ultra-wide-angle camera modes, it is recommended to call this method first to check whether the device supports the corresponding focal length capabilities. Then, based on the query result, call setCameraCapturerConfiguration to adjust the camera's focal length configuration for optimal capture performance. * * @returns - * Returns an object containing the following properties: focalLengthInfos : An array of FocalLengthInfo objects, which contain the camera's orientation and focal length type. size : The number of focal length information items retrieved. 
+ * Returns an object with the following properties: focalLengthInfos : An array of FocalLengthInfo objects that include the camera's direction and focal length type. size : The number of focal length entries actually returned. */ abstract queryCameraFocalLengthCapability(): { focalLengthInfos: FocalLengthInfo[]; @@ -6032,28 +6353,30 @@ export abstract class IRtcEngine { }; /** - * Configures MediaProjection outside of the SDK to capture screen video streams. + * Sets an external MediaProjection to capture screen video streams. * - * This method is for Android only. After successfully calling this method, the external MediaProjection you set will replace the MediaProjection requested by the SDK to capture the screen video stream. When the screen sharing is stopped or IRtcEngine is destroyed, the SDK will automatically release the MediaProjection. + * After successfully calling this method, the external MediaProjection you set will replace the MediaProjection obtained by the SDK to capture screen video streams. + * When screen sharing stops or IRtcEngine is destroyed, the SDK automatically releases the MediaProjection. This method is for Android only. + * You must obtain the MediaProjection permission before calling this method. * - * @param mediaProjection An object used to capture screen video streams. + * @param mediaProjection A [MediaProjection](https://developer.android.com/reference/android/media/projection/MediaProjection) object used to capture screen video streams. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setExternalMediaProjection(mediaProjection: any): number; /** * Sets the screen sharing scenario. * - * When you start screen sharing or window sharing, you can call this method to set the screen sharing scenario. 
The SDK adjusts the video quality and experience of the sharing according to the scenario. Agora recommends that you call this method before joining a channel. + * When starting screen or window sharing, you can call this method to set the screen sharing scenario. The SDK adjusts the shared video quality based on the scenario you set. Agora recommends calling this method before joining the channel. * * @param screenScenario The screen sharing scenario. See ScreenScenarioType. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setScreenCaptureScenario(screenScenario: ScreenScenarioType): number; @@ -6061,91 +6384,97 @@ export abstract class IRtcEngine { * Stops screen capture. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopScreenCapture(): number; /** - * Retrieves the call ID. + * Gets the call ID. * - * When a user joins a channel on a client, a callId is generated to identify the call from the client. You can call this method to get callId, and pass it in when calling methods such as rate and complain. + * Each time the client joins a channel, a corresponding callId is generated to identify the call session. You can call this method to obtain the callId parameter, then pass it to methods like rate and complain. * * @returns - * The current call ID, if the method succeeds. - * An empty string, if the method call fails. + * Returns the current call ID if the method call succeeds. + * Returns an empty string if the method call fails. */ abstract getCallId(): string; /** - * Allows a user to rate a call after the call ends. + * Rates a call. 
* - * Ensure that you call this method after leaving a channel. + * You need to call this method after leaving the channel. * - * @param callId The current call ID. You can get the call ID by calling getCallId. - * @param rating The value is between 1 (the lowest score) and 5 (the highest score). - * @param description A description of the call. The string length should be less than 800 bytes. + * @param callId Call ID. You can get this parameter by calling getCallId. + * @param rating Rating for the call, from 1 (lowest) to 5 (highest). + * @param description Description of the call. The length must be less than 800 bytes. * * @returns * 0: Success. - * < 0: Failure. - * -1: A general error occurs (no specified reason). - * -2: The parameter is invalid. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. + * -1: General error (not classified). + * -2: Invalid parameter. */ abstract rate(callId: string, rating: number, description: string): number; /** - * Allows a user to complain about the call quality after a call ends. + * Reports call quality issues. * - * This method allows users to complain about the quality of the call. Call this method after the user leaves the channel. + * This method allows users to report call quality issues. It must be called after leaving the channel. * - * @param callId The current call ID. You can get the call ID by calling getCallId. - * @param description A description of the call. The string length should be less than 800 bytes. + * @param callId Call ID. You can obtain this by calling getCallId. + * @param description Description of the call. The length should be less than 800 bytes. * * @returns * 0: Success. - * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -1: General error (not specifically classified). + * -2: Invalid parameter. + * -7: Method called before IRtcEngine is initialized. */ abstract complain(callId: string, description: string): number; /** - * Starts pushing media streams to a CDN without transcoding. + * Starts RTMP streaming without transcoding. * - * Call this method after joining a channel. - * Only hosts in the LIVE_BROADCASTING profile can call this method. - * If you want to retry pushing streams after a failed push, make sure to call stopRtmpStream first, then call this method to retry pushing streams; otherwise, the SDK returns the same error code as the last failed push. Agora recommends that you use the server-side Media Push function. You can call this method to push an audio or video stream to the specified CDN address. This method can push media streams to only one CDN address at a time, so if you need to push streams to multiple addresses, call this method multiple times. After you call this method, the SDK triggers the onRtmpStreamingStateChanged callback on the local client to report the state of the streaming. + * Agora recommends using the more advanced server-side streaming feature. See [Implement server-side streaming](https://doc.shengwang.cn/doc/media-push/restful/landing-page). + * Call this method to push live audio and video streams to the specified RTMP streaming URL. This method can push to only one URL at a time. To push to multiple URLs, call this method multiple times. + * After calling this method, the SDK triggers the onRtmpStreamingStateChanged callback locally to report the streaming status. + * Call this method after joining a channel. + * Only hosts in live streaming scenarios can call this method. + * If the streaming fails and you want to restart it, make sure to call stopRtmpStream first before calling this method again. 
Otherwise, the SDK will return the same error code as the previous failed attempt. * - * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot exceed 1024 bytes. Special characters such as Chinese characters are not supported. + * @param url The RTMP or RTMPS streaming URL. The maximum length is 1024 bytes. Chinese characters and special characters are not supported. * * @returns * 0: Success. - * < 0: Failure. - * -2: The URL or configuration of transcoding is invalid; check your URL and transcoding configurations. - * -7: The SDK is not initialized before calling this method. - * -19: The Media Push URL is already in use; use another URL instead. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. + * -2: Invalid URL or transcoding parameter. Check your URL or parameter settings. + * -7: SDK not initialized before calling this method. + * -19: The RTMP streaming URL is already in use. Use a different URL. */ abstract startRtmpStreamWithoutTranscoding(url: string): number; /** - * Starts Media Push and sets the transcoding configuration. + * Starts pushing media streams to a CDN and sets the transcoding configuration. * - * Agora recommends that you use the server-side Media Push function. You can call this method to push a live audio-and-video stream to the specified CDN address and set the transcoding configuration. This method can push media streams to only one CDN address at a time, so if you need to push streams to multiple addresses, call this method multiple times. Under one Agora project, the maximum number of concurrent tasks to push media streams is 200 by default. If you need a higher quota, contact. After you call this method, the SDK triggers the onRtmpStreamingStateChanged callback on the local client to report the state of the streaming. + * Agora recommends using the more comprehensive server-side CDN streaming service. 
See [Implement server-side CDN streaming](https://doc.shengwang.cn/doc/media-push/restful/landing-page). + * Call this method to push live audio and video streams to the specified CDN streaming URL and set the transcoding configuration. This method can only push media streams to one URL at a time. To push to multiple URLs, call this method multiple times. + * Each push stream represents a streaming task. The maximum number of concurrent tasks is 200 by default, which means you can run up to 200 streaming tasks simultaneously under one Agora project. To increase the quota, [contact technical support](https://ticket.shengwang.cn/). + * After calling this method, the SDK triggers the onRtmpStreamingStateChanged callback locally to report the streaming status. * Call this method after joining a channel. - * Only hosts in the LIVE_BROADCASTING profile can call this method. - * If you want to retry pushing streams after a failed push, make sure to call stopRtmpStream first, then call this method to retry pushing streams; otherwise, the SDK returns the same error code as the last failed push. + * Only hosts in a live streaming scenario can call this method. + * If the streaming fails and you want to restart it, you must call stopRtmpStream before calling this method again. Otherwise, the SDK returns the same error code as the previous failure. * - * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot exceed 1024 bytes. Special characters such as Chinese characters are not supported. - * @param transcoding The transcoding configuration for Media Push. See LiveTranscoding. + * @param url The CDN streaming URL. The format must be RTMP or RTMPS. The character length must not exceed 1024 bytes. Chinese characters and other special characters are not supported. + * @param transcoding The transcoding configuration for CDN streaming. See LiveTranscoding. * * @returns - * 0: Success. - * < 0: Failure. 
- * -2: The URL or configuration of transcoding is invalid; check your URL and transcoding configurations. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: The URL or transcoding parameter is invalid. Check your URL or parameter settings. * -7: The SDK is not initialized before calling this method. - * -19: The Media Push URL is already in use; use another URL instead. + * -19: The CDN streaming URL is already in use. Use another CDN streaming URL. */ abstract startRtmpStreamWithTranscoding( url: string, @@ -6153,131 +6482,138 @@ export abstract class IRtcEngine { ): number; /** - * Updates the transcoding configuration. + * Updates the transcoding configuration for CDN streaming. * - * Agora recommends that you use the server-side Media Push function. After you start pushing media streams to CDN with transcoding, you can dynamically update the transcoding configuration according to the scenario. The SDK triggers the onTranscodingUpdated callback after the transcoding configuration is updated. + * Agora recommends using the more comprehensive server-side CDN streaming service. See [Implement server-side CDN streaming](https://doc.shengwang.cn/doc/media-push/restful/landing-page). + * After enabling transcoding streaming, you can dynamically update the transcoding configuration based on your scenario. After the update, the SDK triggers the onTranscodingUpdated callback. * - * @param transcoding The transcoding configuration for Media Push. See LiveTranscoding. + * @param transcoding The transcoding configuration for CDN streaming. See LiveTranscoding. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract updateRtmpTranscoding(transcoding: LiveTranscoding): number; /** - * Starts the local video mixing. + * Starts local video compositing. * - * After calling this method, you can merge multiple video streams into one video stream locally. For example, you can merge the video streams captured by the camera, screen sharing, media player, remote video, video files, images, etc. into one video stream, and then publish the mixed video stream to the channel. + * After calling this method, you can merge multiple video streams locally into a single stream. For example, merge video from the camera, screen sharing, media player, remote users, video files, images, etc., into one stream, and then publish the composited stream to the channel. + * Local compositing consumes significant CPU resources. Agora recommends enabling this feature on high-performance devices. + * If you need to composite locally captured video streams, the SDK supports the following combinations: + * On Android and iOS, up to 2 camera video streams (requires device support for dual cameras or external cameras) + 1 screen sharing stream. + * When configuring compositing, ensure that the camera video stream capturing the portrait has a higher layer index than the screen sharing stream. Otherwise, the portrait may be covered and not appear in the final composited stream. * - * @param config Configuration of the local video mixing, see LocalTranscoderConfiguration. - * The maximum resolution of each video stream participating in the local video mixing is 4096 × 2160. If this limit is exceeded, video mixing does not take effect. - * The maximum resolution of the mixed video stream is 4096 × 2160. + * @param config Local compositing configuration. See LocalTranscoderConfiguration. + * The maximum resolution for each video stream in the compositing is 4096 × 2160. Exceeding this limit will cause the compositing to fail. + * The maximum resolution of the composited video stream is 4096 × 2160. 
* * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract startLocalVideoTranscoder( config: LocalTranscoderConfiguration ): number; /** - * Updates the local video mixing configuration. + * Updates the local video compositing configuration. * - * After calling startLocalVideoTranscoder, call this method if you want to update the local video mixing configuration. If you want to update the video source type used for local video mixing, such as adding a second camera or screen to capture video, you need to call this method after startCameraCapture or startScreenCapture. + * After calling startLocalVideoTranscoder, if you want to update the local video compositing configuration, call this method. If you want to update the type of local video source used for compositing, such as adding a second camera or screen capture video, you need to call this method after startCameraCapture or startScreenCapture. * - * @param config Configuration of the local video mixing, see LocalTranscoderConfiguration. + * @param config Configuration for local video compositing. See LocalTranscoderConfiguration. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract updateLocalTranscoderConfiguration( config: LocalTranscoderConfiguration ): number; /** - * Stops pushing media streams to a CDN. + * Stops CDN streaming. * - * Agora recommends that you use the server-side Media Push function. You can call this method to stop the live stream on the specified CDN address. This method can stop pushing media streams to only one CDN address at a time, so if you need to stop pushing streams to multiple addresses, call this method multiple times. 
After you call this method, the SDK triggers the onRtmpStreamingStateChanged callback on the local client to report the state of the streaming. + * Agora recommends using the more comprehensive server-side CDN streaming service. See [Implement server-side CDN streaming](https://doc.shengwang.cn/doc/media-push/restful/landing-page). + * Call this method to stop the live streaming to the specified CDN streaming URL. This method can only stop streaming to one URL at a time. To stop streaming to multiple URLs, call this method multiple times. + * After calling this method, the SDK triggers the onRtmpStreamingStateChanged callback locally to report the streaming status. * - * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot exceed 1024 bytes. Special characters such as Chinese characters are not supported. + * @param url The CDN streaming URL. The format must be RTMP or RTMPS. The character length must not exceed 1024 bytes. Chinese characters and other special characters are not supported. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopRtmpStream(url: string): number; /** - * Stops the local video mixing. + * Stops local video compositing. * - * After calling startLocalVideoTranscoder, call this method if you want to stop the local video mixing. + * After calling startLocalVideoTranscoder, if you want to stop local video compositing, call this method. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopLocalVideoTranscoder(): number; /** * Starts local audio mixing. * - * This method supports merging multiple audio streams into one audio stream locally. 
For example, merging the audio streams captured from the local microphone, and that from the media player, the sound card, and the remote users into one audio stream, and then publish the merged audio stream to the channel. - * If you want to mix the locally captured audio streams, you can set publishMixedAudioTrack in ChannelMediaOptions to true, and then publish the mixed audio stream to the channel. - * If you want to mix the remote audio stream, ensure that the remote audio stream has been published in the channel and you have subcribed to the audio stream that you need to mix. + * This method allows you to mix multiple local audio streams into a single stream. For example, you can mix audio from the local microphone, media player, sound card, and remote users into one audio stream and publish it to the channel. + * To mix locally captured audio, set publishMixedAudioTrack in ChannelMediaOptions to true to publish the mixed audio stream to the channel. + * To mix remote audio streams, ensure that the remote streams are published in the channel and have been subscribed to. To ensure audio quality, it is recommended that the number of audio streams involved in local mixing does not exceed 10. * - * @param config The configurations for mixing the lcoal audio. See LocalAudioMixerConfiguration. + * @param config Configuration for local audio mixing. See LocalAudioMixerConfiguration. * * @returns * 0: Success. - * < 0: Failure. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -7: IRtcEngine object is not initialized. You must initialize the IRtcEngine object before calling this method. */ abstract startLocalAudioMixer(config: LocalAudioMixerConfiguration): number; /** - * Updates the configurations for mixing audio streams locally. 
+ * Updates the configuration for local audio mixing. * - * After calling startLocalAudioMixer, call this method if you want to update the local audio mixing configuration. + * After calling startLocalAudioMixer, if you want to update the configuration for local audio mixing, call this method. To ensure audio quality, it is recommended that the number of audio streams participating in local mixing does not exceed 10. * - * @param config The configurations for mixing the lcoal audio. See LocalAudioMixerConfiguration. + * @param config Configuration for local audio mixing. See LocalAudioMixerConfiguration. * * @returns * 0: Success. - * < 0: Failure. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -7: The IRtcEngine object is not initialized. You need to successfully initialize the IRtcEngine object before calling this method. */ abstract updateLocalAudioMixerConfiguration( config: LocalAudioMixerConfiguration ): number; /** - * Stops the local audio mixing. + * Stops local audio mixing. * - * After calling startLocalAudioMixer, call this method if you want to stop the local audio mixing. + * After calling startLocalAudioMixer, if you want to stop local audio mixing, call this method. * * @returns * 0: Success. - * < 0: Failure. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -7: The IRtcEngine object is not initialized. You need to successfully initialize the IRtcEngine object before calling this method. */ abstract stopLocalAudioMixer(): number; /** - * Starts camera capture. 
+ * Starts video capture using the camera. * - * You can call this method to start capturing video from one or more cameras by specifying sourceType. On the iOS platform, if you want to enable multi-camera capture, you need to call enableMultiCamera and set enabled to true before calling this method. + * Call this method to start multiple camera captures simultaneously by specifying sourceType. On iOS, to enable multiple camera captures, you must call enableMultiCamera and set enabled to true before calling this method. * - * @param sourceType The type of the video source. See VideoSourceType. - * On iOS devices, you can capture video from up to 2 cameras, provided the device has multiple cameras or supports external cameras. - * On Android devices, you can capture video from up to 4 cameras, provided the device has multiple cameras or supports external cameras. - * @param config The configuration of the video capture. See CameraCapturerConfiguration. On the iOS platform, this parameter has no practical function. Use the config parameter in enableMultiCamera instead to set the video capture configuration. + * @param sourceType Type of video source. See VideoSourceType. + * iOS devices support up to 2 video streams from camera capture (requires devices with multiple cameras or external camera support). + * Android devices support up to 4 video streams from camera capture (requires devices with multiple cameras or external camera support). + * @param config Video capture configuration. See CameraCapturerConfiguration. On iOS, this parameter has no effect. Use the config parameter in enableMultiCamera to configure video capture. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract startCameraCapture( sourceType: VideoSourceType, @@ -6285,30 +6621,31 @@ export abstract class IRtcEngine { ): number; /** - * Stops camera capture. + * Stops capturing video from the camera. * - * After calling startCameraCapture to start capturing video through one or more cameras, you can call this method and set the sourceType parameter to stop the capture from the specified cameras. On the iOS platform, if you want to disable multi-camera capture, you need to call enableMultiCamera after calling this method and set enabled to false. If you are using the local video mixing function, calling this method can cause the local video mixing to be interrupted. + * After calling startCameraCapture to start one or more camera video streams, you can call this method and specify sourceType to stop one or more of the camera video captures. On iOS, to stop multiple camera captures, you need to call this method first, then call enableMultiCamera and set enabled to false. + * If you are using the local composite layout feature, calling this method to stop video capture from the first camera will interrupt the local composite layout. * - * @param sourceType The type of the video source. See VideoSourceType. + * @param sourceType The type of video source. See VideoSourceType. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopCameraCapture(sourceType: VideoSourceType): number; /** * Sets the rotation angle of the captured video. * - * You must call this method after enableVideo. The setting result will take effect after the camera is successfully turned on, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1).
- * When the video capture device does not have the gravity sensing function, you can call this method to manually adjust the rotation angle of the captured video. + * You must call this method after enableVideo. The setting takes effect after the camera is successfully turned on, that is, when the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * If the video capture device does not support gravity sensing, you can call this method to manually adjust the rotation angle of the captured video frame. * - * @param type The video source type. See VideoSourceType. - * @param orientation The clockwise rotation angle. See VideoOrientation. + * @param type The type of video source. See VideoSourceType. + * @param orientation The clockwise rotation angle. See VideoOrientation. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setCameraDeviceOrientation( type: VideoSourceType, @@ -6324,34 +6660,36 @@ ): number;
+ * The interface class IRtcEngineEventHandler is used by the SDK to send callback event notifications to the app. The app obtains SDK event notifications by inheriting the methods of this interface class. + * All methods of the interface class have default (empty) implementations. The app can inherit only the events it cares about as needed. In the callback methods, the app should not perform time-consuming operations or call APIs that may cause blocking (such as sendStreamMessage), + * otherwise it may affect the operation of the SDK. * - * @param eventHandler Callback events to be added. See IRtcEngineEventHandler. + * @param eventHandler The callback event to be added. See IRtcEngineEventHandler. * * @returns - * true : Success. false : Failure. + * true : The method call succeeds. false : The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract registerEventHandler(eventHandler: IRtcEngineEventHandler): boolean; /** - * Removes the specified callback events. + * Removes the specified callback event. * - * You can call this method too remove all added callback events. + * This method removes all previously added callback events. * - * @param eventHandler Callback events to be removed. See IRtcEngineEventHandler. + * @param eventHandler The callback event to be removed. See IRtcEngineEventHandler. * * @returns - * true : Success. false : Failure. + * true : The method call succeeds. false : The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unregisterEventHandler( eventHandler: IRtcEngineEventHandler @@ -6366,49 +6704,54 @@ export abstract class IRtcEngine { ): number; /** - * Enables or disables the built-in encryption. + * Enable or disable built-in encryption. 
* - * After the user leaves the channel, the SDK automatically disables the built-in encryption. To enable the built-in encryption, call this method before the user joins the channel again. + * After the user leaves the channel, the SDK automatically disables encryption. To re-enable encryption, you need to call this method before the user joins the channel again. + * All users in the same channel must use the same encryption mode and key when calling this method. + * If built-in encryption is enabled, you cannot use the CDN streaming feature. * - * @param enabled Whether to enable built-in encryption: true : Enable the built-in encryption. false : (Default) Disable the built-in encryption. - * @param config Built-in encryption configurations. See EncryptionConfig. + * @param enabled Whether to enable built-in encryption: true : Enable built-in encryption. false : (default) Disable built-in encryption. + * @param config Configure the built-in encryption mode and key. See EncryptionConfig. * * @returns * 0: Success. - * < 0: Failure. - * -2: An invalid parameter is used. Set the parameter with a valid value. - * -4: The built-in encryption mode is incorrect or the SDK fails to load the external encryption library. Check the enumeration or reload the external encryption library. - * -7: The SDK is not initialized. Initialize the IRtcEngine instance before calling this method. + * < 0: Failure. + * -2: Invalid parameter. You need to re-specify the parameter. + * -4: Incorrect encryption mode or failed to load external encryption library. Check if the enum value is correct or reload the external encryption library. + * -7: SDK not initialized. You must create the IRtcEngine object and complete initialization before calling the API. */ abstract enableEncryption(enabled: boolean, config: EncryptionConfig): number; /** * Creates a data stream. * - * @param config The configurations for the data stream. See DataStreamConfig.
+ * Within the lifecycle of IRtcEngine, each user can create up to 5 data streams. The data streams are destroyed when leaving the channel. You need to recreate them to use again. + * + * @param config Data stream configuration. See DataStreamConfig. * * @returns - * ID of the created data stream, if the method call succeeds. - * < 0: Failure. + * The ID of the created data stream: Success. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract createDataStream(config: DataStreamConfig): number; /** - * Sends data stream messages. + * Sends a data stream. * - * After calling createDataStream, you can call this method to send data stream messages to all users in the channel. The SDK has the following restrictions on this method: - * Each client within the channel can have up to 5 data channels simultaneously, with a total shared packet bitrate limit of 30 KB/s for all data channels. - * Each data channel can send up to 60 packets per second, with each packet being a maximum of 1 KB. A successful method call triggers the onStreamMessage callback on the remote client, from which the remote user gets the stream message. A failed method call triggers the onStreamMessageError callback on the remote client. - * This method needs to be called after createDataStream and joining the channel. - * This method applies to broadcasters only. + * After calling createDataStream, you can call this method to send data stream messages to all users in the channel. + * The SDK imposes the following restrictions on this method: + * Each client in the channel can have up to 5 data channels simultaneously, and the total sending bitrate of all data channels is limited to 30 KB/s. + * Each data channel can send up to 60 packets per second, with a maximum size of 1 KB per packet. 
After the method is successfully called, the remote end triggers the onStreamMessage callback, where the remote user can retrieve the received stream message. If the call fails, the remote end triggers the onStreamMessageError callback. + * This method must be called after joining a channel and after creating a data channel using createDataStream. + * This method applies to broadcaster users only. * - * @param streamId The data stream ID. You can get the data stream ID by calling createDataStream. - * @param data The message to be sent. - * @param length The length of the data. + * @param streamId Data stream ID. Obtained via createDataStream. + * @param data Data to be sent. + * @param length Length of the data. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract sendStreamMessage( streamId: number, @@ -6436,24 +6779,25 @@ export abstract class IRtcEngine { ): number; /** - * Adds a watermark image to the local video. + * Adds a local video watermark. * - * This method adds a PNG watermark image to the local video in the live streaming. Once the watermark image is added, all the audience in the channel (CDN audience included), and the capturing device can see and capture it. The Agora SDK supports adding only one watermark image onto a live video stream. The newly added watermark image replaces the previous one. The watermark coordinates are dependent on the settings in the setVideoEncoderConfiguration method: - * If the orientation mode of the encoding video (OrientationMode) is fixed landscape mode or the adaptive landscape mode, the watermark uses the landscape orientation. - * If the orientation mode of the encoding video (OrientationMode) is fixed portrait mode or the adaptive portrait mode, the watermark uses the portrait orientation. 
- * When setting the watermark position, the region must be less than the dimensions set in the setVideoEncoderConfiguration method; otherwise, the watermark image will be cropped. - * Ensure that calling this method after enableVideo. - * If you only want to add a watermark to the media push, you can call this method or the startRtmpStreamWithTranscoding method. - * This method supports adding a watermark image in the PNG file format only. Supported pixel formats of the PNG image are RGBA, RGB, Palette, Gray, and Alpha_gray. - * If the dimensions of the PNG image differ from your settings in this method, the image will be cropped or zoomed to conform to your settings. - * If you have enabled the mirror mode for the local video, the watermark on the local video is also mirrored. To avoid mirroring the watermark, Agora recommends that you do not use the mirror and watermark functions for the local video at the same time. You can implement the watermark function in your application layer. + * Deprecated: This method is deprecated. Use addVideoWatermarkWithConfig instead. This method adds a PNG image as a watermark to the local published live video stream. Users in the same live channel, audience of the CDN live stream, and capture devices can all see or capture the watermark image. Currently, only one watermark can be added to the live video stream. A newly added watermark replaces the previous one. + * The watermark coordinates depend on the settings in the setVideoEncoderConfiguration method: + * If the video encoding orientation (OrientationMode) is fixed to landscape or landscape in adaptive mode, landscape coordinates are used for the watermark. + * If the video encoding orientation (OrientationMode) is fixed to portrait or portrait in adaptive mode, portrait coordinates are used. + * When setting the watermark coordinates, the image area of the watermark must not exceed the video dimensions set in the setVideoEncoderConfiguration method.
Otherwise, the exceeding part will be cropped. + * You must call this method after calling enableVideo. + * If you only want to add a watermark for CDN streaming, you can use this method or startRtmpStreamWithTranscoding. + * The watermark image must be in PNG format. This method supports all pixel formats of PNG: RGBA, RGB, Palette, Gray, and Alpha_gray. + * If the size of the PNG image to be added does not match the size you set in this method, the SDK will scale or crop the PNG image to match the setting. + * If local video is set to mirror mode, the local watermark will also be mirrored. To avoid the watermark being mirrored when local users view the local video, it is recommended not to use both mirror and watermark features for local video. Implement the local watermark feature at the application level. * - * @param watermarkUrl The local file path of the watermark image to be added. This method supports adding a watermark image from the local absolute or relative file path. - * @param options The options of the watermark image to be added. See WatermarkOptions. + * @param watermarkUrl The local path of the watermark image to be added. This method supports adding watermark images from local absolute/relative paths. + * @param options Settings for the watermark image to be added. See WatermarkOptions. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract addVideoWatermark( watermarkUrl: string, @@ -6461,16 +6805,24 @@ ): number; /** - * @ignore + * Removes the watermark image from the local video. + * + * Available since v4.6.2. This method removes a previously added watermark image from the local video stream based on the specified unique ID. + * + * @param id The ID of the watermark to be removed. This value must match the ID used when adding the watermark.
+ * + * @returns + * 0: Success. + * < 0: Failure. */ abstract removeVideoWatermark(id: string): number; /** - * Removes the watermark image from the video stream. + * Removes added video watermarks. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract clearVideoWatermarks(): number; @@ -6485,22 +6837,23 @@ abstract resumeAudio(): number; /** - * Enables interoperability with the Agora Web SDK (applicable only in the live streaming scenarios). + * Enable interoperability with the Web SDK (live broadcast only). * - * Deprecated: The SDK automatically enables interoperability with the Web SDK, so you no longer need to call this method. You can call this method to enable or disable interoperability with the Agora Web SDK. If a channel has Web SDK users, ensure that you call this method, or the video of the Native user will be a black screen for the Web user. This method is only applicable in live streaming scenarios, and interoperability is enabled by default in communication scenarios. + * Deprecated: This method is deprecated. The SDK automatically enables interoperability with the Web SDK, so you do not need to call this method. This method enables or disables interoperability with the Web SDK. If there are users joining the channel via the Web SDK, make sure to call this method. Otherwise, Web users may see a black screen from the Native side. + * This method is applicable only in live broadcast scenarios. In communication scenarios, interoperability is enabled by default. * - * @param enabled Whether to enable interoperability: true : Enable interoperability. false : (Default) Disable interoperability. + * @param enabled Whether to enable interoperability with the Web SDK: true : Enable interoperability. false : (default) Disable interoperability.
* * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableWebSdkInteroperability(enabled: boolean): number; /** - * Reports customized messages. + * Sends a custom report message. * - * Agora supports reporting and analyzing customized messages. This function is in the beta stage with a free trial. The ability provided in its beta test version is reporting a maximum of 10 message pieces within 6 seconds, with each message piece not exceeding 256 bytes and each string not exceeding 100 bytes. To try out this function, contact and discuss the format of customized messages with us. + * Agora provides custom data reporting and analytics services. This service is currently in a free beta period. During the beta, you can send up to 10 custom data messages within 6 seconds. Each message must not exceed 256 bytes, and each string must not exceed 100 bytes. To try this service, please [contact sales](https://www.shengwang.cn/contact-sales/) to enable it and agree on the custom data format. */ abstract sendCustomReportMessage( id: string, @@ -6511,17 +6864,16 @@ export abstract class IRtcEngine { ): number; /** - * Registers the metadata observer. + * Registers a media metadata observer to receive or send metadata. * - * You need to implement the IMetadataObserver class and specify the metadata type in this method. This method enables you to add synchronized metadata in the video stream for more diversified - * live interactive streaming, such as sending shopping links, digital coupons, and online quizzes. Call this method before joinChannel. + * You need to implement the IMetadataObserver class yourself and specify the metadata type in this method. This method allows you to add synchronized metadata to the video stream for interactive live streaming, such as sending shopping links, e-coupons, and online quizzes. 
Call this method before joinChannel. * * @param observer The metadata observer. See IMetadataObserver. - * @param type The metadata type. The SDK currently only supports VideoMetadata. See MetadataType. + * @param type The metadata type. Currently, only VideoMetadata is supported. See MetadataType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract registerMediaMetadataObserver( observer: IMetadataObserver, @@ -6529,14 +6881,14 @@ export abstract class IRtcEngine { ): number; /** - * Unregisters the specified metadata observer. + * Unregisters the media metadata observer. * * @param observer The metadata observer. See IMetadataObserver. - * @param type The metadata type. The SDK currently only supports VideoMetadata. See MetadataType. + * @param type The metadata type. Currently, only VideoMetadata is supported. See MetadataType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unregisterMediaMetadataObserver( observer: IMetadataObserver, @@ -6566,81 +6918,83 @@ export abstract class IRtcEngine { ): number; /** - * Sets whether to enable the AI ​​noise suppression function and set the noise suppression mode. + * Enables or disables AI noise reduction and sets the noise reduction mode. * - * You can call this method to enable AI noise suppression function. Once enabled, the SDK automatically detects and reduces stationary and non-stationary noise from your audio on the premise of ensuring the quality of human voice. Stationary noise refers to noise signal with constant average statistical properties and negligibly small fluctuations of level within the period of observation. 
Common sources of stationary noises are: - * Television; - * Air conditioner; - * Machinery, etc. Non-stationary noise refers to noise signal with huge fluctuations of level within the period of observation; common sources of non-stationary noises are: - * Thunder; - * Explosion; - * Cracking, etc. + * You can call this method to enable AI noise reduction. This feature intelligently detects and reduces various steady-state and non-steady-state background noises while ensuring voice quality, making the voice clearer. + * Steady-state noise refers to noise with the same frequency at any point in time. Common steady-state noises include: + * TV noise + * Air conditioner noise + * Factory machinery noise, etc. Non-steady-state noise refers to noise that changes rapidly over time. Common non-steady-state noises include: + * Thunder + * Explosions + * Cracking sounds, etc. + * This method depends on the AI noise reduction dynamic library. Removing the dynamic library will cause the feature to fail. For the name of the AI noise reduction dynamic library, see [Plugin List](https://doc.shengwang.cn/doc/rtc/rn/best-practice/reduce-app-size#%E6%8F%92%E4%BB%B6%E5%88%97%E8%A1%A8). + * Currently, it is not recommended to enable this feature on devices running Android 6.0 or below. * - * @param enabled Whether to enable the AI noise suppression function: true : Enable the AI noise suppression. false : (Default) Disable the AI noise suppression. - * @param mode The AI noise suppression modes. See AudioAinsMode. + * @param enabled Whether to enable AI noise reduction: true : Enable AI noise reduction. false : (Default) Disable AI noise reduction. + * @param mode Noise reduction mode. See AudioAinsMode. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract setAINSMode(enabled: boolean, mode: AudioAinsMode): number; /** - * Registers a user account. + * Registers the local user's User Account. * - * Once registered, the user account can be used to identify the local user when the user joins the channel. After the registration is successful, the user account can identify the identity of the local user, and the user can use it to join the channel. This method is optional. If you want to join a channel using a user account, you can choose one of the following methods: - * Call the registerLocalUserAccount method to register a user account, and then call the joinChannelWithUserAccount method to join a channel, which can shorten the time it takes to enter the channel. - * Call the joinChannelWithUserAccount method to join a channel. - * Starting from v4.6.0, the SDK will no longer automatically map Int UID to the String userAccount used when registering a User Account. If you want to join a channel with the original String userAccount used during registration, call the joinChannelWithUserAccount method to join the channel, instead of calling joinChannel and pass in the Int UID obtained through this method - * Ensure that the userAccount is unique in the channel. - * To ensure smooth communication, use the same parameter type to identify the user. For example, if a user joins the channel with a UID, then ensure all the other users use the UID too. The same applies to the user account. If a user joins the channel with the Agora Web SDK, ensure that the ID of the user is set to the same parameter type. + * This method registers a User Account for the local user. After successful registration, the User Account can be used to identify the local user and join channels. + * This method is optional. 
If you want users to join channels using a User Account, you can implement it using either of the following approaches: + * Call registerLocalUserAccount to register the account, then call joinChannelWithUserAccount to join the channel. This can reduce the time to join the channel. + * Directly call joinChannelWithUserAccount to join the channel. + * Ensure the userAccount set in this method is unique within the channel. + * To ensure communication quality, make sure all users in a channel use the same type of identifier. That is, all users in the same channel must use either UID or User Account. If users join via the Web SDK, ensure they also use the same identifier type. * - * @param appId The App ID of your project on Agora Console. - * @param userAccount The user account. This parameter is used to identify the user in the channel for real-time audio and video engagement. You need to set and manage user accounts yourself and ensure that each user account in the same channel is unique. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported characters are as follow(89 in total): - * The 26 lowercase English letters: a to z. - * The 26 uppercase English letters: A to Z. - * All numeric characters: 0 to 9. - * Space + * @param appId The App ID of your project registered in the console. + * @param userAccount The user's User Account. This parameter identifies the user in the real-time audio and video interaction channel. You need to set and manage the User Account yourself and ensure that each user in the same channel has a unique User Account. This parameter is required, must not exceed 255 bytes, and cannot be null. 
The supported character set includes 89 characters: + * 26 lowercase English letters a-z + * 26 uppercase English letters A-Z + * 10 digits 0-9 + * space * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract registerLocalUserAccount(appId: string, userAccount: string): number; /** - * Join a channel using a user account and token, and set the media options. + * Joins a channel using a User Account and Token, and sets channel media options. * - * Before calling this method, if you have not called registerLocalUserAccount to register a user account, when you call this method to join a channel, the SDK automatically creates a user account for you. Calling the registerLocalUserAccount method to register a user account, and then calling this method to join a channel can shorten the time it takes to enter the channel. Once a user joins the channel, the user subscribes to the audio and video streams of all the other users in the channel by default, giving rise to usage and billings. To stop subscribing to a specified stream or all remote streams, call the corresponding mute methods. To ensure smooth communication, use the same parameter type to identify the user. For example, if a user joins the channel with a UID, then ensure all the other users use the UID too. The same applies to the user account. If a user joins the channel with the Agora Web SDK, ensure that the ID of the user is set to the same parameter type. + * If you have not called registerLocalUserAccount to register a User Account before calling this method, the SDK automatically creates one for you. 
Calling registerLocalUserAccount before this method can reduce the time it takes to join the channel. + * After successfully joining a channel, the user subscribes to all other users' audio and video streams by default, which results in usage and affects billing. If you want to unsubscribe, you can do so by calling the corresponding mute methods. To ensure communication quality, make sure the same type of user identifier is used in the channel. That is, use either UID or User Account consistently within the same channel. If users join the channel using the Web SDK, make sure they use the same identifier type. + * This method only supports joining one channel at a time. + * Apps with different App IDs cannot communicate with each other. + * Before joining a channel, make sure the App ID used to generate the Token is the same as the one used in the initialize method to initialize the engine, otherwise joining the channel with the Token will fail. * - * @param token The token generated on your server for authentication. - * (Recommended) If your project has enabled the security mode (using APP ID and Token for authentication), this parameter is required. - * If you have only enabled the testing mode (using APP ID for authentication), this parameter is optional. You will automatically exit the channel 24 hours after successfully joining in. - * If you need to join different channels at the same time or switch between channels, Agora recommends using a wildcard token so that you don't need to apply for a new token every time joining a channel. - * @param channelId The channel name. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters (89 characters in total): - * All lowercase English letters: a to z. 
- * All uppercase English letters: A to Z. - * All numeric characters: 0 to 9. - * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * @param userAccount The user account. This parameter is used to identify the user in the channel for real-time audio and video engagement. You need to set and manage user accounts yourself and ensure that each user account in the same channel is unique. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported characters are as follows(89 in total): - * The 26 lowercase English letters: a to z. - * The 26 uppercase English letters: A to Z. - * All numeric characters: 0 to 9. + * @param token A dynamic key generated on your server for authentication. See [Use Token Authentication](https://doc.shengwang.cn/doc/rtc/rn/basic-features/token-authentication). + * (Recommended) If your project enables the security mode, i.e., uses APP ID + Token for authentication, this parameter is required. + * If your project only enables debug mode, i.e., uses only the APP ID for authentication, you can join the channel without a Token. The user will automatically leave the channel 24 hours after successfully joining. + * If you need to join multiple channels simultaneously or switch channels frequently, Agora recommends using a wildcard Token to avoid requesting a new Token from the server each time. See [Use Wildcard Token](https://doc.shengwang.cn/doc/rtc/rn/best-practice/wildcard-token). + * @param userAccount User Account. This parameter identifies the user in the real-time audio and video interaction channel. You must set and manage the User Account yourself and ensure it is unique within the same channel. This parameter is required, must not exceed 255 bytes, and cannot be null. 
Supported character set (89 characters total): + * 26 lowercase English letters a-z + * 26 uppercase English letters A-Z + * 10 digits 0-9 * Space * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * @param options The channel media options. See ChannelMediaOptions. + * @param options Channel media options. See ChannelMediaOptions. * * @returns - * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. For example, the token is invalid, the uid parameter is not set to an integer, or the value of a member in ChannelMediaOptions is invalid. You need to pass in a valid parameter and join the channel again. - * -3: Fails to initialize the IRtcEngine object. You need to reinitialize the IRtcEngine object. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. - * -8: The internal state of the IRtcEngine object is wrong. The typical cause is that after calling startEchoTest to start a call loop test, you call this method to join the channel without calling stopEchoTest to stop the test. You need to call stopEchoTest before calling this method. - * -17: The request to join the channel is rejected. The typical cause is that the user is already in the channel. Agora recommends that you use the onConnectionStateChanged callback to see whether the user is in the channel. Do not call this method to join the channel unless you receive the ConnectionStateDisconnected (1) state. - * -102: The channel name is invalid. You need to pass in a valid channel name in channelId to rejoin the channel. - * -121: The user ID is invalid. You need to pass in a valid user ID in uid to rejoin the channel. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: Invalid parameter. 
For example, an invalid Token is used, uid is not an integer, or a ChannelMediaOptions member is invalid. You need to provide valid parameters and rejoin the channel. + * -3: IRtcEngine object initialization failed. You need to reinitialize the IRtcEngine object. + * -7: IRtcEngine object is not initialized. You must initialize the IRtcEngine object before calling this method. + * -8: Internal state error of the IRtcEngine object. Possible reason: startEchoTest was called to start an echo test, but stopEchoTest was not called before calling this method. You must call stopEchoTest before this method. + * -17: Join channel request is rejected. Possible reason: the user is already in the channel. Use the onConnectionStateChanged callback to check if the user is in the channel. Do not call this method again unless you receive the ConnectionStateDisconnected (1) state. + * -102: Invalid channel name. You must provide a valid channel name in channelId and rejoin the channel. + * -121: Invalid user ID. You must provide a valid user ID in uid and rejoin the channel. */ abstract joinChannelWithUserAccount( token: string, @@ -6650,37 +7004,36 @@ export abstract class IRtcEngine { ): number; /** - * Join a channel using a user account and token, and set the media options. + * Joins a channel using a User Account and Token, and sets channel media options. * - * Before calling this method, if you have not called registerLocalUserAccount to register a user account, when you call this method to join a channel, the SDK automatically creates a user account for you. Calling the registerLocalUserAccount method to register a user account, and then calling this method to join a channel can shorten the time it takes to enter the channel. Once a user joins the channel, the user subscribes to the audio and video streams of all the other users in the channel by default, giving rise to usage and billings. 
If you want to stop subscribing to the media stream of other users, you can set the options parameter or call the corresponding mute method. To ensure smooth communication, use the same parameter type to identify the user. For example, if a user joins the channel with a UID, then ensure all the other users use the UID too. The same applies to the user account. If a user joins the channel with the Agora Web SDK, ensure that the ID of the user is set to the same parameter type. + * Before calling this method, if you have not called registerLocalUserAccount to register a User Account, the SDK automatically creates one for you when you join the channel. Calling registerLocalUserAccount before this method shortens the time needed to join the channel. + * After successfully joining a channel, the user subscribes to all remote users' audio and video streams by default, which incurs usage and affects billing. To unsubscribe, you can set the options parameter or call the corresponding mute methods. To ensure communication quality, make sure all users in the channel use the same type of user identity. That is, either UID or User Account must be used consistently within the same channel. If users join via the Web SDK, ensure they use the same identity type. + * This method only supports joining one channel at a time. + * Apps with different App IDs cannot communicate with each other. + * Before joining a channel, ensure the App ID used to generate the Token is the same as the one used to initialize the engine with initialize, otherwise joining the channel with Token will fail. * - * @param token The token generated on your server for authentication. - * (Recommended) If your project has enabled the security mode (using APP ID and Token for authentication), this parameter is required. - * If you have only enabled the testing mode (using APP ID for authentication), this parameter is optional. You will automatically exit the channel 24 hours after successfully joining in. 
- * If you need to join different channels at the same time or switch between channels, Agora recommends using a wildcard token so that you don't need to apply for a new token every time joining a channel. - * @param channelId The channel name. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters (89 characters in total): - * All lowercase English letters: a to z. - * All uppercase English letters: A to Z. - * All numeric characters: 0 to 9. - * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * @param userAccount The user account. This parameter is used to identify the user in the channel for real-time audio and video engagement. You need to set and manage user accounts yourself and ensure that each user account in the same channel is unique. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported characters are as follows(89 in total): - * The 26 lowercase English letters: a to z. - * The 26 uppercase English letters: A to Z. - * All numeric characters: 0 to 9. + * @param token A dynamic key generated on your server for authentication. See [Token Authentication](https://doc.shengwang.cn/doc/rtc/rn/basic-features/token-authentication). + * (Recommended) If your project has enabled the security mode using APP ID + Token for authentication, this parameter is required. + * If your project is in debug mode using only APP ID for authentication, you can join the channel without a Token. You will automatically leave the channel 24 hours after joining. 
+ * If you need to join multiple channels or switch frequently, Agora recommends using a wildcard Token to avoid requesting a new Token from your server each time. See [Using Wildcard Token](https://doc.shengwang.cn/doc/rtc/rn/best-practice/wildcard-token). + * @param userAccount The user's User Account. This parameter identifies the user in the real-time audio and video channel. You must set and manage the User Account yourself and ensure that each user in the same channel has a unique User Account. This parameter is required and must not exceed 255 bytes or be null. Supported character set (89 characters total): + * 26 lowercase letters a-z + * 26 uppercase letters A-Z + * 10 digits 0-9 * Space - * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * @param options The channel media options. See ChannelMediaOptions. + * "!" "#" "$" "%" "&" "(" ")" "+" "-" ":" ";" "<" "=" "." ">" "?" "@" "[" "]" "^" "_" "{" "}" "|" "~" "," + * @param options Channel media options. See ChannelMediaOptions. * * @returns - * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. For example, the token is invalid, the uid parameter is not set to an integer, or the value of a member in ChannelMediaOptions is invalid. You need to pass in a valid parameter and join the channel again. - * -3: Fails to initialize the IRtcEngine object. You need to reinitialize the IRtcEngine object. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. - * -8: The internal state of the IRtcEngine object is wrong. The typical cause is that after calling startEchoTest to start a call loop test, you call this method to join the channel without calling stopEchoTest to stop the test. You need to call stopEchoTest before calling this method. - * -17: The request to join the channel is rejected. 
The typical cause is that the user is already in the channel. Agora recommends that you use the onConnectionStateChanged callback to see whether the user is in the channel. Do not call this method to join the channel unless you receive the ConnectionStateDisconnected (1) state. - * -102: The channel name is invalid. You need to pass in a valid channel name in channelId to rejoin the channel. - * -121: The user ID is invalid. You need to pass in a valid user ID in uid to rejoin the channel. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: Invalid parameter. For example, the Token is invalid, the uid is not an integer, or ChannelMediaOptions contains invalid values. Provide valid parameters and rejoin the channel. + * -3: IRtcEngine initialization failed. Reinitialize the IRtcEngine object. + * -7: IRtcEngine is not initialized. Initialize IRtcEngine before calling this method. + * -8: Internal state error of IRtcEngine. Possible reason: startEchoTest was called but stopEchoTest was not called before joining the channel. Call stopEchoTest before this method. + * -17: Join channel request rejected. Possible reason: the user is already in the channel. Use the onConnectionStateChanged callback to check if the user is in the channel. Do not call this method again unless the state is ConnectionStateDisconnected (1). + * -102: Invalid channel name. Provide a valid channelId and rejoin the channel. + * -121: Invalid user ID. Provide a valid uid and rejoin the channel. */ abstract joinChannelWithUserAccountEx( token: string, @@ -6690,133 +7043,138 @@ export abstract class IRtcEngine { ): number; /** - * Gets the user information by passing in the user account. + * Gets user information by User Account. 
* - * After a remote user joins the channel, the SDK gets the UID and user account of the remote user, caches them in a mapping table object, and triggers the onUserInfoUpdated callback on the local client. After receiving the callback, you can call this method and pass in the user account to get the UID of the remote user from the UserInfo object. + * After a remote user joins the channel, the SDK obtains the UID and User Account of the remote user, then caches a mapping table containing the UID and User Account of the remote user, and triggers the onUserInfoUpdated callback locally. After receiving the callback, call this method and pass in the User Account to get the UserInfo object containing the specified user's UID. * - * @param userAccount The user account. + * @param userAccount User Account. * * @returns - * A pointer to the UserInfo instance, if the method call succeeds. - * If the call fails, returns null. + * The UserInfo object, if the method call succeeds. null, if the method call fails. */ abstract getUserInfoByUserAccount(userAccount: string): UserInfo; /** - * Gets the user information by passing in the user ID. + * Gets user information by UID. * - * After a remote user joins the channel, the SDK gets the UID and user account of the remote user, caches them in a mapping table object, and triggers the onUserInfoUpdated callback on the local client. After receiving the callback, you can call this method and pass in the UID to get the user account of the specified user from the UserInfo object. + * After a remote user joins the channel, the SDK obtains the UID and User Account of the remote user, then caches a mapping table containing the UID and User Account of the remote user, and triggers the onUserInfoUpdated callback locally. After receiving the callback, call this method and pass in the UID to get the UserInfo object containing the specified user's User Account. * - * @param uid The user ID. + * @param uid User ID. 
* * @returns - * A pointer to the UserInfo instance, if the method call succeeds. - * If the call fails, returns null. + * The UserInfo object, if the method call succeeds. null, if the method call fails. */ abstract getUserInfoByUid(uid: number): UserInfo; /** - * Starts relaying media streams across channels or updates channels for media relay. + * Starts or updates cross-channel media stream forwarding. * - * The first successful call to this method starts relaying media streams from the source channel to the destination channels. To relay the media stream to other channels, or exit one of the current media relays, you can call this method again to update the destination channels. This feature supports relaying media streams to a maximum of six destination channels. After a successful method call, the SDK triggers the onChannelMediaRelayStateChanged callback, and this callback returns the state of the media stream relay. Common states are as follows: - * If the onChannelMediaRelayStateChanged callback returns RelayStateRunning (2) and RelayOk (0), it means that the SDK starts relaying media streams from the source channel to the destination channel. - * If the onChannelMediaRelayStateChanged callback returns RelayStateFailure (3), an exception occurs during the media stream relay. - * Call this method after joining the channel. - * This method takes effect only when you are a host in a live streaming channel. - * The relaying media streams across channels function needs to be enabled by contacting. - * Agora does not support string user accounts in this API. + * The first successful call to this method starts media stream forwarding across channels. To forward streams to multiple destination channels or to leave a forwarding channel, you can call this method again to add or remove destination channels. This feature supports forwarding media streams to up to 6 destination channels. 
+ * After successfully calling this method, the SDK triggers the onChannelMediaRelayStateChanged callback to report the current cross-channel media stream forwarding state. Common states include: + * If the onChannelMediaRelayStateChanged callback reports RelayStateRunning (2) and RelayOk (0), it means the SDK has started forwarding media streams between the source and destination channels. + * If the callback reports RelayStateFailure (3), it means an error occurred in cross-channel media stream forwarding. + * Call this method after successfully joining a channel. + * In a live streaming scenario, only users with the broadcaster role can call this method. + * To use the cross-channel media stream forwarding feature, you need to [contact technical support](https://ticket.shengwang.cn/) to enable it. + * This feature does not support string-type UIDs. * - * @param configuration The configuration of the media stream relay. See ChannelMediaRelayConfiguration. + * @param configuration Configuration for cross-channel media stream forwarding. See ChannelMediaRelayConfiguration. * * @returns - * 0: Success. - * < 0: Failure. - * -1: A general error occurs (no specified reason). - * -2: The parameter is invalid. - * -8: Internal state error. Probably because the user is not a broadcaster. + * 0: The method call was successful. + * < 0: The method call failed. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -1: General error (not specifically classified). + * -2: Invalid parameter. + * -8: Internal state error. Possibly because the user role is not broadcaster. */ abstract startOrUpdateChannelMediaRelay( configuration: ChannelMediaRelayConfiguration ): number; /** - * Stops the media stream relay. Once the relay stops, the host quits all the target channels. + * Stops the media stream relay across channels. Once stopped, the host leaves all destination channels. 
* - * After a successful method call, the SDK triggers the onChannelMediaRelayStateChanged callback. If the callback reports RelayStateIdle (0) and RelayOk (0), the host successfully stops the relay. If the method call fails, the SDK triggers the onChannelMediaRelayStateChanged callback with the RelayErrorServerNoResponse (2) or RelayErrorServerConnectionLost (8) status code. You can call the leaveChannel method to leave the channel, and the media stream relay automatically stops. + * After a successful call, the SDK triggers the onChannelMediaRelayStateChanged callback. If it reports RelayStateIdle (0) and RelayOk (0), it indicates that the media stream relay has stopped. If the method call fails, the SDK triggers the onChannelMediaRelayStateChanged callback and reports error codes RelayErrorServerNoResponse (2) or RelayErrorServerConnectionLost (8). You can call the leaveChannel method to leave the channel, and the media stream relay will automatically stop. * * @returns * 0: Success. - * < 0: Failure. - * -5: The method call was rejected. There is no ongoing channel media relay. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. + * -5: The method call is rejected. There is no ongoing media stream relay across channels. */ abstract stopChannelMediaRelay(): number; /** - * Pauses the media stream relay to all target channels. + * Pauses media stream forwarding to all destination channels. * - * After the cross-channel media stream relay starts, you can call this method to pause relaying media streams to all target channels; after the pause, if you want to resume the relay, call resumeAllChannelMediaRelay. Call this method after startOrUpdateChannelMediaRelay. + * After starting media stream forwarding across channels, you can call this method to pause forwarding to all channels. To resume forwarding, call the resumeAllChannelMediaRelay method. 
You must call this method after calling startOrUpdateChannelMediaRelay to start media stream forwarding across channels. * * @returns - * 0: Success. - * < 0: Failure. - * -5: The method call was rejected. There is no ongoing channel media relay. + * 0: The method call was successful. + * < 0: The method call failed. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -5: This method call was rejected. No ongoing cross-channel media stream forwarding exists. */ abstract pauseAllChannelMediaRelay(): number; /** - * Resumes the media stream relay to all target channels. + * Resumes media stream forwarding to all destination channels. * - * After calling the pauseAllChannelMediaRelay method, you can call this method to resume relaying media streams to all destination channels. Call this method after pauseAllChannelMediaRelay. + * After calling the pauseAllChannelMediaRelay method, you can call this method to resume media stream forwarding to all destination channels. You must call this method after pauseAllChannelMediaRelay. * * @returns - * 0: Success. - * < 0: Failure. - * -5: The method call was rejected. There is no paused channel media relay. + * 0: The method call was successful. + * < 0: The method call failed. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -5: This method call was rejected. No paused cross-channel media stream forwarding exists. */ abstract resumeAllChannelMediaRelay(): number; /** - * Sets the audio profile of the audio streams directly pushed to the CDN by the host. - * - * When you set the publishMicrophoneTrack or publishCustomAudioTrack in the DirectCdnStreamingMediaOptions as true to capture audios, you can call this method to set the audio profile. + * Sets the audio encoding profile for direct CDN streaming from the host. 
* - * @param profile The audio profile, including the sampling rate, bitrate, encoding mode, and the number of channels. See AudioProfileType. + * Deprecated Deprecated since v4.6.2. This method is only effective for audio collected from the microphone or custom audio sources, i.e., when publishMicrophoneTrack or publishCustomAudioTrack is set to true in DirectCdnStreamingMediaOptions. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setDirectCdnStreamingAudioConfiguration( profile: AudioProfileType ): number; /** - * Sets the video profile of the media streams directly pushed to the CDN by the host. + * Sets the video encoding profile for direct CDN streaming from the host. * - * This method only affects video streams captured by cameras or screens, or from custom video capture sources. That is, when you set publishCameraTrack or publishCustomVideoTrack in DirectCdnStreamingMediaOptions as true to capture videos, you can call this method to set the video profiles. If your local camera does not support the video resolution you set,the SDK automatically adjusts the video resolution to a value that is closest to your settings for capture, encoding or streaming, with the same aspect ratio as the resolution you set. You can get the actual resolution of the video streams through the onDirectCdnStreamingStats callback. + * Deprecated Deprecated since v4.6.2. This method is only effective for video collected from the camera, screen sharing, or custom video sources, i.e., when publishCameraTrack or publishCustomVideoTrack is set to true in DirectCdnStreamingMediaOptions. + * If the resolution you set exceeds the capabilities of your camera device, the SDK adapts the resolution to the closest supported value with the same aspect ratio for capturing, encoding, and streaming. 
You can use the onDirectCdnStreamingStats callback to get the actual resolution of the pushed video stream. * - * @param config Video profile. See VideoEncoderConfiguration. During CDN live streaming, Agora only supports setting OrientationMode as OrientationFixedLandscape or OrientationFixedPortrait. + * @param config Video encoding configuration. See VideoEncoderConfiguration. When streaming directly to CDN, the SDK currently only supports setting OrientationMode to landscape (OrientationFixedLandscape) or portrait (OrientationFixedPortrait). * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setDirectCdnStreamingVideoConfiguration( config: VideoEncoderConfiguration ): number; /** - * Starts pushing media streams to the CDN directly. + * Starts direct CDN streaming from the host. * - * Aogra does not support pushing media streams to one URL repeatedly. Media options Agora does not support setting the value of publishCameraTrack and publishCustomVideoTrack as true, or the value of publishMicrophoneTrack and publishCustomAudioTrack as true at the same time. When choosing media setting options (DirectCdnStreamingMediaOptions), you can refer to the following examples: If you want to push audio and video streams captured by the host from a custom source, the media setting options should be set as follows: publishCustomAudioTrack is set as true and call the pushAudioFrame method publishCustomVideoTrack is set as true and call the pushVideoFrame method publishCameraTrack is set as false (the default value) publishMicrophoneTrack is set as false (the default value) As of v4.2.0, Agora SDK supports audio-only live streaming. You can set publishCustomAudioTrack or publishMicrophoneTrack in DirectCdnStreamingMediaOptions as true and call pushAudioFrame to push audio streams. 
Agora only supports pushing one audio and video streams or one audio streams to CDN. + * Deprecated Deprecated since v4.6.2. The SDK does not support pushing streams to the same URL simultaneously. + * Media options: + * The SDK does not support setting both publishCameraTrack and publishCustomVideoTrack to true, nor both publishMicrophoneTrack and publishCustomAudioTrack to true. You can configure the media options (DirectCdnStreamingMediaOptions) based on your scenario. For example: + * If you want to push custom audio and video streams from the host, set the media options as follows: + * Set publishCustomAudioTrack to true and call pushAudioFrame + * Set publishCustomVideoTrack to true and call pushVideoFrame + * Ensure publishCameraTrack is false (default) + * Ensure publishMicrophoneTrack is false (default) Since v4.2.0, the SDK supports pushing audio-only streams. You can set publishCustomAudioTrack or publishMicrophoneTrack to true in DirectCdnStreamingMediaOptions and call pushAudioFrame to push audio-only streams. * * @param eventHandler See onDirectCdnStreamingStateChanged and onDirectCdnStreamingStats. - * @param publishUrl The CDN live streaming URL. - * @param options The media setting options for the host. See DirectCdnStreamingMediaOptions. + * @param publishUrl CDN streaming URL. + * @param options Media options for the host. See DirectCdnStreamingMediaOptions. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract startDirectCdnStreaming( eventHandler: IDirectCdnStreamingEventHandler, @@ -6825,11 +7183,13 @@ export abstract class IRtcEngine { ): number; /** - * Stops pushing media streams to the CDN directly. + * Stops direct CDN streaming from the host. + * + * Deprecated Deprecated since v4.6.2. * * @returns - * 0: Success. - * < 0: Failure. 
+ * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopDirectCdnStreaming(): number; @@ -6841,19 +7201,20 @@ export abstract class IRtcEngine { ): number; /** - * Enables the virtual metronome. + * Starts the virtual metronome. * - * After enabling the virtual metronome, the SDK plays the specified audio effect file from the beginning, and controls the playback duration of each file according to beatsPerMinute you set in AgoraRhythmPlayerConfig. For example, if you set beatsPerMinute as 60, the SDK plays one beat every second. If the file duration exceeds the beat duration, the SDK only plays the audio within the beat duration. - * By default, the sound of the virtual metronome is published in the channel. If you want the sound to be heard by the remote users, you can set publishRhythmPlayerTrack in ChannelMediaOptions as true. + * Deprecated Deprecated since v4.6.2. + * After the virtual metronome is enabled, the SDK starts playing the specified audio files from the beginning and controls the playback duration of each file based on the beatsPerMinute you set in AgoraRhythmPlayerConfig. For example, if beatsPerMinute is set to 60, the SDK plays one beat per second. If the file duration exceeds the beat duration, the SDK only plays the portion of the audio corresponding to the beat duration. + * By default, the sound of the virtual metronome is not published to remote users. If you want remote users to hear the sound of the virtual metronome, you can set publishRhythmPlayerTrack in ChannelMediaOptions to true after calling this method. * - * @param sound1 The absolute path or URL address (including the filename extensions) of the file for the downbeat. For example, C:\music\audio.mp4. For the audio file formats supported by this method, see What formats of audio files does the Agora RTC SDK support. 
- * @param sound2 The absolute path or URL address (including the filename extensions) of the file for the upbeats. For example, C:\music\audio.mp4. For the audio file formats supported by this method, see What formats of audio files does the Agora RTC SDK support. - * @param config The metronome configuration. See AgoraRhythmPlayerConfig. + * @param sound1 The absolute path or URL of the strong beat file, including the file name and extension. For example, C:\music\audio.mp4. For supported audio formats, see [Supported Audio Formats in RTC SDK](https://doc.shengwang.cn/faq/general-product-inquiry/audio-format). + * @param sound2 The absolute path or URL of the weak beat file, including the file name and extension. For example, C:\music\audio.mp4. For supported audio formats, see [Supported Audio Formats in RTC SDK](https://doc.shengwang.cn/faq/general-product-inquiry/audio-format). + * @param config Metronome configuration. See AgoraRhythmPlayerConfig. * * @returns * 0: Success. - * < 0: Failure. - * -22: Cannot find audio effect files. Please set the correct paths for sound1 and sound2. + * < 0: Failure + * -22: Audio file not found. Please provide valid sound1 and sound2. */ abstract startRhythmPlayer( sound1: string, @@ -6862,58 +7223,64 @@ export abstract class IRtcEngine { ): number; /** - * Disables the virtual metronome. + * Stops the virtual metronome. * - * After calling startRhythmPlayer, you can call this method to disable the virtual metronome. + * After calling startRhythmPlayer, you can call this method to stop the virtual metronome. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopRhythmPlayer(): number; /** * Configures the virtual metronome. * - * After calling startRhythmPlayer, you can call this method to reconfigure the virtual metronome. 
- * After enabling the virtual metronome, the SDK plays the specified audio effect file from the beginning, and controls the playback duration of each file according to beatsPerMinute you set in AgoraRhythmPlayerConfig. For example, if you set beatsPerMinute as 60, the SDK plays one beat every second. If the file duration exceeds the beat duration, the SDK only plays the audio within the beat duration. - * By default, the sound of the virtual metronome is published in the channel. If you want the sound to be heard by the remote users, you can set publishRhythmPlayerTrack in ChannelMediaOptions as true. + * Deprecated Deprecated since v4.6.2. + * After calling startRhythmPlayer, you can call this method to reconfigure the virtual metronome. + * After the virtual metronome is enabled, the SDK starts playing the specified audio files from the beginning and controls the playback duration of each file based on the beatsPerMinute you set in AgoraRhythmPlayerConfig. For example, if beatsPerMinute is set to 60, the SDK plays one beat per second. If the file duration exceeds the beat duration, the SDK only plays the portion of the audio corresponding to the beat duration. + * By default, the sound of the virtual metronome is not published to remote users. If you want remote users to hear the sound of the virtual metronome, you can set publishRhythmPlayerTrack in ChannelMediaOptions to true after calling this method. * - * @param config The metronome configuration. See AgoraRhythmPlayerConfig. + * @param config Metronome configuration. See AgoraRhythmPlayerConfig. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract configRhythmPlayer(config: AgoraRhythmPlayerConfig): number; /** - * Takes a snapshot of a video stream. + * Takes a snapshot of the video. 
* - * This method takes a snapshot of a video stream from the specified user, generates a JPG image, and saves it to the specified path. + * This method captures a snapshot of the specified user's video stream, generates a JPG image, and saves it to the specified path. + * When this method returns, the SDK has not actually captured the snapshot. + * When used for local video snapshot, it captures the video stream specified for publishing in ChannelMediaOptions. + * If the video has been pre-processed, such as adding watermark or beauty effects, the snapshot will include those effects. * - * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video. - * @param filePath The local path (including filename extensions) of the snapshot. For example: + * @param uid User ID. Set to 0 to capture the local user's video. + * @param filePath Make sure the directory exists and is writable. Local path to save the snapshot, including file name and format. For example: * iOS: /App Sandbox/Library/Caches/example.jpg - * Android: /storage/emulated/0/Android/data//files/example.jpg Ensure that the path you specify exists and is writable. + * Android: /storage/emulated/0/Android/data//files/example.jpg * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract takeSnapshot(uid: number, filePath: string): number; /** - * Enables or disables video screenshot and upload. + * Enables/disables local snapshot upload. * - * When video screenshot and upload function is enabled, the SDK takes screenshots and uploads videos sent by local users based on the type and frequency of the module you set in ContentInspectConfig. After video screenshot and upload, the Agora server sends the callback notification to your app server in HTTPS requests and sends all screenshots to the third-party cloud storage service. 
+ * After local snapshot upload is enabled, the SDK captures and uploads snapshots of the video sent by the local user based on the module type and frequency you set in ContentInspectConfig. Once the snapshot is complete, the Agora server sends a callback notification to your server via HTTPS and uploads all snapshots to your specified third-party cloud storage. + * Before calling this method, make sure you have enabled the local snapshot upload service in the Agora Console. + * When using the Agora self-developed plugin for video moderation (ContentInspectSupervision), you must integrate the local snapshot upload dynamic library libagora_content_inspect_extension.dll. Deleting this library will prevent the local snapshot upload function from working properly. * - * @param enabled Whether to enalbe video screenshot and upload: true : Enables video screenshot and upload. false : Disables video screenshot and upload. - * @param config Screenshot and upload configuration. See ContentInspectConfig. + * @param enabled Specifies whether to enable local snapshot upload: true : Enable local snapshot upload. false : Disable local snapshot upload. + * @param config Configuration for local snapshot upload. See ContentInspectConfig. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableContentInspect( enabled: boolean, @@ -6921,16 +7288,16 @@ export abstract class IRtcEngine { ): number; /** - * Adjusts the volume of the custom audio track played remotely. + * Adjusts the playback volume of a custom audio capture track on the remote end. * - * Ensure you have called the createCustomAudioTrack method to create a custom audio track before calling this method. If you want to change the volume of the audio played remotely, you need to call this method again. 
+ * After calling this method to set the playback volume of the audio on the remote end, you can call this method again to readjust the volume. Before calling this method, make sure you have already called the createCustomAudioTrack method to create a custom audio capture track. * - * @param trackId The audio track ID. Set this parameter to the custom audio track ID returned in createCustomAudioTrack. - * @param volume The volume of the audio source. The value can range from 0 to 100. 0 means mute; 100 means the original volume. + * @param trackId Audio track ID. Set this parameter to the custom audio track ID returned by the createCustomAudioTrack method. + * @param volume Playback volume of the custom captured audio, in the range [0,100]. 0 means mute, 100 means original volume. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustCustomAudioPublishVolume( trackId: number, @@ -6938,16 +7305,16 @@ export abstract class IRtcEngine { ): number; /** - * Adjusts the volume of the custom audio track played locally. + * Adjusts the playback volume of a custom audio capture track locally. * - * Ensure you have called the createCustomAudioTrack method to create a custom audio track before calling this method. If you want to change the volume of the audio to be played locally, you need to call this method again. + * After calling this method to set the local playback volume of audio, if you want to readjust the volume, you can call this method again. Before calling this method, make sure you have already called the createCustomAudioTrack method to create a custom audio capture track. * - * @param trackId The audio track ID. Set this parameter to the custom audio track ID returned in createCustomAudioTrack. - * @param volume The volume of the audio source. The value can range from 0 to 100. 
0 means mute; 100 means the original volume. + * @param trackId Audio track ID. Set this parameter to the custom audio track ID returned by the createCustomAudioTrack method. + * @param volume Playback volume of the custom captured audio, ranging from [0, 100]. 0 means mute, 100 means original volume. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call was successful. + * < 0: The method call failed. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustCustomAudioPlayoutVolume( trackId: number, @@ -6955,20 +7322,24 @@ export abstract class IRtcEngine { ): number; /** - * Sets up cloud proxy service. + * Sets the cloud proxy service. * - * When users' network access is restricted by a firewall, configure the firewall to allow specific IP addresses and ports provided by Agora; then, call this method to enable the cloud proxyType and set the cloud proxy type with the proxyType parameter. After successfully connecting to the cloud proxy, the SDK triggers the onConnectionStateChanged (ConnectionStateConnecting, ConnectionChangedSettingProxyServer) callback. To disable the cloud proxy that has been set, call the setCloudProxy (NoneProxy). To change the cloud proxy type that has been set, call the setCloudProxy (NoneProxy) first, and then call the setCloudProxy to set the proxyType you want. - * Agora recommends that you call this method before joining a channel. - * When a user is behind a firewall and uses the Force UDP cloud proxy, the services for Media Push and cohosting across channels are not available. - * When you use the Force TCP cloud proxy, note that an error would occur when calling the startAudioMixing method to play online music files in the HTTP protocol. The services for Media Push and cohosting across channels use the cloud proxy with the TCP protocol. 
+ * When a user's network is restricted by a firewall, you need to add the IP addresses and port numbers provided by Agora to the firewall whitelist, then call this method to enable the cloud proxy and set the proxy type via the proxyType parameter.
+ * After successfully connecting to the cloud proxy, the SDK triggers the onConnectionStateChanged (ConnectionStateConnecting, ConnectionChangedSettingProxyServer) callback.
+ * To disable a configured Force UDP or Force TCP cloud proxy, call setCloudProxy(NoneProxy).
+ * To change the configured cloud proxy type, first call setCloudProxy(NoneProxy), then call setCloudProxy again with the desired proxyType.
+ * It is recommended to call this method outside the channel.
+ * If the user is in an intranet firewall environment, the features of CDN live streaming and cross-channel media relay are not available when using Force UDP cloud proxy.
+ * When using Force TCP cloud proxy, online audio files using HTTP protocol cannot be played via startAudioMixing. CDN live streaming and cross-channel media relay use TCP cloud proxy.
 *
- * @param proxyType The type of the cloud proxy. See CloudProxyType. This parameter is mandatory. The SDK reports an error if you do not pass in a value.
+ * @param proxyType Cloud proxy type. See CloudProxyType.
+ * This parameter is required. If not set, the SDK returns an error.
 *
 * @returns
 * 0: Success.
- * < 0: Failure.
- * -2: The parameter is invalid.
- * -7: The SDK is not initialized.
+ * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting.
+ * -2: Invalid parameter.
+ * -7: SDK not initialized.
 */
 abstract setCloudProxy(proxyType: CloudProxyType): number;
 
@@ -6978,15 +7349,15 @@ export abstract class IRtcEngine {
   abstract setLocalAccessPoint(config: LocalAccessPointConfiguration): number;
 
   /**
-   * Sets audio advanced options.
+   * Sets advanced audio options.
* - * If you have advanced audio processing requirements, such as capturing and sending stereo audio, you can call this method to set advanced audio options. Call this method after calling joinChannel, enableAudio and enableLocalAudio. + * If you have advanced requirements for audio processing, such as capturing and sending stereo sound, you can call this method to set advanced audio options. You need to call this method before joinChannel, enableAudio, and enableLocalAudio. * - * @param options The advanced options for audio. See AdvancedAudioOptions. + * @param options Advanced audio options. See AdvancedAudioOptions. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setAdvancedAudioOptions( options: AdvancedAudioOptions, @@ -6999,16 +7370,17 @@ export abstract class IRtcEngine { abstract setAVSyncSource(channelId: string, uid: number): number; /** - * Sets whether to replace the current video feeds with images when publishing video streams. + * Enables or disables image placeholder streaming. * - * When publishing video streams, you can call this method to replace the current video feeds with custom images. Once you enable this function, you can select images to replace the video feeds through the ImageTrackOptions parameter. If you disable this function, the remote users see the video feeds that you publish. + * When publishing a video stream, you can call this method to use a custom image to replace the current video stream for streaming. + * After enabling this feature, you can customize the placeholder image using the ImageTrackOptions parameter. After disabling the feature, remote users will continue to see the current published video stream. 
* - * @param enable Whether to replace the current video feeds with custom images: true : Replace the current video feeds with custom images. false : (Default) Do not replace the current video feeds with custom images. - * @param options Image configurations. See ImageTrackOptions. + * @param enable Whether to enable image placeholder streaming: true : Enable image placeholder streaming. false : (Default) Disable image placeholder streaming. + * @param options Placeholder image settings. See ImageTrackOptions. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableVideoImageSource( enable: boolean, @@ -7016,91 +7388,93 @@ export abstract class IRtcEngine { ): number; /** - * Gets the current Monotonic Time of the SDK. + * Gets the current Monotonic Time from the SDK. * - * Monotonic Time refers to a monotonically increasing time series whose value increases over time. The unit is milliseconds. In custom video capture and custom audio capture scenarios, in order to ensure audio and video synchronization, Agora recommends that you call this method to obtain the current Monotonic Time of the SDK, and then pass this value into the timestamp parameter in the captured video frame (VideoFrame) and audio frame (AudioFrame). + * Monotonic Time refers to a monotonically increasing time sequence whose value increases over time. The unit is milliseconds. + * In scenarios such as custom video capture and custom audio capture, to ensure audio-video synchronization, Agora recommends that you call this method to get the current Monotonic Time from the SDK and pass this value to the timestamp parameter of the captured VideoFrame or AudioFrame. * * @returns - * ≥ 0: The method call is successful, and returns the current Monotonic Time of the SDK (in milliseconds). - * < 0: Failure. + * ≥ 0: Success. 
Returns the current Monotonic Time (milliseconds) from the SDK. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract getCurrentMonotonicTimeInMs(): number; /** - * Gets the type of the local network connection. + * Gets the local network connection type. * - * You can use this method to get the type of network in use at any stage. You can call this method either before or after joining a channel. + * You can call this method at any time to get the current network type in use. This method can be called before and after joining a channel. * * @returns - * ≥ 0: The method call is successful, and the local network connection type is returned. - * 0: The SDK disconnects from the network. - * 1: The network type is LAN. - * 2: The network type is Wi-Fi (including hotspots). - * 3: The network type is mobile 2G. - * 4: The network type is mobile 3G. - * 5: The network type is mobile 4G. - * 6: The network type is mobile 5G. - * < 0: The method call failed with an error code. - * -1: The network type is unknown. + * ≥ 0: Success. Returns the local network connection type. + * 0: Network disconnected. + * 1: LAN. + * 2: Wi-Fi (including hotspot). + * 3: 2G mobile network. + * 4: 3G mobile network. + * 5: 4G mobile network. + * 6: 5G mobile network. + * < 0: Failure. Returns an error code. + * -1: Unknown network connection type. */ abstract getNetworkType(): number; /** - * Provides technical preview functionalities or special customizations by configuring the SDK with JSON options. + * The SDK's JSON configuration, used for technical preview or customized features. * - * @param parameters Pointer to the set parameters in a JSON string. + * @param parameters Parameters in JSON string format. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setParameters(parameters: string): number; /** - * Enables tracing the video frame rendering process. + * Starts video frame rendering tracing. * - * The SDK starts tracing the rendering status of the video frames in the channel from the moment this method is successfully called and reports information about the event through the onVideoRenderingTracingResult callback. - * If you have not called this method, the SDK tracks the rendering events of the video frames from the moment you call joinChannel to join the channel. You can call this method at an appropriate time according to the actual application scenario to set the starting position for tracking video rendering events. - * After the local user leaves the current channel, the SDK automatically tracks the video rendering events from the moment you join a channel. + * After this method is successfully called, the SDK uses the time of the call as the starting point and reports video frame rendering information through the onVideoRenderingTracingResult callback. + * If you do not call this method, the SDK uses the time of calling joinChannel to join the channel as the default starting point and automatically starts tracing video rendering events. You can call this method at an appropriate time based on your business scenario to customize the tracing point. + * After leaving the current channel, the SDK automatically resets the tracing point to the next time you join a channel. * * @returns * 0: Success. - * < 0: Failure. - * -7: The method is called before IRtcEngine is initialized. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -7: IRtcEngine is not initialized before calling the method. 
*/ abstract startMediaRenderingTracing(): number; /** - * Enables audio and video frame instant rendering. + * Enables accelerated rendering of audio and video frames. * - * After successfully calling this method, the SDK enables the instant frame rendering mode, which can speed up the first frame rendering after the user joins the channel. + * After successfully calling this method, the SDK enables accelerated rendering for both video and audio frames, which speeds up the time to first frame and first audio after a user joins a channel. Both broadcaster and audience must call this method to experience accelerated audio and video rendering. + * Once this method is successfully called, you can only disable accelerated rendering by calling the release method to destroy the IRtcEngine object. * * @returns * 0: Success. - * < 0: Failure. - * -7: The method is called before IRtcEngine is initialized. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -7: IRtcEngine not initialized before calling the method. */ abstract enableInstantMediaRendering(): number; /** * Gets the current NTP (Network Time Protocol) time. * - * In the real-time chorus scenario, especially when the downlink connections are inconsistent due to network issues among multiple receiving ends, you can call this method to obtain the current NTP time as the reference time, in order to align the lyrics and music of multiple receiving ends and achieve chorus synchronization. + * In real-time chorus scenarios, especially when downlink inconsistencies occur at different receiving ends due to network issues, you can call this method to get the current NTP time as the reference time to align lyrics and music across multiple receivers for chorus synchronization. * * @returns - * The Unix timestamp (ms) of the current NTP time. + * The current NTP time as a Unix timestamp (milliseconds). 
*/ abstract getNtpWallTimeInMs(): number; /** * Checks whether the device supports the specified advanced feature. * - * Checks whether the capabilities of the current device meet the requirements for advanced features such as virtual background and image enhancement. + * Checks whether the current device meets the requirements for advanced features such as virtual background and beauty effects. * - * @param type The type of the advanced feature, see FeatureType. + * @param type The type of advanced feature. See FeatureType. * * @returns - * true : The current device supports the specified feature. false : The current device does not support the specified feature. + * true : The device supports the specified advanced feature. false : The device does not support the specified advanced feature. */ abstract isFeatureAvailableOnDevice(type: FeatureType): boolean; @@ -7115,7 +7489,15 @@ export abstract class IRtcEngine { abstract queryHDRCapability(videoModule: VideoModuleType): HdrCapability; /** - * @ignore + * Adds a watermark image to the local video stream. + * + * Available since v4.6.2. You can use this method to overlay a watermark image on the local video stream and configure the position, size, and visibility of the watermark in the preview using WatermarkConfig. + * + * @param configs Watermark configuration. See WatermarkConfig. + * + * @returns + * 0: Success. + * < 0: Failure. */ abstract addVideoWatermarkWithConfig(configs: WatermarkConfig): number; @@ -7133,24 +7515,27 @@ export abstract class IRtcEngine { abstract stopScreenCaptureBySourceType(sourceType: VideoSourceType): number; /** - * Releases the IRtcEngine instance. + * Destroys the IRtcEngine object. * - * This method releases all resources used by the Agora SDK. Use this method for apps in which users occasionally make voice or video calls. When users do not make calls, you can free up resources for other operations. 
After a successful method call, you can no longer use any method or callback in the SDK anymore. If you want to use the real-time communication functions again, you must call createAgoraRtcEngine and initialize to create a new IRtcEngine instance. - * This method can be called synchronously. You need to wait for the resource of IRtcEngine to be released before performing other operations (for example, create a new IRtcEngine object). Therefore, Agora recommends calling this method in the child thread to avoid blocking the main thread. - * Agora does not recommend you calling release in any callback of the SDK. Otherwise, the SDK cannot release the resources until the callbacks return results, which may result in a deadlock. + * This method releases all resources used by the SDK. Some apps only use real-time audio and video communication when needed, and release the resources when not in use for other operations. This method is suitable for such cases. + * After calling this method, you can no longer use other methods and callbacks of the SDK. To use the real-time audio and video communication function again, you must call createAgoraRtcEngine and initialize in sequence to create a new IRtcEngine object. + * This method is a synchronous call. You need to wait for the IRtcEngine resources to be released before performing other operations (such as creating a new IRtcEngine object), so it is recommended to call this method in a sub-thread to avoid blocking the main thread. + * It is not recommended to call release in the SDK's callback, otherwise the SDK will wait for the callback to return before recycling the related object resources, which may cause a deadlock. * - * @param sync Whether the method is called synchronously: true : Synchronous call. false : Asynchronous call. Currently this method only supports synchronous calls. Do not set this parameter to this value. + * @param sync Whether the method is a synchronous call: true : The method is synchronous. 
false : The method is asynchronous. Currently, only synchronous calls are supported. Do not set this parameter to this value. */ abstract release(sync?: boolean): void; /** - * Enables the local video preview. + * Starts video preview. * - * You can call this method to enable local video preview. + * This method starts the local video preview. + * Local preview enables mirroring by default. + * After leaving the channel, local preview remains active. You need to call stopPreview to stop the local preview. * * @returns - * 0: Success. - * < 0: Failure. + * 0: Method call succeeds. + * < 0: Method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract startPreviewWithoutSourceType(): number; @@ -7158,7 +7543,7 @@ export abstract class IRtcEngine { * Gets the IAudioDeviceManager object to manage audio devices. * * @returns - * One IAudioDeviceManager object. + * An IAudioDeviceManager object. */ abstract getAudioDeviceManager(): IAudioDeviceManager; @@ -7166,35 +7551,32 @@ export abstract class IRtcEngine { * Gets the IVideoDeviceManager object to manage video devices. * * @returns - * One IVideoDeviceManager object. + * An IVideoDeviceManager object. */ abstract getVideoDeviceManager(): IVideoDeviceManager; /** - * Gets IMusicContentCenter. - * - * @returns - * One IMusicContentCenter object. + * @ignore */ abstract getMusicContentCenter(): IMusicContentCenter; /** - * Gets one IMediaEngine object. + * Gets the IMediaEngine object. * - * Make sure the IRtcEngine is initialized before you call this method. + * This method must be called after initializing the IRtcEngine object. * * @returns - * One IMediaEngine object. + * IMediaEngine object. */ abstract getMediaEngine(): IMediaEngine; /** - * Gets one ILocalSpatialAudioEngine object. + * Gets the ILocalSpatialAudioEngine object. * - * Make sure the IRtcEngine is initialized before you call this method. 
+ * This method must be called after initializing the IRtcEngine object. * * @returns - * One ILocalSpatialAudioEngine object. + * An ILocalSpatialAudioEngine object. */ abstract getLocalSpatialAudioEngine(): ILocalSpatialAudioEngine; @@ -7206,14 +7588,14 @@ export abstract class IRtcEngine { /** * Sends media metadata. * - * If the metadata is sent successfully, the SDK triggers the onMetadataReceived callback on the receiver. + * If the media metadata is sent successfully, the receiver will receive the onMetadataReceived callback. * - * @param metadata Media metadata. See Metadata. - * @param sourceType The type of the video source. See VideoSourceType. + * @param metadata The media metadata. See Metadata. + * @param sourceType The type of video source. See VideoSourceType. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract sendMetaData( metadata: Metadata, @@ -7221,15 +7603,15 @@ export abstract class IRtcEngine { ): number; /** - * Sets the maximum size of the media metadata. + * Sets the maximum size of media metadata. * - * After calling registerMediaMetadataObserver, you can call this method to set the maximum size of the media metadata. + * After calling registerMediaMetadataObserver, you can call this method to set the maximum size of media metadata. * * @param size The maximum size of media metadata. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setMaxMetadataSize(size: number): number; @@ -7248,13 +7630,13 @@ export abstract class IRtcEngine { ): void; /** - * Unregisters the encoded audio frame observer. + * Unregisters an audio encoded frame observer. * - * @param observer The encoded audio observer. See IAudioEncodedFrameObserver. 
+ * @param observer Audio encoded frame observer. See IAudioEncodedFrameObserver. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call was successful. + * < 0: The method call failed. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract unregisterAudioEncodedFrameObserver( observer: IAudioEncodedFrameObserver @@ -7263,24 +7645,26 @@ export abstract class IRtcEngine { /** * Gets the C++ handle of the Native SDK. * - * This method retrieves the C++ handle of the SDK, which is used for registering the audio and video frame observer. + * This method gets the C++ handle of the Native SDK engine, used in special scenarios such as registering audio and video callbacks. * * @returns - * The native handle of the SDK. + * The Native handle of the SDK engine. */ abstract getNativeHandle(): number; /** - * Takes a screenshot of the video at the specified observation point. + * Takes a video snapshot at a specified observation point. * - * This method takes a snapshot of a video stream from the specified user, generates a JPG image, and saves it to the specified path. + * This method captures a snapshot of the specified user's video stream, generates a JPG image, and saves it to the specified path. + * When this method returns, the SDK has not actually captured the snapshot. + * When used for local video snapshot, it captures the video stream specified for publishing in ChannelMediaOptions. * - * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video. - * @param config The configuration of the snaptshot. See SnapshotConfig. + * @param uid User ID. Set to 0 to capture the local user's video. + * @param config Snapshot settings. See SnapshotConfig. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract takeSnapshotWithConfig(uid: number, config: SnapshotConfig): number; } @@ -7300,31 +7684,31 @@ export enum QualityReportFormatType { } /** - * Media device states. + * Device state. */ export enum MediaDeviceStateType { /** - * 0: The device is ready for use. + * 0: Device is ready. */ MediaDeviceStateIdle = 0, /** - * 1: The device is in use. + * 1: Device is in use. */ MediaDeviceStateActive = 1, /** - * 2: The device is disabled. + * 2: Device is disabled. */ MediaDeviceStateDisabled = 2, /** - * 3: The device is plugged in. + * 3: Device is plugged in. */ MediaDeviceStatePluggedIn = 3, /** - * 4: The device is not found. + * 4: Device is not present. */ MediaDeviceStateNotPresent = 4, /** - * 8: The device is unplugged. + * 8: Device is unplugged. */ MediaDeviceStateUnplugged = 8, } @@ -7624,43 +8008,43 @@ export enum VideoProfileType { */ export class SDKBuildInfo { /** - * SDK build index. + * SDK build number. */ build?: number; /** - * SDK version information. String format, such as 4.0.0. + * SDK version number. Format: string, e.g., 4.0.0. */ version?: string; } /** - * The VideoDeviceInfo class that contains the ID and device name of the video devices. + * The VideoDeviceInfo class contains the video device ID and device name. */ export class VideoDeviceInfo { /** - * The device ID. + * Device ID. */ deviceId?: string; /** - * The device name. + * Device name. */ deviceName?: string; } /** - * The AudioDeviceInfo class that contains the ID, name and type of the audio devices. + * The AudioDeviceInfo class contains the audio device ID and device name. */ export class AudioDeviceInfo { /** - * The device ID. + * Device ID. */ deviceId?: string; /** - * Output parameter; indicates the type of audio devices, such as built-in, USB and HDMI. + * Audio device type, such as: built-in, USB, HDMI, etc. */ deviceTypeName?: string; /** - * The device name. + * Device name. 
*/ deviceName?: string; } diff --git a/src/IAgoraRtcEngineEx.ts b/src/IAgoraRtcEngineEx.ts index 88fad685..9b5f0b76 100644 --- a/src/IAgoraRtcEngineEx.ts +++ b/src/IAgoraRtcEngineEx.ts @@ -32,47 +32,48 @@ import { } from './IAgoraRtcEngine'; /** - * Contains connection information. + * Class containing connection information. */ export class RtcConnection { /** - * The channel name. + * Channel name. */ channelId?: string; /** - * The ID of the local user. + * Local user ID. */ localUid?: number; } /** - * This interface class contains multi-channel methods. + * Interface class that provides multi-channel methods. * - * Inherited from IRtcEngine. + * Inherits from IRtcEngine. */ export abstract class IRtcEngineEx extends IRtcEngine { /** * Joins a channel. * - * You can call this method multiple times to join more than one channel. If you want to join the same channel from different devices, ensure that the user IDs are different for all devices. + * Call this method to join multiple channels simultaneously. If you want to join the same channel on different devices, make sure the user IDs used on each device are different. If you are already in a channel, you cannot use the same user ID to join the same channel again. + * Before joining a channel, make sure the App ID used to generate the Token is the same as the one used in the initialize method to initialize the engine, otherwise joining the channel with the Token will fail. * - * @param token The token generated on your server for authentication. - * (Recommended) If your project has enabled the security mode (using APP ID and Token for authentication), this parameter is required. - * If you have only enabled the testing mode (using APP ID for authentication), this parameter is optional. You will automatically exit the channel 24 hours after successfully joining in. 
- * If you need to join different channels at the same time or switch between channels, Agora recommends using a wildcard token so that you don't need to apply for a new token every time joining a channel. - * @param connection The connection information. See RtcConnection. - * @param options The channel media options. See ChannelMediaOptions. + * @param token A dynamic key generated on your server for authentication. See [Use Token Authentication](https://doc.shengwang.cn/doc/rtc/rn/basic-features/token-authentication). + * (Recommended) If your project enables the security mode, i.e., uses APP ID + Token for authentication, this parameter is required. + * If your project only enables debug mode, i.e., uses only the APP ID for authentication, you can join the channel without a Token. The user will automatically leave the channel 24 hours after successfully joining. + * If you need to join multiple channels simultaneously or switch channels frequently, Agora recommends using a wildcard Token to avoid requesting a new Token from the server each time. See [Use Wildcard Token](https://doc.shengwang.cn/doc/rtc/rn/best-practice/wildcard-token). + * @param connection Connection information. See RtcConnection. + * @param options Channel media options. See ChannelMediaOptions. * * @returns - * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. For example, the token is invalid, the uid parameter is not set to an integer, or the value of a member in ChannelMediaOptions is invalid. You need to pass in a valid parameter and join the channel again. - * -3: Fails to initialize the IRtcEngine object. You need to reinitialize the IRtcEngine object. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. - * -8: The internal state of the IRtcEngine object is wrong. 
The typical cause is that after calling startEchoTest to start a call loop test, you call this method to join the channel without calling stopEchoTest to stop the test. You need to call stopEchoTest before calling this method. - * -17: The request to join the channel is rejected. The typical cause is that the user is already in the channel. Agora recommends that you use the onConnectionStateChanged callback to see whether the user is in the channel. Do not call this method to join the channel unless you receive the ConnectionStateDisconnected (1) state. - * -102: The channel name is invalid. You need to pass in a valid channel name in channelId to rejoin the channel. - * -121: The user ID is invalid. You need to pass in a valid user ID in uid to rejoin the channel. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: Invalid parameter. For example, an invalid Token is used, uid is not an integer, or a ChannelMediaOptions member is invalid. You need to provide valid parameters and rejoin the channel. + * -3: IRtcEngine object initialization failed. You need to reinitialize the IRtcEngine object. + * -7: IRtcEngine object is not initialized. You must initialize the IRtcEngine object before calling this method. + * -8: Internal state error of the IRtcEngine object. Possible reason: startEchoTest was called to start an echo test, but stopEchoTest was not called before calling this method. You must call stopEchoTest before this method. + * -17: Join channel request is rejected. Possible reason: the user is already in the channel. Use the onConnectionStateChanged callback to check if the user is in the channel. Do not call this method again unless you receive the ConnectionStateDisconnected (1) state. + * -102: Invalid channel name. You must provide a valid channel name in channelId and rejoin the channel. 
+ * -121: Invalid user ID. You must provide a valid user ID in uid and rejoin the channel. */ abstract joinChannelEx( token: string, @@ -83,16 +84,17 @@ export abstract class IRtcEngineEx extends IRtcEngine { /** * Sets channel options and leaves the channel. * - * After calling this method, the SDK terminates the audio and video interaction, leaves the current channel, and releases all resources related to the session. After calling joinChannelEx to join a channel, you must call this method to end the call, otherwise, the next call cannot be started. - * This method call is asynchronous. When this method returns, it does not necessarily mean that the user has left the channel. - * If you call leaveChannel, you will leave all the channels you have joined by calling joinChannel or joinChannelEx. + * After calling this method, the SDK stops all audio and video interactions, leaves the current channel, and releases all session-related resources. + * You must call this method after successfully joining a channel using joinChannelEx to end the call; otherwise, you cannot start a new call. + * This method is asynchronous. When the call returns, the channel has not actually been left. + * If you call leaveChannel, it leaves both joinChannel and joinChannelEx channels. If you call release immediately after this method, the SDK will not trigger the onLeaveChannel callback. * - * @param connection The connection information. See RtcConnection. - * @param options The options for leaving the channel. See LeaveChannelOptions. This parameter only supports the stopMicrophoneRecording member in the LeaveChannelOptions settings; setting other members does not take effect. + * @param connection Connection information. See RtcConnection. + * @param options Options for leaving the channel. See LeaveChannelOptions. This parameter only supports setting the stopMicrophoneRecording member in LeaveChannelOptions. Other members are not effective. * * @returns - * 0: Success. 
- * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract leaveChannelEx( connection: RtcConnection, @@ -109,17 +111,17 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Updates the channel media options after joining the channel. + * Updates the channel media options after joining a channel. * * @param options The channel media options. See ChannelMediaOptions. * @param connection The connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. - * -2: The value of a member in ChannelMediaOptions is invalid. For example, the token or the user ID is invalid. You need to fill in a valid parameter. - * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. - * -8: The internal state of the IRtcEngine object is wrong. The possible reason is that the user is not in the channel. Agora recommends that you use the onConnectionStateChanged callback to see whether the user is in the channel. If you receive the ConnectionStateDisconnected (1) or ConnectionStateFailed (5) state, the user is not in the channel. You need to call joinChannel to join a channel before calling this method. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: The value of a ChannelMediaOptions member is invalid. For example, an illegal token is used or an invalid user role is set. You need to provide valid parameters. + * -7: The IRtcEngine object is not initialized. You need to initialize the IRtcEngine object before calling this method. + * -8: The internal state of the IRtcEngine object is incorrect. A possible reason is that the user is not in a channel. 
It is recommended to determine whether the user is in a channel through the onConnectionStateChanged callback. If you receive ConnectionStateDisconnected (1) or ConnectionStateFailed (5), it means the user is not in a channel. You need to call joinChannel before calling this method. */ abstract updateChannelMediaOptionsEx( options: ChannelMediaOptions, @@ -127,16 +129,16 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Sets the video encoder configuration. + * Sets video encoding properties. * - * Sets the encoder configuration for the local video. Each configuration profile corresponds to a set of video parameters, including the resolution, frame rate, and bitrate. + * Sets the encoding properties for local video. Each video encoding configuration corresponds to a series of video-related parameter settings, including resolution, frame rate, and bitrate. The config parameter of this method sets the maximum values achievable under ideal network conditions. If the network is poor, the video engine will not use this config to render local video and will automatically downgrade to suitable video parameters. * - * @param config Video profile. See VideoEncoderConfiguration. - * @param connection The connection information. See RtcConnection. + * @param config Video encoding parameter configuration. See VideoEncoderConfiguration. + * @param connection Connection information. See RtcConnection. * * @returns - * 0: Success. - * < 0: Failure. + * 0: Method call succeeds. + * < 0: Method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setVideoEncoderConfigurationEx( config: VideoEncoderConfiguration, @@ -152,15 +154,17 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Stops or resumes receiving the audio stream of a specified user. + * Stops or resumes receiving the specified audio stream. 
+ * + * This method stops or resumes receiving the audio stream from a specified remote user. It can be called before or after joining a channel. The setting is reset after leaving the channel. * * @param uid The ID of the specified user. - * @param mute Whether to stop receiving the audio stream of the specified user: true : Stop receiving the audio stream of the specified user. false : (Default) Resume receiving the audio stream of the specified user. - * @param connection The connection information. See RtcConnection. + * @param mute Whether to stop receiving the specified audio stream: true : Stop receiving the specified audio stream. false : (Default) Continue receiving the specified audio stream. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract muteRemoteAudioStreamEx( uid: number, @@ -169,17 +173,17 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Stops or resumes receiving the video stream of a specified user. + * Stops or resumes receiving a specified video stream. * - * This method is used to stop or resume receiving the video stream of a specified user. You can call this method before or after joining a channel. If a user leaves a channel, the settings in this method become invalid. + * This method stops or resumes receiving the video stream of a specified remote user. You can call this method either before or after joining a channel. The settings of this method are reset once you leave the channel. * * @param uid The user ID of the remote user. - * @param mute Whether to stop receiving the video stream of the specified user: true : Stop receiving the video stream of the specified user. false : (Default) Resume receiving the video stream of the specified user. - * @param connection The connection information. 
See RtcConnection. + * @param mute Whether to stop receiving the video stream of a remote user: true : Stop receiving. false : (Default) Resume receiving. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract muteRemoteVideoStreamEx( uid: number, @@ -188,20 +192,20 @@ export abstract class IRtcEngineEx extends IRtcEngine { /** - * Sets the video stream type to subscribe to. + * Sets the type of video stream to subscribe to. * - * The SDK will dynamically adjust the size of the corresponding video stream based on the size of the video window to save bandwidth and computing resources. The default aspect ratio of the low-quality video stream is the same as that of the high-quality video stream. According to the current aspect ratio of the high-quality video stream, the system will automatically allocate the resolution, frame rate, and bitrate of the low-quality video stream. Depending on the default behavior of the sender and the specific settings when calling setDualStreamMode, the scenarios for the receiver calling this method are as follows: - * The SDK enables low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side by default, meaning only the high-quality video stream is transmitted. Only the receiver with the role of the host can call this method to initiate a low-quality video stream request. Once the sender receives the request, it starts automatically sending the low-quality video stream. At this point, all users in the channel can call this method to switch to low-quality video stream subscription mode. - * If the sender calls setDualStreamMode and sets mode to DisableSimulcastStream (never send low-quality video stream), then calling this method will have no effect. 
- * If the sender calls setDualStreamMode and sets mode to EnableSimulcastStream (always send low-quality video stream), both the host and audience receivers can call this method to switch to low-quality video stream subscription mode. If the publisher has already called setDualStreamModeEx and set mode to DisableSimulcastStream (never send low-quality video stream), calling this method will not take effect, you should call setDualStreamModeEx again on the sending end and adjust the settings. + * Depending on the default behavior of the sender and the specific settings of setDualStreamMode, the receiver can use this method in the following situations: + * By default, the SDK enables the low stream adaptive mode (AutoSimulcastStream) on the sender, meaning the sender only sends the high stream. Only receivers with host role can call this method to request the low stream. Once the sender receives the request, it starts sending the low stream automatically. At this point, all users in the channel can call this method to switch to low stream subscription mode. + * If the sender calls setDualStreamMode and sets mode to DisableSimulcastStream (never send low stream), this method has no effect. + * If the sender calls setDualStreamMode and sets mode to EnableSimulcastStream (always send low stream), both hosts and audience can call this method to switch to low stream subscription mode. When receiving the low video stream, the SDK dynamically adjusts the video stream size based on the size of the video window to save bandwidth and computing resources. The aspect ratio of the low stream is the same as that of the high stream. Based on the current aspect ratio of the high stream, the system automatically allocates the resolution, frame rate, and bitrate of the low stream. If the sender has already called setDualStreamModeEx and set mode to DisableSimulcastStream (never send low stream), this method has no effect. 
You need to call setDualStreamModeEx again on the sender to change the setting. * - * @param uid The user ID. - * @param streamType The video stream type, see VideoStreamType. - * @param connection The connection information. See RtcConnection. + * @param uid User ID. + * @param streamType Video stream type: VideoStreamType. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteVideoStreamTypeEx( uid: number, @@ -212,14 +216,14 @@ export abstract class IRtcEngineEx extends IRtcEngine { /** * Stops or resumes publishing the local audio stream. * - * This method does not affect any ongoing audio recording, because it does not disable the audio capture device. A successful call of this method triggers the onUserMuteAudio and onRemoteAudioStateChanged callbacks on the remote client. + * After this method is successfully called, the remote user triggers the onUserMuteAudio and onRemoteAudioStateChanged callbacks. This method does not affect the audio capture status because it does not disable the audio capturing device. * - * @param mute Whether to stop publishing the local audio stream: true : Stops publishing the local audio stream. false : (Default) Resumes publishing the local audio stream. - * @param connection The connection information. See RtcConnection. + * @param mute Whether to stop publishing the local audio stream. true : Stop publishing. false : (Default) Publish. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract muteLocalAudioStreamEx( mute: boolean, @@ -229,15 +233,15 @@ export abstract class IRtcEngineEx extends IRtcEngine { /** * Stops or resumes publishing the local video stream. * - * A successful call of this method triggers the onUserMuteVideo callback on the remote client. - * This method does not affect any ongoing video recording, because it does not disable the camera. + * After this method is successfully called, the remote user triggers the onUserMuteVideo callback. + * This method does not affect the video capture status and does not disable the camera. * - * @param mute Whether to stop publishing the local video stream. true : Stop publishing the local video stream. false : (Default) Publish the local video stream. - * @param connection The connection information. See RtcConnection. + * @param mute Whether to stop sending the local video stream. true : Stop sending the local video stream. false : (Default) Send the local video stream. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract muteLocalVideoStreamEx( mute: boolean, @@ -245,18 +249,18 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Stops or resumes subscribing to the audio streams of all remote users. + * Stops or resumes subscribing to all remote users' audio streams. * - * After successfully calling this method, the local user stops or resumes subscribing to the audio streams of all remote users, including the ones join the channel subsequent to this call. - * Call this method after joining a channel. - * If you do not want to subscribe the audio streams of remote users before joining a channel, you can set autoSubscribeAudio as false when calling joinChannel. 
+ * After successfully calling this method, the local user stops or resumes subscribing to remote users' audio streams, including those who join the channel after the method is called. + * This method must be called after joining a channel. + * To set the default behavior to not subscribe to remote users' audio streams before joining a channel, set autoSubscribeAudio to false when calling joinChannel. * - * @param mute Whether to stop subscribing to the audio streams of all remote users: true : Stops subscribing to the audio streams of all remote users. false : (Default) Subscribes to the audio streams of all remote users by default. - * @param connection The connection information. See RtcConnection. + * @param mute Whether to stop subscribing to all remote users' audio streams: true : Stop subscribing to all remote users' audio streams. false : (Default) Subscribe to all remote users' audio streams. + * @param connection Connection information. See RtcConnection. * * @returns - * 0: Success. - * < 0: Failure. + * 0: Success. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract muteAllRemoteAudioStreamsEx( mute: boolean, @@ -264,16 +268,16 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Stops or resumes subscribing to the video streams of all remote users. + * Stops or resumes subscribing to all remote users' video streams. * - * After successfully calling this method, the local user stops or resumes subscribing to the video streams of all remote users, including all subsequent users. + * After this method is successfully called, the local user stops or resumes subscribing to all remote users' video streams, including those who join the channel after this method is called. * - * @param mute Whether to stop subscribing to the video streams of all remote users. 
true : Stop subscribing to the video streams of all remote users. false : (Default) Subscribe to the video streams of all remote users by default. - * @param connection The connection information. See RtcConnection. + * @param mute Whether to stop subscribing to all remote users' video streams. true : Stop subscribing to all users' video streams. false : (Default) Subscribe to all users' video streams. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract muteAllRemoteVideoStreamsEx( mute: boolean, @@ -281,21 +285,22 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Sets the blocklist of subscriptions for audio streams. + * Sets the audio subscription blocklist. * - * You can call this method to specify the audio streams of a user that you do not want to subscribe to. + * You can call this method to specify the audio streams you do not want to subscribe to. * You can call this method either before or after joining a channel. - * The blocklist is not affected by the setting in muteRemoteAudioStream, muteAllRemoteAudioStreams, and autoSubscribeAudio in ChannelMediaOptions. - * Once the blocklist of subscriptions is set, it is effective even if you leave the current channel and rejoin the channel. - * If a user is added in the allowlist and blocklist at the same time, only the blocklist takes effect. + * The audio subscription blocklist is not affected by muteRemoteAudioStream, muteAllRemoteAudioStreams, or autoSubscribeAudio in ChannelMediaOptions. + * After setting the blocklist, if you leave and rejoin the channel, the blocklist remains effective. + * If a user is in both the audio subscription blocklist and allowlist, only the blocklist takes effect. 
* - * @param uidList The user ID list of users that you do not want to subscribe to. If you want to specify the audio streams of a user that you do not want to subscribe to, add the user ID in this list. If you want to remove a user from the blocklist, you need to call the setSubscribeAudioBlocklist method to update the user ID list; this means you only add the uid of users that you do not want to subscribe to in the new user ID list. - * @param uidNumber The number of users in the user ID list. - * @param connection The connection information. See RtcConnection. + * @param uidList User ID list of the audio subscription blocklist. + * If you want to exclude a specific user's audio stream from being subscribed to, add that user's ID to this list. To remove a user from the blocklist, you need to call the setSubscribeAudioBlocklist method again and update the user ID list to exclude the uid of the user you want to remove. + * @param uidNumber The number of users in the blocklist. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setSubscribeAudioBlocklistEx( uidList: number[], @@ -304,21 +309,22 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Sets the allowlist of subscriptions for audio streams. + * Sets the audio subscription allowlist. * - * You can call this method to specify the audio streams of a user that you want to subscribe to. - * If a user is added in the allowlist and blocklist at the same time, only the blocklist takes effect. - * You can call this method either before or after joining a channel. - * The allowlist is not affected by the setting in muteRemoteAudioStream, muteAllRemoteAudioStreams and autoSubscribeAudio in ChannelMediaOptions. 
- * Once the allowlist of subscriptions is set, it is effective even if you leave the current channel and rejoin the channel. + * You can call this method to specify the audio streams you want to subscribe to. + * This method can be called before or after joining a channel. + * The audio subscription allowlist is not affected by muteRemoteAudioStream, muteAllRemoteAudioStreams, or the autoSubscribeAudio setting in ChannelMediaOptions. + * After setting the allowlist, if you leave and rejoin the channel, the allowlist remains effective. + * If a user is included in both the audio subscription allowlist and blocklist, only the blocklist takes effect. * - * @param uidList The user ID list of users that you want to subscribe to. If you want to specify the audio streams of a user for subscription, add the user ID in this list. If you want to remove a user from the allowlist, you need to call the setSubscribeAudioAllowlist method to update the user ID list; this means you only add the uid of users that you want to subscribe to in the new user ID list. - * @param uidNumber The number of users in the user ID list. - * @param connection The connection information. See RtcConnection. + * @param uidList List of user IDs in the audio subscription allowlist. + * If you want to subscribe to a specific user's audio stream, add that user's ID to this list. To remove a user from the allowlist, call setSubscribeAudioAllowlist again with an updated list that excludes the uid of the user you want to remove. + * @param uidNumber Number of users in the allowlist. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract setSubscribeAudioAllowlistEx( uidList: number[], @@ -327,21 +333,22 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Sets the blocklist of subscriptions for video streams. + * Sets the video subscription blocklist. * - * You can call this method to specify the video streams of a user that you do not want to subscribe to. - * If a user is added in the allowlist and blocklist at the same time, only the blocklist takes effect. - * Once the blocklist of subscriptions is set, it is effective even if you leave the current channel and rejoin the channel. + * You can call this method to specify the video streams you do not want to subscribe to. * You can call this method either before or after joining a channel. - * The blocklist is not affected by the setting in muteRemoteVideoStream, muteAllRemoteVideoStreams and autoSubscribeAudio in ChannelMediaOptions. + * The video subscription blocklist is not affected by muteRemoteVideoStream, muteAllRemoteVideoStreams, or autoSubscribeVideo in ChannelMediaOptions. + * After setting the blocklist, if you leave and rejoin the channel, the blocklist remains effective. + * If a user is in both the video subscription blocklist and allowlist, only the blocklist takes effect. * - * @param uidList The user ID list of users that you do not want to subscribe to. If you want to specify the video streams of a user that you do not want to subscribe to, add the user ID of that user in this list. If you want to remove a user from the blocklist, you need to call the setSubscribeVideoBlocklist method to update the user ID list; this means you only add the uid of users that you do not want to subscribe to in the new user ID list. - * @param uidNumber The number of users in the user ID list. - * @param connection The connection information. See RtcConnection. + * @param uidList User ID list of the video subscription blocklist. 
+ * If you want to exclude a specific user's video stream from being subscribed to, add that user's ID to this list. To remove a user from the blocklist, you need to call the setSubscribeVideoBlocklist method again and update the user ID list to exclude the uid of the user you want to remove. + * @param uidNumber The number of users in the blocklist. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setSubscribeVideoBlocklistEx( uidList: number[], @@ -350,21 +357,22 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Sets the allowlist of subscriptions for video streams. + * Sets the video subscription allowlist. * - * You can call this method to specify the video streams of a user that you want to subscribe to. - * If a user is added in the allowlist and blocklist at the same time, only the blocklist takes effect. - * Once the allowlist of subscriptions is set, it is effective even if you leave the current channel and rejoin the channel. + * You can call this method to specify the video streams you want to subscribe to. * You can call this method either before or after joining a channel. - * The allowlist is not affected by the setting in muteRemoteVideoStream, muteAllRemoteVideoStreams and autoSubscribeAudio in ChannelMediaOptions. + * The video subscription allowlist is not affected by muteRemoteVideoStream, muteAllRemoteVideoStreams, or autoSubscribeVideo in ChannelMediaOptions. + * After setting the allowlist, if you leave and rejoin the channel, the allowlist remains effective. + * If a user is in both the video subscription blocklist and allowlist, only the blocklist takes effect. * - * @param uidList The user ID list of users that you want to subscribe to. 
If you want to specify the video streams of a user for subscription, add the user ID of that user in this list. If you want to remove a user from the allowlist, you need to call the setSubscribeVideoAllowlist method to update the user ID list; this means you only add the uid of users that you want to subscribe to in the new user ID list. - * @param uidNumber The number of users in the user ID list. - * @param connection The connection information. See RtcConnection. + * @param uidList User ID list of the video subscription allowlist. + * If you want to subscribe only to a specific user's video stream, add that user's ID to this list. To remove a user from the allowlist, you need to call the setSubscribeVideoAllowlist method again and update the video subscription allowlist to exclude the uid of the user you want to remove. + * @param uidNumber The number of users in the allowlist. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setSubscribeVideoAllowlistEx( uidList: number[], @@ -373,17 +381,17 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Sets options for subscribing to remote video streams. + * Sets the subscription options for remote video streams. * - * When a remote user has enabled dual-stream mode, you can call this method to choose the option for subscribing to the video streams sent by the remote user. + * When the remote user sends dual streams, you can call this method to set the subscription options for the remote video stream. * - * @param uid The user ID of the remote user. - * @param options The video subscription options. See VideoSubscriptionOptions. - * @param connection The connection information. See RtcConnection. + * @param uid Remote user ID. 
+ * @param options Subscription settings for the video stream. See VideoSubscriptionOptions. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteVideoSubscriptionOptionsEx( uid: number, @@ -392,23 +400,24 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Sets the 2D position (the position on the horizontal plane) of the remote user's voice. + * Sets the 2D position of a remote user's voice, i.e., horizontal position. * - * This method sets the voice position and volume of a remote user. When the local user calls this method to set the voice position of a remote user, the voice difference between the left and right channels allows the local user to track the real-time position of the remote user, creating a sense of space. This method applies to massive multiplayer online games, such as Battle Royale games. - * For the best voice positioning, Agora recommends using a wired headset. - * Call this method after joining a channel. + * Sets the spatial position and volume of a remote user's voice to help the local user locate the sound source. + * By calling this method, you can set the position where the remote user's voice appears. The difference between the left and right channels creates a sense of direction, allowing the user to determine the real-time position of the remote user. In multiplayer online games such as battle royale, this method can effectively enhance the sense of direction of game characters and simulate a real environment. + * For the best audio experience, it is recommended that users wear wired headphones. + * This method must be called after joining a channel. * - * @param uid The user ID of the remote user. - * @param pan The voice position of the remote user. 
The value ranges from -1.0 to 1.0: - * -1.0: The remote voice comes from the left. - * 0.0: (Default) The remote voice comes from the front. - * 1.0: The remote voice comes from the right. - * @param gain The volume of the remote user. The value ranges from 0.0 to 100.0. The default value is 100.0 (the original volume of the remote user). The smaller the value, the lower the volume. - * @param connection The connection information. See RtcConnection. + * @param uid The ID of the remote user. + * @param pan Sets the spatial position of the remote user's voice. The range is [-1.0, 1.0]: + * -1.0: Voice appears on the left. + * (Default) 0.0: Voice appears in the center. + * 1.0: Voice appears on the right. + * @param gain Sets the volume of the remote user's voice. The range is [0.0, 100.0], with a default of 100.0, representing the original volume. The smaller the value, the lower the volume. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteVoicePositionEx( uid: number, @@ -427,20 +436,18 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Sets the video display mode of a specified remote user. + * Sets the remote view display mode. * - * After initializing the video view of a remote user, you can call this method to update its rendering and mirror modes. This method affects only the video view that the local user sees. - * Call this method after rendering the RtcSurfaceView or RtcTextureView component corresponding to the remote user ID. - * During a call, you can call this method as many times as necessary to update the display mode of the video view of a remote user. 
+ * After initializing the remote user view, you can call this method to update the rendering and mirror mode of the remote user view as displayed locally. This method only affects the video image seen by the local user. * - * @param uid The user ID of the remote user. - * @param renderMode The video display mode of the remote user. See RenderModeType. + * @param uid Remote user ID. + * @param renderMode The display mode of the remote view. See RenderModeType. * @param mirrorMode The mirror mode of the remote user view. See VideoMirrorModeType. - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteRenderModeEx( uid: number, @@ -475,20 +482,20 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Adjusts the playback signal volume of a specified remote user. + * Adjusts the playback volume of a specified remote user locally. * - * You can call this method to adjust the playback volume of a specified remote user. To adjust the playback volume of different remote users, call the method as many times, once for each remote user. + * You can call this method during a call to adjust the playback volume of a specified remote user locally. To adjust the playback volume of multiple users locally, call this method multiple times. * - * @param uid The user ID of the remote user. - * @param volume The volume of the user. The value range is [0,400]. + * @param uid Remote user ID. + * @param volume Volume. The range is [0,400]. * 0: Mute. - * 100: (Default) The original volume. - * 400: Four times the original volume (amplifying the audio signals by four times). - * @param connection The connection information. See RtcConnection. + * 100: (Default) Original volume. 
+ * 400: Four times the original volume with built-in overflow protection. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract adjustUserPlaybackSignalVolumeEx( uid: number, @@ -497,27 +504,29 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Gets the current connection state of the SDK. + * Gets the current network connection state. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. * * @returns - * The current connection state. See ConnectionStateType. + * The current network connection state. See ConnectionStateType. */ abstract getConnectionStateEx(connection: RtcConnection): ConnectionStateType; /** - * Enables or disables the built-in encryption. + * Enable or disable built-in encryption. * - * After the user leaves the channel, the SDK automatically disables the built-in encryption. To enable the built-in encryption, call this method before the user joins the channel again. + * After the user leaves the channel, the SDK automatically disables encryption. To re-enable encryption, you need to call this method before the user joins the channel again. + * All users in the same channel must use the same encryption mode and key when calling this method. + * If built-in encryption is enabled, you cannot use the CDN streaming feature. * - * @param connection The connection information. See RtcConnection. - * @param enabled Whether to enable built-in encryption: true : Enable the built-in encryption. false : (Default) Disable the built-in encryption. - * @param config Built-in encryption configurations. See EncryptionConfig. + * @param connection Connection information. See RtcConnection. 
+ * @param enabled Whether to enable built-in encryption: true : Enable built-in encryption. false : (default) Disable built-in encryption. + * @param config Configure the built-in encryption mode and key. See EncryptionConfig. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableEncryptionEx( connection: RtcConnection, @@ -528,12 +537,14 @@ export abstract class IRtcEngineEx extends IRtcEngine { /** * Creates a data stream. * - * @param config The configurations for the data stream. See DataStreamConfig. - * @param connection The connection information. See RtcConnection. + * Within the lifecycle of IRtcEngine, each user can create up to 5 data streams. The data streams are destroyed when leaving the channel. You need to recreate them to use again. + * + * @param config Data stream configuration. See DataStreamConfig. + * @param connection Connection information. See RtcConnection. * * @returns - * ID of the created data stream, if the method call succeeds. - * < 0: Failure. + * The ID of the created data stream: Success. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract createDataStreamEx( config: DataStreamConfig, @@ -541,22 +552,23 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Sends data stream messages. + * Sends a data stream. * - * A successful method call triggers the onStreamMessage callback on the remote client, from which the remote user gets the stream message. A failed method call triggers the onStreamMessageError callback on the remote client. The SDK has the following restrictions on this method: - * Each client within the channel can have up to 5 data channels simultaneously, with a total shared packet bitrate limit of 30 KB/s for all data channels. 
- * Each data channel can send up to 60 packets per second, with each packet being a maximum of 1 KB. After calling createDataStreamEx, you can call this method to send data stream messages to all users in the channel. - * Call this method after joinChannelEx. - * Ensure that you call createDataStreamEx to create a data channel before calling this method. + * After calling createDataStreamEx, you can call this method to send data stream messages to all users in the channel. + * The SDK imposes the following restrictions on this method: + * Each client in the channel can have up to 5 data channels simultaneously, and the total sending bitrate of all data channels is limited to 30 KB/s. + * Each data channel can send up to 60 packets per second, with a maximum size of 1 KB per packet. After the method is successfully called, the remote end triggers the onStreamMessage callback, where the remote user can retrieve the received stream message. If the call fails, the remote end triggers the onStreamMessageError callback. + * This method must be called after joinChannelEx. + * Make sure to call createDataStreamEx to create a data channel before calling this method. * - * @param streamId The data stream ID. You can get the data stream ID by calling createDataStreamEx. - * @param data The message to be sent. - * @param length The length of the data. - * @param connection The connection information. See RtcConnection. + * @param streamId Data stream ID. Obtained via createDataStreamEx. + * @param data Data to be sent. + * @param length Length of the data. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract sendStreamMessageEx( streamId: number, @@ -587,25 +599,26 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Adds a watermark image to the local video. - * - * This method adds a PNG watermark image to the local video in the live streaming. Once the watermark image is added, all the audience in the channel (CDN audience included), and the capturing device can see and capture it. The Agora SDK supports adding only one watermark image onto a live video stream. The newly added watermark image replaces the previous one. The watermark coordinates are dependent on the settings in the setVideoEncoderConfigurationEx method: - * If the orientation mode of the encoding video (OrientationMode) is fixed landscape mode or the adaptive landscape mode, the watermark uses the landscape orientation. - * If the orientation mode of the encoding video (OrientationMode) is fixed portrait mode or the adaptive portrait mode, the watermark uses the portrait orientation. - * When setting the watermark position, the region must be less than the dimensions set in the setVideoEncoderConfigurationEx method; otherwise, the watermark image will be cropped. - * Ensure that you have called enableVideo before calling this method. - * This method supports adding a watermark image in the PNG file format only. Supported pixel formats of the PNG image are RGBA, RGB, Palette, Gray, and Alpha_gray. - * If the dimensions of the PNG image differ from your settings in this method, the image will be cropped or zoomed to conform to your settings. - * If you have enabled the local video preview by calling the startPreview method, you can use the visibleInPreview member to set whether or not the watermark is visible in the preview. - * If you have enabled the mirror mode for the local video, the watermark on the local video is also mirrored. 
To avoid mirroring the watermark, Agora recommends that you do not use the mirror and watermark functions for the local video at the same time. You can implement the watermark function in your application layer. - * - * @param watermarkUrl The local file path of the watermark image to be added. This method supports adding a watermark image from the local absolute or relative file path. - * @param options The options of the watermark image to be added. See WatermarkOptions. - * @param connection The connection information. See RtcConnection. + * Adds a local video watermark. + * + * Deprecated Deprecated: This method is deprecated. Use addVideoWatermarkWithConfigEx instead. This method adds a PNG image as a watermark to the local published live video stream. Users in the same live channel, audience of the CDN live stream, and capture devices can all see or capture the watermark image. Currently, only one watermark can be added to the live video stream. A newly added watermark replaces the previous one. + * The watermark coordinates depend on the settings in the setVideoEncoderConfigurationEx method: + * If the video encoding orientation (OrientationMode) is fixed to landscape or landscape in adaptive mode, landscape coordinates are used for the watermark. + * If the video encoding orientation (OrientationMode) is fixed to portrait or portrait in adaptive mode, portrait coordinates are used. + * When setting the watermark coordinates, the image area of the watermark must not exceed the video dimensions set in the setVideoEncoderConfigurationEx method. Otherwise, the exceeding part will be cropped. + * You must call this method after calling enableVideo. + * The watermark image must be in PNG format. This method supports all pixel formats of PNG: RGBA, RGB, Palette, Gray, and Alpha_gray. + * If the size of the PNG image to be added does not match the size you set in this method, the SDK will scale or crop the PNG image to match the setting. 
+ * If you have already called startPreview to start local video preview, the visibleInPreview parameter in this method determines whether the watermark is visible in the preview. + * If local video is set to mirror mode, the local watermark will also be mirrored. To avoid the watermark being mirrored when local users view the local video, it is recommended not to use both mirror and watermark features for local video. Implement the local watermark feature at the application level. + * + * @param watermarkUrl The local path of the watermark image to be added. This method supports adding watermark images from local absolute/relative paths. + * @param options Settings for the watermark image to be added. See WatermarkOptions. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract addVideoWatermarkEx( watermarkUrl: string, @@ -614,7 +627,16 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * @ignore + * Removes the specified watermark image from the local or remote video stream. + * + * Since Available since v4.6.2. + * + * @param id Watermark ID. + * @param connection Connection information. See RtcConnection. + * + * @returns + * 0: Success. + * < 0: Failure. */ abstract removeVideoWatermarkEx( id: string, @@ -622,20 +644,20 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Removes the watermark image from the video stream. + * Removes added video watermarks. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
*/ abstract clearVideoWatermarkEx(connection: RtcConnection): number; /** - * Agora supports reporting and analyzing customized messages. + * Custom data reporting and analytics service. * - * Agora supports reporting and analyzing customized messages. This function is in the beta stage with a free trial. The ability provided in its beta test version is reporting a maximum of 10 message pieces within 6 seconds, with each message piece not exceeding 256 bytes and each string not exceeding 100 bytes. To try out this function, contact and discuss the format of customized messages with us. + * Agora provides custom data reporting and analytics services. This service is currently in a free beta period. During the beta, you can send up to 10 custom data messages within 6 seconds. Each message must not exceed 256 bytes, and each string must not exceed 100 bytes. To try this service, please [contact sales](https://www.shengwang.cn/contact-sales/) to enable it and agree on the custom data format. */ abstract sendCustomReportMessageEx( id: string, @@ -647,20 +669,20 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Enables the reporting of users' volume indication. + * Enables audio volume indication. * - * This method enables the SDK to regularly report the volume information to the app of the local user who sends a stream and remote users (three users at most) whose instantaneous volumes are the highest. + * This method allows the SDK to periodically report the volume information of the local user who is sending audio and up to three remote users with the highest instantaneous volume to the app. * * @param interval Sets the time interval between two consecutive volume indications: * ≤ 0: Disables the volume indication. - * > 0: Time interval (ms) between two consecutive volume indications. Ensure this parameter is set to a value greater than 10, otherwise you will not receive the onAudioVolumeIndication callback. 
Agora recommends that this value is set as greater than 100. - * @param smooth The smoothing factor that sets the sensitivity of the audio volume indicator. The value ranges between 0 and 10. The recommended value is 3. The greater the value, the more sensitive the indicator. - * @param reportVad true : Enables the voice activity detection of the local user. Once it is enabled, the vad parameter of the onAudioVolumeIndication callback reports the voice activity status of the local user. false : (Default) Disables the voice activity detection of the local user. Once it is disabled, the vad parameter of the onAudioVolumeIndication callback does not report the voice activity status of the local user, except for the scenario where the engine automatically detects the voice activity of the local user. - * @param connection The connection information. See RtcConnection. + * > 0: The time interval (ms) between volume indications. We recommend setting it to greater than 100 ms; it must not be less than 10 ms, or you will not receive the onAudioVolumeIndication callback. + * @param smooth The smoothing factor that sets the sensitivity of the volume indication. The range is [0,10], and the recommended value is 3. The larger the value, the more sensitive the volume fluctuation; the smaller the value, the smoother the fluctuation. + * @param reportVad true : Enables the local voice activity detection. Once enabled, the vad parameter in the onAudioVolumeIndication callback indicates whether the local user is speaking. false : (Default) Disables the local voice activity detection. Unless the engine automatically detects local voice activity, the vad parameter in the onAudioVolumeIndication callback does not indicate whether the local user is speaking. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. */ abstract enableAudioVolumeIndicationEx( interval: number, @@ -670,21 +692,24 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Starts pushing media streams to a CDN without transcoding. + * Starts RTMP streaming without transcoding. * - * Call this method after joining a channel. - * Only hosts in the LIVE_BROADCASTING profile can call this method. - * If you want to retry pushing streams after a failed push, make sure to call stopRtmpStream first, then call this method to retry pushing streams; otherwise, the SDK returns the same error code as the last failed push. Agora recommends that you use the server-side Media Push function. You can call this method to push an audio or video stream to the specified CDN address. This method can push media streams to only one CDN address at a time, so if you need to push streams to multiple addresses, call this method multiple times. After you call this method, the SDK triggers the onRtmpStreamingStateChanged callback on the local client to report the state of the streaming. + * Agora recommends using the more advanced server-side streaming feature. See [Implement server-side streaming](https://doc.shengwang.cn/doc/media-push/restful/landing-page). + * Call this method to push live audio and video streams to the specified RTMP streaming URL. This method can push to only one URL at a time. To push to multiple URLs, call this method multiple times. + * After calling this method, the SDK triggers the onRtmpStreamingStateChanged callback locally to report the streaming status. + * Call this method after joining a channel. + * Only hosts in live streaming scenarios can call this method. + * If the streaming fails and you want to restart it, make sure to call stopRtmpStream first before calling this method again. 
Otherwise, the SDK will return the same error code as the previous failed attempt. * - * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot exceed 1024 bytes. Special characters such as Chinese characters are not supported. - * @param connection The connection information. See RtcConnection. + * @param url The RTMP or RTMPS streaming URL. The maximum length is 1024 bytes. Chinese characters and special characters are not supported. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. - * -2: The URL or configuration of transcoding is invalid; check your URL and transcoding configurations. - * -7: The SDK is not initialized before calling this method. - * -19: The Media Push URL is already in use; use another URL instead. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. + * -2: Invalid URL or transcoding parameter. Check your URL or parameter settings. + * -7: SDK not initialized before calling this method. + * -19: The RTMP streaming URL is already in use. Use a different URL. */ abstract startRtmpStreamWithoutTranscodingEx( url: string, @@ -692,24 +717,26 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Starts Media Push and sets the transcoding configuration. + * Starts pushing media streams to a CDN and sets the transcoding configuration. * - * Agora recommends that you use the server-side Media Push function. You can call this method to push a live audio-and-video stream to the specified CDN address and set the transcoding configuration. This method can push media streams to only one CDN address at a time, so if you need to push streams to multiple addresses, call this method multiple times. After you call this method, the SDK triggers the onRtmpStreamingStateChanged callback on the local client to report the state of the streaming. 
- * Ensure that you enable the Media Push service before using this function. + * Agora recommends using the more comprehensive server-side CDN streaming service. See [Implement server-side CDN streaming](https://doc.shengwang.cn/doc/media-push/restful/landing-page). + * Call this method to push live audio and video streams to the specified CDN streaming URL and set the transcoding configuration. This method can only push media streams to one URL at a time. To push to multiple URLs, call this method multiple times. + * After calling this method, the SDK triggers the onRtmpStreamingStateChanged callback locally to report the streaming status. + * Make sure the CDN streaming service is enabled. * Call this method after joining a channel. - * Only hosts in the LIVE_BROADCASTING profile can call this method. - * If you want to retry pushing streams after a failed push, make sure to call stopRtmpStreamEx first, then call this method to retry pushing streams; otherwise, the SDK returns the same error code as the last failed push. + * Only hosts in a live streaming scenario can call this method. + * If the streaming fails and you want to restart it, you must call stopRtmpStreamEx before calling this method again. Otherwise, the SDK returns the same error code as the previous failure. * - * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot exceed 1024 bytes. Special characters such as Chinese characters are not supported. - * @param transcoding The transcoding configuration for Media Push. See LiveTranscoding. + * @param url The CDN streaming URL. The format must be RTMP or RTMPS. The character length must not exceed 1024 bytes. Chinese characters and other special characters are not supported. + * @param transcoding The transcoding configuration for CDN streaming. See LiveTranscoding. * @param connection The connection information. See RtcConnection. * * @returns - * 0: Success. - * < 0: Failure. 
- * -2: The URL or configuration of transcoding is invalid; check your URL and transcoding configurations. + * 0: The method call succeeds. + * < 0: The method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -2: The URL or transcoding parameter is invalid. Check your URL or parameter settings. * -7: The SDK is not initialized before calling this method. - * -19: The Media Push URL is already in use; use another URL instead. + * -19: The CDN streaming URL is already in use. Use another CDN streaming URL. */ abstract startRtmpStreamWithTranscodingEx( url: string, @@ -718,16 +745,17 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Updates the transcoding configuration. + * Updates the transcoding configuration for CDN streaming. * - * Agora recommends that you use the server-side Media Push function. After you start pushing media streams to CDN with transcoding, you can dynamically update the transcoding configuration according to the scenario. The SDK triggers the onTranscodingUpdated callback after the transcoding configuration is updated. + * Agora recommends using the more comprehensive server-side CDN streaming service. See [Implement server-side CDN streaming](https://doc.shengwang.cn/doc/media-push/restful/landing-page). + * After enabling transcoding streaming, you can dynamically update the transcoding configuration based on your scenario. After the update, the SDK triggers the onTranscodingUpdated callback. * - * @param transcoding The transcoding configuration for Media Push. See LiveTranscoding. + * @param transcoding The transcoding configuration for CDN streaming. See LiveTranscoding. * @param connection The connection information. See RtcConnection. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract updateRtmpTranscodingEx( transcoding: LiveTranscoding, @@ -735,39 +763,42 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Stops pushing media streams to a CDN. + * Stops CDN streaming. * - * Agora recommends that you use the server-side Media Push function. You can call this method to stop the live stream on the specified CDN address. This method can stop pushing media streams to only one CDN address at a time, so if you need to stop pushing streams to multiple addresses, call this method multiple times. After you call this method, the SDK triggers the onRtmpStreamingStateChanged callback on the local client to report the state of the streaming. + * Agora recommends using the more comprehensive server-side CDN streaming service. See [Implement server-side CDN streaming](https://doc.shengwang.cn/doc/media-push/restful/landing-page). + * Call this method to stop the live streaming to the specified CDN streaming URL. This method can only stop streaming to one URL at a time. To stop streaming to multiple URLs, call this method multiple times. + * After calling this method, the SDK triggers the onRtmpStreamingStateChanged callback locally to report the streaming status. * - * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot exceed 1024 bytes. Special characters such as Chinese characters are not supported. + * @param url The CDN streaming URL. The format must be RTMP or RTMPS. The character length must not exceed 1024 bytes. Chinese characters and other special characters are not supported. * @param connection The connection information. See RtcConnection. * * @returns - * 0: Success. - * < 0: Failure. + * 0: The method call succeeds. + * < 0: The method call fails. 
See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopRtmpStreamEx(url: string, connection: RtcConnection): number; /** - * Starts relaying media streams across channels or updates channels for media relay. + * Starts or updates the media stream relay across channels. * - * The first successful call to this method starts relaying media streams from the source channel to the destination channels. To relay the media stream to other channels, or exit one of the current media relays, you can call this method again to update the destination channels. This feature supports relaying media streams to a maximum of six destination channels. After a successful method call, the SDK triggers the onChannelMediaRelayStateChanged callback, and this callback returns the state of the media stream relay. Common states are as follows: - * If the onChannelMediaRelayStateChanged callback returns RelayStateRunning (2) and RelayOk (0), it means that the SDK starts relaying media streams from the source channel to the destination channel. - * If the onChannelMediaRelayStateChanged callback returns RelayStateFailure (3), an exception occurs during the media stream relay. - * Call this method after joining the channel. - * This method takes effect only when you are a host in a live streaming channel. - * The relaying media streams across channels function needs to be enabled by contacting. - * Agora does not support string user accounts in this API. + * When you call this method for the first time, it starts to relay media streams across channels. To relay to multiple destination channels or to stop relaying to a specific channel, you can call this method again to add or remove destination channels. This function supports relaying to up to six destination channels. 
+ * After a successful method call, the SDK triggers the onChannelMediaRelayStateChanged callback to report the current state of the media stream relay across channels. Common states include: + * If onChannelMediaRelayStateChanged reports RelayStateRunning (2) and RelayOk (0), it means the SDK has started relaying media streams between the source and destination channels. + * If onChannelMediaRelayStateChanged reports RelayStateFailure (3), it means an error occurred during the media stream relay across channels. + * Call this method after successfully joining a channel. + * In a live streaming scenario, only users with the host role can call this method. + * To enable media stream relay across channels, [contact technical support](https://ticket.shengwang.cn/). + * This function does not support string-type UIDs. * - * @param configuration The configuration of the media stream relay. See ChannelMediaRelayConfiguration. - * @param connection The connection information. See RtcConnection. + * @param configuration The configuration for media stream relay across channels. See ChannelMediaRelayConfiguration. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. - * -1: A general error occurs (no specified reason). - * -2: The parameter is invalid. - * -8: Internal state error. Probably because the user is not a broadcaster. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. + * -1: General error (not categorized). + * -2: Invalid parameter. + * -8: Internal state error. Possibly because the user is not a host. */ abstract startOrUpdateChannelMediaRelayEx( configuration: ChannelMediaRelayConfiguration, @@ -775,44 +806,44 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Stops the media stream relay. Once the relay stops, the host quits all the target channels. 
+ * Stops the media stream relay across channels. Once stopped, the host leaves all destination channels. * - * After a successful method call, the SDK triggers the onChannelMediaRelayStateChanged callback. If the callback reports RelayStateIdle (0) and RelayOk (0), the host successfully stops the relay. If the method call fails, the SDK triggers the onChannelMediaRelayStateChanged callback with the RelayErrorServerNoResponse (2) or RelayErrorServerConnectionLost (8) status code. You can call the leaveChannel method to leave the channel, and the media stream relay automatically stops. + * After a successful call, the SDK triggers the onChannelMediaRelayStateChanged callback. If it reports RelayStateIdle (0) and RelayOk (0), it indicates that the media stream relay has stopped. If the method call fails, the SDK triggers the onChannelMediaRelayStateChanged callback and reports error codes RelayErrorServerNoResponse (2) or RelayErrorServerConnectionLost (8). You can call the leaveChannel method to leave the channel, and the media stream relay will automatically stop. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. - * -5: The method call was rejected. There is no ongoing channel media relay. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and troubleshooting. + * -5: The method call is rejected. There is no ongoing media stream relay across channels. */ abstract stopChannelMediaRelayEx(connection: RtcConnection): number; /** - * Pauses the media stream relay to all target channels. + * Pauses media stream forwarding to all destination channels. * - * After the cross-channel media stream relay starts, you can call this method to pause relaying media streams to all target channels; after the pause, if you want to resume the relay, call resumeAllChannelMediaRelay. 
Call this method after startOrUpdateChannelMediaRelayEx. + * After starting media stream forwarding across channels, you can call this method to pause forwarding to all channels. To resume forwarding, call the resumeAllChannelMediaRelay method. You must call this method after calling startOrUpdateChannelMediaRelayEx to start media stream forwarding across channels. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. * * @returns - * 0: Success. - * < 0: Failure. - * -5: The method call was rejected. There is no ongoing channel media relay. + * 0: The method call was successful. + * < 0: The method call failed. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. + * -5: This method call was rejected. No ongoing cross-channel media stream forwarding exists. */ abstract pauseAllChannelMediaRelayEx(connection: RtcConnection): number; /** - * Resumes the media stream relay to all target channels. + * Resumes media stream forwarding to all destination channels. * - * After calling the pauseAllChannelMediaRelayEx method, you can call this method to resume relaying media streams to all destination channels. Call this method after pauseAllChannelMediaRelayEx. + * After calling the pauseAllChannelMediaRelayEx method, you can call this method to resume media stream forwarding to all destination channels. You must call this method after pauseAllChannelMediaRelayEx. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. * * @returns - * 0: Success. - * < 0: Failure. - * -5: The method call was rejected. There is no paused channel media relay. + * 0: The method call was successful. + * < 0: The method call failed. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. 
+ * -5: This method call was rejected. No paused cross-channel media stream forwarding exists. */ abstract resumeAllChannelMediaRelayEx(connection: RtcConnection): number; @@ -830,19 +861,19 @@ export abstract class IRtcEngineEx extends IRtcEngine { abstract getUserInfoByUidEx(uid: number, connection: RtcConnection): UserInfo; /** - * Enables or disables dual-stream mode on the sender side. + * Enables or disables the dual-stream mode on the sender side. * - * After you enable dual-stream mode, you can call setRemoteVideoStreamType to choose to receive either the high-quality video stream or the low-quality video stream on the subscriber side. You can call this method to enable or disable the dual-stream mode on the publisher side. Dual streams are a pairing of a high-quality video stream and a low-quality video stream: - * High-quality video stream: High bitrate, high resolution. - * Low-quality video stream: Low bitrate, low resolution. Deprecated: This method is deprecated as of v4.2.0. Use setDualStreamModeEx instead. This method is applicable to all types of streams from the sender, including but not limited to video streams collected from cameras, screen sharing streams, and custom-collected video streams. + * Deprecated Deprecated: Deprecated since v4.2.0. Use setDualStreamModeEx instead. You can call this method on the sender side to enable or disable dual-stream mode. Dual-stream refers to high-quality and low-quality video streams: + * High-quality stream: High resolution and high frame rate video stream. + * Low-quality stream: Low resolution and low frame rate video stream. After enabling dual-stream mode, you can call setRemoteVideoStreamType on the receiver side to choose to receive either the high-quality or low-quality video stream. This method applies to all types of streams sent by the sender, including but not limited to camera-captured video, screen sharing, and custom video streams. 
* * @param enabled Whether to enable dual-stream mode: true : Enable dual-stream mode. false : (Default) Disable dual-stream mode. - * @param streamConfig The configuration of the low-quality video stream. See SimulcastStreamConfig. When setting mode to DisableSimulcastStream, setting streamConfig will not take effect. - * @param connection The connection information. See RtcConnection. + * @param streamConfig Configuration for the low-quality video stream. See SimulcastStreamConfig. When mode is set to DisableSimulcastStream, setting streamConfig has no effect. + * @param connection Connection information. See RtcConnection. * * @returns - * 0: Success. - * < 0: Failure. + * 0: Method call succeeds. + * < 0: Method call fails. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableDualStreamModeEx( enabled: boolean, @@ -853,20 +884,20 @@ export abstract class IRtcEngineEx extends IRtcEngine { /** * Sets the dual-stream mode on the sender side. * - * The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side, which means the sender does not actively send low-quality video stream. The receiving end with the role of the host can initiate a low-quality video stream request by calling setRemoteVideoStreamTypeEx, and upon receiving the request, the sending end automatically starts sending low-quality stream. - * If you want to modify this behavior, you can call this method and set mode to DisableSimulcastStream (never send low-quality video streams) or EnableSimulcastStream (always send low-quality video streams). - * If you want to restore the default behavior after making changes, you can call this method again with mode set to AutoSimulcastStream. 
The difference and connection between this method and enableDualStreamModeEx is as follows: - * When calling this method and setting mode to DisableSimulcastStream, it has the same effect as enableDualStreamModeEx (false). - * When calling this method and setting mode to EnableSimulcastStream, it has the same effect as enableDualStreamModeEx (true). - * Both methods can be called before and after joining a channel. If both methods are used, the settings in the method called later takes precedence. + * By default, the SDK enables the adaptive low-quality stream mode (AutoSimulcastStream) on the sender side, meaning the sender does not actively send the low-quality stream. A receiver with host role can call setRemoteVideoStreamTypeEx to request the low-quality stream, and the sender starts sending it automatically upon receiving the request. + * To change this behavior, call this method and set mode to DisableSimulcastStream (never send low-quality stream) or EnableSimulcastStream (always send low-quality stream). + * To revert to the default behavior after making changes, call this method again and set mode to AutoSimulcastStream. The differences and similarities between this method and enableDualStreamModeEx are as follows: + * Calling this method with mode set to DisableSimulcastStream is equivalent to enableDualStreamModeEx(false). + * Calling this method with mode set to EnableSimulcastStream is equivalent to enableDualStreamModeEx(true). + * Both methods can be called before or after joining a channel. If both are used, the settings from the later call take precedence. * - * @param mode The mode in which the video stream is sent. See SimulcastStreamMode. - * @param streamConfig The configuration of the low-quality video stream. See SimulcastStreamConfig. When setting mode to DisableSimulcastStream, setting streamConfig will not take effect. - * @param connection The connection information. See RtcConnection. + * @param mode The mode for sending video streams. 
See SimulcastStreamMode. + * @param streamConfig Configuration for the low-quality video stream. See SimulcastStreamConfig. When mode is set to DisableSimulcastStream, streamConfig has no effect. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Code](https://doc.shengwang.cn/api-ref/rtc/rn/error-code) for details and resolution suggestions. */ abstract setDualStreamModeEx( mode: SimulcastStreamMode, @@ -893,19 +924,22 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Takes a snapshot of a video stream using connection ID. + * Takes a snapshot of the video using the connection ID. * - * This method takes a snapshot of a video stream from the specified user, generates a JPG image, and saves it to the specified path. + * This method takes a snapshot of the specified user's video stream, generates a JPG image, and saves it to the specified path. + * When this method returns, the SDK has not actually taken the snapshot. + * When used for local video snapshot, it captures the video stream specified in ChannelMediaOptions. + * If the video has been pre-processed, such as with watermarking or beautification, the snapshot will include the effects of the pre-processing. * - * @param connection The connection information. See RtcConnection. - * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video. - * @param filePath The local path (including filename extensions) of the snapshot. For example: + * @param connection Connection information. See RtcConnection. + * @param uid User ID. Set to 0 to capture the local user's video. + * @param filePath Make sure the directory exists and is writable. The local path where the snapshot is saved, including the file name and format. 
For example: * iOS: /App Sandbox/Library/Caches/example.jpg - * Android: /storage/emulated/0/Android/data//files/example.jpg Ensure that the path you specify exists and is writable. + * Android: /storage/emulated/0/Android/data//files/example.jpg * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract takeSnapshotEx( connection: RtcConnection, @@ -914,17 +948,17 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Enables or disables video screenshot and upload. + * Enables/disables local snapshot upload. * - * This method can take screenshots for multiple video streams and upload them. When video screenshot and upload function is enabled, the SDK takes screenshots and uploads videos sent by local users based on the type and frequency of the module you set in ContentInspectConfig. After video screenshot and upload, the Agora server sends the callback notification to your app server in HTTPS requests and sends all screenshots to the third-party cloud storage service. + * This method allows capturing and uploading snapshots of multiple video streams. After local snapshot upload is enabled, the SDK captures and uploads snapshots of the video sent by the local user based on the module type and frequency you set in ContentInspectConfig. Once the snapshot is complete, the Agora server sends a callback notification to your server via HTTPS and uploads all snapshots to your specified third-party cloud storage. Before calling this method, make sure you have [contacted technical support](https://ticket.shengwang.cn/) to enable the local snapshot upload service. * - * @param enabled Whether to enalbe video screenshot and upload: true : Enables video screenshot and upload. false : Disables video screenshot and upload. - * @param config Screenshot and upload configuration. See ContentInspectConfig. 
- * @param connection The connection information. See RtcConnection. + * @param enabled Specifies whether to enable local snapshot upload: true : Enable local snapshot upload. false : Disable local snapshot upload. + * @param config Configuration for local snapshot upload. See ContentInspectConfig. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract enableContentInspectEx( enabled: boolean, @@ -933,16 +967,17 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Enables tracing the video frame rendering process. + * Starts video frame rendering tracing. * - * If you have not called this method, the SDK tracks the rendering events of the video frames from the moment you call joinChannel to join the channel. You can call this method at an appropriate time according to the actual application scenario to set the starting position for tracking video rendering events. - * After the local user leaves the current channel, the SDK automatically tracks the video rendering events from the moment you join a channel. The SDK starts tracing the rendering status of the video frames in the channel from the moment this method is successfully called and reports information about the event through the onVideoRenderingTracingResult callback. + * After this method is successfully called, the SDK uses the time of the call as the starting point and reports video frame rendering information through the onVideoRenderingTracingResult callback. + * If you do not call this method, the SDK uses the time of calling joinChannel to join the channel as the default starting point and automatically starts tracing video rendering events. You can call this method at an appropriate time based on your business scenario to customize the tracing point. 
+ * After leaving the current channel, the SDK automatically resets the tracing point to the next time you join a channel. * - * @param connection The connection information. See RtcConnection. + * @param connection Connection information. See RtcConnection. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract startMediaRenderingTracingEx(connection: RtcConnection): number; @@ -955,15 +990,11 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Gets the call ID with the connection ID. - * - * When a user joins a channel on a client, a callId is generated to identify the call from the client. You can call this method to get callId, and pass it in when calling methods such as rate and complain. + * Get the call ID using the connection ID. * - * @param connection The connection information. See RtcConnection. + * Each time the client joins a channel, a corresponding callId is generated to identify the call session. You can call this method to obtain the callId parameter, then pass it to methods like rate and complain. * - * @returns - * The current call ID, if the method succeeds. - * An empty string, if the method call fails. + * @param connection Connection information. See RtcConnection. */ abstract getCallIdEx(connection: RtcConnection): string; @@ -977,7 +1008,22 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * @ignore + * Preloads the specified sound effect into the channel. + * + * Since Available since v4.6.2. Each time you call this method, only one sound effect file can be preloaded into memory. To preload multiple sound effect files, call this method multiple times. After preloading, you can call playEffect to play the preloaded sound effect, or call playAllEffects to play all preloaded sound effects. 
+ * To ensure a smooth experience, the size of the sound effect file should not exceed the limit.
+ * Agora recommends calling this method before joining a channel.
+ * If preloadEffectEx is called before playEffectEx, then after playEffectEx is executed, the file resource is not released. The next playEffectEx call will start playback from the beginning.
+ * If preloadEffectEx is not called before playEffectEx, then after playEffectEx is executed, the resource is destroyed. The next playEffectEx call will attempt to reopen the file and play from the beginning.
+ *
+ * @param connection Connection information. See RtcConnection.
+ * @param soundId Sound effect ID.
+ * @param filePath Absolute path of the local file or URL of the online file. Supported audio formats include: mp3, mp4, m4a, aac, 3gp, mkv, and wav.
+ * @param startPos Start position for playing the sound effect file (in milliseconds).
+ *
+ * @returns
+ * 0: Success.
+ * < 0: Failure.
 */
 abstract preloadEffectEx(
 connection: RtcConnection,
@@ -987,7 +1033,27 @@ export abstract class IRtcEngineEx extends IRtcEngine {
 ): number;
 /**
- * @ignore
+ * Plays the specified sound effect in the channel.
+ *
+ * Available since v4.6.2. You can call this method to play the specified sound effect to all users in the channel. Each call can only play one sound effect. To play multiple sound effects simultaneously, use different soundId and filePath values and call this method multiple times. You can also set whether to publish the sound effect in the channel.
+ * Agora recommends not playing more than three sound effects simultaneously.
+ * The sound effect ID and file path in this method must match those used in the preloadEffectEx method.
+ * If preloadEffectEx is called before playEffectEx, then after playEffectEx is executed, the file resource is not released. The next playEffectEx call will start playback from the beginning. 
+ * If preloadEffectEx is not called before playEffectEx, then after playEffectEx is executed, the resource is destroyed. The next playEffectEx call will attempt to reopen the file and play from the beginning. + * + * @param connection RtcConnection object. See RtcConnection. + * @param soundId Sound effect ID. + * @param filePath Absolute path of the local file or URL of the online file. Supported audio formats include mp3, mp4, m4a, aac, 3gp, mkv, and wav. + * @param loopCount Number of times to loop the sound effect: -1 : Infinite loop until stopEffect or stopAllEffects is called. 0 : Play once. 1 : Play twice. + * @param pitch Pitch of the sound effect. Range: 0.5 to 2.0. Default is 1.0 (original pitch). The smaller the value, the lower the pitch. + * @param pan Spatial position of the sound effect. Range: -1.0 to 1.0: -1.0 : Sound comes from the left. 0.0 : Sound comes from the front. 1.0 : Sound comes from the right. + * @param gain Volume of the sound effect. Range: 0 to 100. Default is 100 (original volume). The smaller the value, the lower the volume. + * @param publish Whether to publish the sound effect in the channel: true : Publish the sound effect in the channel. false : (Default) Do not publish the sound effect in the channel. + * @param startPos Start position for playing the sound effect file, in milliseconds. + * + * @returns + * 0: Success. + * < 0: Failure. */ abstract playEffectEx( connection: RtcConnection, @@ -1002,17 +1068,20 @@ export abstract class IRtcEngineEx extends IRtcEngine { ): number; /** - * Gets a video screenshot of the specified observation point using the connection ID. + * Takes a snapshot of the video at a specified observation point using the connection ID. * - * This method takes a snapshot of a video stream from the specified user, generates a JPG image, and saves it to the specified path. + * This method takes a snapshot of the specified user's video stream, generates a JPG image, and saves it to the specified path. 
+ * When this method returns, the SDK has not actually taken the snapshot.
+ * When used for local video snapshot, it captures the video stream specified in ChannelMediaOptions.
+ * If the video has been pre-processed, such as with watermarking or beautification, the snapshot will include the effects of the pre-processing.
 *
- * @param connection The connection information. See RtcConnection.
- * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video.
- * @param config The configuration of the snaptshot. See SnapshotConfig.
+ * @param connection Connection information. See RtcConnection.
+ * @param uid User ID. Set to 0 to capture the local user's video.
+ * @param config Snapshot configuration. See SnapshotConfig.
 *
 * @returns
 * 0: Success.
- * < 0: Failure.
+ * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions.
 */
 abstract takeSnapshotWithConfigEx(
 connection: RtcConnection,
@@ -1021,7 +1090,16 @@ export abstract class IRtcEngineEx extends IRtcEngine {
 ): number;
 /**
- * @ignore
+ * Adds a watermark image to the local video.
+ *
+ * Available since v4.6.2.
+ *
+ * @param config Watermark configuration. See WatermarkConfig.
+ * @param connection RtcConnection object. See RtcConnection.
+ *
+ * @returns
+ * 0: Success.
+ * < 0: Failure.
 */
 abstract addVideoWatermarkWithConfigEx(
 config: WatermarkConfig,
diff --git a/src/IAgoraSpatialAudio.ts b/src/IAgoraSpatialAudio.ts
index 34229f08..4859b624 100644
--- a/src/IAgoraSpatialAudio.ts
+++ b/src/IAgoraSpatialAudio.ts
@@ -2,69 +2,69 @@
 import './extension/IAgoraSpatialAudioExtension';
 import { RtcConnection } from './IAgoraRtcEngineEx';
 /**
- * The spatial position of the remote user or the media player.
+ * Spatial position information of the remote user or media player.
 */
 export class RemoteVoicePositionInfo {
 /**
- * The coordinates in the world coordinate system. 
This parameter is an array of length 3, and the three values represent the front, right, and top coordinates in turn. + * Coordinates in the world coordinate system. This parameter is an array of length 3, representing the coordinates in the forward, right, and up directions respectively. */ position?: number[]; /** - * The unit vector of the x axis in the coordinate system. This parameter is an array of length 3, and the three values represent the front, right, and top coordinates in turn. + * Unit vector of the forward axis in the world coordinate system. This parameter is an array of length 3, representing the directions in the forward, right, and up axes respectively. */ forward?: number[]; } /** - * Sound insulation area settings. + * Sound isolation zone settings. */ export class SpatialAudioZone { /** - * The ID of the sound insulation area. + * ID of the sound isolation zone. */ zoneSetId?: number; /** - * The spatial center point of the sound insulation area. This parameter is an array of length 3, and the three values represent the front, right, and top coordinates in turn. + * The spatial center point of the sound isolation zone. This parameter is an array of length 3, representing coordinates in the forward, right, and up directions respectively. */ position?: number[]; /** - * Starting at position, the forward unit vector. This parameter is an array of length 3, and the three values represent the front, right, and top coordinates in turn. + * Unit vector pointing forward from position. This parameter is an array of length 3, representing coordinates in the forward, right, and up directions respectively. */ forward?: number[]; /** - * Starting at position, the right unit vector. This parameter is an array of length 3, and the three values represent the front, right, and top coordinates in turn. + * Unit vector pointing right from position. 
This parameter is an array of length 3, representing coordinates in the forward, right, and up directions respectively. */ right?: number[]; /** - * Starting at position, the up unit vector. This parameter is an array of length 3, and the three values represent the front, right, and top coordinates in turn. + * Unit vector pointing upward from position. This parameter is an array of length 3, representing coordinates in the forward, right, and up directions respectively. */ up?: number[]; /** - * The entire sound insulation area is regarded as a cube; this represents the length of the forward side in the unit length of the game engine. + * Treating the entire sound isolation zone as a cube, this represents the length of the forward edge, in game engine units. */ forwardLength?: number; /** - * The entire sound insulation area is regarded as a cube; this represents the length of the right side in the unit length of the game engine. + * Treating the entire sound isolation zone as a cube, this represents the length of the right edge, in game engine units. */ rightLength?: number; /** - * The entire sound insulation area is regarded as a cube; this represents the length of the up side in the unit length of the game engine. + * Treating the entire sound isolation zone as a cube, this represents the length of the upward edge, in game engine units. */ upLength?: number; /** - * The sound attenuation coefficient when users within the sound insulation area communicate with external users. The value range is [0,1]. The values are as follows: - * 0: Broadcast mode, where the volume and timbre are not attenuated with distance, and the volume and timbre heard by local users do not change regardless of distance. - * (0,0.5): Weak attenuation mode, that is, the volume and timbre are only weakly attenuated during the propagation process, and the sound can travel farther than the real environment. 
- * 0.5: (Default) simulates the attenuation of the volume in the real environment; the effect is equivalent to not setting the audioAttenuation parameter. - * (0.5,1]: Strong attenuation mode (default value is 1), that is, the volume and timbre attenuate rapidly during propagation. + * Sound attenuation coefficient when users inside and outside the sound isolation zone communicate. Value range: [0,1]. Where: + * 0: Broadcast mode, volume and timbre do not attenuate with distance; the local user hears no change regardless of distance. + * (0,0.5): Weak attenuation mode; volume and timbre attenuate slightly, allowing sound to travel farther than in real environments. + * 0.5: Simulates real-world volume attenuation, equivalent to not setting audioAttenuation. + * (0.5,1]: Strong attenuation mode (default is 1); volume and timbre attenuate rapidly. */ audioAttenuation?: number; } /** - * This class calculates user positions through the SDK to implement the spatial audio effect. + * This class implements spatial audio by calculating user coordinates through the SDK. * - * This class inherits from IBaseSpatialAudioEngine. Before calling other APIs in this class, you need to call the initialize method to initialize this class. + * This class inherits from IBaseSpatialAudioEngine. Before calling other APIs under this class, you need to call the initialize method to initialize it. */ export abstract class ILocalSpatialAudioEngine { /** @@ -75,26 +75,26 @@ export abstract class ILocalSpatialAudioEngine { /** * Initializes ILocalSpatialAudioEngine. * - * Before calling other methods of the ILocalSpatialAudioEngine class, you need to call this method to initialize ILocalSpatialAudioEngine. - * The SDK supports creating only one ILocalSpatialAudioEngine instance for an app. + * You need to call this method to initialize ILocalSpatialAudioEngine before calling other methods of the ILocalSpatialAudioEngine class. 
+ * The SDK only supports creating one instance of ILocalSpatialAudioEngine per app. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract initialize(): number; /** - * Updates the spatial position of the specified remote user. + * Updates the spatial position information of a remote user. * - * After successfully calling this method, the SDK calculates the spatial audio parameters based on the relative position of the local and remote user. Call this method after the or joinChannel method. + * After successfully calling this method, the SDK calculates spatial audio parameters based on the relative positions of the local and remote users. This method must be called after joinChannel. * - * @param uid The user ID. This parameter must be the same as the user ID passed in when the user joined the channel. - * @param posInfo The spatial position of the remote user. See RemoteVoicePositionInfo. + * @param uid User ID. Must be the same as the user ID used when the user joined the channel. + * @param posInfo Spatial position information of the remote user. See RemoteVoicePositionInfo. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract updateRemotePosition( uid: number, @@ -111,15 +111,16 @@ export abstract class ILocalSpatialAudioEngine { ): number; /** - * Removes the spatial position of the specified remote user. + * Deletes the spatial position information of the specified remote user. * - * After successfully calling this method, the local user no longer hears the specified remote user. After leaving the channel, to avoid wasting computing resources, call this method to delete the spatial position information of the specified remote user. 
Otherwise, the user's spatial position information will be saved continuously. When the number of remote users exceeds the number of audio streams that can be received as set in setMaxAudioRecvCount, the system automatically unsubscribes from the audio stream of the user who is furthest away based on relative distance. + * After this method is successfully called, the local user will no longer hear the specified remote user. + * To avoid wasting computational resources after leaving the channel, you need to call this method to remove the spatial position information of the specified remote user. Otherwise, the spatial position information of that user will be retained. When the number of remote users exceeds the maximum number of audio streams that can be received as set in setMaxAudioRecvCount, the SDK will automatically unsubscribe from the audio streams of the farthest users based on relative distance. * - * @param uid The user ID. This parameter must be the same as the user ID passed in when the user joined the channel. + * @param uid User ID. Must match the user ID used when the user joined the channel. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract removeRemotePosition(uid: number): number; @@ -201,21 +202,17 @@ export abstract class ILocalSpatialAudioEngine { abstract muteRemoteAudioStream(uid: number, mute: boolean): number; /** - * Sets the sound attenuation effect for the specified user. + * Sets the audio attenuation effect for the specified user. * - * @param uid The user ID. This parameter must be the same as the user ID passed in when the user joined the channel. - * @param attenuation For the user's sound attenuation coefficient, the value range is [0,1]. 
The values are as follows: - * 0: Broadcast mode, where the volume and timbre are not attenuated with distance, and the volume and timbre heard by local users do not change regardless of distance. - * (0,0.5): Weak attenuation mode, that is, the volume and timbre are only weakly attenuated during the propagation process, and the sound can travel farther than the real environment. - * 0.5: (Default) simulates the attenuation of the volume in the real environment; the effect is equivalent to not setting the speaker_attenuation parameter. - * (0.5,1]: Strong attenuation mode, that is, the volume and timbre attenuate rapidly during the propagation process. - * @param forceSet Whether to force the user's sound attenuation effect: true : Force attenuation to set the sound attenuation of the user. At this time, the attenuation coefficient of the sound insulation area set in the audioAttenuation of the SpatialAudioZone does not take effect for the user. - * If the sound source and listener are inside and outside the sound isolation area, the sound attenuation effect is determined by the audioAttenuation in SpatialAudioZone. - * If the sound source and the listener are in the same sound insulation area or outside the same sound insulation area, the sound attenuation effect is determined by attenuation in this method. false : Do not force attenuation to set the user's sound attenuation effect, as shown in the following two cases. + * @param uid User ID. Must be the same as the user ID used when joining the channel. + * @param attenuation The audio attenuation coefficient for the user. The range is [0,1]. + * @param forceSet Whether to force the audio attenuation effect for the user: true : Forces the use of attenuation to set the user's audio attenuation effect. In this case, the audioAttenuation set in SpatialAudioZone does not take effect for this user. false : Does not force the use of attenuation to set the user's audio attenuation effect. 
There are two cases: + * If the audio source and listener are inside and outside the sound insulation zone respectively, the attenuation effect is determined by audioAttenuation in SpatialAudioZone. + * If the audio source and listener are both inside the same sound insulation zone or both outside, the attenuation effect is determined by attenuation in this method. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract setRemoteAudioAttenuation( uid: number, @@ -238,13 +235,14 @@ export abstract class ILocalSpatialAudioEngine { ): number; /** - * Removes the spatial positions of all remote users. + * Deletes the spatial position information of all remote users. * - * After successfully calling this method, the local user no longer hears any remote users. After leaving the channel, to avoid wasting resources, you can also call this method to delete the spatial positions of all remote users. + * After successfully calling this method, the local user will no longer hear any remote users. + * After leaving the channel, you can also call this method to delete all remote users' spatial position information to avoid wasting computing resources. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract clearRemotePositions(): number; } diff --git a/src/IAudioDeviceManager.ts b/src/IAudioDeviceManager.ts index 9f57d49f..cc5d7e5e 100644 --- a/src/IAudioDeviceManager.ts +++ b/src/IAudioDeviceManager.ts @@ -2,11 +2,11 @@ import './extension/IAudioDeviceManagerExtension'; import { AudioDeviceInfo } from './IAgoraRtcEngine'; /** - * The maximum length of the device ID. + * Maximum length of device ID. 
*/ export enum MaxDeviceIdLengthType { /** - * The maximum length of the device ID is 512 bytes. + * The maximum length of the device ID is 512 characters. */ MaxDeviceIdLength = 512, } @@ -108,26 +108,28 @@ export abstract class IAudioDeviceManager { /** * Starts the audio playback device test. * - * This method tests whether the audio device for local playback works properly. Once a user starts the test, the SDK plays an audio file specified by the user. If the user can hear the audio, the playback device works properly. After calling this method, the SDK triggers the onAudioVolumeIndication callback every 100 ms, reporting uid = 1 and the volume information of the playback device. The difference between this method and the startEchoTest method is that the former checks if the local audio playback device is working properly, while the latter can check the audio and video devices and network conditions. Call this method before joining a channel. After the test is completed, call stopPlaybackDeviceTest to stop the test before joining a channel. + * This method tests whether the local audio playback device is working properly. After the test starts, the SDK plays the specified audio file. If the tester hears the sound, it indicates the playback device is functioning correctly. + * After calling this method, the SDK triggers the onAudioVolumeIndication callback every 100 ms, reporting uid = 1 and the volume information of the playback device. + * The difference between this method and startEchoTest is that this method checks whether the local audio playback device works properly, while the latter checks whether the audio/video devices and network are functioning properly. You must call this method before joining a channel. After the test is complete, if you need to join a channel, make sure to call stopPlaybackDeviceTest to stop the device test. * - * @param testAudioFilePath The path of the audio file. The data format is string in UTF-8. 
- * Supported file formats: wav, mp3, m4a, and aac. - * Supported file sample rates: 8000, 16000, 32000, 44100, and 48000 Hz. + * @param testAudioFilePath The absolute path of the audio file. The path string must be in UTF-8 encoding. + * Supported file formats: wav, mp3, m4a, aac. + * Supported sampling rates: 8000, 16000, 32000, 44100, 48000. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract startPlaybackDeviceTest(testAudioFilePath: string): number; /** * Stops the audio playback device test. * - * This method stops the audio playback device test. You must call this method to stop the test after calling the startPlaybackDeviceTest method. Call this method before joining a channel. + * This method stops the audio playback device test. After calling startPlaybackDeviceTest, you must call this method to stop the test. You must call this method before joining a channel. * * @returns * 0: Success. - * < 0: Failure. + * < 0: Failure. See [Error Codes](https://docs.agora.io/en/video-calling/troubleshooting/error-codes) for details and resolution suggestions. */ abstract stopPlaybackDeviceTest(): number; diff --git a/src/index.ts b/src/index.ts index 1f05a9da..be7241af 100644 --- a/src/index.ts +++ b/src/index.ts @@ -33,24 +33,14 @@ AgoraEventEmitter.addListener('AgoraRtcNg:onEvent', handleEvent); const instance = new RtcEngineExInternal(); /** - * Creates one IRtcEngine object. - * - * Currently, the Agora RTC SDK v4.x supports creating only one IRtcEngine object for each app. - * - * @returns - * IRtcEngine object. + * @ignore */ export function createAgoraRtcEngine(): IRtcEngine { return instance; } /** - * Gets one IMediaPlayerCacheManager instance. - * - * Before calling any APIs in the IMediaPlayerCacheManager class, you need to call this method to get a cache manager instance of a media player. 
- * - * @returns - * The IMediaPlayerCacheManager instance. + * @ignore */ export function getMediaPlayerCacheManager(): IMediaPlayerCacheManager { return new IMediaPlayerCacheManagerImpl();