Skip to content

Commit

Permalink
[AUTO] Generate codes by terra (#1215)
Browse files Browse the repository at this point in the history
Co-authored-by: guoxianzhe <[email protected]>
  • Loading branch information
sda-rob and guoxianzhe authored Aug 15, 2024
1 parent 3cc47de commit e2888d8
Show file tree
Hide file tree
Showing 13 changed files with 260 additions and 214 deletions.
2 changes: 1 addition & 1 deletion ts/AgoraSdk.ts
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ export function createAgoraRtcEngine(options?: AgoraEnvOptions): IRtcEngineEx {
/**
* Gets one IMediaPlayerCacheManager instance.
*
* When you successfully call this method, the SDK returns a media player cache manager instance. The cache manager is a singleton pattern. Therefore, multiple calls to this method returns the same instance. Make sure the IRtcEngine is initialized before you call this method.
* Before calling any APIs in the IMediaPlayerCacheManager class, you need to call this method to get a cache manager instance of a media player.
*
* @returns
* The IMediaPlayerCacheManager instance.
Expand Down
17 changes: 9 additions & 8 deletions ts/Private/AgoraBase.ts
Original file line number Diff line number Diff line change
Expand Up @@ -234,7 +234,7 @@ export enum ErrorCodeType {
ErrNetDown = 14,
/**
* 17: The request to join the channel is rejected. Possible reasons include the following:
* The user is already in the channel. Agora recommends that you use the onConnectionStateChanged callback to determine whether the user exists in the channel. Do not call this method to join the channel unless you receive the ConnectionStateDisconnected (1) state.
* The user is already in the channel. Agora recommends that you use the onConnectionStateChanged callback to see whether the user is in the channel. Do not call this method to join the channel unless you receive the ConnectionStateDisconnected (1) state.
* After calling startEchoTest for the call test, the user tries to join the channel without calling stopEchoTest to end the current test. To join a channel, the call test must be ended by calling stopEchoTest.
*/
ErrJoinChannelRejected = 17,
Expand Down Expand Up @@ -1508,7 +1508,7 @@ export enum AudioScenarioType {
*/
AudioScenarioGameStreaming = 3,
/**
* 5: Chatroom scenario, where users need to frequently switch the user role or mute and unmute the microphone. For example, education scenarios. In this scenario, audience members receive a pop-up window to request permission of using microphones.
* 5: Chatroom scenario, where users need to frequently switch the user role or mute and unmute the microphone. For example, education scenarios.
*/
AudioScenarioChatroom = 5,
/**
Expand Down Expand Up @@ -1583,7 +1583,7 @@ export enum VideoApplicationScenarioType {
*/
ApplicationScenarioGeneral = 0,
/**
* If set to ApplicationScenarioMeeting (1), the SDK automatically enables the following strategies:
* ApplicationScenarioMeeting (1) is suitable for meeting scenarios. If set to ApplicationScenarioMeeting (1), the SDK automatically enables the following strategies:
 * In meeting scenarios where low-quality video streams are required to have a high bitrate, the SDK automatically enables multiple technologies used to deal with network congestion, to enhance the performance of the low-quality streams and to ensure the smooth reception by subscribers.
* The SDK monitors the number of subscribers to the high-quality video stream in real time and dynamically adjusts its configuration based on the number of subscribers.
 * If nobody subscribes to the high-quality stream, the SDK automatically reduces its bitrate and frame rate to save upstream bandwidth.
Expand Down Expand Up @@ -1854,7 +1854,7 @@ export enum LocalVideoStreamError {
/**
* @ignore
*/
LocalVideoStreamReasonScreenCaptureDisplayDiscnnected = 30,
LocalVideoStreamReasonScreenCaptureDisplayDisconnected = 30,
}

/**
Expand Down Expand Up @@ -2634,19 +2634,19 @@ export enum ConnectionChangedReasonType {
*/
export enum ClientRoleChangeFailedReason {
/**
* 1: The number of hosts in the channel is already at the upper limit. This enumerator is reported only when the support for 128 users is enabled. The maximum number of hosts is based on the actual number of hosts configured when you enable the 128-user feature.
* 1: The number of hosts in the channel exceeds the limit. This enumerator is reported only when the support for 128 users is enabled. The maximum number of hosts is based on the actual number of hosts configured when you enable the 128-user feature.
*/
ClientRoleChangeFailedTooManyBroadcasters = 1,
/**
* 2: The request is rejected by the Agora server. Agora recommends you prompt the user to try to switch their user role again.
*/
ClientRoleChangeFailedNotAuthorized = 2,
/**
* 3: The request is timed out. Agora recommends you prompt the user to check the network connection and try to switch their user role again.
* 3: The request is timed out. Agora recommends you prompt the user to check the network connection and try to switch their user role again. Deprecated: This enumerator is deprecated since v4.4.0 and is not recommended for use.
*/
ClientRoleChangeFailedRequestTimeOut = 3,
/**
* 4: The SDK connection fails. You can use reason reported in the onConnectionStateChanged callback to troubleshoot the failure.
* 4: The SDK is disconnected from the Agora edge server. You can troubleshoot the failure through the reason reported by onConnectionStateChanged. Deprecated: This enumerator is deprecated since v4.4.0 and is not recommended for use.
*/
ClientRoleChangeFailedConnectionFailed = 4,
}
Expand Down Expand Up @@ -2766,6 +2766,7 @@ export class VideoCanvas {
mediaPlayerId?: number;
cropArea?: Rectangle;
enableAlphaMask?: boolean;
rotation?: VideoOrientation;
}

/**
Expand Down Expand Up @@ -2886,7 +2887,7 @@ export class ColorEnhanceOptions {
*/
export enum BackgroundSourceType {
/**
* 0: Process the background as alpha information without replacement, only separating the portrait and the background. After setting this value, you can call startLocalVideoTranscoder to implement the picture-in-picture effect.
* 0: Process the background as alpha data without replacement, only separating the portrait and the background. After setting this value, you can call startLocalVideoTranscoder to implement the picture-in-picture effect.
*/
BackgroundNone = 0,
/**
Expand Down
28 changes: 5 additions & 23 deletions ts/Private/AgoraMediaBase.ts
Original file line number Diff line number Diff line change
Expand Up @@ -306,15 +306,15 @@ export enum ContentInspectType {
}

/**
* A ContentInspectModule structure used to configure the frequency of video screenshot and upload.
 * ContentInspectModule: a structure used to configure the frequency of video screenshot and upload.
*/
export class ContentInspectModule {
type?: ContentInspectType;
interval?: number;
}

/**
* Configuration of video screenshot and upload.
* Screenshot and upload configuration.
*/
export class ContentInspectConfig {
extraInfo?: string;
Expand Down Expand Up @@ -866,6 +866,7 @@ export interface IVideoFrameObserver {
* Occurs each time the SDK receives a video frame before encoding.
*
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data before encoding and then process the data according to your particular scenarios. After processing, you can send the processed video data back to the SDK in this callback.
* It is recommended that you ensure the modified parameters in videoFrame are consistent with the actual situation of the video frames in the video frame buffer. Otherwise, it may cause unexpected rotation, distortion, and other issues in the local preview and remote video display.
* It's recommended that you implement this callback through the C++ API.
* Due to framework limitations, this callback does not support sending processed video data back to the SDK.
* The video data that this callback gets has been preprocessed, with its content cropped and rotated, and the image enhanced.
Expand All @@ -889,6 +890,7 @@ export interface IVideoFrameObserver {
* Occurs each time the SDK receives a video frame sent by the remote user.
*
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data sent from the remote end before rendering, and then process it according to the particular scenarios.
* It is recommended that you ensure the modified parameters in videoFrame are consistent with the actual situation of the video frames in the video frame buffer. Otherwise, it may cause unexpected rotation, distortion, and other issues in the local preview and remote video display.
* If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel.
* It's recommended that you implement this callback through the C++ API.
* Due to framework limitations, this callback does not support sending processed video data back to the SDK.
Expand Down Expand Up @@ -1024,27 +1026,7 @@ export interface IFaceInfoObserver {
 * pitch: Head pitch angle. A positive value means looking down, while a negative value means looking up.
 * yaw: Head yaw angle. A positive value means turning left, while a negative value means turning right.
 * roll: Head roll angle. A positive value means tilting to the right, while a negative value means tilting to the left.
* timestamp: String. The timestamp of the output result, in milliseconds. Here is an example of JSON:
* {
* "faces":[{
* "blendshapes":{
* "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0,
* "eyeSquintLeft":0.0, "eyeWideLeft":0.0, "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0,
* "eyeLookOutRight":0.0, "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0,
* "jawLeft":0.0, "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0,
* "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, "mouthFrownLeft":0.0,
* "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, "mouthStretchLeft":0.0, "mouthStretchRight":0.0,
* "mouthRollLower":0.0, "mouthRollUpper":0.0, "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0,
* "mouthPressRight":0.0, "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, "mouthUpperUpRight":0.0,
* "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, "browOuterUpLeft":0.0, "browOuterUpRight":0.0,
* "cheekPuff":0.0, "cheekSquintLeft":0.0, "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0,
* "tongueOut":0.0
* },
* "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5},
*
* }],
* "timestamp":"654879876546"
* }
* timestamp: String. The timestamp of the output result, in milliseconds. Here is an example of JSON: { "faces":[{ "blendshapes":{ "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0, "eyeSquintLeft":0.0, "eyeWideLeft":0.0, "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0, "eyeLookOutRight":0.0, "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0, "jawLeft":0.0, "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0, "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, "mouthFrownLeft":0.0, "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, "mouthStretchLeft":0.0, "mouthStretchRight":0.0, "mouthRollLower":0.0, "mouthRollUpper":0.0, "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0, "mouthPressRight":0.0, "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, "mouthUpperUpRight":0.0, "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, "browOuterUpLeft":0.0, "browOuterUpRight":0.0, "cheekPuff":0.0, "cheekSquintLeft":0.0, "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0, "tongueOut":0.0 }, "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5}, }], "timestamp":"654879876546" }
*
* @returns
* true : Facial information JSON parsing successful. false : Facial information JSON parsing failed.
Expand Down
Loading

0 comments on commit e2888d8

Please sign in to comment.