File: assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "java",
"TagPrefix": "java/communication/azure-communication-callautomation",
"Tag": "java/communication/azure-communication-callautomation_9339e7fc0c"
"Tag": "java/communication/azure-communication-callautomation_6dc9a150d6"
}
File: CallMedia.java
@@ -3,28 +3,29 @@

package com.azure.communication.callautomation;

import java.util.List;

import com.azure.communication.callautomation.models.CallMediaRecognizeOptions;
import com.azure.communication.callautomation.models.ContinuousDtmfRecognitionOptions;
import com.azure.communication.callautomation.models.DtmfTone;
import com.azure.communication.callautomation.models.HoldOptions;
import com.azure.communication.callautomation.models.UnholdOptions;
import com.azure.communication.callautomation.models.PlayOptions;
import com.azure.communication.callautomation.models.PlaySource;
import com.azure.communication.callautomation.models.PlayToAllOptions;
import com.azure.communication.callautomation.models.SendDtmfTonesOptions;
import com.azure.communication.callautomation.models.SendDtmfTonesResult;
import com.azure.communication.callautomation.models.StartMediaStreamingOptions;
import com.azure.communication.callautomation.models.StartTranscriptionOptions;
import com.azure.communication.callautomation.models.StopMediaStreamingOptions;
import com.azure.communication.callautomation.models.StopTranscriptionOptions;
import com.azure.communication.callautomation.models.PlaySource;
import com.azure.communication.callautomation.models.UnholdOptions;
import com.azure.communication.callautomation.models.UpdateTranscriptionOptions;
import com.azure.communication.common.CommunicationIdentifier;
import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.exception.HttpResponseException;
import com.azure.core.http.rest.Response;
import com.azure.core.util.Context;
import com.azure.core.exception.HttpResponseException;

import java.util.List;

/**
* CallContent.
@@ -331,20 +332,13 @@ public void updateTranscription(String locale, String speechRecognitionModelEndpointId

/**
* Updates transcription language in the call.
*
* @param locale Defines new locale for transcription.
* @param speechRecognitionModelEndpointId Defines custom model endpoint.
* @param options Options for the Update Transcription operation.
* @param context Context
* @param operationContext operational context.
* @return Response for successful update transcription request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> updateTranscriptionWithResponse(String locale, String speechRecognitionModelEndpointId,
String operationContext, Context context) {
return callMediaAsync
.updateTranscriptionWithResponseInternal(locale, speechRecognitionModelEndpointId, operationContext,
context)
.block();
public Response<Void> updateTranscriptionWithResponse(UpdateTranscriptionOptions options, Context context) {
return callMediaAsync.updateTranscriptionWithResponseInternal(options, context).block();
}

/**
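For callers of the synchronous client, the removed (locale, speechRecognitionModelEndpointId, operationContext, context) overload is replaced by a single options parameter. A minimal sketch of the new call shape follows; the connection string and call connection id are placeholders, and building the client through CallAutomationClientBuilder and reaching CallMedia via getCallConnection(...).getCallMedia() are assumed from the SDK's existing public surface, not part of this diff.

import com.azure.communication.callautomation.CallAutomationClient;
import com.azure.communication.callautomation.CallAutomationClientBuilder;
import com.azure.communication.callautomation.CallMedia;
import com.azure.communication.callautomation.models.UpdateTranscriptionOptions;
import com.azure.core.http.rest.Response;
import com.azure.core.util.Context;

public final class UpdateTranscriptionSyncSample {
    public static void main(String[] args) {
        // Placeholder resource connection string and an already established call connection id.
        CallAutomationClient client = new CallAutomationClientBuilder()
            .connectionString("<acs-connection-string>")
            .buildClient();
        CallMedia callMedia = client.getCallConnection("<call-connection-id>").getCallMedia();

        // The update parameters are now bundled in UpdateTranscriptionOptions instead of positional arguments.
        UpdateTranscriptionOptions options = new UpdateTranscriptionOptions()
            .setLocale("en-AU")
            .setSpeechRecognitionModelEndpointId("<custom-speech-model-endpoint-id>");

        Response<Void> response = callMedia.updateTranscriptionWithResponse(options, Context.NONE);
        System.out.println("Update transcription returned status " + response.getStatusCode());
    }
}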
File: CallMediaAsync.java
@@ -57,6 +57,7 @@
import com.azure.communication.callautomation.models.StopTranscriptionOptions;
import com.azure.communication.callautomation.models.TextSource;
import com.azure.communication.callautomation.models.UnholdOptions;
import com.azure.communication.callautomation.models.UpdateTranscriptionOptions;
import com.azure.communication.common.CommunicationIdentifier;
import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceMethod;
@@ -887,7 +888,7 @@ Mono<Response<Void>> stopTranscriptionWithResponseInternal(StopTranscriptionOptions
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> updateTranscription(String locale) {
return updateTranscriptionWithResponse(locale, null, null).then();
return updateTranscriptionWithResponse(new UpdateTranscriptionOptions().setLocale(locale)).then();
}

/**
@@ -899,42 +900,27 @@ public Mono<Void> updateTranscription(String locale) {
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> updateTranscription(String locale, String speechRecognitionModelEndpointId) {
return updateTranscriptionWithResponse(locale, speechRecognitionModelEndpointId, null).then();
return updateTranscriptionWithResponse(new UpdateTranscriptionOptions().setLocale(locale)
.setSpeechRecognitionModelEndpointId(speechRecognitionModelEndpointId)).then();
}

/**
* Updates transcription language
* @param speechRecognitionModelEndpointId Defines custom model endpoint.
* @param locale Defines new locale for transcription.
* @param operationContext operational context.
* @param options Options for the Update Transcription operation.
* @return Response for successful operation.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> updateTranscriptionWithResponse(String locale, String speechRecognitionModelEndpointId,
String operationContext) {
return withContext(context -> updateTranscriptionWithResponseInternal(locale, speechRecognitionModelEndpointId,
operationContext, context));
public Mono<Response<Void>> updateTranscriptionWithResponse(UpdateTranscriptionOptions options) {
return withContext(context -> updateTranscriptionWithResponseInternal(options, context));
}

Mono<Response<Void>> updateTranscriptionWithResponseInternal(String locale, Context context) {
Mono<Response<Void>> updateTranscriptionWithResponseInternal(UpdateTranscriptionOptions options, Context context) {
try {
context = context == null ? Context.NONE : context;
UpdateTranscriptionRequestInternal request = new UpdateTranscriptionRequestInternal();
request.setLocale(locale);
return contentsInternal.updateTranscriptionWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}

Mono<Response<Void>> updateTranscriptionWithResponseInternal(String locale, String speechRecognitionModelEndpointId,
String operationContext, Context context) {
try {
context = context == null ? Context.NONE : context;
UpdateTranscriptionRequestInternal request = new UpdateTranscriptionRequestInternal();
request.setLocale(locale);
request.setSpeechModelEndpointId(speechRecognitionModelEndpointId);
request.setOperationContext(operationContext);
request.setLocale(options.getLocale());
request.setSpeechModelEndpointId(options.getSpeechRecognitionModelEndpointId());
request.setOperationContext(options.getOperationContext());
return contentsInternal.updateTranscriptionWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
return monoError(logger, ex);
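The asynchronous overload likewise takes only the options object; the Context is supplied internally through withContext. A short sketch, assuming callMediaAsync was obtained as in the previous sample (client.getCallConnectionAsync(...).getCallMediaAsync(), which belongs to the existing API surface rather than this diff):

import com.azure.communication.callautomation.CallMediaAsync;
import com.azure.communication.callautomation.models.UpdateTranscriptionOptions;
import com.azure.core.http.rest.Response;
import reactor.core.publisher.Mono;

public final class UpdateTranscriptionAsyncSample {
    private UpdateTranscriptionAsyncSample() {
    }

    // Returns the HTTP status code of the update transcription response.
    static Mono<Integer> switchTranscriptionLocale(CallMediaAsync callMediaAsync) {
        UpdateTranscriptionOptions options = new UpdateTranscriptionOptions()
            .setLocale("ja-JP")
            .setSpeechRecognitionModelEndpointId("<custom-speech-model-endpoint-id>");

        // A single options argument replaces the removed (locale, endpointId, operationContext) parameters.
        return callMediaAsync.updateTranscriptionWithResponse(options)
            .map(Response::getStatusCode);
    }
}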
File: AudioDataContructorProxy.java
@@ -5,6 +5,7 @@

import com.azure.communication.callautomation.implementation.converters.AudioDataConverter;
import com.azure.communication.callautomation.models.AudioData;
import com.azure.core.util.BinaryData;

/**
* Helper class to access private values of {@link AudioData} across package boundaries.
@@ -35,7 +36,7 @@ public interface AudioDataContructorProxyAccessor {
* @param data The internal response.
* @return A new instance of {@link AudioData}.
*/
AudioData create(byte[] data);
AudioData create(BinaryData data);
}

/**
@@ -72,7 +73,7 @@ public static AudioData create(AudioDataConverter internalResponse) {
* @param data The audio data.
* @return A new instance of {@link AudioData}.
*/
public static AudioData create(byte[] data) {
public static AudioData create(BinaryData data) {
// This looks odd but is necessary, it is possible to engage the access helper before anywhere else in the
// application accesses AudioData which triggers the accessor to be configured. So, if the accessor
// is null this effectively pokes the class to set up the accessor.
File: AudioMetadataConverter.java
@@ -3,11 +3,11 @@

package com.azure.communication.callautomation.implementation.converters;

import java.io.IOException;

import com.azure.json.JsonReader;
import com.azure.json.JsonToken;

import java.io.IOException;

/** The Audio
* MetadataInternal model. */
public final class AudioMetadataConverter {
@@ -32,11 +32,6 @@ public final class AudioMetadataConverter {
*/
private int channels;

/*
* The length.
*/
private int length;

/**
* Get the mediaSubscriptionId property.
*
Expand Down Expand Up @@ -73,15 +68,6 @@ public int getChannels() {
return channels;
}

/**
* Get the length property.
*
* @return the length value.
*/
public int getLength() {
return length;
}

/**
* Reads an instance of AudioMetadataConverter from the JsonReader.
* <p>
@@ -107,8 +93,6 @@ public static AudioMetadataConverter fromJson(JsonReader jsonReader) throws IOException
converter.sampleRate = reader.getInt();
} else if ("channels".equals(fieldName)) {
converter.channels = reader.getInt();
} else if ("length".equals(fieldName)) {
converter.length = reader.getInt();
} else {
reader.skipChildren();
}
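Because the length field was removed from AudioMetadataConverter, a metadata payload that still carries "length" now simply falls through to skipChildren() during parsing. A small hedged sketch: the JSON literal is invented for illustration, only the sampleRate, channels, and length field names and the getChannels() accessor appear in this diff, and JsonProviders comes from azure-json.

import java.io.IOException;

import com.azure.communication.callautomation.implementation.converters.AudioMetadataConverter;
import com.azure.json.JsonProviders;
import com.azure.json.JsonReader;

public final class AudioMetadataParseSample {
    public static void main(String[] args) throws IOException {
        // "length" is no longer mapped to a field; fromJson skips it like any other unknown property.
        String json = "{\"sampleRate\":16000,\"channels\":1,\"length\":640}";
        try (JsonReader reader = JsonProviders.createReader(json)) {
            AudioMetadataConverter metadata = AudioMetadataConverter.fromJson(reader);
            System.out.println("Channels: " + metadata.getChannels());
        }
    }
}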
File: a model class in com.azure.communication.callautomation.models (name not shown)
@@ -3,8 +3,6 @@

package com.azure.communication.callautomation.models;

import java.util.HashMap;

import com.azure.core.annotation.Fluent;

/**
File: AudioChannelType.java (renamed from Channels.java)
@@ -3,25 +3,33 @@

package com.azure.communication.callautomation.models;

import com.azure.core.util.ExpandableStringEnum;
import java.util.Collection;

import com.azure.core.util.ExpandableStringEnum;

/**
* Specifies the audio channel type of the streaming audio.
*/
public final class Channels extends ExpandableStringEnum<Channels> {
public final class AudioChannelType extends ExpandableStringEnum<AudioChannelType> {
/**
* Display.
* Audio channel type.
*/
public static final AudioChannelType MONO = fromString("mono");

/**
* Display.
* Unknown Audio channel type.
*/
public static final Channels MONO = fromString("mono");
public static final AudioChannelType UNKNOWN = fromString("unknown");

/**
* Creates a new instance of Channels value.
*
* @deprecated Use the {@link #fromString(String)} factory method.
*/
@Deprecated
public Channels() {
public AudioChannelType() {
}

/**
@@ -30,16 +38,16 @@ public Channels() {
* @param name a name to look for.
* @return the corresponding Channels.
*/
public static Channels fromString(String name) {
return fromString(name, Channels.class);
public static AudioChannelType fromString(String name) {
return fromString(name, AudioChannelType.class);
}

/**
* Gets known Channels values.
*
* @return known Channels values.
*/
public static Collection<Channels> values() {
return values(Channels.class);
public static Collection<AudioChannelType> values() {
return values(AudioChannelType.class);
}
}
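Code that referenced the old Channels enum would switch to AudioChannelType; the "mono" and "unknown" values below are the ones defined in this diff, and fromString/values are the standard ExpandableStringEnum helpers.

import com.azure.communication.callautomation.models.AudioChannelType;

public final class AudioChannelTypeSample {
    public static void main(String[] args) {
        // Resolve a channel type from the wire value; unrecognised names still produce an instance.
        AudioChannelType channel = AudioChannelType.fromString("mono");
        System.out.println("Resolved channel type: " + channel);
        System.out.println("Known values: " + AudioChannelType.values());
    }
}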
File: AudioData.java
@@ -3,20 +3,20 @@

package com.azure.communication.callautomation.models;

import java.time.OffsetDateTime;
import java.time.format.DateTimeFormatter;

import com.azure.communication.callautomation.implementation.accesshelpers.AudioDataContructorProxy;
import com.azure.communication.callautomation.implementation.converters.AudioDataConverter;
import com.azure.communication.common.CommunicationIdentifier;

import java.time.OffsetDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Base64;
import com.azure.core.util.BinaryData;

/** The MediaStreamingAudio model. */
public final class AudioData extends StreamingData {
/*
* The audio data, encoded as a base64 byte.
* The audio data, represented as binary data.
*/
private final byte[] data;
private final BinaryData data;

/*
* The timestamp indicating when the media content was received by the bot, or if the bot is sending media,
@@ -42,7 +42,7 @@ public AudioData create(AudioDataConverter internalData) {
}

@Override
public AudioData create(byte[] data) {
public AudioData create(BinaryData data) {
return new AudioData(data);
}
});
@@ -54,7 +54,7 @@ public AudioData create(byte[] data) {
* @param internalData The AudioDataConverter.
*/
AudioData(AudioDataConverter internalData) {
this.data = Base64.getDecoder().decode(internalData.getData());
this.data = BinaryData.fromString(internalData.getData());
this.timestamp = OffsetDateTime.parse(internalData.getTimestamp(), DateTimeFormatter.ISO_OFFSET_DATE_TIME);
if (internalData.getParticipantRawID() != null && !internalData.getParticipantRawID().isEmpty()) {
this.participant = CommunicationIdentifier.fromRawId(internalData.getParticipantRawID());
@@ -79,20 +79,20 @@ public AudioData() {
*
* @param data The audio data.
*/
AudioData(byte[] data) {
AudioData(BinaryData data) {
this.data = data;
this.timestamp = null;
this.participant = null;
this.silent = false;
}

/**
* The audio data, encoded as a base64 byte.
* The audio data, represented as binary data.
* Get the data property.
*
* @return the data value.
*/
public byte[] getData() {
public BinaryData getData() {
return data;
}

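With this change AudioData.getData() returns BinaryData rather than byte[]. A hedged sketch of adapting a consumer: the onAudioFrame handler and how it gets invoked are hypothetical, and BinaryData.toBytes() is the standard azure-core accessor for the underlying bytes.

import com.azure.communication.callautomation.models.AudioData;
import com.azure.core.util.BinaryData;

public final class AudioDataHandler {
    private AudioDataHandler() {
    }

    // Hypothetical callback invoked with each AudioData frame received from media streaming.
    static void onAudioFrame(AudioData audioData) {
        BinaryData data = audioData.getData();

        // Convert back to raw bytes where a downstream audio pipeline still expects byte[].
        byte[] payload = data.toBytes();
        System.out.println("Received " + payload.length + " bytes of audio payload");
    }
}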