From 509aecd4ef57f4f83cb3bde5381fafd9d52a564e Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Fri, 20 Dec 2024 00:24:29 -0800 Subject: [PATCH] feat(all): auto-regenerate discovery clients (#2933) --- apigee/v1/apigee-api.json | 33 +- apigee/v1/apigee-gen.go | 57 +++ .../v1/contactcenterinsights-api.json | 145 ++++++- .../v1/contactcenterinsights-gen.go | 243 +++++++++++ css/v1/css-api.json | 36 +- css/v1/css-gen.go | 121 ++++++ dialogflow/v2/dialogflow-api.json | 235 ++++++++++- dialogflow/v2/dialogflow-gen.go | 374 +++++++++++++++++ dialogflow/v2beta1/dialogflow-api.json | 185 +++++++- dialogflow/v2beta1/dialogflow-gen.go | 347 ++++++++++++++- dialogflow/v3/dialogflow-api.json | 231 +++++++++- dialogflow/v3/dialogflow-gen.go | 368 ++++++++++++++++ dialogflow/v3beta1/dialogflow-api.json | 254 ++++++++++- dialogflow/v3beta1/dialogflow-gen.go | 396 ++++++++++++++++++ firebase/v1beta1/firebase-api.json | 6 +- firebase/v1beta1/firebase-gen.go | 6 +- firebaseml/v2beta/firebaseml-api.json | 5 +- firebaseml/v2beta/firebaseml-gen.go | 2 +- jobs/v4/jobs-api.json | 42 +- jobs/v4/jobs-gen.go | 118 +++--- .../products_v1beta/merchantapi-api.json | 39 +- .../products_v1beta/merchantapi-gen.go | 54 +++ .../v1alpha1/migrationcenter-api.json | 36 +- .../v1alpha1/migrationcenter-gen.go | 48 +++ texttospeech/v1/texttospeech-api.json | 8 +- texttospeech/v1/texttospeech-gen.go | 3 + texttospeech/v1beta1/texttospeech-api.json | 8 +- texttospeech/v1beta1/texttospeech-gen.go | 3 + 28 files changed, 3308 insertions(+), 95 deletions(-) diff --git a/apigee/v1/apigee-api.json b/apigee/v1/apigee-api.json index f77cd421b89..0311b5a45c5 100644 --- a/apigee/v1/apigee-api.json +++ b/apigee/v1/apigee-api.json @@ -10385,7 +10385,7 @@ } } }, - "revision": "20241210", + "revision": "20241213", "rootUrl": "https://apigee.googleapis.com/", "schemas": { "EdgeConfigstoreBundleBadBundle": { @@ -13390,6 +13390,10 @@ ], "type": "string" }, + "clientIpResolutionConfig": { + "$ref": "GoogleCloudApigeeV1EnvironmentClientIPResolutionConfig", + "description": "Optional. The algorithm to resolve IP. This will affect Analytics, API Security, and other features that use the client ip. To remove a client ip resolution config, update the field to an empty value. Example: '{ \"clientIpResolutionConfig\" = {} }' For more information, see: https://cloud.google.com/apigee/docs/api-platform/system-administration/client-ip-resolution." + }, "createdAt": { "description": "Output only. Creation time of this environment as milliseconds since epoch.", "format": "int64", @@ -13481,6 +13485,33 @@ }, "type": "object" }, + "GoogleCloudApigeeV1EnvironmentClientIPResolutionConfig": { + "description": "Configuration for resolving the client ip.", + "id": "GoogleCloudApigeeV1EnvironmentClientIPResolutionConfig", + "properties": { + "headerIndexAlgorithm": { + "$ref": "GoogleCloudApigeeV1EnvironmentClientIPResolutionConfigHeaderIndexAlgorithm", + "description": "Resolves the client ip based on a custom header." + } + }, + "type": "object" + }, + "GoogleCloudApigeeV1EnvironmentClientIPResolutionConfigHeaderIndexAlgorithm": { + "description": "Resolves the client ip based on a custom header.", + "id": "GoogleCloudApigeeV1EnvironmentClientIPResolutionConfigHeaderIndexAlgorithm", + "properties": { + "ipHeaderIndex": { + "description": "Required. The index of the ip in the header. 
Positive indices 0, 1, 2, 3 choose indices from the left (first ips); negative indices -1, -2, -3 choose indices from the right (last ips).",
+          "format": "int32",
+          "type": "integer"
+        },
+        "ipHeaderName": {
+          "description": "Required. The name of the header to extract the client ip from. Currently, only the X-Forwarded-For header is supported.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "GoogleCloudApigeeV1EnvironmentConfig": {
       "id": "GoogleCloudApigeeV1EnvironmentConfig",
       "properties": {
diff --git a/apigee/v1/apigee-gen.go b/apigee/v1/apigee-gen.go
index db16f15a668..1dcd1210e6a 100644
--- a/apigee/v1/apigee-gen.go
+++ b/apigee/v1/apigee-gen.go
@@ -4617,6 +4617,12 @@ type GoogleCloudApigeeV1Environment struct {
 	// is handled by Apigee. This type only works with the ARCHIVE deployment type
 	// and cannot be combined with the PROXY deployment type.
 	ApiProxyType string `json:"apiProxyType,omitempty"`
+	// ClientIpResolutionConfig: Optional. The algorithm to resolve IP. This will
+	// affect Analytics, API Security, and other features that use the client ip.
+	// To remove a client ip resolution config, update the field to an empty value.
+	// Example: '{ "clientIpResolutionConfig" = {} }' For more information, see:
+	// https://cloud.google.com/apigee/docs/api-platform/system-administration/client-ip-resolution.
+	ClientIpResolutionConfig *GoogleCloudApigeeV1EnvironmentClientIPResolutionConfig `json:"clientIpResolutionConfig,omitempty"`
 	// CreatedAt: Output only. Creation time of this environment as milliseconds
 	// since epoch.
 	CreatedAt int64 `json:"createdAt,omitempty,string"`
@@ -4711,6 +4717,57 @@ func (s GoogleCloudApigeeV1Environment) MarshalJSON() ([]byte, error) {
 	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
+// GoogleCloudApigeeV1EnvironmentClientIPResolutionConfig: Configuration for
+// resolving the client ip.
+type GoogleCloudApigeeV1EnvironmentClientIPResolutionConfig struct {
+	// HeaderIndexAlgorithm: Resolves the client ip based on a custom header.
+	HeaderIndexAlgorithm *GoogleCloudApigeeV1EnvironmentClientIPResolutionConfigHeaderIndexAlgorithm `json:"headerIndexAlgorithm,omitempty"`
+	// ForceSendFields is a list of field names (e.g. "HeaderIndexAlgorithm") to
+	// unconditionally include in API requests. By default, fields with empty or
+	// default values are omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+	// details.
+	ForceSendFields []string `json:"-"`
+	// NullFields is a list of field names (e.g. "HeaderIndexAlgorithm") to include
+	// in API requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+	NullFields []string `json:"-"`
+}
+
+func (s GoogleCloudApigeeV1EnvironmentClientIPResolutionConfig) MarshalJSON() ([]byte, error) {
+	type NoMethod GoogleCloudApigeeV1EnvironmentClientIPResolutionConfig
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// GoogleCloudApigeeV1EnvironmentClientIPResolutionConfigHeaderIndexAlgorithm:
+// Resolves the client ip based on a custom header.
+type GoogleCloudApigeeV1EnvironmentClientIPResolutionConfigHeaderIndexAlgorithm struct {
+	// IpHeaderIndex: Required. The index of the ip in the header. Positive indices
+	// 0, 1, 2, 3 choose indices from the left (first ips); negative indices -1,
+	// -2, -3 choose indices from the right (last ips).
+	IpHeaderIndex int64 `json:"ipHeaderIndex,omitempty"`
+	// IpHeaderName: Required. The name of the header to extract the client ip
+	// from. Currently, only the X-Forwarded-For header is supported.
+	IpHeaderName string `json:"ipHeaderName,omitempty"`
+	// ForceSendFields is a list of field names (e.g. "IpHeaderIndex") to
+	// unconditionally include in API requests. By default, fields with empty or
+	// default values are omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+	// details.
+	ForceSendFields []string `json:"-"`
+	// NullFields is a list of field names (e.g. "IpHeaderIndex") to include in API
+	// requests with the JSON null value. By default, fields with empty values are
+	// omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+	NullFields []string `json:"-"`
+}
+
+func (s GoogleCloudApigeeV1EnvironmentClientIPResolutionConfigHeaderIndexAlgorithm) MarshalJSON() ([]byte, error) {
+	type NoMethod GoogleCloudApigeeV1EnvironmentClientIPResolutionConfigHeaderIndexAlgorithm
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
 type GoogleCloudApigeeV1EnvironmentConfig struct {
 	// AddonsConfig: The latest runtime configurations for add-ons.
 	AddonsConfig *GoogleCloudApigeeV1RuntimeAddonsConfig `json:"addonsConfig,omitempty"`
diff --git a/contactcenterinsights/v1/contactcenterinsights-api.json b/contactcenterinsights/v1/contactcenterinsights-api.json
index 91b82b6a2a0..aca3ab2515a 100644
--- a/contactcenterinsights/v1/contactcenterinsights-api.json
+++ b/contactcenterinsights/v1/contactcenterinsights-api.json
@@ -1519,6 +1519,34 @@
     "resources": {
       "issues": {
         "methods": {
+          "create": {
+            "description": "Creates an issue.",
+            "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/issueModels/{issueModelsId}/issues",
+            "httpMethod": "POST",
+            "id": "contactcenterinsights.projects.locations.issueModels.issues.create",
+            "parameterOrder": [
+              "parent"
+            ],
+            "parameters": {
+              "parent": {
+                "description": "Required. The parent resource of the issue.",
+                "location": "path",
+                "pattern": "^projects/[^/]+/locations/[^/]+/issueModels/[^/]+$",
+                "required": true,
+                "type": "string"
+              }
+            },
+            "path": "v1/{+parent}/issues",
+            "request": {
+              "$ref": "GoogleCloudContactcenterinsightsV1Issue"
+            },
+            "response": {
+              "$ref": "GoogleLongrunningOperation"
+            },
+            "scopes": [
+              "https://www.googleapis.com/auth/cloud-platform"
+            ]
+          },
           "delete": {
             "description": "Deletes an issue.",
             "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/issueModels/{issueModelsId}/issues/{issuesId}",
@@ -2581,7 +2609,7 @@
       }
     }
   },
-  "revision": "20241209",
+  "revision": "20241218",
   "rootUrl": "https://contactcenterinsights.googleapis.com/",
   "schemas": {
     "GoogleCloudContactcenterinsightsV1Analysis": {
@@ -3915,6 +3943,29 @@
       },
       "type": "object"
     },
+    "GoogleCloudContactcenterinsightsV1CreateIssueMetadata": {
+      "description": "Metadata for creating an issue.",
+      "id": "GoogleCloudContactcenterinsightsV1CreateIssueMetadata",
+      "properties": {
+        "createTime": {
+          "description": "Output only. The time the operation was created.",
+          "format": "google-datetime",
+          "readOnly": true,
+          "type": "string"
+        },
+        "endTime": {
+          "description": "Output only. 
The time the operation finished running.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "request": { + "$ref": "GoogleCloudContactcenterinsightsV1CreateIssueRequest", + "description": "The original request for creation." + } + }, + "type": "object" + }, "GoogleCloudContactcenterinsightsV1CreateIssueModelMetadata": { "description": "Metadata for creating an issue model.", "id": "GoogleCloudContactcenterinsightsV1CreateIssueModelMetadata", @@ -3953,6 +4004,21 @@ }, "type": "object" }, + "GoogleCloudContactcenterinsightsV1CreateIssueRequest": { + "description": "The request to create an issue.", + "id": "GoogleCloudContactcenterinsightsV1CreateIssueRequest", + "properties": { + "issue": { + "$ref": "GoogleCloudContactcenterinsightsV1Issue", + "description": "Required. The values for the new issue." + }, + "parent": { + "description": "Required. The parent resource of the issue.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudContactcenterinsightsV1DeleteIssueModelMetadata": { "description": "Metadata for deleting an issue model.", "id": "GoogleCloudContactcenterinsightsV1DeleteIssueModelMetadata", @@ -7568,6 +7634,29 @@ }, "type": "object" }, + "GoogleCloudContactcenterinsightsV1alpha1CreateIssueMetadata": { + "description": "Metadata for creating an issue.", + "id": "GoogleCloudContactcenterinsightsV1alpha1CreateIssueMetadata", + "properties": { + "createTime": { + "description": "Output only. The time the operation was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "endTime": { + "description": "Output only. The time the operation finished running.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "request": { + "$ref": "GoogleCloudContactcenterinsightsV1alpha1CreateIssueRequest", + "description": "The original request for creation." + } + }, + "type": "object" + }, "GoogleCloudContactcenterinsightsV1alpha1CreateIssueModelMetadata": { "description": "Metadata for creating an issue model.", "id": "GoogleCloudContactcenterinsightsV1alpha1CreateIssueModelMetadata", @@ -7606,6 +7695,21 @@ }, "type": "object" }, + "GoogleCloudContactcenterinsightsV1alpha1CreateIssueRequest": { + "description": "The request to create an issue.", + "id": "GoogleCloudContactcenterinsightsV1alpha1CreateIssueRequest", + "properties": { + "issue": { + "$ref": "GoogleCloudContactcenterinsightsV1alpha1Issue", + "description": "Required. The values for the new issue." + }, + "parent": { + "description": "Required. The parent resource of the issue.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudContactcenterinsightsV1alpha1DeleteIssueModelMetadata": { "description": "Metadata for deleting an issue model.", "id": "GoogleCloudContactcenterinsightsV1alpha1DeleteIssueModelMetadata", @@ -8523,6 +8627,45 @@ "properties": {}, "type": "object" }, + "GoogleCloudContactcenterinsightsV1alpha1Issue": { + "description": "The issue resource.", + "id": "GoogleCloudContactcenterinsightsV1alpha1Issue", + "properties": { + "createTime": { + "description": "Output only. The time at which this issue was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "displayDescription": { + "description": "Representative description of the issue.", + "type": "string" + }, + "displayName": { + "description": "The representative name for the issue.", + "type": "string" + }, + "name": { + "description": "Immutable. The resource name of the issue. 
Format: projects/{project}/locations/{location}/issueModels/{issue_model}/issues/{issue}", + "type": "string" + }, + "sampleUtterances": { + "description": "Output only. Resource names of the sample representative utterances that match to this issue.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + }, + "updateTime": { + "description": "Output only. The most recent time that this issue was updated.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudContactcenterinsightsV1alpha1IssueAssignment": { "description": "Information about the issue.", "id": "GoogleCloudContactcenterinsightsV1alpha1IssueAssignment", diff --git a/contactcenterinsights/v1/contactcenterinsights-gen.go b/contactcenterinsights/v1/contactcenterinsights-gen.go index 266f25c879c..59cb7225f21 100644 --- a/contactcenterinsights/v1/contactcenterinsights-gen.go +++ b/contactcenterinsights/v1/contactcenterinsights-gen.go @@ -1945,6 +1945,33 @@ func (s GoogleCloudContactcenterinsightsV1CreateAnalysisOperationMetadata) Marsh return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudContactcenterinsightsV1CreateIssueMetadata: Metadata for creating +// an issue. +type GoogleCloudContactcenterinsightsV1CreateIssueMetadata struct { + // CreateTime: Output only. The time the operation was created. + CreateTime string `json:"createTime,omitempty"` + // EndTime: Output only. The time the operation finished running. + EndTime string `json:"endTime,omitempty"` + // Request: The original request for creation. + Request *GoogleCloudContactcenterinsightsV1CreateIssueRequest `json:"request,omitempty"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudContactcenterinsightsV1CreateIssueMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudContactcenterinsightsV1CreateIssueMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // GoogleCloudContactcenterinsightsV1CreateIssueModelMetadata: Metadata for // creating an issue model. type GoogleCloudContactcenterinsightsV1CreateIssueModelMetadata struct { @@ -1997,6 +2024,31 @@ func (s GoogleCloudContactcenterinsightsV1CreateIssueModelRequest) MarshalJSON() return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudContactcenterinsightsV1CreateIssueRequest: The request to create +// an issue. +type GoogleCloudContactcenterinsightsV1CreateIssueRequest struct { + // Issue: Required. The values for the new issue. + Issue *GoogleCloudContactcenterinsightsV1Issue `json:"issue,omitempty"` + // Parent: Required. The parent resource of the issue. + Parent string `json:"parent,omitempty"` + // ForceSendFields is a list of field names (e.g. "Issue") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Issue") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudContactcenterinsightsV1CreateIssueRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudContactcenterinsightsV1CreateIssueRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // GoogleCloudContactcenterinsightsV1DeleteIssueModelMetadata: Metadata for // deleting an issue model. type GoogleCloudContactcenterinsightsV1DeleteIssueModelMetadata struct { @@ -6717,6 +6769,33 @@ func (s GoogleCloudContactcenterinsightsV1alpha1CreateAnalysisOperationMetadata) return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudContactcenterinsightsV1alpha1CreateIssueMetadata: Metadata for +// creating an issue. +type GoogleCloudContactcenterinsightsV1alpha1CreateIssueMetadata struct { + // CreateTime: Output only. The time the operation was created. + CreateTime string `json:"createTime,omitempty"` + // EndTime: Output only. The time the operation finished running. + EndTime string `json:"endTime,omitempty"` + // Request: The original request for creation. + Request *GoogleCloudContactcenterinsightsV1alpha1CreateIssueRequest `json:"request,omitempty"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudContactcenterinsightsV1alpha1CreateIssueMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudContactcenterinsightsV1alpha1CreateIssueMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // GoogleCloudContactcenterinsightsV1alpha1CreateIssueModelMetadata: Metadata // for creating an issue model. type GoogleCloudContactcenterinsightsV1alpha1CreateIssueModelMetadata struct { @@ -6769,6 +6848,31 @@ func (s GoogleCloudContactcenterinsightsV1alpha1CreateIssueModelRequest) Marshal return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudContactcenterinsightsV1alpha1CreateIssueRequest: The request to +// create an issue. +type GoogleCloudContactcenterinsightsV1alpha1CreateIssueRequest struct { + // Issue: Required. The values for the new issue. + Issue *GoogleCloudContactcenterinsightsV1alpha1Issue `json:"issue,omitempty"` + // Parent: Required. The parent resource of the issue. + Parent string `json:"parent,omitempty"` + // ForceSendFields is a list of field names (e.g. "Issue") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Issue") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudContactcenterinsightsV1alpha1CreateIssueRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudContactcenterinsightsV1alpha1CreateIssueRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // GoogleCloudContactcenterinsightsV1alpha1DeleteIssueModelMetadata: Metadata // for deleting an issue model. type GoogleCloudContactcenterinsightsV1alpha1DeleteIssueModelMetadata struct { @@ -8031,6 +8135,41 @@ func (s GoogleCloudContactcenterinsightsV1alpha1IntentMatchData) MarshalJSON() ( type GoogleCloudContactcenterinsightsV1alpha1InterruptionData struct { } +// GoogleCloudContactcenterinsightsV1alpha1Issue: The issue resource. +type GoogleCloudContactcenterinsightsV1alpha1Issue struct { + // CreateTime: Output only. The time at which this issue was created. + CreateTime string `json:"createTime,omitempty"` + // DisplayDescription: Representative description of the issue. + DisplayDescription string `json:"displayDescription,omitempty"` + // DisplayName: The representative name for the issue. + DisplayName string `json:"displayName,omitempty"` + // Name: Immutable. The resource name of the issue. Format: + // projects/{project}/locations/{location}/issueModels/{issue_model}/issues/{iss + // ue} + Name string `json:"name,omitempty"` + // SampleUtterances: Output only. Resource names of the sample representative + // utterances that match to this issue. + SampleUtterances []string `json:"sampleUtterances,omitempty"` + // UpdateTime: Output only. The most recent time that this issue was updated. + UpdateTime string `json:"updateTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudContactcenterinsightsV1alpha1Issue) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudContactcenterinsightsV1alpha1Issue + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // GoogleCloudContactcenterinsightsV1alpha1IssueAssignment: Information about // the issue. type GoogleCloudContactcenterinsightsV1alpha1IssueAssignment struct { @@ -14594,6 +14733,110 @@ func (c *ProjectsLocationsIssueModelsUndeployCall) Do(opts ...googleapi.CallOpti return ret, nil } +type ProjectsLocationsIssueModelsIssuesCreateCall struct { + s *Service + parent string + googlecloudcontactcenterinsightsv1issue *GoogleCloudContactcenterinsightsV1Issue + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates an issue. 
+//
+// - parent: The parent resource of the issue.
+func (r *ProjectsLocationsIssueModelsIssuesService) Create(parent string, googlecloudcontactcenterinsightsv1issue *GoogleCloudContactcenterinsightsV1Issue) *ProjectsLocationsIssueModelsIssuesCreateCall {
+	c := &ProjectsLocationsIssueModelsIssuesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.parent = parent
+	c.googlecloudcontactcenterinsightsv1issue = googlecloudcontactcenterinsightsv1issue
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsLocationsIssueModelsIssuesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsIssueModelsIssuesCreateCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsLocationsIssueModelsIssuesCreateCall) Context(ctx context.Context) *ProjectsLocationsIssueModelsIssuesCreateCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsLocationsIssueModelsIssuesCreateCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ProjectsLocationsIssueModelsIssuesCreateCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.googlecloudcontactcenterinsightsv1issue)
+	if err != nil {
+		return nil, err
+	}
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/issues")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("POST", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"parent": c.parent,
+	})
+	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "contactcenterinsights.projects.locations.issueModels.issues.create", "request", internallog.HTTPRequest(req, body.Bytes()))
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "contactcenterinsights.projects.locations.issueModels.issues.create" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ProjectsLocationsIssueModelsIssuesCreateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
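+	// A 304 Not Modified response carries no operation body to decode, so it
+	// is wrapped below as a *googleapi.Error rather than a decoded response.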
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { + return nil, err + } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "contactcenterinsights.projects.locations.issueModels.issues.create", "response", internallog.HTTPResponse(res, b)) + return ret, nil +} + type ProjectsLocationsIssueModelsIssuesDeleteCall struct { s *Service name string diff --git a/css/v1/css-api.json b/css/v1/css-api.json index 04844d8e685..226d97f3d4e 100644 --- a/css/v1/css-api.json +++ b/css/v1/css-api.json @@ -281,6 +281,40 @@ "scopes": [ "https://www.googleapis.com/auth/content" ] + }, + "patch": { + "description": "Updates the existing Css Product input in your CSS Center account. After inserting, updating, or deleting a CSS Product input, it may take several minutes before the processed Css Product can be retrieved.", + "flatPath": "v1/accounts/{accountsId}/cssProductInputs/{cssProductInputsId}", + "httpMethod": "PATCH", + "id": "css.accounts.cssProductInputs.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the CSS Product input. Format: `accounts/{account}/cssProductInputs/{css_product_input}`", + "location": "path", + "pattern": "^accounts/[^/]+/cssProductInputs/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "The list of CSS product attributes to be updated. If the update mask is omitted, then it is treated as implied field mask equivalent to all fields that are populated (have a non-empty value). Attributes specified in the update mask without a value specified in the body will be deleted from the CSS product. Update mask can only be specified for top level fields in attributes and custom attributes. To specify the update mask for custom attributes you need to add the `custom_attribute.` prefix. Providing special \"*\" value for full CSS product replacement is not supported.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "CssProductInput" + }, + "response": { + "$ref": "CssProductInput" + }, + "scopes": [ + "https://www.googleapis.com/auth/content" + ] } } }, @@ -473,7 +507,7 @@ } } }, - "revision": "20241210", + "revision": "20241217", "rootUrl": "https://css.googleapis.com/", "schemas": { "Account": { diff --git a/css/v1/css-gen.go b/css/v1/css-gen.go index f53962c3de4..3c1f50bef0f 100644 --- a/css/v1/css-gen.go +++ b/css/v1/css-gen.go @@ -1663,6 +1663,127 @@ func (c *AccountsCssProductInputsInsertCall) Do(opts ...googleapi.CallOption) (* return ret, nil } +type AccountsCssProductInputsPatchCall struct { + s *Service + name string + cssproductinput *CssProductInput + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the existing Css Product input in your CSS Center account. 
+// After inserting, updating, or deleting a CSS Product input, it may take +// several minutes before the processed Css Product can be retrieved. +// +// - name: The name of the CSS Product input. Format: +// `accounts/{account}/cssProductInputs/{css_product_input}`. +func (r *AccountsCssProductInputsService) Patch(name string, cssproductinput *CssProductInput) *AccountsCssProductInputsPatchCall { + c := &AccountsCssProductInputsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.cssproductinput = cssproductinput + return c +} + +// UpdateMask sets the optional parameter "updateMask": The list of CSS product +// attributes to be updated. If the update mask is omitted, then it is treated +// as implied field mask equivalent to all fields that are populated (have a +// non-empty value). Attributes specified in the update mask without a value +// specified in the body will be deleted from the CSS product. Update mask can +// only be specified for top level fields in attributes and custom attributes. +// To specify the update mask for custom attributes you need to add the +// `custom_attribute.` prefix. Providing special "*" value for full CSS product +// replacement is not supported. +func (c *AccountsCssProductInputsPatchCall) UpdateMask(updateMask string) *AccountsCssProductInputsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *AccountsCssProductInputsPatchCall) Fields(s ...googleapi.Field) *AccountsCssProductInputsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *AccountsCssProductInputsPatchCall) Context(ctx context.Context) *AccountsCssProductInputsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *AccountsCssProductInputsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsCssProductInputsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.cssproductinput) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "css.accounts.cssProductInputs.patch", "request", internallog.HTTPRequest(req, body.Bytes())) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "css.accounts.cssProductInputs.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *CssProductInput.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *AccountsCssProductInputsPatchCall) Do(opts ...googleapi.CallOption) (*CssProductInput, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &CssProductInput{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { + return nil, err + } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "css.accounts.cssProductInputs.patch", "response", internallog.HTTPResponse(res, b)) + return ret, nil +} + type AccountsCssProductsGetCall struct { s *Service name string diff --git a/dialogflow/v2/dialogflow-api.json b/dialogflow/v2/dialogflow-api.json index 5faa83dd310..9c8ede6d6de 100644 --- a/dialogflow/v2/dialogflow-api.json +++ b/dialogflow/v2/dialogflow-api.json @@ -8714,7 +8714,7 @@ } } }, - "revision": "20241212", + "revision": "20241216", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { @@ -14366,6 +14366,10 @@ "$ref": "GoogleCloudDialogflowV2Message", "description": "Payload of NEW_MESSAGE event." }, + "newRecognitionResultPayload": { + "$ref": "GoogleCloudDialogflowV2StreamingRecognitionResult", + "description": "Payload of NEW_RECOGNITION_RESULT event." + }, "type": { "description": "The type of the event that this notification refers to.", "enum": [ @@ -14374,6 +14378,7 @@ "CONVERSATION_FINISHED", "HUMAN_INTERVENTION_NEEDED", "NEW_MESSAGE", + "NEW_RECOGNITION_RESULT", "UNRECOVERABLE_ERROR" ], "enumDescriptions": [ @@ -14382,6 +14387,7 @@ "An existing conversation has closed. This is fired when a telephone call is terminated, or a conversation is closed via the API.", "An existing conversation has received notification from Dialogflow that human intervention is required.", "An existing conversation has received a new message, either from API or telephony. It is configured in ConversationProfile.new_message_event_notification_config", + "An existing conversation has received a new speech recognition result. This is mainly for delivering intermediate transcripts. The notification is configured in ConversationProfile.new_recognition_event_notification_config.", "Unrecoverable error during a telephone call. In general non-recoverable errors only occur if something was misconfigured in the ConversationProfile corresponding to the call. After a non-recoverable error, Dialogflow may stop responding. We don't fire this event: * in an API call because we can directly return the error, or, * when we can recover from an error." ], "type": "string" @@ -14566,6 +14572,10 @@ "$ref": "GoogleCloudDialogflowV2NotificationConfig", "description": "Configuration for publishing new message events. Event will be sent in format of ConversationEvent" }, + "newRecognitionResultNotificationConfig": { + "$ref": "GoogleCloudDialogflowV2NotificationConfig", + "description": "Optional. Configuration for publishing transcription intermediate results. Event will be sent in format of ConversationEvent. 
If configured, the following information will be populated as ConversationEvent Pub/Sub message attributes: - \"participant_id\" - \"participant_role\" - \"message_id\""
+        },
         "notificationConfig": {
           "$ref": "GoogleCloudDialogflowV2NotificationConfig",
           "description": "Configuration for publishing conversation lifecycle events."
         },
@@ -18824,6 +18834,82 @@
       },
       "type": "object"
     },
+    "GoogleCloudDialogflowV2SpeechWordInfo": {
+      "description": "Information for a word recognized by the speech recognizer.",
+      "id": "GoogleCloudDialogflowV2SpeechWordInfo",
+      "properties": {
+        "confidence": {
+          "description": "The Speech confidence between 0.0 and 1.0 for this word. A higher number indicates an estimated greater likelihood that the recognized word is correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is not guaranteed to be fully stable over time for the same audio input. Users should also not rely on it to always be provided.",
+          "format": "float",
+          "type": "number"
+        },
+        "endOffset": {
+          "description": "Time offset relative to the beginning of the audio that corresponds to the end of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "startOffset": {
+          "description": "Time offset relative to the beginning of the audio that corresponds to the start of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "word": {
+          "description": "The word this info is for.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleCloudDialogflowV2StreamingRecognitionResult": {
+      "description": "Contains a speech recognition result corresponding to a portion of the audio that is currently being processed or an indication that this is the end of the single requested utterance. While end-user audio is being processed, Dialogflow sends a series of results. Each result may contain a `transcript` value. A transcript represents a portion of the utterance. While the recognizer is processing audio, transcript values may be interim values or finalized values. Once a transcript is finalized, the `is_final` value is set to true and processing continues for the next transcript. If `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was true, and the recognizer has completed processing audio, the `message_type` value is set to `END_OF_SINGLE_UTTERANCE` and the following (last) result contains the last finalized transcript. The complete end-user utterance is determined by concatenating the finalized transcript values received for the series of results. In the following example, single utterance is enabled. In the case where single utterance is not enabled, result 7 would not occur. ``` Num | transcript | message_type | is_final --- | ----------------------- | ----------------------- | -------- 1 | \"tube\" | TRANSCRIPT | false 2 | \"to be a\" | TRANSCRIPT | false 3 | \"to be\" | TRANSCRIPT | false 4 | \"to be or not to be\" | TRANSCRIPT | true 5 | \"that's\" | TRANSCRIPT | false 6 | \"that is\" | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | \" that is the question\" | TRANSCRIPT | true ``` Concatenating the finalized transcripts with `is_final` set to true, the complete utterance becomes \"to be or not to be that is the question\".",
+      "id": "GoogleCloudDialogflowV2StreamingRecognitionResult",
+      "properties": {
+        "confidence": {
+          "description": "The Speech confidence between 0.0 and 1.0 for the current portion of audio. A higher number indicates an estimated greater likelihood that the recognized words are correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is typically only provided if `is_final` is true and you should not rely on it being accurate or even set.",
+          "format": "float",
+          "type": "number"
+        },
+        "isFinal": {
+          "description": "If `false`, the `StreamingRecognitionResult` represents an interim result that may change. If `true`, the recognizer will not return any further hypotheses about this piece of the audio. May only be populated for `message_type` = `TRANSCRIPT`.",
+          "type": "boolean"
+        },
+        "languageCode": {
+          "description": "Detected language code for the transcript.",
+          "type": "string"
+        },
+        "messageType": {
+          "description": "Type of the result message.",
+          "enum": [
+            "MESSAGE_TYPE_UNSPECIFIED",
+            "TRANSCRIPT",
+            "END_OF_SINGLE_UTTERANCE"
+          ],
+          "enumDescriptions": [
+            "Not specified. Should never be used.",
+            "Message contains a (possibly partial) transcript.",
+            "This event indicates that the server has detected the end of the user's speech utterance and expects no additional inputs. Therefore, the server will not process additional audio (although it may subsequently return additional results). The client should stop sending additional audio data, half-close the gRPC connection, and wait for any additional results until the server closes the gRPC connection. This message is only sent if `single_utterance` was set to `true`, and is not used otherwise."
+          ],
+          "type": "string"
+        },
+        "speechEndOffset": {
+          "description": "Time offset of the end of this Speech recognition result relative to the beginning of the audio. Only populated for `message_type` = `TRANSCRIPT`.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "speechWordInfo": {
+          "description": "Word-specific information for the words recognized by Speech in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and [InputAudioConfig.enable_word_info] is set.",
+          "items": {
+            "$ref": "GoogleCloudDialogflowV2SpeechWordInfo"
+          },
+          "type": "array"
+        },
+        "transcript": {
+          "description": "Transcript text representing the words that the user spoke. Populated if and only if `message_type` = `TRANSCRIPT`.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "GoogleCloudDialogflowV2SuggestArticlesRequest": {
       "description": "The request message for Participants.SuggestArticles.",
       "id": "GoogleCloudDialogflowV2SuggestArticlesRequest",
@@ -19727,6 +19813,10 @@
           "$ref": "GoogleCloudDialogflowV2beta1Message",
           "description": "Payload of NEW_MESSAGE event."
        },
+        "newRecognitionResultPayload": {
+          "$ref": "GoogleCloudDialogflowV2beta1StreamingRecognitionResult",
+          "description": "Payload of NEW_RECOGNITION_RESULT event."
+        },
         "type": {
           "description": "Required. The type of the event that this notification refers to.",
           "enum": [
             "TYPE_UNSPECIFIED",
             "CONVERSATION_STARTED",
             "CONVERSATION_FINISHED",
             "HUMAN_INTERVENTION_NEEDED",
             "NEW_MESSAGE",
+            "NEW_RECOGNITION_RESULT",
             "UNRECOVERABLE_ERROR"
           ],
           "enumDescriptions": [
             "Type not set.",
             "A new conversation has been opened. This is fired when a telephone call is answered, or a conversation is created via the API.",
             "An existing conversation has closed. This is fired when a telephone call is terminated, or a conversation is closed via the API.",
             "An existing conversation has received notification from Dialogflow that human intervention is required.",
             "An existing conversation has received a new message, either from API or telephony. It is configured in ConversationProfile.new_message_event_notification_config",
+            "An existing conversation has received a new speech recognition result. This is mainly for delivering intermediate transcripts. The notification is configured in ConversationProfile.new_recognition_event_notification_config.",
             "Unrecoverable error during a telephone call. In general non-recoverable errors only occur if something was misconfigured in the ConversationProfile corresponding to the call. After a non-recoverable error, Dialogflow may stop responding. We don't fire this event: * in an API call because we can directly return the error, or, * when we can recover from an error."
           ],
           "type": "string"
         },
@@ -21890,6 +21982,95 @@
       },
       "type": "object"
     },
+    "GoogleCloudDialogflowV2beta1SpeechWordInfo": {
+      "description": "Information for a word recognized by the speech recognizer.",
+      "id": "GoogleCloudDialogflowV2beta1SpeechWordInfo",
+      "properties": {
+        "confidence": {
+          "description": "The Speech confidence between 0.0 and 1.0 for this word. A higher number indicates an estimated greater likelihood that the recognized word is correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is not guaranteed to be fully stable over time for the same audio input. Users should also not rely on it to always be provided.",
+          "format": "float",
+          "type": "number"
+        },
+        "endOffset": {
+          "description": "Time offset relative to the beginning of the audio that corresponds to the end of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "startOffset": {
+          "description": "Time offset relative to the beginning of the audio that corresponds to the start of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "word": {
+          "description": "The word this info is for.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleCloudDialogflowV2beta1StreamingRecognitionResult": {
+      "description": "Contains a speech recognition result corresponding to a portion of the audio that is currently being processed or an indication that this is the end of the single requested utterance. While end-user audio is being processed, Dialogflow sends a series of results. Each result may contain a `transcript` value. A transcript represents a portion of the utterance. While the recognizer is processing audio, transcript values may be interim values or finalized values. Once a transcript is finalized, the `is_final` value is set to true and processing continues for the next transcript. If `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was true, and the recognizer has completed processing audio, the `message_type` value is set to `END_OF_SINGLE_UTTERANCE` and the following (last) result contains the last finalized transcript. The complete end-user utterance is determined by concatenating the finalized transcript values received for the series of results. In the following example, single utterance is enabled. In the case where single utterance is not enabled, result 7 would not occur. ``` Num | transcript | message_type | is_final --- | ----------------------- | ----------------------- | -------- 1 | \"tube\" | TRANSCRIPT | false 2 | \"to be a\" | TRANSCRIPT | false 3 | \"to be\" | TRANSCRIPT | false 4 | \"to be or not to be\" | TRANSCRIPT | true 5 | \"that's\" | TRANSCRIPT | false 6 | \"that is\" | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | \" that is the question\" | TRANSCRIPT | true ``` Concatenating the finalized transcripts with `is_final` set to true, the complete utterance becomes \"to be or not to be that is the question\".",
+      "id": "GoogleCloudDialogflowV2beta1StreamingRecognitionResult",
+      "properties": {
+        "confidence": {
+          "description": "The Speech confidence between 0.0 and 1.0 for the current portion of audio. A higher number indicates an estimated greater likelihood that the recognized words are correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is typically only provided if `is_final` is true and you should not rely on it being accurate or even set.",
+          "format": "float",
+          "type": "number"
+        },
+        "dtmfDigits": {
+          "$ref": "GoogleCloudDialogflowV2beta1TelephonyDtmfEvents",
+          "description": "DTMF digits. Populated if and only if `message_type` = `DTMF_DIGITS`."
+        },
+        "isFinal": {
+          "description": "If `false`, the `StreamingRecognitionResult` represents an interim result that may change. If `true`, the recognizer will not return any further hypotheses about this piece of the audio. May only be populated for `message_type` = `TRANSCRIPT`.",
+          "type": "boolean"
+        },
+        "languageCode": {
+          "description": "Detected language code for the transcript.",
+          "type": "string"
+        },
+        "messageType": {
+          "description": "Type of the result message.",
+          "enum": [
+            "MESSAGE_TYPE_UNSPECIFIED",
+            "TRANSCRIPT",
+            "DTMF_DIGITS",
+            "END_OF_SINGLE_UTTERANCE",
+            "PARTIAL_DTMF_DIGITS"
+          ],
+          "enumDescriptions": [
+            "Not specified. Should never be used.",
+            "Message contains a (possibly partial) transcript.",
+            "Message contains DTMF digits.",
+            "This event indicates that the server has detected the end of the user's speech utterance and expects no additional speech. Therefore, the server will not process additional audio (although it may subsequently return additional results). The client should stop sending additional audio data, half-close the gRPC connection, and wait for any additional results until the server closes the gRPC connection. This message is only sent if `single_utterance` was set to `true`, and is not used otherwise.",
+            "Message contains DTMF digits. Before a message with DTMF_DIGITS is sent, a message with PARTIAL_DTMF_DIGITS may be sent with DTMF digits collected up to the time of sending, which represents an intermediate result."
+          ],
+          "type": "string"
+        },
+        "speechEndOffset": {
+          "description": "Time offset of the end of this Speech recognition result relative to the beginning of the audio. 
Only populated for `message_type` = `TRANSCRIPT`.", + "format": "google-duration", + "type": "string" + }, + "speechWordInfo": { + "description": "Word-specific information for the words recognized by Speech in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and [InputAudioConfig.enable_word_info] is set.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1SpeechWordInfo" + }, + "type": "array" + }, + "stability": { + "description": "An estimate of the likelihood that the speech recognizer will not change its guess about this interim recognition result: * If the value is unspecified or 0.0, Dialogflow didn't compute the stability. In particular, Dialogflow will only provide stability for `TRANSCRIPT` results with `is_final = false`. * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely unstable and 1.0 means completely stable.", + "format": "float", + "type": "number" + }, + "transcript": { + "description": "Transcript text representing the words that the user spoke. Populated if and only if `message_type` = `TRANSCRIPT`.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2beta1SuggestArticlesResponse": { "description": "The response message for Participants.SuggestArticles.", "id": "GoogleCloudDialogflowV2beta1SuggestArticlesResponse", @@ -22038,6 +22219,58 @@ }, "type": "object" }, + "GoogleCloudDialogflowV2beta1TelephonyDtmfEvents": { + "description": "A wrapper of repeated TelephonyDtmf digits.", + "id": "GoogleCloudDialogflowV2beta1TelephonyDtmfEvents", + "properties": { + "dtmfEvents": { + "description": "A sequence of TelephonyDtmf digits.", + "items": { + "enum": [ + "TELEPHONY_DTMF_UNSPECIFIED", + "DTMF_ONE", + "DTMF_TWO", + "DTMF_THREE", + "DTMF_FOUR", + "DTMF_FIVE", + "DTMF_SIX", + "DTMF_SEVEN", + "DTMF_EIGHT", + "DTMF_NINE", + "DTMF_ZERO", + "DTMF_A", + "DTMF_B", + "DTMF_C", + "DTMF_D", + "DTMF_STAR", + "DTMF_POUND" + ], + "enumDescriptions": [ + "Not specified. This value may be used to indicate an absent digit.", + "Number: '1'.", + "Number: '2'.", + "Number: '3'.", + "Number: '4'.", + "Number: '5'.", + "Number: '6'.", + "Number: '7'.", + "Number: '8'.", + "Number: '9'.", + "Number: '0'.", + "Letter: 'A'.", + "Letter: 'B'.", + "Letter: 'C'.", + "Letter: 'D'.", + "Asterisk/star: '*'.", + "Pound/diamond/hash/square/gate/octothorpe: '#'." + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2beta1WebhookRequest": { "description": "The request message for a webhook call.", "id": "GoogleCloudDialogflowV2beta1WebhookRequest", diff --git a/dialogflow/v2/dialogflow-gen.go b/dialogflow/v2/dialogflow-gen.go index 3ce947c19ab..7df819bfaf1 100644 --- a/dialogflow/v2/dialogflow-gen.go +++ b/dialogflow/v2/dialogflow-gen.go @@ -8841,6 +8841,8 @@ type GoogleCloudDialogflowV2ConversationEvent struct { ErrorStatus *GoogleRpcStatus `json:"errorStatus,omitempty"` // NewMessagePayload: Payload of NEW_MESSAGE event. NewMessagePayload *GoogleCloudDialogflowV2Message `json:"newMessagePayload,omitempty"` + // NewRecognitionResultPayload: Payload of NEW_RECOGNITION_RESULT event. + NewRecognitionResultPayload *GoogleCloudDialogflowV2StreamingRecognitionResult `json:"newRecognitionResultPayload,omitempty"` // Type: The type of the event that this notification refers to. // // Possible values: @@ -8855,6 +8857,10 @@ type GoogleCloudDialogflowV2ConversationEvent struct { // "NEW_MESSAGE" - An existing conversation has received a new message, // either from API or telephony. 
It is configured in // ConversationProfile.new_message_event_notification_config + // "NEW_RECOGNITION_RESULT" - An existing conversation has received a new + // speech recognition result. This is mainly for delivering intermediate + // transcripts. The notification is configured in + // ConversationProfile.new_recognition_event_notification_config. // "UNRECOVERABLE_ERROR" - Unrecoverable error during a telephone call. In // general non-recoverable errors only occur if something was misconfigured in // the ConversationProfile corresponding to the call. After a non-recoverable @@ -9077,6 +9083,12 @@ type GoogleCloudDialogflowV2ConversationProfile struct { // NewMessageEventNotificationConfig: Configuration for publishing new message // events. Event will be sent in format of ConversationEvent NewMessageEventNotificationConfig *GoogleCloudDialogflowV2NotificationConfig `json:"newMessageEventNotificationConfig,omitempty"` + // NewRecognitionResultNotificationConfig: Optional. Configuration for + // publishing transcription intermediate results. Event will be sent in format + // of ConversationEvent. If configured, the following information will be + // populated as ConversationEvent Pub/Sub message attributes: - + // "participant_id" - "participant_role" - "message_id" + NewRecognitionResultNotificationConfig *GoogleCloudDialogflowV2NotificationConfig `json:"newRecognitionResultNotificationConfig,omitempty"` // NotificationConfig: Configuration for publishing conversation lifecycle // events. NotificationConfig *GoogleCloudDialogflowV2NotificationConfig `json:"notificationConfig,omitempty"` @@ -15291,6 +15303,154 @@ func (s GoogleCloudDialogflowV2SpeechToTextConfig) MarshalJSON() ([]byte, error) return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudDialogflowV2SpeechWordInfo: Information for a word recognized by +// the speech recognizer. +type GoogleCloudDialogflowV2SpeechWordInfo struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for this word. A + // higher number indicates an estimated greater likelihood that the recognized + // word is correct. The default of 0.0 is a sentinel value indicating that + // confidence was not set. This field is not guaranteed to be fully stable over + // time for the same audio input. Users should also not rely on it to always be + // provided. + Confidence float64 `json:"confidence,omitempty"` + // EndOffset: Time offset relative to the beginning of the audio that + // corresponds to the end of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + EndOffset string `json:"endOffset,omitempty"` + // StartOffset: Time offset relative to the beginning of the audio that + // corresponds to the start of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + StartOffset string `json:"startOffset,omitempty"` + // Word: The word this info is for. + Word string `json:"word,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2SpeechWordInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2SpeechWordInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2SpeechWordInfo) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2SpeechWordInfo + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + +// GoogleCloudDialogflowV2StreamingRecognitionResult: Contains a speech +// recognition result corresponding to a portion of the audio that is currently +// being processed or an indication that this is the end of the single +// requested utterance. While end-user audio is being processed, Dialogflow +// sends a series of results. Each result may contain a `transcript` value. A +// transcript represents a portion of the utterance. While the recognizer is +// processing audio, transcript values may be interim values or finalized +// values. Once a transcript is finalized, the `is_final` value is set to true +// and processing continues for the next transcript. If +// `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was +// true, and the recognizer has completed processing audio, the `message_type` +// value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result +// contains the last finalized transcript. The complete end-user utterance is +// determined by concatenating the finalized transcript values received for the +// series of results. In the following example, single utterance is enabled. In +// the case where single utterance is not enabled, result 7 would not occur. +// ``` Num | transcript | message_type | is_final --- | ----------------------- +// | ----------------------- | -------- 1 | "tube" | TRANSCRIPT | false 2 | "to +// be a" | TRANSCRIPT | false 3 | "to be" | TRANSCRIPT | false 4 | "to be or +// not to be" | TRANSCRIPT | true 5 | "that's" | TRANSCRIPT | false 6 | "that +// is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | " +// that is the question" | TRANSCRIPT | true ``` Concatenating the finalized +// transcripts with `is_final` set to true, the complete utterance becomes "to +// be or not to be that is the question". +type GoogleCloudDialogflowV2StreamingRecognitionResult struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for the current + // portion of audio. A higher number indicates an estimated greater likelihood + // that the recognized words are correct. The default of 0.0 is a sentinel + // value indicating that confidence was not set. This field is typically only + // provided if `is_final` is true and you should not rely on it being accurate + // or even set. + Confidence float64 `json:"confidence,omitempty"` + // IsFinal: If `false`, the `StreamingRecognitionResult` represents an interim + // result that may change. If `true`, the recognizer will not return any + // further hypotheses about this piece of the audio. May only be populated for + // `message_type` = `TRANSCRIPT`. + IsFinal bool `json:"isFinal,omitempty"` + // LanguageCode: Detected language code for the transcript. 
+ LanguageCode string `json:"languageCode,omitempty"` + // MessageType: Type of the result message. + // + // Possible values: + // "MESSAGE_TYPE_UNSPECIFIED" - Not specified. Should never be used. + // "TRANSCRIPT" - Message contains a (possibly partial) transcript. + // "END_OF_SINGLE_UTTERANCE" - This event indicates that the server has + // detected the end of the user's speech utterance and expects no additional + // inputs. Therefore, the server will not process additional audio (although it + // may subsequently return additional results). The client should stop sending + // additional audio data, half-close the gRPC connection, and wait for any + // additional results until the server closes the gRPC connection. This message + // is only sent if `single_utterance` was set to `true`, and is not used + // otherwise. + MessageType string `json:"messageType,omitempty"` + // SpeechEndOffset: Time offset of the end of this Speech recognition result + // relative to the beginning of the audio. Only populated for `message_type` = + // `TRANSCRIPT`. + SpeechEndOffset string `json:"speechEndOffset,omitempty"` + // SpeechWordInfo: Word-specific information for the words recognized by Speech + // in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and + // [InputAudioConfig.enable_word_info] is set. + SpeechWordInfo []*GoogleCloudDialogflowV2SpeechWordInfo `json:"speechWordInfo,omitempty"` + // Transcript: Transcript text representing the words that the user spoke. + // Populated if and only if `message_type` = `TRANSCRIPT`. + Transcript string `json:"transcript,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2StreamingRecognitionResult) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2StreamingRecognitionResult + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2StreamingRecognitionResult) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2StreamingRecognitionResult + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + // GoogleCloudDialogflowV2SuggestArticlesRequest: The request message for // Participants.SuggestArticles. type GoogleCloudDialogflowV2SuggestArticlesRequest struct { @@ -16593,6 +16753,8 @@ type GoogleCloudDialogflowV2beta1ConversationEvent struct { ErrorStatus *GoogleRpcStatus `json:"errorStatus,omitempty"` // NewMessagePayload: Payload of NEW_MESSAGE event. NewMessagePayload *GoogleCloudDialogflowV2beta1Message `json:"newMessagePayload,omitempty"` + // NewRecognitionResultPayload: Payload of NEW_RECOGNITION_RESULT event. 
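+	// A minimal sketch of consuming this payload (ev names a decoded
+	// ConversationEvent; appendFinal is a hypothetical helper), acting only
+	// on finalized transcripts:
+	//
+	//	if p := ev.NewRecognitionResultPayload; p != nil && p.IsFinal {
+	//		appendFinal(p.Transcript)
+	//	}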
+ NewRecognitionResultPayload *GoogleCloudDialogflowV2beta1StreamingRecognitionResult `json:"newRecognitionResultPayload,omitempty"` // Type: Required. The type of the event that this notification refers to. // // Possible values: @@ -16607,6 +16769,10 @@ type GoogleCloudDialogflowV2beta1ConversationEvent struct { // "NEW_MESSAGE" - An existing conversation has received a new message, // either from API or telephony. It is configured in // ConversationProfile.new_message_event_notification_config + // "NEW_RECOGNITION_RESULT" - An existing conversation has received a new + // speech recognition result. This is mainly for delivering intermediate + // transcripts. The notification is configured in + // ConversationProfile.new_recognition_event_notification_config. // "UNRECOVERABLE_ERROR" - Unrecoverable error during a telephone call. In // general non-recoverable errors only occur if something was misconfigured in // the ConversationProfile corresponding to the call. After a non-recoverable @@ -19574,6 +19740,171 @@ func (s *GoogleCloudDialogflowV2beta1SmartReplyAnswer) UnmarshalJSON(data []byte return nil } +// GoogleCloudDialogflowV2beta1SpeechWordInfo: Information for a word +// recognized by the speech recognizer. +type GoogleCloudDialogflowV2beta1SpeechWordInfo struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for this word. A + // higher number indicates an estimated greater likelihood that the recognized + // word is correct. The default of 0.0 is a sentinel value indicating that + // confidence was not set. This field is not guaranteed to be fully stable over + // time for the same audio input. Users should also not rely on it to always be + // provided. + Confidence float64 `json:"confidence,omitempty"` + // EndOffset: Time offset relative to the beginning of the audio that + // corresponds to the end of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + EndOffset string `json:"endOffset,omitempty"` + // StartOffset: Time offset relative to the beginning of the audio that + // corresponds to the start of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + StartOffset string `json:"startOffset,omitempty"` + // Word: The word this info is for. + Word string `json:"word,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1SpeechWordInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1SpeechWordInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2beta1SpeechWordInfo) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2beta1SpeechWordInfo + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + +// GoogleCloudDialogflowV2beta1StreamingRecognitionResult: Contains a speech +// recognition result corresponding to a portion of the audio that is currently +// being processed or an indication that this is the end of the single +// requested utterance. While end-user audio is being processed, Dialogflow +// sends a series of results. Each result may contain a `transcript` value. A +// transcript represents a portion of the utterance. While the recognizer is +// processing audio, transcript values may be interim values or finalized +// values. Once a transcript is finalized, the `is_final` value is set to true +// and processing continues for the next transcript. If +// `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was +// true, and the recognizer has completed processing audio, the `message_type` +// value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result +// contains the last finalized transcript. The complete end-user utterance is +// determined by concatenating the finalized transcript values received for the +// series of results. In the following example, single utterance is enabled. In +// the case where single utterance is not enabled, result 7 would not occur. +// ``` Num | transcript | message_type | is_final --- | ----------------------- +// | ----------------------- | -------- 1 | "tube" | TRANSCRIPT | false 2 | "to +// be a" | TRANSCRIPT | false 3 | "to be" | TRANSCRIPT | false 4 | "to be or +// not to be" | TRANSCRIPT | true 5 | "that's" | TRANSCRIPT | false 6 | "that +// is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | " +// that is the question" | TRANSCRIPT | true ``` Concatenating the finalized +// transcripts with `is_final` set to true, the complete utterance becomes "to +// be or not to be that is the question". +type GoogleCloudDialogflowV2beta1StreamingRecognitionResult struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for the current + // portion of audio. A higher number indicates an estimated greater likelihood + // that the recognized words are correct. The default of 0.0 is a sentinel + // value indicating that confidence was not set. This field is typically only + // provided if `is_final` is true and you should not rely on it being accurate + // or even set. + Confidence float64 `json:"confidence,omitempty"` + // DtmfDigits: DTMF digits. Populated if and only if `message_type` = + // `DTMF_DIGITS`. + DtmfDigits *GoogleCloudDialogflowV2beta1TelephonyDtmfEvents `json:"dtmfDigits,omitempty"` + // IsFinal: If `false`, the `StreamingRecognitionResult` represents an interim + // result that may change. If `true`, the recognizer will not return any + // further hypotheses about this piece of the audio. May only be populated for + // `message_type` = `TRANSCRIPT`. 
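+	// A common pattern, sketched here with hypothetical render/commit
+	// callbacks, is to preview interim results and append only finalized
+	// ones:
+	//
+	//	if r.IsFinal {
+	//		commit(r.Transcript) // finalized; will not change
+	//	} else {
+	//		render(r.Transcript) // interim; may still be revised
+	//	}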
+ IsFinal bool `json:"isFinal,omitempty"` + // LanguageCode: Detected language code for the transcript. + LanguageCode string `json:"languageCode,omitempty"` + // MessageType: Type of the result message. + // + // Possible values: + // "MESSAGE_TYPE_UNSPECIFIED" - Not specified. Should never be used. + // "TRANSCRIPT" - Message contains a (possibly partial) transcript. + // "DTMF_DIGITS" - Message contains DTMF digits. + // "END_OF_SINGLE_UTTERANCE" - This event indicates that the server has + // detected the end of the user's speech utterance and expects no additional + // speech. Therefore, the server will not process additional audio (although it + // may subsequently return additional results). The client should stop sending + // additional audio data, half-close the gRPC connection, and wait for any + // additional results until the server closes the gRPC connection. This message + // is only sent if `single_utterance` was set to `true`, and is not used + // otherwise. + // "PARTIAL_DTMF_DIGITS" - Message contains DTMF digits. Before a message + // with DTMF_DIGITS is sent, a message with PARTIAL_DTMF_DIGITS may be sent + // with DTMF digits collected up to the time of sending, which represents an + // intermediate result. + MessageType string `json:"messageType,omitempty"` + // SpeechEndOffset: Time offset of the end of this Speech recognition result + // relative to the beginning of the audio. Only populated for `message_type` = + // `TRANSCRIPT`. + SpeechEndOffset string `json:"speechEndOffset,omitempty"` + // SpeechWordInfo: Word-specific information for the words recognized by Speech + // in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and + // [InputAudioConfig.enable_word_info] is set. + SpeechWordInfo []*GoogleCloudDialogflowV2beta1SpeechWordInfo `json:"speechWordInfo,omitempty"` + // Stability: An estimate of the likelihood that the speech recognizer will not + // change its guess about this interim recognition result: * If the value is + // unspecified or 0.0, Dialogflow didn't compute the stability. In particular, + // Dialogflow will only provide stability for `TRANSCRIPT` results with + // `is_final = false`. * Otherwise, the value is in (0.0, 1.0] where 0.0 means + // completely unstable and 1.0 means completely stable. + Stability float64 `json:"stability,omitempty"` + // Transcript: Transcript text representing the words that the user spoke. + // Populated if and only if `message_type` = `TRANSCRIPT`. + Transcript string `json:"transcript,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1StreamingRecognitionResult) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1StreamingRecognitionResult + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2beta1StreamingRecognitionResult) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2beta1StreamingRecognitionResult + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + Stability gensupport.JSONFloat64 `json:"stability"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + s.Stability = float64(s1.Stability) + return nil +} + // GoogleCloudDialogflowV2beta1SuggestArticlesResponse: The response message // for Participants.SuggestArticles. type GoogleCloudDialogflowV2beta1SuggestArticlesResponse struct { @@ -19776,6 +20107,49 @@ func (s GoogleCloudDialogflowV2beta1SuggestionResult) MarshalJSON() ([]byte, err return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudDialogflowV2beta1TelephonyDtmfEvents: A wrapper of repeated +// TelephonyDtmf digits. +type GoogleCloudDialogflowV2beta1TelephonyDtmfEvents struct { + // DtmfEvents: A sequence of TelephonyDtmf digits. + // + // Possible values: + // "TELEPHONY_DTMF_UNSPECIFIED" - Not specified. This value may be used to + // indicate an absent digit. + // "DTMF_ONE" - Number: '1'. + // "DTMF_TWO" - Number: '2'. + // "DTMF_THREE" - Number: '3'. + // "DTMF_FOUR" - Number: '4'. + // "DTMF_FIVE" - Number: '5'. + // "DTMF_SIX" - Number: '6'. + // "DTMF_SEVEN" - Number: '7'. + // "DTMF_EIGHT" - Number: '8'. + // "DTMF_NINE" - Number: '9'. + // "DTMF_ZERO" - Number: '0'. + // "DTMF_A" - Letter: 'A'. + // "DTMF_B" - Letter: 'B'. + // "DTMF_C" - Letter: 'C'. + // "DTMF_D" - Letter: 'D'. + // "DTMF_STAR" - Asterisk/star: '*'. + // "DTMF_POUND" - Pound/diamond/hash/square/gate/octothorpe: '#'. + DtmfEvents []string `json:"dtmfEvents,omitempty"` + // ForceSendFields is a list of field names (e.g. "DtmfEvents") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DtmfEvents") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1TelephonyDtmfEvents) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1TelephonyDtmfEvents + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // GoogleCloudDialogflowV2beta1WebhookRequest: The request message for a // webhook call. 
type GoogleCloudDialogflowV2beta1WebhookRequest struct { diff --git a/dialogflow/v2beta1/dialogflow-api.json b/dialogflow/v2beta1/dialogflow-api.json index 32cebf933db..95ae815423a 100644 --- a/dialogflow/v2beta1/dialogflow-api.json +++ b/dialogflow/v2beta1/dialogflow-api.json @@ -8239,7 +8239,7 @@ } } }, - "revision": "20241212", + "revision": "20241216", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { @@ -13109,6 +13109,10 @@ "$ref": "GoogleCloudDialogflowV2Message", "description": "Payload of NEW_MESSAGE event." }, + "newRecognitionResultPayload": { + "$ref": "GoogleCloudDialogflowV2StreamingRecognitionResult", + "description": "Payload of NEW_RECOGNITION_RESULT event." + }, "type": { "description": "The type of the event that this notification refers to.", "enum": [ @@ -13117,6 +13121,7 @@ "CONVERSATION_FINISHED", "HUMAN_INTERVENTION_NEEDED", "NEW_MESSAGE", + "NEW_RECOGNITION_RESULT", "UNRECOVERABLE_ERROR" ], "enumDescriptions": [ @@ -13125,6 +13130,7 @@ "An existing conversation has closed. This is fired when a telephone call is terminated, or a conversation is closed via the API.", "An existing conversation has received notification from Dialogflow that human intervention is required.", "An existing conversation has received a new message, either from API or telephony. It is configured in ConversationProfile.new_message_event_notification_config", + "An existing conversation has received a new speech recognition result. This is mainly for delivering intermediate transcripts. The notification is configured in ConversationProfile.new_recognition_event_notification_config.", "Unrecoverable error during a telephone call. In general non-recoverable errors only occur if something was misconfigured in the ConversationProfile corresponding to the call. After a non-recoverable error, Dialogflow may stop responding. We don't fire this event: * in an API call because we can directly return the error, or, * when we can recover from an error." ], "type": "string" @@ -15024,6 +15030,82 @@ }, "type": "object" }, + "GoogleCloudDialogflowV2SpeechWordInfo": { + "description": "Information for a word recognized by the speech recognizer.", + "id": "GoogleCloudDialogflowV2SpeechWordInfo", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for this word. A higher number indicates an estimated greater likelihood that the recognized word is correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is not guaranteed to be fully stable over time for the same audio input. Users should also not rely on it to always be provided.", + "format": "float", + "type": "number" + }, + "endOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the end of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "startOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the start of the spoken word. 
This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "word": { + "description": "The word this info is for.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2StreamingRecognitionResult": { + "description": "Contains a speech recognition result corresponding to a portion of the audio that is currently being processed or an indication that this is the end of the single requested utterance. While end-user audio is being processed, Dialogflow sends a series of results. Each result may contain a `transcript` value. A transcript represents a portion of the utterance. While the recognizer is processing audio, transcript values may be interim values or finalized values. Once a transcript is finalized, the `is_final` value is set to true and processing continues for the next transcript. If `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was true, and the recognizer has completed processing audio, the `message_type` value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result contains the last finalized transcript. The complete end-user utterance is determined by concatenating the finalized transcript values received for the series of results. In the following example, single utterance is enabled. In the case where single utterance is not enabled, result 7 would not occur. ``` Num | transcript | message_type | is_final --- | ----------------------- | ----------------------- | -------- 1 | \"tube\" | TRANSCRIPT | false 2 | \"to be a\" | TRANSCRIPT | false 3 | \"to be\" | TRANSCRIPT | false 4 | \"to be or not to be\" | TRANSCRIPT | true 5 | \"that's\" | TRANSCRIPT | false 6 | \"that is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | \" that is the question\" | TRANSCRIPT | true ``` Concatenating the finalized transcripts with `is_final` set to true, the complete utterance becomes \"to be or not to be that is the question\".", + "id": "GoogleCloudDialogflowV2StreamingRecognitionResult", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for the current portion of audio. A higher number indicates an estimated greater likelihood that the recognized words are correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is typically only provided if `is_final` is true and you should not rely on it being accurate or even set.", + "format": "float", + "type": "number" + }, + "isFinal": { + "description": "If `false`, the `StreamingRecognitionResult` represents an interim result that may change. If `true`, the recognizer will not return any further hypotheses about this piece of the audio. May only be populated for `message_type` = `TRANSCRIPT`.", + "type": "boolean" + }, + "languageCode": { + "description": "Detected language code for the transcript.", + "type": "string" + }, + "messageType": { + "description": "Type of the result message.", + "enum": [ + "MESSAGE_TYPE_UNSPECIFIED", + "TRANSCRIPT", + "END_OF_SINGLE_UTTERANCE" + ], + "enumDescriptions": [ + "Not specified. Should never be used.", + "Message contains a (possibly partial) transcript.", + "This event indicates that the server has detected the end of the user's speech utterance and expects no additional inputs. Therefore, the server will not process additional audio (although it may subsequently return additional results). 
The client should stop sending additional audio data, half-close the gRPC connection, and wait for any additional results until the server closes the gRPC connection. This message is only sent if `single_utterance` was set to `true`, and is not used otherwise." + ], + "type": "string" + }, + "speechEndOffset": { + "description": "Time offset of the end of this Speech recognition result relative to the beginning of the audio. Only populated for `message_type` = `TRANSCRIPT`.", + "format": "google-duration", + "type": "string" + }, + "speechWordInfo": { + "description": "Word-specific information for the words recognized by Speech in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and [InputAudioConfig.enable_word_info] is set.", + "items": { + "$ref": "GoogleCloudDialogflowV2SpeechWordInfo" + }, + "type": "array" + }, + "transcript": { + "description": "Transcript text representing the words that the user spoke. Populated if and only if `message_type` = `TRANSCRIPT`.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2SuggestArticlesResponse": { "description": "The response message for Participants.SuggestArticles.", "id": "GoogleCloudDialogflowV2SuggestArticlesResponse", @@ -15509,7 +15591,7 @@ "description": "The intent to be triggered on V3 agent." }, "messageSendTime": { - "description": "Optional. The send time of the message from end user or human agent's perspective. It is used for identifying the same message under one participant. Given two messages under the same participant: * If send time are different regardless of whether the content of the messages are exactly the same, the conversation will regard them as two distinct messages sent by the participant. * If send time is the same regardless of whether the content of the messages are exactly the same, the conversation will regard them as same message, and ignore the message received later. If the value is not provided, a new request will always be regarded as a new message without any de-duplication.", + "description": "Optional. The send time of the message from end user or human agent's perspective. It is used for identifying the same message under one participant. For BatchCreateMessages API only: Given two messages under the same participant: * If send time are different regardless of whether the content of the messages are exactly the same, the conversation will regard them as two distinct messages sent by the participant. * If send time is the same regardless of whether the content of the messages are exactly the same, the conversation will regard them as same message, and ignore the message received later. If the value is not provided, a new request will always be regarded as a new message without any de-duplication.", "format": "google-datetime", "type": "string" }, @@ -16377,6 +16459,10 @@ "$ref": "GoogleCloudDialogflowV2beta1Message", "description": "Payload of NEW_MESSAGE event." }, + "newRecognitionResultPayload": { + "$ref": "GoogleCloudDialogflowV2beta1StreamingRecognitionResult", + "description": "Payload of NEW_RECOGNITION_RESULT event." + }, "type": { "description": "Required. The type of the event that this notification refers to.", "enum": [ @@ -16385,6 +16471,7 @@ "CONVERSATION_FINISHED", "HUMAN_INTERVENTION_NEEDED", "NEW_MESSAGE", + "NEW_RECOGNITION_RESULT", "UNRECOVERABLE_ERROR" ], "enumDescriptions": [ @@ -16393,6 +16480,7 @@ "An existing conversation has closed. 
This is fired when a telephone call is terminated, or a conversation is closed via the API.", "An existing conversation has received notification from Dialogflow that human intervention is required.", "An existing conversation has received a new message, either from API or telephony. It is configured in ConversationProfile.new_message_event_notification_config", + "An existing conversation has received a new speech recognition result. This is mainly for delivering intermediate transcripts. The notification is configured in ConversationProfile.new_recognition_event_notification_config.", "Unrecoverable error during a telephone call. In general non-recoverable errors only occur if something was misconfigured in the ConversationProfile corresponding to the call. After a non-recoverable error, Dialogflow may stop responding. We don't fire this event: * in an API call because we can directly return the error, or, * when we can recover from an error." ], "type": "string" @@ -16454,6 +16542,10 @@ "$ref": "GoogleCloudDialogflowV2beta1NotificationConfig", "description": "Configuration for publishing new message events. Event will be sent in format of ConversationEvent" }, + "newRecognitionResultNotificationConfig": { + "$ref": "GoogleCloudDialogflowV2beta1NotificationConfig", + "description": "Optional. Configuration for publishing transcription intermediate results. Event will be sent in format of ConversationEvent. If configured, the following information will be populated as ConversationEvent Pub/Sub message attributes: - \"participant_id\" - \"participant_role\" - \"message_id\"" + }, "notificationConfig": { "$ref": "GoogleCloudDialogflowV2beta1NotificationConfig", "description": "Configuration for publishing conversation lifecycle events." @@ -20935,6 +21027,95 @@ }, "type": "object" }, + "GoogleCloudDialogflowV2beta1SpeechWordInfo": { + "description": "Information for a word recognized by the speech recognizer.", + "id": "GoogleCloudDialogflowV2beta1SpeechWordInfo", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for this word. A higher number indicates an estimated greater likelihood that the recognized word is correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is not guaranteed to be fully stable over time for the same audio input. Users should also not rely on it to always be provided.", + "format": "float", + "type": "number" + }, + "endOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the end of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "startOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the start of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "word": { + "description": "The word this info is for.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1StreamingRecognitionResult": { + "description": "Contains a speech recognition result corresponding to a portion of the audio that is currently being processed or an indication that this is the end of the single requested utterance. While end-user audio is being processed, Dialogflow sends a series of results. Each result may contain a `transcript` value. 
A transcript represents a portion of the utterance. While the recognizer is processing audio, transcript values may be interim values or finalized values. Once a transcript is finalized, the `is_final` value is set to true and processing continues for the next transcript. If `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was true, and the recognizer has completed processing audio, the `message_type` value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result contains the last finalized transcript. The complete end-user utterance is determined by concatenating the finalized transcript values received for the series of results. In the following example, single utterance is enabled. In the case where single utterance is not enabled, result 7 would not occur. ``` Num | transcript | message_type | is_final --- | ----------------------- | ----------------------- | -------- 1 | \"tube\" | TRANSCRIPT | false 2 | \"to be a\" | TRANSCRIPT | false 3 | \"to be\" | TRANSCRIPT | false 4 | \"to be or not to be\" | TRANSCRIPT | true 5 | \"that's\" | TRANSCRIPT | false 6 | \"that is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | \" that is the question\" | TRANSCRIPT | true ``` Concatenating the finalized transcripts with `is_final` set to true, the complete utterance becomes \"to be or not to be that is the question\".", + "id": "GoogleCloudDialogflowV2beta1StreamingRecognitionResult", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for the current portion of audio. A higher number indicates an estimated greater likelihood that the recognized words are correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is typically only provided if `is_final` is true and you should not rely on it being accurate or even set.", + "format": "float", + "type": "number" + }, + "dtmfDigits": { + "$ref": "GoogleCloudDialogflowV2beta1TelephonyDtmfEvents", + "description": "DTMF digits. Populated if and only if `message_type` = `DTMF_DIGITS`." + }, + "isFinal": { + "description": "If `false`, the `StreamingRecognitionResult` represents an interim result that may change. If `true`, the recognizer will not return any further hypotheses about this piece of the audio. May only be populated for `message_type` = `TRANSCRIPT`.", + "type": "boolean" + }, + "languageCode": { + "description": "Detected language code for the transcript.", + "type": "string" + }, + "messageType": { + "description": "Type of the result message.", + "enum": [ + "MESSAGE_TYPE_UNSPECIFIED", + "TRANSCRIPT", + "DTMF_DIGITS", + "END_OF_SINGLE_UTTERANCE", + "PARTIAL_DTMF_DIGITS" + ], + "enumDescriptions": [ + "Not specified. Should never be used.", + "Message contains a (possibly partial) transcript.", + "Message contains DTMF digits.", + "This event indicates that the server has detected the end of the user's speech utterance and expects no additional speech. Therefore, the server will not process additional audio (although it may subsequently return additional results). The client should stop sending additional audio data, half-close the gRPC connection, and wait for any additional results until the server closes the gRPC connection. This message is only sent if `single_utterance` was set to `true`, and is not used otherwise.", + "Message contains DTMF digits. 
Before a message with DTMF_DIGITS is sent, a message with PARTIAL_DTMF_DIGITS may be sent with DTMF digits collected up to the time of sending, which represents an intermediate result." + ], + "type": "string" + }, + "speechEndOffset": { + "description": "Time offset of the end of this Speech recognition result relative to the beginning of the audio. Only populated for `message_type` = `TRANSCRIPT`.", + "format": "google-duration", + "type": "string" + }, + "speechWordInfo": { + "description": "Word-specific information for the words recognized by Speech in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and [InputAudioConfig.enable_word_info] is set.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1SpeechWordInfo" + }, + "type": "array" + }, + "stability": { + "description": "An estimate of the likelihood that the speech recognizer will not change its guess about this interim recognition result: * If the value is unspecified or 0.0, Dialogflow didn't compute the stability. In particular, Dialogflow will only provide stability for `TRANSCRIPT` results with `is_final = false`. * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely unstable and 1.0 means completely stable.", + "format": "float", + "type": "number" + }, + "transcript": { + "description": "Transcript text representing the words that the user spoke. Populated if and only if `message_type` = `TRANSCRIPT`.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2beta1SubAgent": { "description": "Contains basic configuration for a sub-agent.", "id": "GoogleCloudDialogflowV2beta1SubAgent", diff --git a/dialogflow/v2beta1/dialogflow-gen.go b/dialogflow/v2beta1/dialogflow-gen.go index e27adffd8c6..fad815f39a8 100644 --- a/dialogflow/v2beta1/dialogflow-gen.go +++ b/dialogflow/v2beta1/dialogflow-gen.go @@ -7797,6 +7797,8 @@ type GoogleCloudDialogflowV2ConversationEvent struct { ErrorStatus *GoogleRpcStatus `json:"errorStatus,omitempty"` // NewMessagePayload: Payload of NEW_MESSAGE event. NewMessagePayload *GoogleCloudDialogflowV2Message `json:"newMessagePayload,omitempty"` + // NewRecognitionResultPayload: Payload of NEW_RECOGNITION_RESULT event. + NewRecognitionResultPayload *GoogleCloudDialogflowV2StreamingRecognitionResult `json:"newRecognitionResultPayload,omitempty"` // Type: The type of the event that this notification refers to. // // Possible values: @@ -7811,6 +7813,10 @@ type GoogleCloudDialogflowV2ConversationEvent struct { // "NEW_MESSAGE" - An existing conversation has received a new message, // either from API or telephony. It is configured in // ConversationProfile.new_message_event_notification_config + // "NEW_RECOGNITION_RESULT" - An existing conversation has received a new + // speech recognition result. This is mainly for delivering intermediate + // transcripts. The notification is configured in + // ConversationProfile.new_recognition_event_notification_config. // "UNRECOVERABLE_ERROR" - Unrecoverable error during a telephone call. In // general non-recoverable errors only occur if something was misconfigured in // the ConversationProfile corresponding to the call. After a non-recoverable @@ -10331,6 +10337,154 @@ func (s GoogleCloudDialogflowV2SmartReplyModelMetadata) MarshalJSON() ([]byte, e return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudDialogflowV2SpeechWordInfo: Information for a word recognized by +// the speech recognizer. 
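+// The StartOffset and EndOffset fields hold google-duration strings such as
+// "1.500s"; assuming that serialized form, they can be parsed with
+// time.ParseDuration (info names a received value; illustrative only):
+//
+//	if d, err := time.ParseDuration(info.StartOffset); err == nil {
+//		fmt.Printf("%q starts at %v\n", info.Word, d)
+//	}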
+type GoogleCloudDialogflowV2SpeechWordInfo struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for this word. A + // higher number indicates an estimated greater likelihood that the recognized + // word is correct. The default of 0.0 is a sentinel value indicating that + // confidence was not set. This field is not guaranteed to be fully stable over + // time for the same audio input. Users should also not rely on it to always be + // provided. + Confidence float64 `json:"confidence,omitempty"` + // EndOffset: Time offset relative to the beginning of the audio that + // corresponds to the end of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + EndOffset string `json:"endOffset,omitempty"` + // StartOffset: Time offset relative to the beginning of the audio that + // corresponds to the start of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + StartOffset string `json:"startOffset,omitempty"` + // Word: The word this info is for. + Word string `json:"word,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2SpeechWordInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2SpeechWordInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2SpeechWordInfo) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2SpeechWordInfo + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + +// GoogleCloudDialogflowV2StreamingRecognitionResult: Contains a speech +// recognition result corresponding to a portion of the audio that is currently +// being processed or an indication that this is the end of the single +// requested utterance. While end-user audio is being processed, Dialogflow +// sends a series of results. Each result may contain a `transcript` value. A +// transcript represents a portion of the utterance. While the recognizer is +// processing audio, transcript values may be interim values or finalized +// values. Once a transcript is finalized, the `is_final` value is set to true +// and processing continues for the next transcript. If +// `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was +// true, and the recognizer has completed processing audio, the `message_type` +// value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result +// contains the last finalized transcript. The complete end-user utterance is +// determined by concatenating the finalized transcript values received for the +// series of results. In the following example, single utterance is enabled. 
In +// the case where single utterance is not enabled, result 7 would not occur. +// ``` Num | transcript | message_type | is_final --- | ----------------------- +// | ----------------------- | -------- 1 | "tube" | TRANSCRIPT | false 2 | "to +// be a" | TRANSCRIPT | false 3 | "to be" | TRANSCRIPT | false 4 | "to be or +// not to be" | TRANSCRIPT | true 5 | "that's" | TRANSCRIPT | false 6 | "that +// is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | " +// that is the question" | TRANSCRIPT | true ``` Concatenating the finalized +// transcripts with `is_final` set to true, the complete utterance becomes "to +// be or not to be that is the question". +type GoogleCloudDialogflowV2StreamingRecognitionResult struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for the current + // portion of audio. A higher number indicates an estimated greater likelihood + // that the recognized words are correct. The default of 0.0 is a sentinel + // value indicating that confidence was not set. This field is typically only + // provided if `is_final` is true and you should not rely on it being accurate + // or even set. + Confidence float64 `json:"confidence,omitempty"` + // IsFinal: If `false`, the `StreamingRecognitionResult` represents an interim + // result that may change. If `true`, the recognizer will not return any + // further hypotheses about this piece of the audio. May only be populated for + // `message_type` = `TRANSCRIPT`. + IsFinal bool `json:"isFinal,omitempty"` + // LanguageCode: Detected language code for the transcript. + LanguageCode string `json:"languageCode,omitempty"` + // MessageType: Type of the result message. + // + // Possible values: + // "MESSAGE_TYPE_UNSPECIFIED" - Not specified. Should never be used. + // "TRANSCRIPT" - Message contains a (possibly partial) transcript. + // "END_OF_SINGLE_UTTERANCE" - This event indicates that the server has + // detected the end of the user's speech utterance and expects no additional + // inputs. Therefore, the server will not process additional audio (although it + // may subsequently return additional results). The client should stop sending + // additional audio data, half-close the gRPC connection, and wait for any + // additional results until the server closes the gRPC connection. This message + // is only sent if `single_utterance` was set to `true`, and is not used + // otherwise. + MessageType string `json:"messageType,omitempty"` + // SpeechEndOffset: Time offset of the end of this Speech recognition result + // relative to the beginning of the audio. Only populated for `message_type` = + // `TRANSCRIPT`. + SpeechEndOffset string `json:"speechEndOffset,omitempty"` + // SpeechWordInfo: Word-specific information for the words recognized by Speech + // in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and + // [InputAudioConfig.enable_word_info] is set. + SpeechWordInfo []*GoogleCloudDialogflowV2SpeechWordInfo `json:"speechWordInfo,omitempty"` + // Transcript: Transcript text representing the words that the user spoke. + // Populated if and only if `message_type` = `TRANSCRIPT`. + Transcript string `json:"transcript,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. 
+ ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2StreamingRecognitionResult) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2StreamingRecognitionResult + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2StreamingRecognitionResult) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2StreamingRecognitionResult + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + // GoogleCloudDialogflowV2SuggestArticlesResponse: The response message for // Participants.SuggestArticles. type GoogleCloudDialogflowV2SuggestArticlesResponse struct { @@ -10932,14 +11086,14 @@ type GoogleCloudDialogflowV2beta1AnalyzeContentRequest struct { IntentInput *GoogleCloudDialogflowV2beta1IntentInput `json:"intentInput,omitempty"` // MessageSendTime: Optional. The send time of the message from end user or // human agent's perspective. It is used for identifying the same message under - // one participant. Given two messages under the same participant: * If send - // time are different regardless of whether the content of the messages are - // exactly the same, the conversation will regard them as two distinct messages - // sent by the participant. * If send time is the same regardless of whether - // the content of the messages are exactly the same, the conversation will - // regard them as same message, and ignore the message received later. If the - // value is not provided, a new request will always be regarded as a new - // message without any de-duplication. + // one participant. For BatchCreateMessages API only: Given two messages under + // the same participant: * If send time are different regardless of whether the + // content of the messages are exactly the same, the conversation will regard + // them as two distinct messages sent by the participant. * If send time is the + // same regardless of whether the content of the messages are exactly the same, + // the conversation will regard them as same message, and ignore the message + // received later. If the value is not provided, a new request will always be + // regarded as a new message without any de-duplication. MessageSendTime string `json:"messageSendTime,omitempty"` // QueryParams: Parameters for a Dialogflow virtual-agent query. QueryParams *GoogleCloudDialogflowV2beta1QueryParameters `json:"queryParams,omitempty"` @@ -12126,6 +12280,8 @@ type GoogleCloudDialogflowV2beta1ConversationEvent struct { ErrorStatus *GoogleRpcStatus `json:"errorStatus,omitempty"` // NewMessagePayload: Payload of NEW_MESSAGE event. NewMessagePayload *GoogleCloudDialogflowV2beta1Message `json:"newMessagePayload,omitempty"` + // NewRecognitionResultPayload: Payload of NEW_RECOGNITION_RESULT event. + NewRecognitionResultPayload *GoogleCloudDialogflowV2beta1StreamingRecognitionResult `json:"newRecognitionResultPayload,omitempty"` // Type: Required. The type of the event that this notification refers to. 
// // Possible values: @@ -12140,6 +12296,10 @@ type GoogleCloudDialogflowV2beta1ConversationEvent struct { // "NEW_MESSAGE" - An existing conversation has received a new message, // either from API or telephony. It is configured in // ConversationProfile.new_message_event_notification_config + // "NEW_RECOGNITION_RESULT" - An existing conversation has received a new + // speech recognition result. This is mainly for delivering intermediate + // transcripts. The notification is configured in + // ConversationProfile.new_recognition_event_notification_config. // "UNRECOVERABLE_ERROR" - Unrecoverable error during a telephone call. In // general non-recoverable errors only occur if something was misconfigured in // the ConversationProfile corresponding to the call. After a non-recoverable @@ -12221,6 +12381,12 @@ type GoogleCloudDialogflowV2beta1ConversationProfile struct { // NewMessageEventNotificationConfig: Configuration for publishing new message // events. Event will be sent in format of ConversationEvent NewMessageEventNotificationConfig *GoogleCloudDialogflowV2beta1NotificationConfig `json:"newMessageEventNotificationConfig,omitempty"` + // NewRecognitionResultNotificationConfig: Optional. Configuration for + // publishing transcription intermediate results. Event will be sent in format + // of ConversationEvent. If configured, the following information will be + // populated as ConversationEvent Pub/Sub message attributes: - + // "participant_id" - "participant_role" - "message_id" + NewRecognitionResultNotificationConfig *GoogleCloudDialogflowV2beta1NotificationConfig `json:"newRecognitionResultNotificationConfig,omitempty"` // NotificationConfig: Configuration for publishing conversation lifecycle // events. NotificationConfig *GoogleCloudDialogflowV2beta1NotificationConfig `json:"notificationConfig,omitempty"` @@ -18725,6 +18891,171 @@ func (s GoogleCloudDialogflowV2beta1SpeechToTextConfig) MarshalJSON() ([]byte, e return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudDialogflowV2beta1SpeechWordInfo: Information for a word +// recognized by the speech recognizer. +type GoogleCloudDialogflowV2beta1SpeechWordInfo struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for this word. A + // higher number indicates an estimated greater likelihood that the recognized + // word is correct. The default of 0.0 is a sentinel value indicating that + // confidence was not set. This field is not guaranteed to be fully stable over + // time for the same audio input. Users should also not rely on it to always be + // provided. + Confidence float64 `json:"confidence,omitempty"` + // EndOffset: Time offset relative to the beginning of the audio that + // corresponds to the end of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + EndOffset string `json:"endOffset,omitempty"` + // StartOffset: Time offset relative to the beginning of the audio that + // corresponds to the start of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + StartOffset string `json:"startOffset,omitempty"` + // Word: The word this info is for. + Word string `json:"word,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. 
+ ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1SpeechWordInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1SpeechWordInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2beta1SpeechWordInfo) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2beta1SpeechWordInfo + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + +// GoogleCloudDialogflowV2beta1StreamingRecognitionResult: Contains a speech +// recognition result corresponding to a portion of the audio that is currently +// being processed or an indication that this is the end of the single +// requested utterance. While end-user audio is being processed, Dialogflow +// sends a series of results. Each result may contain a `transcript` value. A +// transcript represents a portion of the utterance. While the recognizer is +// processing audio, transcript values may be interim values or finalized +// values. Once a transcript is finalized, the `is_final` value is set to true +// and processing continues for the next transcript. If +// `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was +// true, and the recognizer has completed processing audio, the `message_type` +// value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result +// contains the last finalized transcript. The complete end-user utterance is +// determined by concatenating the finalized transcript values received for the +// series of results. In the following example, single utterance is enabled. In +// the case where single utterance is not enabled, result 7 would not occur. +// ``` Num | transcript | message_type | is_final --- | ----------------------- +// | ----------------------- | -------- 1 | "tube" | TRANSCRIPT | false 2 | "to +// be a" | TRANSCRIPT | false 3 | "to be" | TRANSCRIPT | false 4 | "to be or +// not to be" | TRANSCRIPT | true 5 | "that's" | TRANSCRIPT | false 6 | "that +// is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | " +// that is the question" | TRANSCRIPT | true ``` Concatenating the finalized +// transcripts with `is_final` set to true, the complete utterance becomes "to +// be or not to be that is the question". +type GoogleCloudDialogflowV2beta1StreamingRecognitionResult struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for the current + // portion of audio. A higher number indicates an estimated greater likelihood + // that the recognized words are correct. The default of 0.0 is a sentinel + // value indicating that confidence was not set. This field is typically only + // provided if `is_final` is true and you should not rely on it being accurate + // or even set. + Confidence float64 `json:"confidence,omitempty"` + // DtmfDigits: DTMF digits. Populated if and only if `message_type` = + // `DTMF_DIGITS`. 
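+	// The digits arrive as enum names such as "DTMF_ONE" or "DTMF_STAR"
+	// rather than characters; mapping them is left to the caller, e.g. with
+	// a small lookup table (a sketch, not generated code):
+	//
+	//	dtmfChar := map[string]rune{
+	//		"DTMF_ONE": '1', "DTMF_ZERO": '0', "DTMF_STAR": '*', "DTMF_POUND": '#',
+	//	}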
+ DtmfDigits *GoogleCloudDialogflowV2beta1TelephonyDtmfEvents `json:"dtmfDigits,omitempty"` + // IsFinal: If `false`, the `StreamingRecognitionResult` represents an interim + // result that may change. If `true`, the recognizer will not return any + // further hypotheses about this piece of the audio. May only be populated for + // `message_type` = `TRANSCRIPT`. + IsFinal bool `json:"isFinal,omitempty"` + // LanguageCode: Detected language code for the transcript. + LanguageCode string `json:"languageCode,omitempty"` + // MessageType: Type of the result message. + // + // Possible values: + // "MESSAGE_TYPE_UNSPECIFIED" - Not specified. Should never be used. + // "TRANSCRIPT" - Message contains a (possibly partial) transcript. + // "DTMF_DIGITS" - Message contains DTMF digits. + // "END_OF_SINGLE_UTTERANCE" - This event indicates that the server has + // detected the end of the user's speech utterance and expects no additional + // speech. Therefore, the server will not process additional audio (although it + // may subsequently return additional results). The client should stop sending + // additional audio data, half-close the gRPC connection, and wait for any + // additional results until the server closes the gRPC connection. This message + // is only sent if `single_utterance` was set to `true`, and is not used + // otherwise. + // "PARTIAL_DTMF_DIGITS" - Message contains DTMF digits. Before a message + // with DTMF_DIGITS is sent, a message with PARTIAL_DTMF_DIGITS may be sent + // with DTMF digits collected up to the time of sending, which represents an + // intermediate result. + MessageType string `json:"messageType,omitempty"` + // SpeechEndOffset: Time offset of the end of this Speech recognition result + // relative to the beginning of the audio. Only populated for `message_type` = + // `TRANSCRIPT`. + SpeechEndOffset string `json:"speechEndOffset,omitempty"` + // SpeechWordInfo: Word-specific information for the words recognized by Speech + // in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and + // [InputAudioConfig.enable_word_info] is set. + SpeechWordInfo []*GoogleCloudDialogflowV2beta1SpeechWordInfo `json:"speechWordInfo,omitempty"` + // Stability: An estimate of the likelihood that the speech recognizer will not + // change its guess about this interim recognition result: * If the value is + // unspecified or 0.0, Dialogflow didn't compute the stability. In particular, + // Dialogflow will only provide stability for `TRANSCRIPT` results with + // `is_final = false`. * Otherwise, the value is in (0.0, 1.0] where 0.0 means + // completely unstable and 1.0 means completely stable. + Stability float64 `json:"stability,omitempty"` + // Transcript: Transcript text representing the words that the user spoke. + // Populated if and only if `message_type` = `TRANSCRIPT`. + Transcript string `json:"transcript,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1StreamingRecognitionResult) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1StreamingRecognitionResult + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2beta1StreamingRecognitionResult) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2beta1StreamingRecognitionResult + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + Stability gensupport.JSONFloat64 `json:"stability"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + s.Stability = float64(s1.Stability) + return nil +} + // GoogleCloudDialogflowV2beta1SubAgent: Contains basic configuration for a // sub-agent. type GoogleCloudDialogflowV2beta1SubAgent struct { diff --git a/dialogflow/v3/dialogflow-api.json b/dialogflow/v3/dialogflow-api.json index 19d676c2631..f12a522dc8c 100644 --- a/dialogflow/v3/dialogflow-api.json +++ b/dialogflow/v3/dialogflow-api.json @@ -4453,7 +4453,7 @@ } } }, - "revision": "20241212", + "revision": "20241216", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { @@ -12814,6 +12814,10 @@ "$ref": "GoogleCloudDialogflowV2Message", "description": "Payload of NEW_MESSAGE event." }, + "newRecognitionResultPayload": { + "$ref": "GoogleCloudDialogflowV2StreamingRecognitionResult", + "description": "Payload of NEW_RECOGNITION_RESULT event." + }, "type": { "description": "The type of the event that this notification refers to.", "enum": [ @@ -12822,6 +12826,7 @@ "CONVERSATION_FINISHED", "HUMAN_INTERVENTION_NEEDED", "NEW_MESSAGE", + "NEW_RECOGNITION_RESULT", "UNRECOVERABLE_ERROR" ], "enumDescriptions": [ @@ -12830,6 +12835,7 @@ "An existing conversation has closed. This is fired when a telephone call is terminated, or a conversation is closed via the API.", "An existing conversation has received notification from Dialogflow that human intervention is required.", "An existing conversation has received a new message, either from API or telephony. It is configured in ConversationProfile.new_message_event_notification_config", + "An existing conversation has received a new speech recognition result. This is mainly for delivering intermediate transcripts. The notification is configured in ConversationProfile.new_recognition_event_notification_config.", "Unrecoverable error during a telephone call. In general non-recoverable errors only occur if something was misconfigured in the ConversationProfile corresponding to the call. After a non-recoverable error, Dialogflow may stop responding. We don't fire this event: * in an API call because we can directly return the error, or, * when we can recover from an error." ], "type": "string" @@ -14729,6 +14735,82 @@ }, "type": "object" }, + "GoogleCloudDialogflowV2SpeechWordInfo": { + "description": "Information for a word recognized by the speech recognizer.", + "id": "GoogleCloudDialogflowV2SpeechWordInfo", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for this word. A higher number indicates an estimated greater likelihood that the recognized word is correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is not guaranteed to be fully stable over time for the same audio input. 
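
Both word offsets are `google-duration` values that the Go client exposes as plain strings (e.g. `"4.100s"`). A small consumer-side sketch of converting them, assuming well-formed offsets in the simple seconds form that `time.ParseDuration` accepts:

```
package main

import (
	"fmt"
	"time"

	dialogflow "google.golang.org/api/dialogflow/v2beta1"
)

func main() {
	w := &dialogflow.GoogleCloudDialogflowV2beta1SpeechWordInfo{
		Word:        "question",
		StartOffset: "4.100s", // google-duration strings like this parse with time.ParseDuration
		EndOffset:   "4.800s",
	}
	start, err := time.ParseDuration(w.StartOffset)
	if err != nil {
		panic(err)
	}
	end, _ := time.ParseDuration(w.EndOffset)
	fmt.Printf("%q spans %v..%v (%v)\n", w.Word, start, end, end-start)
}
```
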
Users should also not rely on it to always be provided.", + "format": "float", + "type": "number" + }, + "endOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the end of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "startOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the start of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "word": { + "description": "The word this info is for.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2StreamingRecognitionResult": { + "description": "Contains a speech recognition result corresponding to a portion of the audio that is currently being processed or an indication that this is the end of the single requested utterance. While end-user audio is being processed, Dialogflow sends a series of results. Each result may contain a `transcript` value. A transcript represents a portion of the utterance. While the recognizer is processing audio, transcript values may be interim values or finalized values. Once a transcript is finalized, the `is_final` value is set to true and processing continues for the next transcript. If `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was true, and the recognizer has completed processing audio, the `message_type` value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result contains the last finalized transcript. The complete end-user utterance is determined by concatenating the finalized transcript values received for the series of results. In the following example, single utterance is enabled. In the case where single utterance is not enabled, result 7 would not occur. ``` Num | transcript | message_type | is_final --- | ----------------------- | ----------------------- | -------- 1 | \"tube\" | TRANSCRIPT | false 2 | \"to be a\" | TRANSCRIPT | false 3 | \"to be\" | TRANSCRIPT | false 4 | \"to be or not to be\" | TRANSCRIPT | true 5 | \"that's\" | TRANSCRIPT | false 6 | \"that is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | \" that is the question\" | TRANSCRIPT | true ``` Concatenating the finalized transcripts with `is_final` set to true, the complete utterance becomes \"to be or not to be that is the question\".", + "id": "GoogleCloudDialogflowV2StreamingRecognitionResult", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for the current portion of audio. A higher number indicates an estimated greater likelihood that the recognized words are correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is typically only provided if `is_final` is true and you should not rely on it being accurate or even set.", + "format": "float", + "type": "number" + }, + "isFinal": { + "description": "If `false`, the `StreamingRecognitionResult` represents an interim result that may change. If `true`, the recognizer will not return any further hypotheses about this piece of the audio. 
May only be populated for `message_type` = `TRANSCRIPT`.", + "type": "boolean" + }, + "languageCode": { + "description": "Detected language code for the transcript.", + "type": "string" + }, + "messageType": { + "description": "Type of the result message.", + "enum": [ + "MESSAGE_TYPE_UNSPECIFIED", + "TRANSCRIPT", + "END_OF_SINGLE_UTTERANCE" + ], + "enumDescriptions": [ + "Not specified. Should never be used.", + "Message contains a (possibly partial) transcript.", + "This event indicates that the server has detected the end of the user's speech utterance and expects no additional inputs. Therefore, the server will not process additional audio (although it may subsequently return additional results). The client should stop sending additional audio data, half-close the gRPC connection, and wait for any additional results until the server closes the gRPC connection. This message is only sent if `single_utterance` was set to `true`, and is not used otherwise." + ], + "type": "string" + }, + "speechEndOffset": { + "description": "Time offset of the end of this Speech recognition result relative to the beginning of the audio. Only populated for `message_type` = `TRANSCRIPT`.", + "format": "google-duration", + "type": "string" + }, + "speechWordInfo": { + "description": "Word-specific information for the words recognized by Speech in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and [InputAudioConfig.enable_word_info] is set.", + "items": { + "$ref": "GoogleCloudDialogflowV2SpeechWordInfo" + }, + "type": "array" + }, + "transcript": { + "description": "Transcript text representing the words that the user spoke. Populated if and only if `message_type` = `TRANSCRIPT`.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2SuggestArticlesResponse": { "description": "The response message for Participants.SuggestArticles.", "id": "GoogleCloudDialogflowV2SuggestArticlesResponse", @@ -15110,6 +15192,10 @@ "$ref": "GoogleCloudDialogflowV2beta1Message", "description": "Payload of NEW_MESSAGE event." }, + "newRecognitionResultPayload": { + "$ref": "GoogleCloudDialogflowV2beta1StreamingRecognitionResult", + "description": "Payload of NEW_RECOGNITION_RESULT event." + }, "type": { "description": "Required. The type of the event that this notification refers to.", "enum": [ @@ -15118,6 +15204,7 @@ "CONVERSATION_FINISHED", "HUMAN_INTERVENTION_NEEDED", "NEW_MESSAGE", + "NEW_RECOGNITION_RESULT", "UNRECOVERABLE_ERROR" ], "enumDescriptions": [ @@ -15126,6 +15213,7 @@ "An existing conversation has closed. This is fired when a telephone call is terminated, or a conversation is closed via the API.", "An existing conversation has received notification from Dialogflow that human intervention is required.", "An existing conversation has received a new message, either from API or telephony. It is configured in ConversationProfile.new_message_event_notification_config", + "An existing conversation has received a new speech recognition result. This is mainly for delivering intermediate transcripts. The notification is configured in ConversationProfile.new_recognition_event_notification_config.", "Unrecoverable error during a telephone call. In general non-recoverable errors only occur if something was misconfigured in the ConversationProfile corresponding to the call. After a non-recoverable error, Dialogflow may stop responding. We don't fire this event: * in an API call because we can directly return the error, or, * when we can recover from an error." 
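
A sketch of a subscriber-side handler for the new event type, assuming events are delivered with JSON `MessageFormat` to the topic configured above and decoded with the v2beta1 types added by this patch; the sample payload is illustrative:

```
package main

import (
	"encoding/json"
	"fmt"
	"log"

	dialogflow "google.golang.org/api/dialogflow/v2beta1"
)

// handleEvent would receive the payload of a Pub/Sub message published to
// new_recognition_result_notification_config.topic.
func handleEvent(data []byte) {
	var ev dialogflow.GoogleCloudDialogflowV2beta1ConversationEvent
	if err := json.Unmarshal(data, &ev); err != nil {
		log.Printf("bad event: %v", err)
		return
	}
	switch ev.Type {
	case "NEW_RECOGNITION_RESULT":
		if r := ev.NewRecognitionResultPayload; r != nil {
			fmt.Printf("conversation %s: %q (final=%v)\n", ev.Conversation, r.Transcript, r.IsFinal)
		}
	case "NEW_MESSAGE":
		fmt.Printf("new message on %s\n", ev.Conversation)
	}
}

func main() {
	handleEvent([]byte(`{"conversation":"projects/p/conversations/c","type":"NEW_RECOGNITION_RESULT","newRecognitionResultPayload":{"transcript":"to be","isFinal":false}}`))
}
```
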
], "type": "string" @@ -17273,6 +17361,95 @@ }, "type": "object" }, + "GoogleCloudDialogflowV2beta1SpeechWordInfo": { + "description": "Information for a word recognized by the speech recognizer.", + "id": "GoogleCloudDialogflowV2beta1SpeechWordInfo", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for this word. A higher number indicates an estimated greater likelihood that the recognized word is correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is not guaranteed to be fully stable over time for the same audio input. Users should also not rely on it to always be provided.", + "format": "float", + "type": "number" + }, + "endOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the end of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "startOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the start of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "word": { + "description": "The word this info is for.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1StreamingRecognitionResult": { + "description": "Contains a speech recognition result corresponding to a portion of the audio that is currently being processed or an indication that this is the end of the single requested utterance. While end-user audio is being processed, Dialogflow sends a series of results. Each result may contain a `transcript` value. A transcript represents a portion of the utterance. While the recognizer is processing audio, transcript values may be interim values or finalized values. Once a transcript is finalized, the `is_final` value is set to true and processing continues for the next transcript. If `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was true, and the recognizer has completed processing audio, the `message_type` value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result contains the last finalized transcript. The complete end-user utterance is determined by concatenating the finalized transcript values received for the series of results. In the following example, single utterance is enabled. In the case where single utterance is not enabled, result 7 would not occur. ``` Num | transcript | message_type | is_final --- | ----------------------- | ----------------------- | -------- 1 | \"tube\" | TRANSCRIPT | false 2 | \"to be a\" | TRANSCRIPT | false 3 | \"to be\" | TRANSCRIPT | false 4 | \"to be or not to be\" | TRANSCRIPT | true 5 | \"that's\" | TRANSCRIPT | false 6 | \"that is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | \" that is the question\" | TRANSCRIPT | true ``` Concatenating the finalized transcripts with `is_final` set to true, the complete utterance becomes \"to be or not to be that is the question\".", + "id": "GoogleCloudDialogflowV2beta1StreamingRecognitionResult", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for the current portion of audio. A higher number indicates an estimated greater likelihood that the recognized words are correct. The default of 0.0 is a sentinel value indicating that confidence was not set. 
This field is typically only provided if `is_final` is true and you should not rely on it being accurate or even set.", + "format": "float", + "type": "number" + }, + "dtmfDigits": { + "$ref": "GoogleCloudDialogflowV2beta1TelephonyDtmfEvents", + "description": "DTMF digits. Populated if and only if `message_type` = `DTMF_DIGITS`." + }, + "isFinal": { + "description": "If `false`, the `StreamingRecognitionResult` represents an interim result that may change. If `true`, the recognizer will not return any further hypotheses about this piece of the audio. May only be populated for `message_type` = `TRANSCRIPT`.", + "type": "boolean" + }, + "languageCode": { + "description": "Detected language code for the transcript.", + "type": "string" + }, + "messageType": { + "description": "Type of the result message.", + "enum": [ + "MESSAGE_TYPE_UNSPECIFIED", + "TRANSCRIPT", + "DTMF_DIGITS", + "END_OF_SINGLE_UTTERANCE", + "PARTIAL_DTMF_DIGITS" + ], + "enumDescriptions": [ + "Not specified. Should never be used.", + "Message contains a (possibly partial) transcript.", + "Message contains DTMF digits.", + "This event indicates that the server has detected the end of the user's speech utterance and expects no additional speech. Therefore, the server will not process additional audio (although it may subsequently return additional results). The client should stop sending additional audio data, half-close the gRPC connection, and wait for any additional results until the server closes the gRPC connection. This message is only sent if `single_utterance` was set to `true`, and is not used otherwise.", + "Message contains DTMF digits. Before a message with DTMF_DIGITS is sent, a message with PARTIAL_DTMF_DIGITS may be sent with DTMF digits collected up to the time of sending, which represents an intermediate result." + ], + "type": "string" + }, + "speechEndOffset": { + "description": "Time offset of the end of this Speech recognition result relative to the beginning of the audio. Only populated for `message_type` = `TRANSCRIPT`.", + "format": "google-duration", + "type": "string" + }, + "speechWordInfo": { + "description": "Word-specific information for the words recognized by Speech in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and [InputAudioConfig.enable_word_info] is set.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1SpeechWordInfo" + }, + "type": "array" + }, + "stability": { + "description": "An estimate of the likelihood that the speech recognizer will not change its guess about this interim recognition result: * If the value is unspecified or 0.0, Dialogflow didn't compute the stability. In particular, Dialogflow will only provide stability for `TRANSCRIPT` results with `is_final = false`. * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely unstable and 1.0 means completely stable.", + "format": "float", + "type": "number" + }, + "transcript": { + "description": "Transcript text representing the words that the user spoke. 
Populated if and only if `message_type` = `TRANSCRIPT`.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2beta1SuggestArticlesResponse": { "description": "The response message for Participants.SuggestArticles.", "id": "GoogleCloudDialogflowV2beta1SuggestArticlesResponse", @@ -17421,6 +17598,58 @@ }, "type": "object" }, + "GoogleCloudDialogflowV2beta1TelephonyDtmfEvents": { + "description": "A wrapper of repeated TelephonyDtmf digits.", + "id": "GoogleCloudDialogflowV2beta1TelephonyDtmfEvents", + "properties": { + "dtmfEvents": { + "description": "A sequence of TelephonyDtmf digits.", + "items": { + "enum": [ + "TELEPHONY_DTMF_UNSPECIFIED", + "DTMF_ONE", + "DTMF_TWO", + "DTMF_THREE", + "DTMF_FOUR", + "DTMF_FIVE", + "DTMF_SIX", + "DTMF_SEVEN", + "DTMF_EIGHT", + "DTMF_NINE", + "DTMF_ZERO", + "DTMF_A", + "DTMF_B", + "DTMF_C", + "DTMF_D", + "DTMF_STAR", + "DTMF_POUND" + ], + "enumDescriptions": [ + "Not specified. This value may be used to indicate an absent digit.", + "Number: '1'.", + "Number: '2'.", + "Number: '3'.", + "Number: '4'.", + "Number: '5'.", + "Number: '6'.", + "Number: '7'.", + "Number: '8'.", + "Number: '9'.", + "Number: '0'.", + "Letter: 'A'.", + "Letter: 'B'.", + "Letter: 'C'.", + "Letter: 'D'.", + "Asterisk/star: '*'.", + "Pound/diamond/hash/square/gate/octothorpe: '#'." + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2beta1WebhookRequest": { "description": "The request message for a webhook call.", "id": "GoogleCloudDialogflowV2beta1WebhookRequest", diff --git a/dialogflow/v3/dialogflow-gen.go b/dialogflow/v3/dialogflow-gen.go index e55d573e608..4452e761855 100644 --- a/dialogflow/v3/dialogflow-gen.go +++ b/dialogflow/v3/dialogflow-gen.go @@ -12424,6 +12424,8 @@ type GoogleCloudDialogflowV2ConversationEvent struct { ErrorStatus *GoogleRpcStatus `json:"errorStatus,omitempty"` // NewMessagePayload: Payload of NEW_MESSAGE event. NewMessagePayload *GoogleCloudDialogflowV2Message `json:"newMessagePayload,omitempty"` + // NewRecognitionResultPayload: Payload of NEW_RECOGNITION_RESULT event. + NewRecognitionResultPayload *GoogleCloudDialogflowV2StreamingRecognitionResult `json:"newRecognitionResultPayload,omitempty"` // Type: The type of the event that this notification refers to. // // Possible values: @@ -12438,6 +12440,10 @@ type GoogleCloudDialogflowV2ConversationEvent struct { // "NEW_MESSAGE" - An existing conversation has received a new message, // either from API or telephony. It is configured in // ConversationProfile.new_message_event_notification_config + // "NEW_RECOGNITION_RESULT" - An existing conversation has received a new + // speech recognition result. This is mainly for delivering intermediate + // transcripts. The notification is configured in + // ConversationProfile.new_recognition_event_notification_config. // "UNRECOVERABLE_ERROR" - Unrecoverable error during a telephone call. In // general non-recoverable errors only occur if something was misconfigured in // the ConversationProfile corresponding to the call. After a non-recoverable @@ -14958,6 +14964,154 @@ func (s GoogleCloudDialogflowV2SmartReplyModelMetadata) MarshalJSON() ([]byte, e return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudDialogflowV2SpeechWordInfo: Information for a word recognized by +// the speech recognizer. +type GoogleCloudDialogflowV2SpeechWordInfo struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for this word. 
A + // higher number indicates an estimated greater likelihood that the recognized + // word is correct. The default of 0.0 is a sentinel value indicating that + // confidence was not set. This field is not guaranteed to be fully stable over + // time for the same audio input. Users should also not rely on it to always be + // provided. + Confidence float64 `json:"confidence,omitempty"` + // EndOffset: Time offset relative to the beginning of the audio that + // corresponds to the end of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + EndOffset string `json:"endOffset,omitempty"` + // StartOffset: Time offset relative to the beginning of the audio that + // corresponds to the start of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + StartOffset string `json:"startOffset,omitempty"` + // Word: The word this info is for. + Word string `json:"word,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2SpeechWordInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2SpeechWordInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2SpeechWordInfo) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2SpeechWordInfo + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + +// GoogleCloudDialogflowV2StreamingRecognitionResult: Contains a speech +// recognition result corresponding to a portion of the audio that is currently +// being processed or an indication that this is the end of the single +// requested utterance. While end-user audio is being processed, Dialogflow +// sends a series of results. Each result may contain a `transcript` value. A +// transcript represents a portion of the utterance. While the recognizer is +// processing audio, transcript values may be interim values or finalized +// values. Once a transcript is finalized, the `is_final` value is set to true +// and processing continues for the next transcript. If +// `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was +// true, and the recognizer has completed processing audio, the `message_type` +// value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result +// contains the last finalized transcript. The complete end-user utterance is +// determined by concatenating the finalized transcript values received for the +// series of results. In the following example, single utterance is enabled. In +// the case where single utterance is not enabled, result 7 would not occur. 
+// ``` Num | transcript | message_type | is_final --- | ----------------------- +// | ----------------------- | -------- 1 | "tube" | TRANSCRIPT | false 2 | "to +// be a" | TRANSCRIPT | false 3 | "to be" | TRANSCRIPT | false 4 | "to be or +// not to be" | TRANSCRIPT | true 5 | "that's" | TRANSCRIPT | false 6 | "that +// is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | " +// that is the question" | TRANSCRIPT | true ``` Concatenating the finalized +// transcripts with `is_final` set to true, the complete utterance becomes "to +// be or not to be that is the question". +type GoogleCloudDialogflowV2StreamingRecognitionResult struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for the current + // portion of audio. A higher number indicates an estimated greater likelihood + // that the recognized words are correct. The default of 0.0 is a sentinel + // value indicating that confidence was not set. This field is typically only + // provided if `is_final` is true and you should not rely on it being accurate + // or even set. + Confidence float64 `json:"confidence,omitempty"` + // IsFinal: If `false`, the `StreamingRecognitionResult` represents an interim + // result that may change. If `true`, the recognizer will not return any + // further hypotheses about this piece of the audio. May only be populated for + // `message_type` = `TRANSCRIPT`. + IsFinal bool `json:"isFinal,omitempty"` + // LanguageCode: Detected language code for the transcript. + LanguageCode string `json:"languageCode,omitempty"` + // MessageType: Type of the result message. + // + // Possible values: + // "MESSAGE_TYPE_UNSPECIFIED" - Not specified. Should never be used. + // "TRANSCRIPT" - Message contains a (possibly partial) transcript. + // "END_OF_SINGLE_UTTERANCE" - This event indicates that the server has + // detected the end of the user's speech utterance and expects no additional + // inputs. Therefore, the server will not process additional audio (although it + // may subsequently return additional results). The client should stop sending + // additional audio data, half-close the gRPC connection, and wait for any + // additional results until the server closes the gRPC connection. This message + // is only sent if `single_utterance` was set to `true`, and is not used + // otherwise. + MessageType string `json:"messageType,omitempty"` + // SpeechEndOffset: Time offset of the end of this Speech recognition result + // relative to the beginning of the audio. Only populated for `message_type` = + // `TRANSCRIPT`. + SpeechEndOffset string `json:"speechEndOffset,omitempty"` + // SpeechWordInfo: Word-specific information for the words recognized by Speech + // in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and + // [InputAudioConfig.enable_word_info] is set. + SpeechWordInfo []*GoogleCloudDialogflowV2SpeechWordInfo `json:"speechWordInfo,omitempty"` + // Transcript: Transcript text representing the words that the user spoke. + // Populated if and only if `message_type` = `TRANSCRIPT`. + Transcript string `json:"transcript,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
"Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2StreamingRecognitionResult) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2StreamingRecognitionResult + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2StreamingRecognitionResult) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2StreamingRecognitionResult + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + // GoogleCloudDialogflowV2SuggestArticlesResponse: The response message for // Participants.SuggestArticles. type GoogleCloudDialogflowV2SuggestArticlesResponse struct { @@ -15487,6 +15641,8 @@ type GoogleCloudDialogflowV2beta1ConversationEvent struct { ErrorStatus *GoogleRpcStatus `json:"errorStatus,omitempty"` // NewMessagePayload: Payload of NEW_MESSAGE event. NewMessagePayload *GoogleCloudDialogflowV2beta1Message `json:"newMessagePayload,omitempty"` + // NewRecognitionResultPayload: Payload of NEW_RECOGNITION_RESULT event. + NewRecognitionResultPayload *GoogleCloudDialogflowV2beta1StreamingRecognitionResult `json:"newRecognitionResultPayload,omitempty"` // Type: Required. The type of the event that this notification refers to. // // Possible values: @@ -15501,6 +15657,10 @@ type GoogleCloudDialogflowV2beta1ConversationEvent struct { // "NEW_MESSAGE" - An existing conversation has received a new message, // either from API or telephony. It is configured in // ConversationProfile.new_message_event_notification_config + // "NEW_RECOGNITION_RESULT" - An existing conversation has received a new + // speech recognition result. This is mainly for delivering intermediate + // transcripts. The notification is configured in + // ConversationProfile.new_recognition_event_notification_config. // "UNRECOVERABLE_ERROR" - Unrecoverable error during a telephone call. In // general non-recoverable errors only occur if something was misconfigured in // the ConversationProfile corresponding to the call. After a non-recoverable @@ -18468,6 +18628,171 @@ func (s *GoogleCloudDialogflowV2beta1SmartReplyAnswer) UnmarshalJSON(data []byte return nil } +// GoogleCloudDialogflowV2beta1SpeechWordInfo: Information for a word +// recognized by the speech recognizer. +type GoogleCloudDialogflowV2beta1SpeechWordInfo struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for this word. A + // higher number indicates an estimated greater likelihood that the recognized + // word is correct. The default of 0.0 is a sentinel value indicating that + // confidence was not set. This field is not guaranteed to be fully stable over + // time for the same audio input. Users should also not rely on it to always be + // provided. + Confidence float64 `json:"confidence,omitempty"` + // EndOffset: Time offset relative to the beginning of the audio that + // corresponds to the end of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. 
+ EndOffset string `json:"endOffset,omitempty"` + // StartOffset: Time offset relative to the beginning of the audio that + // corresponds to the start of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + StartOffset string `json:"startOffset,omitempty"` + // Word: The word this info is for. + Word string `json:"word,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1SpeechWordInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1SpeechWordInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2beta1SpeechWordInfo) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2beta1SpeechWordInfo + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + +// GoogleCloudDialogflowV2beta1StreamingRecognitionResult: Contains a speech +// recognition result corresponding to a portion of the audio that is currently +// being processed or an indication that this is the end of the single +// requested utterance. While end-user audio is being processed, Dialogflow +// sends a series of results. Each result may contain a `transcript` value. A +// transcript represents a portion of the utterance. While the recognizer is +// processing audio, transcript values may be interim values or finalized +// values. Once a transcript is finalized, the `is_final` value is set to true +// and processing continues for the next transcript. If +// `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was +// true, and the recognizer has completed processing audio, the `message_type` +// value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result +// contains the last finalized transcript. The complete end-user utterance is +// determined by concatenating the finalized transcript values received for the +// series of results. In the following example, single utterance is enabled. In +// the case where single utterance is not enabled, result 7 would not occur. +// ``` Num | transcript | message_type | is_final --- | ----------------------- +// | ----------------------- | -------- 1 | "tube" | TRANSCRIPT | false 2 | "to +// be a" | TRANSCRIPT | false 3 | "to be" | TRANSCRIPT | false 4 | "to be or +// not to be" | TRANSCRIPT | true 5 | "that's" | TRANSCRIPT | false 6 | "that +// is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | " +// that is the question" | TRANSCRIPT | true ``` Concatenating the finalized +// transcripts with `is_final` set to true, the complete utterance becomes "to +// be or not to be that is the question". 
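
Per the table in the doc comment above, a complete utterance is recovered by concatenating only the finalized transcripts. A minimal sketch over the v2beta1 type:

```
package main

import (
	"fmt"
	"strings"

	dialogflow "google.golang.org/api/dialogflow/v2beta1"
)

// fullUtterance concatenates finalized TRANSCRIPT results, mirroring the
// example in the StreamingRecognitionResult documentation.
func fullUtterance(results []*dialogflow.GoogleCloudDialogflowV2beta1StreamingRecognitionResult) string {
	var parts []string
	for _, r := range results {
		if r.MessageType == "TRANSCRIPT" && r.IsFinal {
			parts = append(parts, strings.TrimSpace(r.Transcript))
		}
	}
	return strings.Join(parts, " ")
}

func main() {
	results := []*dialogflow.GoogleCloudDialogflowV2beta1StreamingRecognitionResult{
		{MessageType: "TRANSCRIPT", Transcript: "to be or not to be", IsFinal: true},
		{MessageType: "TRANSCRIPT", Transcript: "that's", IsFinal: false},
		{MessageType: "TRANSCRIPT", Transcript: " that is the question", IsFinal: true},
	}
	fmt.Println(fullUtterance(results)) // "to be or not to be that is the question"
}
```
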
+type GoogleCloudDialogflowV2beta1StreamingRecognitionResult struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for the current + // portion of audio. A higher number indicates an estimated greater likelihood + // that the recognized words are correct. The default of 0.0 is a sentinel + // value indicating that confidence was not set. This field is typically only + // provided if `is_final` is true and you should not rely on it being accurate + // or even set. + Confidence float64 `json:"confidence,omitempty"` + // DtmfDigits: DTMF digits. Populated if and only if `message_type` = + // `DTMF_DIGITS`. + DtmfDigits *GoogleCloudDialogflowV2beta1TelephonyDtmfEvents `json:"dtmfDigits,omitempty"` + // IsFinal: If `false`, the `StreamingRecognitionResult` represents an interim + // result that may change. If `true`, the recognizer will not return any + // further hypotheses about this piece of the audio. May only be populated for + // `message_type` = `TRANSCRIPT`. + IsFinal bool `json:"isFinal,omitempty"` + // LanguageCode: Detected language code for the transcript. + LanguageCode string `json:"languageCode,omitempty"` + // MessageType: Type of the result message. + // + // Possible values: + // "MESSAGE_TYPE_UNSPECIFIED" - Not specified. Should never be used. + // "TRANSCRIPT" - Message contains a (possibly partial) transcript. + // "DTMF_DIGITS" - Message contains DTMF digits. + // "END_OF_SINGLE_UTTERANCE" - This event indicates that the server has + // detected the end of the user's speech utterance and expects no additional + // speech. Therefore, the server will not process additional audio (although it + // may subsequently return additional results). The client should stop sending + // additional audio data, half-close the gRPC connection, and wait for any + // additional results until the server closes the gRPC connection. This message + // is only sent if `single_utterance` was set to `true`, and is not used + // otherwise. + // "PARTIAL_DTMF_DIGITS" - Message contains DTMF digits. Before a message + // with DTMF_DIGITS is sent, a message with PARTIAL_DTMF_DIGITS may be sent + // with DTMF digits collected up to the time of sending, which represents an + // intermediate result. + MessageType string `json:"messageType,omitempty"` + // SpeechEndOffset: Time offset of the end of this Speech recognition result + // relative to the beginning of the audio. Only populated for `message_type` = + // `TRANSCRIPT`. + SpeechEndOffset string `json:"speechEndOffset,omitempty"` + // SpeechWordInfo: Word-specific information for the words recognized by Speech + // in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and + // [InputAudioConfig.enable_word_info] is set. + SpeechWordInfo []*GoogleCloudDialogflowV2beta1SpeechWordInfo `json:"speechWordInfo,omitempty"` + // Stability: An estimate of the likelihood that the speech recognizer will not + // change its guess about this interim recognition result: * If the value is + // unspecified or 0.0, Dialogflow didn't compute the stability. In particular, + // Dialogflow will only provide stability for `TRANSCRIPT` results with + // `is_final = false`. * Otherwise, the value is in (0.0, 1.0] where 0.0 means + // completely unstable and 1.0 means completely stable. + Stability float64 `json:"stability,omitempty"` + // Transcript: Transcript text representing the words that the user spoke. + // Populated if and only if `message_type` = `TRANSCRIPT`. 
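
A consumer-side sketch of using the `Stability` field documented above to gate interim UI updates; the 0.8 threshold is an arbitrary illustration, not an API recommendation:

```
package main

import (
	"fmt"

	dialogflow "google.golang.org/api/dialogflow/v2beta1"
)

// showInterim reports whether an interim transcript is worth surfacing,
// using the documented semantics: stability is only computed for TRANSCRIPT
// results with is_final = false, 0.0 means "not computed", and otherwise the
// value falls in (0.0, 1.0].
func showInterim(r *dialogflow.GoogleCloudDialogflowV2beta1StreamingRecognitionResult, minStability float64) bool {
	if r.MessageType != "TRANSCRIPT" || r.IsFinal {
		return false
	}
	// Treat "not computed" (0.0) as displayable, since no estimate exists.
	return r.Stability == 0 || r.Stability >= minStability
}

func main() {
	r := &dialogflow.GoogleCloudDialogflowV2beta1StreamingRecognitionResult{
		MessageType: "TRANSCRIPT", Transcript: "to be or", Stability: 0.9,
	}
	fmt.Println(showInterim(r, 0.8)) // true
}
```
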
+ Transcript string `json:"transcript,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1StreamingRecognitionResult) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1StreamingRecognitionResult + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2beta1StreamingRecognitionResult) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2beta1StreamingRecognitionResult + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + Stability gensupport.JSONFloat64 `json:"stability"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + s.Stability = float64(s1.Stability) + return nil +} + // GoogleCloudDialogflowV2beta1SuggestArticlesResponse: The response message // for Participants.SuggestArticles. type GoogleCloudDialogflowV2beta1SuggestArticlesResponse struct { @@ -18670,6 +18995,49 @@ func (s GoogleCloudDialogflowV2beta1SuggestionResult) MarshalJSON() ([]byte, err return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudDialogflowV2beta1TelephonyDtmfEvents: A wrapper of repeated +// TelephonyDtmf digits. +type GoogleCloudDialogflowV2beta1TelephonyDtmfEvents struct { + // DtmfEvents: A sequence of TelephonyDtmf digits. + // + // Possible values: + // "TELEPHONY_DTMF_UNSPECIFIED" - Not specified. This value may be used to + // indicate an absent digit. + // "DTMF_ONE" - Number: '1'. + // "DTMF_TWO" - Number: '2'. + // "DTMF_THREE" - Number: '3'. + // "DTMF_FOUR" - Number: '4'. + // "DTMF_FIVE" - Number: '5'. + // "DTMF_SIX" - Number: '6'. + // "DTMF_SEVEN" - Number: '7'. + // "DTMF_EIGHT" - Number: '8'. + // "DTMF_NINE" - Number: '9'. + // "DTMF_ZERO" - Number: '0'. + // "DTMF_A" - Letter: 'A'. + // "DTMF_B" - Letter: 'B'. + // "DTMF_C" - Letter: 'C'. + // "DTMF_D" - Letter: 'D'. + // "DTMF_STAR" - Asterisk/star: '*'. + // "DTMF_POUND" - Pound/diamond/hash/square/gate/octothorpe: '#'. + DtmfEvents []string `json:"dtmfEvents,omitempty"` + // ForceSendFields is a list of field names (e.g. "DtmfEvents") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DtmfEvents") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1TelephonyDtmfEvents) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1TelephonyDtmfEvents + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // GoogleCloudDialogflowV2beta1WebhookRequest: The request message for a // webhook call. type GoogleCloudDialogflowV2beta1WebhookRequest struct { diff --git a/dialogflow/v3beta1/dialogflow-api.json b/dialogflow/v3beta1/dialogflow-api.json index 034e830a8a9..38b8abe8477 100644 --- a/dialogflow/v3beta1/dialogflow-api.json +++ b/dialogflow/v3beta1/dialogflow-api.json @@ -5272,7 +5272,7 @@ } } }, - "revision": "20241212", + "revision": "20241216", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { @@ -8468,6 +8468,13 @@ "responseUtterances": { "description": "The output text or the transcript of the output audio in the responses. If multiple output messages are returned, they will be concatenated into one.", "type": "string" + }, + "stepMetrics": { + "description": "Metrics associated with different processing steps. Names and number of steps depend on the request and can change without a notice.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3beta1ConversationInteractionStepMetrics" + }, + "type": "array" } }, "type": "object" @@ -8488,6 +8495,22 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3beta1ConversationInteractionStepMetrics": { + "description": "Metrics of each processing step.", + "id": "GoogleCloudDialogflowCxV3beta1ConversationInteractionStepMetrics", + "properties": { + "latency": { + "description": "Processing latency of the step.", + "format": "google-duration", + "type": "string" + }, + "name": { + "description": "Name of the request processing step.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3beta1ConversationMetrics": { "description": "Represents metrics for the conversation.", "id": "GoogleCloudDialogflowCxV3beta1ConversationMetrics", @@ -15119,6 +15142,10 @@ "$ref": "GoogleCloudDialogflowV2Message", "description": "Payload of NEW_MESSAGE event." }, + "newRecognitionResultPayload": { + "$ref": "GoogleCloudDialogflowV2StreamingRecognitionResult", + "description": "Payload of NEW_RECOGNITION_RESULT event." + }, "type": { "description": "The type of the event that this notification refers to.", "enum": [ @@ -15127,6 +15154,7 @@ "CONVERSATION_FINISHED", "HUMAN_INTERVENTION_NEEDED", "NEW_MESSAGE", + "NEW_RECOGNITION_RESULT", "UNRECOVERABLE_ERROR" ], "enumDescriptions": [ @@ -15135,6 +15163,7 @@ "An existing conversation has closed. This is fired when a telephone call is terminated, or a conversation is closed via the API.", "An existing conversation has received notification from Dialogflow that human intervention is required.", "An existing conversation has received a new message, either from API or telephony. It is configured in ConversationProfile.new_message_event_notification_config", + "An existing conversation has received a new speech recognition result. This is mainly for delivering intermediate transcripts. The notification is configured in ConversationProfile.new_recognition_event_notification_config.", "Unrecoverable error during a telephone call. In general non-recoverable errors only occur if something was misconfigured in the ConversationProfile corresponding to the call. After a non-recoverable error, Dialogflow may stop responding. 
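
The DTMF wrapper above carries enum names rather than literal characters. One way a consumer might flatten them into the dialed string (a sketch; the mapping simply follows the enum descriptions):

```
package main

import (
	"fmt"
	"strings"

	dialogflow "google.golang.org/api/dialogflow/v2beta1"
)

// dtmfToString flattens DtmfEvents enum names into the dialed string.
// TELEPHONY_DTMF_UNSPECIFIED and unknown entries are skipped.
func dtmfToString(ev *dialogflow.GoogleCloudDialogflowV2beta1TelephonyDtmfEvents) string {
	names := map[string]rune{
		"DTMF_ZERO": '0', "DTMF_ONE": '1', "DTMF_TWO": '2', "DTMF_THREE": '3',
		"DTMF_FOUR": '4', "DTMF_FIVE": '5', "DTMF_SIX": '6', "DTMF_SEVEN": '7',
		"DTMF_EIGHT": '8', "DTMF_NINE": '9', "DTMF_A": 'A', "DTMF_B": 'B',
		"DTMF_C": 'C', "DTMF_D": 'D', "DTMF_STAR": '*', "DTMF_POUND": '#',
	}
	var b strings.Builder
	for _, e := range ev.DtmfEvents {
		if r, ok := names[e]; ok {
			b.WriteRune(r)
		}
	}
	return b.String()
}

func main() {
	fmt.Println(dtmfToString(&dialogflow.GoogleCloudDialogflowV2beta1TelephonyDtmfEvents{
		DtmfEvents: []string{"DTMF_ONE", "DTMF_TWO", "DTMF_THREE", "DTMF_POUND"},
	})) // 123#
}
```
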
We don't fire this event: * in an API call because we can directly return the error, or, * when we can recover from an error." ], "type": "string" @@ -17034,6 +17063,82 @@ }, "type": "object" }, + "GoogleCloudDialogflowV2SpeechWordInfo": { + "description": "Information for a word recognized by the speech recognizer.", + "id": "GoogleCloudDialogflowV2SpeechWordInfo", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for this word. A higher number indicates an estimated greater likelihood that the recognized word is correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is not guaranteed to be fully stable over time for the same audio input. Users should also not rely on it to always be provided.", + "format": "float", + "type": "number" + }, + "endOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the end of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "startOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the start of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "word": { + "description": "The word this info is for.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2StreamingRecognitionResult": { + "description": "Contains a speech recognition result corresponding to a portion of the audio that is currently being processed or an indication that this is the end of the single requested utterance. While end-user audio is being processed, Dialogflow sends a series of results. Each result may contain a `transcript` value. A transcript represents a portion of the utterance. While the recognizer is processing audio, transcript values may be interim values or finalized values. Once a transcript is finalized, the `is_final` value is set to true and processing continues for the next transcript. If `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was true, and the recognizer has completed processing audio, the `message_type` value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result contains the last finalized transcript. The complete end-user utterance is determined by concatenating the finalized transcript values received for the series of results. In the following example, single utterance is enabled. In the case where single utterance is not enabled, result 7 would not occur. ``` Num | transcript | message_type | is_final --- | ----------------------- | ----------------------- | -------- 1 | \"tube\" | TRANSCRIPT | false 2 | \"to be a\" | TRANSCRIPT | false 3 | \"to be\" | TRANSCRIPT | false 4 | \"to be or not to be\" | TRANSCRIPT | true 5 | \"that's\" | TRANSCRIPT | false 6 | \"that is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | \" that is the question\" | TRANSCRIPT | true ``` Concatenating the finalized transcripts with `is_final` set to true, the complete utterance becomes \"to be or not to be that is the question\".", + "id": "GoogleCloudDialogflowV2StreamingRecognitionResult", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for the current portion of audio. A higher number indicates an estimated greater likelihood that the recognized words are correct. 
The default of 0.0 is a sentinel value indicating that confidence was not set. This field is typically only provided if `is_final` is true and you should not rely on it being accurate or even set.", + "format": "float", + "type": "number" + }, + "isFinal": { + "description": "If `false`, the `StreamingRecognitionResult` represents an interim result that may change. If `true`, the recognizer will not return any further hypotheses about this piece of the audio. May only be populated for `message_type` = `TRANSCRIPT`.", + "type": "boolean" + }, + "languageCode": { + "description": "Detected language code for the transcript.", + "type": "string" + }, + "messageType": { + "description": "Type of the result message.", + "enum": [ + "MESSAGE_TYPE_UNSPECIFIED", + "TRANSCRIPT", + "END_OF_SINGLE_UTTERANCE" + ], + "enumDescriptions": [ + "Not specified. Should never be used.", + "Message contains a (possibly partial) transcript.", + "This event indicates that the server has detected the end of the user's speech utterance and expects no additional inputs. Therefore, the server will not process additional audio (although it may subsequently return additional results). The client should stop sending additional audio data, half-close the gRPC connection, and wait for any additional results until the server closes the gRPC connection. This message is only sent if `single_utterance` was set to `true`, and is not used otherwise." + ], + "type": "string" + }, + "speechEndOffset": { + "description": "Time offset of the end of this Speech recognition result relative to the beginning of the audio. Only populated for `message_type` = `TRANSCRIPT`.", + "format": "google-duration", + "type": "string" + }, + "speechWordInfo": { + "description": "Word-specific information for the words recognized by Speech in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and [InputAudioConfig.enable_word_info] is set.", + "items": { + "$ref": "GoogleCloudDialogflowV2SpeechWordInfo" + }, + "type": "array" + }, + "transcript": { + "description": "Transcript text representing the words that the user spoke. Populated if and only if `message_type` = `TRANSCRIPT`.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2SuggestArticlesResponse": { "description": "The response message for Participants.SuggestArticles.", "id": "GoogleCloudDialogflowV2SuggestArticlesResponse", @@ -17415,6 +17520,10 @@ "$ref": "GoogleCloudDialogflowV2beta1Message", "description": "Payload of NEW_MESSAGE event." }, + "newRecognitionResultPayload": { + "$ref": "GoogleCloudDialogflowV2beta1StreamingRecognitionResult", + "description": "Payload of NEW_RECOGNITION_RESULT event." + }, "type": { "description": "Required. The type of the event that this notification refers to.", "enum": [ @@ -17423,6 +17532,7 @@ "CONVERSATION_FINISHED", "HUMAN_INTERVENTION_NEEDED", "NEW_MESSAGE", + "NEW_RECOGNITION_RESULT", "UNRECOVERABLE_ERROR" ], "enumDescriptions": [ @@ -17431,6 +17541,7 @@ "An existing conversation has closed. This is fired when a telephone call is terminated, or a conversation is closed via the API.", "An existing conversation has received notification from Dialogflow that human intervention is required.", "An existing conversation has received a new message, either from API or telephony. It is configured in ConversationProfile.new_message_event_notification_config", + "An existing conversation has received a new speech recognition result. This is mainly for delivering intermediate transcripts. 
The notification is configured in ConversationProfile.new_recognition_event_notification_config.", "Unrecoverable error during a telephone call. In general non-recoverable errors only occur if something was misconfigured in the ConversationProfile corresponding to the call. After a non-recoverable error, Dialogflow may stop responding. We don't fire this event: * in an API call because we can directly return the error, or, * when we can recover from an error." ], "type": "string" @@ -19578,6 +19689,95 @@ }, "type": "object" }, + "GoogleCloudDialogflowV2beta1SpeechWordInfo": { + "description": "Information for a word recognized by the speech recognizer.", + "id": "GoogleCloudDialogflowV2beta1SpeechWordInfo", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for this word. A higher number indicates an estimated greater likelihood that the recognized word is correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is not guaranteed to be fully stable over time for the same audio input. Users should also not rely on it to always be provided.", + "format": "float", + "type": "number" + }, + "endOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the end of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "startOffset": { + "description": "Time offset relative to the beginning of the audio that corresponds to the start of the spoken word. This is an experimental feature and the accuracy of the time offset can vary.", + "format": "google-duration", + "type": "string" + }, + "word": { + "description": "The word this info is for.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1StreamingRecognitionResult": { + "description": "Contains a speech recognition result corresponding to a portion of the audio that is currently being processed or an indication that this is the end of the single requested utterance. While end-user audio is being processed, Dialogflow sends a series of results. Each result may contain a `transcript` value. A transcript represents a portion of the utterance. While the recognizer is processing audio, transcript values may be interim values or finalized values. Once a transcript is finalized, the `is_final` value is set to true and processing continues for the next transcript. If `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was true, and the recognizer has completed processing audio, the `message_type` value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result contains the last finalized transcript. The complete end-user utterance is determined by concatenating the finalized transcript values received for the series of results. In the following example, single utterance is enabled. In the case where single utterance is not enabled, result 7 would not occur. 
``` Num | transcript | message_type | is_final --- | ----------------------- | ----------------------- | -------- 1 | \"tube\" | TRANSCRIPT | false 2 | \"to be a\" | TRANSCRIPT | false 3 | \"to be\" | TRANSCRIPT | false 4 | \"to be or not to be\" | TRANSCRIPT | true 5 | \"that's\" | TRANSCRIPT | false 6 | \"that is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | \" that is the question\" | TRANSCRIPT | true ``` Concatenating the finalized transcripts with `is_final` set to true, the complete utterance becomes \"to be or not to be that is the question\".", + "id": "GoogleCloudDialogflowV2beta1StreamingRecognitionResult", + "properties": { + "confidence": { + "description": "The Speech confidence between 0.0 and 1.0 for the current portion of audio. A higher number indicates an estimated greater likelihood that the recognized words are correct. The default of 0.0 is a sentinel value indicating that confidence was not set. This field is typically only provided if `is_final` is true and you should not rely on it being accurate or even set.", + "format": "float", + "type": "number" + }, + "dtmfDigits": { + "$ref": "GoogleCloudDialogflowV2beta1TelephonyDtmfEvents", + "description": "DTMF digits. Populated if and only if `message_type` = `DTMF_DIGITS`." + }, + "isFinal": { + "description": "If `false`, the `StreamingRecognitionResult` represents an interim result that may change. If `true`, the recognizer will not return any further hypotheses about this piece of the audio. May only be populated for `message_type` = `TRANSCRIPT`.", + "type": "boolean" + }, + "languageCode": { + "description": "Detected language code for the transcript.", + "type": "string" + }, + "messageType": { + "description": "Type of the result message.", + "enum": [ + "MESSAGE_TYPE_UNSPECIFIED", + "TRANSCRIPT", + "DTMF_DIGITS", + "END_OF_SINGLE_UTTERANCE", + "PARTIAL_DTMF_DIGITS" + ], + "enumDescriptions": [ + "Not specified. Should never be used.", + "Message contains a (possibly partial) transcript.", + "Message contains DTMF digits.", + "This event indicates that the server has detected the end of the user's speech utterance and expects no additional speech. Therefore, the server will not process additional audio (although it may subsequently return additional results). The client should stop sending additional audio data, half-close the gRPC connection, and wait for any additional results until the server closes the gRPC connection. This message is only sent if `single_utterance` was set to `true`, and is not used otherwise.", + "Message contains DTMF digits. Before a message with DTMF_DIGITS is sent, a message with PARTIAL_DTMF_DIGITS may be sent with DTMF digits collected up to the time of sending, which represents an intermediate result." + ], + "type": "string" + }, + "speechEndOffset": { + "description": "Time offset of the end of this Speech recognition result relative to the beginning of the audio. Only populated for `message_type` = `TRANSCRIPT`.", + "format": "google-duration", + "type": "string" + }, + "speechWordInfo": { + "description": "Word-specific information for the words recognized by Speech in transcript. 
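Editor's note: the concatenation rule worked through in the table above is mechanical enough to sketch. Assuming the results of one streaming session have been collected in order, only `TRANSCRIPT` results flagged `is_final` contribute to the complete utterance:

```go
package main

import (
	"fmt"
	"strings"

	dialogflow "google.golang.org/api/dialogflow/v2beta1"
)

// completeUtterance concatenates the finalized transcripts of a streaming
// session, mirroring the worked example in the schema description: interim
// results (is_final=false) and END_OF_SINGLE_UTTERANCE markers are skipped.
func completeUtterance(results []*dialogflow.GoogleCloudDialogflowV2beta1StreamingRecognitionResult) string {
	var b strings.Builder
	for _, r := range results {
		if r.MessageType == "TRANSCRIPT" && r.IsFinal {
			b.WriteString(r.Transcript)
		}
	}
	return b.String()
}

func main() {
	results := []*dialogflow.GoogleCloudDialogflowV2beta1StreamingRecognitionResult{
		{MessageType: "TRANSCRIPT", Transcript: "to be a", IsFinal: false},
		{MessageType: "TRANSCRIPT", Transcript: "to be or not to be", IsFinal: true},
		{MessageType: "END_OF_SINGLE_UTTERANCE"},
		{MessageType: "TRANSCRIPT", Transcript: " that is the question", IsFinal: true},
	}
	// Prints "to be or not to be that is the question".
	fmt.Println(completeUtterance(results))
}
```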
Populated if and only if `message_type` = `TRANSCRIPT` and [InputAudioConfig.enable_word_info] is set.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1SpeechWordInfo" + }, + "type": "array" + }, + "stability": { + "description": "An estimate of the likelihood that the speech recognizer will not change its guess about this interim recognition result: * If the value is unspecified or 0.0, Dialogflow didn't compute the stability. In particular, Dialogflow will only provide stability for `TRANSCRIPT` results with `is_final = false`. * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely unstable and 1.0 means completely stable.", + "format": "float", + "type": "number" + }, + "transcript": { + "description": "Transcript text representing the words that the user spoke. Populated if and only if `message_type` = `TRANSCRIPT`.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2beta1SuggestArticlesResponse": { "description": "The response message for Participants.SuggestArticles.", "id": "GoogleCloudDialogflowV2beta1SuggestArticlesResponse", @@ -19726,6 +19926,58 @@ }, "type": "object" }, + "GoogleCloudDialogflowV2beta1TelephonyDtmfEvents": { + "description": "A wrapper of repeated TelephonyDtmf digits.", + "id": "GoogleCloudDialogflowV2beta1TelephonyDtmfEvents", + "properties": { + "dtmfEvents": { + "description": "A sequence of TelephonyDtmf digits.", + "items": { + "enum": [ + "TELEPHONY_DTMF_UNSPECIFIED", + "DTMF_ONE", + "DTMF_TWO", + "DTMF_THREE", + "DTMF_FOUR", + "DTMF_FIVE", + "DTMF_SIX", + "DTMF_SEVEN", + "DTMF_EIGHT", + "DTMF_NINE", + "DTMF_ZERO", + "DTMF_A", + "DTMF_B", + "DTMF_C", + "DTMF_D", + "DTMF_STAR", + "DTMF_POUND" + ], + "enumDescriptions": [ + "Not specified. This value may be used to indicate an absent digit.", + "Number: '1'.", + "Number: '2'.", + "Number: '3'.", + "Number: '4'.", + "Number: '5'.", + "Number: '6'.", + "Number: '7'.", + "Number: '8'.", + "Number: '9'.", + "Number: '0'.", + "Letter: 'A'.", + "Letter: 'B'.", + "Letter: 'C'.", + "Letter: 'D'.", + "Asterisk/star: '*'.", + "Pound/diamond/hash/square/gate/octothorpe: '#'." + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2beta1WebhookRequest": { "description": "The request message for a webhook call.", "id": "GoogleCloudDialogflowV2beta1WebhookRequest", diff --git a/dialogflow/v3beta1/dialogflow-gen.go b/dialogflow/v3beta1/dialogflow-gen.go index 959e9020123..001cc41fde3 100644 --- a/dialogflow/v3beta1/dialogflow-gen.go +++ b/dialogflow/v3beta1/dialogflow-gen.go @@ -5065,6 +5065,9 @@ type GoogleCloudDialogflowCxV3beta1ConversationInteraction struct { // the responses. If multiple output messages are returned, they will be // concatenated into one. ResponseUtterances string `json:"responseUtterances,omitempty"` + // StepMetrics: Metrics associated with different processing steps. Names and + // number of steps depend on the request and can change without a notice. + StepMetrics []*GoogleCloudDialogflowCxV3beta1ConversationInteractionStepMetrics `json:"stepMetrics,omitempty"` // ForceSendFields is a list of field names (e.g. "AnswerFeedback") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -5125,6 +5128,31 @@ func (s *GoogleCloudDialogflowCxV3beta1ConversationInteractionMissingTransition) return nil } +// GoogleCloudDialogflowCxV3beta1ConversationInteractionStepMetrics: Metrics of +// each processing step. 
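Editor's note: since the TelephonyDtmfEvents wrapper introduced above carries enum strings rather than characters, callers typically map the sequence back to a dial string. A minimal sketch follows; the mapping table is hand-written for this example and is not part of the generated package:

```go
package main

import (
	"fmt"
	"strings"

	dialogflow "google.golang.org/api/dialogflow/v2beta1"
)

// dtmfRunes maps the TelephonyDtmf enum values to the characters a caller
// pressed. TELEPHONY_DTMF_UNSPECIFIED deliberately maps to "".
var dtmfRunes = map[string]string{
	"DTMF_ONE": "1", "DTMF_TWO": "2", "DTMF_THREE": "3",
	"DTMF_FOUR": "4", "DTMF_FIVE": "5", "DTMF_SIX": "6",
	"DTMF_SEVEN": "7", "DTMF_EIGHT": "8", "DTMF_NINE": "9",
	"DTMF_ZERO": "0", "DTMF_A": "A", "DTMF_B": "B",
	"DTMF_C": "C", "DTMF_D": "D", "DTMF_STAR": "*", "DTMF_POUND": "#",
}

// dtmfToDigits renders a TelephonyDtmfEvents wrapper as a dial string,
// e.g. ["DTMF_ONE","DTMF_TWO","DTMF_POUND"] -> "12#".
func dtmfToDigits(ev *dialogflow.GoogleCloudDialogflowV2beta1TelephonyDtmfEvents) string {
	var b strings.Builder
	for _, e := range ev.DtmfEvents {
		b.WriteString(dtmfRunes[e])
	}
	return b.String()
}

func main() {
	ev := &dialogflow.GoogleCloudDialogflowV2beta1TelephonyDtmfEvents{
		DtmfEvents: []string{"DTMF_ONE", "DTMF_TWO", "DTMF_POUND"},
	}
	fmt.Println(dtmfToDigits(ev)) // "12#"
}
```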
+type GoogleCloudDialogflowCxV3beta1ConversationInteractionStepMetrics struct { + // Latency: Processing latency of the step. + Latency string `json:"latency,omitempty"` + // Name: Name of the request processing step. + Name string `json:"name,omitempty"` + // ForceSendFields is a list of field names (e.g. "Latency") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Latency") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowCxV3beta1ConversationInteractionStepMetrics) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowCxV3beta1ConversationInteractionStepMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // GoogleCloudDialogflowCxV3beta1ConversationMetrics: Represents metrics for // the conversation. type GoogleCloudDialogflowCxV3beta1ConversationMetrics struct { @@ -14293,6 +14321,8 @@ type GoogleCloudDialogflowV2ConversationEvent struct { ErrorStatus *GoogleRpcStatus `json:"errorStatus,omitempty"` // NewMessagePayload: Payload of NEW_MESSAGE event. NewMessagePayload *GoogleCloudDialogflowV2Message `json:"newMessagePayload,omitempty"` + // NewRecognitionResultPayload: Payload of NEW_RECOGNITION_RESULT event. + NewRecognitionResultPayload *GoogleCloudDialogflowV2StreamingRecognitionResult `json:"newRecognitionResultPayload,omitempty"` // Type: The type of the event that this notification refers to. // // Possible values: @@ -14307,6 +14337,10 @@ type GoogleCloudDialogflowV2ConversationEvent struct { // "NEW_MESSAGE" - An existing conversation has received a new message, // either from API or telephony. It is configured in // ConversationProfile.new_message_event_notification_config + // "NEW_RECOGNITION_RESULT" - An existing conversation has received a new + // speech recognition result. This is mainly for delivering intermediate + // transcripts. The notification is configured in + // ConversationProfile.new_recognition_event_notification_config. // "UNRECOVERABLE_ERROR" - Unrecoverable error during a telephone call. In // general non-recoverable errors only occur if something was misconfigured in // the ConversationProfile corresponding to the call. After a non-recoverable @@ -16827,6 +16861,154 @@ func (s GoogleCloudDialogflowV2SmartReplyModelMetadata) MarshalJSON() ([]byte, e return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudDialogflowV2SpeechWordInfo: Information for a word recognized by +// the speech recognizer. +type GoogleCloudDialogflowV2SpeechWordInfo struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for this word. A + // higher number indicates an estimated greater likelihood that the recognized + // word is correct. The default of 0.0 is a sentinel value indicating that + // confidence was not set. This field is not guaranteed to be fully stable over + // time for the same audio input. Users should also not rely on it to always be + // provided. 
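Editor's note: the ConversationInteractionStepMetrics type just added carries a step name and a latency; the doc comment warns that names and step count can change without notice, so consumers should treat both as opaque. The sketch below sums per-step latency, assuming the duration strings use the usual decimal-seconds encoding (such as "0.320s"), which `time.ParseDuration` happens to accept; the step names shown are hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"time"

	dialogflow "google.golang.org/api/dialogflow/v3beta1"
)

// totalLatency sums per-step processing latencies for one interaction.
func totalLatency(steps []*dialogflow.GoogleCloudDialogflowCxV3beta1ConversationInteractionStepMetrics) (time.Duration, error) {
	var total time.Duration
	for _, s := range steps {
		d, err := time.ParseDuration(s.Latency)
		if err != nil {
			return 0, fmt.Errorf("step %q: %w", s.Name, err)
		}
		total += d
	}
	return total, nil
}

func main() {
	steps := []*dialogflow.GoogleCloudDialogflowCxV3beta1ConversationInteractionStepMetrics{
		{Name: "SPEECH_RECOGNITION", Latency: "0.320s"}, // hypothetical step name
		{Name: "INTENT_MATCHING", Latency: "0.045s"},    // hypothetical step name
	}
	total, err := totalLatency(steps)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("end-to-end:", total)
}
```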
+ Confidence float64 `json:"confidence,omitempty"` + // EndOffset: Time offset relative to the beginning of the audio that + // corresponds to the end of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + EndOffset string `json:"endOffset,omitempty"` + // StartOffset: Time offset relative to the beginning of the audio that + // corresponds to the start of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + StartOffset string `json:"startOffset,omitempty"` + // Word: The word this info is for. + Word string `json:"word,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2SpeechWordInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2SpeechWordInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2SpeechWordInfo) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2SpeechWordInfo + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + +// GoogleCloudDialogflowV2StreamingRecognitionResult: Contains a speech +// recognition result corresponding to a portion of the audio that is currently +// being processed or an indication that this is the end of the single +// requested utterance. While end-user audio is being processed, Dialogflow +// sends a series of results. Each result may contain a `transcript` value. A +// transcript represents a portion of the utterance. While the recognizer is +// processing audio, transcript values may be interim values or finalized +// values. Once a transcript is finalized, the `is_final` value is set to true +// and processing continues for the next transcript. If +// `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was +// true, and the recognizer has completed processing audio, the `message_type` +// value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result +// contains the last finalized transcript. The complete end-user utterance is +// determined by concatenating the finalized transcript values received for the +// series of results. In the following example, single utterance is enabled. In +// the case where single utterance is not enabled, result 7 would not occur. 
+// ``` Num | transcript | message_type | is_final --- | ----------------------- +// | ----------------------- | -------- 1 | "tube" | TRANSCRIPT | false 2 | "to +// be a" | TRANSCRIPT | false 3 | "to be" | TRANSCRIPT | false 4 | "to be or +// not to be" | TRANSCRIPT | true 5 | "that's" | TRANSCRIPT | false 6 | "that +// is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | " +// that is the question" | TRANSCRIPT | true ``` Concatenating the finalized +// transcripts with `is_final` set to true, the complete utterance becomes "to +// be or not to be that is the question". +type GoogleCloudDialogflowV2StreamingRecognitionResult struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for the current + // portion of audio. A higher number indicates an estimated greater likelihood + // that the recognized words are correct. The default of 0.0 is a sentinel + // value indicating that confidence was not set. This field is typically only + // provided if `is_final` is true and you should not rely on it being accurate + // or even set. + Confidence float64 `json:"confidence,omitempty"` + // IsFinal: If `false`, the `StreamingRecognitionResult` represents an interim + // result that may change. If `true`, the recognizer will not return any + // further hypotheses about this piece of the audio. May only be populated for + // `message_type` = `TRANSCRIPT`. + IsFinal bool `json:"isFinal,omitempty"` + // LanguageCode: Detected language code for the transcript. + LanguageCode string `json:"languageCode,omitempty"` + // MessageType: Type of the result message. + // + // Possible values: + // "MESSAGE_TYPE_UNSPECIFIED" - Not specified. Should never be used. + // "TRANSCRIPT" - Message contains a (possibly partial) transcript. + // "END_OF_SINGLE_UTTERANCE" - This event indicates that the server has + // detected the end of the user's speech utterance and expects no additional + // inputs. Therefore, the server will not process additional audio (although it + // may subsequently return additional results). The client should stop sending + // additional audio data, half-close the gRPC connection, and wait for any + // additional results until the server closes the gRPC connection. This message + // is only sent if `single_utterance` was set to `true`, and is not used + // otherwise. + MessageType string `json:"messageType,omitempty"` + // SpeechEndOffset: Time offset of the end of this Speech recognition result + // relative to the beginning of the audio. Only populated for `message_type` = + // `TRANSCRIPT`. + SpeechEndOffset string `json:"speechEndOffset,omitempty"` + // SpeechWordInfo: Word-specific information for the words recognized by Speech + // in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and + // [InputAudioConfig.enable_word_info] is set. + SpeechWordInfo []*GoogleCloudDialogflowV2SpeechWordInfo `json:"speechWordInfo,omitempty"` + // Transcript: Transcript text representing the words that the user spoke. + // Populated if and only if `message_type` = `TRANSCRIPT`. + Transcript string `json:"transcript,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
"Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2StreamingRecognitionResult) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2StreamingRecognitionResult + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2StreamingRecognitionResult) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2StreamingRecognitionResult + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + // GoogleCloudDialogflowV2SuggestArticlesResponse: The response message for // Participants.SuggestArticles. type GoogleCloudDialogflowV2SuggestArticlesResponse struct { @@ -17356,6 +17538,8 @@ type GoogleCloudDialogflowV2beta1ConversationEvent struct { ErrorStatus *GoogleRpcStatus `json:"errorStatus,omitempty"` // NewMessagePayload: Payload of NEW_MESSAGE event. NewMessagePayload *GoogleCloudDialogflowV2beta1Message `json:"newMessagePayload,omitempty"` + // NewRecognitionResultPayload: Payload of NEW_RECOGNITION_RESULT event. + NewRecognitionResultPayload *GoogleCloudDialogflowV2beta1StreamingRecognitionResult `json:"newRecognitionResultPayload,omitempty"` // Type: Required. The type of the event that this notification refers to. // // Possible values: @@ -17370,6 +17554,10 @@ type GoogleCloudDialogflowV2beta1ConversationEvent struct { // "NEW_MESSAGE" - An existing conversation has received a new message, // either from API or telephony. It is configured in // ConversationProfile.new_message_event_notification_config + // "NEW_RECOGNITION_RESULT" - An existing conversation has received a new + // speech recognition result. This is mainly for delivering intermediate + // transcripts. The notification is configured in + // ConversationProfile.new_recognition_event_notification_config. // "UNRECOVERABLE_ERROR" - Unrecoverable error during a telephone call. In // general non-recoverable errors only occur if something was misconfigured in // the ConversationProfile corresponding to the call. After a non-recoverable @@ -20337,6 +20525,171 @@ func (s *GoogleCloudDialogflowV2beta1SmartReplyAnswer) UnmarshalJSON(data []byte return nil } +// GoogleCloudDialogflowV2beta1SpeechWordInfo: Information for a word +// recognized by the speech recognizer. +type GoogleCloudDialogflowV2beta1SpeechWordInfo struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for this word. A + // higher number indicates an estimated greater likelihood that the recognized + // word is correct. The default of 0.0 is a sentinel value indicating that + // confidence was not set. This field is not guaranteed to be fully stable over + // time for the same audio input. Users should also not rely on it to always be + // provided. + Confidence float64 `json:"confidence,omitempty"` + // EndOffset: Time offset relative to the beginning of the audio that + // corresponds to the end of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. 
+ EndOffset string `json:"endOffset,omitempty"` + // StartOffset: Time offset relative to the beginning of the audio that + // corresponds to the start of the spoken word. This is an experimental feature + // and the accuracy of the time offset can vary. + StartOffset string `json:"startOffset,omitempty"` + // Word: The word this info is for. + Word string `json:"word,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1SpeechWordInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1SpeechWordInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2beta1SpeechWordInfo) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2beta1SpeechWordInfo + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + return nil +} + +// GoogleCloudDialogflowV2beta1StreamingRecognitionResult: Contains a speech +// recognition result corresponding to a portion of the audio that is currently +// being processed or an indication that this is the end of the single +// requested utterance. While end-user audio is being processed, Dialogflow +// sends a series of results. Each result may contain a `transcript` value. A +// transcript represents a portion of the utterance. While the recognizer is +// processing audio, transcript values may be interim values or finalized +// values. Once a transcript is finalized, the `is_final` value is set to true +// and processing continues for the next transcript. If +// `StreamingDetectIntentRequest.query_input.audio_config.single_utterance` was +// true, and the recognizer has completed processing audio, the `message_type` +// value is set to `END_OF_SINGLE_UTTERANCE and the following (last) result +// contains the last finalized transcript. The complete end-user utterance is +// determined by concatenating the finalized transcript values received for the +// series of results. In the following example, single utterance is enabled. In +// the case where single utterance is not enabled, result 7 would not occur. +// ``` Num | transcript | message_type | is_final --- | ----------------------- +// | ----------------------- | -------- 1 | "tube" | TRANSCRIPT | false 2 | "to +// be a" | TRANSCRIPT | false 3 | "to be" | TRANSCRIPT | false 4 | "to be or +// not to be" | TRANSCRIPT | true 5 | "that's" | TRANSCRIPT | false 6 | "that +// is | TRANSCRIPT | false 7 | unset | END_OF_SINGLE_UTTERANCE | unset 8 | " +// that is the question" | TRANSCRIPT | true ``` Concatenating the finalized +// transcripts with `is_final` set to true, the complete utterance becomes "to +// be or not to be that is the question". 
+type GoogleCloudDialogflowV2beta1StreamingRecognitionResult struct { + // Confidence: The Speech confidence between 0.0 and 1.0 for the current + // portion of audio. A higher number indicates an estimated greater likelihood + // that the recognized words are correct. The default of 0.0 is a sentinel + // value indicating that confidence was not set. This field is typically only + // provided if `is_final` is true and you should not rely on it being accurate + // or even set. + Confidence float64 `json:"confidence,omitempty"` + // DtmfDigits: DTMF digits. Populated if and only if `message_type` = + // `DTMF_DIGITS`. + DtmfDigits *GoogleCloudDialogflowV2beta1TelephonyDtmfEvents `json:"dtmfDigits,omitempty"` + // IsFinal: If `false`, the `StreamingRecognitionResult` represents an interim + // result that may change. If `true`, the recognizer will not return any + // further hypotheses about this piece of the audio. May only be populated for + // `message_type` = `TRANSCRIPT`. + IsFinal bool `json:"isFinal,omitempty"` + // LanguageCode: Detected language code for the transcript. + LanguageCode string `json:"languageCode,omitempty"` + // MessageType: Type of the result message. + // + // Possible values: + // "MESSAGE_TYPE_UNSPECIFIED" - Not specified. Should never be used. + // "TRANSCRIPT" - Message contains a (possibly partial) transcript. + // "DTMF_DIGITS" - Message contains DTMF digits. + // "END_OF_SINGLE_UTTERANCE" - This event indicates that the server has + // detected the end of the user's speech utterance and expects no additional + // speech. Therefore, the server will not process additional audio (although it + // may subsequently return additional results). The client should stop sending + // additional audio data, half-close the gRPC connection, and wait for any + // additional results until the server closes the gRPC connection. This message + // is only sent if `single_utterance` was set to `true`, and is not used + // otherwise. + // "PARTIAL_DTMF_DIGITS" - Message contains DTMF digits. Before a message + // with DTMF_DIGITS is sent, a message with PARTIAL_DTMF_DIGITS may be sent + // with DTMF digits collected up to the time of sending, which represents an + // intermediate result. + MessageType string `json:"messageType,omitempty"` + // SpeechEndOffset: Time offset of the end of this Speech recognition result + // relative to the beginning of the audio. Only populated for `message_type` = + // `TRANSCRIPT`. + SpeechEndOffset string `json:"speechEndOffset,omitempty"` + // SpeechWordInfo: Word-specific information for the words recognized by Speech + // in transcript. Populated if and only if `message_type` = `TRANSCRIPT` and + // [InputAudioConfig.enable_word_info] is set. + SpeechWordInfo []*GoogleCloudDialogflowV2beta1SpeechWordInfo `json:"speechWordInfo,omitempty"` + // Stability: An estimate of the likelihood that the speech recognizer will not + // change its guess about this interim recognition result: * If the value is + // unspecified or 0.0, Dialogflow didn't compute the stability. In particular, + // Dialogflow will only provide stability for `TRANSCRIPT` results with + // `is_final = false`. * Otherwise, the value is in (0.0, 1.0] where 0.0 means + // completely unstable and 1.0 means completely stable. + Stability float64 `json:"stability,omitempty"` + // Transcript: Transcript text representing the words that the user spoke. + // Populated if and only if `message_type` = `TRANSCRIPT`. 
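Editor's note: one practical consequence of the is_final/stability contract documented here is that a caption UI can paint interim text immediately but only commit it once it is final or sufficiently stable. A sketch follows; the 0.8 cutoff is an arbitrary illustration, not a documented threshold:

```go
package main

import (
	"fmt"

	dialogflow "google.golang.org/api/dialogflow/v2beta1"
)

// shouldDisplay reports whether a recognition result is worth painting.
// Stability is only computed for TRANSCRIPT results with is_final=false;
// a value of 0.0 means Dialogflow did not compute it at all.
func shouldDisplay(r *dialogflow.GoogleCloudDialogflowV2beta1StreamingRecognitionResult) bool {
	if r.MessageType != "TRANSCRIPT" {
		return false
	}
	if r.IsFinal {
		return true
	}
	const minStability = 0.8 // arbitrary cutoff for this sketch
	return r.Stability >= minStability
}

func main() {
	interim := &dialogflow.GoogleCloudDialogflowV2beta1StreamingRecognitionResult{
		MessageType: "TRANSCRIPT", Transcript: "to be", Stability: 0.9,
	}
	fmt.Println(shouldDisplay(interim)) // true: stable enough to paint
}
```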
+ Transcript string `json:"transcript,omitempty"` + // ForceSendFields is a list of field names (e.g. "Confidence") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Confidence") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1StreamingRecognitionResult) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1StreamingRecognitionResult + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2beta1StreamingRecognitionResult) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2beta1StreamingRecognitionResult + var s1 struct { + Confidence gensupport.JSONFloat64 `json:"confidence"` + Stability gensupport.JSONFloat64 `json:"stability"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Confidence = float64(s1.Confidence) + s.Stability = float64(s1.Stability) + return nil +} + // GoogleCloudDialogflowV2beta1SuggestArticlesResponse: The response message // for Participants.SuggestArticles. type GoogleCloudDialogflowV2beta1SuggestArticlesResponse struct { @@ -20539,6 +20892,49 @@ func (s GoogleCloudDialogflowV2beta1SuggestionResult) MarshalJSON() ([]byte, err return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// GoogleCloudDialogflowV2beta1TelephonyDtmfEvents: A wrapper of repeated +// TelephonyDtmf digits. +type GoogleCloudDialogflowV2beta1TelephonyDtmfEvents struct { + // DtmfEvents: A sequence of TelephonyDtmf digits. + // + // Possible values: + // "TELEPHONY_DTMF_UNSPECIFIED" - Not specified. This value may be used to + // indicate an absent digit. + // "DTMF_ONE" - Number: '1'. + // "DTMF_TWO" - Number: '2'. + // "DTMF_THREE" - Number: '3'. + // "DTMF_FOUR" - Number: '4'. + // "DTMF_FIVE" - Number: '5'. + // "DTMF_SIX" - Number: '6'. + // "DTMF_SEVEN" - Number: '7'. + // "DTMF_EIGHT" - Number: '8'. + // "DTMF_NINE" - Number: '9'. + // "DTMF_ZERO" - Number: '0'. + // "DTMF_A" - Letter: 'A'. + // "DTMF_B" - Letter: 'B'. + // "DTMF_C" - Letter: 'C'. + // "DTMF_D" - Letter: 'D'. + // "DTMF_STAR" - Asterisk/star: '*'. + // "DTMF_POUND" - Pound/diamond/hash/square/gate/octothorpe: '#'. + DtmfEvents []string `json:"dtmfEvents,omitempty"` + // ForceSendFields is a list of field names (e.g. "DtmfEvents") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DtmfEvents") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s GoogleCloudDialogflowV2beta1TelephonyDtmfEvents) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1TelephonyDtmfEvents + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // GoogleCloudDialogflowV2beta1WebhookRequest: The request message for a // webhook call. type GoogleCloudDialogflowV2beta1WebhookRequest struct { diff --git a/firebase/v1beta1/firebase-api.json b/firebase/v1beta1/firebase-api.json index 1b14bcf9480..b519234c993 100644 --- a/firebase/v1beta1/firebase-api.json +++ b/firebase/v1beta1/firebase-api.json @@ -1324,7 +1324,7 @@ } } }, - "revision": "20241107", + "revision": "20241217", "rootUrl": "https://firebase.googleapis.com/", "schemas": { "AddFirebaseRequest": { @@ -2162,12 +2162,12 @@ "type": "integer" }, "message": { - "description": "Detail message", + "description": "Detail message copybara:strip_begin(b/383363683) copybara:strip_end_and_replace optional string message = 3;", "type": "string" }, "messageSet": { "$ref": "MessageSet", - "description": "message_set associates an arbitrary proto message with the status." + "description": "message_set associates an arbitrary proto message with the status. copybara:strip_begin(b/383363683) copybara:strip_end_and_replace optional proto2.bridge.MessageSet message_set = 5;" }, "space": { "description": "The following are usually only present when code != 0 Space to which this status belongs", diff --git a/firebase/v1beta1/firebase-gen.go b/firebase/v1beta1/firebase-gen.go index 4973034a87f..54e76c8519a 100644 --- a/firebase/v1beta1/firebase-gen.go +++ b/firebase/v1beta1/firebase-gen.go @@ -1599,10 +1599,12 @@ type StatusProto struct { // Code: Numeric code drawn from the space specified below. Often, this is the // canonical error space, and code is drawn from google3/util/task/codes.proto Code int64 `json:"code,omitempty"` - // Message: Detail message + // Message: Detail message copybara:strip_begin(b/383363683) + // copybara:strip_end_and_replace optional string message = 3; Message string `json:"message,omitempty"` // MessageSet: message_set associates an arbitrary proto message with the - // status. + // status. copybara:strip_begin(b/383363683) copybara:strip_end_and_replace + // optional proto2.bridge.MessageSet message_set = 5; MessageSet *MessageSet `json:"messageSet,omitempty"` // Space: The following are usually only present when code != 0 Space to which // this status belongs diff --git a/firebaseml/v2beta/firebaseml-api.json b/firebaseml/v2beta/firebaseml-api.json index e12ad255046..5a549333a01 100644 --- a/firebaseml/v2beta/firebaseml-api.json +++ b/firebaseml/v2beta/firebaseml-api.json @@ -206,7 +206,7 @@ } } }, - "revision": "20241215", + "revision": "20241217", "rootUrl": "https://firebaseml.googleapis.com/", "schemas": { "Date": { @@ -1171,7 +1171,8 @@ "type": "string" }, "thought": { - "description": "Optional. Indicates if the part is thought from the model.", + "description": "Output only. Indicates if the part is thought from the model.", + "readOnly": true, "type": "boolean" }, "videoMetadata": { diff --git a/firebaseml/v2beta/firebaseml-gen.go b/firebaseml/v2beta/firebaseml-gen.go index ac367529f73..819fbe39a08 100644 --- a/firebaseml/v2beta/firebaseml-gen.go +++ b/firebaseml/v2beta/firebaseml-gen.go @@ -1420,7 +1420,7 @@ type GoogleCloudAiplatformV1beta1Part struct { InlineData *GoogleCloudAiplatformV1beta1Blob `json:"inlineData,omitempty"` // Text: Optional. Text part (can be code). 
Text string `json:"text,omitempty"` - // Thought: Optional. Indicates if the part is thought from the model. + // Thought: Output only. Indicates if the part is thought from the model. Thought bool `json:"thought,omitempty"` // VideoMetadata: Optional. Video metadata. The metadata should only be // specified while the video data is presented in inline_data or file_data. diff --git a/jobs/v4/jobs-api.json b/jobs/v4/jobs-api.json index 86981177cc4..c794476ea5d 100644 --- a/jobs/v4/jobs-api.json +++ b/jobs/v4/jobs-api.json @@ -903,7 +903,7 @@ } } }, - "revision": "20240614", + "revision": "20241217", "rootUrl": "https://jobs.googleapis.com/", "schemas": { "ApplicationInfo": { @@ -1829,7 +1829,7 @@ "type": "string" }, "postingExpireTime": { - "description": "Strongly recommended for the best service experience. The expiration timestamp of the job. After this timestamp, the job is marked as expired, and it no longer appears in search results. The expired job can't be listed by the ListJobs API, but it can be retrieved with the GetJob API or updated with the UpdateJob API or deleted with the DeleteJob API. An expired job can be updated and opened again by using a future expiration timestamp. Updating an expired job fails if there is another existing open job with same company, language_code and requisition_id. The expired jobs are retained in our system for 90 days. However, the overall expired job count cannot exceed 3 times the maximum number of open jobs over previous 7 days. If this threshold is exceeded, expired jobs are cleaned out in order of earliest expire time. Expired jobs are no longer accessible after they are cleaned out. Invalid timestamps are ignored, and treated as expire time not provided. If the timestamp is before the instant request is made, the job is treated as expired immediately on creation. This kind of job can not be updated. And when creating a job with past timestamp, the posting_publish_time must be set before posting_expire_time. The purpose of this feature is to allow other objects, such as Application, to refer a job that didn't exist in the system prior to becoming expired. If you want to modify a job that was expired on creation, delete it and create a new one. If this value isn't provided at the time of job creation or is invalid, the job posting expires after 30 days from the job's creation time. For example, if the job was created on 2017/01/01 13:00AM UTC with an unspecified expiration date, the job expires after 2017/01/31 13:00AM UTC. If this value isn't provided on job update, it depends on the field masks set by UpdateJobRequest.update_mask. If the field masks include job_end_time, or the masks are empty meaning that every field is updated, the job posting expires after 30 days from the job's last update time. Otherwise the expiration date isn't updated.", + "description": "Strongly recommended for the best service experience. The expiration timestamp of the job. After this timestamp, the job is marked as expired, and it no longer appears in search results. The expired job can't be listed by the ListJobs API, but it can be retrieved with the GetJob API or updated with the UpdateJob API or deleted with the DeleteJob API. An expired job can be updated and opened again by using a future expiration timestamp. Updating an expired job fails if there is another existing open job with same company, language_code and requisition_id. The expired jobs are retained in our system for 90 days. 
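Editor's note: given the 30-day default and the pruning rules spelled out above, publishers that want a predictable lifetime usually set posting_expire_time explicitly. A sketch stamping a job with a 60-day expiry, assuming the usual RFC 3339 encoding of google-datetime; the resource names are placeholders:

```go
package main

import (
	"fmt"
	"time"

	jobs "google.golang.org/api/jobs/v4"
)

func main() {
	job := &jobs.Job{
		Company:       "projects/my-project/tenants/my-tenant/companies/my-company", // hypothetical
		RequisitionId: "req-123",                                                    // hypothetical
		Title:         "Technical Editor",
		Description:   "Edits generated API documentation.",
		// Explicit expiry: 60 days out, RFC 3339 in UTC. Without this the
		// posting would expire 30 days after creation.
		PostingExpireTime: time.Now().UTC().Add(60 * 24 * time.Hour).Format(time.RFC3339),
	}
	fmt.Println(job.PostingExpireTime)
}
```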
However, the overall expired job count cannot exceed 3 times the maximum number of open jobs over previous 7 days. If this threshold is exceeded, expired jobs are cleaned out in order of earliest expire time. Expired jobs are no longer accessible after they are cleaned out. Invalid timestamps are ignored, and treated as expire time not provided. If the timestamp is before the instant request is made, the job is treated as expired immediately on creation. This kind of job can not be updated. And when creating a job with past timestamp, the posting_publish_time must be set before posting_expire_time. The purpose of this feature is to allow other objects, such as ApplicationInfo, to refer a job that didn't exist in the system prior to becoming expired. If you want to modify a job that was expired on creation, delete it and create a new one. If this value isn't provided at the time of job creation or is invalid, the job posting expires after 30 days from the job's creation time. For example, if the job was created on 2017/01/01 13:00AM UTC with an unspecified expiration date, the job expires after 2017/01/31 13:00AM UTC. If this value isn't provided on job update, it depends on the field masks set by UpdateJobRequest.update_mask. If the field masks include job_end_time, or the masks are empty meaning that every field is updated, the job posting expires after 30 days from the job's last update time. Otherwise the expiration date isn't updated.", "format": "google-datetime", "type": "string" }, @@ -2392,7 +2392,7 @@ "type": "string" }, "telecommutePreference": { - "description": "Allows the client to return jobs without a set location, specifically, telecommuting jobs (telecommuting is considered by the service as a special location). Job.posting_region indicates if a job permits telecommuting. If this field is set to TelecommutePreference.TELECOMMUTE_ALLOWED, telecommuting jobs are searched, and address and lat_lng are ignored. If not set or set to TelecommutePreference.TELECOMMUTE_EXCLUDED, the telecommute status of the jobs is ignored. Jobs that have PostingRegion.TELECOMMUTE and have additional Job.addresses may still be matched based on other location filters using address or latlng. This filter can be used by itself to search exclusively for telecommuting jobs, or it can be combined with another location filter to search for a combination of job locations, such as \"Mountain View\" or \"telecommuting\" jobs. However, when used in combination with other location filters, telecommuting jobs can be treated as less relevant than other jobs in the search response. This field is only used for job search requests.", + "description": "Allows the client to return jobs without a set location, specifically, telecommuting jobs (telecommuting is considered by the service as a special location). Job.posting_region indicates if a job permits telecommuting. If this field is set to TelecommutePreference.TELECOMMUTE_ALLOWED, telecommuting jobs are searched, and address and lat_lng are ignored. If not set or set to TelecommutePreference.TELECOMMUTE_EXCLUDED, the telecommute status of the jobs is ignored. Jobs that have PostingRegion.TELECOMMUTE and have additional Job.addresses may still be matched based on other location filters using address or lat_lng. This filter can be used by itself to search exclusively for telecommuting jobs, or it can be combined with another location filter to search for a combination of job locations, such as \"Mountain View\" or \"telecommuting\" jobs. 
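Editor's note: to make the telecommute semantics concrete, the query below combines an ordinary location filter with a separate telecommute filter, matching the "Mountain View or telecommuting" combination described above. Two filters are used deliberately, because TELECOMMUTE_ALLOWED causes address and lat_lng within the same filter to be ignored; the address value is illustrative:

```go
package main

import (
	"fmt"

	jobs "google.golang.org/api/jobs/v4"
)

func main() {
	q := &jobs.JobQuery{
		LocationFilters: []*jobs.LocationFilter{
			// Ordinary location filter.
			{Address: "Mountain View, CA"},
			// Separate filter surfacing jobs posted with
			// PostingRegion.TELECOMMUTE; address/lat_lng are ignored
			// when TELECOMMUTE_ALLOWED is set, so it gets its own entry.
			{TelecommutePreference: "TELECOMMUTE_ALLOWED"},
		},
	}
	fmt.Printf("%d location filter(s)\n", len(q.LocationFilters))
}
```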
However, when used in combination with other location filters, telecommuting jobs can be treated as less relevant than other jobs in the search response. This field is only used for job search requests.", "enum": [ "TELECOMMUTE_PREFERENCE_UNSPECIFIED", "TELECOMMUTE_EXCLUDED", @@ -2500,18 +2500,18 @@ "type": "object" }, "PostalAddress": { - "description": "Represents a postal address, e.g. for postal delivery or payments addresses. Given a postal address, a postal service can deliver items to a premise, P.O. Box or similar. It is not intended to model geographical locations (roads, towns, mountains). In typical usage an address would be created via user input or from importing existing data, depending on the type of process. Advice on address input / editing: - Use an internationalization-ready address widget such as https://github.com/google/libaddressinput) - Users should not be presented with UI elements for input or editing of fields outside countries where that field is used. For more guidance on how to use this schema, please see: https://support.google.com/business/answer/6397478", + "description": "Represents a postal address. For example for postal delivery or payments addresses. Given a postal address, a postal service can deliver items to a premise, P.O. Box or similar. It is not intended to model geographical locations (roads, towns, mountains). In typical usage an address would be created by user input or from importing existing data, depending on the type of process. Advice on address input / editing: - Use an internationalization-ready address widget such as https://github.com/google/libaddressinput) - Users should not be presented with UI elements for input or editing of fields outside countries where that field is used. For more guidance on how to use this schema, see: https://support.google.com/business/answer/6397478", "id": "PostalAddress", "properties": { "addressLines": { - "description": "Unstructured address lines describing the lower levels of an address. Because values in address_lines do not have type information and may sometimes contain multiple values in a single field (e.g. \"Austin, TX\"), it is important that the line order is clear. The order of address lines should be \"envelope order\" for the country/region of the address. In places where this can vary (e.g. Japan), address_language is used to make it explicit (e.g. \"ja\" for large-to-small ordering and \"ja-Latn\" or \"en\" for small-to-large). This way, the most specific line of an address can be selected based on the language. The minimum permitted structural representation of an address consists of a region_code with all remaining information placed in the address_lines. It would be possible to format such an address very approximately without geocoding, but no semantic reasoning could be made about any of the address components until it was at least partially resolved. Creating an address only containing a region_code and address_lines, and then geocoding is the recommended way to handle completely unstructured addresses (as opposed to guessing which parts of the address should be localities or administrative areas).", + "description": "Unstructured address lines describing the lower levels of an address. Because values in address_lines do not have type information and may sometimes contain multiple values in a single field (For example \"Austin, TX\"), it is important that the line order is clear. The order of address lines should be \"envelope order\" for the country/region of the address. 
In places where this can vary (For example Japan), address_language is used to make it explicit (For example \"ja\" for large-to-small ordering and \"ja-Latn\" or \"en\" for small-to-large). This way, the most specific line of an address can be selected based on the language. The minimum permitted structural representation of an address consists of a region_code with all remaining information placed in the address_lines. It would be possible to format such an address very approximately without geocoding, but no semantic reasoning could be made about any of the address components until it was at least partially resolved. Creating an address only containing a region_code and address_lines, and then geocoding is the recommended way to handle completely unstructured addresses (as opposed to guessing which parts of the address should be localities or administrative areas).", "items": { "type": "string" }, "type": "array" }, "administrativeArea": { - "description": "Optional. Highest administrative subdivision which is used for postal addresses of a country or region. For example, this can be a state, a province, an oblast, or a prefecture. Specifically, for Spain this is the province and not the autonomous community (e.g. \"Barcelona\" and not \"Catalonia\"). Many countries don't use an administrative area in postal addresses. E.g. in Switzerland this should be left unpopulated.", + "description": "Optional. Highest administrative subdivision which is used for postal addresses of a country or region. For example, this can be a state, a province, an oblast, or a prefecture. Specifically, for Spain this is the province and not the autonomous community (For example \"Barcelona\" and not \"Catalonia\"). Many countries don't use an administrative area in postal addresses. For example in Switzerland this should be left unpopulated.", "type": "string" }, "languageCode": { @@ -2527,7 +2527,7 @@ "type": "string" }, "postalCode": { - "description": "Optional. Postal code of the address. Not all countries use or require postal codes to be present, but where they are used, they may trigger additional validation with other parts of the address (e.g. state/zip validation in the U.S.A.).", + "description": "Optional. Postal code of the address. Not all countries use or require postal codes to be present, but where they are used, they may trigger additional validation with other parts of the address (For example state/zip validation in the U.S.A.).", "type": "string" }, "recipients": { @@ -2547,7 +2547,7 @@ "type": "integer" }, "sortingCode": { - "description": "Optional. Additional, country-specific, sorting code. This is not used in most regions. Where it is used, the value is either a string like \"CEDEX\", optionally followed by a number (e.g. \"CEDEX 7\"), or just a number alone, representing the \"sector code\" (Jamaica), \"delivery area indicator\" (Malawi) or \"post office indicator\" (e.g. Côte d'Ivoire).", + "description": "Optional. Additional, country-specific, sorting code. This is not used in most regions. Where it is used, the value is either a string like \"CEDEX\", optionally followed by a number (For example \"CEDEX 7\"), or just a number alone, representing the \"sector code\" (Jamaica), \"delivery area indicator\" (Malawi) or \"post office indicator\" (For example Côte d'Ivoire).", "type": "string" }, "sublocality": { @@ -2722,6 +2722,24 @@ "description": "The token specifying the current offset within search results. 
See SearchJobsResponse.next_page_token for an explanation of how to obtain the next set of query results.", "type": "string" }, + "relevanceThreshold": { + "description": "Optional. The relevance threshold of the search results. Default to Google defined threshold, leveraging a balance of precision and recall to deliver both highly accurate results and comprehensive coverage of relevant information.", + "enum": [ + "RELEVANCE_THRESHOLD_UNSPECIFIED", + "LOWEST", + "LOW", + "MEDIUM", + "HIGH" + ], + "enumDescriptions": [ + "Default value. In this case, server behavior defaults to Google defined threshold.", + "Lowest relevance threshold.", + "Low relevance threshold.", + "Medium relevance threshold.", + "High relevance threshold." + ], + "type": "string" + }, "requestMetadata": { "$ref": "RequestMetadata", "description": "Required. The meta information collected about the job searcher, used to improve the search quality of the service. The identifiers (such as `user_id`) are provided by users, and must be unique and consistent." @@ -2859,22 +2877,22 @@ "id": "TimeOfDay", "properties": { "hours": { - "description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", + "description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, "minutes": { - "description": "Minutes of hour of day. Must be from 0 to 59.", + "description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "type": "integer" }, "nanos": { - "description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", + "description": "Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "type": "integer" }, "seconds": { - "description": "Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.", + "description": "Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } diff --git a/jobs/v4/jobs-gen.go b/jobs/v4/jobs-gen.go index 55194c62883..565dbe91a24 100644 --- a/jobs/v4/jobs-gen.go +++ b/jobs/v4/jobs-gen.go @@ -1403,18 +1403,18 @@ type Job struct { // job is treated as expired immediately on creation. This kind of job can not // be updated. And when creating a job with past timestamp, the // posting_publish_time must be set before posting_expire_time. The purpose of - // this feature is to allow other objects, such as Application, to refer a job - // that didn't exist in the system prior to becoming expired. If you want to - // modify a job that was expired on creation, delete it and create a new one. - // If this value isn't provided at the time of job creation or is invalid, the - // job posting expires after 30 days from the job's creation time. For example, - // if the job was created on 2017/01/01 13:00AM UTC with an unspecified - // expiration date, the job expires after 2017/01/31 13:00AM UTC. If this value - // isn't provided on job update, it depends on the field masks set by - // UpdateJobRequest.update_mask. 
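Editor's note: a sketch of a search request exercising the new relevanceThreshold knob introduced above, trading recall for precision by tightening past the Google-defined default. The request_metadata values are placeholders; the field itself is required and must be unique and consistent per the schema:

```go
package main

import (
	"fmt"

	jobs "google.golang.org/api/jobs/v4"
)

func main() {
	req := &jobs.SearchJobsRequest{
		// Tighten results beyond the default, Google-defined threshold.
		RelevanceThreshold: "HIGH",
		JobQuery:           &jobs.JobQuery{Query: "software engineer"},
		RequestMetadata: &jobs.RequestMetadata{
			Domain:    "careers.example.com", // hypothetical
			SessionId: "session-1",           // hypothetical
			UserId:    "user-1",              // hypothetical
		},
	}
	fmt.Println(req.RelevanceThreshold)
}
```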
If the field masks include job_end_time, or - // the masks are empty meaning that every field is updated, the job posting - // expires after 30 days from the job's last update time. Otherwise the - // expiration date isn't updated. + // this feature is to allow other objects, such as ApplicationInfo, to refer a + // job that didn't exist in the system prior to becoming expired. If you want + // to modify a job that was expired on creation, delete it and create a new + // one. If this value isn't provided at the time of job creation or is invalid, + // the job posting expires after 30 days from the job's creation time. For + // example, if the job was created on 2017/01/01 13:00AM UTC with an + // unspecified expiration date, the job expires after 2017/01/31 13:00AM UTC. + // If this value isn't provided on job update, it depends on the field masks + // set by UpdateJobRequest.update_mask. If the field masks include + // job_end_time, or the masks are empty meaning that every field is updated, + // the job posting expires after 30 days from the job's last update time. + // Otherwise the expiration date isn't updated. PostingExpireTime string `json:"postingExpireTime,omitempty"` // PostingPublishTime: The timestamp this job posting was most recently // published. The default value is the time the request arrives at the server. @@ -2141,13 +2141,13 @@ type LocationFilter struct { // TelecommutePreference.TELECOMMUTE_EXCLUDED, the telecommute status of the // jobs is ignored. Jobs that have PostingRegion.TELECOMMUTE and have // additional Job.addresses may still be matched based on other location - // filters using address or latlng. This filter can be used by itself to search - // exclusively for telecommuting jobs, or it can be combined with another - // location filter to search for a combination of job locations, such as - // "Mountain View" or "telecommuting" jobs. However, when used in combination - // with other location filters, telecommuting jobs can be treated as less - // relevant than other jobs in the search response. This field is only used for - // job search requests. + // filters using address or lat_lng. This filter can be used by itself to + // search exclusively for telecommuting jobs, or it can be combined with + // another location filter to search for a combination of job locations, such + // as "Mountain View" or "telecommuting" jobs. However, when used in + // combination with other location filters, telecommuting jobs can be treated + // as less relevant than other jobs in the search response. This field is only + // used for job search requests. // // Possible values: // "TELECOMMUTE_PREFERENCE_UNSPECIFIED" - Default value if the telecommute @@ -2304,42 +2304,43 @@ func (s Operation) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// PostalAddress: Represents a postal address, e.g. for postal delivery or -// payments addresses. Given a postal address, a postal service can deliver +// PostalAddress: Represents a postal address. For example for postal delivery +// or payments addresses. Given a postal address, a postal service can deliver // items to a premise, P.O. Box or similar. It is not intended to model // geographical locations (roads, towns, mountains). In typical usage an -// address would be created via user input or from importing existing data, +// address would be created by user input or from importing existing data, // depending on the type of process. 
Advice on address input / editing: - Use // an internationalization-ready address widget such as // https://github.com/google/libaddressinput) - Users should not be presented // with UI elements for input or editing of fields outside countries where that -// field is used. For more guidance on how to use this schema, please see: +// field is used. For more guidance on how to use this schema, see: // https://support.google.com/business/answer/6397478 type PostalAddress struct { // AddressLines: Unstructured address lines describing the lower levels of an // address. Because values in address_lines do not have type information and - // may sometimes contain multiple values in a single field (e.g. "Austin, TX"), - // it is important that the line order is clear. The order of address lines - // should be "envelope order" for the country/region of the address. In places - // where this can vary (e.g. Japan), address_language is used to make it - // explicit (e.g. "ja" for large-to-small ordering and "ja-Latn" or "en" for - // small-to-large). This way, the most specific line of an address can be - // selected based on the language. The minimum permitted structural - // representation of an address consists of a region_code with all remaining - // information placed in the address_lines. It would be possible to format such - // an address very approximately without geocoding, but no semantic reasoning - // could be made about any of the address components until it was at least - // partially resolved. Creating an address only containing a region_code and - // address_lines, and then geocoding is the recommended way to handle + // may sometimes contain multiple values in a single field (For example + // "Austin, TX"), it is important that the line order is clear. The order of + // address lines should be "envelope order" for the country/region of the + // address. In places where this can vary (For example Japan), address_language + // is used to make it explicit (For example "ja" for large-to-small ordering + // and "ja-Latn" or "en" for small-to-large). This way, the most specific line + // of an address can be selected based on the language. The minimum permitted + // structural representation of an address consists of a region_code with all + // remaining information placed in the address_lines. It would be possible to + // format such an address very approximately without geocoding, but no semantic + // reasoning could be made about any of the address components until it was at + // least partially resolved. Creating an address only containing a region_code + // and address_lines, and then geocoding is the recommended way to handle // completely unstructured addresses (as opposed to guessing which parts of the // address should be localities or administrative areas). AddressLines []string `json:"addressLines,omitempty"` // AdministrativeArea: Optional. Highest administrative subdivision which is // used for postal addresses of a country or region. For example, this can be a // state, a province, an oblast, or a prefecture. Specifically, for Spain this - // is the province and not the autonomous community (e.g. "Barcelona" and not - // "Catalonia"). Many countries don't use an administrative area in postal - // addresses. E.g. in Switzerland this should be left unpopulated. + // is the province and not the autonomous community (For example "Barcelona" + // and not "Catalonia"). Many countries don't use an administrative area in + // postal addresses. 
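Editor's note: the "minimum permitted structural representation" described in the AddressLines documentation is easy to show — a region code plus free-form, envelope-order lines, leaving geocoding to sort out localities and administrative areas later. The address content is illustrative:

```go
package main

import (
	"fmt"

	jobs "google.golang.org/api/jobs/v4"
)

func main() {
	// Minimal, completely unstructured address: region_code plus
	// envelope-order address_lines; everything else left for geocoding.
	addr := &jobs.PostalAddress{
		RegionCode:   "US",
		AddressLines: []string{"1600 Amphitheatre Parkway", "Mountain View, CA 94043"},
	}
	fmt.Printf("%+v\n", addr)
}
```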
@@ -2726,6 +2728,19 @@ type SearchJobsRequest struct {
 	// See SearchJobsResponse.next_page_token for an explanation of how to obtain
 	// the next set of query results.
 	PageToken string `json:"pageToken,omitempty"`
+	// RelevanceThreshold: Optional. The relevance threshold of the search
+	// results. Defaults to a Google-defined threshold, leveraging a balance of
+	// precision and recall to deliver both highly accurate results and
+	// comprehensive coverage of relevant information.
+	//
+	// Possible values:
+	//   "RELEVANCE_THRESHOLD_UNSPECIFIED" - Default value. In this case, server
+	// behavior defaults to the Google-defined threshold.
+	//   "LOWEST" - Lowest relevance threshold.
+	//   "LOW" - Low relevance threshold.
+	//   "MEDIUM" - Medium relevance threshold.
+	//   "HIGH" - High relevance threshold.
+	RelevanceThreshold string `json:"relevanceThreshold,omitempty"`
 	// RequestMetadata: Required. The meta information collected about the job
 	// searcher, used to improve the search quality of the service. The identifiers
 	// (such as `user_id`) are provided by users, and must be unique and
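A sketch of opting into the stricter relevance cut-off added here. The metadata values are placeholders; RequestMetadata remains required as documented above.

package example

import jobs "google.golang.org/api/jobs/v4"

// highPrecisionSearch favors precision over recall. Leaving
// RelevanceThreshold unset keeps the Google-defined default.
func highPrecisionSearch() *jobs.SearchJobsRequest {
	return &jobs.SearchJobsRequest{
		RequestMetadata: &jobs.RequestMetadata{ // required by the API
			Domain:    "careers.example.com",
			SessionId: "session-1",
			UserId:    "user-1",
		},
		RelevanceThreshold: "HIGH",
	}
}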
@@ -2916,16 +2931,19 @@ func (s Tenant) MarshalJSON() ([]byte, error) {
 	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // TimeOfDay: Represents a time of day. The date and time zone are either not
 // significant or are specified elsewhere. An API may choose to allow leap
 // seconds. Related types are google.type.Date and `google.protobuf.Timestamp`.
 type TimeOfDay struct {
-	// Hours: Hours of day in 24 hour format. Should be from 0 to 23. An API may
-	// choose to allow the value "24:00:00" for scenarios like business closing
-	// time.
+	// Hours: Hours of a day in 24 hour format. Must be greater than or equal to 0
+	// and typically must be less than or equal to 23. An API may choose to allow
+	// the value "24:00:00" for scenarios like business closing time.
 	Hours int64 `json:"hours,omitempty"`
-	// Minutes: Minutes of hour of day. Must be from 0 to 59.
+	// Minutes: Minutes of an hour. Must be greater than or equal to 0 and less
+	// than or equal to 59.
 	Minutes int64 `json:"minutes,omitempty"`
-	// Nanos: Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.
+	// Nanos: Fractions of seconds, in nanoseconds. Must be greater than or equal
+	// to 0 and less than or equal to 999,999,999.
 	Nanos int64 `json:"nanos,omitempty"`
-	// Seconds: Seconds of minutes of the time. Must normally be from 0 to 59. An
-	// API may allow the value 60 if it allows leap-seconds.
+	// Seconds: Seconds of a minute. Must be greater than or equal to 0 and
+	// typically must be less than or equal to 59. An API may allow the value 60 if
+	// it allows leap-seconds.
 	Seconds int64 `json:"seconds,omitempty"`
 	// ForceSendFields is a list of field names (e.g. "Hours") to unconditionally
 	// include in API requests. By default, fields with empty or default values are
diff --git a/merchantapi/products_v1beta/merchantapi-api.json b/merchantapi/products_v1beta/merchantapi-api.json
index 89bba4e883e..77f7fc9a920 100644
--- a/merchantapi/products_v1beta/merchantapi-api.json
+++ b/merchantapi/products_v1beta/merchantapi-api.json
@@ -242,7 +242,7 @@
       }
     }
   },
-  "revision": "20241211",
+  "revision": "20241217",
   "rootUrl": "https://merchantapi.googleapis.com/",
   "schemas": {
     "Attributes": {
@@ -651,6 +651,13 @@
         "$ref": "SubscriptionCost",
         "description": "Number of periods (months or years) and amount of payment per period for an item with an associated subscription contract."
       },
+      "sustainabilityIncentives": {
+        "description": "The list of sustainability incentive programs.",
+        "items": {
+          "$ref": "ProductSustainabilityIncentive"
+        },
+        "type": "array"
+      },
       "taxCategory": {
         "description": "The tax category of the product.",
         "type": "string"
       },
@@ -1516,6 +1523,36 @@
       }
     },
     "type": "object"
   },
+    "ProductSustainabilityIncentive": {
+      "description": "Information regarding sustainability-related incentive programs such as rebates or tax relief.",
+      "id": "ProductSustainabilityIncentive",
+      "properties": {
+        "amount": {
+          "$ref": "Price",
+          "description": "The fixed amount of the incentive."
+        },
+        "percentage": {
+          "description": "The percentage of the sale price that the incentive is applied to.",
+          "format": "double",
+          "type": "number"
+        },
+        "type": {
+          "description": "Sustainability incentive program.",
+          "enum": [
+            "TYPE_UNSPECIFIED",
+            "EV_TAX_CREDIT",
+            "EV_PRICE_DISCOUNT"
+          ],
+          "enumDescriptions": [
+            "Unspecified or unknown sustainability incentive type.",
+            "Program offering tax liability reductions for electric vehicles and, in some countries, plug-in hybrids. These reductions can be based on a specific amount or a percentage of the sale price.",
+            "A subsidy program, often called an environmental bonus, provides a purchase grant for electric vehicles and, in some countries, plug-in hybrids. The grant amount may be a fixed sum or a percentage of the sale price."
+          ],
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "ProductWeight": {
       "description": "The weight of the product.",
       "id": "ProductWeight",
diff --git a/merchantapi/products_v1beta/merchantapi-gen.go b/merchantapi/products_v1beta/merchantapi-gen.go
index 0ee1b603578..2495343367e 100644
--- a/merchantapi/products_v1beta/merchantapi-gen.go
+++ b/merchantapi/products_v1beta/merchantapi-gen.go
@@ -437,6 +437,8 @@ type Attributes struct {
 	// SubscriptionCost: Number of periods (months or years) and amount of payment
 	// per period for an item with an associated subscription contract.
 	SubscriptionCost *SubscriptionCost `json:"subscriptionCost,omitempty"`
+	// SustainabilityIncentives: The list of sustainability incentive programs.
+	SustainabilityIncentives []*ProductSustainabilityIncentive `json:"sustainabilityIncentives,omitempty"`
 	// TaxCategory: The tax category of the product.
 	TaxCategory string `json:"taxCategory,omitempty"`
 	// Taxes: Tax information.
@@ -1451,6 +1453,58 @@ func (s ProductStructuredTitle) MarshalJSON() ([]byte, error) {
 	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
+// ProductSustainabilityIncentive: Information regarding sustainability-related
+// incentive programs such as rebates or tax relief.
+type ProductSustainabilityIncentive struct {
+	// Amount: The fixed amount of the incentive.
+	Amount *Price `json:"amount,omitempty"`
+	// Percentage: The percentage of the sale price that the incentive is applied
+	// to.
+	Percentage float64 `json:"percentage,omitempty"`
+	// Type: Sustainability incentive program.
+	//
+	// Possible values:
+	//   "TYPE_UNSPECIFIED" - Unspecified or unknown sustainability incentive type.
+	//   "EV_TAX_CREDIT" - Program offering tax liability reductions for electric
+	// vehicles and, in some countries, plug-in hybrids. These reductions can be
+	// based on a specific amount or a percentage of the sale price.
+	//   "EV_PRICE_DISCOUNT" - A subsidy program, often called an environmental
+	// bonus, provides a purchase grant for electric vehicles and, in some
+	// countries, plug-in hybrids. The grant amount may be a fixed sum or a
+	// percentage of the sale price.
+	Type string `json:"type,omitempty"`
+	// ForceSendFields is a list of field names (e.g. "Amount") to unconditionally
+	// include in API requests. By default, fields with empty or default values are
+	// omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+	// details.
+	ForceSendFields []string `json:"-"`
+	// NullFields is a list of field names (e.g. "Amount") to include in API
+	// requests with the JSON null value. By default, fields with empty values are
+	// omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+	NullFields []string `json:"-"`
+}
+
+func (s ProductSustainabilityIncentive) MarshalJSON() ([]byte, error) {
+	type NoMethod ProductSustainabilityIncentive
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+func (s *ProductSustainabilityIncentive) UnmarshalJSON(data []byte) error {
+	type NoMethod ProductSustainabilityIncentive
+	var s1 struct {
+		Percentage gensupport.JSONFloat64 `json:"percentage"`
+		*NoMethod
+	}
+	s1.NoMethod = (*NoMethod)(s)
+	if err := json.Unmarshal(data, &s1); err != nil {
+		return err
+	}
+	s.Percentage = float64(s1.Percentage)
+	return nil
+}
+
 // ProductWeight: The weight of the product.
 type ProductWeight struct {
 	// Unit: Required. The weight unit. Acceptable values are: * "g" * "kg" *
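A sketch of populating the new attribute. Whether a program uses Amount or Percentage depends on the incentive; the shared Price message with its micros convention is assumed, and all values are placeholders.

package example

import merchantapi "google.golang.org/api/merchantapi/products_v1beta"

// evIncentives lists one fixed-amount tax credit and one percentage-based
// purchase grant, mirroring the two enum values added above.
func evIncentives() *merchantapi.Attributes {
	return &merchantapi.Attributes{
		SustainabilityIncentives: []*merchantapi.ProductSustainabilityIncentive{
			{
				Type:   "EV_TAX_CREDIT",
				Amount: &merchantapi.Price{AmountMicros: 7500000000, CurrencyCode: "USD"}, // $7,500
			},
			{
				Type:       "EV_PRICE_DISCOUNT",
				Percentage: 10, // 10% of the sale price
			},
		},
	}
}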
diff --git a/migrationcenter/v1alpha1/migrationcenter-api.json b/migrationcenter/v1alpha1/migrationcenter-api.json
index 23f37f37b8f..f31ad3a78c9 100644
--- a/migrationcenter/v1alpha1/migrationcenter-api.json
+++ b/migrationcenter/v1alpha1/migrationcenter-api.json
@@ -2548,7 +2548,7 @@
       }
     }
   },
-  "revision": "20241205",
+  "revision": "20241212",
   "rootUrl": "https://migrationcenter.googleapis.com/",
   "schemas": {
     "AddAssetsToGroupRequest": {
@@ -6620,6 +6620,11 @@
         "format": "int32",
         "readOnly": true,
         "type": "integer"
+      },
+      "xlsxOutputFile": {
+        "$ref": "XlsxOutputFile",
+        "description": "Output only. XLSX output file.",
+        "readOnly": true
       }
     },
     "type": "object"
@@ -7858,7 +7863,22 @@
     "SignedUriDestination": {
       "description": "Signed URI destination configuration.",
       "id": "SignedUriDestination",
-      "properties": {},
+      "properties": {
+        "fileFormat": {
+          "description": "Required. The file format to export.",
+          "enum": [
+            "FILE_FORMAT_UNSPECIFIED",
+            "CSV",
+            "XLSX"
+          ],
+          "enumDescriptions": [
+            "Unspecified file format will be treated as CSV.",
+            "CSV file format.",
+            "XLSX file format, which is used in Excel."
+          ],
+          "type": "string"
+        }
+      },
       "type": "object"
     },
     "SignedUris": {
@@ -8798,6 +8818,18 @@
       }
     },
     "type": "object"
+    },
+    "XlsxOutputFile": {
+      "description": "Contains a single output file of type XLSX.",
+      "id": "XlsxOutputFile",
+      "properties": {
+        "signedUri": {
+          "$ref": "SignedUri",
+          "description": "Output only. Signed URI destination.",
+          "readOnly": true
+        }
+      },
+      "type": "object"
     }
   },
   "servicePath": "",
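A sketch of requesting the new spreadsheet format and reading the result back. The OutputFile branching mirrors the fields added in this change; the SignedUri.Uri field name on the read side is assumed from the existing CSV path.

package example

import migrationcenter "google.golang.org/api/migrationcenter/v1alpha1"

// xlsxDestination asks for XLSX explicitly; FILE_FORMAT_UNSPECIFIED still
// falls back to CSV per the enum description above.
func xlsxDestination() *migrationcenter.SignedUriDestination {
	return &migrationcenter.SignedUriDestination{FileFormat: "XLSX"}
}

// outputURI branches on whichever output-only payload the server populated.
func outputURI(f *migrationcenter.OutputFile) string {
	if f.XlsxOutputFile != nil && f.XlsxOutputFile.SignedUri != nil {
		return f.XlsxOutputFile.SignedUri.Uri // field name assumed
	}
	if f.CsvOutputFile != nil && f.CsvOutputFile.SignedUri != nil {
		return f.CsvOutputFile.SignedUri.Uri
	}
	return ""
}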
diff --git a/migrationcenter/v1alpha1/migrationcenter-gen.go b/migrationcenter/v1alpha1/migrationcenter-gen.go
index f9ebdb27876..7d4689cb5b6 100644
--- a/migrationcenter/v1alpha1/migrationcenter-gen.go
+++ b/migrationcenter/v1alpha1/migrationcenter-gen.go
@@ -5005,6 +5005,8 @@ type OutputFile struct {
 	CsvOutputFile *CsvOutputFile `json:"csvOutputFile,omitempty"`
 	// FileSizeBytes: Output only. File size in bytes.
 	FileSizeBytes int64 `json:"fileSizeBytes,omitempty"`
+	// XlsxOutputFile: Output only. XLSX output file.
+	XlsxOutputFile *XlsxOutputFile `json:"xlsxOutputFile,omitempty"`
 	// ForceSendFields is a list of field names (e.g. "CsvOutputFile") to
 	// unconditionally include in API requests. By default, fields with empty or
 	// default values are omitted from API requests. See
@@ -6492,6 +6494,30 @@ func (s SignedUri) MarshalJSON() ([]byte, error) {
 
 // SignedUriDestination: Signed URI destination configuration.
 type SignedUriDestination struct {
+	// FileFormat: Required. The file format to export.
+	//
+	// Possible values:
+	//   "FILE_FORMAT_UNSPECIFIED" - Unspecified file format will be treated as
+	// CSV.
+	//   "CSV" - CSV file format.
+	//   "XLSX" - XLSX file format, which is used in Excel.
+	FileFormat string `json:"fileFormat,omitempty"`
+	// ForceSendFields is a list of field names (e.g. "FileFormat") to
+	// unconditionally include in API requests. By default, fields with empty or
+	// default values are omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+	// details.
+	ForceSendFields []string `json:"-"`
+	// NullFields is a list of field names (e.g. "FileFormat") to include in API
+	// requests with the JSON null value. By default, fields with empty values are
+	// omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+	NullFields []string `json:"-"`
+}
+
+func (s SignedUriDestination) MarshalJSON() ([]byte, error) {
+	type NoMethod SignedUriDestination
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // SignedUris: Contains a list of Signed URIs.
@@ -7570,6 +7596,28 @@ func (s VmwarePlatformDetails) MarshalJSON() ([]byte, error) {
 	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
+// XlsxOutputFile: Contains a single output file of type XLSX.
+type XlsxOutputFile struct {
+	// SignedUri: Output only. Signed URI destination.
+	SignedUri *SignedUri `json:"signedUri,omitempty"`
+	// ForceSendFields is a list of field names (e.g. "SignedUri") to
+	// unconditionally include in API requests. By default, fields with empty or
+	// default values are omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+	// details.
+	ForceSendFields []string `json:"-"`
+	// NullFields is a list of field names (e.g. "SignedUri") to include in API
+	// requests with the JSON null value. By default, fields with empty values are
+	// omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+	NullFields []string `json:"-"`
+}
+
+func (s XlsxOutputFile) MarshalJSON() ([]byte, error) {
+	type NoMethod XlsxOutputFile
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
 type ProjectsLocationsGetCall struct {
 	s    *Service
 	name string
diff --git a/texttospeech/v1/texttospeech-api.json b/texttospeech/v1/texttospeech-api.json
index fd347dc6141..d29406c2d37 100644
--- a/texttospeech/v1/texttospeech-api.json
+++ b/texttospeech/v1/texttospeech-api.json
@@ -318,7 +318,7 @@
       }
     }
   },
-  "revision": "20241116",
+  "revision": "20241216",
   "rootUrl": "https://texttospeech.googleapis.com/",
   "schemas": {
     "AdvancedVoiceOptions": {
@@ -344,7 +344,8 @@
         "MP3",
         "OGG_OPUS",
         "MULAW",
-        "ALAW"
+        "ALAW",
+        "PCM"
       ],
       "enumDescriptions": [
         "Not specified. Will return result google.rpc.Code.INVALID_ARGUMENT.",
@@ -352,7 +353,8 @@
         "MP3 audio at 32kbps.",
         "Opus encoded audio wrapped in an ogg container. The result will be a file which can be played natively on Android, and in browsers (at least Chrome and Firefox). The quality of the encoding is considerably higher than MP3 while using approximately the same bitrate.",
         "8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. Audio content returned as MULAW also contains a WAV header.",
-        "8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law. Audio content returned as ALAW also contains a WAV header."
+        "8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law. Audio content returned as ALAW also contains a WAV header.",
+        "Uncompressed 16-bit signed little-endian samples (Linear PCM). Note that as opposed to LINEAR16, audio will not be wrapped in a WAV (or any other) header."
       ],
       "type": "string"
     },
diff --git a/texttospeech/v1/texttospeech-gen.go b/texttospeech/v1/texttospeech-gen.go
index e0d9dcbaa7c..3c47c8a2b1c 100644
--- a/texttospeech/v1/texttospeech-gen.go
+++ b/texttospeech/v1/texttospeech-gen.go
@@ -269,6 +269,9 @@ type AudioConfig struct {
 	// PCMU/mu-law. Audio content returned as MULAW also contains a WAV header.
 	//   "ALAW" - 8-bit samples that compand 14-bit audio samples using G.711
 	// PCMU/A-law. Audio content returned as ALAW also contains a WAV header.
+	//   "PCM" - Uncompressed 16-bit signed little-endian samples (Linear PCM).
+ // Note that as opposed to LINEAR16, audio will not be wrapped in a WAV (or any + // other) header. AudioEncoding string `json:"audioEncoding,omitempty"` // EffectsProfileId: Optional. Input only. An identifier which selects 'audio // effects' profiles that are applied on (post synthesized) text to speech. diff --git a/texttospeech/v1beta1/texttospeech-api.json b/texttospeech/v1beta1/texttospeech-api.json index 77169025a57..b4a9d21d4b4 100644 --- a/texttospeech/v1beta1/texttospeech-api.json +++ b/texttospeech/v1beta1/texttospeech-api.json @@ -261,7 +261,7 @@ } } }, - "revision": "20241026", + "revision": "20241216", "rootUrl": "https://texttospeech.googleapis.com/", "schemas": { "AdvancedVoiceOptions": { @@ -288,7 +288,8 @@ "MP3_64_KBPS", "OGG_OPUS", "MULAW", - "ALAW" + "ALAW", + "PCM" ], "enumDescriptions": [ "Not specified. Will return result google.rpc.Code.INVALID_ARGUMENT.", @@ -297,7 +298,8 @@ "MP3 at 64kbps.", "Opus encoded audio wrapped in an ogg container. The result will be a file which can be played natively on Android, and in browsers (at least Chrome and Firefox). The quality of the encoding is considerably higher than MP3 while using approximately the same bitrate.", "8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. Audio content returned as MULAW also contains a WAV header.", - "8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law. Audio content returned as ALAW also contains a WAV header." + "8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law. Audio content returned as ALAW also contains a WAV header.", + "Uncompressed 16-bit signed little-endian samples (Linear PCM). Note that as opposed to LINEAR16, audio will not be wrapped in a WAV (or any other) header." ], "type": "string" }, diff --git a/texttospeech/v1beta1/texttospeech-gen.go b/texttospeech/v1beta1/texttospeech-gen.go index 4e5417b8ca1..ed423c1413b 100644 --- a/texttospeech/v1beta1/texttospeech-gen.go +++ b/texttospeech/v1beta1/texttospeech-gen.go @@ -258,6 +258,9 @@ type AudioConfig struct { // PCMU/mu-law. Audio content returned as MULAW also contains a WAV header. // "ALAW" - 8-bit samples that compand 14-bit audio samples using G.711 // PCMU/A-law. Audio content returned as ALAW also contains a WAV header. + // "PCM" - Uncompressed 16-bit signed little-endian samples (Linear PCM). + // Note that as opposed to LINEAR16, audio will not be wrapped in a WAV (or any + // other) header. AudioEncoding string `json:"audioEncoding,omitempty"` // EffectsProfileId: Optional. Input only. An identifier which selects 'audio // effects' profiles that are applied on (post synthesized) text to speech.
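A sketch of selecting the new encoding via the v1 client. Because PCM arrives headerless (unlike LINEAR16), the caller has to carry the sample rate out of band; the values here are placeholders.

package example

import texttospeech "google.golang.org/api/texttospeech/v1"

// rawPCMRequest asks for bare little-endian samples with no WAV header, so
// SampleRateHertz must be remembered by the caller for playback.
func rawPCMRequest() *texttospeech.SynthesizeSpeechRequest {
	return &texttospeech.SynthesizeSpeechRequest{
		Input: &texttospeech.SynthesisInput{Text: "Hello, world"},
		Voice: &texttospeech.VoiceSelectionParams{LanguageCode: "en-US"},
		AudioConfig: &texttospeech.AudioConfig{
			AudioEncoding:   "PCM",
			SampleRateHertz: 24000,
		},
	}
}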