diff --git a/sdk/search/Azure.Search.Documents/CHANGELOG.md b/sdk/search/Azure.Search.Documents/CHANGELOG.md index 8bea0751336b..0889564e0319 100644 --- a/sdk/search/Azure.Search.Documents/CHANGELOG.md +++ b/sdk/search/Azure.Search.Documents/CHANGELOG.md @@ -4,11 +4,16 @@ ### Breaking Changes +- Made collection- and dictionary-type properties read-only, i.e., they have only get-accessors, based on [.NET Guidelines][net-guidelines-collection-properties]. - Moved models for managing indexes, indexers, and skillsets to `Azure.Search.Documents.Indexes.Models`. -- Split `SearchServiceClient` into `SearchIndexClient` for managing indexes, and `SearchIndexerClient` for managing indexers, both of which are now in `Azure.Search.Documents.Indexes`. +- Removed the `SynonymMap.Format` property since only the "solr" format is supported currently. +- Renamed `AnalyzeRequest` to `AnalyzeTextOptions`, and overloaded constructors with required parameters. +- Renamed `AnalyzeTextOptions.Analyzer` to `AnalyzeTextOptions.AnalyzerName`. +- Renamed `AnalyzeTextOptions.Tokenizer` to `AnalyzeTextOptions.TokenizerName`. +- Renamed `CustomAnalyzer.Tokenizer` to `CustomAnalyzer.TokenizerName`. - Renamed `SearchIndexerDataSource` to `SearchIndexerDataSourceConnection`. - Renamed methods on `SearchIndexerClient` matching "\*DataSource" to "\*DataSourceConnection". -- Made collection- and dictionary-type properties read-only, i.e. has only get-accessors, based on [.NET Guidelines][net-guidelines-collection-properties]. +- Split `SearchServiceClient` into `SearchIndexClient` for managing indexes, and `SearchIndexerClient` for managing indexers, both of which are now in `Azure.Search.Documents.Indexes`. 
## 1.0.0-preview.3 (2020-05-05) diff --git a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs index 6fd9a6cb48f3..bac910eae60c 100644 --- a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs +++ b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs @@ -109,8 +109,8 @@ public SearchIndexClient(System.Uri endpoint, Azure.AzureKeyCredential credentia public SearchIndexClient(System.Uri endpoint, Azure.AzureKeyCredential credential, Azure.Search.Documents.SearchClientOptions options) { } public virtual System.Uri Endpoint { get { throw null; } } public virtual string ServiceName { get { throw null; } } - public virtual Azure.Response> AnalyzeText(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeRequest analyzeRequest, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task>> AnalyzeTextAsync(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeRequest analyzeRequest, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response> AnalyzeText(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task>> AnalyzeTextAsync(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response CreateIndex(Azure.Search.Documents.Indexes.Models.SearchIndex index, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { 
throw null; } public virtual System.Threading.Tasks.Task> CreateIndexAsync(Azure.Search.Documents.Indexes.Models.SearchIndex index, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response CreateOrUpdateIndex(Azure.Search.Documents.Indexes.Models.SearchIndex index, bool allowIndexDowntime = false, bool onlyIfUnchanged = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } @@ -212,14 +212,16 @@ internal AnalyzedTokenInfo() { } public int StartOffset { get { throw null; } } public string Token { get { throw null; } } } - public partial class AnalyzeRequest + public partial class AnalyzeTextOptions { - public AnalyzeRequest(string text) { } - public Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName? Analyzer { get { throw null; } set { } } + public AnalyzeTextOptions(string text) { } + public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName analyzerName) { } + public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizerName) { } + public Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName? AnalyzerName { get { throw null; } } public System.Collections.Generic.IList CharFilters { get { throw null; } } public string Text { get { throw null; } } public System.Collections.Generic.IList TokenFilters { get { throw null; } } - public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName? Tokenizer { get { throw null; } set { } } + public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName? 
TokenizerName { get { throw null; } } } public partial class AsciiFoldingTokenFilter : Azure.Search.Documents.Indexes.Models.TokenFilter { @@ -293,10 +295,10 @@ public CorsOptions(System.Collections.Generic.IEnumerable allowedOrigins } public partial class CustomAnalyzer : Azure.Search.Documents.Indexes.Models.LexicalAnalyzer { - public CustomAnalyzer(string name, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizer) { } + public CustomAnalyzer(string name, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizerName) { } public System.Collections.Generic.IList CharFilters { get { throw null; } } public System.Collections.Generic.IList TokenFilters { get { throw null; } } - public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName Tokenizer { get { throw null; } set { } } + public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName TokenizerName { get { throw null; } set { } } } public partial class DataChangeDetectionPolicy { @@ -1582,7 +1584,6 @@ public SynonymMap(string name, System.IO.TextReader reader) { } public SynonymMap(string name, string synonyms) { } public Azure.Search.Documents.Indexes.Models.SearchResourceEncryptionKey EncryptionKey { get { throw null; } set { } } public Azure.ETag? 
ETag { get { throw null; } set { } } - public string Format { get { throw null; } set { } } public string Name { get { throw null; } set { } } public string Synonyms { get { throw null; } set { } } } diff --git a/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs b/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs index d043fa369afd..6f7c379dc5cb 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs @@ -584,7 +584,7 @@ public Response GetStatistics(string indexName, Cancellat } } - internal HttpMessage CreateAnalyzeRequest(string indexName, AnalyzeRequest request) + internal HttpMessage CreateAnalyzeRequest(string indexName, AnalyzeTextOptions request) { var message = _pipeline.CreateMessage(); var request0 = message.Request; @@ -612,7 +612,7 @@ internal HttpMessage CreateAnalyzeRequest(string indexName, AnalyzeRequest reque /// The name of the index for which to test an analyzer. /// The text and analyzer or analysis components to test. /// The cancellation token to use. - public async Task> AnalyzeAsync(string indexName, AnalyzeRequest request, CancellationToken cancellationToken = default) + public async Task> AnalyzeAsync(string indexName, AnalyzeTextOptions request, CancellationToken cancellationToken = default) { if (indexName == null) { @@ -650,7 +650,7 @@ public async Task> AnalyzeAsync(string indexName, Analyz /// The name of the index for which to test an analyzer. /// The text and analyzer or analysis components to test. /// The cancellation token to use. 
- public Response Analyze(string indexName, AnalyzeRequest request, CancellationToken cancellationToken = default) + public Response Analyze(string indexName, AnalyzeTextOptions request, CancellationToken cancellationToken = default) { if (indexName == null) { diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.cs deleted file mode 100644 index 56ad05a5cfa4..000000000000 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.cs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace Azure.Search.Documents.Indexes.Models -{ - /// Specifies some text and analysis components used to break that text into tokens. - public partial class AnalyzeRequest - { - /// Initializes a new instance of AnalyzeRequest. - /// The text to break into tokens. - public AnalyzeRequest(string text) - { - if (text == null) - { - throw new ArgumentNullException(nameof(text)); - } - - Text = text; - TokenFilters = new List(); - CharFilters = new List(); - } - - /// Initializes a new instance of AnalyzeRequest. - /// The text to break into tokens. - /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. - /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. - /// An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - /// An optional list of character filters to use when breaking the given text. 
This parameter can only be set when using the tokenizer parameter. - internal AnalyzeRequest(string text, LexicalAnalyzerName? analyzer, LexicalTokenizerName? tokenizer, IList tokenFilters, IList charFilters) - { - Text = text; - Analyzer = analyzer; - Tokenizer = tokenizer; - TokenFilters = tokenFilters ?? new List(); - CharFilters = charFilters ?? new List(); - } - - /// The text to break into tokens. - public string Text { get; } - /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. - public LexicalAnalyzerName? Analyzer { get; set; } - /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. - public LexicalTokenizerName? Tokenizer { get; set; } - } -} diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.Serialization.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.Serialization.cs similarity index 83% rename from sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.Serialization.cs rename to sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.Serialization.cs index a5943ff2a735..b52a3b874967 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.Serialization.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.Serialization.cs @@ -11,22 +11,22 @@ namespace Azure.Search.Documents.Indexes.Models { - public partial class AnalyzeRequest : IUtf8JsonSerializable + public partial class AnalyzeTextOptions : IUtf8JsonSerializable { void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) { writer.WriteStartObject(); writer.WritePropertyName("text"); writer.WriteStringValue(Text); - if (Analyzer != null) + if (AnalyzerName != null) { 
writer.WritePropertyName("analyzer"); - writer.WriteStringValue(Analyzer.Value.ToString()); + writer.WriteStringValue(AnalyzerName.Value.ToString()); } - if (Tokenizer != null) + if (TokenizerName != null) { writer.WritePropertyName("tokenizer"); - writer.WriteStringValue(Tokenizer.Value.ToString()); + writer.WriteStringValue(TokenizerName.Value.ToString()); } if (TokenFilters != null && TokenFilters.Any()) { diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.cs new file mode 100644 index 000000000000..ee81a8ed6578 --- /dev/null +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.cs @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Search.Documents.Indexes.Models +{ + /// Specifies some text and analysis components used to break that text into tokens. + public partial class AnalyzeTextOptions + { + /// Initializes a new instance of AnalyzeTextOptions. + /// The text to break into tokens. + public AnalyzeTextOptions(string text) + { + if (text == null) + { + throw new ArgumentNullException(nameof(text)); + } + + Text = text; + TokenFilters = new List(); + CharFilters = new List(); + } + + /// Initializes a new instance of AnalyzeTextOptions. + /// The text to break into tokens. + /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. + /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. + /// An optional list of token filters to use when breaking the given text. 
This parameter can only be set when using the tokenizer parameter. + /// An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. + internal AnalyzeTextOptions(string text, LexicalAnalyzerName? analyzerName, LexicalTokenizerName? tokenizerName, IList tokenFilters, IList charFilters) + { + Text = text; + AnalyzerName = analyzerName; + TokenizerName = tokenizerName; + TokenFilters = tokenFilters ?? new List(); + CharFilters = charFilters ?? new List(); + } + + /// The text to break into tokens. + public string Text { get; } + } +} diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.Serialization.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.Serialization.cs index 10848d4d2dd2..c485886e1875 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.Serialization.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.Serialization.cs @@ -18,7 +18,7 @@ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) { writer.WriteStartObject(); writer.WritePropertyName("tokenizer"); - writer.WriteStringValue(Tokenizer.ToString()); + writer.WriteStringValue(TokenizerName.ToString()); if (TokenFilters != null && TokenFilters.Any()) { writer.WritePropertyName("tokenFilters"); diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.cs index 240bc7a5fd29..d04776fad06a 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.cs @@ -13,37 +13,19 @@ namespace Azure.Search.Documents.Indexes.Models /// Allows you to take control over the process of converting text into indexable/searchable tokens. 
It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. public partial class CustomAnalyzer : LexicalAnalyzer { - /// Initializes a new instance of CustomAnalyzer. - /// The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. - public CustomAnalyzer(string name, LexicalTokenizerName tokenizer) : base(name) - { - if (name == null) - { - throw new ArgumentNullException(nameof(name)); - } - - Tokenizer = tokenizer; - TokenFilters = new List(); - CharFilters = new List(); - ODataType = "#Microsoft.Azure.Search.CustomAnalyzer"; - } /// Initializes a new instance of CustomAnalyzer. /// Identifies the concrete type of the analyzer. /// The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. + /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. /// A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. /// A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. 
- internal CustomAnalyzer(string oDataType, string name, LexicalTokenizerName tokenizer, IList tokenFilters, IList charFilters) : base(oDataType, name) + internal CustomAnalyzer(string oDataType, string name, LexicalTokenizerName tokenizerName, IList tokenFilters, IList charFilters) : base(oDataType, name) { - Tokenizer = tokenizer; + TokenizerName = tokenizerName; TokenFilters = tokenFilters ?? new List(); CharFilters = charFilters ?? new List(); ODataType = oDataType ?? "#Microsoft.Azure.Search.CustomAnalyzer"; } - - /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. - public LexicalTokenizerName Tokenizer { get; set; } } } diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.Serialization.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.Serialization.cs index 980f61fc67ad..1e92dcff4768 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.Serialization.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.Serialization.cs @@ -17,8 +17,11 @@ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) writer.WriteStartObject(); writer.WritePropertyName("name"); writer.WriteStringValue(Name); - writer.WritePropertyName("format"); - writer.WriteStringValue(Format); + if (Format != null) + { + writer.WritePropertyName("format"); + writer.WriteStringValue(Format); + } writer.WritePropertyName("synonyms"); writer.WriteStringValue(Synonyms); if (EncryptionKey != null) @@ -50,6 +53,10 @@ internal static SynonymMap DeserializeSynonymMap(JsonElement element) } if (property.NameEquals("format")) { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } format = property.Value.GetString(); continue; } diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.cs index 68f6f1d2c4f3..6fdbfcdf5c77 100644 
--- a/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.cs @@ -30,8 +30,6 @@ internal SynonymMap(string name, string format, string synonyms, SearchResourceE /// The name of the synonym map. public string Name { get; set; } - /// The format of the synonym map. Only the 'solr' format is currently supported. - public string Format { get; set; } /// A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. public string Synonyms { get; set; } /// A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeRequest.cs b/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeRequest.cs deleted file mode 100644 index 1b9f1bc3eead..000000000000 --- a/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeRequest.cs +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System.Collections.Generic; -using Azure.Core; - -namespace Azure.Search.Documents.Indexes.Models -{ - public partial class AnalyzeRequest - { - /// An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. 
- [CodeGenMember(EmptyAsUndefined = true, Initialize = true)] - public IList TokenFilters { get; } - - /// An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - [CodeGenMember(EmptyAsUndefined = true, Initialize = true)] - public IList CharFilters { get; } - } -} diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs b/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs new file mode 100644 index 000000000000..1892b02af806 --- /dev/null +++ b/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs @@ -0,0 +1,59 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using Azure.Core; + +namespace Azure.Search.Documents.Indexes.Models +{ + [CodeGenModel("AnalyzeRequest")] + public partial class AnalyzeTextOptions + { + /// + /// Initializes a new instance of AnalyzeTextOptions. + /// + /// Required text to break into tokens. + /// The name of the analyzer to use to break the given . + /// is null. + public AnalyzeTextOptions(string text, LexicalAnalyzerName analyzerName) + { + Text = text ?? throw new ArgumentNullException(nameof(text)); + AnalyzerName = analyzerName; + + TokenFilters = new List(); + CharFilters = new List(); + } + + /// + /// Initializes a new instance of AnalyzeTextOptions. + /// + /// Required text to break into tokens. + /// The name of the tokenizer to use to break the given . + /// is null. + public AnalyzeTextOptions(string text, LexicalTokenizerName tokenizerName) + { + Text = text ?? throw new ArgumentNullException(nameof(text)); + TokenizerName = tokenizerName; + + TokenFilters = new List(); + CharFilters = new List(); + } + + /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. 
The tokenizer and analyzer parameters are mutually exclusive. + [CodeGenMember("Analyzer")] + public LexicalAnalyzerName? AnalyzerName { get; } + + /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. + [CodeGenMember("Tokenizer")] + public LexicalTokenizerName? TokenizerName { get; } + + /// An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. + [CodeGenMember(EmptyAsUndefined = true, Initialize = true)] + public IList TokenFilters { get; } + + /// An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. + [CodeGenMember(EmptyAsUndefined = true, Initialize = true)] + public IList CharFilters { get; } + } +} diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/Models/CustomAnalyzer.cs b/sdk/search/Azure.Search.Documents/src/Indexes/Models/CustomAnalyzer.cs index bb60990ae83d..3613759ebeb2 100644 --- a/sdk/search/Azure.Search.Documents/src/Indexes/Models/CustomAnalyzer.cs +++ b/sdk/search/Azure.Search.Documents/src/Indexes/Models/CustomAnalyzer.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. +using System; using System.Collections.Generic; using Azure.Core; @@ -8,6 +9,22 @@ namespace Azure.Search.Documents.Indexes.Models { public partial class CustomAnalyzer { + /// Initializes a new instance of CustomAnalyzer. + /// The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. + /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. + /// is null. 
+ public CustomAnalyzer(string name, LexicalTokenizerName tokenizerName) : base(name) + { + TokenizerName = tokenizerName; + TokenFilters = new List(); + CharFilters = new List(); + ODataType = "#Microsoft.Azure.Search.CustomAnalyzer"; + } + + /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. + [CodeGenMember("Tokenizer")] + public LexicalTokenizerName TokenizerName { get; set; } + /// A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. [CodeGenMember(EmptyAsUndefined = true, Initialize = true)] public IList TokenFilters { get; } diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/Models/SynonymMap.cs b/sdk/search/Azure.Search.Documents/src/Indexes/Models/SynonymMap.cs index 7ffe6e413a97..8e441a7cf376 100644 --- a/sdk/search/Azure.Search.Documents/src/Indexes/Models/SynonymMap.cs +++ b/sdk/search/Azure.Search.Documents/src/Indexes/Models/SynonymMap.cs @@ -23,7 +23,7 @@ public partial class SynonymMap /// The name of the synonym map. /// /// The formatted synonyms string to define. - /// Because only the Solr synonym map format is currently supported, these are values delimited by "\n". + /// Because only the "solr" synonym map format is currently supported, these are values delimited by "\n". /// /// or is an empty string. /// or is null. @@ -43,7 +43,7 @@ public SynonymMap(string name, string synonyms) /// The name of the synonym map. /// /// A from which formatted synonyms are read. - /// Because only the Solr synonym map format is currently supported, these are values delimited by "\n". + /// Because only the "solr" synonym map format is currently supported, these are values delimited by "\n". /// /// is an empty string. /// or is null. @@ -65,5 +65,8 @@ public ETag? 
ETag get => _etag is null ? (ETag?)null : new ETag(_etag); set => _etag = value?.ToString(); } + + /// The format of the synonym map. Only the "solr" format is currently supported. + internal string Format { get; set; } } } diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs b/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs index bbd0ce054fae..6ee47e9acb8d 100644 --- a/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs +++ b/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs @@ -227,16 +227,16 @@ public virtual async Task> GetServiceStatistic /// Shows how an analyzer breaks text into tokens. /// /// The name of the index used to test an analyzer. - /// The containing the text and analyzer or analyzer components to test. + /// The containing the text and analyzer or analyzer components to test. /// Optional to propagate notifications that the operation should be canceled. /// /// The from the server containing a list of for analyzed text. /// - /// Thrown when or is null. + /// Thrown when or is null. /// Thrown when a failure is returned by the Search service. public virtual Response> AnalyzeText( string indexName, - AnalyzeRequest analyzeRequest, + AnalyzeTextOptions options, CancellationToken cancellationToken = default) { using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(SearchIndexClient)}.{nameof(AnalyzeText)}"); @@ -245,7 +245,7 @@ public virtual Response> AnalyzeText( { Response result = IndexesClient.Analyze( indexName, - analyzeRequest, + options, cancellationToken); return Response.FromValue(result.Value.Tokens, result.GetRawResponse()); @@ -261,16 +261,16 @@ public virtual Response> AnalyzeText( /// Shows how an analyzer breaks text into tokens. /// /// The name of the index used to test an analyzer. - /// The containing the text and analyzer or analyzer components to test. + /// The containing the text and analyzer or analyzer components to test. 
/// Optional to propagate notifications that the operation should be canceled. /// /// The from the server containing a list of for analyzed text. /// - /// Thrown when or is null. + /// Thrown when or is null. /// Thrown when a failure is returned by the Search service. public virtual async Task>> AnalyzeTextAsync( string indexName, - AnalyzeRequest analyzeRequest, + AnalyzeTextOptions options, CancellationToken cancellationToken = default) { using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(SearchIndexClient)}.{nameof(AnalyzeText)}"); @@ -279,7 +279,7 @@ public virtual async Task>> AnalyzeTex { Response result = await IndexesClient.AnalyzeAsync( indexName, - analyzeRequest, + options, cancellationToken) .ConfigureAwait(false); diff --git a/sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextOptionsTests.cs b/sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextOptionsTests.cs new file mode 100644 index 000000000000..5e8c6a27474d --- /dev/null +++ b/sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextOptionsTests.cs @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using Azure.Search.Documents.Indexes.Models; +using NUnit.Framework; + +namespace Azure.Search.Documents.Tests.Models +{ + public class AnalyzeTextOptionsTests + { + [Test] + public void RequiresText() + { + ArgumentNullException ex = Assert.Throws(() => new AnalyzeTextOptions(null, LexicalTokenizerName.Whitespace)); + Assert.AreEqual("text", ex.ParamName); + } + } +} diff --git a/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs b/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs index f492c8374921..f365339da857 100644 --- a/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs +++ b/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs @@ -3,6 +3,7 @@ using System; using System.Collections.Generic; +using System.Linq; using System.Net; using System.Threading.Tasks; using Azure.Core; @@ -314,5 +315,20 @@ await client.CreateOrUpdateSynonymMapAsync( await client.DeleteSynonymMapAsync(updatedMap, onlyIfUnchanged: true); } + + [Test] + public async Task AnalyzeText() + { + await using SearchResources resources = await SearchResources.GetSharedHotelsIndexAsync(this); + + SearchIndexClient client = resources.GetIndexClient(); + + AnalyzeTextOptions request = new AnalyzeTextOptions("The quick brown fox jumped over the lazy dog.", LexicalTokenizerName.Whitespace); + + Response> result = await client.AnalyzeTextAsync(resources.IndexName, request); + IReadOnlyList tokens = result.Value; + + Assert.AreEqual(new[] { "The", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "dog." 
}, tokens.Select(t => t.Token)); + } } } diff --git a/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeText.json b/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeText.json new file mode 100644 index 000000000000..883af27c73f5 --- /dev/null +++ b/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeText.json @@ -0,0 +1,106 @@ +{ + "Entries": [ + { + "RequestUri": "https://azs-net-heathsrchtst.search.windows.net/indexes(\u0027omcjubbl\u0027)/search.analyze?api-version=2019-05-06-Preview", + "RequestMethod": "POST", + "RequestHeaders": { + "Accept": "application/json; odata.metadata=minimal", + "api-key": "Sanitized", + "Content-Length": "81", + "Content-Type": "application/json", + "traceparent": "00-0fcda9bf5af6cc4ab507afc2cde9ff76-d9390cf074ea274c-00", + "User-Agent": [ + "azsdk-net-Search.Documents/1.0.0-dev.20200603.1", + "(.NET Core 4.6.28801.04; Microsoft Windows 10.0.19041 )" + ], + "x-ms-client-request-id": "b6bb294210d3599c5ce1600661618eab", + "x-ms-return-client-request-id": "true" + }, + "RequestBody": { + "text": "The quick brown fox jumped over the lazy dog.", + "tokenizer": "whitespace" + }, + "StatusCode": 200, + "ResponseHeaders": { + "Cache-Control": "no-cache", + "client-request-id": "b6bb2942-10d3-599c-5ce1-600661618eab", + "Content-Length": "701", + "Content-Type": "application/json; odata.metadata=minimal", + "Date": "Thu, 04 Jun 2020 02:02:51 GMT", + "elapsed-time": "41", + "Expires": "-1", + "OData-Version": "4.0", + "Pragma": "no-cache", + "Preference-Applied": "odata.include-annotations=\u0022*\u0022", + "request-id": "b6bb2942-10d3-599c-5ce1-600661618eab", + "Strict-Transport-Security": "max-age=15724800; includeSubDomains", + "x-ms-client-request-id": "b6bb2942-10d3-599c-5ce1-600661618eab" + }, + "ResponseBody": { + "@odata.context": 
"https://azs-net-heathsrchtst.search.windows.net/$metadata#Microsoft.Azure.Search.V2019_05_06_Preview.AnalyzeResult", + "tokens": [ + { + "token": "The", + "startOffset": 0, + "endOffset": 3, + "position": 0 + }, + { + "token": "quick", + "startOffset": 4, + "endOffset": 9, + "position": 1 + }, + { + "token": "brown", + "startOffset": 10, + "endOffset": 15, + "position": 2 + }, + { + "token": "fox", + "startOffset": 16, + "endOffset": 19, + "position": 3 + }, + { + "token": "jumped", + "startOffset": 20, + "endOffset": 26, + "position": 4 + }, + { + "token": "over", + "startOffset": 27, + "endOffset": 31, + "position": 5 + }, + { + "token": "the", + "startOffset": 32, + "endOffset": 35, + "position": 6 + }, + { + "token": "lazy", + "startOffset": 36, + "endOffset": 40, + "position": 7 + }, + { + "token": "dog.", + "startOffset": 41, + "endOffset": 45, + "position": 8 + } + ] + } + } + ], + "Variables": { + "RandomSeed": "398631221", + "SearchIndexName": "omcjubbl", + "SEARCH_ADMIN_API_KEY": "Sanitized", + "SEARCH_SERVICE_NAME": "azs-net-heathsrchtst" + } +} \ No newline at end of file diff --git a/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeTextAsync.json b/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeTextAsync.json new file mode 100644 index 000000000000..68fb02aab69b --- /dev/null +++ b/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeTextAsync.json @@ -0,0 +1,106 @@ +{ + "Entries": [ + { + "RequestUri": "https://azs-net-heathsrchtst.search.windows.net/indexes(\u0027omcjubbl\u0027)/search.analyze?api-version=2019-05-06-Preview", + "RequestMethod": "POST", + "RequestHeaders": { + "Accept": "application/json; odata.metadata=minimal", + "api-key": "Sanitized", + "Content-Length": "81", + "Content-Type": "application/json", + "traceparent": "00-2814f435f4dcd24e9d2ab4ee73d60e28-90ec2e1c86b12949-00", + "User-Agent": [ + 
"azsdk-net-Search.Documents/1.0.0-dev.20200603.1", + "(.NET Core 4.6.28801.04; Microsoft Windows 10.0.19041 )" + ], + "x-ms-client-request-id": "768227d217ad9009492ef7abdef00368", + "x-ms-return-client-request-id": "true" + }, + "RequestBody": { + "text": "The quick brown fox jumped over the lazy dog.", + "tokenizer": "whitespace" + }, + "StatusCode": 200, + "ResponseHeaders": { + "Cache-Control": "no-cache", + "client-request-id": "768227d2-17ad-9009-492e-f7abdef00368", + "Content-Length": "701", + "Content-Type": "application/json; odata.metadata=minimal", + "Date": "Thu, 04 Jun 2020 02:02:51 GMT", + "elapsed-time": "6", + "Expires": "-1", + "OData-Version": "4.0", + "Pragma": "no-cache", + "Preference-Applied": "odata.include-annotations=\u0022*\u0022", + "request-id": "768227d2-17ad-9009-492e-f7abdef00368", + "Strict-Transport-Security": "max-age=15724800; includeSubDomains", + "x-ms-client-request-id": "768227d2-17ad-9009-492e-f7abdef00368" + }, + "ResponseBody": { + "@odata.context": "https://azs-net-heathsrchtst.search.windows.net/$metadata#Microsoft.Azure.Search.V2019_05_06_Preview.AnalyzeResult", + "tokens": [ + { + "token": "The", + "startOffset": 0, + "endOffset": 3, + "position": 0 + }, + { + "token": "quick", + "startOffset": 4, + "endOffset": 9, + "position": 1 + }, + { + "token": "brown", + "startOffset": 10, + "endOffset": 15, + "position": 2 + }, + { + "token": "fox", + "startOffset": 16, + "endOffset": 19, + "position": 3 + }, + { + "token": "jumped", + "startOffset": 20, + "endOffset": 26, + "position": 4 + }, + { + "token": "over", + "startOffset": 27, + "endOffset": 31, + "position": 5 + }, + { + "token": "the", + "startOffset": 32, + "endOffset": 35, + "position": 6 + }, + { + "token": "lazy", + "startOffset": 36, + "endOffset": 40, + "position": 7 + }, + { + "token": "dog.", + "startOffset": 41, + "endOffset": 45, + "position": 8 + } + ] + } + } + ], + "Variables": { + "RandomSeed": "197061334", + "SearchIndexName": "omcjubbl", + 
"SEARCH_ADMIN_API_KEY": "Sanitized", + "SEARCH_SERVICE_NAME": "azs-net-heathsrchtst" + } +} \ No newline at end of file