diff --git a/sdk/search/azure-search-documents/CHANGELOG.md b/sdk/search/azure-search-documents/CHANGELOG.md index 4a1bd49f692b..84fde7ce1243 100644 --- a/sdk/search/azure-search-documents/CHANGELOG.md +++ b/sdk/search/azure-search-documents/CHANGELOG.md @@ -8,6 +8,21 @@ - Split searchindex.json and searchservice.json models and operations into separate namespaces #11508 - Renamed `edm` to `SearchFieldDataType` #11511 - Now Search Synonym Map creation/update returns a model #11514 +- Renaming #11565 + SearchIndexerDataSource -> SearchIndexerDataSourceConnection + SearchField.SynonymMaps -> SearchField.SynonymMapNames + SearchField.Analyzer -> SearchField.AnalyzerName + SearchField.IndexAnalyzer -> SearchField.IndexAnalyzerName + SearchField.SearchAnalyzer -> SearchField.SearchAnalyzerName + SearchableField.SynonymMaps -> SearchableField.SynonymMapNames + SearchableField.Analyzer -> SearchableField.AnalyzerName + SearchableField.IndexAnalyzer -> SearchableField.IndexAnalyzerName + SearchableField.SearchAnalyzer -> SearchableField.SearchAnalyzerName + Similarity -> SimilarityAlgorithm + Suggester -> SearchSuggester + PathHierarchyTokenizerV2 -> PathHierarchyTokenizer + + ## 1.0.0b3 (2020-05-04) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/__init__.py index f416b5efafd4..b0d6dd0718e8 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/__init__.py @@ -4,6 +4,7 @@ # ------------------------------------ from ._index import ( # pylint: disable=unused-import ComplexField, + SearchField, SearchableField, SimpleField, ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_index.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_index.py index 
f04e97e50ac1..7f128cdfd2ca 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_index.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_index.py @@ -5,8 +5,8 @@ # -------------------------------------------------------------------------- from typing import TYPE_CHECKING -from ._edm import Collection, ComplexType -from ._generated.models import SearchField +import msrest.serialization +from ._edm import Collection, ComplexType, String if TYPE_CHECKING: from typing import Any, Dict, List @@ -14,6 +14,185 @@ __all__ = ("ComplexField", "SearchableField", "SimpleField") +class SearchField(msrest.serialization.Model): + # pylint: disable=too-many-instance-attributes + """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the field, which must be unique within the fields collection + of the index or parent field. + :type name: str + :param type: Required. The data type of the field. Possible values include: "Edm.String", + "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", + "Edm.GeographyPoint", "Edm.ComplexType". + :type type: str or ~azure.search.documents.models.SearchFieldDataType + :param key: A value indicating whether the field uniquely identifies documents in the index. + Exactly one top-level field in each index must be chosen as the key field and it must be of + type Edm.String. Key fields can be used to look up documents directly and update or delete + specific documents. Default is false for simple fields and null for complex fields. + :type key: bool + :param is_hidden: A value indicating whether the field will be hidden from search results. 
+ You can enable this option if you want to use a field (for example, margin) as a filter, + sorting, or scoring mechanism but do not want the field to be visible to the end user. This + property must be False for key fields, and it must be null for complex fields. This property can + be changed on existing fields. Enabling this property does not cause any increase in index + storage requirements. Default is False for simple fields and null for complex fields. + :type is_hidden: bool + :param searchable: A value indicating whether the field is full-text searchable. This means it + will undergo analysis such as word-breaking during indexing. If you set a searchable field to a + value like "sunny day", internally it will be split into the individual tokens "sunny" and + "day". This enables full-text searches for these terms. Fields of type Edm.String or + Collection(Edm.String) are searchable by default. This property must be false for simple fields + of other non-string data types, and it must be null for complex fields. Note: searchable fields + consume extra space in your index since Azure Cognitive Search will store an additional + tokenized version of the field value for full-text searches. If you want to save space in your + index and you don't need a field to be included in searches, set searchable to false. + :type searchable: bool + :param filterable: A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields of type + Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so + comparisons are for exact matches only. For example, if you set such a field f to "sunny day", + $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property + must be null for complex fields. Default is true for simple fields and null for complex fields. 
+ :type filterable: bool + :param sortable: A value indicating whether to enable the field to be referenced in $orderby + expressions. By default Azure Cognitive Search sorts results by score, but in many experiences + users will want to sort by fields in the documents. A simple field can be sortable only if it + is single-valued (it has a single value in the scope of the parent document). Simple collection + fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex + collections are also multi-valued, and therefore cannot be sortable. This is true whether it's + an immediate parent field, or an ancestor field, that's the complex collection. Complex fields + cannot be sortable and the sortable property must be null for such fields. The default for + sortable is true for single-valued simple fields, false for multi-valued simple fields, and + null for complex fields. + :type sortable: bool + :param facetable: A value indicating whether to enable the field to be referenced in facet + queries. Typically used in a presentation of search results that includes hit count by category + (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so + on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple + fields. + :type facetable: bool + :param analyzer_name: The name of the analyzer to use for the field. This option can be used only + with searchable fields and it can't be set together with either searchAnalyzer or + indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null + for complex fields. 
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :type analyzer_name: str or ~azure.search.documents.models.LexicalAnalyzerName + :param search_analyzer_name: The name of the analyzer used at search time for the field. This option + can be used only with searchable fields. It must be set together with indexAnalyzer and it + cannot be set together with the analyzer option. This property cannot be set to the name of a + language analyzer; use the analyzer property instead if you need a language analyzer. This + analyzer can be updated on an existing field. 
Must be null for complex fields. Possible values + include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh- + Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft", + "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", "et.microsoft", + "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", "de.microsoft", + "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", "hi.microsoft", + "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", "id.lucene", + "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", "kn.microsoft", + "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", "ml.microsoft", + "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", "pl.microsoft", + "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", "pt-PT.lucene", + "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr- + cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :type search_analyzer_name: str or ~azure.search.documents.models.LexicalAnalyzerName + :param index_analyzer_name: The name of the analyzer used at indexing time for the field. This + option can be used only with searchable fields. It must be set together with searchAnalyzer and + it cannot be set together with the analyzer option. This property cannot be set to the name of + a language analyzer; use the analyzer property instead if you need a language analyzer. 
Once + the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. + Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", + "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh- + Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", + "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", + "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", + "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", + "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", + "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", + "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", + "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", + "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr- + cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :type index_analyzer_name: str or ~azure.search.documents.models.LexicalAnalyzerName + :param synonym_map_names: A list of the names of synonym maps to associate with this field. This + option can be used only with searchable fields. Currently only one synonym map per field is + supported. 
Assigning a synonym map to a field ensures that query terms targeting that field are + expanded at query-time using the rules in the synonym map. This attribute can be changed on + existing fields. Must be null or an empty collection for complex fields. + :type synonym_map_names: list[str] + :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). Must be null or empty for simple fields. + :type fields: list[~azure.search.documents.models.SearchField] + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'bool'}, + 'is_hidden': {'key': 'isHidden', 'type': 'bool'}, + 'searchable': {'key': 'searchable', 'type': 'bool'}, + 'filterable': {'key': 'filterable', 'type': 'bool'}, + 'sortable': {'key': 'sortable', 'type': 'bool'}, + 'facetable': {'key': 'facetable', 'type': 'bool'}, + 'analyzer_name': {'key': 'analyzerName', 'type': 'str'}, + 'search_analyzer_name': {'key': 'searchAnalyzerName', 'type': 'str'}, + 'index_analyzer_name': {'key': 'indexAnalyzerName', 'type': 'str'}, + 'synonym_map_names': {'key': 'synonymMapNames', 'type': '[str]'}, + 'fields': {'key': 'fields', 'type': '[SearchField]'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchField, self).__init__(**kwargs) + self.name = kwargs['name'] + self.type = kwargs['type'] + self.key = kwargs.get('key', None) + self.is_hidden = kwargs.get('is_hidden', None) + self.searchable = kwargs.get('searchable', None) + self.filterable = kwargs.get('filterable', None) + self.sortable = kwargs.get('sortable', None) + self.facetable = kwargs.get('facetable', None) + self.analyzer_name = kwargs.get('analyzer_name', None) + self.search_analyzer_name = kwargs.get('search_analyzer_name', None) + self.index_analyzer_name = kwargs.get('index_analyzer_name', None) + self.synonym_map_names = 
kwargs.get('synonym_map_names', None) + self.fields = kwargs.get('fields', None) + + def SimpleField(**kw): # type: (**Any) -> SearchField """Configure a simple field for an Azure Search Index @@ -25,19 +204,19 @@ def SimpleField(**kw): SearchFieldDataType.Int32, SearchFieldDataType.Int64, SearchFieldDataType.Double, SearchFieldDataType.Boolean, SearchFieldDataType.DateTimeOffset, SearchFieldDataType.GeographyPoint, SearchFieldDataType.ComplexType, from `azure.search.documents.SearchFieldDataType`. - :type type: str + :type type: str :param key: A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type SearchFieldDataType.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is False :type key: bool - :param hidden: A value indicating whether the field can be returned in a search result. + :param is_hidden: A value indicating whether the field can be returned in a search result. You can enable this option if you want to use a field (for example, margin) as a filter, sorting, or scoring mechanism but do not want the field to be visible to the end user. This property must be False for key fields. This property can be changed on existing fields. Enabling this property does not cause any increase in index storage requirements. Default is False. - :type retrievable: bool + :type is_hidden: bool :param filterable: A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. 
Fields of type SearchFieldDataType.String or Collection(SearchFieldDataType.String) that are filterable do @@ -67,7 +246,7 @@ def SimpleField(**kw): result["filterable"] = kw.get("filterable", False) result["facetable"] = kw.get("facetable", False) result["sortable"] = kw.get("sortable", False) - result["retrievable"] = not kw.get("hidden", False) + result["is_hidden"] = kw.get("is_hidden", False) return SearchField(**result) @@ -78,21 +257,20 @@ def SearchableField(**kw): :param name: Required. The name of the field, which must be unique within the fields collection of the index or parent field. :type name: str - :param type: Required. The data type of the field. Possible values include: SearchFieldDataType.String - and Collection(SearchFieldDataType.String), from `azure.search.documents.SearchFieldDataType`. - :type type: str + :param collection: Whether this search field is a collection (default False) + :type collection: bool :param key: A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type SearchFieldDataType.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is False :type key: bool - :param hidden: A value indicating whether the field can be returned in a search result. + :param is_hidden: A value indicating whether the field can be returned in a search result. You can enable this option if you want to use a field (for example, margin) as a filter, sorting, or scoring mechanism but do not want the field to be visible to the end user. This property must be False for key fields. This property can be changed on existing fields. Enabling this property does not cause any increase in index storage requirements. Default is False. - :type hidden: bool + :type is_hidden: bool :param searchable: A value indicating whether the field is full-text searchable. 
This means it will undergo analysis such as word-breaking during indexing. If you set a searchable field to a value like "sunny day", internally it will be split into the individual tokens "sunny" and @@ -117,7 +295,7 @@ def SearchableField(**kw): (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). Default is False. :type facetable: bool - :param analyzer: The name of the analyzer to use for the field. This option can't be set together + :param analyzer_name: The name of the analyzer to use for the field. This option can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- @@ -136,8 +314,8 @@ def SearchableField(**kw): 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.AnalyzerName - :param search_analyzer: The name of the analyzer used at search time for the field. It must be + :type analyzer_name: str or ~search_service_client.models.AnalyzerName + :param search_analyzer_name: The name of the analyzer used at search time for the field. It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Possible @@ -159,8 +337,8 @@ def SearchableField(**kw): 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', 'whitespace'. 
- :type search_analyzer: str or ~search_service_client.models.AnalyzerName - :param index_analyzer: The name of the analyzer used at indexing time for the field. + :type search_analyzer_name: str or ~search_service_client.models.AnalyzerName + :param index_analyzer_name: The name of the analyzer used at indexing time for the field. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be @@ -182,29 +360,30 @@ def SearchableField(**kw): 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', 'whitespace'. - :type index_analyzer: str or ~search_service_client.models.AnalyzerName - :param synonym_maps: A list of the names of synonym maps to associate with this field. Currently + :type index_analyzer_name: str or ~search_service_client.models.AnalyzerName + :param synonym_map_names: A list of the names of synonym maps to associate with this field. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. This attribute can be changed on existing fields. 
- :type synonym_maps: list[str] + :type synonym_map_names: list[str] """ - result = {"name": kw.get("name"), "type": kw.get("type")} # type: Dict[str, Any] + typ = Collection(String) if kw.get("collection", False) else String + result = {"name": kw.get("name"), "type": typ} # type: Dict[str, Any] result["key"] = kw.get("key", False) result["searchable"] = kw.get("searchable", True) result["filterable"] = kw.get("filterable", False) result["facetable"] = kw.get("facetable", False) result["sortable"] = kw.get("sortable", False) - result["retrievable"] = not kw.get("hidden", False) - if "analyzer" in kw: - result["analyzer"] = kw["analyzer"] - if "search_analyzer" in kw: - result["search_analyzer"] = kw["search_analyzer"] - if "index_analyzer" in kw: - result["index_analyzer"] = kw["index_analyzer"] - if "synonym_maps" in kw: - result["synonym_maps"] = kw["synonym_maps"] + result["is_hidden"] = kw.get("is_hidden", False) + if "analyzer_name" in kw: + result["analyzer_name"] = kw["analyzer_name"] + if "search_analyzer_name" in kw: + result["search_analyzer_name"] = kw["search_analyzer_name"] + if "index_analyzer_name" in kw: + result["index_analyzer_name"] = kw["index_analyzer_name"] + if "synonym_map_names" in kw: + result["synonym_map_names"] = kw["synonym_map_names"] return SearchField(**result) @@ -227,3 +406,90 @@ def ComplexField(**kw): result = {"name": kw.get("name"), "type": typ} # type: Dict[str, Any] result["fields"] = kw.get("fields") return SearchField(**result) + + +class SearchIndex(msrest.serialization.Model): + # pylint: disable=too-many-instance-attributes + """Represents a search index definition, which describes the fields and search behavior of an index. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the index. + :type name: str + :param fields: Required. The fields of the index. 
+ :type fields: list[~azure.search.documents.models.SearchField] + :param scoring_profiles: The scoring profiles for the index. + :type scoring_profiles: list[~azure.search.documents.models.ScoringProfile] + :param default_scoring_profile: The name of the scoring profile to use if none is specified in + the query. If this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used. + :type default_scoring_profile: str + :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :type cors_options: ~azure.search.documents.models.CorsOptions + :param suggesters: The suggesters for the index. + :type suggesters: list[~azure.search.documents.models.SearchSuggester] + :param analyzers: The analyzers for the index. + :type analyzers: list[~azure.search.documents.models.LexicalAnalyzer] + :param tokenizers: The tokenizers for the index. + :type tokenizers: list[~azure.search.documents.models.LexicalTokenizer] + :param token_filters: The token filters for the index. + :type token_filters: list[~azure.search.documents.models.TokenFilter] + :param char_filters: The character filters for the index. + :type char_filters: list[~azure.search.documents.models.CharFilter] + :param encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. 
+ :type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey + :param similarity: The type of similarity algorithm to be used when scoring and ranking the + documents matching a search query. The similarity algorithm can only be defined at index + creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity + algorithm is used. + :type similarity: ~azure.search.documents.models.SimilarityAlgorithm + :param e_tag: The ETag of the index. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'fields': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'fields': {'key': 'fields', 'type': '[SearchField]'}, + 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, + 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, + 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, + 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, + 'analyzers': {'key': 'analyzers', 'type': '[LexicalAnalyzer]'}, + 'tokenizers': {'key': 'tokenizers', 'type': '[LexicalTokenizer]'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, + 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'}, + 'similarity': {'key': 'similarity', 'type': 'Similarity'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndex, self).__init__(**kwargs) + self.name = kwargs['name'] + self.fields = kwargs['fields'] + self.scoring_profiles = kwargs.get('scoring_profiles', None) + self.default_scoring_profile = kwargs.get('default_scoring_profile', None) + self.cors_options = kwargs.get('cors_options', None) + self.suggesters = kwargs.get('suggesters', None) + self.analyzers = kwargs.get('analyzers', None) + self.tokenizers = kwargs.get('tokenizers', None) + self.token_filters = 
kwargs.get('token_filters', None) + self.char_filters = kwargs.get('char_filters', None) + self.encryption_key = kwargs.get('encryption_key', None) + self.similarity = kwargs.get('similarity', None) + self.e_tag = kwargs.get('e_tag', None) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_search_index_client.py index f572c79f14a7..5af83818b167 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_search_index_client.py @@ -12,9 +12,9 @@ from ._generated import SearchServiceClient as _SearchServiceClient from ._generated.models import SynonymMap as _SynonymMap from ._utils import ( - delistize_flags_for_index, - listize_flags_for_index, - listize_synonyms, + unpack_search_index, + pack_search_index, + unpack_synonyms, pack_search_resource_encryption_key, get_access_conditions, normalize_endpoint, @@ -83,7 +83,7 @@ def list_indexes(self, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - return self._client.indexes.list(cls=lambda objs: [listize_flags_for_index(x) for x in objs], **kwargs) + return self._client.indexes.list(cls=lambda objs: [unpack_search_index(x) for x in objs], **kwargs) @distributed_trace def get_index(self, index_name, **kwargs): @@ -107,7 +107,7 @@ def get_index(self, index_name, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = self._client.indexes.get(index_name, **kwargs) - return listize_flags_for_index(result) + return unpack_search_index(result) @distributed_trace def get_index_statistics(self, index_name, **kwargs): @@ -181,9 +181,9 @@ def create_index(self, index, **kwargs): :caption: Creating a new index. 
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - patched_index = delistize_flags_for_index(index) + patched_index = pack_search_index(index) result = self._client.indexes.create(patched_index, **kwargs) - return result + return unpack_search_index(result) @distributed_trace def create_or_update_index( @@ -226,7 +226,7 @@ def create_or_update_index( index, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) kwargs.update(access_condition) - patched_index = delistize_flags_for_index(index) + patched_index = pack_search_index(index) result = self._client.indexes.create_or_update( index_name=index_name, index=patched_index, @@ -234,7 +234,7 @@ def create_or_update_index( error_map=error_map, **kwargs ) - return result + return unpack_search_index(result) @distributed_trace def analyze_text(self, index_name, analyze_request, **kwargs): @@ -285,7 +285,7 @@ def get_synonym_maps(self, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = self._client.synonym_maps.list(**kwargs) - return [listize_synonyms(x) for x in result.synonym_maps] + return [unpack_synonyms(x) for x in result.synonym_maps] @distributed_trace def get_synonym_map(self, name, **kwargs): @@ -310,7 +310,7 @@ def get_synonym_map(self, name, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = self._client.synonym_maps.get(name, **kwargs) - return listize_synonyms(result) + return unpack_synonyms(result) @distributed_trace def delete_synonym_map(self, synonym_map, **kwargs): @@ -375,7 +375,7 @@ def create_synonym_map(self, name, synonyms, **kwargs): solr_format_synonyms = "\n".join(synonyms) synonym_map = _SynonymMap(name=name, synonyms=solr_format_synonyms) result = self._client.synonym_maps.create(synonym_map, **kwargs) - return listize_synonyms(result) + return unpack_synonyms(result) @distributed_trace def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwargs): @@ 
-413,7 +413,7 @@ def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwargs): error_map=error_map, **kwargs ) - return listize_synonyms(result) + return unpack_synonyms(result) @distributed_trace def get_service_statistics(self, **kwargs): diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py index 24fce6f65f25..e29bcbcc9eae 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py @@ -19,7 +19,8 @@ SearchIndexerDataSource as _SearchIndexerDataSource, SearchResourceEncryptionKey as _SearchResourceEncryptionKey, SynonymMap as _SynonymMap, - SearchIndex, + SearchField as _SearchField, + SearchIndex as _SearchIndex, PatternAnalyzer as _PatternAnalyzer, PatternTokenizer as _PatternTokenizer, ) @@ -30,6 +31,10 @@ SearchIndexerDataSourceConnection, SearchResourceEncryptionKey, ) +from ._index import ( + SearchField, + SearchIndex, +) if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports @@ -126,45 +131,93 @@ def listize_flags_for_pattern_tokenizer(pattern_tokenizer): ) -def delistize_flags_for_index(index): - # type: (SearchIndex) -> SearchIndex - if index.analyzers: - index.analyzers = [ +def pack_search_index(search_index): + # type: (SearchIndex) -> _SearchIndex + if not search_index: + return None + if search_index.analyzers: + analyzers = [ delistize_flags_for_pattern_analyzer(x) # type: ignore if isinstance(x, PatternAnalyzer) else x - for x in index.analyzers + for x in search_index.analyzers ] # mypy: ignore - if index.tokenizers: - index.tokenizers = [ + else: + analyzers = None + if search_index.tokenizers: + tokenizers = [ delistize_flags_for_pattern_tokenizer(x) # type: ignore if isinstance(x, PatternTokenizer) else x - for x in index.tokenizers + for x in 
search_index.tokenizers ] - return index + else: + tokenizers = None + if search_index.fields: + fields = [pack_search_field(x) for x in search_index.fields] + else: + fields = None + return _SearchIndex( + name=search_index.name, + fields=fields, + scoring_profiles=search_index.scoring_profiles, + default_scoring_profile=search_index.default_scoring_profile, + cors_options=search_index.cors_options, + suggesters=search_index.suggesters, + analyzers=analyzers, + tokenizers=tokenizers, + token_filters=search_index.token_filters, + char_filters=search_index.char_filters, + encryption_key=pack_search_resource_encryption_key(search_index.encryption_key), + similarity=search_index.similarity, + e_tag=search_index.e_tag + ) -def listize_flags_for_index(index): - # type: (SearchIndex) -> SearchIndex - if index.analyzers: - index.analyzers = [ +def unpack_search_index(search_index): + # type: (_SearchIndex) -> SearchIndex + if not search_index: + return None + if search_index.analyzers: + analyzers = [ listize_flags_for_pattern_analyzer(x) # type: ignore if isinstance(x, _PatternAnalyzer) else x - for x in index.analyzers + for x in search_index.analyzers ] - if index.tokenizers: - index.tokenizers = [ + else: + analyzers = None + if search_index.tokenizers: + tokenizers = [ listize_flags_for_pattern_tokenizer(x) # type: ignore if isinstance(x, _PatternTokenizer) else x - for x in index.tokenizers + for x in search_index.tokenizers ] - return index + else: + tokenizers = None + if search_index.fields: + fields = [unpack_search_field(x) for x in search_index.fields] + else: + fields = None + return SearchIndex( + name=search_index.name, + fields=fields, + scoring_profiles=search_index.scoring_profiles, + default_scoring_profile=search_index.default_scoring_profile, + cors_options=search_index.cors_options, + suggesters=search_index.suggesters, + analyzers=analyzers, + tokenizers=tokenizers, + token_filters=search_index.token_filters, +
char_filters=search_index.char_filters, + encryption_key=unpack_search_resource_encryption_key(search_index.encryption_key), + similarity=search_index.similarity, + e_tag=search_index.e_tag + ) -def listize_synonyms(synonym_map): +def unpack_synonyms(synonym_map): # type: (_SynonymMap) -> SynonymMap return SynonymMap( name=synonym_map.name, @@ -173,14 +226,18 @@ def listize_synonyms(synonym_map): e_tag=synonym_map.e_tag ) + def pack_search_resource_encryption_key(search_resource_encryption_key): # type: (SearchResourceEncryptionKey) -> _SearchResourceEncryptionKey if not search_resource_encryption_key: return None - access_credentials = AzureActiveDirectoryApplicationCredentials( - application_id=search_resource_encryption_key.application_id, - application_secret=search_resource_encryption_key.application_secret - ) + if search_resource_encryption_key.application_id and search_resource_encryption_key.application_secret: + access_credentials = AzureActiveDirectoryApplicationCredentials( + application_id=search_resource_encryption_key.application_id, + application_secret=search_resource_encryption_key.application_secret + ) + else: + access_credentials = None return _SearchResourceEncryptionKey( key_name=search_resource_encryption_key.key_name, key_version=search_resource_encryption_key.key_version, @@ -188,18 +245,26 @@ def pack_search_resource_encryption_key(search_resource_encryption_key): access_credentials=access_credentials ) + def unpack_search_resource_encryption_key(search_resource_encryption_key): # type: (_SearchResourceEncryptionKey) -> SearchResourceEncryptionKey if not search_resource_encryption_key: return None + if search_resource_encryption_key.access_credentials: + application_id = search_resource_encryption_key.access_credentials.application_id + application_secret = search_resource_encryption_key.access_credentials.application_secret + else: + application_id = None + application_secret = None return SearchResourceEncryptionKey( 
key_name=search_resource_encryption_key.key_name, key_version=search_resource_encryption_key.key_version, vault_uri=search_resource_encryption_key.vault_uri, - application_id=search_resource_encryption_key.access_credentials.application_id, - application_secret=search_resource_encryption_key.access_credentials.application_secret + application_id=application_id, + application_secret=application_secret ) + def pack_search_indexer_data_source(search_indexer_data_source): # type: (SearchIndexerDataSourceConnection) -> _SearchIndexerDataSource if not search_indexer_data_source: @@ -218,21 +283,25 @@ def pack_search_indexer_data_source(search_indexer_data_source): e_tag=search_indexer_data_source.e_tag ) + def unpack_search_indexer_data_source(search_indexer_data_source): # type: (_SearchIndexerDataSource) -> SearchIndexerDataSourceConnection if not search_indexer_data_source: return None + connection_string = search_indexer_data_source.credentials.connection_string \ + if search_indexer_data_source.credentials else None return SearchIndexerDataSourceConnection( name=search_indexer_data_source.name, description=search_indexer_data_source.description, type=search_indexer_data_source.type, - connection_string=search_indexer_data_source.credentials.connection_string, + connection_string=connection_string, container=search_indexer_data_source.container, data_change_detection_policy=search_indexer_data_source.data_change_detection_policy, data_deletion_detection_policy=search_indexer_data_source.data_deletion_detection_policy, e_tag=search_indexer_data_source.e_tag ) + def get_access_conditions(model, match_condition=MatchConditions.Unconditionally): # type: (Any, MatchConditions) -> Tuple[Dict[int, Any], Dict[str, bool]] error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError} @@ -258,6 +327,7 @@ def get_access_conditions(model, match_condition=MatchConditions.Unconditionally except AttributeError: raise ValueError("Unable to get e_tag from the model") + def 
normalize_endpoint(endpoint): try: if not endpoint.lower().startswith('http'): @@ -267,3 +337,79 @@ def normalize_endpoint(endpoint): return endpoint except AttributeError: raise ValueError("Endpoint must be a string.") + + +def pack_search_field(search_field): + # type: (SearchField) -> _SearchField + if not search_field: + return None + if isinstance(search_field, dict): + name = search_field.get("name") + field_type = search_field.get("type") + key = search_field.get("key") + is_hidden = search_field.get("is_hidden") + searchable = search_field.get("searchable") + filterable = search_field.get("filterable") + sortable = search_field.get("sortable") + facetable = search_field.get("facetable") + analyzer_name = search_field.get("analyzer_name") + search_analyzer_name = search_field.get("search_analyzer_name") + index_analyzer_name = search_field.get("index_analyzer_name") + synonym_map_names = search_field.get("synonym_map_names") + fields = search_field.get("fields") + fields = [pack_search_field(x) for x in fields] if fields else None + return _SearchField( + name=name, + type=field_type, + key=key, + retrievable=not is_hidden, + searchable=searchable, + filterable=filterable, + sortable=sortable, + facetable=facetable, + analyzer=analyzer_name, + search_analyzer=search_analyzer_name, + index_analyzer=index_analyzer_name, + synonym_maps=synonym_map_names, + fields=fields + ) + fields = [pack_search_field(x) for x in search_field.fields] \ + if search_field.fields else None + return _SearchField( + name=search_field.name, + type=search_field.type, + key=search_field.key, + retrievable=not search_field.is_hidden, + searchable=search_field.searchable, + filterable=search_field.filterable, + sortable=search_field.sortable, + facetable=search_field.facetable, + analyzer=search_field.analyzer_name, + search_analyzer=search_field.search_analyzer_name, + index_analyzer=search_field.index_analyzer_name, + synonym_maps=search_field.synonym_map_names, + fields=fields + ) + 
+ +def unpack_search_field(search_field): + # type: (_SearchField) -> SearchField + if not search_field: + return None + fields = [unpack_search_field(x) for x in search_field.fields] \ + if search_field.fields else None + return SearchField( + name=search_field.name, + type=search_field.type, + key=search_field.key, + is_hidden=not search_field.retrievable, + searchable=search_field.searchable, + filterable=search_field.filterable, + sortable=search_field.sortable, + facetable=search_field.facetable, + analyzer_name=search_field.analyzer, + search_analyzer_name=search_field.search_analyzer, + index_analyzer_name=search_field.index_analyzer, + synonym_map_names=search_field.synonym_maps, + fields=fields + ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_index_client.py index 150d7fcfdc28..0c4196d1d2a6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_index_client.py @@ -13,9 +13,9 @@ from .._generated.models import SynonymMap from ....aio import SearchClient from .._utils import ( - delistize_flags_for_index, - listize_flags_for_index, - listize_synonyms, + pack_search_index, + unpack_search_index, + unpack_synonyms, pack_search_resource_encryption_key, get_access_conditions, normalize_endpoint, @@ -87,7 +87,7 @@ def list_indexes(self, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - return self._client.indexes.list(cls=lambda objs: [listize_flags_for_index(x) for x in objs], **kwargs) + return self._client.indexes.list(cls=lambda objs: [unpack_search_index(x) for x in objs], **kwargs) @distributed_trace_async async def get_index(self, index_name, **kwargs): @@ -111,7 +111,7 @@ async def get_index(self, index_name,
**kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.indexes.get(index_name, **kwargs) - return listize_flags_for_index(result) + return unpack_search_index(result) @distributed_trace_async async def get_index_statistics(self, index_name, **kwargs): @@ -185,7 +185,7 @@ async def create_index(self, index, **kwargs): :caption: Creating a new index. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - patched_index = delistize_flags_for_index(index) + patched_index = pack_search_index(index) result = await self._client.indexes.create(patched_index, **kwargs) return result @@ -230,7 +230,7 @@ async def create_or_update_index( index, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) kwargs.update(access_condition) - patched_index = delistize_flags_for_index(index) + patched_index = pack_search_index(index) result = await self._client.indexes.create_or_update( index_name=index_name, index=patched_index, @@ -289,7 +289,7 @@ async def get_synonym_maps(self, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.synonym_maps.list(**kwargs) - return [listize_synonyms(x) for x in result.synonym_maps] + return [unpack_synonyms(x) for x in result.synonym_maps] @distributed_trace_async async def get_synonym_map(self, name, **kwargs): @@ -314,7 +314,7 @@ async def get_synonym_map(self, name, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.synonym_maps.get(name, **kwargs) - return listize_synonyms(result) + return unpack_synonyms(result) @distributed_trace_async async def delete_synonym_map(self, synonym_map, **kwargs): @@ -380,7 +380,7 @@ async def create_synonym_map(self, name, synonyms, **kwargs): solr_format_synonyms = "\n".join(synonyms) synonym_map = SynonymMap(name=name, synonyms=solr_format_synonyms) result = await 
self._client.synonym_maps.create(synonym_map, **kwargs) - return listize_synonyms(result) + return unpack_synonyms(result) @distributed_trace_async async def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwargs): @@ -418,7 +418,7 @@ async def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwarg error_map=error_map, **kwargs ) - return listize_synonyms(result) + return unpack_synonyms(result) @distributed_trace_async async def get_service_statistics(self, **kwargs): diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py index 14a832503d8e..8cafe20955af 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py @@ -26,6 +26,7 @@ from .._internal import ( ComplexField, + SearchField, SearchableField, SimpleField, SearchFieldDataType, @@ -77,12 +78,12 @@ NGramTokenizer, OcrSkill, OutputFieldMappingEntry, + PathHierarchyTokenizerV2 as PathHierarchyTokenizer, PatternCaptureTokenFilter, PatternReplaceCharFilter, PatternReplaceTokenFilter, PhoneticTokenFilter, RegexFlags, - SearchField, SearchIndex, SearchIndexer, SearchIndexerDataContainer, @@ -92,13 +93,14 @@ SentimentSkill, ShaperSkill, ShingleTokenFilter, + Similarity as SimilarityAlgorithm, SnowballTokenFilter, SplitSkill, StemmerOverrideTokenFilter, StemmerTokenFilter, StopAnalyzer, StopwordsTokenFilter, - Suggester, + Suggester as SearchSuggester, SynonymTokenFilter, TagScoringFunction, TagScoringParameters, @@ -168,6 +170,7 @@ "NGramTokenizer", "OcrSkill", "OutputFieldMappingEntry", + "PathHierarchyTokenizer", "PatternAnalyzer", "PatternCaptureTokenFilter", "PatternReplaceCharFilter", @@ -189,13 +192,14 @@ "ShaperSkill", "ShingleTokenFilter", "SimpleField", + "SimilarityAlgorithm", "SnowballTokenFilter", "SplitSkill", 
"StemmerOverrideTokenFilter", "StemmerTokenFilter", "StopAnalyzer", "StopwordsTokenFilter", - "Suggester", + "SearchSuggester", "SynonymMap", "SynonymTokenFilter", "TagScoringFunction", diff --git a/sdk/search/azure-search-documents/tests/test_index_field_helpers.py b/sdk/search/azure-search-documents/tests/test_index_field_helpers.py index 10ce0d7f5f38..6c077a58f05e 100644 --- a/sdk/search/azure-search-documents/tests/test_index_field_helpers.py +++ b/sdk/search/azure-search-documents/tests/test_index_field_helpers.py @@ -26,10 +26,10 @@ def test_single(self): assert fld.facetable is None assert fld.searchable is None assert fld.filterable is None - assert fld.analyzer is None - assert fld.search_analyzer is None - assert fld.index_analyzer is None - assert fld.synonym_maps is None + assert fld.analyzer_name is None + assert fld.search_analyzer_name is None + assert fld.index_analyzer_name is None + assert fld.synonym_map_names is None def test_collection(self): fld = ComplexField(name="foo", fields=[], collection=True) @@ -40,39 +40,39 @@ def test_collection(self): assert fld.facetable is None assert fld.searchable is None assert fld.filterable is None - assert fld.analyzer is None - assert fld.search_analyzer is None - assert fld.index_analyzer is None - assert fld.synonym_maps is None + assert fld.analyzer_name is None + assert fld.search_analyzer_name is None + assert fld.index_analyzer_name is None + assert fld.synonym_map_names is None class TestSimplexField(object): def test_defaults(self): fld = SimpleField(name="foo", type=SearchFieldDataType.Double) assert fld.name == "foo" assert fld.type == SearchFieldDataType.Double - assert fld.retrievable == True + assert fld.is_hidden == False assert fld.sortable == False assert fld.facetable == False assert fld.searchable == False assert fld.filterable == False - assert fld.analyzer is None - assert fld.search_analyzer is None - assert fld.index_analyzer is None - assert fld.synonym_maps is None + assert 
fld.analyzer_name is None + assert fld.search_analyzer_name is None + assert fld.index_analyzer_name is None + assert fld.synonym_map_names is None class TestSearchableField(object): def test_defaults(self): - fld = SearchableField(name="foo", type=SearchFieldDataType.Collection(SearchFieldDataType.String)) + fld = SearchableField(name="foo", collection=True) assert fld.name == "foo" assert fld.type == SearchFieldDataType.Collection(SearchFieldDataType.String) - assert fld.retrievable == True + assert fld.is_hidden == False assert fld.sortable == False assert fld.facetable == False assert fld.searchable == True assert fld.filterable == False - assert fld.analyzer is None - assert fld.search_analyzer is None - assert fld.index_analyzer is None - assert fld.synonym_maps is None + assert fld.analyzer_name is None + assert fld.search_analyzer_name is None + assert fld.index_analyzer_name is None + assert fld.synonym_map_names is None diff --git a/sdk/search/azure-search-documents/tests/test_regex_flags.py b/sdk/search/azure-search-documents/tests/test_regex_flags.py index 52ed7f43a101..469ce67005ad 100644 --- a/sdk/search/azure-search-documents/tests/test_regex_flags.py +++ b/sdk/search/azure-search-documents/tests/test_regex_flags.py @@ -8,9 +8,9 @@ PatternAnalyzer as _PatternAnalyzer, PatternTokenizer as _PatternTokenizer, ) -from azure.search.documents.indexes._internal._utils import delistize_flags_for_index, listize_flags_for_index +from azure.search.documents.indexes._internal._utils import unpack_search_index, pack_search_index -def test_listize_flags_for_index(): +def test_unpack_search_index(): pattern_analyzer = _PatternAnalyzer( name="test_analyzer", flags="CANON_EQ" @@ -29,7 +29,7 @@ def test_listize_flags_for_index(): analyzers=analyzers, tokenizers=tokenizers ) - result = listize_flags_for_index(index) + result = unpack_search_index(index) assert isinstance(result.analyzers[0], PatternAnalyzer) assert isinstance(result.analyzers[0].flags, list) assert 
result.analyzers[0].flags[0] == "CANON_EQ" @@ -37,7 +37,7 @@ def test_listize_flags_for_index(): assert isinstance(result.tokenizers[0].flags, list) assert result.tokenizers[0].flags[0] == "CANON_EQ" -def test_listize_multi_flags_for_index(): +def test_multi_unpack_search_index(): pattern_analyzer = _PatternAnalyzer( name="test_analyzer", flags="CANON_EQ|MULTILINE" @@ -56,7 +56,7 @@ def test_listize_multi_flags_for_index(): analyzers=analyzers, tokenizers=tokenizers ) - result = listize_flags_for_index(index) + result = unpack_search_index(index) assert isinstance(result.analyzers[0], PatternAnalyzer) assert isinstance(result.analyzers[0].flags, list) assert result.analyzers[0].flags[0] == "CANON_EQ" @@ -66,7 +66,7 @@ def test_listize_multi_flags_for_index(): assert result.tokenizers[0].flags[0] == "CANON_EQ" assert result.tokenizers[0].flags[1] == "MULTILINE" -def test_listize_flags_for_index_enum(): +def test_unpack_search_index_enum(): pattern_analyzer = _PatternAnalyzer( name="test_analyzer", flags=RegexFlags.canon_eq @@ -85,7 +85,7 @@ def test_listize_flags_for_index_enum(): analyzers=analyzers, tokenizers=tokenizers ) - result = listize_flags_for_index(index) + result = unpack_search_index(index) assert isinstance(result.analyzers[0], PatternAnalyzer) assert isinstance(result.analyzers[0].flags, list) assert result.analyzers[0].flags[0] == "CANON_EQ" @@ -93,7 +93,7 @@ def test_listize_flags_for_index_enum(): assert isinstance(result.tokenizers[0].flags, list) assert result.tokenizers[0].flags[0] == "CANON_EQ" -def test_delistize_flags_for_index(): +def test_pack_search_index(): pattern_analyzer = PatternAnalyzer( name="test_analyzer", flags=["CANON_EQ"] @@ -112,7 +112,7 @@ def test_delistize_flags_for_index(): analyzers=analyzers, tokenizers=tokenizers ) - result = delistize_flags_for_index(index) + result = pack_search_index(index) assert isinstance(result.analyzers[0], _PatternAnalyzer) assert isinstance(result.analyzers[0].flags, str) assert 
result.analyzers[0].flags == "CANON_EQ" @@ -120,7 +120,7 @@ def test_delistize_flags_for_index(): assert isinstance(result.tokenizers[0].flags, str) assert result.tokenizers[0].flags == "CANON_EQ" -def test_delistize_multi_flags_for_index(): +def test_multi_pack_search_index(): pattern_analyzer = PatternAnalyzer( name="test_analyzer", flags=["CANON_EQ", "MULTILINE"] @@ -139,7 +139,7 @@ def test_delistize_multi_flags_for_index(): analyzers=analyzers, tokenizers=tokenizers ) - result = delistize_flags_for_index(index) + result = pack_search_index(index) assert isinstance(result.analyzers[0], _PatternAnalyzer) assert isinstance(result.analyzers[0].flags, str) assert result.analyzers[0].flags == "CANON_EQ|MULTILINE"