diff --git a/azure-mgmt-datalake-analytics/MANIFEST.in b/azure-mgmt-datalake-analytics/MANIFEST.in
index 9ecaeb15de50..bb37a2723dae 100644
--- a/azure-mgmt-datalake-analytics/MANIFEST.in
+++ b/azure-mgmt-datalake-analytics/MANIFEST.in
@@ -1,2 +1 @@
 include *.rst
-include azure_bdist_wheel.py
\ No newline at end of file
diff --git a/azure-mgmt-datalake-analytics/README.rst b/azure-mgmt-datalake-analytics/README.rst
index 84c59860ba70..45de4e6b43dd 100644
--- a/azure-mgmt-datalake-analytics/README.rst
+++ b/azure-mgmt-datalake-analytics/README.rst
@@ -1,12 +1,12 @@
 Microsoft Azure SDK for Python
 ==============================

-This is the Microsoft Azure Data Lake Analytics Management Client Library.
+This is the Microsoft Azure MyService Management Client Library.

 Azure Resource Manager (ARM) is the next generation of management APIs that
 replace the old Azure Service Management (ASM).

-This package has been tested with Python 2.7, 3.4, 3.5 and 3.6.
+This package has been tested with Python 2.7, 3.4, 3.5, 3.6 and 3.7.

 For the older Azure Service Management (ASM) libraries, see
 `azure-servicemanagement-legacy `__ library.
@@ -36,8 +36,8 @@ If you see azure==0.11.0 (or any version below 1.0), uninstall it first:

 Usage
 =====

-For code examples, see `Azure Data Lake Analytics Management
-`__
+For code examples, see `MyService Management
+`__
 on docs.microsoft.com.

diff --git a/azure-mgmt-datalake-analytics/azure/__init__.py b/azure-mgmt-datalake-analytics/azure/__init__.py
index 849489fca33c..0260537a02bb 100644
--- a/azure-mgmt-datalake-analytics/azure/__init__.py
+++ b/azure-mgmt-datalake-analytics/azure/__init__.py
@@ -1 +1 @@
-__import__('pkg_resources').declare_namespace(__name__)
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
\ No newline at end of file
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/__init__.py b/azure-mgmt-datalake-analytics/azure/mgmt/__init__.py
index 849489fca33c..0260537a02bb 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/__init__.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/__init__.py
@@ -1 +1 @@
-__import__('pkg_resources').declare_namespace(__name__)
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
\ No newline at end of file
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/__init__.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/__init__.py
index 849489fca33c..0260537a02bb 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/__init__.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/__init__.py
@@ -1 +1 @@
-__import__('pkg_resources').declare_namespace(__name__)
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
\ No newline at end of file
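Note: the three __init__.py rewrites above switch the shared azure namespace packages from setuptools' pkg_resources.declare_namespace to the stdlib pkgutil style, so importing azure.mgmt.datalake.* no longer requires setuptools at runtime. A minimal sketch of what extend_path does (the path below is made up):

    import pkgutil

    # extend_path scans every sys.path entry for a directory matching the
    # package name and appends it, letting several pip-installed azure-*
    # distributions contribute subpackages to one logical "azure" package.
    print(pkgutil.extend_path(['/site-packages/azure'], 'azure'))
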
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/data_lake_analytics_catalog_management_client.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/data_lake_analytics_catalog_management_client.py
index 38400cfbc158..626d61e9c2f4 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/data_lake_analytics_catalog_management_client.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/data_lake_analytics_catalog_management_client.py
@@ -9,8 +9,7 @@
 # regenerated.
 # --------------------------------------------------------------------------

-import warnings
-from msrest.service_client import ServiceClient
+from msrest.service_client import SDKClient
 from msrest import Serializer, Deserializer
 from msrestazure import AzureConfiguration
 from .version import VERSION
@@ -49,7 +48,7 @@ def __init__(

         self.adla_catalog_dns_suffix = adla_catalog_dns_suffix

-class DataLakeAnalyticsCatalogManagementClient(object):
+class DataLakeAnalyticsCatalogManagementClient(SDKClient):
     """Creates an Azure Data Lake Analytics catalog client.

     :ivar config: Configuration for client.
@@ -70,7 +69,7 @@ def __init__(
             self, credentials, adla_catalog_dns_suffix):

         self.config = DataLakeAnalyticsCatalogManagementClientConfiguration(credentials, adla_catalog_dns_suffix)
-        self._client = ServiceClient(self.config.credentials, self.config)
+        super(DataLakeAnalyticsCatalogManagementClient, self).__init__(self.config.credentials, self.config)

         client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
         self.api_version = '2016-11-01'
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/__init__.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/__init__.py
index fa539a690a38..031c65b77ffa 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/__init__.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/__init__.py
@@ -9,44 +9,86 @@
 # regenerated.
 # --------------------------------------------------------------------------

-from .acl_create_or_update_parameters import AclCreateOrUpdateParameters
-from .acl_delete_parameters import AclDeleteParameters
-from .acl import Acl
-from .data_lake_analytics_catalog_secret_create_or_update_parameters import DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters
-from .data_lake_analytics_catalog_credential_create_parameters import DataLakeAnalyticsCatalogCredentialCreateParameters
-from .data_lake_analytics_catalog_credential_delete_parameters import DataLakeAnalyticsCatalogCredentialDeleteParameters
-from .data_lake_analytics_catalog_credential_update_parameters import DataLakeAnalyticsCatalogCredentialUpdateParameters
-from .usql_secret import USqlSecret
-from .usql_external_data_source import USqlExternalDataSource
-from .usql_credential import USqlCredential
-from .usql_procedure import USqlProcedure
-from .usql_table_column import USqlTableColumn
-from .usql_directed_column import USqlDirectedColumn
-from .usql_distribution_info import USqlDistributionInfo
-from .usql_index import USqlIndex
-from .ddl_name import DdlName
-from .entity_id import EntityId
-from .external_table import ExternalTable
-from .type_field_info import TypeFieldInfo
-from .usql_table import USqlTable
-from .usql_table_type import USqlTableType
-from .usql_view import USqlView
-from .usql_package import USqlPackage
-from .usql_table_partition import USqlTablePartition
-from .usql_table_statistics import USqlTableStatistics
-from .usql_type import USqlType
-from .usql_table_valued_function import USqlTableValuedFunction
-from .usql_assembly_file_info import USqlAssemblyFileInfo
-from .usql_assembly_dependency_info import USqlAssemblyDependencyInfo
-from .usql_assembly import USqlAssembly
-from .usql_assembly_clr import USqlAssemblyClr
-from .usql_schema import USqlSchema
-from .usql_database import USqlDatabase
-from .catalog_item import CatalogItem
-from .catalog_item_list import CatalogItemList
+try:
+    from .acl_create_or_update_parameters_py3 import AclCreateOrUpdateParameters
+    from .acl_delete_parameters_py3 import AclDeleteParameters
+    from .acl_py3 import Acl
+    from .data_lake_analytics_catalog_secret_create_or_update_parameters_py3 import DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters
+    from .data_lake_analytics_catalog_credential_create_parameters_py3 import DataLakeAnalyticsCatalogCredentialCreateParameters
+    from .data_lake_analytics_catalog_credential_delete_parameters_py3 import DataLakeAnalyticsCatalogCredentialDeleteParameters
+    from .data_lake_analytics_catalog_credential_update_parameters_py3 import DataLakeAnalyticsCatalogCredentialUpdateParameters
+    from .usql_secret_py3 import USqlSecret
+    from .usql_external_data_source_py3 import USqlExternalDataSource
+    from .usql_credential_py3 import USqlCredential
+    from .usql_procedure_py3 import USqlProcedure
+    from .usql_table_column_py3 import USqlTableColumn
+    from .usql_directed_column_py3 import USqlDirectedColumn
+    from .usql_distribution_info_py3 import USqlDistributionInfo
+    from .usql_index_py3 import USqlIndex
+    from .ddl_name_py3 import DdlName
+    from .entity_id_py3 import EntityId
+    from .external_table_py3 import ExternalTable
+    from .type_field_info_py3 import TypeFieldInfo
+    from .usql_table_preview_py3 import USqlTablePreview
+    from .usql_table_py3 import USqlTable
+    from .usql_table_fragment_py3 import USqlTableFragment
+    from .usql_table_type_py3 import USqlTableType
+    from .usql_view_py3 import USqlView
+    from .usql_package_py3 import USqlPackage
+    from .usql_table_partition_py3 import USqlTablePartition
+    from .usql_table_statistics_py3 import USqlTableStatistics
+    from .usql_type_py3 import USqlType
+    from .usql_table_valued_function_py3 import USqlTableValuedFunction
+    from .usql_assembly_file_info_py3 import USqlAssemblyFileInfo
+    from .usql_assembly_dependency_info_py3 import USqlAssemblyDependencyInfo
+    from .usql_assembly_py3 import USqlAssembly
+    from .usql_assembly_clr_py3 import USqlAssemblyClr
+    from .usql_schema_py3 import USqlSchema
+    from .usql_database_py3 import USqlDatabase
+    from .catalog_item_py3 import CatalogItem
+    from .catalog_item_list_py3 import CatalogItemList
+except (SyntaxError, ImportError):
+    from .acl_create_or_update_parameters import AclCreateOrUpdateParameters
+    from .acl_delete_parameters import AclDeleteParameters
+    from .acl import Acl
+    from .data_lake_analytics_catalog_secret_create_or_update_parameters import DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters
+    from .data_lake_analytics_catalog_credential_create_parameters import DataLakeAnalyticsCatalogCredentialCreateParameters
+    from .data_lake_analytics_catalog_credential_delete_parameters import DataLakeAnalyticsCatalogCredentialDeleteParameters
+    from .data_lake_analytics_catalog_credential_update_parameters import DataLakeAnalyticsCatalogCredentialUpdateParameters
+    from .usql_secret import USqlSecret
+    from .usql_external_data_source import USqlExternalDataSource
+    from .usql_credential import USqlCredential
+    from .usql_procedure import USqlProcedure
+    from .usql_table_column import USqlTableColumn
+    from .usql_directed_column import USqlDirectedColumn
+    from .usql_distribution_info import USqlDistributionInfo
+    from .usql_index import USqlIndex
+    from .ddl_name import DdlName
+    from .entity_id import EntityId
+    from .external_table import ExternalTable
+    from .type_field_info import TypeFieldInfo
+    from .usql_table_preview import USqlTablePreview
+    from .usql_table import USqlTable
+    from .usql_table_fragment import USqlTableFragment
+    from .usql_table_type import USqlTableType
+    from .usql_view import USqlView
+    from .usql_package import USqlPackage
+    from .usql_table_partition import USqlTablePartition
+    from .usql_table_statistics import USqlTableStatistics
+    from .usql_type import USqlType
+    from .usql_table_valued_function import USqlTableValuedFunction
+    from .usql_assembly_file_info import USqlAssemblyFileInfo
+    from .usql_assembly_dependency_info import USqlAssemblyDependencyInfo
+    from .usql_assembly import USqlAssembly
+    from .usql_assembly_clr import USqlAssemblyClr
+    from .usql_schema import USqlSchema
+    from .usql_database import USqlDatabase
+    from .catalog_item import CatalogItem
+    from .catalog_item_list import CatalogItemList
 from .usql_credential_paged import USqlCredentialPaged
 from .usql_external_data_source_paged import USqlExternalDataSourcePaged
 from .usql_procedure_paged import USqlProcedurePaged
+from .usql_table_fragment_paged import USqlTableFragmentPaged
 from .usql_table_paged import USqlTablePaged
 from .usql_table_statistics_paged import USqlTableStatisticsPaged
 from .usql_table_type_paged import USqlTableTypePaged
@@ -85,7 +127,9 @@
     'EntityId',
     'ExternalTable',
     'TypeFieldInfo',
+    'USqlTablePreview',
     'USqlTable',
+    'USqlTableFragment',
     'USqlTableType',
     'USqlView',
     'USqlPackage',
@@ -104,6 +148,7 @@
     'USqlCredentialPaged',
     'USqlExternalDataSourcePaged',
     'USqlProcedurePaged',
+    'USqlTableFragmentPaged',
     'USqlTablePaged',
     'USqlTableStatisticsPaged',
     'USqlTableTypePaged',
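Note: models/__init__.py now ships two generated copies of every model: a *_py3 module written with Python-3-only syntax (keyword-only arguments, annotations) and the original module for Python 2. Catching SyntaxError as well as ImportError matters because Python 2 fails at compile time when it reads the py3 files. The same shape with a hypothetical module pair, as it would sit in a package __init__.py:

    try:
        from .widget_py3 import Widget      # parses only on Python 3
    except (SyntaxError, ImportError):
        from .widget import Widget          # Python 2 fallback
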
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl.py
index fbc4d6f2f3ba..c3a76d792cf7 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl.py
@@ -45,8 +45,8 @@ class Acl(Model):
         'permission': {'key': 'permission', 'type': 'str'},
     }

-    def __init__(self):
-        super(Acl, self).__init__()
+    def __init__(self, **kwargs):
+        super(Acl, self).__init__(**kwargs)
         self.ace_type = None
         self.principal_id = None
         self.permission = None
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters.py
index ddb9e68b76fc..c91fabb4ca29 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters.py
@@ -15,17 +15,19 @@ class AclCreateOrUpdateParameters(Model):
     """The parameters used to create or update an access control list (ACL)
     entry.

-    :param ace_type: the access control list (ACL) entry type. UserObj and
-     GroupObj denote the owning user and group, respectively. Possible values
-     include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group'
+    All required parameters must be populated in order to send to Azure.
+
+    :param ace_type: Required. the access control list (ACL) entry type.
+     UserObj and GroupObj denote the owning user and group, respectively.
+     Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group'
     :type ace_type: str or
      ~azure.mgmt.datalake.analytics.catalog.models.AclType
-    :param principal_id: the Azure AD object ID of the user or group being
-     specified in the access control list (ACL) entry.
+    :param principal_id: Required. the Azure AD object ID of the user or group
+     being specified in the access control list (ACL) entry.
     :type principal_id: str
-    :param permission: the permission type of the access control list (ACL)
-     entry. Possible values include: 'None', 'Use', 'Create', 'Drop', 'Alter',
-     'Write', 'All'
+    :param permission: Required. the permission type of the access control
+     list (ACL) entry. Possible values include: 'None', 'Use', 'Create',
+     'Drop', 'Alter', 'Write', 'All'
     :type permission: str or
      ~azure.mgmt.datalake.analytics.catalog.models.PermissionType
     """
@@ -42,8 +44,8 @@ class AclCreateOrUpdateParameters(Model):
         'permission': {'key': 'permission', 'type': 'str'},
     }

-    def __init__(self, ace_type, principal_id, permission):
-        super(AclCreateOrUpdateParameters, self).__init__()
-        self.ace_type = ace_type
-        self.principal_id = principal_id
-        self.permission = permission
+    def __init__(self, **kwargs):
+        super(AclCreateOrUpdateParameters, self).__init__(**kwargs)
+        self.ace_type = kwargs.get('ace_type', None)
+        self.principal_id = kwargs.get('principal_id', None)
+        self.permission = kwargs.get('permission', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters_py3.py
new file mode 100644
index 000000000000..a5abdbc2687e
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters_py3.py
@@ -0,0 +1,51 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class AclCreateOrUpdateParameters(Model):
+    """The parameters used to create or update an access control list (ACL)
+    entry.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param ace_type: Required. the access control list (ACL) entry type.
+     UserObj and GroupObj denote the owning user and group, respectively.
+     Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group'
+    :type ace_type: str or
+     ~azure.mgmt.datalake.analytics.catalog.models.AclType
+    :param principal_id: Required. the Azure AD object ID of the user or group
+     being specified in the access control list (ACL) entry.
+    :type principal_id: str
+    :param permission: Required. the permission type of the access control
+     list (ACL) entry. Possible values include: 'None', 'Use', 'Create',
+     'Drop', 'Alter', 'Write', 'All'
+    :type permission: str or
+     ~azure.mgmt.datalake.analytics.catalog.models.PermissionType
+    """
+
+    _validation = {
+        'ace_type': {'required': True},
+        'principal_id': {'required': True},
+        'permission': {'required': True},
+    }
+
+    _attribute_map = {
+        'ace_type': {'key': 'aceType', 'type': 'str'},
+        'principal_id': {'key': 'principalId', 'type': 'str'},
+        'permission': {'key': 'permission', 'type': 'str'},
+    }
+
+    def __init__(self, *, ace_type, principal_id: str, permission, **kwargs) -> None:
+        super(AclCreateOrUpdateParameters, self).__init__(**kwargs)
+        self.ace_type = ace_type
+        self.principal_id = principal_id
+        self.permission = permission
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters.py
index fa41a05a4b26..fdfb3317dd09 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters.py
@@ -15,13 +15,15 @@ class AclDeleteParameters(Model):
     """The parameters used to delete an access control list (ACL) entry.

-    :param ace_type: the access control list (ACL) entry type. UserObj and
-     GroupObj denote the owning user and group, respectively. Possible values
-     include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group'
+    All required parameters must be populated in order to send to Azure.
+
+    :param ace_type: Required. the access control list (ACL) entry type.
+     UserObj and GroupObj denote the owning user and group, respectively.
+     Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group'
     :type ace_type: str or
      ~azure.mgmt.datalake.analytics.catalog.models.AclType
-    :param principal_id: the Azure AD object ID of the user or group being
-     specified in the access control list (ACL) entry.
+    :param principal_id: Required. the Azure AD object ID of the user or group
+     being specified in the access control list (ACL) entry.
     :type principal_id: str
     """
@@ -35,7 +37,7 @@ class AclDeleteParameters(Model):
         'principal_id': {'key': 'principalId', 'type': 'str'},
     }

-    def __init__(self, ace_type, principal_id):
-        super(AclDeleteParameters, self).__init__()
-        self.ace_type = ace_type
-        self.principal_id = principal_id
+    def __init__(self, **kwargs):
+        super(AclDeleteParameters, self).__init__(**kwargs)
+        self.ace_type = kwargs.get('ace_type', None)
+        self.principal_id = kwargs.get('principal_id', None)
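Note: every model constructor in this patch switches from positional parameters to **kwargs, which is a breaking change for callers that passed fields positionally. A sketch of the call-site difference (the IDs below are made up):

    from azure.mgmt.datalake.analytics.catalog.models import AclDeleteParameters

    # Before this change: AclDeleteParameters('User', '<aad-object-id>')
    # After it, fields must be passed by keyword; a positional call now
    # raises TypeError because __init__ only accepts **kwargs.
    params = AclDeleteParameters(
        ace_type='User',
        principal_id='00000000-0000-0000-0000-000000000000')
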
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters_py3.py
new file mode 100644
index 000000000000..29cb4179acc2
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters_py3.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class AclDeleteParameters(Model):
+    """The parameters used to delete an access control list (ACL) entry.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param ace_type: Required. the access control list (ACL) entry type.
+     UserObj and GroupObj denote the owning user and group, respectively.
+     Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group'
+    :type ace_type: str or
+     ~azure.mgmt.datalake.analytics.catalog.models.AclType
+    :param principal_id: Required. the Azure AD object ID of the user or group
+     being specified in the access control list (ACL) entry.
+    :type principal_id: str
+    """
+
+    _validation = {
+        'ace_type': {'required': True},
+        'principal_id': {'required': True},
+    }
+
+    _attribute_map = {
+        'ace_type': {'key': 'aceType', 'type': 'str'},
+        'principal_id': {'key': 'principalId', 'type': 'str'},
+    }
+
+    def __init__(self, *, ace_type, principal_id: str, **kwargs) -> None:
+        super(AclDeleteParameters, self).__init__(**kwargs)
+        self.ace_type = ace_type
+        self.principal_id = principal_id
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_py3.py
new file mode 100644
index 000000000000..3d98a56102a2
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_py3.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class Acl(Model):
+    """A Data Lake Analytics catalog access control list (ACL) entry.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    :ivar ace_type: the access control list (ACL) entry type. UserObj and
+     GroupObj denote the owning user and group, respectively. Possible values
+     include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group'
+    :vartype ace_type: str or
+     ~azure.mgmt.datalake.analytics.catalog.models.AclType
+    :ivar principal_id: the Azure AD object ID of the user or group being
+     specified in the access control list (ACL) entry.
+    :vartype principal_id: str
+    :ivar permission: the permission type of the access control list (ACL)
+     entry. Possible values include: 'None', 'Use', 'Create', 'Drop', 'Alter',
+     'Write', 'All'
+    :vartype permission: str or
+     ~azure.mgmt.datalake.analytics.catalog.models.PermissionType
+    """
+
+    _validation = {
+        'ace_type': {'readonly': True},
+        'principal_id': {'readonly': True},
+        'permission': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'ace_type': {'key': 'aceType', 'type': 'str'},
+        'principal_id': {'key': 'principalId', 'type': 'str'},
+        'permission': {'key': 'permission', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs) -> None:
+        super(Acl, self).__init__(**kwargs)
+        self.ace_type = None
+        self.principal_id = None
+        self.permission = None
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item.py
index 23ec60d1d05c..741b35995ad2 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item.py
@@ -26,7 +26,7 @@ class CatalogItem(Model):
         'version': {'key': 'version', 'type': 'str'},
     }

-    def __init__(self, compute_account_name=None, version=None):
-        super(CatalogItem, self).__init__()
-        self.compute_account_name = compute_account_name
-        self.version = version
+    def __init__(self, **kwargs):
+        super(CatalogItem, self).__init__(**kwargs)
+        self.compute_account_name = kwargs.get('compute_account_name', None)
+        self.version = kwargs.get('version', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list.py
index 590dd1abf71a..9ad56393d90b 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list.py
@@ -23,6 +23,6 @@ class CatalogItemList(Model):
         'next_link': {'key': 'nextLink', 'type': 'str'},
     }

-    def __init__(self, next_link=None):
-        super(CatalogItemList, self).__init__()
-        self.next_link = next_link
+    def __init__(self, **kwargs):
+        super(CatalogItemList, self).__init__(**kwargs)
+        self.next_link = kwargs.get('next_link', None)
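Note: the *_py3 twins go one step further and declare keyword-only constructors (the bare *) with annotations, so Python 3 itself rejects positional calls. A simplified stand-in, not the generated class:

    class CatalogItemList:                  # simplified stand-in
        def __init__(self, *, next_link: str = None, **kwargs) -> None:
            self.next_link = next_link

    page = CatalogItemList(next_link='https://example.invalid/catalog?page=2')
    # CatalogItemList('https://...') would raise TypeError: positional
    # arguments are no longer accepted.
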
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list_py3.py
new file mode 100644
index 000000000000..8a25d8e32de3
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list_py3.py
@@ -0,0 +1,28 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class CatalogItemList(Model):
+    """A Data Lake Analytics catalog item list.
+
+    :param next_link: the link to the next page of results.
+    :type next_link: str
+    """
+
+    _attribute_map = {
+        'next_link': {'key': 'nextLink', 'type': 'str'},
+    }
+
+    def __init__(self, *, next_link: str=None, **kwargs) -> None:
+        super(CatalogItemList, self).__init__(**kwargs)
+        self.next_link = next_link
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_py3.py
new file mode 100644
index 000000000000..aec21a6408ee
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_py3.py
@@ -0,0 +1,32 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class CatalogItem(Model):
+    """A Data Lake Analytics catalog item.
+
+    :param compute_account_name: the name of the Data Lake Analytics account.
+    :type compute_account_name: str
+    :param version: the version of the catalog item.
+    :type version: str
+    """
+
+    _attribute_map = {
+        'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
+        'version': {'key': 'version', 'type': 'str'},
+    }
+
+    def __init__(self, *, compute_account_name: str=None, version: str=None, **kwargs) -> None:
+        super(CatalogItem, self).__init__(**kwargs)
+        self.compute_account_name = compute_account_name
+        self.version = version
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters.py
index 35c7552cb2f0..ff1312b7e813 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters.py
@@ -15,14 +15,16 @@ class DataLakeAnalyticsCatalogCredentialCreateParameters(Model):
     """Data Lake Analytics catalog credential creation parameters.

-    :param password: the password for the credential and user with access to
-     the data source.
+    All required parameters must be populated in order to send to Azure.
+
+    :param password: Required. the password for the credential and user with
+     access to the data source.
     :type password: str
-    :param uri: the URI identifier for the data source this credential can
-     connect to in the format :
+    :param uri: Required. the URI identifier for the data source this
+     credential can connect to in the format :
     :type uri: str
-    :param user_id: the object identifier for the user associated with this
-     credential with access to the data source.
+    :param user_id: Required. the object identifier for the user associated
+     with this credential with access to the data source.
     :type user_id: str
     """
@@ -38,8 +40,8 @@ class DataLakeAnalyticsCatalogCredentialCreateParameters(Model):
         'user_id': {'key': 'userId', 'type': 'str'},
     }

-    def __init__(self, password, uri, user_id):
-        super(DataLakeAnalyticsCatalogCredentialCreateParameters, self).__init__()
-        self.password = password
-        self.uri = uri
-        self.user_id = user_id
+    def __init__(self, **kwargs):
+        super(DataLakeAnalyticsCatalogCredentialCreateParameters, self).__init__(**kwargs)
+        self.password = kwargs.get('password', None)
+        self.uri = kwargs.get('uri', None)
+        self.user_id = kwargs.get('user_id', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters_py3.py
new file mode 100644
index 000000000000..15ccd484953b
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters_py3.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class DataLakeAnalyticsCatalogCredentialCreateParameters(Model):
+    """Data Lake Analytics catalog credential creation parameters.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param password: Required. the password for the credential and user with
+     access to the data source.
+    :type password: str
+    :param uri: Required. the URI identifier for the data source this
+     credential can connect to in the format :
+    :type uri: str
+    :param user_id: Required. the object identifier for the user associated
+     with this credential with access to the data source.
+    :type user_id: str
+    """
+
+    _validation = {
+        'password': {'required': True},
+        'uri': {'required': True},
+        'user_id': {'required': True},
+    }
+
+    _attribute_map = {
+        'password': {'key': 'password', 'type': 'str'},
+        'uri': {'key': 'uri', 'type': 'str'},
+        'user_id': {'key': 'userId', 'type': 'str'},
+    }
+
+    def __init__(self, *, password: str, uri: str, user_id: str, **kwargs) -> None:
+        super(DataLakeAnalyticsCatalogCredentialCreateParameters, self).__init__(**kwargs)
+        self.password = password
+        self.uri = uri
+        self.user_id = user_id
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters.py
index 91b13687ea70..748060c8e4e4 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters.py
@@ -25,6 +25,6 @@ class DataLakeAnalyticsCatalogCredentialDeleteParameters(Model):
         'password': {'key': 'password', 'type': 'str'},
     }

-    def __init__(self, password=None):
-        super(DataLakeAnalyticsCatalogCredentialDeleteParameters, self).__init__()
-        self.password = password
+    def __init__(self, **kwargs):
+        super(DataLakeAnalyticsCatalogCredentialDeleteParameters, self).__init__(**kwargs)
+        self.password = kwargs.get('password', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters_py3.py
new file mode 100644
index 000000000000..0e1e99f00659
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters_py3.py
@@ -0,0 +1,30 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class DataLakeAnalyticsCatalogCredentialDeleteParameters(Model):
+    """Data Lake Analytics catalog credential deletion parameters.
+
+    :param password: the current password for the credential and user with
+     access to the data source. This is required if the requester is not the
+     account owner.
+    :type password: str
+    """
+
+    _attribute_map = {
+        'password': {'key': 'password', 'type': 'str'},
+    }
+
+    def __init__(self, *, password: str=None, **kwargs) -> None:
+        super(DataLakeAnalyticsCatalogCredentialDeleteParameters, self).__init__(**kwargs)
+        self.password = password
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters.py
index f12009cd4a72..1301d96a77fd 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters.py
@@ -37,9 +37,9 @@ class DataLakeAnalyticsCatalogCredentialUpdateParameters(Model):
         'user_id': {'key': 'userId', 'type': 'str'},
     }

-    def __init__(self, password=None, new_password=None, uri=None, user_id=None):
-        super(DataLakeAnalyticsCatalogCredentialUpdateParameters, self).__init__()
-        self.password = password
-        self.new_password = new_password
-        self.uri = uri
-        self.user_id = user_id
+    def __init__(self, **kwargs):
+        super(DataLakeAnalyticsCatalogCredentialUpdateParameters, self).__init__(**kwargs)
+        self.password = kwargs.get('password', None)
+        self.new_password = kwargs.get('new_password', None)
+        self.uri = kwargs.get('uri', None)
+        self.user_id = kwargs.get('user_id', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters_py3.py
new file mode 100644
index 000000000000..c348936b53a9
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters_py3.py
@@ -0,0 +1,45 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class DataLakeAnalyticsCatalogCredentialUpdateParameters(Model):
+    """Data Lake Analytics catalog credential update parameters.
+
+    :param password: the current password for the credential and user with
+     access to the data source. This is required if the requester is not the
+     account owner.
+    :type password: str
+    :param new_password: the new password for the credential and user with
+     access to the data source.
+    :type new_password: str
+    :param uri: the URI identifier for the data source this credential can
+     connect to in the format :
+    :type uri: str
+    :param user_id: the object identifier for the user associated with this
+     credential with access to the data source.
+    :type user_id: str
+    """
+
+    _attribute_map = {
+        'password': {'key': 'password', 'type': 'str'},
+        'new_password': {'key': 'newPassword', 'type': 'str'},
+        'uri': {'key': 'uri', 'type': 'str'},
+        'user_id': {'key': 'userId', 'type': 'str'},
+    }
+
+    def __init__(self, *, password: str=None, new_password: str=None, uri: str=None, user_id: str=None, **kwargs) -> None:
+        super(DataLakeAnalyticsCatalogCredentialUpdateParameters, self).__init__(**kwargs)
+        self.password = password
+        self.new_password = new_password
+        self.uri = uri
+        self.user_id = user_id
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_management_client_enums.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_management_client_enums.py
index 29e9f3109f69..e202a80ec2f9 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_management_client_enums.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_management_client_enums.py
@@ -12,7 +12,7 @@
 from enum import Enum


-class AclType(Enum):
+class AclType(str, Enum):

     user_obj = "UserObj"
     group_obj = "GroupObj"
@@ -21,7 +21,7 @@ class AclType(Enum):
     group = "Group"


-class PermissionType(Enum):
+class PermissionType(str, Enum):

     none = "None"
     use = "Use"
@@ -32,7 +32,7 @@ class PermissionType(Enum):
     all = "All"


-class FileType(Enum):
+class FileType(str, Enum):

     assembly = "Assembly"
     resource = "Resource"
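Note: mixing str into the Enum bases above makes every member a real string, so enum values compare equal to their wire values and serialize without an explicit .value lookup. For example:

    import json
    from enum import Enum

    class AclType(str, Enum):               # mirrors the enums diff above
        user = "User"
        group = "Group"

    assert AclType.user == "User"           # False with a plain Enum
    print(json.dumps({'aceType': AclType.user}))   # {"aceType": "User"}
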
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters.py
index b99dc9d57f24..8a9cd7a8cbdb 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters.py
@@ -17,7 +17,9 @@
     deprecated and will be removed in the next release. Please use
     DataLakeAnalyticsCatalogCredentialCreateOrUpdateParameters instead.

-    :param password: the password for the secret to pass in
+    All required parameters must be populated in order to send to Azure.
+
+    :param password: Required. the password for the secret to pass in
     :type password: str
     :param uri: the URI identifier for the secret in the format
     :
@@ -33,7 +35,7 @@ class DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters(Model):
         'uri': {'key': 'uri', 'type': 'str'},
     }

-    def __init__(self, password, uri=None):
-        super(DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters, self).__init__()
-        self.password = password
-        self.uri = uri
+    def __init__(self, **kwargs):
+        super(DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters, self).__init__(**kwargs)
+        self.password = kwargs.get('password', None)
+        self.uri = kwargs.get('uri', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters_py3.py
new file mode 100644
index 000000000000..95aa1aea4d92
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters_py3.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters(Model):
+    """Data Lake Analytics catalog secret creation and update parameters. This is
+    deprecated and will be removed in the next release. Please use
+    DataLakeAnalyticsCatalogCredentialCreateOrUpdateParameters instead.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param password: Required. the password for the secret to pass in
+    :type password: str
+    :param uri: the URI identifier for the secret in the format
+     :
+    :type uri: str
+    """
+
+    _validation = {
+        'password': {'required': True},
+    }
+
+    _attribute_map = {
+        'password': {'key': 'password', 'type': 'str'},
+        'uri': {'key': 'uri', 'type': 'str'},
+    }
+
+    def __init__(self, *, password: str, uri: str=None, **kwargs) -> None:
+        super(DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters, self).__init__(**kwargs)
+        self.password = password
+        self.uri = uri
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name.py
index 12e0da950eac..f292532ecd32 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name.py
@@ -36,9 +36,9 @@
         'server': {'key': 'server', 'type': 'str'},
     }

-    def __init__(self, first_part=None, second_part=None, third_part=None, server=None):
-        super(DdlName, self).__init__()
-        self.first_part = first_part
-        self.second_part = second_part
-        self.third_part = third_part
-        self.server = server
+    def __init__(self, **kwargs):
+        super(DdlName, self).__init__(**kwargs)
+        self.first_part = kwargs.get('first_part', None)
+        self.second_part = kwargs.get('second_part', None)
+        self.third_part = kwargs.get('third_part', None)
+        self.server = kwargs.get('server', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name_py3.py
new file mode 100644
index 000000000000..cde8d3153014
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name_py3.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class DdlName(Model):
+    """A Data Lake Analytics DDL name item.
+
+    :param first_part: the name of the table associated with this database and
+     schema.
+    :type first_part: str
+    :param second_part: the name of the table associated with this database
+     and schema.
+    :type second_part: str
+    :param third_part: the name of the table associated with this database and
+     schema.
+    :type third_part: str
+    :param server: the name of the table associated with this database and
+     schema.
+    :type server: str
+    """
+
+    _attribute_map = {
+        'first_part': {'key': 'firstPart', 'type': 'str'},
+        'second_part': {'key': 'secondPart', 'type': 'str'},
+        'third_part': {'key': 'thirdPart', 'type': 'str'},
+        'server': {'key': 'server', 'type': 'str'},
+    }
+
+    def __init__(self, *, first_part: str=None, second_part: str=None, third_part: str=None, server: str=None, **kwargs) -> None:
+        super(DdlName, self).__init__(**kwargs)
+        self.first_part = first_part
+        self.second_part = second_part
+        self.third_part = third_part
+        self.server = server
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id.py
index be36df614737..ead952acc662 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id.py
@@ -27,7 +27,7 @@
         'version': {'key': 'version', 'type': 'str'},
     }

-    def __init__(self, name=None, version=None):
-        super(EntityId, self).__init__()
-        self.name = name
-        self.version = version
+    def __init__(self, **kwargs):
+        super(EntityId, self).__init__(**kwargs)
+        self.name = kwargs.get('name', None)
+        self.version = kwargs.get('version', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id_py3.py
new file mode 100644
index 000000000000..837237ccbed6
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id_py3.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class EntityId(Model):
+    """A Data Lake Analytics catalog entity identifier object.
+
+    :param name: the name of the external table associated with this database,
+     schema and table.
+    :type name: ~azure.mgmt.datalake.analytics.catalog.models.DdlName
+    :param version: the version of the external data source.
+    :type version: str
+    """
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'DdlName'},
+        'version': {'key': 'version', 'type': 'str'},
+    }
+
+    def __init__(self, *, name=None, version: str=None, **kwargs) -> None:
+        super(EntityId, self).__init__(**kwargs)
+        self.name = name
+        self.version = version
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table.py
index a9defe163035..f320fe801e9c 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table.py
@@ -27,7 +27,7 @@
         'data_source': {'key': 'dataSource', 'type': 'EntityId'},
     }

-    def __init__(self, table_name=None, data_source=None):
-        super(ExternalTable, self).__init__()
-        self.table_name = table_name
-        self.data_source = data_source
+    def __init__(self, **kwargs):
+        super(ExternalTable, self).__init__(**kwargs)
+        self.table_name = kwargs.get('table_name', None)
+        self.data_source = kwargs.get('data_source', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table_py3.py
new file mode 100644
index 000000000000..c0ce15c2c40f
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table_py3.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class ExternalTable(Model):
+    """A Data Lake Analytics catalog external table item.
+
+    :param table_name: the name of the table associated with this database and
+     schema.
+    :type table_name: str
+    :param data_source: the data source associated with this external table.
+    :type data_source: ~azure.mgmt.datalake.analytics.catalog.models.EntityId
+    """
+
+    _attribute_map = {
+        'table_name': {'key': 'tableName', 'type': 'str'},
+        'data_source': {'key': 'dataSource', 'type': 'EntityId'},
+    }
+
+    def __init__(self, *, table_name: str=None, data_source=None, **kwargs) -> None:
+        super(ExternalTable, self).__init__(**kwargs)
+        self.table_name = table_name
+        self.data_source = data_source
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info.py
index bfdaee960702..3f19e903f478 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info.py
@@ -26,7 +26,7 @@
         'type': {'key': 'type', 'type': 'str'},
     }

-    def __init__(self, name=None, type=None):
-        super(TypeFieldInfo, self).__init__()
-        self.name = name
-        self.type = type
+    def __init__(self, **kwargs):
+        super(TypeFieldInfo, self).__init__(**kwargs)
+        self.name = kwargs.get('name', None)
+        self.type = kwargs.get('type', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info_py3.py
new file mode 100644
index 000000000000..629d08f65304
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info_py3.py
@@ -0,0 +1,32 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class TypeFieldInfo(Model):
+    """A Data Lake Analytics catalog type field information item.
+
+    :param name: the name of the field associated with this type.
+    :type name: str
+    :param type: the type of the field.
+    :type type: str
+    """
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'type': {'key': 'type', 'type': 'str'},
+    }
+
+    def __init__(self, *, name: str=None, type: str=None, **kwargs) -> None:
+        super(TypeFieldInfo, self).__init__(**kwargs)
+        self.name = name
+        self.type = type
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly.py
index 8c2f96ea3152..c3adfeab895c 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly.py
@@ -51,12 +51,12 @@ class USqlAssembly(CatalogItem):
         'dependencies': {'key': 'dependencies', 'type': '[USqlAssemblyDependencyInfo]'},
     }

-    def __init__(self, compute_account_name=None, version=None, database_name=None, name=None, clr_name=None, is_visible=None, is_user_defined=None, files=None, dependencies=None):
-        super(USqlAssembly, self).__init__(compute_account_name=compute_account_name, version=version)
-        self.database_name = database_name
-        self.name = name
-        self.clr_name = clr_name
-        self.is_visible = is_visible
-        self.is_user_defined = is_user_defined
-        self.files = files
-        self.dependencies = dependencies
+    def __init__(self, **kwargs):
+        super(USqlAssembly, self).__init__(**kwargs)
+        self.database_name = kwargs.get('database_name', None)
+        self.name = kwargs.get('name', None)
+        self.clr_name = kwargs.get('clr_name', None)
+        self.is_visible = kwargs.get('is_visible', None)
+        self.is_user_defined = kwargs.get('is_user_defined', None)
+        self.files = kwargs.get('files', None)
+        self.dependencies = kwargs.get('dependencies', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr.py
index ab4c6bb9567a..23412eb5f4e5 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr.py
@@ -35,8 +35,8 @@ class USqlAssemblyClr(CatalogItem):
         'clr_name': {'key': 'clrName', 'type': 'str'},
     }

-    def __init__(self, compute_account_name=None, version=None, database_name=None, name=None, clr_name=None):
-        super(USqlAssemblyClr, self).__init__(compute_account_name=compute_account_name, version=version)
-        self.database_name = database_name
-        self.name = name
-        self.clr_name = clr_name
+    def __init__(self, **kwargs):
+        super(USqlAssemblyClr, self).__init__(**kwargs)
+        self.database_name = kwargs.get('database_name', None)
+        self.name = kwargs.get('name', None)
+        self.clr_name = kwargs.get('clr_name', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr_py3.py
new file mode 100644
index 000000000000..50579d120536
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr_py3.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from .catalog_item_py3 import CatalogItem
+
+
+class USqlAssemblyClr(CatalogItem):
+    """A Data Lake Analytics catalog U-SQL assembly CLR item.
+
+    :param compute_account_name: the name of the Data Lake Analytics account.
+    :type compute_account_name: str
+    :param version: the version of the catalog item.
+    :type version: str
+    :param database_name: the name of the database.
+    :type database_name: str
+    :param name: the name of the assembly.
+    :type name: str
+    :param clr_name: the name of the CLR.
+    :type clr_name: str
+    """
+
+    _attribute_map = {
+        'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
+        'version': {'key': 'version', 'type': 'str'},
+        'database_name': {'key': 'databaseName', 'type': 'str'},
+        'name': {'key': 'assemblyClrName', 'type': 'str'},
+        'clr_name': {'key': 'clrName', 'type': 'str'},
+    }
+
+    def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, clr_name: str=None, **kwargs) -> None:
+        super(USqlAssemblyClr, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs)
+        self.database_name = database_name
+        self.name = name
+        self.clr_name = clr_name
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_dependency_info.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_dependency_info.py
index 9e67a40ed60a..b9995d9f20d0 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_dependency_info.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_dependency_info.py
@@ -23,6 +23,6 @@ class USqlAssemblyDependencyInfo(Model):
         'entity_id': {'key': 'entityId', 'type': 'EntityId'},
     }

-    def __init__(self, entity_id=None):
-        super(USqlAssemblyDependencyInfo, self).__init__()
-        self.entity_id = entity_id
+    def __init__(self, **kwargs):
+        super(USqlAssemblyDependencyInfo, self).__init__(**kwargs)
+        self.entity_id = kwargs.get('entity_id', None)
+    :type entity_id: ~azure.mgmt.datalake.analytics.catalog.models.EntityId
+    """
+
+    _attribute_map = {
+        'entity_id': {'key': 'entityId', 'type': 'EntityId'},
+    }
+
+    def __init__(self, *, entity_id=None, **kwargs) -> None:
+        super(USqlAssemblyDependencyInfo, self).__init__(**kwargs)
+        self.entity_id = entity_id
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info.py
index 6b88668fadb7..19d3135999b3 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info.py
@@ -30,8 +30,8 @@ class USqlAssemblyFileInfo(Model):
         'content_path': {'key': 'contentPath', 'type': 'str'},
     }
 
-    def __init__(self, type=None, original_path=None, content_path=None):
-        super(USqlAssemblyFileInfo, self).__init__()
-        self.type = type
-        self.original_path = original_path
-        self.content_path = content_path
+    def __init__(self, **kwargs):
+        super(USqlAssemblyFileInfo, self).__init__(**kwargs)
+        self.type = kwargs.get('type', None)
+        self.original_path = kwargs.get('original_path', None)
+        self.content_path = kwargs.get('content_path', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info_py3.py
new file mode 100644
index 000000000000..3d6889c04be9
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info_py3.py
@@ -0,0 +1,37 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class USqlAssemblyFileInfo(Model):
+    """A Data Lake Analytics catalog U-SQL assembly file information item.
+
+    :param type: the assembly file type. Possible values include: 'Assembly',
+     'Resource', 'Nodeploy'
+    :type type: str or ~azure.mgmt.datalake.analytics.catalog.models.FileType
+    :param original_path: the original path to the assembly file.
+    :type original_path: str
+    :param content_path: the content path to the assembly file.
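The _attribute_map in each model is what msrest uses to translate between Python attribute names and the camelCase wire names. A small sketch of that round trip, assuming the standard behavior of msrest's Model.serialize() (the paths are illustrative):

    from azure.mgmt.datalake.analytics.catalog.models import USqlAssemblyFileInfo

    info = USqlAssemblyFileInfo(type='Assembly',
                                original_path='/local/MyAssembly.dll',
                                content_path='adl://myacct/assemblies/MyAssembly.dll')
    # serialize() applies the _attribute_map, so the payload carries the
    # REST names ('originalPath', 'contentPath') rather than the Python ones.
    print(info.serialize())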
+ :type content_path: str + """ + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'original_path': {'key': 'originalPath', 'type': 'str'}, + 'content_path': {'key': 'contentPath', 'type': 'str'}, + } + + def __init__(self, *, type=None, original_path: str=None, content_path: str=None, **kwargs) -> None: + super(USqlAssemblyFileInfo, self).__init__(**kwargs) + self.type = type + self.original_path = original_path + self.content_path = content_path diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_py3.py new file mode 100644 index 000000000000..4ebf178e728b --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlAssembly(CatalogItem): + """A Data Lake Analytics catalog U-SQL Assembly. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the assembly. + :type name: str + :param clr_name: the name of the CLR. + :type clr_name: str + :param is_visible: the switch indicating if this assembly is visible or + not. + :type is_visible: bool + :param is_user_defined: the switch indicating if this assembly is user + defined or not. 
+ :type is_user_defined: bool + :param files: the list of files associated with the assembly + :type files: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyFileInfo] + :param dependencies: the list of dependencies associated with the assembly + :type dependencies: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyDependencyInfo] + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'assemblyName', 'type': 'str'}, + 'clr_name': {'key': 'clrName', 'type': 'str'}, + 'is_visible': {'key': 'isVisible', 'type': 'bool'}, + 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, + 'files': {'key': 'files', 'type': '[USqlAssemblyFileInfo]'}, + 'dependencies': {'key': 'dependencies', 'type': '[USqlAssemblyDependencyInfo]'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, clr_name: str=None, is_visible: bool=None, is_user_defined: bool=None, files=None, dependencies=None, **kwargs) -> None: + super(USqlAssembly, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.name = name + self.clr_name = clr_name + self.is_visible = is_visible + self.is_user_defined = is_user_defined + self.files = files + self.dependencies = dependencies diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential.py index 292d9c9d7abb..5596e8b8aa86 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential.py @@ -29,6 +29,6 @@ class USqlCredential(CatalogItem): 'name': {'key': 'credentialName', 'type': 'str'}, } - def __init__(self, compute_account_name=None, version=None, name=None): - super(USqlCredential, self).__init__(compute_account_name=compute_account_name, version=version) - self.name = name + def __init__(self, **kwargs): + super(USqlCredential, self).__init__(**kwargs) + self.name = kwargs.get('name', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential_py3.py new file mode 100644 index 000000000000..f1654bc03774 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlCredential(CatalogItem): + """A Data Lake Analytics catalog U-SQL credential item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. 
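The USqlAssembly model above nests the file and dependency models from the preceding hunks. A sketch of composing them (all values illustrative):

    from azure.mgmt.datalake.analytics.catalog.models import (
        USqlAssembly, USqlAssemblyFileInfo)

    asm = USqlAssembly(
        database_name='master',
        name='MyAssembly',
        clr_name='MyAssembly, Version=1.0.0.0',
        is_visible=True,
        files=[USqlAssemblyFileInfo(type='Assembly',
                                    content_path='adl://myacct/a.dll')],
    )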
+ :type version: str + :param name: the name of the credential. + :type name: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'name': {'key': 'credentialName', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, name: str=None, **kwargs) -> None: + super(USqlCredential, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.name = name diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database.py index ff1657b30b38..93fedcb63ae6 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database.py @@ -29,6 +29,6 @@ class USqlDatabase(CatalogItem): 'name': {'key': 'databaseName', 'type': 'str'}, } - def __init__(self, compute_account_name=None, version=None, name=None): - super(USqlDatabase, self).__init__(compute_account_name=compute_account_name, version=version) - self.name = name + def __init__(self, **kwargs): + super(USqlDatabase, self).__init__(**kwargs) + self.name = kwargs.get('name', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_py3.py new file mode 100644 index 000000000000..00cfffdf5ccd --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlDatabase(CatalogItem): + """A Data Lake Analytics catalog U-SQL database item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param name: the name of the database. 
+ :type name: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'name': {'key': 'databaseName', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, name: str=None, **kwargs) -> None: + super(USqlDatabase, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.name = name diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column.py index 9ac9ec6aa4b9..5d413c63b7d8 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column.py @@ -27,7 +27,7 @@ class USqlDirectedColumn(Model): 'descending': {'key': 'descending', 'type': 'bool'}, } - def __init__(self, name=None, descending=None): - super(USqlDirectedColumn, self).__init__() - self.name = name - self.descending = descending + def __init__(self, **kwargs): + super(USqlDirectedColumn, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.descending = kwargs.get('descending', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column_py3.py new file mode 100644 index 000000000000..c699a39c407b --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class USqlDirectedColumn(Model): + """A Data Lake Analytics catalog U-SQL directed column item. + + :param name: the name of the index in the table. + :type name: str + :param descending: the switch indicating if the index is descending or + not. 
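Deserialization goes the other way: given a payload keyed by the wire names, msrest can rebuild the model. A sketch, assuming Model.from_dict as found in current msrest releases:

    from azure.mgmt.datalake.analytics.catalog.models import USqlDatabase

    payload = {'computeAccountName': 'myadla', 'version': '1',
               'databaseName': 'master'}
    db = USqlDatabase.from_dict(payload)
    print(db.name)  # 'master', resolved through the 'databaseName' map entry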
+ :type descending: bool + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'descending': {'key': 'descending', 'type': 'bool'}, + } + + def __init__(self, *, name: str=None, descending: bool=None, **kwargs) -> None: + super(USqlDirectedColumn, self).__init__(**kwargs) + self.name = name + self.descending = descending diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info.py index 46c47230d143..c5f27b3ade7d 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info.py @@ -34,9 +34,9 @@ class USqlDistributionInfo(Model): 'dynamic_count': {'key': 'dynamicCount', 'type': 'int'}, } - def __init__(self, type=None, keys=None, count=None, dynamic_count=None): - super(USqlDistributionInfo, self).__init__() - self.type = type - self.keys = keys - self.count = count - self.dynamic_count = dynamic_count + def __init__(self, **kwargs): + super(USqlDistributionInfo, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.keys = kwargs.get('keys', None) + self.count = kwargs.get('count', None) + self.dynamic_count = kwargs.get('dynamic_count', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info_py3.py new file mode 100644 index 000000000000..ba87c79220fa --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class USqlDistributionInfo(Model): + """A Data Lake Analytics catalog U-SQL distribution information object. + + :param type: the type of this distribution. + :type type: int + :param keys: the list of directed columns in the distribution + :type keys: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlDirectedColumn] + :param count: the count of indices using this distribution. + :type count: int + :param dynamic_count: the dynamic count of indices using this + distribution. 
+ :type dynamic_count: int + """ + + _attribute_map = { + 'type': {'key': 'type', 'type': 'int'}, + 'keys': {'key': 'keys', 'type': '[USqlDirectedColumn]'}, + 'count': {'key': 'count', 'type': 'int'}, + 'dynamic_count': {'key': 'dynamicCount', 'type': 'int'}, + } + + def __init__(self, *, type: int=None, keys=None, count: int=None, dynamic_count: int=None, **kwargs) -> None: + super(USqlDistributionInfo, self).__init__(**kwargs) + self.type = type + self.keys = keys + self.count = count + self.dynamic_count = dynamic_count diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source.py index 524e08b35984..390b2359c172 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source.py @@ -43,10 +43,10 @@ class USqlExternalDataSource(CatalogItem): 'pushdown_types': {'key': 'pushdownTypes', 'type': '[str]'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, name=None, provider=None, provider_string=None, pushdown_types=None): - super(USqlExternalDataSource, self).__init__(compute_account_name=compute_account_name, version=version) - self.database_name = database_name - self.name = name - self.provider = provider - self.provider_string = provider_string - self.pushdown_types = pushdown_types + def __init__(self, **kwargs): + super(USqlExternalDataSource, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.name = kwargs.get('name', None) + self.provider = kwargs.get('provider', None) + self.provider_string = kwargs.get('provider_string', None) + self.pushdown_types = kwargs.get('pushdown_types', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source_py3.py new file mode 100644 index 000000000000..946ed8479b4d --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlExternalDataSource(CatalogItem): + """A Data Lake Analytics catalog U-SQL external datasource item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the external data source. + :type name: str + :param provider: the name of the provider for the external data source. + :type provider: str + :param provider_string: the name of the provider string for the external + data source. 
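USqlDistributionInfo pairs with the USqlDirectedColumn model defined just before it. A sketch of building one (the numeric distribution type code is a placeholder, not a value taken from this diff):

    from azure.mgmt.datalake.analytics.catalog.models import (
        USqlDistributionInfo, USqlDirectedColumn)

    dist = USqlDistributionInfo(
        type=2,  # placeholder distribution type code
        keys=[USqlDirectedColumn(name='UserId', descending=False)],
        count=10,
    )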
+ :type provider_string: str + :param pushdown_types: the list of types to push down from the external + data source. + :type pushdown_types: list[str] + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'externalDataSourceName', 'type': 'str'}, + 'provider': {'key': 'provider', 'type': 'str'}, + 'provider_string': {'key': 'providerString', 'type': 'str'}, + 'pushdown_types': {'key': 'pushdownTypes', 'type': '[str]'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, provider: str=None, provider_string: str=None, pushdown_types=None, **kwargs) -> None: + super(USqlExternalDataSource, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.name = name + self.provider = provider + self.provider_string = provider_string + self.pushdown_types = pushdown_types diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index.py index 0a7ea50f05f0..bdb47ae18d97 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index.py @@ -54,15 +54,15 @@ class USqlIndex(Model): 'is_unique': {'key': 'isUnique', 'type': 'bool'}, } - def __init__(self, name=None, index_keys=None, columns=None, distribution_info=None, partition_function=None, partition_key_list=None, stream_names=None, is_columnstore=None, index_id=None, is_unique=None): - super(USqlIndex, self).__init__() - self.name = name - self.index_keys = index_keys - self.columns = columns - self.distribution_info = distribution_info - self.partition_function = partition_function - self.partition_key_list = partition_key_list - self.stream_names = stream_names - self.is_columnstore = is_columnstore - self.index_id = index_id - self.is_unique = is_unique + def __init__(self, **kwargs): + super(USqlIndex, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.index_keys = kwargs.get('index_keys', None) + self.columns = kwargs.get('columns', None) + self.distribution_info = kwargs.get('distribution_info', None) + self.partition_function = kwargs.get('partition_function', None) + self.partition_key_list = kwargs.get('partition_key_list', None) + self.stream_names = kwargs.get('stream_names', None) + self.is_columnstore = kwargs.get('is_columnstore', None) + self.index_id = kwargs.get('index_id', None) + self.is_unique = kwargs.get('is_unique', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index_py3.py new file mode 100644 index 000000000000..899591442bb1 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index_py3.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
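For USqlExternalDataSource, completed a few hunks above, pushdown_types is a plain list of type names. A sketch with illustrative provider values (nothing here is prescribed by the diff):

    from azure.mgmt.datalake.analytics.catalog.models import USqlExternalDataSource

    src = USqlExternalDataSource(
        database_name='master',
        name='MySqlServerSource',
        provider='SQLSERVER',                           # illustrative
        provider_string='Server=myhost;Database=mydb',  # illustrative
        pushdown_types=['int', 'string'],
    )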
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class USqlIndex(Model):
+    """A Data Lake Analytics catalog U-SQL table index item.
+
+    :param name: the name of the index in the table.
+    :type name: str
+    :param index_keys: the list of directed columns in the index
+    :type index_keys:
+     list[~azure.mgmt.datalake.analytics.catalog.models.USqlDirectedColumn]
+    :param columns: the list of columns in the index
+    :type columns: list[str]
+    :param distribution_info: the distributions info of the index
+    :type distribution_info:
+     ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo
+    :param partition_function: partition function ID for the index.
+    :type partition_function: str
+    :param partition_key_list: the list of partition keys in the index
+    :type partition_key_list: list[str]
+    :param stream_names: the list of full paths to the streams that contain
+     this index in the DataLake account.
+    :type stream_names: list[str]
+    :param is_columnstore: the switch indicating if this index is a
+     columnstore index.
+    :type is_columnstore: bool
+    :param index_id: the ID of this index within the table.
+    :type index_id: int
+    :param is_unique: the switch indicating if this index is a unique index.
+    :type is_unique: bool
+    """
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'index_keys': {'key': 'indexKeys', 'type': '[USqlDirectedColumn]'},
+        'columns': {'key': 'columns', 'type': '[str]'},
+        'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'},
+        'partition_function': {'key': 'partitionFunction', 'type': 'str'},
+        'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'},
+        'stream_names': {'key': 'streamNames', 'type': '[str]'},
+        'is_columnstore': {'key': 'isColumnstore', 'type': 'bool'},
+        'index_id': {'key': 'indexId', 'type': 'int'},
+        'is_unique': {'key': 'isUnique', 'type': 'bool'},
+    }
+
+    def __init__(self, *, name: str=None, index_keys=None, columns=None, distribution_info=None, partition_function: str=None, partition_key_list=None, stream_names=None, is_columnstore: bool=None, index_id: int=None, is_unique: bool=None, **kwargs) -> None:
+        super(USqlIndex, self).__init__(**kwargs)
+        self.name = name
+        self.index_keys = index_keys
+        self.columns = columns
+        self.distribution_info = distribution_info
+        self.partition_function = partition_function
+        self.partition_key_list = partition_key_list
+        self.stream_names = stream_names
+        self.is_columnstore = is_columnstore
+        self.index_id = index_id
+        self.is_unique = is_unique
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package.py
index bcb62ae92f13..bbb6ed25858e 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package.py
@@ -39,9 +39,9 @@ class USqlPackage(CatalogItem):
         'definition': {'key': 'definition', 'type': 'str'},
     }
 
-    def __init__(self, compute_account_name=None, version=None, database_name=None, schema_name=None, name=None, definition=None):
-        super(USqlPackage, self).__init__(compute_account_name=compute_account_name, version=version)
-        self.database_name = database_name
-        self.schema_name = schema_name
-        self.name = name
-        self.definition = definition
+    def __init__(self, **kwargs):
+        super(USqlPackage, self).__init__(**kwargs)
+        self.database_name = kwargs.get('database_name', None)
+        self.schema_name = kwargs.get('schema_name', None)
+        self.name = kwargs.get('name', None)
+        self.definition = kwargs.get('definition', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package_py3.py
new file mode 100644
index 000000000000..e1f79d784766
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package_py3.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from .catalog_item_py3 import CatalogItem
+
+
+class USqlPackage(CatalogItem):
+    """A Data Lake Analytics catalog U-SQL package item.
+
+    :param compute_account_name: the name of the Data Lake Analytics account.
+    :type compute_account_name: str
+    :param version: the version of the catalog item.
+    :type version: str
+    :param database_name: the name of the database containing the package.
+    :type database_name: str
+    :param schema_name: the name of the schema associated with this package
+     and database.
+    :type schema_name: str
+    :param name: the name of the package.
+    :type name: str
+    :param definition: the definition of the package.
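The USqlIndex model defined a few hunks above aggregates the directed-column and distribution models. A sketch of a clustered-index description as this model could carry it (values illustrative):

    from azure.mgmt.datalake.analytics.catalog.models import (
        USqlIndex, USqlDirectedColumn, USqlDistributionInfo)

    idx = USqlIndex(
        name='clusteredIdx',
        index_keys=[USqlDirectedColumn(name='UserId', descending=False)],
        columns=['UserId'],
        distribution_info=USqlDistributionInfo(type=2, count=4),  # placeholder type code
        is_columnstore=False,
        is_unique=False,
    )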
+ :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'packageName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: + super(USqlPackage, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.definition = definition diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure.py index 94123ace034e..92874b2d9bda 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure.py @@ -39,9 +39,9 @@ class USqlProcedure(CatalogItem): 'definition': {'key': 'definition', 'type': 'str'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, schema_name=None, name=None, definition=None): - super(USqlProcedure, self).__init__(compute_account_name=compute_account_name, version=version) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.definition = definition + def __init__(self, **kwargs): + super(USqlProcedure, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.definition = kwargs.get('definition', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure_py3.py new file mode 100644 index 000000000000..98f9a30e6267 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlProcedure(CatalogItem): + """A Data Lake Analytics catalog U-SQL procedure item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this procedure + and database. + :type schema_name: str + :param name: the name of the procedure. + :type name: str + :param definition: the defined query of the procedure. 
+ :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'procName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: + super(USqlProcedure, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.definition = definition diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema.py index a9a2d9f40b52..1a292ca673f6 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema.py @@ -32,7 +32,7 @@ class USqlSchema(CatalogItem): 'name': {'key': 'schemaName', 'type': 'str'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, name=None): - super(USqlSchema, self).__init__(compute_account_name=compute_account_name, version=version) - self.database_name = database_name - self.name = name + def __init__(self, **kwargs): + super(USqlSchema, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.name = kwargs.get('name', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema_py3.py new file mode 100644 index 000000000000..bb7f8d3de46a --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlSchema(CatalogItem): + """A Data Lake Analytics catalog U-SQL schema item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the schema. 
+ :type name: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'schemaName', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, **kwargs) -> None: + super(USqlSchema, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.name = name diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret.py index 17113d2b7b73..5054e0ecaa44 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret.py @@ -43,10 +43,10 @@ class USqlSecret(CatalogItem): 'password': {'key': 'password', 'type': 'str'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, name=None, creation_time=None, uri=None, password=None): - super(USqlSecret, self).__init__(compute_account_name=compute_account_name, version=version) - self.database_name = database_name - self.name = name - self.creation_time = creation_time - self.uri = uri - self.password = password + def __init__(self, **kwargs): + super(USqlSecret, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.name = kwargs.get('name', None) + self.creation_time = kwargs.get('creation_time', None) + self.uri = kwargs.get('uri', None) + self.password = kwargs.get('password', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret_py3.py new file mode 100644 index 000000000000..a493f7576c8a --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlSecret(CatalogItem): + """A Data Lake Analytics catalog U-SQL secret item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the secret. + :type name: str + :param creation_time: the creation time of the credential object. This is + the only information returned about a secret from a GET. 
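As the USqlSecret docstring notes, creation_time is the only field a GET returns for a secret. Its attribute map entry (just below) declares it 'iso-8601', so msrest renders datetimes as ISO timestamps on the wire; a sketch, assuming standard msrest serialization:

    import datetime

    from azure.mgmt.datalake.analytics.catalog.models import USqlSecret

    secret = USqlSecret(database_name='master', name='MySecret',
                        creation_time=datetime.datetime(2018, 6, 1, 12, 0, 0))
    # The datetime comes out as an ISO string, e.g. '2018-06-01T12:00:00.000Z'
    # (the exact rendering is msrest's choice).
    print(secret.serialize())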
+ :type creation_time: datetime + :param uri: the URI identifier for the secret in the format + : + :type uri: str + :param password: the password for the secret to pass in + :type password: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'secretName', 'type': 'str'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'uri': {'key': 'uri', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, creation_time=None, uri: str=None, password: str=None, **kwargs) -> None: + super(USqlSecret, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.name = name + self.creation_time = creation_time + self.uri = uri + self.password = password diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table.py index 93abe1c000ca..4e9bc0830f05 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table.py @@ -55,13 +55,13 @@ class USqlTable(CatalogItem): 'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, schema_name=None, name=None, column_list=None, index_list=None, partition_key_list=None, external_table=None, distribution_info=None): - super(USqlTable, self).__init__(compute_account_name=compute_account_name, version=version) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.column_list = column_list - self.index_list = index_list - self.partition_key_list = partition_key_list - self.external_table = external_table - self.distribution_info = distribution_info + def __init__(self, **kwargs): + super(USqlTable, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.column_list = kwargs.get('column_list', None) + self.index_list = kwargs.get('index_list', None) + self.partition_key_list = kwargs.get('partition_key_list', None) + self.external_table = kwargs.get('external_table', None) + self.distribution_info = kwargs.get('distribution_info', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column.py index 07807ab36444..b30cdc017ce6 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column.py @@ -27,7 +27,7 @@ class USqlTableColumn(Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__(self, name=None, type=None): - super(USqlTableColumn, self).__init__() - self.name = name - self.type = type + def __init__(self, **kwargs): + super(USqlTableColumn, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) diff --git 
a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column_py3.py new file mode 100644 index 000000000000..e1bd9931852e --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class USqlTableColumn(Model): + """A Data Lake Analytics catalog U-SQL table column item. + + :param name: the name of the column in the table. + :type name: str + :param type: the object type of the specified column (such as + System.String). + :type type: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, type: str=None, **kwargs) -> None: + super(USqlTableColumn, self).__init__(**kwargs) + self.name = name + self.type = type diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment.py new file mode 100644 index 000000000000..565a1246737f --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class USqlTableFragment(Model): + """A Data Lake Analytics catalog U-SQL table fragment item. + + :param parent_id: the parent object Id of the table fragment. The parent + could be a table or table partition. + :type parent_id: str + :param fragment_id: the version of the catalog item. + :type fragment_id: str + :param index_id: the ordinal of the index which contains the table + fragment. + :type index_id: int + :param size: the data size of the table fragment in bytes. + :type size: long + :param row_count: the number of rows in the table fragment. + :type row_count: long + :param create_date: the creation time of the table fragment. 
+    :type create_date: datetime
+    """
+
+    _attribute_map = {
+        'parent_id': {'key': 'parentId', 'type': 'str'},
+        'fragment_id': {'key': 'fragmentId', 'type': 'str'},
+        'index_id': {'key': 'indexId', 'type': 'int'},
+        'size': {'key': 'size', 'type': 'long'},
+        'row_count': {'key': 'rowCount', 'type': 'long'},
+        'create_date': {'key': 'createDate', 'type': 'iso-8601'},
+    }
+
+    def __init__(self, **kwargs):
+        super(USqlTableFragment, self).__init__(**kwargs)
+        self.parent_id = kwargs.get('parent_id', None)
+        self.fragment_id = kwargs.get('fragment_id', None)
+        self.index_id = kwargs.get('index_id', None)
+        self.size = kwargs.get('size', None)
+        self.row_count = kwargs.get('row_count', None)
+        self.create_date = kwargs.get('create_date', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_paged.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_paged.py
new file mode 100644
index 000000000000..f0b3e7c68464
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_paged.py
@@ -0,0 +1,27 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.paging import Paged
+
+
+class USqlTableFragmentPaged(Paged):
+    """
+    A paging container for iterating over a list of :class:`USqlTableFragment
+    <azure.mgmt.datalake.analytics.catalog.models.USqlTableFragment>` object
+    """
+
+    _attribute_map = {
+        'next_link': {'key': 'nextLink', 'type': 'str'},
+        'current_page': {'key': 'value', 'type': '[USqlTableFragment]'}
+    }
+
+    def __init__(self, *args, **kwargs):
+
+        super(USqlTableFragmentPaged, self).__init__(*args, **kwargs)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_py3.py
new file mode 100644
index 000000000000..f211e39638a4
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_py3.py
@@ -0,0 +1,50 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class USqlTableFragment(Model):
+    """A Data Lake Analytics catalog U-SQL table fragment item.
+
+    :param parent_id: the parent object Id of the table fragment. The parent
+     could be a table or table partition.
+    :type parent_id: str
+    :param fragment_id: the version of the catalog item.
+    :type fragment_id: str
+    :param index_id: the ordinal of the index which contains the table
+     fragment.
+    :type index_id: int
+    :param size: the data size of the table fragment in bytes.
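USqlTableFragmentPaged follows msrest's Paged protocol, so iterating it fetches follow-up pages on demand. A sketch of consuming it; the list_table_fragments operation name is an assumption, since the operations module is not part of this excerpt:

    from msrestazure.azure_active_directory import ServicePrincipalCredentials
    from azure.mgmt.datalake.analytics.catalog import (
        DataLakeAnalyticsCatalogManagementClient)

    creds = ServicePrincipalCredentials(client_id='...', secret='...',
                                        tenant='...')  # placeholder values
    client = DataLakeAnalyticsCatalogManagementClient(
        creds, 'azuredatalakeanalytics.net')
    # Hypothetical operation returning USqlTableFragmentPaged; any such call
    # yields items lazily, page by page:
    for frag in client.catalog.list_table_fragments('myadla', 'master',
                                                    'dbo', 'MyTable'):
        print(frag.fragment_id, frag.row_count)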
+ :type size: long + :param row_count: the number of rows in the table fragment. + :type row_count: long + :param create_date: the creation time of the table fragment. + :type create_date: datetime + """ + + _attribute_map = { + 'parent_id': {'key': 'parentId', 'type': 'str'}, + 'fragment_id': {'key': 'fragmentId', 'type': 'str'}, + 'index_id': {'key': 'indexId', 'type': 'int'}, + 'size': {'key': 'size', 'type': 'long'}, + 'row_count': {'key': 'rowCount', 'type': 'long'}, + 'create_date': {'key': 'createDate', 'type': 'iso-8601'}, + } + + def __init__(self, *, parent_id: str=None, fragment_id: str=None, index_id: int=None, size: int=None, row_count: int=None, create_date=None, **kwargs) -> None: + super(USqlTableFragment, self).__init__(**kwargs) + self.parent_id = parent_id + self.fragment_id = fragment_id + self.index_id = index_id + self.size = size + self.row_count = row_count + self.create_date = create_date diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition.py index e5dad9babe90..cc2f0acd03ef 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition.py @@ -48,12 +48,12 @@ class USqlTablePartition(CatalogItem): 'create_date': {'key': 'createDate', 'type': 'iso-8601'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, schema_name=None, name=None, parent_name=None, index_id=None, label=None, create_date=None): - super(USqlTablePartition, self).__init__(compute_account_name=compute_account_name, version=version) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.parent_name = parent_name - self.index_id = index_id - self.label = label - self.create_date = create_date + def __init__(self, **kwargs): + super(USqlTablePartition, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.parent_name = kwargs.get('parent_name', None) + self.index_id = kwargs.get('index_id', None) + self.label = kwargs.get('label', None) + self.create_date = kwargs.get('create_date', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition_py3.py new file mode 100644 index 000000000000..3fd74aaf5613 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlTablePartition(CatalogItem): + """A Data Lake Analytics catalog U-SQL table partition item. + + :param compute_account_name: the name of the Data Lake Analytics account. 
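USqlTablePartition, reworked just above, keeps its labels as a plain list of strings. A sketch of a partition description (values illustrative; the DdlName parent is omitted for brevity):

    from azure.mgmt.datalake.analytics.catalog.models import USqlTablePartition

    part = USqlTablePartition(
        database_name='master',
        schema_name='dbo',
        name='p_2018_06',
        index_id=1,
        label=['2018', 'june'],
    )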
+    :type compute_account_name: str
+    :param version: the version of the catalog item.
+    :type version: str
+    :param database_name: the name of the database.
+    :type database_name: str
+    :param schema_name: the name of the schema associated with this table
+     partition and database.
+    :type schema_name: str
+    :param name: the name of the table partition.
+    :type name: str
+    :param parent_name: the Ddl object of the partition's parent.
+    :type parent_name: ~azure.mgmt.datalake.analytics.catalog.models.DdlName
+    :param index_id: the index ID for this partition.
+    :type index_id: int
+    :param label: the list of labels associated with this partition.
+    :type label: list[str]
+    :param create_date: the creation time of the partition
+    :type create_date: datetime
+    """
+
+    _attribute_map = {
+        'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
+        'version': {'key': 'version', 'type': 'str'},
+        'database_name': {'key': 'databaseName', 'type': 'str'},
+        'schema_name': {'key': 'schemaName', 'type': 'str'},
+        'name': {'key': 'partitionName', 'type': 'str'},
+        'parent_name': {'key': 'parentName', 'type': 'DdlName'},
+        'index_id': {'key': 'indexId', 'type': 'int'},
+        'label': {'key': 'label', 'type': '[str]'},
+        'create_date': {'key': 'createDate', 'type': 'iso-8601'},
+    }
+
+    def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, parent_name=None, index_id: int=None, label=None, create_date=None, **kwargs) -> None:
+        super(USqlTablePartition, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs)
+        self.database_name = database_name
+        self.schema_name = schema_name
+        self.name = name
+        self.parent_name = parent_name
+        self.index_id = index_id
+        self.label = label
+        self.create_date = create_date
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview.py
new file mode 100644
index 000000000000..9962446d2e32
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class USqlTablePreview(Model):
+    """A Data Lake Analytics catalog table or partition preview rows item.
+
+    :param total_row_count: the total number of rows in the table or
+     partition.
+    :type total_row_count: long
+    :param total_column_count: the total number of columns in the table or
+     partition.
+    :type total_column_count: long
+    :param rows: the rows of the table or partition preview, where each row is
+     an array of string representations of the row's values. Note: Byte arrays
+     will appear as base-64 encoded values, SqlMap and SqlArray objects will
+     appear as escaped JSON objects, and DateTime objects will appear as ISO
+     formatted UTC date-times.
+    :type rows: list[list[str]]
+    :param truncated: true if the amount of data in the response is less than
+     expected due to the preview operation's size limitations. This can occur
+     if the requested rows or row counts are too large.
+    :type truncated: bool
+    :param schema: the schema of the table or partition.
+    :type schema:
+     list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn]
+    """
+
+    _attribute_map = {
+        'total_row_count': {'key': 'totalRowCount', 'type': 'long'},
+        'total_column_count': {'key': 'totalColumnCount', 'type': 'long'},
+        'rows': {'key': 'rows', 'type': '[[str]]'},
+        'truncated': {'key': 'truncated', 'type': 'bool'},
+        'schema': {'key': 'schema', 'type': '[USqlTableColumn]'},
+    }
+
+    def __init__(self, **kwargs):
+        super(USqlTablePreview, self).__init__(**kwargs)
+        self.total_row_count = kwargs.get('total_row_count', None)
+        self.total_column_count = kwargs.get('total_column_count', None)
+        self.rows = kwargs.get('rows', None)
+        self.truncated = kwargs.get('truncated', None)
+        self.schema = kwargs.get('schema', None)
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview_py3.py
new file mode 100644
index 000000000000..972044d66119
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview_py3.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class USqlTablePreview(Model):
+    """A Data Lake Analytics catalog table or partition preview rows item.
+
+    :param total_row_count: the total number of rows in the table or
+     partition.
+    :type total_row_count: long
+    :param total_column_count: the total number of columns in the table or
+     partition.
+    :type total_column_count: long
+    :param rows: the rows of the table or partition preview, where each row is
+     an array of string representations of the row's values. Note: Byte arrays
+     will appear as base-64 encoded values, SqlMap and SqlArray objects will
+     appear as escaped JSON objects, and DateTime objects will appear as ISO
+     formatted UTC date-times.
+    :type rows: list[list[str]]
+    :param truncated: true if the amount of data in the response is less than
+     expected due to the preview operation's size limitations. This can occur
+     if the requested rows or row counts are too large.
+    :type truncated: bool
+    :param schema: the schema of the table or partition.
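Given the encoding rules spelled out in the USqlTablePreview docstring, every cell arrives as a string and byte arrays arrive base-64 encoded; pairing rows with schema recovers the column names. A sketch with hand-built values:

    import base64

    from azure.mgmt.datalake.analytics.catalog.models import (
        USqlTablePreview, USqlTableColumn)

    preview = USqlTablePreview(
        total_row_count=1,
        total_column_count=2,
        rows=[['42', base64.b64encode(b'\x00\x01').decode('ascii')]],
        truncated=False,
        schema=[USqlTableColumn(name='Id', type='System.Int32'),
                USqlTableColumn(name='Blob', type='System.Byte[]')],
    )
    for row in preview.rows:
        # Map each stringly-typed cell back to its column name.
        print(dict(zip((col.name for col in preview.schema), row)))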
+ :type schema: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn] + """ + + _attribute_map = { + 'total_row_count': {'key': 'totalRowCount', 'type': 'long'}, + 'total_column_count': {'key': 'totalColumnCount', 'type': 'long'}, + 'rows': {'key': 'rows', 'type': '[[str]]'}, + 'truncated': {'key': 'truncated', 'type': 'bool'}, + 'schema': {'key': 'schema', 'type': '[USqlTableColumn]'}, + } + + def __init__(self, *, total_row_count: int=None, total_column_count: int=None, rows=None, truncated: bool=None, schema=None, **kwargs) -> None: + super(USqlTablePreview, self).__init__(**kwargs) + self.total_row_count = total_row_count + self.total_column_count = total_column_count + self.rows = rows + self.truncated = truncated + self.schema = schema diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_py3.py new file mode 100644 index 000000000000..9bab0fafbd46 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_py3.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlTable(CatalogItem): + """A Data Lake Analytics catalog U-SQL table item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table and + database. + :type schema_name: str + :param name: the name of the table. + :type name: str + :param column_list: the list of columns in this table + :type column_list: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn] + :param index_list: the list of indices in this table + :type index_list: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlIndex] + :param partition_key_list: the list of partition keys in the table + :type partition_key_list: list[str] + :param external_table: the external table associated with the table. 
+ :type external_table: + ~azure.mgmt.datalake.analytics.catalog.models.ExternalTable + :param distribution_info: the distribution info of the table. + :type distribution_info: + ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'tableName', 'type': 'str'}, + 'column_list': {'key': 'columnList', 'type': '[USqlTableColumn]'}, + 'index_list': {'key': 'indexList', 'type': '[USqlIndex]'}, + 'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'}, + 'external_table': {'key': 'externalTable', 'type': 'ExternalTable'}, + 'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, column_list=None, index_list=None, partition_key_list=None, external_table=None, distribution_info=None, **kwargs) -> None: + super(USqlTable, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.column_list = column_list + self.index_list = index_list + self.partition_key_list = partition_key_list + self.external_table = external_table + self.distribution_info = distribution_info diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics.py index 4846d5f3c994..7d01781764ed --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics.py @@ -70,18 +70,18 @@ class USqlTableStatistics(CatalogItem): 'col_names': {'key': 'colNames', 'type': '[str]'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, schema_name=None, table_name=None, name=None, user_stat_name=None, stat_data_path=None, create_time=None, update_time=None, is_user_created=None, is_auto_created=None, has_filter=None, filter_definition=None, col_names=None): - super(USqlTableStatistics, self).__init__(compute_account_name=compute_account_name, version=version) - self.database_name = database_name - self.schema_name = schema_name - self.table_name = table_name - self.name = name - self.user_stat_name = user_stat_name - self.stat_data_path = stat_data_path - self.create_time = create_time - self.update_time = update_time - self.is_user_created = is_user_created - self.is_auto_created = is_auto_created - self.has_filter = has_filter - self.filter_definition = filter_definition - self.col_names = col_names + def __init__(self, **kwargs): + super(USqlTableStatistics, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.table_name = kwargs.get('table_name', None) + self.name = kwargs.get('name', None) + self.user_stat_name = kwargs.get('user_stat_name', None) + self.stat_data_path = kwargs.get('stat_data_path', None) + self.create_time = kwargs.get('create_time', None) + self.update_time = kwargs.get('update_time', None) + self.is_user_created = 
kwargs.get('is_user_created', None) + self.is_auto_created = kwargs.get('is_auto_created', None) + self.has_filter = kwargs.get('has_filter', None) + self.filter_definition = kwargs.get('filter_definition', None) + self.col_names = kwargs.get('col_names', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics_py3.py new file mode 100644 index 000000000000..d60a6e311279 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics_py3.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlTableStatistics(CatalogItem): + """A Data Lake Analytics catalog U-SQL table statistics item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table and + database. + :type schema_name: str + :param table_name: the name of the table. + :type table_name: str + :param name: the name of the table statistics. + :type name: str + :param user_stat_name: the name of the user statistics. + :type user_stat_name: str + :param stat_data_path: the path to the statistics data. + :type stat_data_path: str + :param create_time: the creation time of the statistics. + :type create_time: datetime + :param update_time: the last time the statistics were updated. + :type update_time: datetime + :param is_user_created: the switch indicating if these statistics are user + created. + :type is_user_created: bool + :param is_auto_created: the switch indicating if these statistics are + automatically created. + :type is_auto_created: bool + :param has_filter: the switch indicating if these statistics have a + filter. + :type has_filter: bool + :param filter_definition: the filter definition for the statistics. + :type filter_definition: str + :param col_names: the list of column names associated with these + statistics. 
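The same mechanical change runs through this whole patch: each Python 2 module moves to a **kwargs-based __init__, while the new _py3 module takes annotated, keyword-only arguments. A short sketch, not part of the patch and with invented values, of what both shapes accept:

from azure.mgmt.datalake.analytics.catalog.models import USqlTableStatistics

# Keyword arguments work against either the **kwargs form or the
# keyword-only py3 form shown in this file pair.
stats = USqlTableStatistics(database_name='master', table_name='tbl', name='stats')
print(stats.database_name, stats.table_name, stats.name)

# The old positional style, e.g. USqlTableStatistics(None, None, 'master'),
# now raises TypeError under both new signatures.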
+ :type col_names: list[str] + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'table_name': {'key': 'tableName', 'type': 'str'}, + 'name': {'key': 'statisticsName', 'type': 'str'}, + 'user_stat_name': {'key': 'userStatName', 'type': 'str'}, + 'stat_data_path': {'key': 'statDataPath', 'type': 'str'}, + 'create_time': {'key': 'createTime', 'type': 'iso-8601'}, + 'update_time': {'key': 'updateTime', 'type': 'iso-8601'}, + 'is_user_created': {'key': 'isUserCreated', 'type': 'bool'}, + 'is_auto_created': {'key': 'isAutoCreated', 'type': 'bool'}, + 'has_filter': {'key': 'hasFilter', 'type': 'bool'}, + 'filter_definition': {'key': 'filterDefinition', 'type': 'str'}, + 'col_names': {'key': 'colNames', 'type': '[str]'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, table_name: str=None, name: str=None, user_stat_name: str=None, stat_data_path: str=None, create_time=None, update_time=None, is_user_created: bool=None, is_auto_created: bool=None, has_filter: bool=None, filter_definition: str=None, col_names=None, **kwargs) -> None: + super(USqlTableStatistics, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.table_name = table_name + self.name = name + self.user_stat_name = user_stat_name + self.stat_data_path = stat_data_path + self.create_time = create_time + self.update_time = update_time + self.is_user_created = is_user_created + self.is_auto_created = is_auto_created + self.has_filter = has_filter + self.filter_definition = filter_definition + self.col_names = col_names diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type.py index 588c4b1bbe2b..19a1906ca282 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type.py @@ -87,6 +87,6 @@ class USqlTableType(USqlType): 'columns': {'key': 'columns', 'type': '[TypeFieldInfo]'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, schema_name=None, name=None, type_family=None, c_sharp_name=None, full_csharp_name=None, system_type_id=None, user_type_id=None, schema_id=None, principal_id=None, is_nullable=None, is_user_defined=None, is_assembly_type=None, is_table_type=None, is_complex_type=None): - super(USqlTableType, self).__init__(compute_account_name=compute_account_name, version=version, database_name=database_name, schema_name=schema_name, name=name, type_family=type_family, c_sharp_name=c_sharp_name, full_csharp_name=full_csharp_name, system_type_id=system_type_id, user_type_id=user_type_id, schema_id=schema_id, principal_id=principal_id, is_nullable=is_nullable, is_user_defined=is_user_defined, is_assembly_type=is_assembly_type, is_table_type=is_table_type, is_complex_type=is_complex_type) + def __init__(self, **kwargs): + super(USqlTableType, self).__init__(**kwargs) self.columns = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type_py3.py 
b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type_py3.py new file mode 100644 index 000000000000..a44f43f366a9 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type_py3.py @@ -0,0 +1,92 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .usql_type_py3 import USqlType + + +class USqlTableType(USqlType): + """A Data Lake Analytics catalog U-SQL table type item. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table and + database. + :type schema_name: str + :param name: the name of this type. + :type name: str + :param type_family: the type family for this type. + :type type_family: str + :param c_sharp_name: the C# name for this type. + :type c_sharp_name: str + :param full_csharp_name: the fully qualified C# name for this type. + :type full_csharp_name: str + :param system_type_id: the system type ID for this type. + :type system_type_id: int + :param user_type_id: the user type ID for this type. + :type user_type_id: int + :param schema_id: the schema ID for this type. + :type schema_id: int + :param principal_id: the principal ID for this type. + :type principal_id: int + :param is_nullable: the switch indicating if this type is nullable. + :type is_nullable: bool + :param is_user_defined: the switch indicating if this type is user + defined. + :type is_user_defined: bool + :param is_assembly_type: the switch indicating if this type is an + assembly type. + :type is_assembly_type: bool + :param is_table_type: the switch indicating if this type is a table + type. + :type is_table_type: bool + :param is_complex_type: the switch indicating if this type is a + complex type. + :type is_complex_type: bool + :ivar columns: the type field information associated with this table type.
+ :vartype columns: + list[~azure.mgmt.datalake.analytics.catalog.models.TypeFieldInfo] + """ + + _validation = { + 'columns': {'readonly': True}, + } + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'typeName', 'type': 'str'}, + 'type_family': {'key': 'typeFamily', 'type': 'str'}, + 'c_sharp_name': {'key': 'cSharpName', 'type': 'str'}, + 'full_csharp_name': {'key': 'fullCSharpName', 'type': 'str'}, + 'system_type_id': {'key': 'systemTypeId', 'type': 'int'}, + 'user_type_id': {'key': 'userTypeId', 'type': 'int'}, + 'schema_id': {'key': 'schemaId', 'type': 'int'}, + 'principal_id': {'key': 'principalId', 'type': 'int'}, + 'is_nullable': {'key': 'isNullable', 'type': 'bool'}, + 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, + 'is_assembly_type': {'key': 'isAssemblyType', 'type': 'bool'}, + 'is_table_type': {'key': 'isTableType', 'type': 'bool'}, + 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, + 'columns': {'key': 'columns', 'type': '[TypeFieldInfo]'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, type_family: str=None, c_sharp_name: str=None, full_csharp_name: str=None, system_type_id: int=None, user_type_id: int=None, schema_id: int=None, principal_id: int=None, is_nullable: bool=None, is_user_defined: bool=None, is_assembly_type: bool=None, is_table_type: bool=None, is_complex_type: bool=None, **kwargs) -> None: + super(USqlTableType, self).__init__(compute_account_name=compute_account_name, version=version, database_name=database_name, schema_name=schema_name, name=name, type_family=type_family, c_sharp_name=c_sharp_name, full_csharp_name=full_csharp_name, system_type_id=system_type_id, user_type_id=user_type_id, schema_id=schema_id, principal_id=principal_id, is_nullable=is_nullable, is_user_defined=is_user_defined, is_assembly_type=is_assembly_type, is_table_type=is_table_type, is_complex_type=is_complex_type, **kwargs) + self.columns = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function.py index 70d2c94a50b0..0c2d0ac4242f 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function.py @@ -38,9 +38,9 @@ class USqlTableValuedFunction(CatalogItem): 'definition': {'key': 'definition', 'type': 'str'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, schema_name=None, name=None, definition=None): - super(USqlTableValuedFunction, self).__init__(compute_account_name=compute_account_name, version=version) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.definition = definition + def __init__(self, **kwargs): + super(USqlTableValuedFunction, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.definition = kwargs.get('definition', None) diff --git 
a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function_py3.py new file mode 100644 index 000000000000..833f96d8d557 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlTableValuedFunction(CatalogItem): + """A Data Lake Analytics catalog U-SQL table valued function item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this database. + :type schema_name: str + :param name: the name of the table valued function. + :type name: str + :param definition: the definition of the table valued function. + :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'tvfName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: + super(USqlTableValuedFunction, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.definition = definition diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type.py index 680abeb54473..e377666a65f3 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type.py @@ -76,20 +76,20 @@ class USqlType(CatalogItem): 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, schema_name=None, name=None, type_family=None, c_sharp_name=None, full_csharp_name=None, system_type_id=None, user_type_id=None, schema_id=None, principal_id=None, is_nullable=None, is_user_defined=None, is_assembly_type=None, is_table_type=None, is_complex_type=None): - super(USqlType, self).__init__(compute_account_name=compute_account_name, version=version) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.type_family = type_family - self.c_sharp_name = c_sharp_name - self.full_csharp_name = full_csharp_name 
- self.system_type_id = system_type_id - self.user_type_id = user_type_id - self.schema_id = schema_id - self.principal_id = principal_id - self.is_nullable = is_nullable - self.is_user_defined = is_user_defined - self.is_assembly_type = is_assembly_type - self.is_table_type = is_table_type - self.is_complex_type = is_complex_type + def __init__(self, **kwargs): + super(USqlType, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.type_family = kwargs.get('type_family', None) + self.c_sharp_name = kwargs.get('c_sharp_name', None) + self.full_csharp_name = kwargs.get('full_csharp_name', None) + self.system_type_id = kwargs.get('system_type_id', None) + self.user_type_id = kwargs.get('user_type_id', None) + self.schema_id = kwargs.get('schema_id', None) + self.principal_id = kwargs.get('principal_id', None) + self.is_nullable = kwargs.get('is_nullable', None) + self.is_user_defined = kwargs.get('is_user_defined', None) + self.is_assembly_type = kwargs.get('is_assembly_type', None) + self.is_table_type = kwargs.get('is_table_type', None) + self.is_complex_type = kwargs.get('is_complex_type', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type_py3.py new file mode 100644 index 000000000000..c4c5f0bb306d --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type_py3.py @@ -0,0 +1,95 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlType(CatalogItem): + """A Data Lake Analytics catalog U-SQL type item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table and + database. + :type schema_name: str + :param name: the name of this type. + :type name: str + :param type_family: the type family for this type. + :type type_family: str + :param c_sharp_name: the C# name for this type. + :type c_sharp_name: str + :param full_csharp_name: the fully qualified C# name for this type. + :type full_csharp_name: str + :param system_type_id: the system type ID for this type. + :type system_type_id: int + :param user_type_id: the user type ID for this type. + :type user_type_id: int + :param schema_id: the schema ID for this type. + :type schema_id: int + :param principal_id: the principal ID for this type. + :type principal_id: int + :param is_nullable: the switch indicating if this type is nullable. + :type is_nullable: bool + :param is_user_defined: the switch indicating if this type is user + defined. + :type is_user_defined: bool + :param is_assembly_type: the switch indicating if this type is an + assembly type. + :type is_assembly_type: bool + :param is_table_type: the switch indicating if this type is a table + type. + :type is_table_type: bool + :param is_complex_type: the switch indicating if this type is a + complex type. + :type is_complex_type: bool + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'typeName', 'type': 'str'}, + 'type_family': {'key': 'typeFamily', 'type': 'str'}, + 'c_sharp_name': {'key': 'cSharpName', 'type': 'str'}, + 'full_csharp_name': {'key': 'fullCSharpName', 'type': 'str'}, + 'system_type_id': {'key': 'systemTypeId', 'type': 'int'}, + 'user_type_id': {'key': 'userTypeId', 'type': 'int'}, + 'schema_id': {'key': 'schemaId', 'type': 'int'}, + 'principal_id': {'key': 'principalId', 'type': 'int'}, + 'is_nullable': {'key': 'isNullable', 'type': 'bool'}, + 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, + 'is_assembly_type': {'key': 'isAssemblyType', 'type': 'bool'}, + 'is_table_type': {'key': 'isTableType', 'type': 'bool'}, + 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, type_family: str=None, c_sharp_name: str=None, full_csharp_name: str=None, system_type_id: int=None, user_type_id: int=None, schema_id: int=None, principal_id: int=None, is_nullable: bool=None, is_user_defined: bool=None, is_assembly_type: bool=None, is_table_type: bool=None, is_complex_type: bool=None, **kwargs) -> None: + super(USqlType, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.type_family = type_family + self.c_sharp_name = c_sharp_name + self.full_csharp_name = full_csharp_name + self.system_type_id = system_type_id + self.user_type_id = user_type_id + self.schema_id = schema_id + self.principal_id = principal_id + self.is_nullable = is_nullable + self.is_user_defined = is_user_defined + self.is_assembly_type = is_assembly_type + self.is_table_type = is_table_type + self.is_complex_type = is_complex_type diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view.py index 54a30de755c1..a1819d7712cf --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view.py @@ -39,9 +39,9 @@ class USqlView(CatalogItem): 'definition': {'key': 'definition', 'type': 'str'}, } - def __init__(self, compute_account_name=None, version=None, database_name=None, schema_name=None, name=None, definition=None): - super(USqlView, self).__init__(compute_account_name=compute_account_name, version=version) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.definition = definition + def __init__(self, **kwargs): + super(USqlView, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.definition = kwargs.get('definition', None) diff --git
a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view_py3.py new file mode 100644 index 000000000000..675e61057e8d --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .catalog_item_py3 import CatalogItem + + +class USqlView(CatalogItem): + """A Data Lake Analytics catalog U-SQL view item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this view and + database. + :type schema_name: str + :param name: the name of the view. + :type name: str + :param definition: the defined query of the view. + :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'viewName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: + super(USqlView, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.definition = definition diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/catalog_operations.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/catalog_operations.py index ef6616875477..a6ba91268fb9 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/catalog_operations.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/catalog_operations.py @@ -10,7 +10,6 @@ # -------------------------------------------------------------------------- import uuid -import warnings from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError @@ -23,7 +22,7 @@ class CatalogOperations(object): :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. - :param deserializer: An objec model deserializer. + :param deserializer: An object model deserializer. :ivar api_version: Client Api Version. Constant value: "2016-11-01". """ @@ -44,9 +43,6 @@ def create_secret( specified database. This is deprecated and will be removed in the next release. Please use CreateCredential instead. - .. 
warning:: - This method is deprecated - :param account_name: The Azure Data Lake Analytics account upon which to execute catalog operations. :type account_name: str @@ -69,11 +65,10 @@ def create_secret( :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError` """ - warnings.warn("Method create_secret is deprecated", DeprecationWarning) parameters = models.DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters(password=password, uri=uri) # Construct URL - url = '/catalog/usql/databases/{databaseName}/secrets/{secretName}' + url = self.create_secret.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -100,9 +95,8 @@ def create_secret( body_content = self._serialize.body(parameters, 'DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters') # Construct and send request - request = self._client.put(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -112,6 +106,7 @@ def create_secret( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + create_secret.metadata = {'url': '/catalog/usql/databases/{databaseName}/secrets/{secretName}'} def get_secret( self, account_name, database_name, secret_name, custom_headers=None, raw=False, **operation_config): @@ -119,9 +114,6 @@ def get_secret( and will be removed in the next release. Please use GetCredential instead. - .. warning:: - This method is deprecated - :param account_name: The Azure Data Lake Analytics account upon which to execute catalog operations. 
:type account_name: str @@ -139,9 +131,8 @@ def get_secret( ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError` """ - warnings.warn("Method get_secret is deprecated", DeprecationWarning) # Construct URL - url = '/catalog/usql/databases/{databaseName}/secrets/{secretName}' + url = self.get_secret.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -156,7 +147,7 @@ def get_secret( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -165,8 +156,8 @@ def get_secret( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -183,6 +174,7 @@ def get_secret( return client_raw_response return deserialized + get_secret.metadata = {'url': '/catalog/usql/databases/{databaseName}/secrets/{secretName}'} def update_secret( self, account_name, database_name, secret_name, password, uri=None, custom_headers=None, raw=False, **operation_config): @@ -190,9 +182,6 @@ def update_secret( specified database. This is deprecated and will be removed in the next release. Please use UpdateCredential instead. - .. warning:: - This method is deprecated - :param account_name: The Azure Data Lake Analytics account upon which to execute catalog operations. 
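One structural change recurs in every operation in this file: the literal URL template moves out of the method body and onto the function object as metadata, which the body then reads back via self.<method>.metadata['url']. A toy sketch, not part of the patch, of the same function-attribute pattern in plain Python:

def get_secret():
    # Read the template back off the function object at call time.
    return get_secret.metadata['url']

get_secret.metadata = {'url': '/catalog/usql/databases/{databaseName}/secrets/{secretName}'}

print(get_secret())  # prints the URL template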
:type account_name: str @@ -214,11 +203,10 @@ def update_secret( :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError` """ - warnings.warn("Method update_secret is deprecated", DeprecationWarning) parameters = models.DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters(password=password, uri=uri) # Construct URL - url = '/catalog/usql/databases/{databaseName}/secrets/{secretName}' + url = self.update_secret.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -245,9 +233,8 @@ def update_secret( body_content = self._serialize.body(parameters, 'DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters') # Construct and send request - request = self._client.patch(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -257,6 +244,7 @@ def update_secret( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + update_secret.metadata = {'url': '/catalog/usql/databases/{databaseName}/secrets/{secretName}'} def delete_secret( self, account_name, database_name, secret_name, custom_headers=None, raw=False, **operation_config): @@ -264,9 +252,6 @@ def delete_secret( deprecated and will be removed in the next release. Please use DeleteCredential instead. - .. warning:: - This method is deprecated - :param account_name: The Azure Data Lake Analytics account upon which to execute catalog operations. 
:type account_name: str @@ -283,9 +268,8 @@ def delete_secret( :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError` """ - warnings.warn("Method delete_secret is deprecated", DeprecationWarning) # Construct URL - url = '/catalog/usql/databases/{databaseName}/secrets/{secretName}' + url = self.delete_secret.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -300,7 +284,6 @@ def delete_secret( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -309,8 +292,8 @@ def delete_secret( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.delete(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -320,6 +303,7 @@ def delete_secret( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + delete_secret.metadata = {'url': '/catalog/usql/databases/{databaseName}/secrets/{secretName}'} def delete_all_secrets( self, account_name, database_name, custom_headers=None, raw=False, **operation_config): @@ -327,9 +311,6 @@ def delete_all_secrets( will be removed in the next release. In the future, please only drop individual credentials using DeleteCredential. - .. warning:: - This method is deprecated - :param account_name: The Azure Data Lake Analytics account upon which to execute catalog operations. 
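As the docstrings above note, the secret operations are deprecated in favor of the credential operations added alongside them. A hedged sketch of the replacement flow, not part of the patch: the DataLakeAnalyticsCatalogCredentialCreateParameters field names (password, uri, user_id), the client.catalog attribute, and all account, database, and credential values below are assumptions:

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.datalake.analytics.catalog import DataLakeAnalyticsCatalogManagementClient
from azure.mgmt.datalake.analytics.catalog.models import DataLakeAnalyticsCatalogCredentialCreateParameters

# Any msrestazure credentials object works; service principal shown here.
credentials = ServicePrincipalCredentials(client_id='<id>', secret='<secret>', tenant='<tenant>')
client = DataLakeAnalyticsCatalogManagementClient(credentials, 'azuredatalakeanalytics.net')

# Create a credential instead of a secret (CreateCredential replaces CreateSecret).
params = DataLakeAnalyticsCatalogCredentialCreateParameters(
    password='<password>', uri='https://example.contoso.com:443', user_id='<user>')
client.catalog.create_credential('myadla', 'mydb', 'mycred', params)

# Drop credentials individually rather than calling delete_all_secrets.
client.catalog.delete_credential('myadla', 'mydb', 'mycred', cascade=True, password='<password>')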
:type account_name: str @@ -344,9 +325,8 @@ def delete_all_secrets( :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError` """ - warnings.warn("Method delete_all_secrets is deprecated", DeprecationWarning) # Construct URL - url = '/catalog/usql/databases/{databaseName}/secrets' + url = self.delete_all_secrets.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -360,7 +340,6 @@ def delete_all_secrets( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -369,8 +348,8 @@ def delete_all_secrets( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.delete(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -380,6 +359,7 @@ def delete_all_secrets( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + delete_all_secrets.metadata = {'url': '/catalog/usql/databases/{databaseName}/secrets'} def create_credential( self, account_name, database_name, credential_name, parameters, custom_headers=None, raw=False, **operation_config): @@ -410,7 +390,7 @@ def create_credential( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/credentials/{credentialName}' + url = self.create_credential.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -437,9 +417,8 @@ def create_credential( body_content = self._serialize.body(parameters, 'DataLakeAnalyticsCatalogCredentialCreateParameters') # Construct and send request - request = self._client.put(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -449,6 +428,7 @@ def create_credential( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + create_credential.metadata = {'url': '/catalog/usql/databases/{databaseName}/credentials/{credentialName}'} def get_credential( self, account_name, database_name, credential_name, custom_headers=None, raw=False, **operation_config): @@ -473,7 +453,7 @@ def get_credential( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/credentials/{credentialName}' + url = self.get_credential.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', 
skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -488,7 +468,7 @@ def get_credential( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -497,8 +477,8 @@ def get_credential( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -515,6 +495,7 @@ def get_credential( return client_raw_response return deserialized + get_credential.metadata = {'url': '/catalog/usql/databases/{databaseName}/credentials/{credentialName}'} def update_credential( self, account_name, database_name, credential_name, parameters, custom_headers=None, raw=False, **operation_config): @@ -543,7 +524,7 @@ def update_credential( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/credentials/{credentialName}' + url = self.update_credential.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -570,9 +551,8 @@ def update_credential( body_content = self._serialize.body(parameters, 'DataLakeAnalyticsCatalogCredentialUpdateParameters') # Construct and send request - request = self._client.patch(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -582,6 +562,7 @@ def update_credential( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + update_credential.metadata = {'url': '/catalog/usql/databases/{databaseName}/credentials/{credentialName}'} def delete_credential( self, account_name, database_name, credential_name, cascade=False, password=None, custom_headers=None, raw=False, **operation_config): @@ -618,7 +599,7 @@ def delete_credential( parameters = models.DataLakeAnalyticsCatalogCredentialDeleteParameters(password=password) # Construct URL - url = '/catalog/usql/databases/{databaseName}/credentials/{credentialName}' + url = self.delete_credential.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -650,9 +631,8 @@ def delete_credential( body_content = None # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, 
header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -662,6 +642,7 @@ def delete_credential( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + delete_credential.metadata = {'url': '/catalog/usql/databases/{databaseName}/credentials/{credentialName}'} def list_credentials( self, account_name, database_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -706,7 +687,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/credentials' + url = self.list_credentials.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -736,7 +717,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -745,9 +726,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -765,6 +745,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_credentials.metadata = {'url': '/catalog/usql/databases/{databaseName}/credentials'} def get_external_data_source( self, account_name, database_name, external_data_source_name, custom_headers=None, raw=False, **operation_config): @@ -792,7 +773,7 @@ def get_external_data_source( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/externaldatasources/{externalDataSourceName}' + url = self.get_external_data_source.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -807,7 +788,7 @@ def get_external_data_source( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -816,8 +797,8 @@ def get_external_data_source( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = 
self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -834,6 +815,7 @@ def get_external_data_source( return client_raw_response return deserialized + get_external_data_source.metadata = {'url': '/catalog/usql/databases/{databaseName}/externaldatasources/{externalDataSourceName}'} def list_external_data_sources( self, account_name, database_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -880,7 +862,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/externaldatasources' + url = self.list_external_data_sources.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -910,7 +892,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -919,9 +901,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -939,6 +920,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_external_data_sources.metadata = {'url': '/catalog/usql/databases/{databaseName}/externaldatasources'} def get_procedure( self, account_name, database_name, schema_name, procedure_name, custom_headers=None, raw=False, **operation_config): @@ -965,7 +947,7 @@ def get_procedure( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/procedures/{procedureName}' + url = self.get_procedure.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -981,7 +963,7 @@ def get_procedure( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -990,8 +972,8 @@ def get_procedure( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # 
Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1008,6 +990,7 @@ def get_procedure( return client_raw_response return deserialized + get_procedure.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/procedures/{procedureName}'} def list_procedures( self, account_name, database_name, schema_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -1055,7 +1038,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/procedures' + url = self.list_procedures.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1086,7 +1069,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1095,9 +1078,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1115,6 +1097,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_procedures.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/procedures'} def get_table( self, account_name, database_name, schema_name, table_name, custom_headers=None, raw=False, **operation_config): @@ -1140,7 +1123,7 @@ def get_table( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}' + url = self.get_table.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1156,7 +1139,7 @@ def get_table( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1165,8 +1148,8 @@ def get_table( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') 
# Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1183,6 +1166,120 @@ def get_table( return client_raw_response return deserialized + get_table.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}'} + + def list_table_fragments( + self, account_name, database_name, schema_name, table_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): + """Retrieves the list of table fragments from the Data Lake Analytics + catalog. + + :param account_name: The Azure Data Lake Analytics account upon which + to execute catalog operations. + :type account_name: str + :param database_name: The name of the database containing the table + fragments. + :type database_name: str + :param schema_name: The name of the schema containing the table + fragments. + :type schema_name: str + :param table_name: The name of the table containing the table + fragments. + :type table_name: str + :param filter: OData filter. Optional. + :type filter: str + :param top: The number of items to return. Optional. + :type top: int + :param skip: The number of items to skip over before returning + elements. Optional. + :type skip: int + :param select: OData Select statement. Limits the properties on each + entry to just those requested, e.g. + Categories?$select=CategoryName,Description. Optional. + :type select: str + :param orderby: OrderBy clause. One or more comma-separated + expressions with an optional "asc" (the default) or "desc" depending + on the order you'd like the values sorted, e.g. + Categories?$orderby=CategoryName desc. Optional. + :type orderby: str + :param count: The Boolean value of true or false to request a count of + the matching resources included with the resources in the response, + e.g. Categories?$count=true. Optional. + :type count: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of USqlTableFragment + :rtype: + ~azure.mgmt.datalake.analytics.catalog.models.USqlTableFragmentPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTableFragment] + :raises: :class:`CloudError` + """ + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_table_fragments.metadata['url'] + path_format_arguments = { + 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), + 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), + 'databaseName': self._serialize.url("database_name", database_name, 'str'), + 'schemaName': self._serialize.url("schema_name", schema_name, 'str'), + 'tableName': self._serialize.url("table_name", table_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if top is not None: + query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1) + if skip is not None: + query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=1) + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if orderby is not None: + query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str') + if count is not None: + query_parameters['$count'] = self._serialize.query("count", count, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + return response + + # Deserialize response + deserialized = models.USqlTableFragmentPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.USqlTableFragmentPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_table_fragments.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/tablefragments'} def list_tables( self, account_name, database_name, schema_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, basic=False, custom_headers=None, raw=False, **operation_config): @@ -1235,7 +1332,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables' + url = self.list_tables.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', 
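`list_table_fragments` is new in this regeneration (route `.../tablefragments`). Consuming it looks like any other paged catalog listing; the names below are placeholders and `client` is constructed as in the earlier sketch:

    # OData options ($top, $filter, ...) only shape the first request; once
    # the service hands back a next link, internal_paging follows it verbatim.
    fragments = client.catalog.list_table_fragments(
        'myadlaaccount', 'mydb', 'dbo', 'mytable', top=100)
    for fragment in fragments:
        print(fragment)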
skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1268,7 +1365,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1277,9 +1374,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1297,6 +1393,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_tables.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables'} def list_table_statistics_by_database_and_schema( self, account_name, database_name, schema_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -1345,7 +1442,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/statistics' + url = self.list_table_statistics_by_database_and_schema.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1376,7 +1473,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1385,9 +1482,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1405,6 +1501,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_table_statistics_by_database_and_schema.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/statistics'} def get_table_type( self, account_name, database_name, schema_name, table_type_name, custom_headers=None, raw=False, **operation_config): @@ -1432,7 +1529,7 @@ def get_table_type( :raises: :class:`CloudError` """ # Construct URL - url = 
'/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tabletypes/{tableTypeName}' + url = self.get_table_type.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1448,7 +1545,7 @@ def get_table_type( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1457,8 +1554,8 @@ def get_table_type( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1475,6 +1572,7 @@ def get_table_type( return client_raw_response return deserialized + get_table_type.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tabletypes/{tableTypeName}'} def list_table_types( self, account_name, database_name, schema_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -1522,7 +1620,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tabletypes' + url = self.list_table_types.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1553,7 +1651,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1562,9 +1660,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1582,6 +1679,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_table_types.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tabletypes'} def get_package( self, account_name, database_name, schema_name, package_name, custom_headers=None, raw=False, **operation_config): @@ -1607,7 +1705,7 @@ def get_package( :raises: 
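The single-item getters in this file all share one shape; `raw=True` is the only knob that changes the return type. A sketch against `get_table_type` (placeholder names, `client` as before):

    # Plain call: returns the deserialized USqlTableType, raising
    # CloudError on any non-200 status.
    ttype = client.catalog.get_table_type(
        'myadlaaccount', 'mydb', 'dbo', 'MyTableType')

    # raw=True: returns msrest's ClientRawResponse, keeping the HTTP
    # response available next to the deserialized model.
    raw_resp = client.catalog.get_table_type(
        'myadlaaccount', 'mydb', 'dbo', 'MyTableType', raw=True)
    print(raw_resp.response.status_code, raw_resp.output)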
:class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/packages/{packageName}' + url = self.get_package.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1623,7 +1721,7 @@ def get_package( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1632,8 +1730,8 @@ def get_package( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1650,6 +1748,7 @@ def get_package( return client_raw_response return deserialized + get_package.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/packages/{packageName}'} def list_packages( self, account_name, database_name, schema_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -1697,7 +1796,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/packages' + url = self.list_packages.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1728,7 +1827,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1737,9 +1836,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1757,6 +1855,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_packages.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/packages'} def get_view( self, account_name, database_name, schema_name, view_name, custom_headers=None, raw=False, **operation_config): @@ -1782,7 +1881,7 @@ def get_view( :raises: 
:class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/views/{viewName}' + url = self.get_view.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1798,7 +1897,7 @@ def get_view( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1807,8 +1906,8 @@ def get_view( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1825,6 +1924,7 @@ def get_view( return client_raw_response return deserialized + get_view.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/views/{viewName}'} def list_views( self, account_name, database_name, schema_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -1871,7 +1971,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/views' + url = self.list_views.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1902,7 +2002,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1911,9 +2011,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1931,6 +2030,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_views.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/views'} def get_table_statistic( self, account_name, database_name, schema_name, table_name, statistics_name, custom_headers=None, raw=False, **operation_config): @@ -1961,7 +2061,7 @@ def get_table_statistic( :raises: 
:class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/statistics/{statisticsName}' + url = self.get_table_statistic.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -1978,7 +2078,7 @@ def get_table_statistic( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1987,8 +2087,8 @@ def get_table_statistic( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2005,6 +2105,7 @@ def get_table_statistic( return client_raw_response return deserialized + get_table_statistic.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/statistics/{statisticsName}'} def list_table_statistics( self, account_name, database_name, schema_name, table_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -2055,7 +2156,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/statistics' + url = self.list_table_statistics.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -2087,7 +2188,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2096,9 +2197,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2116,6 +2216,90 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_table_statistics.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/statistics'} + + def 
preview_table_partition( + self, account_name, database_name, schema_name, table_name, partition_name, max_rows=None, max_columns=None, custom_headers=None, raw=False, **operation_config): + """Retrieves a preview set of rows in the given partition. + + :param account_name: The Azure Data Lake Analytics account upon which + to execute catalog operations. + :type account_name: str + :param database_name: The name of the database containing the + partition. + :type database_name: str + :param schema_name: The name of the schema containing the partition. + :type schema_name: str + :param table_name: The name of the table containing the partition. + :type table_name: str + :param partition_name: The name of the table partition. + :type partition_name: str + :param max_rows: The maximum number of preview rows to be + retrieved. Rows returned may be less than or equal to this number + depending on row sizes and number of rows in the partition. + :type max_rows: long + :param max_columns: The maximum number of columns to be retrieved. + :type max_columns: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: USqlTablePreview or ClientRawResponse if raw=true + :rtype: ~azure.mgmt.datalake.analytics.catalog.models.USqlTablePreview + or ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.preview_table_partition.metadata['url'] + path_format_arguments = { + 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), + 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), + 'databaseName': self._serialize.url("database_name", database_name, 'str'), + 'schemaName': self._serialize.url("schema_name", schema_name, 'str'), + 'tableName': self._serialize.url("table_name", table_name, 'str'), + 'partitionName': self._serialize.url("partition_name", partition_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if max_rows is not None: + query_parameters['maxRows'] = self._serialize.query("max_rows", max_rows, 'long') + if max_columns is not None: + query_parameters['maxColumns'] = self._serialize.query("max_columns", max_columns, 'long') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('USqlTablePreview', response) + + if raw: + client_raw_response =
ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + preview_table_partition.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/partitions/{partitionName}/previewrows'} def get_table_partition( self, account_name, database_name, schema_name, table_name, partition_name, custom_headers=None, raw=False, **operation_config): @@ -2146,7 +2330,7 @@ def get_table_partition( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/partitions/{partitionName}' + url = self.get_table_partition.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -2163,7 +2347,7 @@ def get_table_partition( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2172,8 +2356,8 @@ def get_table_partition( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2190,6 +2374,86 @@ def get_table_partition( return client_raw_response return deserialized + get_table_partition.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/partitions/{partitionName}'} + + def preview_table( + self, account_name, database_name, schema_name, table_name, max_rows=None, max_columns=None, custom_headers=None, raw=False, **operation_config): + """Retrieves a preview set of rows in the given table. + + :param account_name: The Azure Data Lake Analytics account upon which + to execute catalog operations. + :type account_name: str + :param database_name: The name of the database containing the table. + :type database_name: str + :param schema_name: The name of the schema containing the table. + :type schema_name: str + :param table_name: The name of the table. + :type table_name: str + :param max_rows: The maximum number of preview rows to be retrieved. + Rows returned may be less than or equal to this number depending on + row sizes and number of rows in the table. + :type max_rows: long + :param max_columns: The maximum number of columns to be retrieved. + :type max_columns: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`.
+ :return: USqlTablePreview or ClientRawResponse if raw=true + :rtype: ~azure.mgmt.datalake.analytics.catalog.models.USqlTablePreview + or ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.preview_table.metadata['url'] + path_format_arguments = { + 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), + 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), + 'databaseName': self._serialize.url("database_name", database_name, 'str'), + 'schemaName': self._serialize.url("schema_name", schema_name, 'str'), + 'tableName': self._serialize.url("table_name", table_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if max_rows is not None: + query_parameters['maxRows'] = self._serialize.query("max_rows", max_rows, 'long') + if max_columns is not None: + query_parameters['maxColumns'] = self._serialize.query("max_columns", max_columns, 'long') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('USqlTablePreview', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + preview_table.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/previewrows'} def list_table_partitions( self, account_name, database_name, schema_name, table_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -2240,7 +2504,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/partitions' + url = self.list_table_partitions.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -2272,7 +2536,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2281,9 +2545,8 @@ def internal_paging(next_link=None, raw=False): 
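`preview_table` (and `preview_table_partition` above) cap the returned sample through the maxRows/maxColumns query parameters. A sketch with placeholder names; the attribute names are taken from the USqlTablePreview model in this package:

    # Ask for at most 10 rows and 5 columns; the service may return fewer
    # rows depending on row sizes, per the docstring.
    preview = client.catalog.preview_table(
        'myadlaaccount', 'mydb', 'dbo', 'mytable', max_rows=10, max_columns=5)
    print(preview.total_row_count, preview.total_column_count)
    for row in preview.rows or []:
        print(row)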
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2301,6 +2564,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_table_partitions.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/partitions'} def list_types( self, account_name, database_name, schema_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -2348,7 +2612,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/types' + url = self.list_types.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -2379,7 +2643,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2388,9 +2652,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2408,6 +2671,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_types.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/types'} def get_table_valued_function( self, account_name, database_name, schema_name, table_valued_function_name, custom_headers=None, raw=False, **operation_config): @@ -2438,7 +2702,7 @@ def get_table_valued_function( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tablevaluedfunctions/{tableValuedFunctionName}' + url = self.get_table_valued_function.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -2454,7 +2718,7 @@ def get_table_valued_function( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if 
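Every `list_*` method in this file wraps the same `internal_paging` closure: the first call builds the full URL with OData options, every later call hits the service-supplied next link with no extra query parameters. A self-contained toy version of that control flow (no Azure types involved, purely illustrative):

    def make_pager(fetch_page):
        """fetch_page(url) -> (items, next_link); mirrors internal_paging."""
        def iterate(first_url):
            url = first_url                    # first call: fully built URL
            while url:
                items, url = fetch_page(url)   # later calls: next_link as-is
                for item in items:
                    yield item
        return iterate

    # Canned two-page listing standing in for self._client.get/send.
    pages = {
        '/tables?$top=2': (['t1', 't2'], '/tables?page=2'),
        '/tables?page=2': (['t3'], None),
    }
    assert list(make_pager(pages.__getitem__)('/tables?$top=2')) == ['t1', 't2', 't3']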
self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2463,8 +2727,8 @@ def get_table_valued_function( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2481,6 +2745,7 @@ def get_table_valued_function( return client_raw_response return deserialized + get_table_valued_function.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tablevaluedfunctions/{tableValuedFunctionName}'} def list_table_valued_functions( self, account_name, database_name, schema_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -2530,7 +2795,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tablevaluedfunctions' + url = self.list_table_valued_functions.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -2561,7 +2826,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2570,9 +2835,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2590,6 +2854,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_table_valued_functions.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tablevaluedfunctions'} def get_assembly( self, account_name, database_name, assembly_name, custom_headers=None, raw=False, **operation_config): @@ -2614,7 +2879,7 @@ def get_assembly( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/assemblies/{assemblyName}' + url = self.get_assembly.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -2629,7 +2894,7 @@ def get_assembly( # Construct headers header_parameters = {} - 
header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2638,8 +2903,8 @@ def get_assembly( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2656,6 +2921,7 @@ def get_assembly( return client_raw_response return deserialized + get_assembly.metadata = {'url': '/catalog/usql/databases/{databaseName}/assemblies/{assemblyName}'} def list_assemblies( self, account_name, database_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -2701,7 +2967,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/assemblies' + url = self.list_assemblies.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -2731,7 +2997,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2740,9 +3006,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2760,6 +3025,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_assemblies.metadata = {'url': '/catalog/usql/databases/{databaseName}/assemblies'} def get_schema( self, account_name, database_name, schema_name, custom_headers=None, raw=False, **operation_config): @@ -2783,7 +3049,7 @@ def get_schema( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas/{schemaName}' + url = self.get_schema.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -2798,7 +3064,7 @@ def get_schema( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] 
= 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2807,8 +3073,8 @@ def get_schema( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2825,6 +3091,7 @@ def get_schema( return client_raw_response return deserialized + get_schema.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}'} def list_schemas( self, account_name, database_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -2869,7 +3136,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/schemas' + url = self.list_schemas.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -2899,7 +3166,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2908,9 +3175,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2928,6 +3194,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_schemas.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas'} def list_table_statistics_by_database( self, account_name, database_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -2974,7 +3241,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/statistics' + url = self.list_table_statistics_by_database.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -3004,7 +3271,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + 
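The companion change throughout is moving each hard-coded path into a `metadata` attribute on the operation. Besides keeping the route in one place for the next regeneration, it makes URLs introspectable from the bound method:

    # The URL template is now discoverable without issuing a request.
    print(client.catalog.get_schema.metadata['url'])
    # /catalog/usql/databases/{databaseName}/schemas/{schemaName}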
header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3013,9 +3280,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3033,6 +3299,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_table_statistics_by_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/statistics'} def list_tables_by_database( self, account_name, database_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, basic=False, custom_headers=None, raw=False, **operation_config): @@ -3083,7 +3350,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/tables' + url = self.list_tables_by_database.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -3115,7 +3382,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3124,9 +3391,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3144,6 +3410,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_tables_by_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/tables'} def list_table_valued_functions_by_database( self, account_name, database_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -3190,7 +3457,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/tablevaluedfunctions' + url = self.list_table_valued_functions_by_database.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -3220,7 +3487,7 @@ 
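`list_tables` and `list_tables_by_database` take a `basic` switch the other listings lack. A sketch (placeholder names; `client` as before):

    # basic=True requests trimmed table entries (no per-column detail),
    # which is considerably lighter for wide tables.
    for table in client.catalog.list_tables_by_database(
            'myadlaaccount', 'mydb', basic=True):
        print(table.name)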
def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3229,9 +3496,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3249,6 +3515,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_table_valued_functions_by_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/tablevaluedfunctions'} def list_views_by_database( self, account_name, database_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -3294,7 +3561,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/views' + url = self.list_views_by_database.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -3324,7 +3591,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3333,9 +3600,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3353,6 +3619,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_views_by_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/views'} def list_acls_by_database( self, account_name, database_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -3398,7 +3665,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases/{databaseName}/acl' + url = self.list_acls_by_database.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': 
self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -3428,7 +3695,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3437,9 +3704,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3457,6 +3723,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_acls_by_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/acl'} def list_acls( self, account_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -3500,7 +3767,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/acl' + url = self.list_acls.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True) @@ -3529,7 +3796,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3538,9 +3805,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3558,6 +3824,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_acls.metadata = {'url': '/catalog/usql/acl'} def get_database( self, account_name, database_name, custom_headers=None, raw=False, **operation_config): @@ -3579,7 +3846,7 @@ def get_database( :raises: :class:`CloudError` """ # Construct URL - url = '/catalog/usql/databases/{databaseName}' + url = self.get_database.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -3593,7 +3860,7 @@ 
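Reading ACLs is a plain paged listing at either catalog or database scope. The attribute names below come from the Acl model in this package; account and database names are placeholders:

    # Catalog-wide ACL entries.
    for ace in client.catalog.list_acls('myadlaaccount'):
        print(ace.ace_type, ace.principal_id, ace.permission)

    # Scoped to one database.
    for ace in client.catalog.list_acls_by_database('myadlaaccount', 'mydb'):
        print(ace.ace_type, ace.principal_id, ace.permission)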
def get_database( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3602,8 +3869,8 @@ def get_database( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3620,6 +3887,7 @@ def get_database( return client_raw_response return deserialized + get_database.metadata = {'url': '/catalog/usql/databases/{databaseName}'} def list_databases( self, account_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config): @@ -3662,7 +3930,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/catalog/usql/databases' + url = self.list_databases.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True) @@ -3691,7 +3959,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3700,9 +3968,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3720,6 +3987,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list_databases.metadata = {'url': '/catalog/usql/databases'} def grant_acl( self, account_name, parameters, custom_headers=None, raw=False, **operation_config): @@ -3745,7 +4013,7 @@ def grant_acl( op = "GRANTACE" # Construct URL - url = '/catalog/usql/acl' + url = self.grant_acl.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True) @@ -3771,9 +4039,8 @@ def grant_acl( body_content = self._serialize.body(parameters, 'AclCreateOrUpdateParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, 
**operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3783,6 +4050,7 @@ def grant_acl( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + grant_acl.metadata = {'url': '/catalog/usql/acl'} def grant_acl_to_database( self, account_name, database_name, parameters, custom_headers=None, raw=False, **operation_config): @@ -3810,7 +4078,7 @@ def grant_acl_to_database( op = "GRANTACE" # Construct URL - url = '/catalog/usql/databases/{databaseName}/acl' + url = self.grant_acl_to_database.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -3837,9 +4105,8 @@ def grant_acl_to_database( body_content = self._serialize.body(parameters, 'AclCreateOrUpdateParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3849,6 +4116,7 @@ def grant_acl_to_database( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + grant_acl_to_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/acl'} def revoke_acl( self, account_name, ace_type, principal_id, custom_headers=None, raw=False, **operation_config): @@ -3880,7 +4148,7 @@ def revoke_acl( op = "REVOKEACE" # Construct URL - url = '/catalog/usql/acl' + url = self.revoke_acl.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True) @@ -3906,9 +4174,8 @@ def revoke_acl( body_content = self._serialize.body(parameters, 'AclDeleteParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3918,6 +4185,7 @@ def revoke_acl( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + revoke_acl.metadata = {'url': '/catalog/usql/acl'} def revoke_acl_from_database( self, account_name, database_name, ace_type, principal_id, custom_headers=None, raw=False, **operation_config): @@ -3951,7 +4219,7 @@ def revoke_acl_from_database( op = "REVOKEACE" # Construct URL - url = '/catalog/usql/databases/{databaseName}/acl' + url = self.revoke_acl_from_database.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", 
self.config.adla_catalog_dns_suffix, 'str', skip_quote=True), @@ -3978,9 +4246,8 @@ def revoke_acl_from_database( body_content = self._serialize.body(parameters, 'AclDeleteParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3990,3 +4257,4 @@ def revoke_acl_from_database( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + revoke_acl_from_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/acl'} diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/data_lake_analytics_job_management_client.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/data_lake_analytics_job_management_client.py index e68f076cd078..c74fcbd0dc46 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/data_lake_analytics_job_management_client.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/data_lake_analytics_job_management_client.py @@ -9,7 +9,7 @@ # regenerated. # -------------------------------------------------------------------------- -from msrest.service_client import ServiceClient +from msrest.service_client import SDKClient from msrest import Serializer, Deserializer from msrestazure import AzureConfiguration from .version import VERSION @@ -50,7 +50,7 @@ def __init__( self.adla_job_dns_suffix = adla_job_dns_suffix -class DataLakeAnalyticsJobManagementClient(object): +class DataLakeAnalyticsJobManagementClient(SDKClient): """Creates an Azure Data Lake Analytics job client. :ivar config: Configuration for client. @@ -75,7 +75,7 @@ def __init__( self, credentials, adla_job_dns_suffix): self.config = DataLakeAnalyticsJobManagementClientConfiguration(credentials, adla_job_dns_suffix) - self._client = ServiceClient(self.config.credentials, self.config) + super(DataLakeAnalyticsJobManagementClient, self).__init__(self.config.credentials, self.config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self.api_version = '2017-09-01-preview' diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/__init__.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/__init__.py index 0c5bfceb2c8a..a45a43d93918 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/__init__.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/__init__.py @@ -9,84 +9,115 @@ # regenerated. 
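The hunks above repeat one mechanical pattern, so it is worth decoding once: the clients now subclass msrest's SDKClient instead of owning a bare ServiceClient; GET operations declare an Accept header rather than a pointless Content-Type (they carry no body); headers move into the request-construction call (self._client.get(url, query_parameters, header_parameters)) so that send() takes only the finished request; and every operation reads its route from a metadata attribute attached to the method right after its definition. A minimal, self-contained sketch of that last pattern (the class and its simplified formatting are hypothetical, not from this package):

# Standalone sketch of the metadata-url pattern used throughout this diff.
class CatalogSketch(object):
    def get_database(self, database_name):
        # The route comes from the function attribute instead of an
        # inline string literal inside the method body...
        url = self.get_database.metadata['url']
        return url.format(databaseName=database_name)

    # ...and is attached immediately after the definition, exactly like
    # `get_database.metadata = {...}` in the generated code above.
    get_database.metadata = {'url': '/catalog/usql/databases/{databaseName}'}


print(CatalogSketch().get_database('master'))
# -> /catalog/usql/databases/master

Keeping the template on the function also lets callers and tooling discover routes without invoking the operation.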
# -------------------------------------------------------------------------- -from .resource_usage_statistics import ResourceUsageStatistics -from .job_statistics_vertex import JobStatisticsVertex -from .job_statistics_vertex_stage import JobStatisticsVertexStage -from .job_statistics import JobStatistics -from .job_data_path import JobDataPath -from .job_state_audit_record import JobStateAuditRecord -from .scope_job_resource import ScopeJobResource -from .job_resource import JobResource -from .diagnostics import Diagnostics -from .usql_job_properties import USqlJobProperties -from .scope_job_properties import ScopeJobProperties -from .hive_job_properties import HiveJobProperties -from .job_properties import JobProperties -from .create_usql_job_properties import CreateUSqlJobProperties -from .create_scope_job_properties import CreateScopeJobProperties -from .create_job_properties import CreateJobProperties -from .job_inner_error import JobInnerError -from .job_error_details import JobErrorDetails -from .job_relationship_properties import JobRelationshipProperties -from .job_pipeline_run_information import JobPipelineRunInformation -from .job_pipeline_information import JobPipelineInformation -from .job_recurrence_information import JobRecurrenceInformation -from .create_scope_job_parameters import CreateScopeJobParameters -from .create_job_parameters import CreateJobParameters -from .build_job_parameters import BuildJobParameters -from .base_job_parameters import BaseJobParameters -from .job_information_basic import JobInformationBasic -from .job_information import JobInformation -from .update_job_parameters import UpdateJobParameters +try: + from .job_inner_error_py3 import JobInnerError + from .job_error_details_py3 import JobErrorDetails + from .job_state_audit_record_py3 import JobStateAuditRecord + from .job_properties_py3 import JobProperties + from .job_information_py3 import JobInformation + from .job_relationship_properties_py3 import JobRelationshipProperties + from .job_information_basic_py3 import JobInformationBasic + from .job_resource_py3 import JobResource + from .job_statistics_vertex_py3 import JobStatisticsVertex + from .resource_usage_statistics_py3 import ResourceUsageStatistics + from .job_statistics_vertex_stage_py3 import JobStatisticsVertexStage + from .job_statistics_py3 import JobStatistics + from .job_data_path_py3 import JobDataPath + from .diagnostics_py3 import Diagnostics + from .usql_job_properties_py3 import USqlJobProperties + from .hive_job_properties_py3 import HiveJobProperties + from .scope_job_resource_py3 import ScopeJobResource + from .scope_job_properties_py3 import ScopeJobProperties + from .job_pipeline_run_information_py3 import JobPipelineRunInformation + from .job_pipeline_information_py3 import JobPipelineInformation + from .job_recurrence_information_py3 import JobRecurrenceInformation + from .create_job_properties_py3 import CreateJobProperties + from .base_job_parameters_py3 import BaseJobParameters + from .create_job_parameters_py3 import CreateJobParameters + from .create_scope_job_parameters_py3 import CreateScopeJobParameters + from .create_usql_job_properties_py3 import CreateUSqlJobProperties + from .create_scope_job_properties_py3 import CreateScopeJobProperties + from .build_job_parameters_py3 import BuildJobParameters + from .update_job_parameters_py3 import UpdateJobParameters +except (SyntaxError, ImportError): + from .job_inner_error import JobInnerError + from .job_error_details import JobErrorDetails + from 
.job_state_audit_record import JobStateAuditRecord + from .job_properties import JobProperties + from .job_information import JobInformation + from .job_relationship_properties import JobRelationshipProperties + from .job_information_basic import JobInformationBasic + from .job_resource import JobResource + from .job_statistics_vertex import JobStatisticsVertex + from .resource_usage_statistics import ResourceUsageStatistics + from .job_statistics_vertex_stage import JobStatisticsVertexStage + from .job_statistics import JobStatistics + from .job_data_path import JobDataPath + from .diagnostics import Diagnostics + from .usql_job_properties import USqlJobProperties + from .hive_job_properties import HiveJobProperties + from .scope_job_resource import ScopeJobResource + from .scope_job_properties import ScopeJobProperties + from .job_pipeline_run_information import JobPipelineRunInformation + from .job_pipeline_information import JobPipelineInformation + from .job_recurrence_information import JobRecurrenceInformation + from .create_job_properties import CreateJobProperties + from .base_job_parameters import BaseJobParameters + from .create_job_parameters import CreateJobParameters + from .create_scope_job_parameters import CreateScopeJobParameters + from .create_usql_job_properties import CreateUSqlJobProperties + from .create_scope_job_properties import CreateScopeJobProperties + from .build_job_parameters import BuildJobParameters + from .update_job_parameters import UpdateJobParameters from .job_information_basic_paged import JobInformationBasicPaged from .job_pipeline_information_paged import JobPipelineInformationPaged from .job_recurrence_information_paged import JobRecurrenceInformationPaged from .data_lake_analytics_job_management_client_enums import ( - JobResourceType, SeverityTypes, - CompileMode, JobType, JobState, JobResult, + JobResourceType, + CompileMode, ) __all__ = [ - 'ResourceUsageStatistics', + 'JobInnerError', + 'JobErrorDetails', + 'JobStateAuditRecord', + 'JobProperties', + 'JobInformation', + 'JobRelationshipProperties', + 'JobInformationBasic', + 'JobResource', 'JobStatisticsVertex', + 'ResourceUsageStatistics', 'JobStatisticsVertexStage', 'JobStatistics', 'JobDataPath', - 'JobStateAuditRecord', - 'ScopeJobResource', - 'JobResource', 'Diagnostics', 'USqlJobProperties', - 'ScopeJobProperties', 'HiveJobProperties', - 'JobProperties', - 'CreateUSqlJobProperties', - 'CreateScopeJobProperties', - 'CreateJobProperties', - 'JobInnerError', - 'JobErrorDetails', - 'JobRelationshipProperties', + 'ScopeJobResource', + 'ScopeJobProperties', 'JobPipelineRunInformation', 'JobPipelineInformation', 'JobRecurrenceInformation', - 'CreateScopeJobParameters', + 'CreateJobProperties', + 'BaseJobParameters', 'CreateJobParameters', + 'CreateScopeJobParameters', + 'CreateUSqlJobProperties', + 'CreateScopeJobProperties', 'BuildJobParameters', - 'BaseJobParameters', - 'JobInformationBasic', - 'JobInformation', 'UpdateJobParameters', 'JobInformationBasicPaged', 'JobPipelineInformationPaged', 'JobRecurrenceInformationPaged', - 'JobResourceType', 'SeverityTypes', - 'CompileMode', 'JobType', 'JobState', 'JobResult', + 'JobResourceType', + 'CompileMode', ] diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters.py index b5d372b3eae9..cb184b099a95 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters.py 
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters.py @@ -15,10 +15,13 @@ class BaseJobParameters(Model): """Data Lake Analytics Job Parameters base class for build and submit. - :param type: the job type of the current job (Hive, USql, or Scope (for - internal use only)). Possible values include: 'USql', 'Hive', 'Scope' + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: the job specific properties. + :param properties: Required. The job specific properties. :type properties: ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties """ @@ -33,7 +36,7 @@ class BaseJobParameters(Model): 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, } - def __init__(self, type, properties): - super(BaseJobParameters, self).__init__() - self.type = type - self.properties = properties + def __init__(self, **kwargs): + super(BaseJobParameters, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.properties = kwargs.get('properties', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters_py3.py new file mode 100644 index 000000000000..df4ee0146392 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BaseJobParameters(Model): + """Data Lake Analytics Job Parameters base class for build and submit. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. 
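The import rewrite at the top of this models package is the standard AutoRest Python 2/3 split: every model gains a *_py3 twin with a keyword-only, annotated constructor, and __init__.py probes for it by simply attempting the import, falling back when the Python-3-only syntax cannot be parsed. The guard, spelled out with one real pair of modules from this diff (runnable wherever the package is installed):

# The *_py3 module uses `def __init__(self, *, ...) -> None`, which is a
# SyntaxError under Python 2, so the import itself acts as the probe.
try:
    from azure.mgmt.datalake.analytics.job.models.job_inner_error_py3 import JobInnerError
except (SyntaxError, ImportError):
    # Fall back to the universal, **kwargs-based implementation.
    from azure.mgmt.datalake.analytics.job.models.job_inner_error import JobInnerError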
+ :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + } + + def __init__(self, *, type, properties, **kwargs) -> None: + super(BaseJobParameters, self).__init__(**kwargs) + self.type = type + self.properties = properties diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters.py index a80cd740a66e..7e96058ad235 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters.py @@ -15,13 +15,16 @@ class BuildJobParameters(BaseJobParameters): """The parameters used to build a new Data Lake Analytics job. - :param type: the job type of the current job (Hive, USql, or Scope (for - internal use only)). Possible values include: 'USql', 'Hive', 'Scope' + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: the job specific properties. + :param properties: Required. The job specific properties. :type properties: ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - :param name: the friendly name of the job to build. + :param name: The friendly name of the job to build. :type name: str """ @@ -36,6 +39,6 @@ class BuildJobParameters(BaseJobParameters): 'name': {'key': 'name', 'type': 'str'}, } - def __init__(self, type, properties, name=None): - super(BuildJobParameters, self).__init__(type=type, properties=properties) - self.name = name + def __init__(self, **kwargs): + super(BuildJobParameters, self).__init__(**kwargs) + self.name = kwargs.get('name', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters_py3.py new file mode 100644 index 000000000000..6f9e91fba271 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .base_job_parameters_py3 import BaseJobParameters + + +class BuildJobParameters(BaseJobParameters): + """The parameters used to build a new Data Lake Analytics job. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). 
Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + :param name: The friendly name of the job to build. + :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + def __init__(self, *, type, properties, name: str=None, **kwargs) -> None: + super(BuildJobParameters, self).__init__(type=type, properties=properties, **kwargs) + self.name = name diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters.py index ac0e564fbc85..5ec2d1df2ccb 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters.py @@ -15,27 +15,36 @@ class CreateJobParameters(BaseJobParameters): """The parameters used to submit a new Data Lake Analytics job. - :param type: the job type of the current job (Hive, USql, or Scope (for - internal use only)). Possible values include: 'USql', 'Hive', 'Scope' + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: the job specific properties. + :param properties: Required. The job specific properties. :type properties: ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - :param name: the friendly name of the job to submit. + :param name: Required. The friendly name of the job to submit. :type name: str - :param degree_of_parallelism: the degree of parallelism to use for this - job. This must be greater than 0, if set to less than 0 it will default to - 1. Default value: 1 . + :param degree_of_parallelism: The degree of parallelism to use for this + job. At most one of degreeOfParallelism and degreeOfParallelismPercent + should be specified. If none, a default value of 1 will be used for + degreeOfParallelism. Default value: 1 . :type degree_of_parallelism: int - :param priority: the priority value to use for the current job. Lower + :param degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. At most one of degreeOfParallelism and + degreeOfParallelismPercent should be specified. If none, a default value + of 1 will be used for degreeOfParallelism. + :type degree_of_parallelism_percent: float + :param priority: The priority value to use for the current job. Lower numbers have a higher priority. By default, a job has a priority of 1000. This must be greater than 0. :type priority: int - :param log_file_patterns: the list of log file name patterns to find in + :param log_file_patterns: The list of log file name patterns to find in the logFolder. '*' is the only matching character allowed. 
Example format: jobExecution*.log or *mylog*.txt :type log_file_patterns: list[str] - :param related: the recurring job relationship information properties. + :param related: The recurring job relationship information properties. :type related: ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties """ @@ -51,15 +60,17 @@ class CreateJobParameters(BaseJobParameters): 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, 'name': {'key': 'name', 'type': 'str'}, 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, 'priority': {'key': 'priority', 'type': 'int'}, 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, } - def __init__(self, type, properties, name, degree_of_parallelism=1, priority=None, log_file_patterns=None, related=None): - super(CreateJobParameters, self).__init__(type=type, properties=properties) - self.name = name - self.degree_of_parallelism = degree_of_parallelism - self.priority = priority - self.log_file_patterns = log_file_patterns - self.related = related + def __init__(self, **kwargs): + super(CreateJobParameters, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.degree_of_parallelism = kwargs.get('degree_of_parallelism', 1) + self.degree_of_parallelism_percent = kwargs.get('degree_of_parallelism_percent', None) + self.priority = kwargs.get('priority', None) + self.log_file_patterns = kwargs.get('log_file_patterns', None) + self.related = kwargs.get('related', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters_py3.py new file mode 100644 index 000000000000..784fb18c0a6c --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters_py3.py @@ -0,0 +1,76 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .base_job_parameters_py3 import BaseJobParameters + + +class CreateJobParameters(BaseJobParameters): + """The parameters used to submit a new Data Lake Analytics job. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + :param name: Required. The friendly name of the job to submit. + :type name: str + :param degree_of_parallelism: The degree of parallelism to use for this + job. At most one of degreeOfParallelism and degreeOfParallelismPercent + should be specified. If none, a default value of 1 will be used for + degreeOfParallelism. Default value: 1 . 
+ :type degree_of_parallelism: int + :param degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. At most one of degreeOfParallelism and + degreeOfParallelismPercent should be specified. If none, a default value + of 1 will be used for degreeOfParallelism. + :type degree_of_parallelism_percent: float + :param priority: The priority value to use for the current job. Lower + numbers have a higher priority. By default, a job has a priority of 1000. + This must be greater than 0. + :type priority: int + :param log_file_patterns: The list of log file name patterns to find in + the logFolder. '*' is the only matching character allowed. Example format: + jobExecution*.log or *mylog*.txt + :type log_file_patterns: list[str] + :param related: The recurring job relationship information properties. + :type related: + ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + 'name': {'key': 'name', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + } + + def __init__(self, *, type, properties, name: str, degree_of_parallelism: int=1, degree_of_parallelism_percent: float=None, priority: int=None, log_file_patterns=None, related=None, **kwargs) -> None: + super(CreateJobParameters, self).__init__(type=type, properties=properties, **kwargs) + self.name = name + self.degree_of_parallelism = degree_of_parallelism + self.degree_of_parallelism_percent = degree_of_parallelism_percent + self.priority = priority + self.log_file_patterns = log_file_patterns + self.related = related diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties.py index 1ffce245434d..60960f47bd98 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties.py @@ -18,13 +18,15 @@ class CreateJobProperties(Model): You probably want to use the sub-classes and not this class directly. Known sub-classes are: CreateUSqlJobProperties, CreateScopeJobProperties - :param runtime_version: the runtime version of the Data Lake Analytics + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics engine to use for the specific type of job being run. :type runtime_version: str - :param script: the script to run. Please note that the maximum script size - is 3 MB. + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. :type script: str - :param type: Constant filled by server. + :param type: Required. Constant filled by server. 
:type type: str """ @@ -43,8 +45,8 @@ class CreateJobProperties(Model): 'type': {'USql': 'CreateUSqlJobProperties', 'Scope': 'CreateScopeJobProperties'} } - def __init__(self, script, runtime_version=None): - super(CreateJobProperties, self).__init__() - self.runtime_version = runtime_version - self.script = script + def __init__(self, **kwargs): + super(CreateJobProperties, self).__init__(**kwargs) + self.runtime_version = kwargs.get('runtime_version', None) + self.script = kwargs.get('script', None) self.type = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties_py3.py new file mode 100644 index 000000000000..8e1e9942743b --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CreateJobProperties(Model): + """The common Data Lake Analytics job properties for job submission. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: CreateUSqlJobProperties, CreateScopeJobProperties + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'USql': 'CreateUSqlJobProperties', 'Scope': 'CreateScopeJobProperties'} + } + + def __init__(self, *, script: str, runtime_version: str=None, **kwargs) -> None: + super(CreateJobProperties, self).__init__(**kwargs) + self.runtime_version = runtime_version + self.script = script + self.type = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters.py index 07b2f6383c2d..2fcd22a45be0 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters.py @@ -16,31 +16,40 @@ class CreateScopeJobParameters(CreateJobParameters): """The parameters used to submit a new Data Lake Analytics Scope job. (Only for use internally with Scope job type.). - :param type: the job type of the current job (Hive, USql, or Scope (for - internal use only)). 
Possible values include: 'USql', 'Hive', 'Scope' + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: the job specific properties. + :param properties: Required. The job specific properties. :type properties: ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - :param name: the friendly name of the job to submit. + :param name: Required. The friendly name of the job to submit. :type name: str - :param degree_of_parallelism: the degree of parallelism to use for this - job. This must be greater than 0, if set to less than 0 it will default to - 1. Default value: 1 . + :param degree_of_parallelism: The degree of parallelism to use for this + job. At most one of degreeOfParallelism and degreeOfParallelismPercent + should be specified. If none, a default value of 1 will be used for + degreeOfParallelism. Default value: 1 . :type degree_of_parallelism: int - :param priority: the priority value to use for the current job. Lower + :param degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. At most one of degreeOfParallelism and + degreeOfParallelismPercent should be specified. If none, a default value + of 1 will be used for degreeOfParallelism. + :type degree_of_parallelism_percent: float + :param priority: The priority value to use for the current job. Lower numbers have a higher priority. By default, a job has a priority of 1000. This must be greater than 0. :type priority: int - :param log_file_patterns: the list of log file name patterns to find in + :param log_file_patterns: The list of log file name patterns to find in the logFolder. '*' is the only matching character allowed. Example format: jobExecution*.log or *mylog*.txt :type log_file_patterns: list[str] - :param related: the recurring job relationship information properties. + :param related: The recurring job relationship information properties. :type related: ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - :param tags: the key-value pairs used to add additional metadata to the - job information. (Only for use internally with Scope job type.) + :param tags: The key-value pairs used to add additional metadata to the + job information. 
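CreateJobProperties above is a polymorphic base: _subtype_map maps the wire discriminator `type` to a concrete class, the base constructor leaves self.type = None, and each subclass pins it ('USql' or 'Scope'). On responses, msrest's Deserializer consults that map to hand back the right subclass. A hedged sketch, passing a pre-decoded dict where a raw response would normally go (assumes the package and msrest are installed):

# Sketch: discriminator-driven deserialization via _subtype_map.
from msrest import Deserializer
from azure.mgmt.datalake.analytics.job import models

client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
deserialize = Deserializer(client_models)

# The 'type' field selects the subclass declared in _subtype_map.
props = deserialize('CreateJobProperties', {'type': 'USql', 'script': '@a = SELECT 1;'})
print(type(props).__name__)  # CreateUSqlJobProperties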
:type tags: dict[str, str] """ @@ -55,12 +64,13 @@ class CreateScopeJobParameters(CreateJobParameters): 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, 'name': {'key': 'name', 'type': 'str'}, 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, 'priority': {'key': 'priority', 'type': 'int'}, 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, 'tags': {'key': 'tags', 'type': '{str}'}, } - def __init__(self, type, properties, name, degree_of_parallelism=1, priority=None, log_file_patterns=None, related=None, tags=None): - super(CreateScopeJobParameters, self).__init__(type=type, properties=properties, name=name, degree_of_parallelism=degree_of_parallelism, priority=priority, log_file_patterns=log_file_patterns, related=related) - self.tags = tags + def __init__(self, **kwargs): + super(CreateScopeJobParameters, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters_py3.py new file mode 100644 index 000000000000..65256454b400 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters_py3.py @@ -0,0 +1,76 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .create_job_parameters_py3 import CreateJobParameters + + +class CreateScopeJobParameters(CreateJobParameters): + """The parameters used to submit a new Data Lake Analytics Scope job. (Only + for use internally with Scope job type.). + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + :param name: Required. The friendly name of the job to submit. + :type name: str + :param degree_of_parallelism: The degree of parallelism to use for this + job. At most one of degreeOfParallelism and degreeOfParallelismPercent + should be specified. If none, a default value of 1 will be used for + degreeOfParallelism. Default value: 1 . + :type degree_of_parallelism: int + :param degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. At most one of degreeOfParallelism and + degreeOfParallelismPercent should be specified. If none, a default value + of 1 will be used for degreeOfParallelism. + :type degree_of_parallelism_percent: float + :param priority: The priority value to use for the current job. Lower + numbers have a higher priority. By default, a job has a priority of 1000. + This must be greater than 0. 
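Note the contract change buried in these docstrings: the old "if set to less than 0 it will default to 1" language for degree_of_parallelism is gone; the field is now documented as mutually exclusive with the new degree_of_parallelism_percent, and only when neither is supplied does degreeOfParallelism default to 1. A hedged usage sketch (the script and job name are placeholders):

# Usage sketch: submission parameters with exactly one parallelism knob set.
from azure.mgmt.datalake.analytics.job.models import (
    CreateJobParameters, CreateUSqlJobProperties, JobType)

parameters = CreateJobParameters(
    type=JobType.usql,
    properties=CreateUSqlJobProperties(script='/* U-SQL script elided */'),
    name='example-usql-job',
    degree_of_parallelism=8,  # or degree_of_parallelism_percent=50.0, not both
    priority=100,             # lower value = higher priority; must be > 0
)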
+ :type priority: int + :param log_file_patterns: The list of log file name patterns to find in + the logFolder. '*' is the only matching character allowed. Example format: + jobExecution*.log or *mylog*.txt + :type log_file_patterns: list[str] + :param related: The recurring job relationship information properties. + :type related: + ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties + :param tags: The key-value pairs used to add additional metadata to the + job information. + :type tags: dict[str, str] + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + 'name': {'key': 'name', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, *, type, properties, name: str, degree_of_parallelism: int=1, degree_of_parallelism_percent: float=None, priority: int=None, log_file_patterns=None, related=None, tags=None, **kwargs) -> None: + super(CreateScopeJobParameters, self).__init__(type=type, properties=properties, name=name, degree_of_parallelism=degree_of_parallelism, degree_of_parallelism_percent=degree_of_parallelism_percent, priority=priority, log_file_patterns=log_file_patterns, related=related, **kwargs) + self.tags = tags diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties.py index 09d0267599e1..3c720218ff2d 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties.py @@ -13,20 +13,23 @@ class CreateScopeJobProperties(CreateJobProperties): - """Scope job properties used when submitting Scope jobs. + """Scope job properties used when submitting Scope jobs. (Only for use + internally with Scope job type.). - :param runtime_version: the runtime version of the Data Lake Analytics + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics engine to use for the specific type of job being run. :type runtime_version: str - :param script: the script to run. Please note that the maximum script size - is 3 MB. + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. :type script: str - :param type: Constant filled by server. + :param type: Required. Constant filled by server. :type type: str - :param resources: the list of resources that are required by the job. + :param resources: The list of resources that are required by the job. :type resources: list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] - :param notifier: the list of email addresses, separated by semi-colons, to + :param notifier: The list of email addresses, separated by semi-colons, to notify when the job reaches a terminal state. 
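Each _attribute_map entry above carries the wire contract for one field: the dict key is the snake_case Python attribute, `key` is the camelCase JSON property, and `type` names the msrest (de)serialization rule ('int', '[str]' for a list of strings, '{str}' for a string map, or another model name). A runnable sketch with a hypothetical mini-model showing the key mapping in action:

# Sketch: _attribute_map drives the JSON names during serialization.
from msrest import Serializer
from msrest.serialization import Model


class MiniParams(Model):  # hypothetical model, not from this package
    _attribute_map = {
        'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
        'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(MiniParams, self).__init__(**kwargs)
        self.degree_of_parallelism = kwargs.get('degree_of_parallelism', None)
        self.log_file_patterns = kwargs.get('log_file_patterns', None)


body = Serializer({'MiniParams': MiniParams}).body(
    MiniParams(degree_of_parallelism=8, log_file_patterns=['run*.log']),
    'MiniParams')
print(body)  # {'degreeOfParallelism': 8, 'logFilePatterns': ['run*.log']}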
:type notifier: str """ @@ -44,8 +47,8 @@ class CreateScopeJobProperties(CreateJobProperties): 'notifier': {'key': 'notifier', 'type': 'str'}, } - def __init__(self, script, runtime_version=None, resources=None, notifier=None): - super(CreateScopeJobProperties, self).__init__(runtime_version=runtime_version, script=script) - self.resources = resources - self.notifier = notifier + def __init__(self, **kwargs): + super(CreateScopeJobProperties, self).__init__(**kwargs) + self.resources = kwargs.get('resources', None) + self.notifier = kwargs.get('notifier', None) self.type = 'Scope' diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties_py3.py new file mode 100644 index 000000000000..e67a033dd8cd --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .create_job_properties_py3 import CreateJobProperties + + +class CreateScopeJobProperties(CreateJobProperties): + """Scope job properties used when submitting Scope jobs. (Only for use + internally with Scope job type.). + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :param resources: The list of resources that are required by the job. + :type resources: + list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] + :param notifier: The list of email addresses, separated by semi-colons, to + notify when the job reaches a terminal state. 
+ :type notifier: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'resources': {'key': 'resources', 'type': '[ScopeJobResource]'}, + 'notifier': {'key': 'notifier', 'type': 'str'}, + } + + def __init__(self, *, script: str, runtime_version: str=None, resources=None, notifier: str=None, **kwargs) -> None: + super(CreateScopeJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) + self.resources = resources + self.notifier = notifier + self.type = 'Scope' diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties.py index 1640d2a5a6f2..b2a399714c82 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties.py @@ -15,15 +15,17 @@ class CreateUSqlJobProperties(CreateJobProperties): """U-SQL job properties used when submitting U-SQL jobs. - :param runtime_version: the runtime version of the Data Lake Analytics + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics engine to use for the specific type of job being run. :type runtime_version: str - :param script: the script to run. Please note that the maximum script size - is 3 MB. + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. :type script: str - :param type: Constant filled by server. + :param type: Required. Constant filled by server. :type type: str - :param compile_mode: the specific compilation mode for the job used during + :param compile_mode: The specific compilation mode for the job used during execution. If this is not specified during submission, the server will determine the optimal compilation mode. Possible values include: 'Semantic', 'Full', 'SingleBox' @@ -43,7 +45,7 @@ class CreateUSqlJobProperties(CreateJobProperties): 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, } - def __init__(self, script, runtime_version=None, compile_mode=None): - super(CreateUSqlJobProperties, self).__init__(runtime_version=runtime_version, script=script) - self.compile_mode = compile_mode + def __init__(self, **kwargs): + super(CreateUSqlJobProperties, self).__init__(**kwargs) + self.compile_mode = kwargs.get('compile_mode', None) self.type = 'USql' diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties_py3.py new file mode 100644 index 000000000000..946ada0909e5 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
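compile_mode is the one field CreateUSqlJobProperties adds over its base; left unset, the server picks the optimal compilation mode itself. A hedged usage sketch (the script is a placeholder):

# Usage sketch: U-SQL submission properties with an explicit compile mode.
from azure.mgmt.datalake.analytics.job.models import (
    CreateUSqlJobProperties, CompileMode)

props = CreateUSqlJobProperties(
    script='@a = SELECT 1 AS x; OUTPUT @a TO "/out.tsv" USING Outputters.Tsv();',
    compile_mode=CompileMode.full,  # Semantic | Full | SingleBox; omit to let the server decide
)
print(props.type)  # 'USql' -- the discriminator pinned by the subclass __init__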
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .create_job_properties_py3 import CreateJobProperties + + +class CreateUSqlJobProperties(CreateJobProperties): + """U-SQL job properties used when submitting U-SQL jobs. + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :param compile_mode: The specific compilation mode for the job used during + execution. If this is not specified during submission, the server will + determine the optimal compilation mode. Possible values include: + 'Semantic', 'Full', 'SingleBox' + :type compile_mode: str or + ~azure.mgmt.datalake.analytics.job.models.CompileMode + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, + } + + def __init__(self, *, script: str, runtime_version: str=None, compile_mode=None, **kwargs) -> None: + super(CreateUSqlJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) + self.compile_mode = compile_mode + self.type = 'USql' diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/data_lake_analytics_job_management_client_enums.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/data_lake_analytics_job_management_client_enums.py index b90127007fc3..e81ed60f32da 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/data_lake_analytics_job_management_client_enums.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/data_lake_analytics_job_management_client_enums.py @@ -12,17 +12,7 @@ from enum import Enum -class JobResourceType(Enum): - - vertex_resource = "VertexResource" - job_manager_resource = "JobManagerResource" - statistics_resource = "StatisticsResource" - vertex_resource_in_user_folder = "VertexResourceInUserFolder" - job_manager_resource_in_user_folder = "JobManagerResourceInUserFolder" - statistics_resource_in_user_folder = "StatisticsResourceInUserFolder" - - -class SeverityTypes(Enum): +class SeverityTypes(str, Enum): warning = "Warning" error = "Error" @@ -32,21 +22,14 @@ class SeverityTypes(Enum): user_warning = "UserWarning" -class CompileMode(Enum): - - semantic = "Semantic" - full = "Full" - single_box = "SingleBox" - - -class JobType(Enum): +class JobType(str, Enum): usql = "USql" hive = "Hive" scope = "Scope" -class JobState(Enum): +class JobState(str, Enum): accepted = "Accepted" compiling = "Compiling" @@ -58,11 +41,30 @@ class JobState(Enum): starting = "Starting" paused = "Paused" waiting_for_capacity = "WaitingForCapacity" + yielded = "Yielded" + finalizing = "Finalizing" -class JobResult(Enum): +class JobResult(str, Enum): none = "None" succeeded = "Succeeded" cancelled = "Cancelled" failed = "Failed" + + +class JobResourceType(str, Enum): + + vertex_resource = "VertexResource" + job_manager_resource = 
"JobManagerResource" + statistics_resource = "StatisticsResource" + vertex_resource_in_user_folder = "VertexResourceInUserFolder" + job_manager_resource_in_user_folder = "JobManagerResourceInUserFolder" + statistics_resource_in_user_folder = "StatisticsResourceInUserFolder" + + +class CompileMode(str, Enum): + + semantic = "Semantic" + full = "Full" + single_box = "SingleBox" diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics.py index fdf5b3758f39..bdf4b3834333 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics.py @@ -18,45 +18,45 @@ class Diagnostics(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar column_number: the column where the error occured. - :vartype column_number: int - :ivar end: the ending index of the error. - :vartype end: int - :ivar line_number: the line number the error occured on. - :vartype line_number: int - :ivar message: the error message. + :ivar message: The error message. :vartype message: str - :ivar severity: the severity of the error. Possible values include: + :ivar severity: The severity of the error. Possible values include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', 'UserWarning' :vartype severity: str or ~azure.mgmt.datalake.analytics.job.models.SeverityTypes - :ivar start: the starting index of the error. + :ivar line_number: The line number the error occured on. + :vartype line_number: int + :ivar column_number: The column where the error occured. + :vartype column_number: int + :ivar start: The starting index of the error. :vartype start: int + :ivar end: The ending index of the error. 
+ :vartype end: int """ _validation = { - 'column_number': {'readonly': True}, - 'end': {'readonly': True}, - 'line_number': {'readonly': True}, 'message': {'readonly': True}, 'severity': {'readonly': True}, + 'line_number': {'readonly': True}, + 'column_number': {'readonly': True}, 'start': {'readonly': True}, + 'end': {'readonly': True}, } _attribute_map = { - 'column_number': {'key': 'columnNumber', 'type': 'int'}, - 'end': {'key': 'end', 'type': 'int'}, - 'line_number': {'key': 'lineNumber', 'type': 'int'}, 'message': {'key': 'message', 'type': 'str'}, 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, + 'line_number': {'key': 'lineNumber', 'type': 'int'}, + 'column_number': {'key': 'columnNumber', 'type': 'int'}, 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, } - def __init__(self): - super(Diagnostics, self).__init__() - self.column_number = None - self.end = None - self.line_number = None + def __init__(self, **kwargs): + super(Diagnostics, self).__init__(**kwargs) self.message = None self.severity = None + self.line_number = None + self.column_number = None self.start = None + self.end = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics_py3.py new file mode 100644 index 000000000000..3887cba9542a --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Diagnostics(Model): + """Error diagnostic information for failed jobs. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar message: The error message. + :vartype message: str + :ivar severity: The severity of the error. Possible values include: + 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', 'UserWarning' + :vartype severity: str or + ~azure.mgmt.datalake.analytics.job.models.SeverityTypes + :ivar line_number: The line number the error occured on. + :vartype line_number: int + :ivar column_number: The column where the error occured. + :vartype column_number: int + :ivar start: The starting index of the error. + :vartype start: int + :ivar end: The ending index of the error. 
+ :vartype end: int + """ + + _validation = { + 'message': {'readonly': True}, + 'severity': {'readonly': True}, + 'line_number': {'readonly': True}, + 'column_number': {'readonly': True}, + 'start': {'readonly': True}, + 'end': {'readonly': True}, + } + + _attribute_map = { + 'message': {'key': 'message', 'type': 'str'}, + 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, + 'line_number': {'key': 'lineNumber', 'type': 'int'}, + 'column_number': {'key': 'columnNumber', 'type': 'int'}, + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, **kwargs) -> None: + super(Diagnostics, self).__init__(**kwargs) + self.message = None + self.severity = None + self.line_number = None + self.column_number = None + self.start = None + self.end = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties.py index 217ba284a0b8..5e8a85f0f4fd 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties.py @@ -18,24 +18,26 @@ class HiveJobProperties(JobProperties): Variables are only populated by the server, and will be ignored when sending a request. - :param runtime_version: the runtime version of the Data Lake Analytics + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics engine to use for the specific type of job being run. :type runtime_version: str - :param script: the script to run. Please note that the maximum script size - is 3 MB. + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. :type script: str - :param type: Constant filled by server. + :param type: Required. Constant filled by server. :type type: str - :ivar logs_location: the Hive logs location + :ivar logs_location: The Hive logs location. :vartype logs_location: str - :ivar output_location: the location of Hive job output files (both - execution output and results) + :ivar output_location: The location of Hive job output files (both + execution output and results). :vartype output_location: str - :ivar statement_count: the number of statements that will be run based on - the script + :ivar statement_count: The number of statements that will be run based on + the script. :vartype statement_count: int - :ivar executed_statement_count: the number of statements that have been - run based on the script + :ivar executed_statement_count: The number of statements that have been + run based on the script. 
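Every field on Diagnostics is server-populated (all `readonly`), so a locally constructed instance starts out all-None and real values arrive through msrest deserialization using the JSON keys in `_attribute_map`. A sketch of that path, reusing the same model-discovery trick the generated client uses internally; the payload below is hypothetical:

from msrest import Deserializer
from azure.mgmt.datalake.analytics.job import models

# Build a deserializer over the package's model classes, the same way the
# generated client does internally.
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
deserialize = Deserializer(client_models)

# Hypothetical wire payload; the keys mirror _attribute_map above.
payload = {
    "message": "syntax error near 'FORM'",
    "severity": "Error",
    "lineNumber": 3,
    "columnNumber": 12,
    "start": 57,
    "end": 61,
}
diag = deserialize("Diagnostics", payload)
print(diag.severity, diag.line_number, diag.column_number)
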
:vartype executed_statement_count: int """ @@ -58,8 +60,8 @@ class HiveJobProperties(JobProperties): 'executed_statement_count': {'key': 'executedStatementCount', 'type': 'int'}, } - def __init__(self, script, runtime_version=None): - super(HiveJobProperties, self).__init__(runtime_version=runtime_version, script=script) + def __init__(self, **kwargs): + super(HiveJobProperties, self).__init__(**kwargs) self.logs_location = None self.output_location = None self.statement_count = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties_py3.py new file mode 100644 index 000000000000..8660637ca460 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties_py3.py @@ -0,0 +1,69 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .job_properties_py3 import JobProperties + + +class HiveJobProperties(JobProperties): + """Hive job properties used when retrieving Hive jobs. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :ivar logs_location: The Hive logs location. + :vartype logs_location: str + :ivar output_location: The location of Hive job output files (both + execution output and results). + :vartype output_location: str + :ivar statement_count: The number of statements that will be run based on + the script. + :vartype statement_count: int + :ivar executed_statement_count: The number of statements that have been + run based on the script. 
+ :vartype executed_statement_count: int + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + 'logs_location': {'readonly': True}, + 'output_location': {'readonly': True}, + 'statement_count': {'readonly': True}, + 'executed_statement_count': {'readonly': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'logs_location': {'key': 'logsLocation', 'type': 'str'}, + 'output_location': {'key': 'outputLocation', 'type': 'str'}, + 'statement_count': {'key': 'statementCount', 'type': 'int'}, + 'executed_statement_count': {'key': 'executedStatementCount', 'type': 'int'}, + } + + def __init__(self, *, script: str, runtime_version: str=None, **kwargs) -> None: + super(HiveJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) + self.logs_location = None + self.output_location = None + self.statement_count = None + self.executed_statement_count = None + self.type = 'Hive' diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path.py index edda346e5905..1ec4c66441d3 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path.py @@ -18,11 +18,11 @@ class JobDataPath(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar job_id: the id of the job this data is for. + :ivar job_id: The ID of the job this data is for. :vartype job_id: str - :ivar command: the command that this job data relates to. + :ivar command: The command that this job data relates to. :vartype command: str - :ivar paths: the list of paths to all of the job data. + :ivar paths: The list of paths to all of the job data. :vartype paths: list[str] """ @@ -38,8 +38,8 @@ class JobDataPath(Model): 'paths': {'key': 'paths', 'type': '[str]'}, } - def __init__(self): - super(JobDataPath, self).__init__() + def __init__(self, **kwargs): + super(JobDataPath, self).__init__(**kwargs) self.job_id = None self.command = None self.paths = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path_py3.py new file mode 100644 index 000000000000..08f8667caf49 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDataPath(Model): + """A Data Lake Analytics job data path item. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar job_id: The ID of the job this data is for. + :vartype job_id: str + :ivar command: The command that this job data relates to. 
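The hunks above show the new twin-module layout: each model keeps a Python 2 flavor whose constructor reads everything from `**kwargs`, plus a `*_py3.py` twin with keyword-only, type-annotated parameters; the truncated try/except in the models `__init__` earlier in this diff apparently selects between them at import time. Either way, keyword construction is the one spelling that works everywhere:

from azure.mgmt.datalake.analytics.job.models import HiveJobProperties

# Keyword construction works with both flavors; positional arguments do
# not, since the py3 twin marks every parameter keyword-only.
props = HiveJobProperties(script="SELECT COUNT(*) FROM logs;")
print(props.type)           # 'Hive' -- the constant filled in by the constructor
print(props.logs_location)  # None  -- readonly, populated only by the server
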
+ :vartype command: str + :ivar paths: The list of paths to all of the job data. + :vartype paths: list[str] + """ + + _validation = { + 'job_id': {'readonly': True}, + 'command': {'readonly': True}, + 'paths': {'readonly': True}, + } + + _attribute_map = { + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'command': {'key': 'command', 'type': 'str'}, + 'paths': {'key': 'paths', 'type': '[str]'}, + } + + def __init__(self, **kwargs) -> None: + super(JobDataPath, self).__init__(**kwargs) + self.job_id = None + self.command = None + self.paths = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details.py index 9085a596e9ec..c72f9491bc9d 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details.py @@ -18,94 +18,94 @@ class JobErrorDetails(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar description: the error message description + :ivar error_id: The specific identifier for the type of error encountered + in the job. + :vartype error_id: str + :ivar severity: The severity level of the failure. Possible values + include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', + 'UserWarning' + :vartype severity: str or + ~azure.mgmt.datalake.analytics.job.models.SeverityTypes + :ivar source: The ultimate source of the failure (usually either SYSTEM or + USER). + :vartype source: str + :ivar message: The user friendly error message for the failure. + :vartype message: str + :ivar description: The error message description. :vartype description: str - :ivar details: the details of the error message. + :ivar details: The details of the error message. :vartype details: str - :ivar end_offset: the end offset in the job where the error was found. + :ivar line_number: The specific line number in the job where the error + occured. + :vartype line_number: int + :ivar start_offset: The start offset in the job where the error was found + :vartype start_offset: int + :ivar end_offset: The end offset in the job where the error was found. :vartype end_offset: int - :ivar error_id: the specific identifier for the type of error encountered - in the job. - :vartype error_id: str - :ivar file_path: the path to any supplemental error files, if any. + :ivar resolution: The recommended resolution for the failure, if any. + :vartype resolution: str + :ivar file_path: The path to any supplemental error files, if any. :vartype file_path: str - :ivar help_link: the link to MSDN or Azure help for this type of error, if + :ivar help_link: The link to MSDN or Azure help for this type of error, if any. :vartype help_link: str - :ivar internal_diagnostics: the internal diagnostic stack trace if the + :ivar internal_diagnostics: The internal diagnostic stack trace if the user requesting the job error details has sufficient permissions it will be retrieved, otherwise it will be empty. :vartype internal_diagnostics: str - :ivar line_number: the specific line number in the job where the error - occured. - :vartype line_number: int - :ivar message: the user friendly error message for the failure. - :vartype message: str - :ivar resolution: the recommended resolution for the failure, if any. 
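JobDataPath is another fully read-only model, and the `readonly` flags also matter on the way out: msrest skips such attributes when serializing a request body, which is what the "ignored when sending a request" note means in practice. A small sketch:

from msrest import Serializer
from azure.mgmt.datalake.analytics.job import models

client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
serializer = Serializer(client_models)

path = models.JobDataPath()
path.job_id = "set-locally-but-never-sent"

# Readonly attributes are dropped on serialization, so the body is empty.
print(serializer.body(path, 'JobDataPath'))  # {}
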
- :vartype resolution: str - :ivar inner_error: the inner error of this specific job error message, if + :ivar inner_error: The inner error of this specific job error message, if any. :vartype inner_error: ~azure.mgmt.datalake.analytics.job.models.JobInnerError - :ivar severity: the severity level of the failure. Possible values - include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', - 'UserWarning' - :vartype severity: str or - ~azure.mgmt.datalake.analytics.job.models.SeverityTypes - :ivar source: the ultimate source of the failure (usually either SYSTEM or - USER). - :vartype source: str - :ivar start_offset: the start offset in the job where the error was found - :vartype start_offset: int """ _validation = { + 'error_id': {'readonly': True}, + 'severity': {'readonly': True}, + 'source': {'readonly': True}, + 'message': {'readonly': True}, 'description': {'readonly': True}, 'details': {'readonly': True}, + 'line_number': {'readonly': True}, + 'start_offset': {'readonly': True}, 'end_offset': {'readonly': True}, - 'error_id': {'readonly': True}, + 'resolution': {'readonly': True}, 'file_path': {'readonly': True}, 'help_link': {'readonly': True}, 'internal_diagnostics': {'readonly': True}, - 'line_number': {'readonly': True}, - 'message': {'readonly': True}, - 'resolution': {'readonly': True}, 'inner_error': {'readonly': True}, - 'severity': {'readonly': True}, - 'source': {'readonly': True}, - 'start_offset': {'readonly': True}, } _attribute_map = { + 'error_id': {'key': 'errorId', 'type': 'str'}, + 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, + 'source': {'key': 'source', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'details': {'key': 'details', 'type': 'str'}, + 'line_number': {'key': 'lineNumber', 'type': 'int'}, + 'start_offset': {'key': 'startOffset', 'type': 'int'}, 'end_offset': {'key': 'endOffset', 'type': 'int'}, - 'error_id': {'key': 'errorId', 'type': 'str'}, + 'resolution': {'key': 'resolution', 'type': 'str'}, 'file_path': {'key': 'filePath', 'type': 'str'}, 'help_link': {'key': 'helpLink', 'type': 'str'}, 'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'}, - 'line_number': {'key': 'lineNumber', 'type': 'int'}, - 'message': {'key': 'message', 'type': 'str'}, - 'resolution': {'key': 'resolution', 'type': 'str'}, 'inner_error': {'key': 'innerError', 'type': 'JobInnerError'}, - 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, - 'source': {'key': 'source', 'type': 'str'}, - 'start_offset': {'key': 'startOffset', 'type': 'int'}, } - def __init__(self): - super(JobErrorDetails, self).__init__() + def __init__(self, **kwargs): + super(JobErrorDetails, self).__init__(**kwargs) + self.error_id = None + self.severity = None + self.source = None + self.message = None self.description = None self.details = None + self.line_number = None + self.start_offset = None self.end_offset = None - self.error_id = None + self.resolution = None self.file_path = None self.help_link = None self.internal_diagnostics = None - self.line_number = None - self.message = None - self.resolution = None self.inner_error = None - self.severity = None - self.source = None - self.start_offset = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details_py3.py new file mode 100644 index 000000000000..6dbffa254dd3 --- /dev/null +++ 
b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobErrorDetails(Model): + """The Data Lake Analytics job error details. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar error_id: The specific identifier for the type of error encountered + in the job. + :vartype error_id: str + :ivar severity: The severity level of the failure. Possible values + include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', + 'UserWarning' + :vartype severity: str or + ~azure.mgmt.datalake.analytics.job.models.SeverityTypes + :ivar source: The ultimate source of the failure (usually either SYSTEM or + USER). + :vartype source: str + :ivar message: The user friendly error message for the failure. + :vartype message: str + :ivar description: The error message description. + :vartype description: str + :ivar details: The details of the error message. + :vartype details: str + :ivar line_number: The specific line number in the job where the error + occured. + :vartype line_number: int + :ivar start_offset: The start offset in the job where the error was found + :vartype start_offset: int + :ivar end_offset: The end offset in the job where the error was found. + :vartype end_offset: int + :ivar resolution: The recommended resolution for the failure, if any. + :vartype resolution: str + :ivar file_path: The path to any supplemental error files, if any. + :vartype file_path: str + :ivar help_link: The link to MSDN or Azure help for this type of error, if + any. + :vartype help_link: str + :ivar internal_diagnostics: The internal diagnostic stack trace if the + user requesting the job error details has sufficient permissions it will + be retrieved, otherwise it will be empty. + :vartype internal_diagnostics: str + :ivar inner_error: The inner error of this specific job error message, if + any. 
+ :vartype inner_error: + ~azure.mgmt.datalake.analytics.job.models.JobInnerError + """ + + _validation = { + 'error_id': {'readonly': True}, + 'severity': {'readonly': True}, + 'source': {'readonly': True}, + 'message': {'readonly': True}, + 'description': {'readonly': True}, + 'details': {'readonly': True}, + 'line_number': {'readonly': True}, + 'start_offset': {'readonly': True}, + 'end_offset': {'readonly': True}, + 'resolution': {'readonly': True}, + 'file_path': {'readonly': True}, + 'help_link': {'readonly': True}, + 'internal_diagnostics': {'readonly': True}, + 'inner_error': {'readonly': True}, + } + + _attribute_map = { + 'error_id': {'key': 'errorId', 'type': 'str'}, + 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, + 'source': {'key': 'source', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + 'line_number': {'key': 'lineNumber', 'type': 'int'}, + 'start_offset': {'key': 'startOffset', 'type': 'int'}, + 'end_offset': {'key': 'endOffset', 'type': 'int'}, + 'resolution': {'key': 'resolution', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'help_link': {'key': 'helpLink', 'type': 'str'}, + 'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'}, + 'inner_error': {'key': 'innerError', 'type': 'JobInnerError'}, + } + + def __init__(self, **kwargs) -> None: + super(JobErrorDetails, self).__init__(**kwargs) + self.error_id = None + self.severity = None + self.source = None + self.message = None + self.description = None + self.details = None + self.line_number = None + self.start_offset = None + self.end_offset = None + self.resolution = None + self.file_path = None + self.help_link = None + self.internal_diagnostics = None + self.inner_error = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information.py index 8235029bffcd..ef22598b1451 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information.py @@ -19,61 +19,70 @@ class JobInformation(JobInformationBasic): Variables are only populated by the server, and will be ignored when sending a request. - :ivar job_id: the job's unique identifier (a GUID). + All required parameters must be populated in order to send to Azure. + + :ivar job_id: The job's unique identifier (a GUID). :vartype job_id: str - :param name: the friendly name of the job. + :param name: Required. The friendly name of the job. :type name: str - :param type: the job type of the current job (Hive, USql, or Scope (for - internal use only)). Possible values include: 'USql', 'Hive', 'Scope' + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :ivar submitter: the user or account that submitted the job. + :ivar submitter: The user or account that submitted the job. :vartype submitter: str - :param degree_of_parallelism: the degree of parallelism used for this job. - This must be greater than 0, if set to less than 0 it will default to 1. + :param degree_of_parallelism: The degree of parallelism used for this job. Default value: 1 . 
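The JobErrorDetails rewrite only reorders attributes and restyles docstrings; the wire keys in `_attribute_map` are unchanged, so existing payloads deserialize exactly as before. A small helper for surfacing these fields; `job_info` is assumed to be a JobInformation retrieved from the service:

def print_job_errors(job_info):
    # error_message is only populated for failed jobs.
    for err in job_info.error_message or []:
        print("[{0}] {1} ({2})".format(err.severity, err.message, err.error_id))
        if err.line_number is not None:
            print("    line {0}, offsets {1}-{2}".format(
                err.line_number, err.start_offset, err.end_offset))
        if err.resolution:
            print("    resolution: {0}".format(err.resolution))
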
:type degree_of_parallelism: int - :param priority: the priority value for the current job. Lower numbers + :ivar degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. + :vartype degree_of_parallelism_percent: float + :param priority: The priority value for the current job. Lower numbers have a higher priority. By default, a job has a priority of 1000. This must be greater than 0. :type priority: int - :ivar submit_time: the time the job was submitted to the service. + :ivar submit_time: The time the job was submitted to the service. :vartype submit_time: datetime - :ivar start_time: the start time of the job. + :ivar start_time: The start time of the job. :vartype start_time: datetime - :ivar end_time: the completion time of the job. + :ivar end_time: The completion time of the job. :vartype end_time: datetime - :ivar state: the job state. When the job is in the Ended state, refer to + :ivar state: The job state. When the job is in the Ended state, refer to Result and ErrorMessage for details. Possible values include: 'Accepted', 'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling', - 'Starting', 'Paused', 'WaitingForCapacity' + 'Starting', 'Paused', 'WaitingForCapacity', 'Yielded', 'Finalizing' :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState - :ivar result: the result of job execution or the current result of the + :ivar result: The result of job execution or the current result of the running job. Possible values include: 'None', 'Succeeded', 'Cancelled', 'Failed' :vartype result: str or ~azure.mgmt.datalake.analytics.job.models.JobResult - :ivar log_folder: the log folder path to use in the following format: + :ivar log_folder: The log folder path to use in the following format: adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/. :vartype log_folder: str - :param log_file_patterns: the list of log file name patterns to find in + :param log_file_patterns: The list of log file name patterns to find in the logFolder. '*' is the only matching character allowed. Example format: jobExecution*.log or *mylog*.txt :type log_file_patterns: list[str] - :param related: the recurring job relationship information properties. + :param related: The recurring job relationship information properties. :type related: ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - :param tags: the key-value pairs used to add additional metadata to the + :param tags: The key-value pairs used to add additional metadata to the job information. (Only for use internally with Scope job type.) :type tags: dict[str, str] - :ivar error_message: the error message details for the job, if the job + :ivar hierarchy_queue_node: the name of hierarchy queue node this job is + assigned to, Null if job has not been assigned yet or the account doesn't + have hierarchy queue. + :vartype hierarchy_queue_node: str + :ivar error_message: The error message details for the job, if the job failed. :vartype error_message: list[~azure.mgmt.datalake.analytics.job.models.JobErrorDetails] - :ivar state_audit_records: the job state audit records, indicating when + :ivar state_audit_records: The job state audit records, indicating when various operations have been performed on this job. :vartype state_audit_records: list[~azure.mgmt.datalake.analytics.job.models.JobStateAuditRecord] - :param properties: the job specific properties. + :param properties: Required. The job specific properties. 
:type properties: ~azure.mgmt.datalake.analytics.job.models.JobProperties """ @@ -82,12 +91,14 @@ class JobInformation(JobInformationBasic): 'name': {'required': True}, 'type': {'required': True}, 'submitter': {'readonly': True}, + 'degree_of_parallelism_percent': {'readonly': True}, 'submit_time': {'readonly': True}, 'start_time': {'readonly': True}, 'end_time': {'readonly': True}, 'state': {'readonly': True}, 'result': {'readonly': True}, 'log_folder': {'readonly': True}, + 'hierarchy_queue_node': {'readonly': True}, 'error_message': {'readonly': True}, 'state_audit_records': {'readonly': True}, 'properties': {'required': True}, @@ -99,6 +110,7 @@ class JobInformation(JobInformationBasic): 'type': {'key': 'type', 'type': 'JobType'}, 'submitter': {'key': 'submitter', 'type': 'str'}, 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, 'priority': {'key': 'priority', 'type': 'int'}, 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, @@ -109,13 +121,14 @@ class JobInformation(JobInformationBasic): 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, 'tags': {'key': 'tags', 'type': '{str}'}, + 'hierarchy_queue_node': {'key': 'hierarchyQueueNode', 'type': 'str'}, 'error_message': {'key': 'errorMessage', 'type': '[JobErrorDetails]'}, 'state_audit_records': {'key': 'stateAuditRecords', 'type': '[JobStateAuditRecord]'}, 'properties': {'key': 'properties', 'type': 'JobProperties'}, } - def __init__(self, name, type, properties, degree_of_parallelism=1, priority=None, log_file_patterns=None, related=None, tags=None): - super(JobInformation, self).__init__(name=name, type=type, degree_of_parallelism=degree_of_parallelism, priority=priority, log_file_patterns=log_file_patterns, related=related, tags=tags) + def __init__(self, **kwargs): + super(JobInformation, self).__init__(**kwargs) self.error_message = None self.state_audit_records = None - self.properties = properties + self.properties = kwargs.get('properties', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic.py index 9cb67c342c4c..6dc6be5769d1 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic.py @@ -18,52 +18,61 @@ class JobInformationBasic(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar job_id: the job's unique identifier (a GUID). + All required parameters must be populated in order to send to Azure. + + :ivar job_id: The job's unique identifier (a GUID). :vartype job_id: str - :param name: the friendly name of the job. + :param name: Required. The friendly name of the job. :type name: str - :param type: the job type of the current job (Hive, USql, or Scope (for - internal use only)). Possible values include: 'USql', 'Hive', 'Scope' + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :ivar submitter: the user or account that submitted the job. 
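With the Python 2 constructor now kwargs-based, JobInformation has to be built with explicit keywords; positional calls like `JobInformation('name', 'USql', props)` no longer work. A construction sketch (USqlJobProperties lives in the same models package but is not part of this excerpt):

from azure.mgmt.datalake.analytics.job.models import (
    JobInformation, JobType, USqlJobProperties)

script = '@a = SELECT * FROM (VALUES(1)) AS t(x); ' \
         'OUTPUT @a TO "/out.csv" USING Outputters.Csv();'
job = JobInformation(
    name="hourly-rollup",
    type=JobType.usql,
    degree_of_parallelism=2,
    properties=USqlJobProperties(script=script),
)
print(job.state)  # None -- server-populated fields stay empty client-side
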
+ :ivar submitter: The user or account that submitted the job. :vartype submitter: str - :param degree_of_parallelism: the degree of parallelism used for this job. - This must be greater than 0, if set to less than 0 it will default to 1. + :param degree_of_parallelism: The degree of parallelism used for this job. Default value: 1 . :type degree_of_parallelism: int - :param priority: the priority value for the current job. Lower numbers + :ivar degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. + :vartype degree_of_parallelism_percent: float + :param priority: The priority value for the current job. Lower numbers have a higher priority. By default, a job has a priority of 1000. This must be greater than 0. :type priority: int - :ivar submit_time: the time the job was submitted to the service. + :ivar submit_time: The time the job was submitted to the service. :vartype submit_time: datetime - :ivar start_time: the start time of the job. + :ivar start_time: The start time of the job. :vartype start_time: datetime - :ivar end_time: the completion time of the job. + :ivar end_time: The completion time of the job. :vartype end_time: datetime - :ivar state: the job state. When the job is in the Ended state, refer to + :ivar state: The job state. When the job is in the Ended state, refer to Result and ErrorMessage for details. Possible values include: 'Accepted', 'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling', - 'Starting', 'Paused', 'WaitingForCapacity' + 'Starting', 'Paused', 'WaitingForCapacity', 'Yielded', 'Finalizing' :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState - :ivar result: the result of job execution or the current result of the + :ivar result: The result of job execution or the current result of the running job. Possible values include: 'None', 'Succeeded', 'Cancelled', 'Failed' :vartype result: str or ~azure.mgmt.datalake.analytics.job.models.JobResult - :ivar log_folder: the log folder path to use in the following format: + :ivar log_folder: The log folder path to use in the following format: adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/. :vartype log_folder: str - :param log_file_patterns: the list of log file name patterns to find in + :param log_file_patterns: The list of log file name patterns to find in the logFolder. '*' is the only matching character allowed. Example format: jobExecution*.log or *mylog*.txt :type log_file_patterns: list[str] - :param related: the recurring job relationship information properties. + :param related: The recurring job relationship information properties. :type related: ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - :param tags: the key-value pairs used to add additional metadata to the + :param tags: The key-value pairs used to add additional metadata to the job information. (Only for use internally with Scope job type.) :type tags: dict[str, str] + :ivar hierarchy_queue_node: the name of hierarchy queue node this job is + assigned to, Null if job has not been assigned yet or the account doesn't + have hierarchy queue. 
+ :vartype hierarchy_queue_node: str """ _validation = { @@ -71,12 +80,14 @@ class JobInformationBasic(Model): 'name': {'required': True}, 'type': {'required': True}, 'submitter': {'readonly': True}, + 'degree_of_parallelism_percent': {'readonly': True}, 'submit_time': {'readonly': True}, 'start_time': {'readonly': True}, 'end_time': {'readonly': True}, 'state': {'readonly': True}, 'result': {'readonly': True}, 'log_folder': {'readonly': True}, + 'hierarchy_queue_node': {'readonly': True}, } _attribute_map = { @@ -85,6 +96,7 @@ class JobInformationBasic(Model): 'type': {'key': 'type', 'type': 'JobType'}, 'submitter': {'key': 'submitter', 'type': 'str'}, 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, 'priority': {'key': 'priority', 'type': 'int'}, 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, @@ -95,22 +107,25 @@ class JobInformationBasic(Model): 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, 'tags': {'key': 'tags', 'type': '{str}'}, + 'hierarchy_queue_node': {'key': 'hierarchyQueueNode', 'type': 'str'}, } - def __init__(self, name, type, degree_of_parallelism=1, priority=None, log_file_patterns=None, related=None, tags=None): - super(JobInformationBasic, self).__init__() + def __init__(self, **kwargs): + super(JobInformationBasic, self).__init__(**kwargs) self.job_id = None - self.name = name - self.type = type + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) self.submitter = None - self.degree_of_parallelism = degree_of_parallelism - self.priority = priority + self.degree_of_parallelism = kwargs.get('degree_of_parallelism', 1) + self.degree_of_parallelism_percent = None + self.priority = kwargs.get('priority', None) self.submit_time = None self.start_time = None self.end_time = None self.state = None self.result = None self.log_folder = None - self.log_file_patterns = log_file_patterns - self.related = related - self.tags = tags + self.log_file_patterns = kwargs.get('log_file_patterns', None) + self.related = kwargs.get('related', None) + self.tags = kwargs.get('tags', None) + self.hierarchy_queue_node = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic_py3.py new file mode 100644 index 000000000000..18ce3252ad3e --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic_py3.py @@ -0,0 +1,131 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobInformationBasic(Model): + """The common Data Lake Analytics job information properties. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. 
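JobInformationBasic gains two server-populated, read-only fields: `degree_of_parallelism_percent` and `hierarchy_queue_node`. A listing sketch that surfaces them; `job_client` and the `job.list` operation are assumptions carried over from the existing 0.x job client, not shown in this diff:

def summarize_jobs(job_client, account_name):
    # `job_client` is assumed to be an authenticated
    # DataLakeAnalyticsJobManagementClient.
    for basic in job_client.job.list(account_name):
        parts = [basic.job_id, "{0}".format(basic.state),
                 "dop={0}".format(basic.degree_of_parallelism)]
        if basic.degree_of_parallelism_percent is not None:
            parts.append("dop_pct={0:.1f}".format(basic.degree_of_parallelism_percent))
        if basic.hierarchy_queue_node:
            parts.append("queue={0}".format(basic.hierarchy_queue_node))
        print("  ".join(parts))
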
+ + :ivar job_id: The job's unique identifier (a GUID). + :vartype job_id: str + :param name: Required. The friendly name of the job. + :type name: str + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :ivar submitter: The user or account that submitted the job. + :vartype submitter: str + :param degree_of_parallelism: The degree of parallelism used for this job. + Default value: 1 . + :type degree_of_parallelism: int + :ivar degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. + :vartype degree_of_parallelism_percent: float + :param priority: The priority value for the current job. Lower numbers + have a higher priority. By default, a job has a priority of 1000. This + must be greater than 0. + :type priority: int + :ivar submit_time: The time the job was submitted to the service. + :vartype submit_time: datetime + :ivar start_time: The start time of the job. + :vartype start_time: datetime + :ivar end_time: The completion time of the job. + :vartype end_time: datetime + :ivar state: The job state. When the job is in the Ended state, refer to + Result and ErrorMessage for details. Possible values include: 'Accepted', + 'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling', + 'Starting', 'Paused', 'WaitingForCapacity', 'Yielded', 'Finalizing' + :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState + :ivar result: The result of job execution or the current result of the + running job. Possible values include: 'None', 'Succeeded', 'Cancelled', + 'Failed' + :vartype result: str or + ~azure.mgmt.datalake.analytics.job.models.JobResult + :ivar log_folder: The log folder path to use in the following format: + adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/. + :vartype log_folder: str + :param log_file_patterns: The list of log file name patterns to find in + the logFolder. '*' is the only matching character allowed. Example format: + jobExecution*.log or *mylog*.txt + :type log_file_patterns: list[str] + :param related: The recurring job relationship information properties. + :type related: + ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties + :param tags: The key-value pairs used to add additional metadata to the + job information. (Only for use internally with Scope job type.) + :type tags: dict[str, str] + :ivar hierarchy_queue_node: the name of hierarchy queue node this job is + assigned to, Null if job has not been assigned yet or the account doesn't + have hierarchy queue. 
+ :vartype hierarchy_queue_node: str + """ + + _validation = { + 'job_id': {'readonly': True}, + 'name': {'required': True}, + 'type': {'required': True}, + 'submitter': {'readonly': True}, + 'degree_of_parallelism_percent': {'readonly': True}, + 'submit_time': {'readonly': True}, + 'start_time': {'readonly': True}, + 'end_time': {'readonly': True}, + 'state': {'readonly': True}, + 'result': {'readonly': True}, + 'log_folder': {'readonly': True}, + 'hierarchy_queue_node': {'readonly': True}, + } + + _attribute_map = { + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'JobType'}, + 'submitter': {'key': 'submitter', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'result': {'key': 'result', 'type': 'JobResult'}, + 'log_folder': {'key': 'logFolder', 'type': 'str'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'hierarchy_queue_node': {'key': 'hierarchyQueueNode', 'type': 'str'}, + } + + def __init__(self, *, name: str, type, degree_of_parallelism: int=1, priority: int=None, log_file_patterns=None, related=None, tags=None, **kwargs) -> None: + super(JobInformationBasic, self).__init__(**kwargs) + self.job_id = None + self.name = name + self.type = type + self.submitter = None + self.degree_of_parallelism = degree_of_parallelism + self.degree_of_parallelism_percent = None + self.priority = priority + self.submit_time = None + self.start_time = None + self.end_time = None + self.state = None + self.result = None + self.log_folder = None + self.log_file_patterns = log_file_patterns + self.related = related + self.tags = tags + self.hierarchy_queue_node = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_py3.py new file mode 100644 index 000000000000..fc67bdcf4d0b --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_py3.py @@ -0,0 +1,134 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .job_information_basic_py3 import JobInformationBasic + + +class JobInformation(JobInformationBasic): + """The extended Data Lake Analytics job information properties returned when + retrieving a specific job. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar job_id: The job's unique identifier (a GUID). + :vartype job_id: str + :param name: Required. 
The friendly name of the job. + :type name: str + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :ivar submitter: The user or account that submitted the job. + :vartype submitter: str + :param degree_of_parallelism: The degree of parallelism used for this job. + Default value: 1 . + :type degree_of_parallelism: int + :ivar degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. + :vartype degree_of_parallelism_percent: float + :param priority: The priority value for the current job. Lower numbers + have a higher priority. By default, a job has a priority of 1000. This + must be greater than 0. + :type priority: int + :ivar submit_time: The time the job was submitted to the service. + :vartype submit_time: datetime + :ivar start_time: The start time of the job. + :vartype start_time: datetime + :ivar end_time: The completion time of the job. + :vartype end_time: datetime + :ivar state: The job state. When the job is in the Ended state, refer to + Result and ErrorMessage for details. Possible values include: 'Accepted', + 'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling', + 'Starting', 'Paused', 'WaitingForCapacity', 'Yielded', 'Finalizing' + :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState + :ivar result: The result of job execution or the current result of the + running job. Possible values include: 'None', 'Succeeded', 'Cancelled', + 'Failed' + :vartype result: str or + ~azure.mgmt.datalake.analytics.job.models.JobResult + :ivar log_folder: The log folder path to use in the following format: + adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/. + :vartype log_folder: str + :param log_file_patterns: The list of log file name patterns to find in + the logFolder. '*' is the only matching character allowed. Example format: + jobExecution*.log or *mylog*.txt + :type log_file_patterns: list[str] + :param related: The recurring job relationship information properties. + :type related: + ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties + :param tags: The key-value pairs used to add additional metadata to the + job information. (Only for use internally with Scope job type.) + :type tags: dict[str, str] + :ivar hierarchy_queue_node: the name of hierarchy queue node this job is + assigned to, Null if job has not been assigned yet or the account doesn't + have hierarchy queue. + :vartype hierarchy_queue_node: str + :ivar error_message: The error message details for the job, if the job + failed. + :vartype error_message: + list[~azure.mgmt.datalake.analytics.job.models.JobErrorDetails] + :ivar state_audit_records: The job state audit records, indicating when + various operations have been performed on this job. + :vartype state_audit_records: + list[~azure.mgmt.datalake.analytics.job.models.JobStateAuditRecord] + :param properties: Required. The job specific properties. 
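One behavioral consequence of the twin constructors worth calling out: the `*` in the py3 signature makes every parameter keyword-only, and the Python 2 `**kwargs` flavor likewise accepts no positionals, so the failure mode is consistent across interpreters:

from azure.mgmt.datalake.analytics.job.models import JobInformationBasic, JobType

try:
    JobInformationBasic("myjob", JobType.usql)   # positional: rejected
except TypeError as exc:
    print("positional arguments rejected:", exc)

basic = JobInformationBasic(name="myjob", type=JobType.usql)
print(basic.degree_of_parallelism)  # 1, the declared default
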
+ :type properties: ~azure.mgmt.datalake.analytics.job.models.JobProperties + """ + + _validation = { + 'job_id': {'readonly': True}, + 'name': {'required': True}, + 'type': {'required': True}, + 'submitter': {'readonly': True}, + 'degree_of_parallelism_percent': {'readonly': True}, + 'submit_time': {'readonly': True}, + 'start_time': {'readonly': True}, + 'end_time': {'readonly': True}, + 'state': {'readonly': True}, + 'result': {'readonly': True}, + 'log_folder': {'readonly': True}, + 'hierarchy_queue_node': {'readonly': True}, + 'error_message': {'readonly': True}, + 'state_audit_records': {'readonly': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'JobType'}, + 'submitter': {'key': 'submitter', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'result': {'key': 'result', 'type': 'JobResult'}, + 'log_folder': {'key': 'logFolder', 'type': 'str'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'hierarchy_queue_node': {'key': 'hierarchyQueueNode', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': '[JobErrorDetails]'}, + 'state_audit_records': {'key': 'stateAuditRecords', 'type': '[JobStateAuditRecord]'}, + 'properties': {'key': 'properties', 'type': 'JobProperties'}, + } + + def __init__(self, *, name: str, type, properties, degree_of_parallelism: int=1, priority: int=None, log_file_patterns=None, related=None, tags=None, **kwargs) -> None: + super(JobInformation, self).__init__(name=name, type=type, degree_of_parallelism=degree_of_parallelism, priority=priority, log_file_patterns=log_file_patterns, related=related, tags=tags, **kwargs) + self.error_message = None + self.state_audit_records = None + self.properties = properties diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error.py index fac1479a1491..ebf20a919050 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error.py @@ -18,83 +18,83 @@ class JobInnerError(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar diagnostic_code: the diagnostic error code. - :vartype diagnostic_code: int - :ivar severity: the severity level of the failure. Possible values + :ivar error_id: The specific identifier for the type of error encountered + in the job. + :vartype error_id: str + :ivar severity: The severity level of the failure. Possible values include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', 'UserWarning' :vartype severity: str or ~azure.mgmt.datalake.analytics.job.models.SeverityTypes - :ivar details: the details of the error message. 
+ :ivar source: The ultimate source of the failure (usually either SYSTEM or + USER). + :vartype source: str + :ivar message: The user friendly error message for the failure. + :vartype message: str + :ivar description: The error message description. + :vartype description: str + :ivar details: The details of the error message. :vartype details: str - :ivar component: the component that failed. + :ivar diagnostic_code: The diagnostic error code. + :vartype diagnostic_code: int + :ivar component: The component that failed. :vartype component: str - :ivar error_id: the specific identifier for the type of error encountered - in the job. - :vartype error_id: str - :ivar help_link: the link to MSDN or Azure help for this type of error, if + :ivar resolution: The recommended resolution for the failure, if any. + :vartype resolution: str + :ivar help_link: The link to MSDN or Azure help for this type of error, if any. :vartype help_link: str - :ivar internal_diagnostics: the internal diagnostic stack trace if the + :ivar internal_diagnostics: The internal diagnostic stack trace if the user requesting the job error details has sufficient permissions it will be retrieved, otherwise it will be empty. :vartype internal_diagnostics: str - :ivar message: the user friendly error message for the failure. - :vartype message: str - :ivar resolution: the recommended resolution for the failure, if any. - :vartype resolution: str - :ivar source: the ultimate source of the failure (usually either SYSTEM or - USER). - :vartype source: str - :ivar description: the error message description - :vartype description: str - :ivar inner_error: the inner error of this specific job error message, if + :ivar inner_error: The inner error of this specific job error message, if any. 
:vartype inner_error: ~azure.mgmt.datalake.analytics.job.models.JobInnerError """ _validation = { - 'diagnostic_code': {'readonly': True}, + 'error_id': {'readonly': True}, 'severity': {'readonly': True}, + 'source': {'readonly': True}, + 'message': {'readonly': True}, + 'description': {'readonly': True}, 'details': {'readonly': True}, + 'diagnostic_code': {'readonly': True}, 'component': {'readonly': True}, - 'error_id': {'readonly': True}, + 'resolution': {'readonly': True}, 'help_link': {'readonly': True}, 'internal_diagnostics': {'readonly': True}, - 'message': {'readonly': True}, - 'resolution': {'readonly': True}, - 'source': {'readonly': True}, - 'description': {'readonly': True}, 'inner_error': {'readonly': True}, } _attribute_map = { - 'diagnostic_code': {'key': 'diagnosticCode', 'type': 'int'}, + 'error_id': {'key': 'errorId', 'type': 'str'}, 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, + 'source': {'key': 'source', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, 'details': {'key': 'details', 'type': 'str'}, + 'diagnostic_code': {'key': 'diagnosticCode', 'type': 'int'}, 'component': {'key': 'component', 'type': 'str'}, - 'error_id': {'key': 'errorId', 'type': 'str'}, + 'resolution': {'key': 'resolution', 'type': 'str'}, 'help_link': {'key': 'helpLink', 'type': 'str'}, 'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'resolution': {'key': 'resolution', 'type': 'str'}, - 'source': {'key': 'source', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, 'inner_error': {'key': 'innerError', 'type': 'JobInnerError'}, } - def __init__(self): - super(JobInnerError, self).__init__() - self.diagnostic_code = None + def __init__(self, **kwargs): + super(JobInnerError, self).__init__(**kwargs) + self.error_id = None self.severity = None + self.source = None + self.message = None + self.description = None self.details = None + self.diagnostic_code = None self.component = None - self.error_id = None + self.resolution = None self.help_link = None self.internal_diagnostics = None - self.message = None - self.resolution = None - self.source = None - self.description = None self.inner_error = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error_py3.py new file mode 100644 index 000000000000..78de225efe91 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error_py3.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobInnerError(Model): + """The Data Lake Analytics job error details. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar error_id: The specific identifier for the type of error encountered + in the job. 
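JobInnerError is self-referential: `inner_error` points at another JobInnerError, so a failure surfaces as a chain rather than a tree. A small iterator that unwinds it:

def iter_error_chain(error):
    # Yield an error and each nested inner_error beneath it.
    while error is not None:
        yield error
        error = error.inner_error

# Usage sketch against a retrieved, failed job:
# for err in job_info.error_message or []:
#     for inner in iter_error_chain(err.inner_error):
#         print(inner.error_id, inner.component, inner.message)
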
+ :vartype error_id: str + :ivar severity: The severity level of the failure. Possible values + include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', + 'UserWarning' + :vartype severity: str or + ~azure.mgmt.datalake.analytics.job.models.SeverityTypes + :ivar source: The ultimate source of the failure (usually either SYSTEM or + USER). + :vartype source: str + :ivar message: The user friendly error message for the failure. + :vartype message: str + :ivar description: The error message description. + :vartype description: str + :ivar details: The details of the error message. + :vartype details: str + :ivar diagnostic_code: The diagnostic error code. + :vartype diagnostic_code: int + :ivar component: The component that failed. + :vartype component: str + :ivar resolution: The recommended resolution for the failure, if any. + :vartype resolution: str + :ivar help_link: The link to MSDN or Azure help for this type of error, if + any. + :vartype help_link: str + :ivar internal_diagnostics: The internal diagnostic stack trace if the + user requesting the job error details has sufficient permissions it will + be retrieved, otherwise it will be empty. + :vartype internal_diagnostics: str + :ivar inner_error: The inner error of this specific job error message, if + any. + :vartype inner_error: + ~azure.mgmt.datalake.analytics.job.models.JobInnerError + """ + + _validation = { + 'error_id': {'readonly': True}, + 'severity': {'readonly': True}, + 'source': {'readonly': True}, + 'message': {'readonly': True}, + 'description': {'readonly': True}, + 'details': {'readonly': True}, + 'diagnostic_code': {'readonly': True}, + 'component': {'readonly': True}, + 'resolution': {'readonly': True}, + 'help_link': {'readonly': True}, + 'internal_diagnostics': {'readonly': True}, + 'inner_error': {'readonly': True}, + } + + _attribute_map = { + 'error_id': {'key': 'errorId', 'type': 'str'}, + 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, + 'source': {'key': 'source', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + 'diagnostic_code': {'key': 'diagnosticCode', 'type': 'int'}, + 'component': {'key': 'component', 'type': 'str'}, + 'resolution': {'key': 'resolution', 'type': 'str'}, + 'help_link': {'key': 'helpLink', 'type': 'str'}, + 'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'}, + 'inner_error': {'key': 'innerError', 'type': 'JobInnerError'}, + } + + def __init__(self, **kwargs) -> None: + super(JobInnerError, self).__init__(**kwargs) + self.error_id = None + self.severity = None + self.source = None + self.message = None + self.description = None + self.details = None + self.diagnostic_code = None + self.component = None + self.resolution = None + self.help_link = None + self.internal_diagnostics = None + self.inner_error = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information.py index 274f344fa495..763a8ac89801 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information.py @@ -19,40 +19,40 @@ class JobPipelineInformation(Model): Variables are only populated by the server, and will be ignored when sending a request. 
- :ivar pipeline_id: the job relationship pipeline identifier (a GUID). + :ivar pipeline_id: The job relationship pipeline identifier (a GUID). :vartype pipeline_id: str - :ivar pipeline_name: the friendly name of the job relationship pipeline, + :ivar pipeline_name: The friendly name of the job relationship pipeline, which does not need to be unique. :vartype pipeline_name: str - :ivar pipeline_uri: the pipeline uri, unique, links to the originating + :ivar pipeline_uri: The pipeline uri, unique, links to the originating service for this pipeline. :vartype pipeline_uri: str - :ivar num_jobs_failed: the number of jobs in this pipeline that have + :ivar num_jobs_failed: The number of jobs in this pipeline that have failed. :vartype num_jobs_failed: int - :ivar num_jobs_canceled: the number of jobs in this pipeline that have + :ivar num_jobs_canceled: The number of jobs in this pipeline that have been canceled. :vartype num_jobs_canceled: int - :ivar num_jobs_succeeded: the number of jobs in this pipeline that have + :ivar num_jobs_succeeded: The number of jobs in this pipeline that have succeeded. :vartype num_jobs_succeeded: int - :ivar au_hours_failed: the number of job execution hours that resulted in + :ivar au_hours_failed: The number of job execution hours that resulted in failed jobs. :vartype au_hours_failed: float - :ivar au_hours_canceled: the number of job execution hours that resulted + :ivar au_hours_canceled: The number of job execution hours that resulted in canceled jobs. :vartype au_hours_canceled: float - :ivar au_hours_succeeded: the number of job execution hours that resulted + :ivar au_hours_succeeded: The number of job execution hours that resulted in successful jobs. :vartype au_hours_succeeded: float - :ivar last_submit_time: the last time a job in this pipeline was + :ivar last_submit_time: The last time a job in this pipeline was submitted. :vartype last_submit_time: datetime - :ivar runs: the list of recurrence identifiers representing each run of + :ivar runs: The list of recurrence identifiers representing each run of this pipeline. :vartype runs: list[~azure.mgmt.datalake.analytics.job.models.JobPipelineRunInformation] - :ivar recurrences: the list of recurrence identifiers representing each + :ivar recurrences: The list of recurrence identifiers representing each run of this pipeline. :vartype recurrences: list[str] """ @@ -87,8 +87,8 @@ class JobPipelineInformation(Model): 'recurrences': {'key': 'recurrences', 'type': '[str]'}, } - def __init__(self): - super(JobPipelineInformation, self).__init__() + def __init__(self, **kwargs): + super(JobPipelineInformation, self).__init__(**kwargs) self.pipeline_id = None self.pipeline_name = None self.pipeline_uri = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information_py3.py new file mode 100644 index 000000000000..3f088103e097 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information_py3.py @@ -0,0 +1,103 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPipelineInformation(Model): + """Job Pipeline Information, showing the relationship of jobs and recurrences + of those jobs in a pipeline. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar pipeline_id: The job relationship pipeline identifier (a GUID). + :vartype pipeline_id: str + :ivar pipeline_name: The friendly name of the job relationship pipeline, + which does not need to be unique. + :vartype pipeline_name: str + :ivar pipeline_uri: The pipeline uri, unique, links to the originating + service for this pipeline. + :vartype pipeline_uri: str + :ivar num_jobs_failed: The number of jobs in this pipeline that have + failed. + :vartype num_jobs_failed: int + :ivar num_jobs_canceled: The number of jobs in this pipeline that have + been canceled. + :vartype num_jobs_canceled: int + :ivar num_jobs_succeeded: The number of jobs in this pipeline that have + succeeded. + :vartype num_jobs_succeeded: int + :ivar au_hours_failed: The number of job execution hours that resulted in + failed jobs. + :vartype au_hours_failed: float + :ivar au_hours_canceled: The number of job execution hours that resulted + in canceled jobs. + :vartype au_hours_canceled: float + :ivar au_hours_succeeded: The number of job execution hours that resulted + in successful jobs. + :vartype au_hours_succeeded: float + :ivar last_submit_time: The last time a job in this pipeline was + submitted. + :vartype last_submit_time: datetime + :ivar runs: The list of recurrence identifiers representing each run of + this pipeline. + :vartype runs: + list[~azure.mgmt.datalake.analytics.job.models.JobPipelineRunInformation] + :ivar recurrences: The list of recurrence identifiers representing each + run of this pipeline. 
+ :vartype recurrences: list[str] + """ + + _validation = { + 'pipeline_id': {'readonly': True}, + 'pipeline_name': {'readonly': True, 'max_length': 260}, + 'pipeline_uri': {'readonly': True}, + 'num_jobs_failed': {'readonly': True}, + 'num_jobs_canceled': {'readonly': True}, + 'num_jobs_succeeded': {'readonly': True}, + 'au_hours_failed': {'readonly': True}, + 'au_hours_canceled': {'readonly': True}, + 'au_hours_succeeded': {'readonly': True}, + 'last_submit_time': {'readonly': True}, + 'runs': {'readonly': True}, + 'recurrences': {'readonly': True}, + } + + _attribute_map = { + 'pipeline_id': {'key': 'pipelineId', 'type': 'str'}, + 'pipeline_name': {'key': 'pipelineName', 'type': 'str'}, + 'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'}, + 'num_jobs_failed': {'key': 'numJobsFailed', 'type': 'int'}, + 'num_jobs_canceled': {'key': 'numJobsCanceled', 'type': 'int'}, + 'num_jobs_succeeded': {'key': 'numJobsSucceeded', 'type': 'int'}, + 'au_hours_failed': {'key': 'auHoursFailed', 'type': 'float'}, + 'au_hours_canceled': {'key': 'auHoursCanceled', 'type': 'float'}, + 'au_hours_succeeded': {'key': 'auHoursSucceeded', 'type': 'float'}, + 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, + 'runs': {'key': 'runs', 'type': '[JobPipelineRunInformation]'}, + 'recurrences': {'key': 'recurrences', 'type': '[str]'}, + } + + def __init__(self, **kwargs) -> None: + super(JobPipelineInformation, self).__init__(**kwargs) + self.pipeline_id = None + self.pipeline_name = None + self.pipeline_uri = None + self.num_jobs_failed = None + self.num_jobs_canceled = None + self.num_jobs_succeeded = None + self.au_hours_failed = None + self.au_hours_canceled = None + self.au_hours_succeeded = None + self.last_submit_time = None + self.runs = None + self.recurrences = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information.py index 0dd46d6d4d25..27e7818219e7 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information.py @@ -18,10 +18,10 @@ class JobPipelineRunInformation(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar run_id: the run identifier of an instance of pipeline executions (a + :ivar run_id: The run identifier of an instance of pipeline executions (a GUID). :vartype run_id: str - :ivar last_submit_time: the time this instance was last submitted. + :ivar last_submit_time: The time this instance was last submitted. 
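Note: JobPipelineInformation is entirely server-populated, so client code only reads it. A hypothetical helper (the function and its return shape are this note's invention, not SDK API) that rolls up the counters documented above:

def pipeline_summary(info):
    """Summarize a JobPipelineInformation returned by the service."""
    total_jobs = ((info.num_jobs_failed or 0)
                  + (info.num_jobs_canceled or 0)
                  + (info.num_jobs_succeeded or 0))
    total_au_hours = ((info.au_hours_failed or 0.0)
                      + (info.au_hours_canceled or 0.0)
                      + (info.au_hours_succeeded or 0.0))
    return {'jobs': total_jobs,
            'au_hours': total_au_hours,
            'runs': len(info.runs or [])}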
:vartype last_submit_time: datetime """ @@ -35,7 +35,7 @@ class JobPipelineRunInformation(Model): 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, } - def __init__(self): - super(JobPipelineRunInformation, self).__init__() + def __init__(self, **kwargs): + super(JobPipelineRunInformation, self).__init__(**kwargs) self.run_id = None self.last_submit_time = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information_py3.py new file mode 100644 index 000000000000..cefd0d4ab04c --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPipelineRunInformation(Model): + """Run info for a specific job pipeline. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar run_id: The run identifier of an instance of pipeline executions (a + GUID). + :vartype run_id: str + :ivar last_submit_time: The time this instance was last submitted. + :vartype last_submit_time: datetime + """ + + _validation = { + 'run_id': {'readonly': True}, + 'last_submit_time': {'readonly': True}, + } + + _attribute_map = { + 'run_id': {'key': 'runId', 'type': 'str'}, + 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs) -> None: + super(JobPipelineRunInformation, self).__init__(**kwargs) + self.run_id = None + self.last_submit_time = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties.py index 281bb014f038..67330098258f 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties.py @@ -16,15 +16,17 @@ class JobProperties(Model): """The common Data Lake Analytics job properties. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: USqlJobProperties, ScopeJobProperties, HiveJobProperties + sub-classes are: USqlJobProperties, HiveJobProperties, ScopeJobProperties - :param runtime_version: the runtime version of the Data Lake Analytics + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics engine to use for the specific type of job being run. :type runtime_version: str - :param script: the script to run. Please note that the maximum script size - is 3 MB. + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. :type script: str - :param type: Constant filled by server. + :param type: Required. Constant filled by server. 
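Note: the 'iso-8601' entry in _attribute_map is what converts the wire string into a datetime. A sketch of deserializing JobPipelineRunInformation from a hypothetical payload (recent msrest versions accept a plain dict here):

from msrest import Deserializer

from azure.mgmt.datalake.analytics.job.models import JobPipelineRunInformation

deserialize = Deserializer(
    {'JobPipelineRunInformation': JobPipelineRunInformation})

payload = {  # hypothetical wire data
    'runId': '00000000-0000-0000-0000-000000000000',
    'lastSubmitTime': '2018-06-01T12:30:00Z',
}
run = deserialize('JobPipelineRunInformation', payload)
print(run.last_submit_time)  # datetime.datetime parsed from ISO 8601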
:type type: str """ @@ -40,11 +42,11 @@ class JobProperties(Model): } _subtype_map = { - 'type': {'USql': 'USqlJobProperties', 'Scope': 'ScopeJobProperties', 'Hive': 'HiveJobProperties'} + 'type': {'USql': 'USqlJobProperties', 'Hive': 'HiveJobProperties', 'Scope': 'ScopeJobProperties'} } - def __init__(self, script, runtime_version=None): - super(JobProperties, self).__init__() - self.runtime_version = runtime_version - self.script = script + def __init__(self, **kwargs): + super(JobProperties, self).__init__(**kwargs) + self.runtime_version = kwargs.get('runtime_version', None) + self.script = kwargs.get('script', None) self.type = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties_py3.py new file mode 100644 index 000000000000..7470e6271f5b --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobProperties(Model): + """The common Data Lake Analytics job properties. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: USqlJobProperties, HiveJobProperties, ScopeJobProperties + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'USql': 'USqlJobProperties', 'Hive': 'HiveJobProperties', 'Scope': 'ScopeJobProperties'} + } + + def __init__(self, *, script: str, runtime_version: str=None, **kwargs) -> None: + super(JobProperties, self).__init__(**kwargs) + self.runtime_version = runtime_version + self.script = script + self.type = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information.py index d32882e99322..22603aaffdad 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information.py @@ -18,32 +18,32 @@ class JobRecurrenceInformation(Model): Variables are only populated by the server, and will be ignored when sending a request. 
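Note: the reordered _subtype_map above is how msrest routes a polymorphic payload to the right subclass: the server fills the 'type' constant and deserialization classifies on it. A sketch with a made-up payload:

from msrest import Deserializer

from azure.mgmt.datalake.analytics.job.models import (
    JobProperties, USqlJobProperties)

deserialize = Deserializer({
    'JobProperties': JobProperties,
    'USqlJobProperties': USqlJobProperties,
})

payload = {'type': 'USql', 'script': '@rows = SELECT 1 AS One;'}
props = deserialize('JobProperties', payload)
print(type(props).__name__)  # USqlJobProperties, chosen via _subtype_map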
- :ivar recurrence_id: the recurrence identifier (a GUID), unique per + :ivar recurrence_id: The recurrence identifier (a GUID), unique per activity/script, regardless of iterations. This is something to link different occurrences of the same job together. :vartype recurrence_id: str - :ivar recurrence_name: the recurrence name, user friendly name for the + :ivar recurrence_name: The recurrence name, user friendly name for the correlation between jobs. :vartype recurrence_name: str - :ivar num_jobs_failed: the number of jobs in this recurrence that have + :ivar num_jobs_failed: The number of jobs in this recurrence that have failed. :vartype num_jobs_failed: int - :ivar num_jobs_canceled: the number of jobs in this recurrence that have + :ivar num_jobs_canceled: The number of jobs in this recurrence that have been canceled. :vartype num_jobs_canceled: int - :ivar num_jobs_succeeded: the number of jobs in this recurrence that have + :ivar num_jobs_succeeded: The number of jobs in this recurrence that have succeeded. :vartype num_jobs_succeeded: int - :ivar au_hours_failed: the number of job execution hours that resulted in + :ivar au_hours_failed: The number of job execution hours that resulted in failed jobs. :vartype au_hours_failed: float - :ivar au_hours_canceled: the number of job execution hours that resulted + :ivar au_hours_canceled: The number of job execution hours that resulted in canceled jobs. :vartype au_hours_canceled: float - :ivar au_hours_succeeded: the number of job execution hours that resulted + :ivar au_hours_succeeded: The number of job execution hours that resulted in successful jobs. :vartype au_hours_succeeded: float - :ivar last_submit_time: the last time a job in this recurrence was + :ivar last_submit_time: The last time a job in this recurrence was submitted. :vartype last_submit_time: datetime """ @@ -72,8 +72,8 @@ class JobRecurrenceInformation(Model): 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, } - def __init__(self): - super(JobRecurrenceInformation, self).__init__() + def __init__(self, **kwargs): + super(JobRecurrenceInformation, self).__init__(**kwargs) self.recurrence_id = None self.recurrence_name = None self.num_jobs_failed = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information_py3.py new file mode 100644 index 000000000000..2d601ac83018 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information_py3.py @@ -0,0 +1,85 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobRecurrenceInformation(Model): + """Recurrence job information for a specific recurrence. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar recurrence_id: The recurrence identifier (a GUID), unique per + activity/script, regardless of iterations. 
This is something to link + different occurrences of the same job together. + :vartype recurrence_id: str + :ivar recurrence_name: The recurrence name, user friendly name for the + correlation between jobs. + :vartype recurrence_name: str + :ivar num_jobs_failed: The number of jobs in this recurrence that have + failed. + :vartype num_jobs_failed: int + :ivar num_jobs_canceled: The number of jobs in this recurrence that have + been canceled. + :vartype num_jobs_canceled: int + :ivar num_jobs_succeeded: The number of jobs in this recurrence that have + succeeded. + :vartype num_jobs_succeeded: int + :ivar au_hours_failed: The number of job execution hours that resulted in + failed jobs. + :vartype au_hours_failed: float + :ivar au_hours_canceled: The number of job execution hours that resulted + in canceled jobs. + :vartype au_hours_canceled: float + :ivar au_hours_succeeded: The number of job execution hours that resulted + in successful jobs. + :vartype au_hours_succeeded: float + :ivar last_submit_time: The last time a job in this recurrence was + submitted. + :vartype last_submit_time: datetime + """ + + _validation = { + 'recurrence_id': {'readonly': True}, + 'recurrence_name': {'readonly': True}, + 'num_jobs_failed': {'readonly': True}, + 'num_jobs_canceled': {'readonly': True}, + 'num_jobs_succeeded': {'readonly': True}, + 'au_hours_failed': {'readonly': True}, + 'au_hours_canceled': {'readonly': True}, + 'au_hours_succeeded': {'readonly': True}, + 'last_submit_time': {'readonly': True}, + } + + _attribute_map = { + 'recurrence_id': {'key': 'recurrenceId', 'type': 'str'}, + 'recurrence_name': {'key': 'recurrenceName', 'type': 'str'}, + 'num_jobs_failed': {'key': 'numJobsFailed', 'type': 'int'}, + 'num_jobs_canceled': {'key': 'numJobsCanceled', 'type': 'int'}, + 'num_jobs_succeeded': {'key': 'numJobsSucceeded', 'type': 'int'}, + 'au_hours_failed': {'key': 'auHoursFailed', 'type': 'float'}, + 'au_hours_canceled': {'key': 'auHoursCanceled', 'type': 'float'}, + 'au_hours_succeeded': {'key': 'auHoursSucceeded', 'type': 'float'}, + 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs) -> None: + super(JobRecurrenceInformation, self).__init__(**kwargs) + self.recurrence_id = None + self.recurrence_name = None + self.num_jobs_failed = None + self.num_jobs_canceled = None + self.num_jobs_succeeded = None + self.au_hours_failed = None + self.au_hours_canceled = None + self.au_hours_succeeded = None + self.last_submit_time = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties.py index 99e66b2c6854..f407db458a81 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties.py @@ -16,22 +16,24 @@ class JobRelationshipProperties(Model): """Job relationship information properties including pipeline information, correlation information, etc. - :param pipeline_id: the job relationship pipeline identifier (a GUID). + All required parameters must be populated in order to send to Azure. + + :param pipeline_id: The job relationship pipeline identifier (a GUID). 
:type pipeline_id: str - :param pipeline_name: the friendly name of the job relationship pipeline, + :param pipeline_name: The friendly name of the job relationship pipeline, which does not need to be unique. :type pipeline_name: str - :param pipeline_uri: the pipeline uri, unique, links to the originating + :param pipeline_uri: The pipeline uri, unique, links to the originating service for this pipeline. :type pipeline_uri: str - :param run_id: the run identifier (a GUID), unique identifier of the + :param run_id: The run identifier (a GUID), unique identifier of the iteration of this pipeline. :type run_id: str - :param recurrence_id: the recurrence identifier (a GUID), unique per - activity/script, regardless of iterations. This is something to link + :param recurrence_id: Required. The recurrence identifier (a GUID), unique + per activity/script, regardless of iterations. This is something to link different occurrences of the same job together. :type recurrence_id: str - :param recurrence_name: the recurrence name, user friendly name for the + :param recurrence_name: The recurrence name, user friendly name for the correlation between jobs. :type recurrence_name: str """ @@ -51,11 +53,11 @@ class JobRelationshipProperties(Model): 'recurrence_name': {'key': 'recurrenceName', 'type': 'str'}, } - def __init__(self, recurrence_id, pipeline_id=None, pipeline_name=None, pipeline_uri=None, run_id=None, recurrence_name=None): - super(JobRelationshipProperties, self).__init__() - self.pipeline_id = pipeline_id - self.pipeline_name = pipeline_name - self.pipeline_uri = pipeline_uri - self.run_id = run_id - self.recurrence_id = recurrence_id - self.recurrence_name = recurrence_name + def __init__(self, **kwargs): + super(JobRelationshipProperties, self).__init__(**kwargs) + self.pipeline_id = kwargs.get('pipeline_id', None) + self.pipeline_name = kwargs.get('pipeline_name', None) + self.pipeline_uri = kwargs.get('pipeline_uri', None) + self.run_id = kwargs.get('run_id', None) + self.recurrence_id = kwargs.get('recurrence_id', None) + self.recurrence_name = kwargs.get('recurrence_name', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties_py3.py new file mode 100644 index 000000000000..876bdc690718 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobRelationshipProperties(Model): + """Job relationship information properties including pipeline information, + correlation information, etc. + + All required parameters must be populated in order to send to Azure. + + :param pipeline_id: The job relationship pipeline identifier (a GUID). + :type pipeline_id: str + :param pipeline_name: The friendly name of the job relationship pipeline, + which does not need to be unique. 
+ :type pipeline_name: str + :param pipeline_uri: The pipeline uri, unique, links to the originating + service for this pipeline. + :type pipeline_uri: str + :param run_id: The run identifier (a GUID), unique identifier of the + iteration of this pipeline. + :type run_id: str + :param recurrence_id: Required. The recurrence identifier (a GUID), unique + per activity/script, regardless of iterations. This is something to link + different occurrences of the same job together. + :type recurrence_id: str + :param recurrence_name: The recurrence name, user friendly name for the + correlation between jobs. + :type recurrence_name: str + """ + + _validation = { + 'pipeline_name': {'max_length': 260}, + 'recurrence_id': {'required': True}, + 'recurrence_name': {'max_length': 260}, + } + + _attribute_map = { + 'pipeline_id': {'key': 'pipelineId', 'type': 'str'}, + 'pipeline_name': {'key': 'pipelineName', 'type': 'str'}, + 'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'}, + 'run_id': {'key': 'runId', 'type': 'str'}, + 'recurrence_id': {'key': 'recurrenceId', 'type': 'str'}, + 'recurrence_name': {'key': 'recurrenceName', 'type': 'str'}, + } + + def __init__(self, *, recurrence_id: str, pipeline_id: str=None, pipeline_name: str=None, pipeline_uri: str=None, run_id: str=None, recurrence_name: str=None, **kwargs) -> None: + super(JobRelationshipProperties, self).__init__(**kwargs) + self.pipeline_id = pipeline_id + self.pipeline_name = pipeline_name + self.pipeline_uri = pipeline_uri + self.run_id = run_id + self.recurrence_id = recurrence_id + self.recurrence_name = recurrence_name diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource.py index f3ad28a6f409..aa33c333446a 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource.py @@ -15,11 +15,11 @@ class JobResource(Model): """The Data Lake Analytics job resources. - :param name: the name of the resource. + :param name: The name of the resource. :type name: str - :param resource_path: the path to the resource. + :param resource_path: The path to the resource. :type resource_path: str - :param type: the job resource type. Possible values include: + :param type: The job resource type. 
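Note: with the move to **kwargs, a bad JobRelationshipProperties no longer fails at construction time in the Python 2 module; the _validation table (recurrence_id required, both name fields capped at 260 characters) is enforced by msrest on the client. A sketch of tripping the length constraint, assuming msrest validates while building the request body, as the versions this SDK targets do (the GUID and name are made up):

from msrest import Serializer
from msrest.exceptions import ValidationError

from azure.mgmt.datalake.analytics.job.models import JobRelationshipProperties

serializer = Serializer(
    {'JobRelationshipProperties': JobRelationshipProperties})

rel = JobRelationshipProperties(
    recurrence_id='00000000-0000-0000-0000-000000000000')
rel.pipeline_name = 'p' * 300   # violates max_length: 260
try:
    serializer.body(rel, 'JobRelationshipProperties')
except ValidationError as exc:
    print(exc)                  # caught client-side, before any request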
Possible values include: 'VertexResource', 'JobManagerResource', 'StatisticsResource', 'VertexResourceInUserFolder', 'JobManagerResourceInUserFolder', 'StatisticsResourceInUserFolder' @@ -33,8 +33,8 @@ class JobResource(Model): 'type': {'key': 'type', 'type': 'JobResourceType'}, } - def __init__(self, name=None, resource_path=None, type=None): - super(JobResource, self).__init__() - self.name = name - self.resource_path = resource_path - self.type = type + def __init__(self, **kwargs): + super(JobResource, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.resource_path = kwargs.get('resource_path', None) + self.type = kwargs.get('type', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource_py3.py new file mode 100644 index 000000000000..d0d2b7c2cd28 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobResource(Model): + """The Data Lake Analytics job resources. + + :param name: The name of the resource. + :type name: str + :param resource_path: The path to the resource. + :type resource_path: str + :param type: The job resource type. Possible values include: + 'VertexResource', 'JobManagerResource', 'StatisticsResource', + 'VertexResourceInUserFolder', 'JobManagerResourceInUserFolder', + 'StatisticsResourceInUserFolder' + :type type: str or + ~azure.mgmt.datalake.analytics.job.models.JobResourceType + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'resource_path': {'key': 'resourcePath', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'JobResourceType'}, + } + + def __init__(self, *, name: str=None, resource_path: str=None, type=None, **kwargs) -> None: + super(JobResource, self).__init__(**kwargs) + self.name = name + self.resource_path = resource_path + self.type = type diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record.py index 5cf5717f649f..ec299db698bc 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record.py @@ -19,13 +19,13 @@ class JobStateAuditRecord(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar new_state: the new state the job is in. + :ivar new_state: The new state the job is in. :vartype new_state: str - :ivar time_stamp: the time stamp that the state change took place. + :ivar time_stamp: The time stamp that the state change took place. :vartype time_stamp: datetime - :ivar requested_by_user: the user who requests the change. + :ivar requested_by_user: The user who requests the change. 
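Note: unlike the server-populated models around it, JobResource is client-supplied input. A construction sketch with made-up names and paths; type accepts a JobResourceType value or its string form:

from azure.mgmt.datalake.analytics.job.models import JobResource

resource = JobResource(
    name='my_helper_assembly',   # hypothetical
    resource_path='adl://example.azuredatalakestore.net/libs/helper.dll',
    type='VertexResource')       # one of the JobResourceType values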
:vartype requested_by_user: str - :ivar details: the details of the audit log. + :ivar details: The details of the audit log. :vartype details: str """ @@ -43,8 +43,8 @@ class JobStateAuditRecord(Model): 'details': {'key': 'details', 'type': 'str'}, } - def __init__(self): - super(JobStateAuditRecord, self).__init__() + def __init__(self, **kwargs): + super(JobStateAuditRecord, self).__init__(**kwargs) self.new_state = None self.time_stamp = None self.requested_by_user = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record_py3.py new file mode 100644 index 000000000000..1465c71edadb --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobStateAuditRecord(Model): + """The Data Lake Analytics job state audit records for tracking the lifecycle + of a job. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar new_state: The new state the job is in. + :vartype new_state: str + :ivar time_stamp: The time stamp that the state change took place. + :vartype time_stamp: datetime + :ivar requested_by_user: The user who requests the change. + :vartype requested_by_user: str + :ivar details: The details of the audit log. + :vartype details: str + """ + + _validation = { + 'new_state': {'readonly': True}, + 'time_stamp': {'readonly': True}, + 'requested_by_user': {'readonly': True}, + 'details': {'readonly': True}, + } + + _attribute_map = { + 'new_state': {'key': 'newState', 'type': 'str'}, + 'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'}, + 'requested_by_user': {'key': 'requestedByUser', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + } + + def __init__(self, **kwargs) -> None: + super(JobStateAuditRecord, self).__init__(**kwargs) + self.new_state = None + self.time_stamp = None + self.requested_by_user = None + self.details = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics.py index ef6c9fc7f9bc..d9b24bbce258 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics.py @@ -18,11 +18,11 @@ class JobStatistics(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar last_update_time_utc: the last update time for the statistics. + :ivar last_update_time_utc: The last update time for the statistics. :vartype last_update_time_utc: datetime - :ivar finalizing_time_utc: the job finalizing start time. + :ivar finalizing_time_utc: The job finalizing start time. 
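Note: a job's state audit records form its lifecycle trail. A small hypothetical helper (not SDK API) that prints the transitions in order:

def print_state_history(records):
    """records: a list of JobStateAuditRecord from a job."""
    # time_stamp is server-populated, so sorting on it is safe here.
    for rec in sorted(records or [], key=lambda r: r.time_stamp):
        print('{0}: -> {1} (by {2})'.format(
            rec.time_stamp, rec.new_state, rec.requested_by_user))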
:vartype finalizing_time_utc: datetime - :ivar stages: the list of stages for the job. + :ivar stages: The list of stages for the job. :vartype stages: list[~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertexStage] """ @@ -39,8 +39,8 @@ class JobStatistics(Model): 'stages': {'key': 'stages', 'type': '[JobStatisticsVertexStage]'}, } - def __init__(self): - super(JobStatistics, self).__init__() + def __init__(self, **kwargs): + super(JobStatistics, self).__init__(**kwargs) self.last_update_time_utc = None self.finalizing_time_utc = None self.stages = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_py3.py new file mode 100644 index 000000000000..2c3c187b2793 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobStatistics(Model): + """The Data Lake Analytics job execution statistics. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar last_update_time_utc: The last update time for the statistics. + :vartype last_update_time_utc: datetime + :ivar finalizing_time_utc: The job finalizing start time. + :vartype finalizing_time_utc: datetime + :ivar stages: The list of stages for the job. + :vartype stages: + list[~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertexStage] + """ + + _validation = { + 'last_update_time_utc': {'readonly': True}, + 'finalizing_time_utc': {'readonly': True}, + 'stages': {'readonly': True}, + } + + _attribute_map = { + 'last_update_time_utc': {'key': 'lastUpdateTimeUtc', 'type': 'iso-8601'}, + 'finalizing_time_utc': {'key': 'finalizingTimeUtc', 'type': 'iso-8601'}, + 'stages': {'key': 'stages', 'type': '[JobStatisticsVertexStage]'}, + } + + def __init__(self, **kwargs) -> None: + super(JobStatistics, self).__init__(**kwargs) + self.last_update_time_utc = None + self.finalizing_time_utc = None + self.stages = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex.py index 8688d0bcb6a6..d3fba8e5e282 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex.py @@ -13,20 +13,20 @@ class JobStatisticsVertex(Model): - """the detailed information for a vertex. + """The detailed information for a vertex. Variables are only populated by the server, and will be ignored when sending a request. - :ivar name: the name of the vertex. + :ivar name: The name of the vertex. :vartype name: str - :ivar vertex_id: the id of the vertex. + :ivar vertex_id: The id of the vertex. :vartype vertex_id: str - :ivar execution_time: the amount of execution time of the vertex. 
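Note: JobStatistics.stages is a list of JobStatisticsVertexStage objects, so per-stage figures roll up with plain iteration. A hypothetical example that finds the slowest stage (runs on both Python 2.7 and 3.x, matching the twin-module split in this diff):

def slowest_stage(job_statistics):
    """Return the stage with the largest total_execution_time, or None."""
    stages = [s for s in (job_statistics.stages or [])
              if s.total_execution_time]
    if not stages:
        return None
    return max(stages, key=lambda s: s.total_execution_time)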
+ :ivar execution_time: The amount of execution time of the vertex. :vartype execution_time: timedelta - :ivar data_read: the amount of data read of the vertex, in bytes. + :ivar data_read: The amount of data read of the vertex, in bytes. :vartype data_read: long - :ivar peak_mem_usage: the amount of peak memory usage of the vertex, in + :ivar peak_mem_usage: The amount of peak memory usage of the vertex, in bytes. :vartype peak_mem_usage: long """ @@ -47,8 +47,8 @@ class JobStatisticsVertex(Model): 'peak_mem_usage': {'key': 'peakMemUsage', 'type': 'long'}, } - def __init__(self): - super(JobStatisticsVertex, self).__init__() + def __init__(self, **kwargs): + super(JobStatisticsVertex, self).__init__(**kwargs) self.name = None self.vertex_id = None self.execution_time = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_py3.py new file mode 100644 index 000000000000..85b20fc5576c --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobStatisticsVertex(Model): + """The detailed information for a vertex. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar name: The name of the vertex. + :vartype name: str + :ivar vertex_id: The id of the vertex. + :vartype vertex_id: str + :ivar execution_time: The amount of execution time of the vertex. + :vartype execution_time: timedelta + :ivar data_read: The amount of data read of the vertex, in bytes. + :vartype data_read: long + :ivar peak_mem_usage: The amount of peak memory usage of the vertex, in + bytes. 
+ :vartype peak_mem_usage: long + """ + + _validation = { + 'name': {'readonly': True}, + 'vertex_id': {'readonly': True}, + 'execution_time': {'readonly': True}, + 'data_read': {'readonly': True}, + 'peak_mem_usage': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'vertex_id': {'key': 'vertexId', 'type': 'str'}, + 'execution_time': {'key': 'executionTime', 'type': 'duration'}, + 'data_read': {'key': 'dataRead', 'type': 'long'}, + 'peak_mem_usage': {'key': 'peakMemUsage', 'type': 'long'}, + } + + def __init__(self, **kwargs) -> None: + super(JobStatisticsVertex, self).__init__(**kwargs) + self.name = None + self.vertex_id = None + self.execution_time = None + self.data_read = None + self.peak_mem_usage = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage.py index edf6908ab3e2..c5567667ea3e 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage.py @@ -18,60 +18,60 @@ class JobStatisticsVertexStage(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar data_read: the amount of data read, in bytes. + :ivar data_read: The amount of data read, in bytes. :vartype data_read: long - :ivar data_read_cross_pod: the amount of data read across multiple pods, + :ivar data_read_cross_pod: The amount of data read across multiple pods, in bytes. :vartype data_read_cross_pod: long - :ivar data_read_intra_pod: the amount of data read in one pod, in bytes. + :ivar data_read_intra_pod: The amount of data read in one pod, in bytes. :vartype data_read_intra_pod: long - :ivar data_to_read: the amount of data remaining to be read, in bytes. + :ivar data_to_read: The amount of data remaining to be read, in bytes. :vartype data_to_read: long - :ivar data_written: the amount of data written, in bytes. + :ivar data_written: The amount of data written, in bytes. :vartype data_written: long - :ivar duplicate_discard_count: the number of duplicates that were + :ivar duplicate_discard_count: The number of duplicates that were discarded. :vartype duplicate_discard_count: int - :ivar failed_count: the number of failures that occured in this stage. + :ivar failed_count: The number of failures that occurred in this stage. :vartype failed_count: int - :ivar max_vertex_data_read: the maximum amount of data read in a single + :ivar max_vertex_data_read: The maximum amount of data read in a single vertex, in bytes. :vartype max_vertex_data_read: long - :ivar min_vertex_data_read: the minimum amount of data read in a single + :ivar min_vertex_data_read: The minimum amount of data read in a single vertex, in bytes. :vartype min_vertex_data_read: long - :ivar read_failure_count: the number of read failures in this stage. + :ivar read_failure_count: The number of read failures in this stage. :vartype read_failure_count: int - :ivar revocation_count: the number of vertices that were revoked during + :ivar revocation_count: The number of vertices that were revoked during this stage. :vartype revocation_count: int - :ivar running_count: the number of currently running vertices in this + :ivar running_count: The number of currently running vertices in this stage.
:vartype running_count: int - :ivar scheduled_count: the number of currently scheduled vertices in this - stage + :ivar scheduled_count: The number of currently scheduled vertices in this + stage. :vartype scheduled_count: int - :ivar stage_name: the name of this stage in job execution. + :ivar stage_name: The name of this stage in job execution. :vartype stage_name: str - :ivar succeeded_count: the number of vertices that succeeded in this + :ivar succeeded_count: The number of vertices that succeeded in this stage. :vartype succeeded_count: int - :ivar temp_data_written: the amount of temporary data written, in bytes. + :ivar temp_data_written: The amount of temporary data written, in bytes. :vartype temp_data_written: long - :ivar total_count: the total vertex count for this stage. + :ivar total_count: The total vertex count for this stage. :vartype total_count: int - :ivar total_failed_time: the amount of time that failed vertices took up + :ivar total_failed_time: The amount of time that failed vertices took up in this stage. :vartype total_failed_time: timedelta - :ivar total_progress: the current progress of this stage, as a percentage. + :ivar total_progress: The current progress of this stage, as a percentage. :vartype total_progress: int - :ivar total_succeeded_time: the amount of time all successful vertices + :ivar total_succeeded_time: The amount of time all successful vertices took in this stage. :vartype total_succeeded_time: timedelta - :ivar total_peak_mem_usage: the sum of the peak memory usage of all the + :ivar total_peak_mem_usage: The sum of the peak memory usage of all the vertices in the stage, in bytes. :vartype total_peak_mem_usage: long - :ivar total_execution_time: the sum of the total execution time of all the + :ivar total_execution_time: The sum of the total execution time of all the vertices in the stage. :vartype total_execution_time: timedelta :param max_data_read_vertex: the vertex with the maximum amount of data @@ -86,28 +86,28 @@ class JobStatisticsVertexStage(Model): usage. :type max_peak_mem_usage_vertex: ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex - :ivar estimated_vertex_cpu_core_count: the estimated vertex CPU core + :ivar estimated_vertex_cpu_core_count: The estimated vertex CPU core count. :vartype estimated_vertex_cpu_core_count: int - :ivar estimated_vertex_peak_cpu_core_count: the estimated vertex peak CPU + :ivar estimated_vertex_peak_cpu_core_count: The estimated vertex peak CPU core count. :vartype estimated_vertex_peak_cpu_core_count: int - :ivar estimated_vertex_mem_size: the estimated vertex memory size, in + :ivar estimated_vertex_mem_size: The estimated vertex memory size, in bytes. :vartype estimated_vertex_mem_size: long - :param allocated_container_cpu_core_count: the statistics information for + :param allocated_container_cpu_core_count: The statistics information for the allocated container CPU core count. :type allocated_container_cpu_core_count: ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - :param allocated_container_mem_size: the statistics information for the + :param allocated_container_mem_size: The statistics information for the allocated container memory size. :type allocated_container_mem_size: ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - :param used_vertex_cpu_core_count: the statistics information for the used + :param used_vertex_cpu_core_count: The statistics information for the used vertex CPU core count. 
:type used_vertex_cpu_core_count: ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - :param used_vertex_peak_mem_size: the statistics information for the used + :param used_vertex_peak_mem_size: The statistics information for the used vertex peak memory size. :type used_vertex_peak_mem_size: ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics @@ -176,8 +176,8 @@ class JobStatisticsVertexStage(Model): 'used_vertex_peak_mem_size': {'key': 'usedVertexPeakMemSize', 'type': 'ResourceUsageStatistics'}, } - def __init__(self, max_data_read_vertex=None, max_execution_time_vertex=None, max_peak_mem_usage_vertex=None, allocated_container_cpu_core_count=None, allocated_container_mem_size=None, used_vertex_cpu_core_count=None, used_vertex_peak_mem_size=None): - super(JobStatisticsVertexStage, self).__init__() + def __init__(self, **kwargs): + super(JobStatisticsVertexStage, self).__init__(**kwargs) self.data_read = None self.data_read_cross_pod = None self.data_read_intra_pod = None @@ -200,13 +200,13 @@ def __init__(self, max_data_read_vertex=None, max_execution_time_vertex=None, ma self.total_succeeded_time = None self.total_peak_mem_usage = None self.total_execution_time = None - self.max_data_read_vertex = max_data_read_vertex - self.max_execution_time_vertex = max_execution_time_vertex - self.max_peak_mem_usage_vertex = max_peak_mem_usage_vertex + self.max_data_read_vertex = kwargs.get('max_data_read_vertex', None) + self.max_execution_time_vertex = kwargs.get('max_execution_time_vertex', None) + self.max_peak_mem_usage_vertex = kwargs.get('max_peak_mem_usage_vertex', None) self.estimated_vertex_cpu_core_count = None self.estimated_vertex_peak_cpu_core_count = None self.estimated_vertex_mem_size = None - self.allocated_container_cpu_core_count = allocated_container_cpu_core_count - self.allocated_container_mem_size = allocated_container_mem_size - self.used_vertex_cpu_core_count = used_vertex_cpu_core_count - self.used_vertex_peak_mem_size = used_vertex_peak_mem_size + self.allocated_container_cpu_core_count = kwargs.get('allocated_container_cpu_core_count', None) + self.allocated_container_mem_size = kwargs.get('allocated_container_mem_size', None) + self.used_vertex_cpu_core_count = kwargs.get('used_vertex_cpu_core_count', None) + self.used_vertex_peak_mem_size = kwargs.get('used_vertex_peak_mem_size', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage_py3.py new file mode 100644 index 000000000000..b8987167dec0 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage_py3.py @@ -0,0 +1,212 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobStatisticsVertexStage(Model): + """The Data Lake Analytics job statistics vertex stage information. + + Variables are only populated by the server, and will be ignored when + sending a request. 
+ + :ivar data_read: The amount of data read, in bytes. + :vartype data_read: long + :ivar data_read_cross_pod: The amount of data read across multiple pods, + in bytes. + :vartype data_read_cross_pod: long + :ivar data_read_intra_pod: The amount of data read in one pod, in bytes. + :vartype data_read_intra_pod: long + :ivar data_to_read: The amount of data remaining to be read, in bytes. + :vartype data_to_read: long + :ivar data_written: The amount of data written, in bytes. + :vartype data_written: long + :ivar duplicate_discard_count: The number of duplicates that were + discarded. + :vartype duplicate_discard_count: int + :ivar failed_count: The number of failures that occurred in this stage. + :vartype failed_count: int + :ivar max_vertex_data_read: The maximum amount of data read in a single + vertex, in bytes. + :vartype max_vertex_data_read: long + :ivar min_vertex_data_read: The minimum amount of data read in a single + vertex, in bytes. + :vartype min_vertex_data_read: long + :ivar read_failure_count: The number of read failures in this stage. + :vartype read_failure_count: int + :ivar revocation_count: The number of vertices that were revoked during + this stage. + :vartype revocation_count: int + :ivar running_count: The number of currently running vertices in this + stage. + :vartype running_count: int + :ivar scheduled_count: The number of currently scheduled vertices in this + stage. + :vartype scheduled_count: int + :ivar stage_name: The name of this stage in job execution. + :vartype stage_name: str + :ivar succeeded_count: The number of vertices that succeeded in this + stage. + :vartype succeeded_count: int + :ivar temp_data_written: The amount of temporary data written, in bytes. + :vartype temp_data_written: long + :ivar total_count: The total vertex count for this stage. + :vartype total_count: int + :ivar total_failed_time: The amount of time that failed vertices took up + in this stage. + :vartype total_failed_time: timedelta + :ivar total_progress: The current progress of this stage, as a percentage. + :vartype total_progress: int + :ivar total_succeeded_time: The amount of time all successful vertices + took in this stage. + :vartype total_succeeded_time: timedelta + :ivar total_peak_mem_usage: The sum of the peak memory usage of all the + vertices in the stage, in bytes. + :vartype total_peak_mem_usage: long + :ivar total_execution_time: The sum of the total execution time of all the + vertices in the stage. + :vartype total_execution_time: timedelta + :param max_data_read_vertex: the vertex with the maximum amount of data + read. + :type max_data_read_vertex: + ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex + :param max_execution_time_vertex: the vertex with the maximum execution + time. + :type max_execution_time_vertex: + ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex + :param max_peak_mem_usage_vertex: the vertex with the maximum peak memory + usage. + :type max_peak_mem_usage_vertex: + ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex + :ivar estimated_vertex_cpu_core_count: The estimated vertex CPU core + count. + :vartype estimated_vertex_cpu_core_count: int + :ivar estimated_vertex_peak_cpu_core_count: The estimated vertex peak CPU + core count. + :vartype estimated_vertex_peak_cpu_core_count: int + :ivar estimated_vertex_mem_size: The estimated vertex memory size, in + bytes.
+ :vartype estimated_vertex_mem_size: long + :param allocated_container_cpu_core_count: The statistics information for + the allocated container CPU core count. + :type allocated_container_cpu_core_count: + ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics + :param allocated_container_mem_size: The statistics information for the + allocated container memory size. + :type allocated_container_mem_size: + ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics + :param used_vertex_cpu_core_count: The statistics information for the used + vertex CPU core count. + :type used_vertex_cpu_core_count: + ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics + :param used_vertex_peak_mem_size: The statistics information for the used + vertex peak memory size. + :type used_vertex_peak_mem_size: + ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics + """ + + _validation = { + 'data_read': {'readonly': True}, + 'data_read_cross_pod': {'readonly': True}, + 'data_read_intra_pod': {'readonly': True}, + 'data_to_read': {'readonly': True}, + 'data_written': {'readonly': True}, + 'duplicate_discard_count': {'readonly': True}, + 'failed_count': {'readonly': True}, + 'max_vertex_data_read': {'readonly': True}, + 'min_vertex_data_read': {'readonly': True}, + 'read_failure_count': {'readonly': True}, + 'revocation_count': {'readonly': True}, + 'running_count': {'readonly': True}, + 'scheduled_count': {'readonly': True}, + 'stage_name': {'readonly': True}, + 'succeeded_count': {'readonly': True}, + 'temp_data_written': {'readonly': True}, + 'total_count': {'readonly': True}, + 'total_failed_time': {'readonly': True}, + 'total_progress': {'readonly': True}, + 'total_succeeded_time': {'readonly': True}, + 'total_peak_mem_usage': {'readonly': True}, + 'total_execution_time': {'readonly': True}, + 'estimated_vertex_cpu_core_count': {'readonly': True}, + 'estimated_vertex_peak_cpu_core_count': {'readonly': True}, + 'estimated_vertex_mem_size': {'readonly': True}, + } + + _attribute_map = { + 'data_read': {'key': 'dataRead', 'type': 'long'}, + 'data_read_cross_pod': {'key': 'dataReadCrossPod', 'type': 'long'}, + 'data_read_intra_pod': {'key': 'dataReadIntraPod', 'type': 'long'}, + 'data_to_read': {'key': 'dataToRead', 'type': 'long'}, + 'data_written': {'key': 'dataWritten', 'type': 'long'}, + 'duplicate_discard_count': {'key': 'duplicateDiscardCount', 'type': 'int'}, + 'failed_count': {'key': 'failedCount', 'type': 'int'}, + 'max_vertex_data_read': {'key': 'maxVertexDataRead', 'type': 'long'}, + 'min_vertex_data_read': {'key': 'minVertexDataRead', 'type': 'long'}, + 'read_failure_count': {'key': 'readFailureCount', 'type': 'int'}, + 'revocation_count': {'key': 'revocationCount', 'type': 'int'}, + 'running_count': {'key': 'runningCount', 'type': 'int'}, + 'scheduled_count': {'key': 'scheduledCount', 'type': 'int'}, + 'stage_name': {'key': 'stageName', 'type': 'str'}, + 'succeeded_count': {'key': 'succeededCount', 'type': 'int'}, + 'temp_data_written': {'key': 'tempDataWritten', 'type': 'long'}, + 'total_count': {'key': 'totalCount', 'type': 'int'}, + 'total_failed_time': {'key': 'totalFailedTime', 'type': 'duration'}, + 'total_progress': {'key': 'totalProgress', 'type': 'int'}, + 'total_succeeded_time': {'key': 'totalSucceededTime', 'type': 'duration'}, + 'total_peak_mem_usage': {'key': 'totalPeakMemUsage', 'type': 'long'}, + 'total_execution_time': {'key': 'totalExecutionTime', 'type': 'duration'}, + 'max_data_read_vertex': {'key': 'maxDataReadVertex', 'type': 
'JobStatisticsVertex'}, + 'max_execution_time_vertex': {'key': 'maxExecutionTimeVertex', 'type': 'JobStatisticsVertex'}, + 'max_peak_mem_usage_vertex': {'key': 'maxPeakMemUsageVertex', 'type': 'JobStatisticsVertex'}, + 'estimated_vertex_cpu_core_count': {'key': 'estimatedVertexCpuCoreCount', 'type': 'int'}, + 'estimated_vertex_peak_cpu_core_count': {'key': 'estimatedVertexPeakCpuCoreCount', 'type': 'int'}, + 'estimated_vertex_mem_size': {'key': 'estimatedVertexMemSize', 'type': 'long'}, + 'allocated_container_cpu_core_count': {'key': 'allocatedContainerCpuCoreCount', 'type': 'ResourceUsageStatistics'}, + 'allocated_container_mem_size': {'key': 'allocatedContainerMemSize', 'type': 'ResourceUsageStatistics'}, + 'used_vertex_cpu_core_count': {'key': 'usedVertexCpuCoreCount', 'type': 'ResourceUsageStatistics'}, + 'used_vertex_peak_mem_size': {'key': 'usedVertexPeakMemSize', 'type': 'ResourceUsageStatistics'}, + } + + def __init__(self, *, max_data_read_vertex=None, max_execution_time_vertex=None, max_peak_mem_usage_vertex=None, allocated_container_cpu_core_count=None, allocated_container_mem_size=None, used_vertex_cpu_core_count=None, used_vertex_peak_mem_size=None, **kwargs) -> None: + super(JobStatisticsVertexStage, self).__init__(**kwargs) + self.data_read = None + self.data_read_cross_pod = None + self.data_read_intra_pod = None + self.data_to_read = None + self.data_written = None + self.duplicate_discard_count = None + self.failed_count = None + self.max_vertex_data_read = None + self.min_vertex_data_read = None + self.read_failure_count = None + self.revocation_count = None + self.running_count = None + self.scheduled_count = None + self.stage_name = None + self.succeeded_count = None + self.temp_data_written = None + self.total_count = None + self.total_failed_time = None + self.total_progress = None + self.total_succeeded_time = None + self.total_peak_mem_usage = None + self.total_execution_time = None + self.max_data_read_vertex = max_data_read_vertex + self.max_execution_time_vertex = max_execution_time_vertex + self.max_peak_mem_usage_vertex = max_peak_mem_usage_vertex + self.estimated_vertex_cpu_core_count = None + self.estimated_vertex_peak_cpu_core_count = None + self.estimated_vertex_mem_size = None + self.allocated_container_cpu_core_count = allocated_container_cpu_core_count + self.allocated_container_mem_size = allocated_container_mem_size + self.used_vertex_cpu_core_count = used_vertex_cpu_core_count + self.used_vertex_peak_mem_size = used_vertex_peak_mem_size diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics.py index e8fb529d5c72..468561904949 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics.py @@ -13,16 +13,16 @@ class ResourceUsageStatistics(Model): - """the statistics information for resource usage. + """The statistics information for resource usage. Variables are only populated by the server, and will be ignored when sending a request. - :ivar average: the average value. + :ivar average: The average value. :vartype average: float - :ivar minimum: the minimum value. + :ivar minimum: The minimum value. :vartype minimum: long - :ivar maximum: the maximum value. + :ivar maximum: The maximum value. 
:vartype maximum: long """ @@ -38,8 +38,8 @@ class ResourceUsageStatistics(Model): 'maximum': {'key': 'maximum', 'type': 'long'}, } - def __init__(self): - super(ResourceUsageStatistics, self).__init__() + def __init__(self, **kwargs): + super(ResourceUsageStatistics, self).__init__(**kwargs) self.average = None self.minimum = None self.maximum = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics_py3.py new file mode 100644 index 000000000000..a7b64a4926d9 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceUsageStatistics(Model): + """The statistics information for resource usage. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar average: The average value. + :vartype average: float + :ivar minimum: The minimum value. + :vartype minimum: long + :ivar maximum: The maximum value. + :vartype maximum: long + """ + + _validation = { + 'average': {'readonly': True}, + 'minimum': {'readonly': True}, + 'maximum': {'readonly': True}, + } + + _attribute_map = { + 'average': {'key': 'average', 'type': 'float'}, + 'minimum': {'key': 'minimum', 'type': 'long'}, + 'maximum': {'key': 'maximum', 'type': 'long'}, + } + + def __init__(self, **kwargs) -> None: + super(ResourceUsageStatistics, self).__init__(**kwargs) + self.average = None + self.minimum = None + self.maximum = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties.py index 64e2ed11991e..1772c11b91a3 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties.py @@ -19,39 +19,42 @@ class ScopeJobProperties(JobProperties): Variables are only populated by the server, and will be ignored when sending a request. - :param runtime_version: the runtime version of the Data Lake Analytics + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics engine to use for the specific type of job being run. :type runtime_version: str - :param script: the script to run. Please note that the maximum script size - is 3 MB. + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. :type script: str - :param type: Constant filled by server. + :param type: Required. Constant filled by server. :type type: str - :ivar resources: the list of resources that are required by the job + :ivar resources: The list of resources that are required by the job. 
:vartype resources: list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] - :ivar user_algebra_path: the algebra file path after the job has completed + :ivar user_algebra_path: The algebra file path after the job has + completed. :vartype user_algebra_path: str - :param notifier: the list of email addresses, separated by semi-colons, to + :param notifier: The list of email addresses, separated by semi-colons, to notify when the job reaches a terminal state. :type notifier: str - :ivar total_compilation_time: the total time this job spent compiling. + :ivar total_compilation_time: The total time this job spent compiling. This value should not be set by the user and will be ignored if it is. :vartype total_compilation_time: timedelta - :ivar total_paused_time: the total time this job spent paused. This value - should not be set by the user and will be ignored if it is. - :vartype total_paused_time: timedelta - :ivar total_queued_time: the total time this job spent queued. This value + :ivar total_queued_time: The total time this job spent queued. This value should not be set by the user and will be ignored if it is. :vartype total_queued_time: timedelta - :ivar total_running_time: the total time this job spent executing. This + :ivar total_running_time: The total time this job spent executing. This value should not be set by the user and will be ignored if it is. :vartype total_running_time: timedelta - :ivar root_process_node_id: the ID used to identify the job manager + :ivar total_paused_time: The total time this job spent paused. This value + should not be set by the user and will be ignored if it is. + :vartype total_paused_time: timedelta + :ivar root_process_node_id: The ID used to identify the job manager coordinating job execution. This value should not be set by the user and will be ignored if it is. :vartype root_process_node_id: str - :ivar yarn_application_id: the ID used to identify the yarn application + :ivar yarn_application_id: The ID used to identify the yarn application executing the job. This value should not be set by the user and will be ignored if it is. 
:vartype yarn_application_id: str @@ -63,9 +66,9 @@ class ScopeJobProperties(JobProperties): 'resources': {'readonly': True}, 'user_algebra_path': {'readonly': True}, 'total_compilation_time': {'readonly': True}, - 'total_paused_time': {'readonly': True}, 'total_queued_time': {'readonly': True}, 'total_running_time': {'readonly': True}, + 'total_paused_time': {'readonly': True}, 'root_process_node_id': {'readonly': True}, 'yarn_application_id': {'readonly': True}, } @@ -78,22 +81,22 @@ class ScopeJobProperties(JobProperties): 'user_algebra_path': {'key': 'userAlgebraPath', 'type': 'str'}, 'notifier': {'key': 'notifier', 'type': 'str'}, 'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'}, - 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, 'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'}, 'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'}, + 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, 'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'}, 'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'}, } - def __init__(self, script, runtime_version=None, notifier=None): - super(ScopeJobProperties, self).__init__(runtime_version=runtime_version, script=script) + def __init__(self, **kwargs): + super(ScopeJobProperties, self).__init__(**kwargs) self.resources = None self.user_algebra_path = None - self.notifier = notifier + self.notifier = kwargs.get('notifier', None) self.total_compilation_time = None - self.total_paused_time = None self.total_queued_time = None self.total_running_time = None + self.total_paused_time = None self.root_process_node_id = None self.yarn_application_id = None self.type = 'Scope' diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties_py3.py new file mode 100644 index 000000000000..8a16deab8305 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties_py3.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .job_properties_py3 import JobProperties + + +class ScopeJobProperties(JobProperties): + """Scope job properties used when submitting and retrieving Scope jobs. (Only + for use internally with Scope job type.). + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :ivar resources: The list of resources that are required by the job. 
+ :vartype resources: + list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] + :ivar user_algebra_path: The algebra file path after the job has + completed. + :vartype user_algebra_path: str + :param notifier: The list of email addresses, separated by semi-colons, to + notify when the job reaches a terminal state. + :type notifier: str + :ivar total_compilation_time: The total time this job spent compiling. + This value should not be set by the user and will be ignored if it is. + :vartype total_compilation_time: timedelta + :ivar total_queued_time: The total time this job spent queued. This value + should not be set by the user and will be ignored if it is. + :vartype total_queued_time: timedelta + :ivar total_running_time: The total time this job spent executing. This + value should not be set by the user and will be ignored if it is. + :vartype total_running_time: timedelta + :ivar total_paused_time: The total time this job spent paused. This value + should not be set by the user and will be ignored if it is. + :vartype total_paused_time: timedelta + :ivar root_process_node_id: The ID used to identify the job manager + coordinating job execution. This value should not be set by the user and + will be ignored if it is. + :vartype root_process_node_id: str + :ivar yarn_application_id: The ID used to identify the yarn application + executing the job. This value should not be set by the user and will be + ignored if it is. + :vartype yarn_application_id: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + 'resources': {'readonly': True}, + 'user_algebra_path': {'readonly': True}, + 'total_compilation_time': {'readonly': True}, + 'total_queued_time': {'readonly': True}, + 'total_running_time': {'readonly': True}, + 'total_paused_time': {'readonly': True}, + 'root_process_node_id': {'readonly': True}, + 'yarn_application_id': {'readonly': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'resources': {'key': 'resources', 'type': '[ScopeJobResource]'}, + 'user_algebra_path': {'key': 'userAlgebraPath', 'type': 'str'}, + 'notifier': {'key': 'notifier', 'type': 'str'}, + 'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'}, + 'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'}, + 'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'}, + 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, + 'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'}, + 'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'}, + } + + def __init__(self, *, script: str, runtime_version: str=None, notifier: str=None, **kwargs) -> None: + super(ScopeJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) + self.resources = None + self.user_algebra_path = None + self.notifier = notifier + self.total_compilation_time = None + self.total_queued_time = None + self.total_running_time = None + self.total_paused_time = None + self.root_process_node_id = None + self.yarn_application_id = None + self.type = 'Scope' diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource.py index 10e30d0c6a76..fec138b82ac7 100644 --- 
a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource.py @@ -15,9 +15,9 @@ class ScopeJobResource(Model): """The Scope job resources. (Only for use internally with Scope job type.). - :param name: the name of the resource. + :param name: The name of the resource. :type name: str - :param path: the path to the resource. + :param path: The path to the resource. :type path: str """ @@ -26,7 +26,7 @@ class ScopeJobResource(Model): 'path': {'key': 'path', 'type': 'str'}, } - def __init__(self, name=None, path=None): - super(ScopeJobResource, self).__init__() - self.name = name - self.path = path + def __init__(self, **kwargs): + super(ScopeJobResource, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.path = kwargs.get('path', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource_py3.py new file mode 100644 index 000000000000..731eb3bf417a --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ScopeJobResource(Model): + """The Scope job resources. (Only for use internally with Scope job type.). + + :param name: The name of the resource. + :type name: str + :param path: The path to the resource. + :type path: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'path': {'key': 'path', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, path: str=None, **kwargs) -> None: + super(ScopeJobResource, self).__init__(**kwargs) + self.name = name + self.path = path diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters.py index 04e8a4ae3348..74194d6f210d 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters.py @@ -16,26 +16,30 @@ class UpdateJobParameters(Model): """The parameters that can be used to update existing Data Lake Analytics job information properties. (Only for use internally with Scope job type.). - :param degree_of_parallelism: the degree of parallelism used for this job. - This must be greater than 0, if set to less than 0 it will default to 1. + :param degree_of_parallelism: The degree of parallelism used for this job. :type degree_of_parallelism: int - :param priority: the priority value for the current job. Lower numbers + :param degree_of_parallelism_percent: The degree of parallelism, as a + percentage, used for this job. + :type degree_of_parallelism_percent: float + :param priority: The priority value for the current job.
Lower numbers have a higher priority. By default, a job has a priority of 1000. This must be greater than 0. :type priority: int - :param tags: the key-value pairs used to add additional metadata to the - job information. (Only for use internally with Scope job type.) + :param tags: The key-value pairs used to add additional metadata to the + job information. :type tags: dict[str, str] """ _attribute_map = { 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, 'priority': {'key': 'priority', 'type': 'int'}, 'tags': {'key': 'tags', 'type': '{str}'}, } - def __init__(self, degree_of_parallelism=None, priority=None, tags=None): - super(UpdateJobParameters, self).__init__() - self.degree_of_parallelism = degree_of_parallelism - self.priority = priority - self.tags = tags + def __init__(self, **kwargs): + super(UpdateJobParameters, self).__init__(**kwargs) + self.degree_of_parallelism = kwargs.get('degree_of_parallelism', None) + self.degree_of_parallelism_percent = kwargs.get('degree_of_parallelism_percent', None) + self.priority = kwargs.get('priority', None) + self.tags = kwargs.get('tags', None) diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters_py3.py new file mode 100644 index 000000000000..fbbba329f131 --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UpdateJobParameters(Model): + """The parameters that can be used to update existing Data Lake Analytics job + information properties. (Only for use internally with Scope job type.). + + :param degree_of_parallelism: The degree of parallelism used for this job. + :type degree_of_parallelism: int + :param degree_of_parallelism_percent: The degree of parallelism, as a + percentage, used for this job. + :type degree_of_parallelism_percent: float + :param priority: The priority value for the current job. Lower numbers + have a higher priority. By default, a job has a priority of 1000. This + must be greater than 0. + :type priority: int + :param tags: The key-value pairs used to add additional metadata to the + job information.
+ :type tags: dict[str, str] + """ + + _attribute_map = { + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, *, degree_of_parallelism: int=None, degree_of_parallelism_percent: float=None, priority: int=None, tags=None, **kwargs) -> None: + super(UpdateJobParameters, self).__init__(**kwargs) + self.degree_of_parallelism = degree_of_parallelism + self.degree_of_parallelism_percent = degree_of_parallelism_percent + self.priority = priority + self.tags = tags diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties.py index 6f595b7130b2..fe9b57ec64ca 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties.py @@ -18,51 +18,54 @@ class USqlJobProperties(JobProperties): Variables are only populated by the server, and will be ignored when sending a request. - :param runtime_version: the runtime version of the Data Lake Analytics + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics engine to use for the specific type of job being run. :type runtime_version: str - :param script: the script to run. Please note that the maximum script size - is 3 MB. + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. :type script: str - :param type: Constant filled by server. + :param type: Required. Constant filled by server. :type type: str - :ivar resources: the list of resources that are required by the job + :ivar resources: The list of resources that are required by the job. :vartype resources: list[~azure.mgmt.datalake.analytics.job.models.JobResource] - :param statistics: the job specific statistics. + :param statistics: The job specific statistics. :type statistics: ~azure.mgmt.datalake.analytics.job.models.JobStatistics - :param debug_data: the job specific debug data locations. + :param debug_data: The job specific debug data locations. :type debug_data: ~azure.mgmt.datalake.analytics.job.models.JobDataPath - :ivar diagnostics: the diagnostics for the job. + :ivar diagnostics: The diagnostics for the job. :vartype diagnostics: list[~azure.mgmt.datalake.analytics.job.models.Diagnostics] - :ivar algebra_file_path: the algebra file path after the job has completed + :ivar algebra_file_path: The algebra file path after the job has + completed. :vartype algebra_file_path: str - :ivar total_compilation_time: the total time this job spent compiling. + :ivar total_compilation_time: The total time this job spent compiling. This value should not be set by the user and will be ignored if it is. :vartype total_compilation_time: timedelta - :ivar total_paused_time: the total time this job spent paused. This value - should not be set by the user and will be ignored if it is. - :vartype total_paused_time: timedelta - :ivar total_queued_time: the total time this job spent queued. This value + :ivar total_queued_time: The total time this job spent queued. This value should not be set by the user and will be ignored if it is. 
:vartype total_queued_time: timedelta - :ivar total_running_time: the total time this job spent executing. This + :ivar total_running_time: The total time this job spent executing. This value should not be set by the user and will be ignored if it is. :vartype total_running_time: timedelta - :ivar root_process_node_id: the ID used to identify the job manager + :ivar total_paused_time: The total time this job spent paused. This value + should not be set by the user and will be ignored if it is. + :vartype total_paused_time: timedelta + :ivar root_process_node_id: The ID used to identify the job manager coordinating job execution. This value should not be set by the user and will be ignored if it is. :vartype root_process_node_id: str - :ivar yarn_application_id: the ID used to identify the yarn application + :ivar yarn_application_id: The ID used to identify the yarn application executing the job. This value should not be set by the user and will be ignored if it is. :vartype yarn_application_id: str - :ivar yarn_application_time_stamp: the timestamp (in ticks) for the yarn + :ivar yarn_application_time_stamp: The timestamp (in ticks) for the yarn application executing the job. This value should not be set by the user and will be ignored if it is. :vartype yarn_application_time_stamp: long - :ivar compile_mode: the specific compilation mode for the job used during + :ivar compile_mode: The specific compilation mode for the job used during execution. If this is not specified during submission, the server will determine the optimal compilation mode. Possible values include: 'Semantic', 'Full', 'SingleBox' @@ -77,9 +80,9 @@ class USqlJobProperties(JobProperties): 'diagnostics': {'readonly': True}, 'algebra_file_path': {'readonly': True}, 'total_compilation_time': {'readonly': True}, - 'total_paused_time': {'readonly': True}, 'total_queued_time': {'readonly': True}, 'total_running_time': {'readonly': True}, + 'total_paused_time': {'readonly': True}, 'root_process_node_id': {'readonly': True}, 'yarn_application_id': {'readonly': True}, 'yarn_application_time_stamp': {'readonly': True}, @@ -96,26 +99,26 @@ class USqlJobProperties(JobProperties): 'diagnostics': {'key': 'diagnostics', 'type': '[Diagnostics]'}, 'algebra_file_path': {'key': 'algebraFilePath', 'type': 'str'}, 'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'}, - 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, 'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'}, 'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'}, + 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, 'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'}, 'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'}, 'yarn_application_time_stamp': {'key': 'yarnApplicationTimeStamp', 'type': 'long'}, 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, } - def __init__(self, script, runtime_version=None, statistics=None, debug_data=None): - super(USqlJobProperties, self).__init__(runtime_version=runtime_version, script=script) + def __init__(self, **kwargs): + super(USqlJobProperties, self).__init__(**kwargs) self.resources = None - self.statistics = statistics - self.debug_data = debug_data + self.statistics = kwargs.get('statistics', None) + self.debug_data = kwargs.get('debug_data', None) self.diagnostics = None self.algebra_file_path = None self.total_compilation_time = None - self.total_paused_time = None self.total_queued_time = None 
self.total_running_time = None + self.total_paused_time = None self.root_process_node_id = None self.yarn_application_id = None self.yarn_application_time_stamp = None diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties_py3.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties_py3.py new file mode 100644 index 000000000000..9cfcbb25799a --- /dev/null +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties_py3.py @@ -0,0 +1,126 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .job_properties_py3 import JobProperties + + +class USqlJobProperties(JobProperties): + """U-SQL job properties used when retrieving U-SQL jobs. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :ivar resources: The list of resources that are required by the job. + :vartype resources: + list[~azure.mgmt.datalake.analytics.job.models.JobResource] + :param statistics: The job specific statistics. + :type statistics: ~azure.mgmt.datalake.analytics.job.models.JobStatistics + :param debug_data: The job specific debug data locations. + :type debug_data: ~azure.mgmt.datalake.analytics.job.models.JobDataPath + :ivar diagnostics: The diagnostics for the job. + :vartype diagnostics: + list[~azure.mgmt.datalake.analytics.job.models.Diagnostics] + :ivar algebra_file_path: The algebra file path after the job has + completed. + :vartype algebra_file_path: str + :ivar total_compilation_time: The total time this job spent compiling. + This value should not be set by the user and will be ignored if it is. + :vartype total_compilation_time: timedelta + :ivar total_queued_time: The total time this job spent queued. This value + should not be set by the user and will be ignored if it is. + :vartype total_queued_time: timedelta + :ivar total_running_time: The total time this job spent executing. This + value should not be set by the user and will be ignored if it is. + :vartype total_running_time: timedelta + :ivar total_paused_time: The total time this job spent paused. This value + should not be set by the user and will be ignored if it is. + :vartype total_paused_time: timedelta + :ivar root_process_node_id: The ID used to identify the job manager + coordinating job execution. This value should not be set by the user and + will be ignored if it is. + :vartype root_process_node_id: str + :ivar yarn_application_id: The ID used to identify the yarn application + executing the job. This value should not be set by the user and will be + ignored if it is. 
+ :vartype yarn_application_id: str + :ivar yarn_application_time_stamp: The timestamp (in ticks) for the yarn + application executing the job. This value should not be set by the user + and will be ignored if it is. + :vartype yarn_application_time_stamp: long + :ivar compile_mode: The specific compilation mode for the job used during + execution. If this is not specified during submission, the server will + determine the optimal compilation mode. Possible values include: + 'Semantic', 'Full', 'SingleBox' + :vartype compile_mode: str or + ~azure.mgmt.datalake.analytics.job.models.CompileMode + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + 'resources': {'readonly': True}, + 'diagnostics': {'readonly': True}, + 'algebra_file_path': {'readonly': True}, + 'total_compilation_time': {'readonly': True}, + 'total_queued_time': {'readonly': True}, + 'total_running_time': {'readonly': True}, + 'total_paused_time': {'readonly': True}, + 'root_process_node_id': {'readonly': True}, + 'yarn_application_id': {'readonly': True}, + 'yarn_application_time_stamp': {'readonly': True}, + 'compile_mode': {'readonly': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'resources': {'key': 'resources', 'type': '[JobResource]'}, + 'statistics': {'key': 'statistics', 'type': 'JobStatistics'}, + 'debug_data': {'key': 'debugData', 'type': 'JobDataPath'}, + 'diagnostics': {'key': 'diagnostics', 'type': '[Diagnostics]'}, + 'algebra_file_path': {'key': 'algebraFilePath', 'type': 'str'}, + 'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'}, + 'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'}, + 'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'}, + 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, + 'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'}, + 'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'}, + 'yarn_application_time_stamp': {'key': 'yarnApplicationTimeStamp', 'type': 'long'}, + 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, + } + + def __init__(self, *, script: str, runtime_version: str=None, statistics=None, debug_data=None, **kwargs) -> None: + super(USqlJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) + self.resources = None + self.statistics = statistics + self.debug_data = debug_data + self.diagnostics = None + self.algebra_file_path = None + self.total_compilation_time = None + self.total_queued_time = None + self.total_running_time = None + self.total_paused_time = None + self.root_process_node_id = None + self.yarn_application_id = None + self.yarn_application_time_stamp = None + self.compile_mode = None + self.type = 'USql' diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/job_operations.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/job_operations.py index 7535e0f31c31..acbc3f0b1735 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/job_operations.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/job_operations.py @@ -12,8 +12,8 @@ import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError -from msrest.exceptions import DeserializationError -from 
msrestazure.azure_operation import AzureOperationPoller +from msrest.polling import LROPoller, NoPolling +from msrestazure.polling.arm_polling import ARMPolling from .. import models @@ -24,7 +24,7 @@ class JobOperations(object): :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. - :param deserializer: An objec model deserializer. + :param deserializer: An object model deserializer. :ivar api_version: Client Api Version. Constant value: "2017-09-01-preview". """ @@ -82,7 +82,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/jobs' + url = self.list.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True) @@ -111,7 +111,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -120,9 +120,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -140,6 +139,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list.metadata = {'url': '/jobs'} def create( self, account_name, job_identity, parameters, custom_headers=None, raw=False, **operation_config): @@ -165,7 +165,7 @@ def create( :raises: :class:`CloudError` """ # Construct URL - url = '/jobs/{jobIdentity}' + url = self.create.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True), @@ -179,6 +179,7 @@ def create( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -191,9 +192,8 @@ def create( body_content = self._serialize.body(parameters, 'CreateJobParameters') # Construct and send request - request = self._client.put(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -210,6 +210,7 @@ def create( return client_raw_response return deserialized + create.metadata = {'url': '/jobs/{jobIdentity}'} def get( self, account_name, job_identity, 
custom_headers=None, raw=False, **operation_config): @@ -231,7 +232,7 @@ def get( :raises: :class:`CloudError` """ # Construct URL - url = '/jobs/{jobIdentity}' + url = self.get.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True), @@ -245,7 +246,7 @@ def get( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -254,8 +255,8 @@ def get( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -272,12 +273,13 @@ def get( return client_raw_response return deserialized + get.metadata = {'url': '/jobs/{jobIdentity}'} def _update_initial( self, account_name, job_identity, parameters=None, custom_headers=None, raw=False, **operation_config): # Construct URL - url = '/jobs/{jobIdentity}' + url = self.update.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True), @@ -291,6 +293,7 @@ def _update_initial( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -306,9 +309,8 @@ def _update_initial( body_content = None # Construct and send request - request = self._client.patch(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201, 202]: exp = CloudError(response) @@ -327,7 +329,7 @@ def _update_initial( return deserialized def update( - self, account_name, job_identity, parameters=None, custom_headers=None, raw=False, **operation_config): + self, account_name, job_identity, parameters=None, custom_headers=None, raw=False, polling=True, **operation_config): """Updates the job information for the specified job ID. (Only for use internally with Scope job type.). 
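The hunks that follow convert ``update`` from the legacy ``AzureOperationPoller`` flow to the standard ``msrest.polling.LROPoller`` driven by ``ARMPolling``, with the strategy controlled through the new ``polling`` keyword. A minimal usage sketch, assuming the client exposes these operations as ``client.job``; the ``credentials`` object, account name, job GUID, and DNS suffix below are illustrative placeholders::

    from azure.mgmt.datalake.analytics.job import DataLakeAnalyticsJobManagementClient
    from azure.mgmt.datalake.analytics.job.models import UpdateJobParameters

    # Placeholder credentials and DNS suffix; adjust for a real environment.
    client = DataLakeAnalyticsJobManagementClient(credentials, 'azuredatalakeanalytics.net')

    # polling=True (the default) selects ARMPolling; a custom polling object
    # can be supplied instead, and polling=False selects NoPolling (see the
    # cancel sketch further below).
    poller = client.job.update('myadlaaccount', job_id,
                               parameters=UpdateJobParameters(priority=500))
    job_info = poller.result()  # blocks until the long-running update finishes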
@@ -341,13 +343,16 @@ def update( :type parameters: ~azure.mgmt.datalake.analytics.job.models.UpdateJobParameters :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :return: An instance of AzureOperationPoller that returns - JobInformation or ClientRawResponse if raw=true + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns JobInformation or + ClientRawResponse if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.datalake.analytics.job.models.JobInformation] - or ~msrest.pipeline.ClientRawResponse + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.datalake.analytics.job.models.JobInformation]] :raises: :class:`CloudError` """ raw_result = self._update_initial( @@ -358,30 +363,8 @@ def update( raw=True, **operation_config ) - if raw: - return raw_result - - # Construct and send request - def long_running_send(): - return raw_result.response - - def get_long_running_status(status_link, headers=None): - - request = self._client.get(status_link) - if headers: - request.headers.update(headers) - header_parameters = {} - header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id'] - return self._client.send( - request, header_parameters, stream=False, **operation_config) def get_long_running_output(response): - - if response.status_code not in [200, 201, 202]: - exp = CloudError(response) - exp.request_id = response.headers.get('x-ms-request-id') - raise exp - deserialized = self._deserialize('JobInformation', response) if raw: @@ -390,12 +373,14 @@ def get_long_running_output(response): return deserialized - long_running_operation_timeout = operation_config.get( + lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) - return AzureOperationPoller( - long_running_send, get_long_running_output, - get_long_running_status, long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + update.metadata = {'url': '/jobs/{jobIdentity}'} def get_statistics( self, account_name, job_identity, custom_headers=None, raw=False, **operation_config): @@ -417,7 +402,7 @@ def get_statistics( :raises: :class:`CloudError` """ # Construct URL - url = '/jobs/{jobIdentity}/GetStatistics' + url = self.get_statistics.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True), @@ -431,7 +416,7 @@ def get_statistics( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -440,8 +425,8 @@ def get_statistics( header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -458,6 +443,7 @@ def get_statistics( return client_raw_response return deserialized + get_statistics.metadata = {'url': '/jobs/{jobIdentity}/GetStatistics'} def get_debug_data_path( self, account_name, job_identity, custom_headers=None, raw=False, **operation_config): @@ -480,7 +466,7 @@ def get_debug_data_path( :raises: :class:`CloudError` """ # Construct URL - url = '/jobs/{jobIdentity}/GetDebugDataPath' + url = self.get_debug_data_path.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True), @@ -494,7 +480,7 @@ def get_debug_data_path( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -503,8 +489,8 @@ def get_debug_data_path( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -521,12 +507,13 @@ def get_debug_data_path( return client_raw_response return deserialized + get_debug_data_path.metadata = {'url': '/jobs/{jobIdentity}/GetDebugDataPath'} def _cancel_initial( self, account_name, job_identity, custom_headers=None, raw=False, **operation_config): # Construct URL - url = '/jobs/{jobIdentity}/CancelJob' + url = self.cancel.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True), @@ -540,7 +527,6 @@ def _cancel_initial( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -549,8 +535,8 @@ def _cancel_initial( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: exp = CloudError(response) @@ 
-562,7 +548,7 @@ def _cancel_initial( return client_raw_response def cancel( - self, account_name, job_identity, custom_headers=None, raw=False, **operation_config): + self, account_name, job_identity, custom_headers=None, raw=False, polling=True, **operation_config): """Cancels the running job specified by the job ID. :param account_name: The Azure Data Lake Analytics account to execute @@ -572,12 +558,14 @@ def cancel( across all jobs submitted to the service. :type job_identity: str :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :return: An instance of AzureOperationPoller that returns None or - ClientRawResponse if raw=true + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or - ~msrest.pipeline.ClientRawResponse + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError` """ raw_result = self._cancel_initial( @@ -587,46 +575,26 @@ def cancel( raw=True, **operation_config ) - if raw: - return raw_result - - # Construct and send request - def long_running_send(): - return raw_result.response - - def get_long_running_status(status_link, headers=None): - - request = self._client.get(status_link) - if headers: - request.headers.update(headers) - header_parameters = {} - header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id'] - return self._client.send( - request, header_parameters, stream=False, **operation_config) def get_long_running_output(response): - - if response.status_code not in [200, 202, 204]: - exp = CloudError(response) - exp.request_id = response.headers.get('x-ms-request-id') - raise exp - if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response - long_running_operation_timeout = operation_config.get( + lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) - return AzureOperationPoller( - long_running_send, get_long_running_output, - get_long_running_status, long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + cancel.metadata = {'url': '/jobs/{jobIdentity}/CancelJob'} def _yield_method_initial( self, account_name, job_identity, custom_headers=None, raw=False, **operation_config): # Construct URL - url = '/jobs/{jobIdentity}/YieldJob' + url = self.yield_method.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True), @@ -640,7 +608,6 @@ def _yield_method_initial( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: 
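``cancel`` (and ``yield_method`` below) follow the same ``LROPoller`` pattern. Continuing the hypothetical sketch above, passing ``polling=False`` selects ``NoPolling``, so ``result()`` resolves with the initial response instead of waiting for the operation to reach a terminal state::

    # Fire-and-forget cancellation with NoPolling: the poller does not track
    # the operation, and result() returns immediately (None for cancel).
    poller = client.job.cancel('myadlaaccount', job_id, polling=False)
    poller.result()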
@@ -649,8 +616,8 @@ def _yield_method_initial( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: exp = CloudError(response) @@ -662,7 +629,7 @@ def _yield_method_initial( return client_raw_response def yield_method( - self, account_name, job_identity, custom_headers=None, raw=False, **operation_config): + self, account_name, job_identity, custom_headers=None, raw=False, polling=True, **operation_config): """Pauses the specified job and places it back in the job queue, behind other jobs of equal or higher importance, based on priority. (Only for use internally with Scope job type.). @@ -674,12 +641,14 @@ def yield_method( across all jobs submitted to the service. :type job_identity: str :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :return: An instance of AzureOperationPoller that returns None or - ClientRawResponse if raw=true + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or - ~msrest.pipeline.ClientRawResponse + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError` """ raw_result = self._yield_method_initial( @@ -689,40 +658,20 @@ def yield_method( raw=True, **operation_config ) - if raw: - return raw_result - - # Construct and send request - def long_running_send(): - return raw_result.response - - def get_long_running_status(status_link, headers=None): - - request = self._client.get(status_link) - if headers: - request.headers.update(headers) - header_parameters = {} - header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id'] - return self._client.send( - request, header_parameters, stream=False, **operation_config) def get_long_running_output(response): - - if response.status_code not in [200, 202, 204]: - exp = CloudError(response) - exp.request_id = response.headers.get('x-ms-request-id') - raise exp - if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response - long_running_operation_timeout = operation_config.get( + lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) - return AzureOperationPoller( - long_running_send, get_long_running_output, - get_long_running_status, long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + yield_method.metadata = {'url': '/jobs/{jobIdentity}/YieldJob'} def build( self, account_name, parameters, custom_headers=None, raw=False, 
     def build(
             self, account_name, parameters, custom_headers=None, raw=False, **operation_config):
@@ -746,7 +695,7 @@ def build(
         :raises: :class:`CloudError`
         """
         # Construct URL
-        url = '/buildJob'
+        url = self.build.metadata['url']
         path_format_arguments = {
             'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
             'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True)
@@ -759,6 +708,7 @@ def build(

         # Construct headers
         header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
         header_parameters['Content-Type'] = 'application/json; charset=utf-8'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
@@ -771,9 +721,8 @@ def build(
         body_content = self._serialize.body(parameters, 'BuildJobParameters')

         # Construct and send request
-        request = self._client.post(url, query_parameters)
-        response = self._client.send(
-            request, header_parameters, body_content, stream=False, **operation_config)
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)

         if response.status_code not in [200]:
             exp = CloudError(response)
@@ -790,3 +739,4 @@ def build(
             return client_raw_response

         return deserialized
+    build.metadata = {'url': '/buildJob'}
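The request-construction change repeated throughout these hunks appears to track the msrest>=0.5.0 pipeline required by the updated setup.py: `post`/`get` now assemble the complete request (URL, query, headers, and, where present, the body), and `send` only executes it, rather than merging headers and body in at dispatch time. Condensed from the `build` body above:

    # Old pipeline: headers and body merged in at send() time.
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    # New pipeline: post() returns a fully-formed request; send() just executes it.
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)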
""" @@ -66,7 +66,7 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL - url = '/pipelines' + url = self.list.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True) @@ -87,7 +87,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -96,9 +96,8 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -116,6 +115,7 @@ def internal_paging(next_link=None, raw=False): return client_raw_response return deserialized + list.metadata = {'url': '/pipelines'} def get( self, account_name, pipeline_identity, start_date_time=None, end_date_time=None, custom_headers=None, raw=False, **operation_config): @@ -146,7 +146,7 @@ def get( :raises: :class:`CloudError` """ # Construct URL - url = '/pipelines/{pipelineIdentity}' + url = self.get.metadata['url'] path_format_arguments = { 'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True), 'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True), @@ -164,7 +164,7 @@ def get( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -173,8 +173,8 @@ def get( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -191,3 +191,4 @@ def get( return client_raw_response return deserialized + get.metadata = {'url': '/pipelines/{pipelineIdentity}'} diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py index 0d6691df01a4..9728ea90f9c1 100644 --- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py +++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py @@ -22,7 +22,7 @@ class 
diff --git a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py
index 0d6691df01a4..9728ea90f9c1 100644
--- a/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py
+++ b/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py
@@ -22,7 +22,7 @@ class RecurrenceOperations(object):
     :param client: Client for service requests.
     :param config: Configuration of service client.
     :param serializer: An object model serializer.
-    :param deserializer: An objec model deserializer.
+    :param deserializer: An object model deserializer.

     :ivar api_version: Client Api Version. Constant value: "2017-09-01-preview".
     """
@@ -66,7 +66,7 @@ def internal_paging(next_link=None, raw=False):

             if not next_link:
                 # Construct URL
-                url = '/recurrences'
+                url = self.list.metadata['url']
                 path_format_arguments = {
                     'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
                     'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True)
@@ -87,7 +87,7 @@ def internal_paging(next_link=None, raw=False):

             # Construct headers
             header_parameters = {}
-            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+            header_parameters['Accept'] = 'application/json'
             if self.config.generate_client_request_id:
                 header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
             if custom_headers:
@@ -96,9 +96,8 @@ def internal_paging(next_link=None, raw=False):
                 header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

             # Construct and send request
-            request = self._client.get(url, query_parameters)
-            response = self._client.send(
-                request, header_parameters, stream=False, **operation_config)
+            request = self._client.get(url, query_parameters, header_parameters)
+            response = self._client.send(request, stream=False, **operation_config)

             if response.status_code not in [200]:
                 exp = CloudError(response)
@@ -116,6 +115,7 @@ def internal_paging(next_link=None, raw=False):
             return client_raw_response

         return deserialized
+    list.metadata = {'url': '/recurrences'}

     def get(
             self, account_name, recurrence_identity, start_date_time=None, end_date_time=None, custom_headers=None, raw=False, **operation_config):
@@ -146,7 +146,7 @@ def get(
         :raises: :class:`CloudError`
         """
         # Construct URL
-        url = '/recurrences/{recurrenceIdentity}'
+        url = self.get.metadata['url']
         path_format_arguments = {
             'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
             'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True),
@@ -164,7 +164,7 @@ def get(

         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        header_parameters['Accept'] = 'application/json'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -173,8 +173,8 @@ def get(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

         # Construct and send request
-        request = self._client.get(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)

         if response.status_code not in [200]:
             exp = CloudError(response)
@@ -191,3 +191,4 @@ def get(
             return client_raw_response

         return deserialized
+    get.metadata = {'url': '/recurrences/{recurrenceIdentity}'}
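`get` takes the same optional `start_date_time`/`end_date_time` window as its pipeline counterpart, bounding the period the service aggregates for the recurrence. A sketch with placeholder identifiers, again reusing the hypothetical `client`:

    # Hedged sketch: fetching one recurrence over a 30-day window.
    from datetime import datetime, timedelta

    end = datetime.utcnow()
    start = end - timedelta(days=30)
    recurrence = client.recurrence.get(
        'myaccount', 'recurrence-guid',
        start_date_time=start, end_date_time=end)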
diff --git a/azure-mgmt-datalake-analytics/azure_bdist_wheel.py b/azure-mgmt-datalake-analytics/azure_bdist_wheel.py
deleted file mode 100644
index 8a81d1b61775..000000000000
--- a/azure-mgmt-datalake-analytics/azure_bdist_wheel.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#-------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#--------------------------------------------------------------------------
-
-from distutils import log as logger
-import os.path
-
-from wheel.bdist_wheel import bdist_wheel
-class azure_bdist_wheel(bdist_wheel):
-    """The purpose of this class is to build wheel a little differently than the sdist,
-    without requiring to build the wheel from the sdist (i.e. you can build the wheel
-    directly from source).
-    """
-
-    description = "Create an Azure wheel distribution"
-
-    user_options = bdist_wheel.user_options + \
-        [('azure-namespace-package=', None,
-          "Name of the deepest nspkg used")]
-
-    def initialize_options(self):
-        bdist_wheel.initialize_options(self)
-        self.azure_namespace_package = None
-
-    def finalize_options(self):
-        bdist_wheel.finalize_options(self)
-        if self.azure_namespace_package and not self.azure_namespace_package.endswith("-nspkg"):
-            raise ValueError("azure_namespace_package must finish by -nspkg")
-
-    def run(self):
-        if not self.distribution.install_requires:
-            self.distribution.install_requires = []
-        self.distribution.install_requires.append(
-            "{}>=2.0.0".format(self.azure_namespace_package))
-        bdist_wheel.run(self)
-
-    def write_record(self, bdist_dir, distinfo_dir):
-        if self.azure_namespace_package:
-            # Split and remove last part, assuming it's "nspkg"
-            subparts = self.azure_namespace_package.split('-')[0:-1]
-            folder_with_init = [os.path.join(*subparts[0:i+1]) for i in range(len(subparts))]
-            for azure_sub_package in folder_with_init:
-                init_file = os.path.join(bdist_dir, azure_sub_package, '__init__.py')
-                if os.path.isfile(init_file):
-                    logger.info("manually remove {} while building the wheel".format(init_file))
-                    os.remove(init_file)
-                else:
-                    raise ValueError("Unable to find {}. Are you sure of your namespace package?".format(init_file))
-            bdist_wheel.write_record(self, bdist_dir, distinfo_dir)
-cmdclass = {
-    'bdist_wheel': azure_bdist_wheel,
-}
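The deleted hook's real work was in `write_record`: stripping namespace `__init__.py` files out of the wheel so the nspkg package could own them. With the excludes added to `find_packages` in setup.py below, a stock `bdist_wheel` produces the same layout, which can be spot-checked directly; the wheel path and version in this sketch are placeholders.

    # Hedged check: confirm a built wheel ships no namespace __init__.py.
    import zipfile

    NSPKG_INITS = {
        'azure/__init__.py',
        'azure/mgmt/__init__.py',
        'azure/mgmt/datalake/__init__.py',
    }
    with zipfile.ZipFile(
            'dist/azure_mgmt_datalake_analytics-x.y.z-py2.py3-none-any.whl') as whl:
        offenders = NSPKG_INITS.intersection(whl.namelist())
    print(offenders or 'no namespace __init__.py packaged')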
diff --git a/azure-mgmt-datalake-analytics/sdk_packaging.toml b/azure-mgmt-datalake-analytics/sdk_packaging.toml
new file mode 100644
index 000000000000..231d3b4be52f
--- /dev/null
+++ b/azure-mgmt-datalake-analytics/sdk_packaging.toml
@@ -0,0 +1,6 @@
+[packaging]
+package_name = "azure-mgmt-datalake-analytics"
+package_pprint_name = "MyService Management"
+package_doc_id = ""
+is_stable = false
+is_arm = true
diff --git a/azure-mgmt-datalake-analytics/setup.cfg b/azure-mgmt-datalake-analytics/setup.cfg
index e6761b2e2518..3c6e79cf31da 100644
--- a/azure-mgmt-datalake-analytics/setup.cfg
+++ b/azure-mgmt-datalake-analytics/setup.cfg
@@ -1,3 +1,2 @@
 [bdist_wheel]
 universal=1
-azure-namespace-package=azure-mgmt-datalake-nspkg
\ No newline at end of file
diff --git a/azure-mgmt-datalake-analytics/setup.py b/azure-mgmt-datalake-analytics/setup.py
index 7b6e22f59731..4ddc6df0bf02 100644
--- a/azure-mgmt-datalake-analytics/setup.py
+++ b/azure-mgmt-datalake-analytics/setup.py
@@ -10,16 +10,10 @@
 import os.path
 from io import open
 from setuptools import find_packages, setup
-try:
-    from azure_bdist_wheel import cmdclass
-except ImportError:
-    from distutils import log as logger
-    logger.warn("Wheel is not available, disabling bdist_wheel hook")
-    cmdclass = {}

 # Change the PACKAGE_NAME only to change folder and different name
 PACKAGE_NAME = "azure-mgmt-datalake-analytics"
-PACKAGE_PPRINT_NAME = "Data Lake Analytics Management"
+PACKAGE_PPRINT_NAME = "MyService Management"

 # a-b-c => a/b/c
 package_folder_path = PACKAGE_NAME.replace('-', '/')
@@ -72,13 +66,23 @@
         'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
         'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
         'License :: OSI Approved :: MIT License',
     ],
     zip_safe=False,
-    packages=find_packages(exclude=["tests"]),
+    packages=find_packages(exclude=[
+        'tests',
+        # Exclude packages that will be covered by PEP420 or nspkg
+        'azure',
+        'azure.mgmt',
+        'azure.mgmt.datalake',
+    ]),
     install_requires=[
-        'msrestazure>=0.4.20,<2.0.0',
+        'msrest>=0.5.0',
+        'msrestazure>=0.4.32,<2.0.0',
         'azure-common~=1.1',
     ],
-    cmdclass=cmdclass
+    extras_require={
+        ":python_version<'3.0'": ['azure-mgmt-datalake-nspkg'],
+    }
 )
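The `extras_require` key `":python_version<'3.0'"` is setuptools' spelling for an environment-marker-conditional dependency: Python 2 installs pull in `azure-mgmt-datalake-nspkg` to stitch the namespace together, while Python 3 installs rely on the packages "covered by PEP420" that setup.py now excludes. How the marker itself evaluates can be sketched with the `packaging` library (an assumption for illustration; it is not a dependency added by this patch):

    # Hedged sketch: evaluating the environment marker behind extras_require.
    from packaging.markers import Marker

    marker = Marker("python_version < '3.0'")
    print(marker.evaluate({'python_version': '2.7'}))  # True  -> nspkg installed
    print(marker.evaluate({'python_version': '3.7'}))  # False -> PEP 420 suffices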