max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
rses/__init__.py | iScrE4m/RSES | 1 | 5400 | # coding=utf-8
"""RSES :)"""
| 1.117188 | 1 |
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_gremlin_resources_operations.py | adewaleo/azure-sdk-for-python | 2 | 5401 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class GremlinResourcesOperations(object):
"""GremlinResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cosmosdb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
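    # Typical access path (a sketch, not part of the generated file): this operations group is
    # reached through the service client rather than constructed directly. The client class and
    # credential below are assumptions based on the azure-mgmt-cosmosdb and azure-identity packages.
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.cosmosdb import CosmosDBManagementClient
    #
    #   client = CosmosDBManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   gremlin_ops = client.gremlin_resources  # an instance of GremlinResourcesOperations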
def list_gremlin_databases(
self,
resource_group_name, # type: str
account_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.GremlinDatabaseListResult"]
"""Lists the Gremlin databases under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either GremlinDatabaseListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.GremlinDatabaseListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinDatabaseListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_gremlin_databases.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('GremlinDatabaseListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_gremlin_databases.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases'} # type: ignore
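    # Usage sketch for the paged list operation above (illustrative resource names; assumes the
    # operations group was obtained from a service client as shown earlier):
    #
    #   for database in client.gremlin_resources.list_gremlin_databases(
    #           resource_group_name="my-rg", account_name="my-cosmos-account"):
    #       print(database.name)
    #
    # ItemPaged drives prepare_request/extract_data/get_next transparently; each element is a
    # GremlinDatabaseGetResults model.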
def get_gremlin_database(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.GremlinDatabaseGetResults"
"""Gets the Gremlin databases under an existing Azure Cosmos DB database account with the provided
name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GremlinDatabaseGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.GremlinDatabaseGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinDatabaseGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_gremlin_database.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GremlinDatabaseGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gremlin_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
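    # Usage sketch for the synchronous get above (names illustrative, continuing the earlier sketch):
    #
    #   db = client.gremlin_resources.get_gremlin_database(
    #       resource_group_name="my-rg", account_name="my-cosmos-account", database_name="db1")
    #   print(db.name)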
def _create_update_gremlin_database_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
create_update_gremlin_database_parameters, # type: "models.GremlinDatabaseCreateUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["models.GremlinDatabaseGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.GremlinDatabaseGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_update_gremlin_database_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(create_update_gremlin_database_parameters, 'GremlinDatabaseCreateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GremlinDatabaseGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_gremlin_database_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
def begin_create_update_gremlin_database(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
create_update_gremlin_database_parameters, # type: "models.GremlinDatabaseCreateUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.GremlinDatabaseGetResults"]
"""Create or update an Azure Cosmos DB Gremlin database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param create_update_gremlin_database_parameters: The parameters to provide for the current
Gremlin database.
:type create_update_gremlin_database_parameters: ~azure.mgmt.cosmosdb.models.GremlinDatabaseCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either GremlinDatabaseGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.GremlinDatabaseGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinDatabaseGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_update_gremlin_database_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
create_update_gremlin_database_parameters=create_update_gremlin_database_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GremlinDatabaseGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_gremlin_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
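    # LRO usage sketch for the create/update operation above, continuing the earlier sketch. The
    # request body comes from azure.mgmt.cosmosdb.models; the sub-model names used here
    # (GremlinDatabaseResource, CreateUpdateOptions) are assumptions based on that models package:
    #
    #   from azure.mgmt.cosmosdb import models as cosmos_models
    #
    #   params = cosmos_models.GremlinDatabaseCreateUpdateParameters(
    #       resource=cosmos_models.GremlinDatabaseResource(id="db1"),
    #       options=cosmos_models.CreateUpdateOptions(throughput=400))
    #   poller = client.gremlin_resources.begin_create_update_gremlin_database(
    #       "my-rg", "my-cosmos-account", "db1", params)
    #   created = poller.result()  # blocks until the ARM long-running operation completes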
def _delete_gremlin_database_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
# Construct URL
url = self._delete_gremlin_database_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_gremlin_database_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
def begin_delete_gremlin_database(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes an existing Azure Cosmos DB Gremlin database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_gremlin_database_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_gremlin_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
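    # Delete is also a long-running operation; the poller yields None, so wait() (or result()) is
    # only used to block until completion (sketch, names illustrative):
    #
    #   client.gremlin_resources.begin_delete_gremlin_database(
    #       "my-rg", "my-cosmos-account", "db1").wait()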
def get_gremlin_database_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ThroughputSettingsGetResults"
"""Gets the RUs per second of the Gremlin database under an existing Azure Cosmos DB database
account with the provided name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_gremlin_database_throughput.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gremlin_database_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default'} # type: ignore
def _update_gremlin_database_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_gremlin_database_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(update_throughput_parameters, 'ThroughputSettingsUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_gremlin_database_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default'} # type: ignore
def begin_update_gremlin_database_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Update RUs per second of an Azure Cosmos DB Gremlin database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
        :param update_throughput_parameters: The parameters to provide for the RUs per second of
         the current Gremlin database.
:type update_throughput_parameters: ~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_gremlin_database_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
update_throughput_parameters=update_throughput_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_gremlin_database_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default'} # type: ignore
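    # Throughput update sketch, continuing the earlier one. ThroughputSettingsUpdateParameters and
    # ThroughputSettingsResource come from azure.mgmt.cosmosdb.models; the field names shown are
    # assumptions based on that package:
    #
    #   params = cosmos_models.ThroughputSettingsUpdateParameters(
    #       resource=cosmos_models.ThroughputSettingsResource(throughput=500))
    #   settings = client.gremlin_resources.begin_update_gremlin_database_throughput(
    #       "my-rg", "my-cosmos-account", "db1", params).result()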
def _migrate_gremlin_database_to_autoscale_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._migrate_gremlin_database_to_autoscale_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_gremlin_database_to_autoscale_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
def begin_migrate_gremlin_database_to_autoscale(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB Gremlin database from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._migrate_gremlin_database_to_autoscale_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_gremlin_database_to_autoscale.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
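    # Migration to autoscale takes no request body; it POSTs against the default throughput
    # settings and the poller resolves to a ThroughputSettingsGetResults (sketch, names illustrative):
    #
    #   settings = client.gremlin_resources.begin_migrate_gremlin_database_to_autoscale(
    #       "my-rg", "my-cosmos-account", "db1").result()
    #
    # begin_migrate_gremlin_database_to_manual_throughput below is the symmetric reverse call.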
def _migrate_gremlin_database_to_manual_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._migrate_gremlin_database_to_manual_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_gremlin_database_to_manual_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
def begin_migrate_gremlin_database_to_manual_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB Gremlin database from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._migrate_gremlin_database_to_manual_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_gremlin_database_to_manual_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
def list_gremlin_graphs(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.GremlinGraphListResult"]
"""Lists the Gremlin graph under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either GremlinGraphListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.GremlinGraphListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinGraphListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_gremlin_graphs.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('GremlinGraphListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_gremlin_graphs.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs'} # type: ignore
def get_gremlin_graph(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.GremlinGraphGetResults"
"""Gets the Gremlin graph under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GremlinGraphGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.GremlinGraphGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinGraphGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_gremlin_graph.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GremlinGraphGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gremlin_graph.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
def _create_update_gremlin_graph_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
create_update_gremlin_graph_parameters, # type: "models.GremlinGraphCreateUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["models.GremlinGraphGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.GremlinGraphGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_update_gremlin_graph_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(create_update_gremlin_graph_parameters, 'GremlinGraphCreateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GremlinGraphGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_gremlin_graph_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
def begin_create_update_gremlin_graph(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
create_update_gremlin_graph_parameters, # type: "models.GremlinGraphCreateUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.GremlinGraphGetResults"]
"""Create or update an Azure Cosmos DB Gremlin graph.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:param create_update_gremlin_graph_parameters: The parameters to provide for the current
Gremlin graph.
:type create_update_gremlin_graph_parameters: ~azure.mgmt.cosmosdb.models.GremlinGraphCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either GremlinGraphGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.GremlinGraphGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinGraphGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_update_gremlin_graph_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
create_update_gremlin_graph_parameters=create_update_gremlin_graph_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GremlinGraphGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_gremlin_graph.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
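    # Graph create/update sketch, continuing the earlier one. The graph-level operations mirror the
    # database-level ones but additionally take graph_name and a GremlinGraphCreateUpdateParameters
    # body; the GremlinGraphResource and ContainerPartitionKey sub-models used here are assumptions
    # based on azure.mgmt.cosmosdb.models:
    #
    #   graph_params = cosmos_models.GremlinGraphCreateUpdateParameters(
    #       resource=cosmos_models.GremlinGraphResource(
    #           id="graph1",
    #           partition_key=cosmos_models.ContainerPartitionKey(paths=["/partitionKey"], kind="Hash")),
    #       options=cosmos_models.CreateUpdateOptions())
    #   graph = client.gremlin_resources.begin_create_update_gremlin_graph(
    #       "my-rg", "my-cosmos-account", "db1", "graph1", graph_params).result()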
def _delete_gremlin_graph_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
# Construct URL
url = self._delete_gremlin_graph_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_gremlin_graph_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
def begin_delete_gremlin_graph(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes an existing Azure Cosmos DB Gremlin graph.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_gremlin_graph_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_gremlin_graph.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
def get_gremlin_graph_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ThroughputSettingsGetResults"
"""Gets the Gremlin graph throughput under an existing Azure Cosmos DB database account with the
provided name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_gremlin_graph_throughput.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gremlin_graph_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default'} # type: ignore
def _update_gremlin_graph_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_gremlin_graph_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(update_throughput_parameters, 'ThroughputSettingsUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_gremlin_graph_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default'} # type: ignore
def begin_update_gremlin_graph_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Update RUs per second of an Azure Cosmos DB Gremlin graph.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
        :param update_throughput_parameters: The parameters to provide for the RUs per second of the
         current Gremlin graph.
:type update_throughput_parameters: ~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_gremlin_graph_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
update_throughput_parameters=update_throughput_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_gremlin_graph_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default'} # type: ignore
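    # Illustrative usage sketch (not part of the generated client): one way to build the
    # update parameters for the operation above. The model classes come from
    # azure.mgmt.cosmosdb.models; the throughput value and resource names are placeholders.
    #
    #   from azure.mgmt.cosmosdb.models import (
    #       ThroughputSettingsResource, ThroughputSettingsUpdateParameters)
    #   params = ThroughputSettingsUpdateParameters(
    #       resource=ThroughputSettingsResource(throughput=400))
    #   result = client.gremlin_resources.begin_update_gremlin_graph_throughput(
    #       "my-resource-group", "my-account", "my-database", "my-graph", params
    #   ).result()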
def _migrate_gremlin_graph_to_autoscale_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._migrate_gremlin_graph_to_autoscale_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_gremlin_graph_to_autoscale_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
def begin_migrate_gremlin_graph_to_autoscale(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB Gremlin graph from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._migrate_gremlin_graph_to_autoscale_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_gremlin_graph_to_autoscale.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
def _migrate_gremlin_graph_to_manual_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._migrate_gremlin_graph_to_manual_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_gremlin_graph_to_manual_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
def begin_migrate_gremlin_graph_to_manual_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB Gremlin graph from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._migrate_gremlin_graph_to_manual_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_gremlin_graph_to_manual_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
| 1.695313 | 2 |
homeassistant/components/tasmota/discovery.py | yura505/core | 0 | 5402 | """Support for MQTT discovery."""
import asyncio
import logging
from hatasmota.discovery import (
TasmotaDiscovery,
get_device_config as tasmota_get_device_config,
get_entities_for_platform as tasmota_get_entities_for_platform,
get_entity as tasmota_get_entity,
has_entities_with_platform as tasmota_has_entities_with_platform,
unique_id_from_hash,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SUPPORTED_PLATFORMS = [
"switch",
]
ALREADY_DISCOVERED = "tasmota_discovered_components"
CONFIG_ENTRY_IS_SETUP = "tasmota_config_entry_is_setup"
DATA_CONFIG_ENTRY_LOCK = "tasmota_config_entry_lock"
TASMOTA_DISCOVERY_DEVICE = "tasmota_discovery_device"
TASMOTA_DISCOVERY_ENTITY_NEW = "tasmota_discovery_entity_new_{}"
TASMOTA_DISCOVERY_ENTITY_UPDATED = "tasmota_discovery_entity_updated_{}_{}_{}_{}"
def clear_discovery_hash(hass, discovery_hash):
"""Clear entry in ALREADY_DISCOVERED list."""
del hass.data[ALREADY_DISCOVERED][discovery_hash]
def set_discovery_hash(hass, discovery_hash):
"""Set entry in ALREADY_DISCOVERED list."""
hass.data[ALREADY_DISCOVERED][discovery_hash] = {}
async def async_start(
hass: HomeAssistantType, discovery_topic, config_entry, tasmota_mqtt
) -> bool:
"""Start MQTT Discovery."""
async def _load_platform(platform):
"""Load a Tasmota platform if not already done."""
async with hass.data[DATA_CONFIG_ENTRY_LOCK]:
config_entries_key = f"{platform}.tasmota"
if config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]:
await hass.config_entries.async_forward_entry_setup(
config_entry, platform
)
hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)
async def _discover_entity(tasmota_entity_config, discovery_hash, platform):
"""Handle adding or updating a discovered entity."""
if not tasmota_entity_config:
# Entity disabled, clean up entity registry
entity_registry = await hass.helpers.entity_registry.async_get_registry()
unique_id = unique_id_from_hash(discovery_hash)
entity_id = entity_registry.async_get_entity_id(platform, DOMAIN, unique_id)
if entity_id:
_LOGGER.debug("Removing entity: %s %s", platform, discovery_hash)
entity_registry.async_remove(entity_id)
return
if discovery_hash in hass.data[ALREADY_DISCOVERED]:
_LOGGER.debug(
"Entity already added, sending update: %s %s",
platform,
discovery_hash,
)
async_dispatcher_send(
hass,
TASMOTA_DISCOVERY_ENTITY_UPDATED.format(*discovery_hash),
tasmota_entity_config,
)
else:
_LOGGER.debug("Adding new entity: %s %s", platform, discovery_hash)
tasmota_entity = tasmota_get_entity(tasmota_entity_config, tasmota_mqtt)
hass.data[ALREADY_DISCOVERED][discovery_hash] = None
async_dispatcher_send(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(platform),
tasmota_entity,
discovery_hash,
)
async def async_device_discovered(payload, mac):
"""Process the received message."""
if ALREADY_DISCOVERED not in hass.data:
hass.data[ALREADY_DISCOVERED] = {}
_LOGGER.debug("Received discovery data for tasmota device: %s", mac)
tasmota_device_config = tasmota_get_device_config(payload)
async_dispatcher_send(
hass, TASMOTA_DISCOVERY_DEVICE, tasmota_device_config, mac
)
if not payload:
return
for platform in SUPPORTED_PLATFORMS:
if not tasmota_has_entities_with_platform(payload, platform):
continue
await _load_platform(platform)
for platform in SUPPORTED_PLATFORMS:
tasmota_entities = tasmota_get_entities_for_platform(payload, platform)
for (tasmota_entity_config, discovery_hash) in tasmota_entities:
await _discover_entity(tasmota_entity_config, discovery_hash, platform)
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
tasmota_discovery = TasmotaDiscovery(discovery_topic, tasmota_mqtt)
await tasmota_discovery.start_discovery(async_device_discovered, None)
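# Illustrative sketch (not part of this module): a platform module such as switch would
# typically subscribe to the signal dispatched above; the callback name is a placeholder.
#
#   from homeassistant.helpers.dispatcher import async_dispatcher_connect
#
#   async def async_discover(tasmota_entity, discovery_hash):
#       """Set up a newly discovered Tasmota entity."""
#       ...
#
#   async_dispatcher_connect(
#       hass, TASMOTA_DISCOVERY_ENTITY_NEW.format("switch"), async_discover)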
| 2.09375 | 2 |
tfx/components/infra_validator/executor.py | TimoKerr/tfx | 1 | 5403 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX InfraValidator executor definition."""
import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional
from absl import logging
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.types.standard_component_specs import EXAMPLES_KEY
from tfx.types.standard_component_specs import MODEL_KEY
from tfx.types.standard_component_specs import REQUEST_SPEC_KEY
from tfx.types.standard_component_specs import SERVING_SPEC_KEY
from tfx.types.standard_component_specs import VALIDATION_SPEC_KEY
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
from tensorflow_serving.apis import regression_pb2
_DEFAULT_NUM_TRIES = 5
_DEFAULT_POLLING_INTERVAL_SEC = 1
_DEFAULT_MAX_LOADING_TIME_SEC = 300
_DEFAULT_MODEL_NAME = 'infra-validation-model'
# Proto message keys for oneof block.
_TENSORFLOW_SERVING = 'tensorflow_serving'
_LOCAL_DOCKER = 'local_docker'
_KUBERNETES = 'kubernetes'
# Artifact property keys
_BLESSED_KEY = 'blessed'
_MODEL_FLAG_KEY = 'has_model'
# Filename of infra blessing artifact on succeed.
_BLESSED_FILENAME = 'INFRA_BLESSED'
# Filename of infra blessing artifact on fail.
_NOT_BLESSED_FILENAME = 'INFRA_NOT_BLESSED'
def _create_model_server_runner(
model_path: str,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec):
"""Create a ModelServerRunner from a model, a ServingBinary and a ServingSpec.
Args:
model_path: An IV-flavored model path. (See model_path_utils.py)
serving_binary: One of ServingBinary instances parsed from the
`serving_spec`.
serving_spec: A ServingSpec instance of this infra validation.
Returns:
A ModelServerRunner.
"""
platform = serving_spec.WhichOneof('serving_platform')
if platform == 'local_docker':
return local_docker_runner.LocalDockerRunner(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec
)
elif platform == 'kubernetes':
return kubernetes_runner.KubernetesRunner(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec
)
else:
raise NotImplementedError('Invalid serving_platform {}'.format(platform))
def _convert_to_prediction_log(request: iv_types.Request):
"""Try convert infra validation request to TF-Serving PredictionLog."""
if isinstance(request, classification_pb2.ClassificationRequest):
return prediction_log_pb2.PredictionLog(
classify_log=prediction_log_pb2.ClassifyLog(request=request))
elif isinstance(request, regression_pb2.RegressionRequest):
return prediction_log_pb2.PredictionLog(
regress_log=prediction_log_pb2.RegressLog(request=request))
elif isinstance(request, predict_pb2.PredictRequest):
return prediction_log_pb2.PredictionLog(
predict_log=prediction_log_pb2.PredictLog(request=request))
else:
raise NotImplementedError(
f'Cannot convert {type(request)} to PredictionLog')
def _mark_blessed(blessing: types.Artifact) -> None:
logging.info('Model passed infra validation.')
io_utils.write_string_file(
os.path.join(blessing.uri, _BLESSED_FILENAME), '')
blessing.set_int_custom_property(_BLESSED_KEY, 1)
def _mark_not_blessed(blessing: types.Artifact) -> None:
logging.info('Model failed infra validation.')
io_utils.write_string_file(
os.path.join(blessing.uri, _NOT_BLESSED_FILENAME), '')
blessing.set_int_custom_property(_BLESSED_KEY, 0)
class Executor(base_executor.BaseExecutor):
"""TFX infra validator executor."""
def __init__(self,
context: Optional[base_executor.BaseExecutor.Context] = None):
super(Executor, self).__init__(context)
self._cleanups = []
def _AddCleanup(self, function, *args, **kwargs):
self._cleanups.append(functools.partial(function, *args, **kwargs))
def _Cleanup(self):
for cleanup in self._cleanups:
try:
cleanup()
except: # pylint: disable=broad-except, bare-except
logging.warning('Error occurred during cleanup.', exc_info=True)
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
"""Contract for running InfraValidator Executor.
Args:
input_dict:
- `model`: Single `Model` artifact that we're validating.
- `examples`: `Examples` artifacts to be used for test requests.
output_dict:
- `blessing`: Single `InfraBlessing` artifact containing the validated
          result and optionally the validated model if warmup requests are appended.
Artifact URI includes an empty file with the name either of
INFRA_BLESSED or INFRA_NOT_BLESSED.
exec_properties:
- `serving_spec`: Serialized `ServingSpec` configuration.
- `validation_spec`: Serialized `ValidationSpec` configuration.
- `request_spec`: Serialized `RequestSpec` configuration.
"""
self._log_startup(input_dict, output_dict, exec_properties)
model = artifact_utils.get_single_instance(input_dict[MODEL_KEY])
blessing = artifact_utils.get_single_instance(output_dict[BLESSING_KEY])
if input_dict.get(EXAMPLES_KEY):
examples = artifact_utils.get_single_instance(input_dict[EXAMPLES_KEY])
else:
examples = None
serving_spec = infra_validator_pb2.ServingSpec()
proto_utils.json_to_proto(exec_properties[SERVING_SPEC_KEY], serving_spec)
if not serving_spec.model_name:
serving_spec.model_name = _DEFAULT_MODEL_NAME
validation_spec = infra_validator_pb2.ValidationSpec()
if exec_properties.get(VALIDATION_SPEC_KEY):
proto_utils.json_to_proto(exec_properties[VALIDATION_SPEC_KEY],
validation_spec)
if not validation_spec.num_tries:
validation_spec.num_tries = _DEFAULT_NUM_TRIES
if not validation_spec.max_loading_time_seconds:
validation_spec.max_loading_time_seconds = _DEFAULT_MAX_LOADING_TIME_SEC
if exec_properties.get(REQUEST_SPEC_KEY):
request_spec = infra_validator_pb2.RequestSpec()
proto_utils.json_to_proto(exec_properties[REQUEST_SPEC_KEY],
request_spec)
else:
request_spec = None
with self._InstallGracefulShutdownHandler():
self._Do(
model=model,
examples=examples,
blessing=blessing,
serving_spec=serving_spec,
validation_spec=validation_spec,
request_spec=request_spec,
)
@contextlib.contextmanager
def _InstallGracefulShutdownHandler(self):
# pylint: disable=g-doc-return-or-yield
"""Install graceful shutdown behavior.
Caveat: InfraValidator currently only recognizes SIGTERM signal as a
graceful shutdown. Furthermore, SIGTERM can be handled only if the executor
is running on the MainThread (the thread that runs the python interpreter)
due to the limitation of Python API.
When the executor is running on Kubernetes, SIGTERM is a standard way to
signal the graceful shutdown. Python default behavior for receiving SIGTERM
is to terminate the process without raising any exception. By registering a
handler that raises on signal, we can effectively transform the signal to an
exception, and we can reuse our cleanup code inside "except" or "finally"
block during the grace period.
When the executor is run by the local Beam DirectRunner, the executor thread
is one of the worker threads (not a MainThread) therefore SIGTERM cannot
be recognized. If either of MainThread or worker thread receives SIGTERM,
executor will die immediately without grace period.
Even if the executor fails to shutdown gracefully, external resources that
are created by model server runner can be cleaned up if the platform
supports such mechanism (e.g. activeDeadlineSeconds in Kubernetes).
"""
def _handler(signum, frame):
del frame # Unused.
raise error_types.GracefulShutdown('Got signal {}.'.format(signum))
try:
old_handler = signal.signal(signal.SIGTERM, _handler)
except ValueError:
# If current thread is not a MainThread, it is not allowed to register
# the signal handler (ValueError raised).
logging.info('Unable to register signal handler for non-MainThread '
'(name=%s). SIGTERM will not be handled.',
threading.current_thread().name)
old_handler = None
try:
yield
finally:
self._Cleanup()
if old_handler:
signal.signal(signal.SIGTERM, old_handler)
def _Do(
self,
model: types.Artifact,
examples: Optional[types.Artifact],
blessing: types.Artifact,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
request_spec: Optional[infra_validator_pb2.RequestSpec],
):
if examples and request_spec:
logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
requests = request_builder.build_requests(
model_name=serving_spec.model_name,
model=model,
examples=examples,
request_spec=request_spec)
else:
logging.info('InfraValidator will be run in LOAD_ONLY mode.')
requests = []
model_path = self._PrepareModelPath(model, serving_spec)
# TODO(jjong): Make logic parallel.
all_passed = True
for serving_binary in serving_bins.parse_serving_binaries(serving_spec):
all_passed &= self._ValidateWithRetry(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec,
validation_spec=validation_spec,
requests=requests)
if all_passed:
_mark_blessed(blessing)
if requests and request_spec.make_warmup:
self._CreateWarmupModel(blessing, model_path, warmup_requests=requests)
else:
_mark_not_blessed(blessing)
def _CreateWarmupModel(self, blessing: types.Artifact, model_path: str,
warmup_requests: List[iv_types.Request]):
output_model_path = path_utils.stamped_model_path(blessing.uri)
io_utils.copy_dir(src=model_path, dst=output_model_path)
io_utils.write_tfrecord_file(
path_utils.warmup_file_path(output_model_path),
*[_convert_to_prediction_log(r) for r in warmup_requests])
blessing.set_int_custom_property(_MODEL_FLAG_KEY, 1)
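  # For orientation, a rough sketch of what the copy above produces. Directory and file
  # names are determined by path_utils, so treat this layout as assumed, not definitive:
  #
  #   <blessing.uri>/<stamped model dir>/
  #       saved_model.pb
  #       variables/...
  #       assets.extra/tf_serving_warmup_requests   # TFRecord of PredictionLog protos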
def _PrepareModelPath(self, model: types.Artifact,
serving_spec: infra_validator_pb2.ServingSpec) -> str:
model_path = path_utils.serving_model_path(
model.uri, path_utils.is_old_model_artifact(model))
serving_binary = serving_spec.WhichOneof('serving_binary')
if serving_binary == _TENSORFLOW_SERVING:
# TensorFlow Serving requires model to be stored in its own directory
# structure flavor. If current model_path does not conform to the flavor,
# we need to make a copy to the temporary path.
try:
# Check whether current model_path conforms to the tensorflow serving
# model path flavor. (Parsed without exception)
tf_serving_flavor.parse_model_path(
model_path,
expected_model_name=serving_spec.model_name)
except ValueError:
# Copy the model to comply with the tensorflow serving model path
# flavor.
temp_model_path = tf_serving_flavor.make_model_path(
model_base_path=self._get_tmp_dir(),
model_name=serving_spec.model_name,
version=int(time.time()))
io_utils.copy_dir(src=model_path, dst=temp_model_path)
self._AddCleanup(io_utils.delete_dir, self._context.get_tmp_path())
return temp_model_path
return model_path
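  # For orientation, the TF Serving flavor produced by make_model_path above is roughly
  # <model_base_path>/<model_name>/<version>, e.g. (placeholder values):
  #
  #   /tmp/.../infra-validation-model/1620000000/
  #       saved_model.pb
  #       variables/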
def _ValidateWithRetry(
self, model_path: str,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
requests: List[iv_types.Request]):
for i in range(validation_spec.num_tries):
logging.info('Starting infra validation (attempt %d/%d).', i + 1,
validation_spec.num_tries)
try:
self._ValidateOnce(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec,
validation_spec=validation_spec,
requests=requests)
except error_types.GracefulShutdown:
# GracefulShutdown means infra validation aborted. No more retry and
# escalate the error.
raise
except Exception as e: # pylint: disable=broad-except
# Other exceptions indicates validation failure. Log the error and
# retry.
logging.exception('Infra validation (attempt %d/%d) failed.', i + 1,
validation_spec.num_tries)
if isinstance(e, error_types.DeadlineExceeded):
logging.info('Consider increasing the value of '
'ValidationSpec.max_loading_time_seconds.')
else:
# If validation has passed without any exception, succeeded.
return True
# Every trial has failed. Marking model as not blessed.
return False
def _ValidateOnce(
self, model_path: str,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
requests: List[iv_types.Request]):
deadline = time.time() + validation_spec.max_loading_time_seconds
runner = _create_model_server_runner(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec)
try:
logging.info('Starting %r.', runner)
runner.Start()
# Check model is successfully loaded.
runner.WaitUntilRunning(deadline)
client = serving_binary.MakeClient(runner.GetEndpoint())
client.WaitUntilModelLoaded(
deadline, polling_interval_sec=_DEFAULT_POLLING_INTERVAL_SEC)
# Check model can be successfully queried.
if requests:
client.SendRequests(requests)
finally:
logging.info('Stopping %r.', runner)
runner.Stop()
| 1.265625 | 1 |
learning_python/org/allnix/util.py | ykyang/org.allnix.python | 0 | 5404 | def write(message: str):
print("org.allnix", message)
def read() -> str:
"""Returns a string"""
return "org.allnix"
| 2.8125 | 3 |
metr-la/model/Double_C_STTN.py | happys2333/DL-2021-fall | 1 | 5405 | <filename>metr-la/model/Double_C_STTN.py<gh_stars>1-10
# from folder workMETRLA
# MODEL CODE
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 10:28:06 2020
@author: wb
"""
import torch
import torch.nn as nn
import math
# from GCN_models import GCN
# from One_hot_encoder import One_hot_encoder
import torch.nn.functional as F
import numpy as np
from scipy.sparse.linalg import eigs
from Param import *
from torchsummary import summary
DEVICE = 'cuda:1'
class One_hot_encoder(nn.Module):
def __init__(self, embed_size, time_num=288):
super(One_hot_encoder, self).__init__()
self.time_num = time_num
self.I = nn.Parameter(torch.eye(time_num, time_num, requires_grad=True))
        self.onehot_Linear = nn.Linear(time_num, embed_size)  # linear layer that projects the one-hot encoding to embed_size
def forward(self, i, N=25, T=12):
if i % self.time_num + T > self.time_num:
o1 = self.I[i % self.time_num:, :]
o2 = self.I[0: (i + T) % self.time_num, :]
onehot = torch.cat((o1, o2), 0)
else:
onehot = self.I[i % self.time_num: i % self.time_num + T, :]
# onehot = onehot.repeat(N, 1, 1)
onehot = onehot.expand(N, T, self.time_num)
onehot = self.onehot_Linear(onehot)
return onehot
'''
Basic attention code.
ScaledDotProductAttention is shared by the spatial and the temporal attention.
About d_k:
The data arrives as [B, N, T, C]. With C = 1 the features cannot represent the data well in a
high-dimensional space, so C is first projected to an embedding size of 32 or 64. If d_k = 32,
each head has dimension 32; with multi-head attention, e.g. 8 heads, the total is 32 * 8 = 256.
To match the NIPS'17 Transformer paper exactly, use d_k = 64 and 8 heads, for a total embedding
size of 512.
'''
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, Q, K, V):
'''
Q: [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]
K: [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]
V: [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]
        attn_mask: [batch_size, n_heads, seq_len, seq_len] (may be absent)
'''
B, n_heads, len1, len2, d_k = Q.shape
scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k)
# scores : [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), N(Spatial) or T(Temporal)]
# scores.masked_fill_(attn_mask, -1e9) # Fills elements of self tensor with value where mask is True.
attn = nn.Softmax(dim=-1)(scores)
context = torch.matmul(attn,
V) # [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]]
return context
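# Shape sketch for reference (values are arbitrary, chosen only for illustration):
#   Q = K = V = torch.randn(8, 4, 12, 25, 8)     # [B, h, T, N, d_k], the spatial case
#   out = ScaledDotProductAttention()(Q, K, V)   # -> torch.Size([8, 4, 12, 25, 8])
# The softmax weights mix information over the fourth dimension (N in the spatial case,
# T in the temporal case), while the output keeps the same shape as Q.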
'''
S stands for spatial; MultiHeadAttention is the multi-head attention mechanism.
'''
class SMultiHeadAttention(nn.Module):
def __init__(self, embed_size, heads):
super(SMultiHeadAttention, self).__init__()
self.embed_size = embed_size
self.heads = heads
self.head_dim = embed_size // heads
assert (
self.head_dim * heads == embed_size
), "Embedding size needs to be divisible by heads"
        # Use Linear layers as the projection matrices.
        # (If multi-head, do we need to declare separate matrices? Here a single Linear of
        # size heads * head_dim is used and later reshaped into the individual heads.)
self.W_V = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_K = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_Q = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.fc_out = nn.Linear(heads * self.head_dim, embed_size)
def forward(self, input_Q, input_K, input_V):
'''
input_Q: [batch_size, N, T, C]
input_K: [batch_size, N, T, C]
input_V: [batch_size, N, T, C]
attn_mask: [batch_size, seq_len, seq_len]
'''
B, N, T, C = input_Q.shape
# [B, N, T, C] --> [B, N, T, h * d_k] --> [B, N, T, h, d_k] --> [B, h, T, N, d_k]
Q = self.W_Q(input_Q).view(B, N, T, self.heads, self.head_dim).transpose(1,
3) # Q: [B, N, T, C] --[B, N, T, self.heads, self.head_dim] -> [B,h,T,N,dk] 然后是为了把N,dk这两维度考虑去做ScaledDotProductAttention ,代表着是spatial attention
K = self.W_K(input_K).view(B, N, T, self.heads, self.head_dim).transpose(1, 3) # K: [B, h, T, N, d_k]
V = self.W_V(input_V).view(B, N, T, self.heads, self.head_dim).transpose(1, 3) # V: [B, h, T, N, d_k]
# attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) # attn_mask : [batch_size, n_heads, seq_len, seq_len] seq_len = N
# context: [batch_size, n_heads, len_q, d_v], attn: [batch_size, n_heads, len_q, len_k]
context = ScaledDotProductAttention()(Q, K, V) # [B, h, T, N, d_k]
context = context.permute(0, 3, 2, 1, 4) # [B, N, T, h, d_k]
context = context.reshape(B, N, T, self.heads * self.head_dim) # [B, N, T, C]
# context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v) # context: [batch_size, len_q, n_heads * d_v]
output = self.fc_out(context) # [batch_size, len_q, d_model]
return output
'''
T stands for temporal; MultiHeadAttention is the multi-head attention mechanism.
'''
class TMultiHeadAttention(nn.Module):
def __init__(self, embed_size, heads):
super(TMultiHeadAttention, self).__init__()
self.embed_size = embed_size
self.heads = heads
self.head_dim = embed_size // heads
assert (
self.head_dim * heads == embed_size
), "Embedding size needs to be divisible by heads"
        # Use Linear layers as the projection matrices.
        # (Same note as the spatial attention: one Linear of size heads * head_dim,
        # reshaped into the individual heads.)
self.W_V = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_K = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_Q = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.fc_out = nn.Linear(heads * self.head_dim, embed_size)
def forward(self, input_Q, input_K, input_V):
'''
input_Q: [batch_size, N, T, C]
input_K: [batch_size, N, T, C]
input_V: [batch_size, N, T, C]
attn_mask: [batch_size, seq_len, seq_len]
'''
B, N, T, C = input_Q.shape
# [B, N, T, C] --> [B, N, T, h * d_k] --> [B, N, T, h, d_k] --> [B, h, N, T, d_k]
        Q = self.W_Q(input_Q).view(B, N, T, self.heads, self.head_dim).permute(0, 3, 1, 2, 4)  # Q: [B, h, N, T, d_k]; attention over the (T, d_k) dims, i.e. temporal attention
K = self.W_K(input_K).view(B, N, T, self.heads, self.head_dim).permute(0, 3, 1, 2, 4) # K: [B, h, N, T, d_k]
V = self.W_V(input_V).view(B, N, T, self.heads, self.head_dim).permute(0, 3, 1, 2, 4) # V: [B, h, N, T, d_k]
# attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) # attn_mask : [batch_size, n_heads, seq_len, seq_len]
# context: [batch_size, n_heads, len_q, d_v], attn: [batch_size, n_heads, len_q, len_k]
context = ScaledDotProductAttention()(Q, K, V) # [B, h, N, T, d_k]
context = context.permute(0, 2, 3, 1, 4) # [B, N, T, h, d_k]
context = context.reshape(B, N, T, self.heads * self.head_dim) # [B, N, T, C]
# context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v) # context: [batch_size, len_q, n_heads * d_v]
output = self.fc_out(context) # [batch_size, len_q, d_model]
return output
class STransformer(nn.Module):
def __init__(self, embed_size, heads, adj, cheb_K, dropout, forward_expansion):
super(STransformer, self).__init__()
# Spatial Embedding
self.adj = adj
self.D_S = adj.to(DEVICE)
self.embed_liner = nn.Linear(adj.shape[0], embed_size)
self.attention = SMultiHeadAttention(embed_size, heads)
self.norm1 = nn.LayerNorm(embed_size)
self.norm2 = nn.LayerNorm(embed_size)
self.feed_forward = nn.Sequential(
nn.Linear(embed_size, forward_expansion * embed_size),
nn.ReLU(),
nn.Linear(forward_expansion * embed_size, embed_size),
)
        # GCN-related layers (the GCN import is commented out, so that branch is unused here)
        self.norm_adj = nn.InstanceNorm2d(1)  # normalize the adjacency matrix
self.dropout = nn.Dropout(dropout)
self.fs = nn.Linear(embed_size, embed_size)
self.fg = nn.Linear(embed_size, embed_size)
def forward(self, value, key, query):
# value, key, query: [N, T, C] [B, N, T, C]
        # Spatial Embedding part
# N, T, C = query.shape
# D_S = self.embed_liner(self.D_S) # [N, C]
        # D_S = D_S.expand(T, N, C) # [T, N, C], i.e. replicate T copies along the first dim
# D_S = D_S.permute(1, 0, 2) #[N, T, C]
B, N, T, C = query.shape
D_S = self.embed_liner(self.D_S) # [N, C] ---position encoding
        D_S = D_S.expand(B, T, N, C)  # [B, T, N, C], i.e. T copies along dim 1 and B copies along dim 0
D_S = D_S.permute(0, 2, 1, 3) # [B, N, T, C]
        # Spatial Transformer part
query = query + D_S
attention = self.attention(query, query, query) # (B, N, T, C)
# Add skip connection, run through normalization and finally dropout
x = self.dropout(self.norm1(attention + query))
forward = self.feed_forward(x)
U_S = self.dropout(self.norm2(forward + x))
        # Fuse STransformer and GCN via a gate (see the note after this method)
g = torch.sigmoid(self.fs(U_S)) # (7)
out = g * U_S + (1 - g) # (8)
return out # (B, N, T, C)
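    # Note on the gating above: in the original STTN design the fusion is
    # g * U_S + (1 - g) * U_G, where U_G is the GCN output. Since the GCN branch is not
    # computed in this variant, the second term reduces to the constant (1 - g).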
class TTransformer(nn.Module):
def __init__(self, embed_size, heads, time_num, dropout, forward_expansion):
super(TTransformer, self).__init__()
# Temporal embedding One hot
self.time_num = time_num
        # self.one_hot = One_hot_encoder(embed_size, time_num)  # temporal embedding via one-hot encoding, or
        self.temporal_embedding = nn.Embedding(time_num, embed_size)  # temporal embedding via nn.Embedding
self.attention = TMultiHeadAttention(embed_size, heads)
self.norm1 = nn.LayerNorm(embed_size)
self.norm2 = nn.LayerNorm(embed_size)
self.feed_forward = nn.Sequential(
nn.Linear(embed_size, forward_expansion * embed_size),
nn.ReLU(),
nn.Linear(forward_expansion * embed_size, embed_size),
)
self.dropout = nn.Dropout(dropout)
def forward(self, value, key, query, t):
B, N, T, C = query.shape
        # D_T = self.one_hot(t, N, T)  # temporal embedding via one-hot encoding, or
        D_T = self.temporal_embedding(torch.arange(0, T).to(DEVICE))  # temporal embedding via nn.Embedding
D_T = D_T.expand(B, N, T, C)
        # add the temporal embedding to the query; the original paper uses concatenation instead
query = query + D_T
attention = self.attention(query, query, query)
# Add skip connection, run through normalization and finally dropout
x = self.dropout(self.norm1(attention + query))
forward = self.feed_forward(x)
out = self.dropout(self.norm2(forward + x))
return out
### STBlock
class STTransformerBlock(nn.Module):
def __init__(self, embed_size, heads, adj, time_num, cheb_K, dropout, forward_expansion):
super(STTransformerBlock, self).__init__()
self.STransformer = STransformer(embed_size, heads, adj, cheb_K, dropout, forward_expansion)
self.TTransformer = TTransformer(embed_size, heads, time_num, dropout, forward_expansion)
self.norm1 = nn.LayerNorm(embed_size)
self.norm2 = nn.LayerNorm(embed_size)
self.dropout = nn.Dropout(dropout)
def forward(self, value, key, query, t):
# value, key, query: [N, T, C] [B, N, T, C]
# Add skip connection,run through normalization and finally dropout
x1 = self.norm1(self.STransformer(value, key, query) + query) # (B, N, T, C)
x2 = self.dropout(self.norm2(self.TTransformer(x1, x1, x1, t) + x1))
return x2
### Encoder
class Encoder(nn.Module):
    # Stack multiple ST-Transformer blocks
def __init__(
self,
embed_size,
num_layers,
heads,
adj,
time_num,
device,
forward_expansion,
cheb_K,
dropout,
):
super(Encoder, self).__init__()
self.embed_size = embed_size
self.device = device
self.layers = nn.ModuleList(
[
STTransformerBlock(
embed_size,
heads,
adj,
time_num,
cheb_K,
dropout=dropout,
forward_expansion=forward_expansion
)
for _ in range(num_layers)
]
)
self.dropout = nn.Dropout(dropout)
def forward(self, x, t):
# x: [N, T, C] [B, N, T, C]
out = self.dropout(x)
# In the Encoder the query, key, value are all the same.
for layer in self.layers:
out = layer(out, out, out, t)
return out
### Transformer
class Transformer(nn.Module):
def __init__(
self,
adj,
embed_size,
num_layers,
heads,
time_num,
forward_expansion, ##?
cheb_K,
dropout,
device=DEVICE
):
super(Transformer, self).__init__()
self.encoder = Encoder(
embed_size,
num_layers,
heads,
adj,
time_num,
device,
forward_expansion,
cheb_K,
dropout
)
self.device = device
def forward(self, src, t):
## scr: [N, T, C] [B, N, T, C]
enc_src = self.encoder(src, t)
return enc_src # [B, N, T, C]
### ST Transformer: Total Model
class STTransformer(nn.Module):
def __init__(
self,
adj,
in_channels,
embed_size,
time_num,
num_layers,
T_dim,
output_T_dim,
heads,
cheb_K,
forward_expansion,
dropout=0
):
super(STTransformer, self).__init__()
        self.forward_expansion = forward_expansion  # feed-forward embedding size: 8, 16, 32, ..., 1024
        # First convolution expands the channel dimension
        self.conv1 = nn.Conv2d(in_channels, embed_size, 1)  # expand the channel dim (C) up to embed_size
self.Transformer = Transformer(
adj,
embed_size,
num_layers,
heads,
time_num,
forward_expansion,
cheb_K,
dropout=0
)
        # Shrink the time dimension, e.g. from T_dim=12 down to output_T_dim=3 (12 in, 3 out), or keep 12 in / 12 out.
        self.conv2 = nn.Conv2d(T_dim, output_T_dim, 1)
        # Reduce the channel count back down to in_channels.
        self.conv3 = nn.Conv2d(embed_size, in_channels, 1)
        self.relu = nn.ReLU()  # pairs with normalization to help avoid exploding/vanishing gradients
def forward(self, x):
# platform: (CHANNEL, TIMESTEP_IN, N_NODE)
# input x shape[ C, N, T]
        # C: number of channels. N: number of sensors. T: number of time steps.
# x = x.unsqueeze(0)
# x = np.transpose(x,(0,2,1)).to(DEVICE)
        input_Transformer = self.conv1(x)  # conv requires the second dim to be C, i.e. the input must be [B, C, ...]
# input_Transformer = input_Transformer.squeeze(0)
# input_Transformer = input_Transformer.permute(1, 2, 0)
input_Transformer = input_Transformer.permute(0, 2, 3, 1)
# input_Transformer shape[N, T, C] [B, N, T, C]
output_Transformer = self.Transformer(input_Transformer, self.forward_expansion) # [B, N, T, C]
output_Transformer = output_Transformer.permute(0, 2, 1, 3)
# output_Transformer shape[B, T, N, C]
# output_Transformer = output_Transformer.unsqueeze(0)
        out = self.relu(self.conv2(output_Transformer))  # out shape: [B, output_T_dim, N, C]
        out = out.permute(0, 3, 2, 1)  # out shape: [B, C, N, output_T_dim]
        out = self.conv3(out)  # out shape: [B, in_channels, N, output_T_dim]
# out = out.squeeze(1)
out = out.permute(0, 1, 3, 2)
# print('out: ',out.shape)
return out # [B, N, output_dim]
# return out shape: [N, output_dim]
def print_params(model_name, model):
param_count = 0
for name, param in model.named_parameters():
if param.requires_grad:
param_count += param.numel()
print(f'{model_name}, {param_count} trainable parameters in total.')
return
import sys
import pandas as pd
def main():
GPU = sys.argv[-1] if len(sys.argv) == 2 else '1'
device = torch.device("cuda:{}".format(GPU)) if torch.cuda.is_available() else torch.device("cpu")
in_channels = 2 # Channels of input
embed_size = 32 # Dimension of hidden embedding features
time_num = 288
num_layers = 2 # Number of ST Block
T_dim = 12 # Input length, should be the same as prepareData.py
output_T_dim = 12 # Output Expected length
heads = 4 # Number of Heads in MultiHeadAttention
cheb_K = 2 # Order for Chebyshev Polynomials (Eq 2)
forward_expansion = 32 # Dimension of Feed Forward Network: embed_size --> embed_size * forward_expansion --> embed_size
dropout = 0
A = pd.read_csv(ADJPATH).values
A = torch.Tensor(A)
### Construct Network
model = STTransformer(
A,
in_channels,
embed_size,
time_num,
num_layers,
T_dim,
output_T_dim,
heads,
cheb_K,
forward_expansion,
dropout).to(DEVICE)
summary(model, (2, N_NODE, TIMESTEP_IN), device=device)
print_params('STTransformer', model)
if __name__ == '__main__':
main()
'''
Assignments:
1. Build a Spatial-Transformer-only version and report results on PEMS-BAY, 12 steps in / 12 steps out.
2. Build a Temporal-Transformer-only version and report results on PEMS-BAY, 12 steps in / 12 steps out.
3. Build a Temporal-Spatial Transformer version and report results on PEMS-BAY, 12 steps in / 12 steps out.
4. Once the versions above are done, upgrade all of them so that the C dimension grows from 1 to 2, where the extra channel is the timestamp (for how to encode the timestamp, see the reference).
That is, the input changes from B, N, T, C=1 to B, N, T, C=2; then rerun 1, 2 and 3 with the upgraded input, 12 steps in / 12 steps out on the PEMS-BAY dataset.
'''
| 2.125 | 2 |
tetrisanim3.py | daniel-chuang/tetris | 0 | 5406 | <reponame>daniel-chuang/tetris
# animation for medium article
from termcolor import colored
import time
import imageio
import pyautogui
pyautogui.FAILSAFE = True
matrix = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, 2, 2, 0, 0, 0],
[0, 0, 0, 0, 0, 2, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 0, 0, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
lst = set()
for i in range(21):
for z in range(10):
for row in range(len(matrix)):
if 0 not in matrix[row]:
lst.add(row)
if (i == 20 or i > row) and row in lst:
print(colored("1 " * 10, "green"))
else:
for element in range(len(matrix[row])):
if i == row and z == element:
print(colored(matrix[row][element], "green"), end=" ", flush=False)
elif matrix[row][element] == 1:
print(colored(matrix[row][element], "red"), end=" ", flush=False)
elif matrix[row][element] == 2:
print(colored(matrix[row][element], "blue"), end=" ", flush=False)
else:
print(matrix[row][element], end=" ", flush=False)
print("")
print("")
# takes a screenshot
pyautogui.moveTo(338, 580, duration = 0)
pyautogui.hotkey('command', 'shift', '4')
pyautogui.dragTo(547, 1000, duration = 0, button = 'left')
| 3.0625 | 3 |
inventories/models.py | destodasoftware/kately_api | 0 | 5407 | from django.db import models
from products.models import Product
from utils.models import Utility
class Inventory(Utility):
inventory_number = models.CharField(unique=True, max_length=100, blank=True, null=True)
supplier = models.CharField(max_length=100, blank=True, null=True)
user = models.ForeignKey('auth.User', on_delete=models.SET_NULL, blank=True, null=True)
is_adjusment = models.BooleanField(default=False)
def __str__(self):
return self.inventory_number
class InventoryItem(Utility):
inventory = models.ForeignKey(Inventory, on_delete=models.CASCADE)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
quantity = models.PositiveIntegerField(default=1)
def __str__(self):
return self.product.name
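# Example usage (illustrative only; assumes a Product instance already exists):
#   inv = Inventory.objects.create(inventory_number="INV-0001", supplier="Acme")
#   InventoryItem.objects.create(inventory=inv, product=some_product, quantity=3)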
| 2.28125 | 2 |
hierarchical_app/views.py | stephken/Hierarchical_assessment | 0 | 5408 | from django.shortcuts import render
from hierarchical_app.models import Folder
# Create your views here.
def index_view(request):
return render(request, 'index.html', {'welcome': "Welcome to Kens Hierarchical Data and You assessment", 'folders': Folder.objects.all()})
| 1.867188 | 2 |
bin/train_vit.py | ramizdundar/Chexpert | 0 | 5409 | import sys
import os
import argparse
import logging
import json
import time
import subprocess
from shutil import copyfile
import numpy as np
from sklearn import metrics
from easydict import EasyDict as edict
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.nn import DataParallel
from vit_pytorch import ViT
from tensorboardX import SummaryWriter
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
from data.dataset import ImageDataset # noqa
from model.classifier import Classifier # noqa
from utils.misc import lr_schedule # noqa
from model.utils import get_optimizer # noqa
parser = argparse.ArgumentParser(description='Train model')
parser.add_argument('cfg_path', default=None, metavar='CFG_PATH', type=str,
help="Path to the config file in yaml format")
parser.add_argument('save_path', default=None, metavar='SAVE_PATH', type=str,
help="Path to the saved models")
parser.add_argument('--num_workers', default=8, type=int, help="Number of "
"workers for each data loader")
parser.add_argument('--device_ids', default='0,1,2,3', type=str,
help="GPU indices ""comma separated, e.g. '0,1' ")
parser.add_argument('--pre_train', default=None, type=str, help="If get "
                    "parameters from pretrained model")
parser.add_argument('--resume', default=0, type=int, help="If resume from "
"previous run")
parser.add_argument('--logtofile', default=False, type=bool, help="Save log "
"in save_path/log.txt if set True")
parser.add_argument('--verbose', default=False, type=bool, help="Detail info")
def get_loss(output, target, index, device, cfg):
if cfg.criterion == 'BCE':
for num_class in cfg.num_classes:
assert num_class == 1
target = target[:, index].view(-1)
pos_weight = torch.from_numpy(
np.array(cfg.pos_weight,
dtype=np.float32)).to(device).type_as(target)
if cfg.batch_weight:
if target.sum() == 0:
loss = torch.tensor(0., requires_grad=True).to(device)
else:
weight = (target.size()[0] - target.sum()) / target.sum()
loss = F.binary_cross_entropy_with_logits(
output[index].view(-1), target, pos_weight=weight)
else:
loss = F.binary_cross_entropy_with_logits(
output[index].view(-1), target, pos_weight=pos_weight[index])
label = torch.sigmoid(output[index].view(-1)).ge(0.5).float()
acc = (target == label).float().sum() / len(label)
else:
raise Exception('Unknown criterion : {}'.format(cfg.criterion))
return (loss, acc)
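# Worked example for the batch_weight branch above (illustrative numbers): with a batch of
# 16 labels containing 4 positives, weight = (16 - 4) / 4 = 3.0, so each positive sample
# contributes 3x to the BCE loss; with batch_weight disabled, the fixed cfg.pos_weight[index]
# is used instead.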
def train_epoch(summary, summary_dev, cfg, args, model, dataloader,
dataloader_dev, optimizer, summary_writer, best_dict,
dev_header):
torch.set_grad_enabled(True)
model.train()
device_ids = list(map(int, args.device_ids.split(',')))
device = torch.device('cuda:{}'.format(device_ids[0]))
steps = len(dataloader)
dataiter = iter(dataloader)
label_header = dataloader.dataset._label_header
num_tasks = len(cfg.num_classes)
time_now = time.time()
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
for step in range(steps):
image, target = next(dataiter)
image = image.to(device)
target = target.to(device)
# output, logit_map = model(image)
output = model(image)
output = [torch.unsqueeze(i, 1) for i in output.T]
# different number of tasks
loss = 0
for t in range(num_tasks):
loss_t, acc_t = get_loss(output, target, t, device, cfg)
loss += loss_t
loss_sum[t] += loss_t.item()
acc_sum[t] += acc_t.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
summary['step'] += 1
if summary['step'] % cfg.log_every == 0:
time_spent = time.time() - time_now
time_now = time.time()
loss_sum /= cfg.log_every
acc_sum /= cfg.log_every
loss_str = ' '.join(map(lambda x: '{:.5f}'.format(x), loss_sum))
acc_str = ' '.join(map(lambda x: '{:.3f}'.format(x), acc_sum))
logging.info(
'{}, Train, Epoch : {}, Step : {}, Loss : {}, '
'Acc : {}, Run Time : {:.2f} sec'
.format(time.strftime("%Y-%m-%d %H:%M:%S"),
summary['epoch'] + 1, summary['step'], loss_str,
acc_str, time_spent))
for t in range(num_tasks):
summary_writer.add_scalar(
'train/loss_{}'.format(label_header[t]), loss_sum[t],
summary['step'])
summary_writer.add_scalar(
'train/acc_{}'.format(label_header[t]), acc_sum[t],
summary['step'])
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
if summary['step'] % cfg.test_every == 0:
time_now = time.time()
summary_dev, predlist, true_list = test_epoch(
summary_dev, cfg, args, model, dataloader_dev)
time_spent = time.time() - time_now
auclist = []
for i in range(len(cfg.num_classes)):
y_pred = predlist[i]
y_true = true_list[i]
fpr, tpr, thresholds = metrics.roc_curve(
y_true, y_pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
auclist.append(auc)
summary_dev['auc'] = np.array(auclist)
loss_dev_str = ' '.join(map(lambda x: '{:.5f}'.format(x),
summary_dev['loss']))
acc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['acc']))
auc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['auc']))
logging.info(
'{}, Dev, Step : {}, Loss : {}, Acc : {}, Auc : {},'
'Mean auc: {:.3f} ''Run Time : {:.2f} sec' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
summary_dev['auc'].mean(),
time_spent))
for t in range(len(cfg.num_classes)):
summary_writer.add_scalar(
'dev/loss_{}'.format(dev_header[t]),
summary_dev['loss'][t], summary['step'])
summary_writer.add_scalar(
'dev/acc_{}'.format(dev_header[t]), summary_dev['acc'][t],
summary['step'])
summary_writer.add_scalar(
'dev/auc_{}'.format(dev_header[t]), summary_dev['auc'][t],
summary['step'])
save_best = False
mean_acc = summary_dev['acc'][cfg.save_index].mean()
if mean_acc >= best_dict['acc_dev_best']:
best_dict['acc_dev_best'] = mean_acc
if cfg.best_target == 'acc':
save_best = True
mean_auc = summary_dev['auc'][cfg.save_index].mean()
if mean_auc >= best_dict['auc_dev_best']:
best_dict['auc_dev_best'] = mean_auc
if cfg.best_target == 'auc':
save_best = True
mean_loss = summary_dev['loss'][cfg.save_index].mean()
if mean_loss <= best_dict['loss_dev_best']:
best_dict['loss_dev_best'] = mean_loss
if cfg.best_target == 'loss':
save_best = True
if save_best:
torch.save(
{'epoch': summary['epoch'],
'step': summary['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path, 'best{}.ckpt'.format(
best_dict['best_idx']))
)
best_dict['best_idx'] += 1
if best_dict['best_idx'] > cfg.save_top_k:
best_dict['best_idx'] = 1
logging.info(
'{}, Best, Step : {}, Loss : {}, Acc : {},Auc :{},'
'Best Auc : {:.3f}' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
best_dict['auc_dev_best']))
model.train()
torch.set_grad_enabled(True)
summary['epoch'] += 1
return summary, best_dict
def test_epoch(summary, cfg, args, model, dataloader):
torch.set_grad_enabled(False)
model.eval()
device_ids = list(map(int, args.device_ids.split(',')))
device = torch.device('cuda:{}'.format(device_ids[0]))
steps = len(dataloader)
dataiter = iter(dataloader)
num_tasks = len(cfg.num_classes)
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
predlist = list(x for x in range(len(cfg.num_classes)))
true_list = list(x for x in range(len(cfg.num_classes)))
for step in range(steps):
image, target = next(dataiter)
image = image.to(device)
target = target.to(device)
output = model(image)
output = [torch.unsqueeze(i, 1) for i in output.T]
# different number of tasks
for t in range(len(cfg.num_classes)):
loss_t, acc_t = get_loss(output, target, t, device, cfg)
# AUC
output_tensor = torch.sigmoid(
output[t].view(-1)).cpu().detach().numpy()
target_tensor = target[:, t].view(-1).cpu().detach().numpy()
if step == 0:
predlist[t] = output_tensor
true_list[t] = target_tensor
else:
predlist[t] = np.append(predlist[t], output_tensor)
true_list[t] = np.append(true_list[t], target_tensor)
loss_sum[t] += loss_t.item()
acc_sum[t] += acc_t.item()
summary['loss'] = loss_sum / steps
summary['acc'] = acc_sum / steps
return summary, predlist, true_list
def run(args):
with open(args.cfg_path) as f:
cfg = edict(json.load(f))
if args.verbose is True:
print(json.dumps(cfg, indent=4))
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
if args.logtofile is True:
logging.basicConfig(filename=args.save_path + '/log.txt',
filemode="w", level=logging.INFO)
else:
logging.basicConfig(level=logging.INFO)
if not args.resume:
with open(os.path.join(args.save_path, 'cfg.json'), 'w') as f:
json.dump(cfg, f, indent=1)
device_ids = list(map(int, args.device_ids.split(',')))
num_devices = torch.cuda.device_count()
if num_devices < len(device_ids):
raise Exception(
'#available gpu : {} < --device_ids : {}'
.format(num_devices, len(device_ids)))
device = torch.device('cuda:{}'.format(device_ids[0]))
# model = Classifier(cfg)
model = ViT(
cfg = cfg,
image_size=cfg.width,
patch_size=32,
num_classes=5,
dim=1024,
depth=6,
heads=8,
mlp_dim=512,
dropout=0.3,
emb_dropout=0.3,
channels=3
)
if args.verbose is True:
from torchsummary import summary
if cfg.fix_ratio:
h, w = cfg.long_side, cfg.long_side
else:
h, w = cfg.height, cfg.width
summary(model.to(device), (3, h, w))
model = DataParallel(model, device_ids=device_ids).to(device).train()
if args.pre_train is not None:
if os.path.exists(args.pre_train):
ckpt = torch.load(args.pre_train, map_location=device)
model.module.load_state_dict(ckpt)
optimizer = get_optimizer(model.parameters(), cfg)
src_folder = os.path.dirname(os.path.abspath(__file__)) + '/../'
dst_folder = os.path.join(args.save_path, 'classification')
rc, size = subprocess.getstatusoutput('du --max-depth=0 %s | cut -f1'
% src_folder)
if rc != 0:
raise Exception('Copy folder error : {}'.format(rc))
rc, err_msg = subprocess.getstatusoutput('cp -R %s %s' % (src_folder,
dst_folder))
if rc != 0:
raise Exception('copy folder error : {}'.format(err_msg))
copyfile(cfg.train_csv, os.path.join(args.save_path, 'train.csv'))
copyfile(cfg.dev_csv, os.path.join(args.save_path, 'dev.csv'))
dataloader_train = DataLoader(
ImageDataset(cfg.train_csv, cfg, mode='train'),
batch_size=cfg.train_batch_size, num_workers=args.num_workers,
drop_last=True, shuffle=True)
dataloader_dev = DataLoader(
ImageDataset(cfg.dev_csv, cfg, mode='dev'),
batch_size=cfg.dev_batch_size, num_workers=args.num_workers,
drop_last=False, shuffle=False)
dev_header = dataloader_dev.dataset._label_header
summary_train = {'epoch': 0, 'step': 0}
summary_dev = {'loss': float('inf'), 'acc': 0.0}
summary_writer = SummaryWriter(args.save_path)
epoch_start = 0
best_dict = {
"acc_dev_best": 0.0,
"auc_dev_best": 0.0,
"loss_dev_best": float('inf'),
"fused_dev_best": 0.0,
"best_idx": 1}
if args.resume:
ckpt_path = os.path.join(args.save_path, 'train.ckpt')
ckpt = torch.load(ckpt_path, map_location=device)
model.module.load_state_dict(ckpt['state_dict'])
summary_train = {'epoch': ckpt['epoch'], 'step': ckpt['step']}
best_dict['acc_dev_best'] = ckpt['acc_dev_best']
best_dict['loss_dev_best'] = ckpt['loss_dev_best']
best_dict['auc_dev_best'] = ckpt['auc_dev_best']
epoch_start = ckpt['epoch']
for epoch in range(epoch_start, cfg.epoch):
lr = lr_schedule(cfg.lr, cfg.lr_factor, summary_train['epoch'],
cfg.lr_epochs)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
summary_train, best_dict = train_epoch(
summary_train, summary_dev, cfg, args, model,
dataloader_train, dataloader_dev, optimizer,
summary_writer, best_dict, dev_header)
time_now = time.time()
summary_dev, predlist, true_list = test_epoch(
summary_dev, cfg, args, model, dataloader_dev)
time_spent = time.time() - time_now
auclist = []
for i in range(len(cfg.num_classes)):
y_pred = predlist[i]
y_true = true_list[i]
fpr, tpr, thresholds = metrics.roc_curve(
y_true, y_pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
auclist.append(auc)
summary_dev['auc'] = np.array(auclist)
loss_dev_str = ' '.join(map(lambda x: '{:.5f}'.format(x),
summary_dev['loss']))
acc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['acc']))
auc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['auc']))
logging.info(
'{}, Dev, Step : {}, Loss : {}, Acc : {}, Auc : {},'
'Mean auc: {:.3f} ''Run Time : {:.2f} sec' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary_train['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
summary_dev['auc'].mean(),
time_spent))
for t in range(len(cfg.num_classes)):
summary_writer.add_scalar(
'dev/loss_{}'.format(dev_header[t]), summary_dev['loss'][t],
summary_train['step'])
summary_writer.add_scalar(
'dev/acc_{}'.format(dev_header[t]), summary_dev['acc'][t],
summary_train['step'])
summary_writer.add_scalar(
'dev/auc_{}'.format(dev_header[t]), summary_dev['auc'][t],
summary_train['step'])
save_best = False
mean_acc = summary_dev['acc'][cfg.save_index].mean()
if mean_acc >= best_dict['acc_dev_best']:
best_dict['acc_dev_best'] = mean_acc
if cfg.best_target == 'acc':
save_best = True
mean_auc = summary_dev['auc'][cfg.save_index].mean()
if mean_auc >= best_dict['auc_dev_best']:
best_dict['auc_dev_best'] = mean_auc
if cfg.best_target == 'auc':
save_best = True
mean_loss = summary_dev['loss'][cfg.save_index].mean()
if mean_loss <= best_dict['loss_dev_best']:
best_dict['loss_dev_best'] = mean_loss
if cfg.best_target == 'loss':
save_best = True
if save_best:
torch.save(
{'epoch': summary_train['epoch'],
'step': summary_train['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path,
'best{}.ckpt'.format(best_dict['best_idx']))
)
best_dict['best_idx'] += 1
if best_dict['best_idx'] > cfg.save_top_k:
best_dict['best_idx'] = 1
logging.info(
'{}, Best, Step : {}, Loss : {}, Acc : {},'
'Auc :{},Best Auc : {:.3f}' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary_train['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
best_dict['auc_dev_best']))
torch.save({'epoch': summary_train['epoch'],
'step': summary_train['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path, 'train.ckpt'))
summary_writer.close()
def main():
args = parser.parse_args()
if args.verbose is True:
print('Using the specified args:')
print(args)
run(args)
if __name__ == '__main__':
main()
| 1.921875 | 2 |
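# Illustration (not part of the record above): a minimal, self-contained sketch of
# the per-task ROC-AUC aggregation that test_epoch/run perform with sklearn.metrics.
# The two arrays below are hypothetical stand-ins for predlist/true_list.
import numpy as np
from sklearn import metrics

predlist = [np.array([0.9, 0.2, 0.7, 0.4]), np.array([0.1, 0.8, 0.3, 0.6])]  # sigmoid outputs per task
true_list = [np.array([1, 0, 1, 0]), np.array([0, 1, 0, 1])]                 # binary labels per task

auclist = []
for y_pred, y_true in zip(predlist, true_list):
    fpr, tpr, _ = metrics.roc_curve(y_true, y_pred, pos_label=1)
    auclist.append(metrics.auc(fpr, tpr))
print(np.array(auclist), np.array(auclist).mean())  # per-task AUCs and their mean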
Sets/the capaint s room.py | AndreasGeiger/hackerrank-python | 0 | 5410 | groupSize = input()  # group size K; read to consume the line but not needed below
# HackerRank "Captain's Room": every room number appears K times except the
# captain's, which appears exactly once.
groups = list(map(int, input().split(' ')))
tmpArray1 = set()  # every room number seen so far
tmpArray2 = set()  # room numbers seen exactly once so far
for i in groups:
    if i in tmpArray1:
        tmpArray2.discard(i)
    else:
        tmpArray1.add(i)
        tmpArray2.add(i)
# only the captain's room number is left
for i in tmpArray2:
    print(i)
| 3.25 | 3 |
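# Illustration (not part of the submission above): the same answer can be checked
# with collections.Counter — the captain's room is the only number whose count is 1.
# The room list below is the sample HackerRank input with K = 5 and captain 8.
from collections import Counter

rooms = [1, 2, 3, 6, 5, 4, 4, 2, 5, 3, 6, 1, 6, 5, 3, 2, 4, 1, 2, 5,
         1, 4, 3, 6, 8, 4, 3, 1, 5, 6, 2]
captain = [room for room, count in Counter(rooms).items() if count == 1][0]
print(captain)  # -> 8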
tests/testsoma.py | gtmadureira/Python | 4 | 5411 | import unittest
from hf_src.main import soma
class TestSoma(unittest.TestCase):
def test_retorno_soma_15_30(self):
self.assertEqual(soma(15, 30), 45)
| 2.375 | 2 |
src/oictest/setup.py | rohe/oictest | 32 | 5412 | import copy
import json
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.utils.keyio import KeyJar
from oic.utils.keyio import KeyBundle
__author__ = 'roland'
import logging
logger = logging.getLogger(__name__)
class OIDCError(Exception):
pass
def flow2sequence(operations, item):
flow = operations.FLOWS[item]
return [operations.PHASES[phase] for phase in flow["sequence"]]
class OIDCTestSetup(object):
def __init__(self, client_cls, config, test_defs):
"""
:param config: Imported configuration module
:return:
"""
self.client_cls = client_cls
self.config = config
self.test_features = []
self.client = self.create_client(**config.CLIENT)
self.test_defs = test_defs
def create_client(self, **kwargs):
"""
Instantiate a _client instance
:param: Keyword arguments
Keys are ["srv_discovery_url", "client_info", "client_registration",
"provider_info". "keys]
:return: _client instance
"""
_key_set = set(kwargs.keys())
args = {}
_client = self.client_cls(client_authn_method=CLIENT_AUTHN_METHOD,
behaviour=kwargs["behaviour"],
verify_ssl=self.config.VERIFY_SSL, **args)
# The behaviour parameter is not significant for the election process
_key_set.discard("behaviour")
try:
setattr(_client, "allow", kwargs["allow"])
except KeyError:
pass
else:
_key_set.discard("allow")
try:
jwks = self.construct_jwks(_client, kwargs["keys"])
except KeyError:
pass
else:
# export JWKS
f = open("export/jwk.json", "w")
f.write(json.dumps(jwks))
f.close()
_client.jwks_uri = self.config.CLIENT["key_export_url"]
self.test_features = _key_set
try:
_client.client_prefs = copy.copy(kwargs["preferences"])
except KeyError:
pass
else:
_key_set.discard("preferences")
if "client_info" in _key_set:
_client.redirect_uris = self.config.CLIENT[
"client_info"]["redirect_uris"]
elif "client_registration" in _key_set:
reg_info = self.config.CLIENT["client_registration"]
_client.redirect_uris = reg_info["redirect_uris"]
_client.client_id = reg_info["client_id"]
_client.client_secret = reg_info["client_secret"]
return _client
@staticmethod
def construct_jwks(_client, key_conf):
"""
Construct the jwks
"""
if _client.keyjar is None:
_client.keyjar = KeyJar()
kbl = []
kid_template = "a%d"
kid = 0
for typ, info in key_conf.items():
kb = KeyBundle(source="file://%s" % info["key"], fileformat="der",
keytype=typ)
for k in kb.keys():
k.serialize()
k.kid = kid_template % kid
kid += 1
_client.kid[k.use][k.kty] = k.kid
_client.keyjar.add_kb("", kb)
kbl.append(kb)
jwks = {"keys": []}
for kb in kbl:
# ignore simple keys
jwks["keys"].extend([k.to_dict()
for k in kb.keys() if k.kty != 'oct'])
return jwks
def make_sequence(self, flow):
"""
Translate a flow name into a sequence of request/responses.
:param flow: Which test flow to use
:return: test sequence and test definitions
"""
sequence = flow2sequence(self.test_defs, flow)
res = {"sequence": sequence,
"tests": {"pre": [], "post": []},
"flow": [flow],
"block": [],
"mode": "",
"expect_exception": False}
_flow = self.test_defs.FLOWS[flow]
for param in ["tests", "block", "mode", "expect_exception"]:
try:
res[param] = _flow[param]
except KeyError:
pass
return res
def add_init(self, test_spec):
"""
Add _client registration and provider info gathering if necessary
:param test_spec:
:return:
"""
_seq = test_spec["sequence"]
_flow = test_spec["flow"]
if "client_info" in self.test_features and \
"registration" not in test_spec["block"]:
_register = True
# May not be the first item in the sequence
for sq in _seq:
try:
if sq[0].request == "RegistrationRequest":
_register = False
except TypeError:
pass
if _register:
_ext = self.test_defs.PHASES["oic-registration"]
_seq.insert(0, _ext)
_flow.insert(0, "oic-registration")
if "srv_discovery_url" in self.test_features:
op_spec = self.test_defs.PHASES["provider-discovery"]
if op_spec not in _seq:
_seq.insert(0, op_spec)
_flow.insert(0, "provider-discovery")
return test_spec
def request_and_return(conv, url, response=None, method="GET", body=None,
body_type="json", state="", http_args=None,
**kwargs):
"""
:param url: The URL to which the request should be sent
:param response: Response type
:param method: Which HTTP method to use
:param body: A message body if any
:param body_type: The format of the body of the return message
:param http_args: Arguments for the HTTP _client
:return: A cls or ErrorResponse instance or the HTTP response
instance if no response body was expected.
"""
if http_args is None:
http_args = {}
_cli = conv._client
try:
_resp = _cli.http_request(url, method, data=body, **http_args)
except Exception:
raise
conv.position = url
conv.last_response = _resp
conv.last_content = _resp.content
if not "keyjar" in kwargs:
kwargs["keyjar"] = conv.keyjar
_response = _cli.parse_request_response(_resp, response, body_type, state,
**kwargs)
conv.protocol_response.append((_response, _resp.content))
return _response
def test_summation(conv, sid):
status = 0
for item in conv.test_output:
if item["status"] > status:
status = item["status"]
if status == 0:
status = 1
info = {
"id": sid,
"status": status,
"tests": conv.test_output
}
return info | 2.234375 | 2 |
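# Illustration (not part of the module above, with made-up PHASES/FLOWS data): the
# mapping that flow2sequence performs — a flow's phase names are resolved to the
# corresponding phase objects.
PHASES = {"provider-discovery": ("discovery-phase",), "oic-registration": ("registration-phase",)}
FLOWS = {"basic": {"sequence": ["provider-discovery", "oic-registration"]}}

sequence = [PHASES[phase] for phase in FLOWS["basic"]["sequence"]]
print(sequence)  # -> [('discovery-phase',), ('registration-phase',)]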
HLTrigger/Configuration/python/HLT_75e33/modules/hltPFPuppiNoLep_cfi.py | PKUfudawei/cmssw | 1 | 5413 | <gh_stars>1-10
import FWCore.ParameterSet.Config as cms
hltPFPuppiNoLep = cms.EDProducer("PuppiProducer",
DeltaZCut = cms.double(0.1),
DeltaZCutForChargedFromPUVtxs = cms.double(0.2),
EtaMaxCharged = cms.double(99999.0),
EtaMaxPhotons = cms.double(2.5),
EtaMinUseDeltaZ = cms.double(-1.0),
MinPuppiWeight = cms.double(0.01),
NumOfPUVtxsForCharged = cms.uint32(0),
PUProxyValue = cms.InputTag("hltPixelClustersMultiplicity"),
PtMaxCharged = cms.double(-1.0),
PtMaxNeutrals = cms.double(200.0),
PtMaxNeutralsStartSlope = cms.double(0.0),
PtMaxPhotons = cms.double(20.0),
UseDeltaZCut = cms.bool(True),
UseFromPVLooseTight = cms.bool(False),
algos = cms.VPSet(
cms.PSet(
EtaMaxExtrap = cms.double(2.0),
MedEtaSF = cms.vdouble(1.0, 1.0),
MinNeutralPt = cms.vdouble(0.5105, 0.821),
MinNeutralPtSlope = cms.vdouble(9.51e-06, 1.902e-05),
RMSEtaSF = cms.vdouble(1.0, 1.0),
etaMax = cms.vdouble(2.5, 3.5),
etaMin = cms.vdouble(0.0, 2.5),
ptMin = cms.vdouble(0.0, 0.0),
puppiAlgos = cms.VPSet(cms.PSet(
algoId = cms.int32(5),
applyLowPUCorr = cms.bool(True),
combOpt = cms.int32(0),
cone = cms.double(0.4),
rmsPtMin = cms.double(0.1),
rmsScaleFactor = cms.double(1.0),
useCharged = cms.bool(True)
))
),
cms.PSet(
EtaMaxExtrap = cms.double(2.0),
MedEtaSF = cms.vdouble(0.75),
MinNeutralPt = cms.vdouble(3.656),
MinNeutralPtSlope = cms.vdouble(5.072e-05),
RMSEtaSF = cms.vdouble(1.0),
etaMax = cms.vdouble(10.0),
etaMin = cms.vdouble(3.5),
ptMin = cms.vdouble(0.0),
puppiAlgos = cms.VPSet(cms.PSet(
algoId = cms.int32(5),
applyLowPUCorr = cms.bool(True),
combOpt = cms.int32(0),
cone = cms.double(0.4),
rmsPtMin = cms.double(0.5),
rmsScaleFactor = cms.double(1.0),
useCharged = cms.bool(False)
))
)
),
applyCHS = cms.bool(True),
candName = cms.InputTag("particleFlowTmp"),
clonePackedCands = cms.bool(False),
invertPuppi = cms.bool(False),
puppiDiagnostics = cms.bool(False),
puppiNoLep = cms.bool(True),
useExistingWeights = cms.bool(False),
useExp = cms.bool(False),
usePUProxyValue = cms.bool(True),
vertexName = cms.InputTag("goodOfflinePrimaryVertices"),
vtxNdofCut = cms.int32(4),
vtxZCut = cms.double(24)
)
| 1.164063 | 1 |
wizbin/build.py | RogueScholar/debreate | 97 | 5414 | # -*- coding: utf-8 -*-
## \package wizbin.build
# MIT licensing
# See: docs/LICENSE.txt
import commands, os, shutil, subprocess, traceback, wx
from dbr.functions import FileUnstripped
from dbr.language import GT
from dbr.log import DebugEnabled
from dbr.log import Logger
from dbr.md5 import WriteMD5
from fileio.fileio import ReadFile
from fileio.fileio import WriteFile
from globals.bitmaps import ICON_EXCLAMATION
from globals.bitmaps import ICON_INFORMATION
from globals.errorcodes import dbrerrno
from globals.execute import ExecuteCommand
from globals.execute import GetExecutable
from globals.execute import GetSystemInstaller
from globals.ident import btnid
from globals.ident import chkid
from globals.ident import inputid
from globals.ident import pgid
from globals.paths import ConcatPaths
from globals.paths import PATH_app
from globals.strings import GS
from globals.strings import RemoveEmptyLines
from globals.strings import TextIsEmpty
from globals.system import PY_VER_MAJ
from globals.tooltips import SetPageToolTips
from input.toggle import CheckBox
from input.toggle import CheckBoxESS
from startup.tests import UsingTest
from ui.button import CreateButton
from ui.checklist import CheckListDialog
from ui.dialog import DetailedMessageDialog
from ui.dialog import ShowErrorDialog
from ui.layout import BoxSizer
from ui.output import OutputLog
from ui.panel import BorderedPanel
from ui.progress import PD_DEFAULT_STYLE
from ui.progress import ProgressDialog
from ui.progress import TimedProgressDialog
from ui.style import layout as lyt
from wiz.helper import FieldEnabled
from wiz.helper import GetField
from wiz.helper import GetMainWindow
from wiz.helper import GetPage
from wiz.wizard import WizardPage
## Build page
class Page(WizardPage):
## Constructor
#
# \param parent
# Parent <b><i>wx.Window</i></b> instance
def __init__(self, parent):
WizardPage.__init__(self, parent, pgid.BUILD)
# ----- Extra Options
pnl_options = BorderedPanel(self)
self.chk_md5 = CheckBoxESS(pnl_options, chkid.MD5, GT(u'Create md5sums file'),
name=u'MD5', defaultValue=True, commands=u'md5sum')
# The » character denotes that an alternate tooltip should be shown if the control is disabled
self.chk_md5.tt_name = u'md5»'
self.chk_md5.col = 0
# Option to strip binaries
self.chk_strip = CheckBoxESS(pnl_options, chkid.STRIP, GT(u'Strip binaries'),
name=u'strip»', defaultValue=True, commands=u'strip')
self.chk_strip.col = 0
# Deletes the temporary build tree
self.chk_rmstage = CheckBoxESS(pnl_options, chkid.DELETE, GT(u'Delete staged directory'),
name=u'RMSTAGE', defaultValue=True)
self.chk_rmstage.col = 0
# Checks the output .deb for errors
self.chk_lint = CheckBoxESS(pnl_options, chkid.LINT, GT(u'Check package for errors with lintian'),
name=u'LINTIAN', defaultValue=True, commands=u'lintian')
self.chk_lint.tt_name = u'lintian»'
self.chk_lint.col = 0
# Installs the deb on the system
self.chk_install = CheckBox(pnl_options, chkid.INSTALL, GT(u'Install package after build'),
name=u'INSTALL', commands=(u'gdebi-gtk', u'gdebi-kde',))
self.chk_install.tt_name = u'install»'
self.chk_install.col = 0
# *** Lintian Overrides *** #
if UsingTest(u'alpha'):
# FIXME: Move next to lintian check box
Logger.Info(__name__, u'Enabling alpha feature "lintian overrides" option')
self.lint_overrides = []
btn_lint_overrides = CreateButton(self, label=GT(u'Lintian overrides'))
btn_lint_overrides.Bind(wx.EVT_BUTTON, self.OnSetLintOverrides)
btn_build = CreateButton(self, btnid.BUILD, GT(u'Build'), u'build', 64)
# Display log
dsp_log = OutputLog(self)
SetPageToolTips(self)
# *** Event Handling *** #
btn_build.Bind(wx.EVT_BUTTON, self.OnBuild)
# *** Layout *** #
lyt_options = wx.GridBagSizer()
next_row = 0
prev_row = next_row
for CHK in pnl_options.Children:
row = next_row
FLAGS = lyt.PAD_LR
if CHK.col:
row = prev_row
FLAGS = wx.RIGHT
lyt_options.Add(CHK, (row, CHK.col), flag=FLAGS, border=5)
if not CHK.col:
prev_row = next_row
next_row += 1
pnl_options.SetSizer(lyt_options)
pnl_options.SetAutoLayout(True)
pnl_options.Layout()
lyt_buttons = BoxSizer(wx.HORIZONTAL)
lyt_buttons.Add(btn_build, 1)
lyt_main = BoxSizer(wx.VERTICAL)
lyt_main.AddSpacer(10)
lyt_main.Add(wx.StaticText(self, label=GT(u'Extra Options')), 0,
lyt.ALGN_LB|wx.LEFT, 5)
lyt_main.Add(pnl_options, 0, wx.LEFT, 5)
lyt_main.AddSpacer(5)
if UsingTest(u'alpha'):
#lyt_main.Add(wx.StaticText(self, label=GT(u'Lintian overrides')), 0, wx.LEFT, 5)
lyt_main.Add(btn_lint_overrides, 0, wx.LEFT, 5)
lyt_main.AddSpacer(5)
lyt_main.Add(lyt_buttons, 0, lyt.ALGN_C)
lyt_main.Add(dsp_log, 2, wx.EXPAND|lyt.PAD_LRB, 5)
self.SetAutoLayout(True)
self.SetSizer(lyt_main)
self.Layout()
## Method that builds the actual Debian package
#
# \param task_list
# \b \e dict : Task string IDs & page data
# \param build_path
# \b \e unicode|str : Directory where .deb will be output
# \param filename
# \b \e unicode|str : Basename of output file without .deb extension
# \return
# \b \e dbrerror : SUCCESS if build completed successfully
def Build(self, task_list, build_path, filename):
# Declare this here in case of error before progress dialog created
build_progress = None
try:
# Other mandatory tasks that will be processed
mandatory_tasks = (
u'stage',
u'install_size',
u'control',
u'build',
)
# Add other mandatory tasks
for T in mandatory_tasks:
task_list[T] = None
task_count = len(task_list)
# Add each file for updating progress dialog
if u'files' in task_list:
task_count += len(task_list[u'files'])
# Add each script for updating progress dialog
if u'scripts' in task_list:
task_count += len(task_list[u'scripts'])
if DebugEnabled():
task_msg = GT(u'Total tasks: {}').format(task_count)
print(u'DEBUG: [{}] {}'.format(__name__, task_msg))
for T in task_list:
print(u'\t{}'.format(T))
create_changelog = u'changelog' in task_list
create_copyright = u'copyright' in task_list
pg_control = GetPage(pgid.CONTROL)
pg_menu = GetPage(pgid.MENU)
stage_dir = u'{}/{}__dbp__'.format(build_path, filename)
if os.path.isdir(u'{}/DEBIAN'.format(stage_dir)):
try:
shutil.rmtree(stage_dir)
except OSError:
ShowErrorDialog(GT(u'Could not free stage directory: {}').format(stage_dir),
title=GT(u'Cannot Continue'))
return (dbrerrno.EEXIST, None)
# Actual path to new .deb
deb = u'"{}/{}.deb"'.format(build_path, filename)
progress = 0
task_msg = GT(u'Preparing build tree')
Logger.Debug(__name__, task_msg)
wx.Yield()
build_progress = ProgressDialog(GetMainWindow(), GT(u'Building'), task_msg,
maximum=task_count,
style=PD_DEFAULT_STYLE|wx.PD_ELAPSED_TIME|wx.PD_ESTIMATED_TIME|wx.PD_CAN_ABORT)
DIR_debian = ConcatPaths((stage_dir, u'DEBIAN'))
# Make a fresh build tree
os.makedirs(DIR_debian)
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
def UpdateProgress(current_task, message=None):
task_eval = u'{} / {}'.format(current_task, task_count)
if message:
Logger.Debug(__name__, u'{} ({})'.format(message, task_eval))
wx.Yield()
build_progress.Update(current_task, message)
return
wx.Yield()
build_progress.Update(current_task)
# *** Files *** #
if u'files' in task_list:
UpdateProgress(progress, GT(u'Copying files'))
no_follow_link = GetField(GetPage(pgid.FILES), chkid.SYMLINK).IsChecked()
# TODO: move this into a file functions module
def _copy(f_src, f_tgt, exe=False):
# NOTE: Python 3 appears to have follow_symlinks option for shutil.copy
# FIXME: copying nested symbolic link may not work
if os.path.isdir(f_src):
if os.path.islink(f_src) and no_follow_link:
Logger.Debug(__name__, u'Adding directory symbolic link to stage: {}'.format(f_tgt))
os.symlink(os.readlink(f_src), f_tgt)
else:
Logger.Debug(__name__, u'Adding directory to stage: {}'.format(f_tgt))
shutil.copytree(f_src, f_tgt)
os.chmod(f_tgt, 0o0755)
elif os.path.isfile(f_src):
if os.path.islink(f_src) and no_follow_link:
Logger.Debug(__name__, u'Adding file symbolic link to stage: {}'.format(f_tgt))
os.symlink(os.readlink(f_src), f_tgt)
else:
if exe:
Logger.Debug(__name__, u'Adding executable to stage: {}'.format(f_tgt))
else:
Logger.Debug(__name__, u'Adding file to stage: {}'.format(f_tgt))
shutil.copy(f_src, f_tgt)
# Set FILE permissions
if exe:
os.chmod(f_tgt, 0o0755)
else:
os.chmod(f_tgt, 0o0644)
files_data = task_list[u'files']
for FILE in files_data:
file_defs = FILE.split(u' -> ')
source_file = file_defs[0]
target_file = u'{}{}/{}'.format(stage_dir, file_defs[2], file_defs[1])
target_dir = os.path.dirname(target_file)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
                    # Remove asterisks from executables
exe = False
if source_file[-1] == u'*':
exe = True
source_file = source_file[:-1]
_copy(source_file, u'{}/{}'.format(target_dir, os.path.basename(source_file)), exe)
# Individual files
progress += 1
UpdateProgress(progress)
# Entire file task
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Strip files ***#
# FIXME: Needs only be run if 'files' step is used
if u'strip' in task_list:
UpdateProgress(progress, GT(u'Stripping binaries'))
for ROOT, DIRS, FILES in os.walk(stage_dir): #@UnusedVariable
for F in FILES:
# Don't check files in DEBIAN directory
if ROOT != DIR_debian:
F = ConcatPaths((ROOT, F))
if FileUnstripped(F):
Logger.Debug(__name__, u'Unstripped file: {}'.format(F))
# FIXME: Strip command should be set as class member?
ExecuteCommand(GetExecutable(u'strip'), F)
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
package = GetField(pg_control, inputid.PACKAGE).GetValue()
# Make sure that the directory is available in which to place documentation
if create_changelog or create_copyright:
doc_dir = u'{}/usr/share/doc/{}'.format(stage_dir, package)
if not os.path.isdir(doc_dir):
os.makedirs(doc_dir)
# *** Changelog *** #
if create_changelog:
UpdateProgress(progress, GT(u'Creating changelog'))
# If changelog will be installed to default directory
changelog_target = task_list[u'changelog'][0]
if changelog_target == u'STANDARD':
changelog_target = ConcatPaths((u'{}/usr/share/doc'.format(stage_dir), package))
else:
changelog_target = ConcatPaths((stage_dir, changelog_target))
if not os.path.isdir(changelog_target):
os.makedirs(changelog_target)
WriteFile(u'{}/changelog'.format(changelog_target), task_list[u'changelog'][1])
CMD_gzip = GetExecutable(u'gzip')
if CMD_gzip:
UpdateProgress(progress, GT(u'Compressing changelog'))
c = u'{} -n --best "{}/changelog"'.format(CMD_gzip, changelog_target)
clog_status = commands.getstatusoutput(c.encode(u'utf-8'))
if clog_status[0]:
ShowErrorDialog(GT(u'Could not compress changelog'), clog_status[1], warn=True, title=GT(u'Warning'))
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Copyright *** #
if create_copyright:
UpdateProgress(progress, GT(u'Creating copyright'))
WriteFile(u'{}/usr/share/doc/{}/copyright'.format(stage_dir, package), task_list[u'copyright'])
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# Characters that should not be in filenames
invalid_chars = (u' ', u'/')
# *** Menu launcher *** #
if u'launcher' in task_list:
UpdateProgress(progress, GT(u'Creating menu launcher'))
# This might be changed later to set a custom directory
menu_dir = u'{}/usr/share/applications'.format(stage_dir)
menu_filename = pg_menu.GetOutputFilename()
# Remove invalid characters from filename
for char in invalid_chars:
menu_filename = menu_filename.replace(char, u'_')
if not os.path.isdir(menu_dir):
os.makedirs(menu_dir)
WriteFile(u'{}/{}.desktop'.format(menu_dir, menu_filename), task_list[u'launcher'])
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** md5sums file *** #
# Good practice to create hashes before populating DEBIAN directory
if u'md5sums' in task_list:
UpdateProgress(progress, GT(u'Creating md5sums'))
if not WriteMD5(stage_dir, parent=build_progress):
# Couldn't call md5sum command
build_progress.Cancel()
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Scripts *** #
if u'scripts' in task_list:
UpdateProgress(progress, GT(u'Creating scripts'))
scripts = task_list[u'scripts']
for SCRIPT in scripts:
script_name = SCRIPT
script_text = scripts[SCRIPT]
script_filename = ConcatPaths((stage_dir, u'DEBIAN', script_name))
WriteFile(script_filename, script_text)
                    # Make sure script path is wrapped in quotes to avoid whitespace errors
                    os.chmod(script_filename, 0o0755)
os.system((u'chmod +x "{}"'.format(script_filename)))
# Individual scripts
progress += 1
UpdateProgress(progress)
# Entire script task
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Control file *** #
UpdateProgress(progress, GT(u'Getting installed size'))
# Get installed-size
installed_size = os.popen((u'du -hsk "{}"'.format(stage_dir))).readlines()
installed_size = installed_size[0].split(u'\t')
installed_size = installed_size[0]
# Insert Installed-Size into control file
control_data = pg_control.Get().split(u'\n')
control_data.insert(2, u'Installed-Size: {}'.format(installed_size))
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# Create final control file
UpdateProgress(progress, GT(u'Creating control file'))
# dpkg fails if there is no newline at end of file
control_data = u'\n'.join(control_data).strip(u'\n')
# Ensure there is only one empty trailing newline
# Two '\n' to show physical empty line, but not required
# Perhaps because string is not null terminated???
control_data = u'{}\n\n'.format(control_data)
WriteFile(u'{}/DEBIAN/control'.format(stage_dir), control_data, noStrip=u'\n')
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Final build *** #
UpdateProgress(progress, GT(u'Running dpkg'))
working_dir = os.path.split(stage_dir)[0]
c_tree = os.path.split(stage_dir)[1]
deb_package = u'{}.deb'.format(filename)
            # Change to the working directory because dpkg seems to have problems with spaces in the path
os.chdir(working_dir)
# HACK to fix file/dir permissions
for ROOT, DIRS, FILES in os.walk(stage_dir):
for D in DIRS:
D = u'{}/{}'.format(ROOT, D)
os.chmod(D, 0o0755)
for F in FILES:
F = u'{}/{}'.format(ROOT, F)
if os.access(F, os.X_OK):
os.chmod(F, 0o0755)
else:
os.chmod(F, 0o0644)
# FIXME: Should check for working fakeroot & dpkg-deb executables
build_status = commands.getstatusoutput((u'{} {} -b "{}" "{}"'.format(GetExecutable(u'fakeroot'), GetExecutable(u'dpkg-deb'), c_tree, deb_package)))
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Delete staged directory *** #
if u'rmstage' in task_list:
UpdateProgress(progress, GT(u'Removing temp directory'))
try:
shutil.rmtree(stage_dir)
except OSError:
ShowErrorDialog(GT(u'An error occurred when trying to delete the build tree'),
parent=build_progress)
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** ERROR CHECK
if u'lintian' in task_list:
UpdateProgress(progress, GT(u'Checking package for errors'))
                # FIXME: Should be set as class member?
CMD_lintian = GetExecutable(u'lintian')
errors = commands.getoutput((u'{} {}'.format(CMD_lintian, deb)))
if errors != wx.EmptyString:
e1 = GT(u'Lintian found some issues with the package.')
e2 = GT(u'Details saved to {}').format(filename)
WriteFile(u'{}/{}.lintian'.format(build_path, filename), errors)
DetailedMessageDialog(build_progress, GT(u'Lintian Errors'),
ICON_INFORMATION, u'{}\n{}.lintian'.format(e1, e2), errors).ShowModal()
progress += 1
# Close progress dialog
wx.Yield()
build_progress.Update(progress)
build_progress.Destroy()
# Build completed successfullly
if not build_status[0]:
return (dbrerrno.SUCCESS, deb_package)
if PY_VER_MAJ <= 2:
# Unicode decoder has trouble with certain characters. Replace any
# non-decodable characters with � (0xFFFD).
build_output = list(build_status[1])
# String & unicode string incompatibilities
index = 0
for C in build_output:
try:
GS(C)
except UnicodeDecodeError:
build_output[index] = u'�'
index += 1
build_status = (build_status[0], u''.join(build_output))
# Build failed
return (build_status[0], build_status[1])
except:
if build_progress:
build_progress.Destroy()
return(dbrerrno.EUNKNOWN, traceback.format_exc())
## TODO: Doxygen
#
# \return
# \b \e tuple containing Return code & build details
def BuildPrep(self):
# Declare these here in case of error before dialogs created
save_dia = None
prebuild_progress = None
try:
# List of tasks for build process
# 'stage' should be very first task
task_list = {}
# Control page
pg_control = GetPage(pgid.CONTROL)
fld_package = GetField(pg_control, inputid.PACKAGE)
fld_version = GetField(pg_control, inputid.VERSION)
fld_maint = GetField(pg_control, inputid.MAINTAINER)
fld_email = GetField(pg_control, inputid.EMAIL)
fields_control = (
fld_package,
fld_version,
fld_maint,
fld_email,
)
# Menu launcher page
pg_launcher = GetPage(pgid.MENU)
# Check to make sure that all required fields have values
required = list(fields_control)
if pg_launcher.IsOkay():
task_list[u'launcher'] = pg_launcher.Get()
required.append(GetField(pg_launcher, inputid.NAME))
if not GetField(pg_launcher, chkid.FNAME).GetValue():
required.append(GetField(pg_launcher, inputid.FNAME))
for item in required:
if TextIsEmpty(item.GetValue()):
field_name = GT(item.GetName().title())
page_name = pg_control.GetName()
if item not in fields_control:
page_name = pg_launcher.GetName()
return (dbrerrno.FEMPTY, u'{} ➜ {}'.format(page_name, field_name))
# Get information from control page for default filename
package = fld_package.GetValue()
# Remove whitespace
package = package.strip(u' \t')
package = u'-'.join(package.split(u' '))
version = fld_version.GetValue()
# Remove whitespace
version = version.strip(u' \t')
version = u''.join(version.split())
arch = GetField(pg_control, inputid.ARCH).GetStringSelection()
# Dialog for save destination
ttype = GT(u'Debian packages')
save_dia = wx.FileDialog(self, GT(u'Save'), os.getcwd(), wx.EmptyString, u'{}|*.deb'.format(ttype),
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT|wx.FD_CHANGE_DIR)
save_dia.SetFilename(u'{}_{}_{}.deb'.format(package, version, arch))
if not save_dia.ShowModal() == wx.ID_OK:
return (dbrerrno.ECNCLD, None)
build_path = os.path.split(save_dia.GetPath())[0]
filename = os.path.split(save_dia.GetPath())[1].split(u'.deb')[0]
# Control, menu, & build pages not added to this list
page_checks = (
(pgid.FILES, u'files'),
(pgid.SCRIPTS, u'scripts'),
(pgid.CHANGELOG, u'changelog'),
(pgid.COPYRIGHT, u'copyright'),
)
# Install step is not added to this list
# 'control' should be after 'md5sums'
# 'build' should be after 'control'
other_checks = (
(self.chk_md5, u'md5sums'),
(self.chk_strip, u'strip'),
(self.chk_rmstage, u'rmstage'),
(self.chk_lint, u'lintian'),
)
prep_task_count = len(page_checks) + len(other_checks)
progress = 0
wx.Yield()
prebuild_progress = ProgressDialog(GetMainWindow(), GT(u'Preparing to build'),
maximum=prep_task_count)
if wx.MAJOR_VERSION < 3:
# Resize dialog for better fit
pb_size = prebuild_progress.GetSizeTuple()
pb_size = (pb_size[0]+200, pb_size[1])
prebuild_progress.SetSize(pb_size)
prebuild_progress.CenterOnParent()
for PID, id_string in page_checks:
wx.Yield()
prebuild_progress.Update(progress, GT(u'Checking {}').format(id_string))
wizard_page = GetPage(PID)
if wizard_page.IsOkay():
task_list[id_string] = wizard_page.Get()
progress += 1
for task_check, id_string in other_checks:
wx.Yield()
prebuild_progress.Update(progress, GT(u'Testing for: {}').format(task_check.GetLabel()))
if task_check.GetValue():
task_list[id_string] = None
progress += 1
# Close progress dialog
wx.Yield()
prebuild_progress.Update(progress)
prebuild_progress.Destroy()
return (dbrerrno.SUCCESS, (task_list, build_path, filename))
except:
if save_dia:
save_dia.Destroy()
if prebuild_progress:
prebuild_progress.Destroy()
return (dbrerrno.EUNKNOWN, traceback.format_exc())
## TODO: Doxygen
def GetSaveData(self):
build_list = []
options = (
self.chk_md5,
self.chk_rmstage,
self.chk_lint,
)
for O in options:
if O.GetValue():
build_list.append(u'1')
else:
build_list.append(u'0')
if self.chk_strip.GetValue():
build_list.append(u'strip')
return u'<<BUILD>>\n{}\n<</BUILD>>'.format(u'\n'.join(build_list))
## Installs the built .deb package onto the system
#
# Uses the system's package installer:
# gdebi if available or dpkg
#
# Shows a success dialog if installed. Otherwise shows an
# error dialog.
# \param package
# \b \e unicode|str : Path to package to be installed
def InstallPackage(self, package):
system_installer = GetSystemInstaller()
if not system_installer:
ShowErrorDialog(
GT(u'Cannot install package'),
GT(u'A compatible package manager could not be found on the system'),
__name__,
warn=True
)
return
Logger.Info(__name__, GT(u'Attempting to install package: {}').format(package))
Logger.Info(__name__, GT(u'Installing with {}').format(system_installer))
install_cmd = (system_installer, package,)
wx.Yield()
# FIXME: Use ExecuteCommand here
install_output = subprocess.Popen(install_cmd)
# Command appears to not have been executed correctly
if install_output == None:
ShowErrorDialog(
GT(u'Could not install package: {}'),
GT(u'An unknown error occurred'),
__name__
)
return
# Command executed but did not return success code
if install_output.returncode:
err_details = (
GT(u'Process returned code {}').format(install_output.returncode),
GT(u'Command executed: {}').format(u' '.join(install_cmd)),
)
ShowErrorDialog(
GT(u'An error occurred during installation'),
u'\n'.join(err_details),
__name__
)
return
## TODO: Doxygen
def OnBuild(self, event=None):
# Build preparation
ret_code, build_prep = self.BuildPrep()
if ret_code == dbrerrno.ECNCLD:
return
if ret_code == dbrerrno.FEMPTY:
err_dia = DetailedMessageDialog(GetMainWindow(), GT(u'Cannot Continue'), ICON_EXCLAMATION,
text=u'{}\n{}'.format(GT(u'One of the required fields is empty:'), build_prep))
err_dia.ShowModal()
err_dia.Destroy()
return
if ret_code == dbrerrno.SUCCESS:
task_list, build_path, filename = build_prep
# Actual build
ret_code, result = self.Build(task_list, build_path, filename)
# FIXME: Check .deb package timestamp to confirm build success
if ret_code == dbrerrno.SUCCESS:
DetailedMessageDialog(GetMainWindow(), GT(u'Success'), ICON_INFORMATION,
text=GT(u'Package created successfully')).ShowModal()
# Installing the package
if FieldEnabled(self.chk_install) and self.chk_install.GetValue():
self.InstallPackage(result)
return
if result:
ShowErrorDialog(GT(u'Package build failed'), result)
else:
ShowErrorDialog(GT(u'Package build failed with unknown error'))
return
if build_prep:
ShowErrorDialog(GT(u'Build preparation failed'), build_prep)
else:
ShowErrorDialog(GT(u'Build preparation failed with unknown error'))
## TODO: Doxygen
#
# TODO: Show warning dialog that this could take a while
# TODO: Add cancel option to progress dialog
# FIXME: List should be cached so no need for re-scanning
def OnSetLintOverrides(self, event=None):
Logger.Debug(__name__, GT(u'Setting Lintian overrides...'))
lintian_tags_file = u'{}/data/lintian/tags'.format(PATH_app)
if not os.path.isfile(lintian_tags_file):
Logger.Error(__name__, u'Lintian tags file is missing: {}'.format(lintian_tags_file))
return False
lint_tags = RemoveEmptyLines(ReadFile(lintian_tags_file, split=True))
if lint_tags:
Logger.Debug(__name__, u'Lintian tags set')
# DEBUG: Start
if DebugEnabled() and len(lint_tags) > 50:
                print(u'  Reducing tag count to 50 ...')
lint_tags = lint_tags[:50]
Logger.Debug(__name__, u'Processing {} tags'.format(len(lint_tags)))
# DEBUG: End
tag_count = len(lint_tags)
def GetProgressMessage(message, count=tag_count):
return u'{} ({} {})'.format(message, count, GT(u'tags'))
progress = TimedProgressDialog(GetMainWindow(), GT(u'Building Tag List'),
GetProgressMessage(GT(u'Scanning default tags')))
progress.Start()
wx.Yield()
# Create the dialog
overrides_dialog = CheckListDialog(GetMainWindow(), title=GT(u'Lintian Overrides'),
allow_custom=True)
# FIXME: Needs progress dialog
overrides_dialog.InitCheckList(tuple(lint_tags))
progress.SetMessage(GetProgressMessage(GT(u'Setting selected overrides')))
for T in lint_tags:
if T in self.lint_overrides:
overrides_dialog.SetItemCheckedByLabel(T)
self.lint_overrides.remove(T)
progress.SetMessage(GetProgressMessage(GT(u'Adding custom tags'), len(self.lint_overrides)))
# Remaining tags should be custom entries
# FIXME:
if self.lint_overrides:
for T in self.lint_overrides:
overrides_dialog.AddItem(T, True)
progress.Stop()
if overrides_dialog.ShowModal() == wx.ID_OK:
# Remove old overrides
self.lint_overrides = []
for L in overrides_dialog.GetCheckedLabels():
Logger.Debug(__name__, GT(u'Adding Lintian override: {}').format(L))
self.lint_overrides.append(L)
return True
else:
Logger.Debug(__name__, u'Setting lintian tags failed')
return False
## TODO: Doxygen
#
# TODO: Use string names in project file but retain
# compatibility with older projects that use
# integer values.
def Set(self, data):
# ???: Redundant
self.Reset()
build_data = data.split(u'\n')
if GetExecutable(u'md5sum'):
try:
self.chk_md5.SetValue(int(build_data[0]))
except IndexError:
pass
try:
self.chk_rmstage.SetValue(int(build_data[1]))
except IndexError:
pass
if GetExecutable(u'lintian'):
try:
self.chk_lint.SetValue(int(build_data[2]))
except IndexError:
pass
self.chk_strip.SetValue(GetExecutable(u'strip') and u'strip' in build_data)
## TODO: Doxygen
def SetSummary(self, event=None):
pg_scripts = GetPage(pgid.SCRIPTS)
# Make sure the page is not destroyed so no error is thrown
if self:
# Set summary when "Build" page is shown
# Get the file count
files_total = GetPage(pgid.FILES).GetFileCount()
f = GT(u'File Count')
file_count = u'{}: {}'.format(f, files_total)
# Scripts to make
scripts_to_make = []
scripts = ((u'preinst', pg_scripts.chk_preinst),
(u'postinst', pg_scripts.chk_postinst),
(u'prerm', pg_scripts.chk_prerm),
(u'postrm', pg_scripts.chk_postrm))
for script in scripts:
if script[1].IsChecked():
scripts_to_make.append(script[0])
s = GT(u'Scripts')
if len(scripts_to_make):
scripts_to_make = u'{}: {}'.format(s, u', '.join(scripts_to_make))
else:
scripts_to_make = u'{}: 0'.format(s)
self.summary.SetValue(u'\n'.join((file_count, scripts_to_make)))
| 1.5 | 2 |
__main__.py | maelstromdat/YOSHI | 6 | 5415 | from YoshiViz import Gui
if __name__ == '__main__':
#file director
gui = Gui.Gui()
"""
report_generator.\
generate_pdf_report(fileDirectory, repositoryName, tempCommunityType)
"""
    # NOTE: repositoryName and tempCommunityType are only defined by the disabled
    # report-generation code above, so this print would raise a NameError if run:
    # print('the type of', repositoryName, 'is', tempCommunityType, '\n"check .\YoshiViz\output"')
| 1.84375 | 2 |
hpotter/src/lazy_init.py | LarsenClose/dr.hpotter | 1 | 5416 | ''' Wrap an __init__ function so that I don't have to assign all the
parameters to a self. variable. '''
# https://stackoverflow.com/questions/5048329/python-decorator-for-automatic-binding-init-arguments
import inspect
from functools import wraps
def lazy_init(init):
''' Create an annotation to assign all the parameters to a self.
variable. '''
arg_names = inspect.getfullargspec(init)[0]
# pylint: disable=E1101
@wraps(init)
def new_init(self, *args):
for name, value in zip(arg_names[1:], args):
setattr(self, name, value)
init(self, *args)
return new_init
| 3.359375 | 3 |
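# Illustration (not from the hpotter sources): a hypothetical class using the
# lazy_init decorator defined above — each positional constructor argument is
# bound to an attribute of the same name before the original __init__ runs.
class Connection:
    @lazy_init
    def __init__(self, host, port, timeout):
        pass  # no manual self.host = host, self.port = port, ...

conn = Connection('localhost', 8080, 30)
print(conn.host, conn.port, conn.timeout)  # -> localhost 8080 30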
main.py | technojam/MLian | 1 | 5417 | def register_feed():
import os
import cv2
path = '/UserImage'
cam = cv2.VideoCapture(0)
name=input("Name: ")
cv2.namedWindow("test")
img_counter = 0
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
else:
cv2.imshow("test", frame)
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k%256 == 32:
# SPACE pressed
# img_name = "opencv_frame_{}.png".format(img_counter)
cv2.imwrite(name + ".jpg", frame)
# print("{} written!".format(img_name))
print("Image Captured! Proceed...")
img_counter += 1
cam.release()
cv2.destroyAllWindows() | 3.046875 | 3 |
models/train.py | Hiwyl/keras_cnn_finetune | 1 | 5418 | # -*- encoding: utf-8 -*-
'''
@Author : lance
@Email : <EMAIL>
'''
import time
from model_cx.inceptionresnet import inceptionresnet
from model_cx.vgg19two import vgg19_all_lr
from model_cx.inceptionv3 import inceptionv3
from model_cx.densenet import densenet
from model_cx.nasnet import nasnet
from model_cx.merge import merge
from model_cx.bcnn import bilinearnet
from model_cx.resnet import ResNet50
from model_cx.mobilenetv2 import mobilenetv2
from model_cx.senet import senet
if __name__=="__main__":
classes = 1
epochs = 100
steps_per_epoch = 113
validation_steps = 48
shape=(224,224)
print("开始训练...")
start = time.time()
#
# try:
# print("densenet")
# densenet(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
# try:
# print("bcnn")
# bilinearnet(classes, epochs, steps_per_epoch, validation_steps, shape)
#
# except Exception as e:
# print(e)
# try:
# print("resnet")
# ResNet50(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
try:
print("merge")
merge(classes, epochs, steps_per_epoch, validation_steps, shape)
except Exception as e:
print(e)
# try:
# print("ince_res")
# inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
# # inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
# try:
# print("mobilenetv2")
# mobilenetv2(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
# try:
# print("inceptionv3")
# inceptionv3(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
# # inceptionv3(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
try:
print("nasnet")
nasnet(classes, epochs, steps_per_epoch, validation_steps, shape)
except Exception as e:
print(e)
try:
print("vgg19two")
vgg19_all_lr(classes, epochs, steps_per_epoch, validation_steps, shape)
except Exception as e:
print(e)
try:
print("senet")
        senet(classes, epochs, steps_per_epoch, validation_steps, (100,100))
except Exception as e:
print(e)
end = time.time()
print("ETA:", (end - start) / 3600) | 2.1875 | 2 |
src/probnum/randprocs/markov/integrator/_preconditioner.py | alpiges/probnum | 0 | 5419 | """Coordinate changes in state space models."""
import abc
try:
# cached_property is only available in Python >=3.8
from functools import cached_property
except ImportError:
from cached_property import cached_property
import numpy as np
import scipy.special # for vectorised factorial
from probnum import config, linops, randvars
def apply_precon(precon, rv):
# public (because it is needed in some integrator implementations),
# but not exposed to the 'randprocs' namespace
# (i.e. not imported in any __init__.py).
# There is no way of checking whether `rv` has its Cholesky factor computed already or not.
# Therefore, since we need to update the Cholesky factor for square-root filtering,
# we also update the Cholesky factor for non-square-root algorithms here,
# which implies additional cost.
# See Issues #319 and #329.
# When they are resolved, this function here will hopefully be superfluous.
new_mean = precon @ rv.mean
new_cov_cholesky = precon @ rv.cov_cholesky # precon is diagonal, so this is valid
new_cov = new_cov_cholesky @ new_cov_cholesky.T
return randvars.Normal(new_mean, new_cov, cov_cholesky=new_cov_cholesky)
class Preconditioner(abc.ABC):
"""Coordinate change transformations as preconditioners in state space models.
For some models, this makes the filtering and smoothing steps more numerically
stable.
"""
@abc.abstractmethod
def __call__(self, step) -> np.ndarray:
# if more than step is needed, add them into the signature in the future
raise NotImplementedError
@cached_property
def inverse(self) -> "Preconditioner":
raise NotImplementedError
class NordsieckLikeCoordinates(Preconditioner):
"""Nordsieck-like coordinates.
Similar to Nordsieck coordinates (which store the Taylor coefficients instead of the
derivatives), but better for ODE filtering and smoothing. Used in integrator-transitions, e.g. in
:class:`IntegratedWienerTransition`.
"""
def __init__(self, powers, scales, dimension):
# Clean way of assembling these coordinates cheaply,
# because the powers and scales of the inverse
# are better read off than inverted
self.powers = powers
self.scales = scales
self.dimension = dimension
@classmethod
def from_order(cls, order, dimension):
# used to conveniently initialise in the beginning
powers = np.arange(order, -1, -1)
scales = scipy.special.factorial(powers)
return cls(
powers=powers + 0.5,
scales=scales,
dimension=dimension,
)
def __call__(self, step):
scaling_vector = np.abs(step) ** self.powers / self.scales
if config.matrix_free:
return linops.Kronecker(
A=linops.Identity(self.dimension),
B=linops.Scaling(factors=scaling_vector),
)
return np.kron(np.eye(self.dimension), np.diag(scaling_vector))
@cached_property
def inverse(self) -> "NordsieckLikeCoordinates":
return NordsieckLikeCoordinates(
powers=-self.powers,
scales=1.0 / self.scales,
dimension=self.dimension,
)
| 2.109375 | 2 |
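# Illustration (independent of the probnum classes above): the diagonal scaling that
# NordsieckLikeCoordinates.from_order(order=2, ...) applies for a step h is
# |h| ** (powers + 0.5) / factorial(powers), computed here with plain NumPy.
import numpy as np
import scipy.special

order, h = 2, 0.1
powers = np.arange(order, -1, -1)         # [2, 1, 0]
scales = scipy.special.factorial(powers)  # [2., 1., 1.]
scaling_vector = np.abs(h) ** (powers + 0.5) / scales
print(scaling_vector)                     # approx. [0.00158, 0.03162, 0.31623]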
allauth/socialaccount/providers/linkedin/provider.py | mina-gaid/scp | 1 | 5420 | <filename>allauth/socialaccount/providers/linkedin/provider.py
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
from allauth.socialaccount import app_settings
class LinkedInAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('public-profile-url')
def get_avatar_url(self):
# try to return the higher res picture-urls::(original) first
try:
if self.account.extra_data.get('picture-urls', {}).get(
'picture-url'):
return self.account.extra_data.get('picture-urls', {}).get(
'picture-url')
        except Exception:
# if we can't get higher res for any reason, we'll just return the
# low res
pass
return self.account.extra_data.get('picture-url')
def to_str(self):
dflt = super(LinkedInAccount, self).to_str()
name = self.account.extra_data.get('name', dflt)
first_name = self.account.extra_data.get('first-name', None)
last_name = self.account.extra_data.get('last-name', None)
if first_name and last_name:
name = first_name + ' ' + last_name
return name
class LinkedInProvider(OAuthProvider):
id = 'linkedin'
name = 'LinkedIn'
account_class = LinkedInAccount
def get_default_scope(self):
scope = []
if app_settings.QUERY_EMAIL:
scope.append('r_emailaddress')
return scope
def get_profile_fields(self):
default_fields = ['id',
'first-name',
'last-name',
'email-address',
'picture-url',
'picture-urls::(original)',
# picture-urls::(original) is higher res
'public-profile-url']
fields = self.get_settings().get('PROFILE_FIELDS', default_fields)
return fields
def extract_uid(self, data):
return data['id']
def extract_common_fields(self, data):
return dict(email=data.get('email-address'),
first_name=data.get('first-name'),
last_name=data.get('last-name'))
providers.registry.register(LinkedInProvider)
| 2.484375 | 2 |
game2048/myNew.py | CCTQL/2048-api | 0 | 5421 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
import time
import pandas as pd
import numpy as np
import csv
batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4
class CCRNN(nn.Module):
def __init__(self):
        # call the parent constructor
super(CCRNN, self).__init__()
self.ccLSTM = nn.LSTM(
input_size=4,
hidden_size=128,
num_layers=4,
bidirectional=True,
batch_first=True
)
self.ccCNN22 = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=2,
stride=2,
padding=0
)
self.ccCNN14 = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=(1, 4),
stride=1,
padding=0
)
self.ccCNN41 = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=(4, 1),
stride=1,
padding=0
)
self.CNN22toFC = nn.Linear(4, 64)
self.CNN41toFC = nn.Linear(4, 32)
self.CNN14toFC = nn.Linear(4, 32)
self.LSTMtoFC = nn.Linear(256, 128)
self.FCtoOut = nn.Linear(256, 4)
def forward(self, x):
LSTM_out, (h_n, c_n) = self.ccLSTM(x, None)
CNN_in = torch.unsqueeze(x[:, 0:4, :], 1)
CNN_out22 = self.ccCNN22(CNN_in)
CNN_out41 = self.ccCNN41(CNN_in)
CNN_out14 = self.ccCNN14(CNN_in)
CNN22_reshape = CNN_out22.view(-1, 4)
CNN14_reshape = CNN_out41.view(-1, 4)
CNN41_reshape = CNN_out14.view(-1, 4)
CNN22toFC = self.CNN22toFC(CNN22_reshape)
CNN14toFC = self.CNN14toFC(CNN14_reshape)
CNN41toFC = self.CNN41toFC(CNN41_reshape)
LSTMtoFC = self.LSTMtoFC(LSTM_out[:, -1, :])
CNNandLSTM = torch.cat((CNN22toFC, CNN41toFC, CNN14toFC, LSTMtoFC), 1)
out = self.FCtoOut(CNNandLSTM)
return out
#------------------ load data -----------------------------
csv_data = pd.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:,0:16]
# X = np.log2(X)
X = torch.FloatTensor(board_data)
X = np.int64(board_data)
# concatenate the board with its transpose
X = np.reshape(X, (-1,4,4))
XT = X.transpose(0,2,1)
X = np.concatenate((X,XT),axis=1)
print(X.shape)
direction_data = csv_data[:,16]
Y = np.int64(direction_data)
#-------------------------------------------------------
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2,shuffle=False)
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)
Y_train = torch.LongTensor(Y_train)
Y_test = torch.LongTensor(Y_test)
train_dataset = torch.utils.data.TensorDataset(X_train,Y_train)
# test_dataset = torch.utils.data.TensorDataset(X_test,Y_test)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True
)
# test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
# batch_size=batch_size,
# shuffle=False
# )
batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4
#------------------ load the data -----------------------------
csv_data = pd.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:,0:16]
# X = np.log2(X)
X = torch.FloatTensor(board_data)
X = np.int64(board_data)
# concatenate the board with its transpose
X = np.reshape(X, (-1,4,4))
XT = X.transpose(0,2,1)
X = np.concatenate((X,XT),axis=1)
print(X.shape)
direction_data = csv_data[:,16]
Y = np.int64(direction_data)
model = CCRNN()
model = model.cuda()
optimizer = optim.Adam(model.parameters(), lr = 0.001)
def train(epoch):
for batch_idx, (data, target) in enumerate(train_loader):
data, target = Variable(data).cuda(), Variable(target).cuda()
data = data/11.0
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % 50 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\t Loss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
    torch.save(model, 'rnn_model_' + str(epoch) + '.pkl')
if __name__ == '__main__':
for epoch in range(0, NUM_EPOCHS):
train(epoch) | 2.6875 | 3 |
HW2/dbsys-hw2/Database.py | yliu120/dbsystem | 0 | 5422 | import json, io, os, os.path
from Catalog.Schema import DBSchema, DBSchemaEncoder, DBSchemaDecoder
from Query.Plan import PlanBuilder
from Storage.StorageEngine import StorageEngine
class Database:
"""
A top-level database engine class.
For now, this primarily maintains a simple catalog,
mapping relation names to schema objects.
Also, it provides the ability to construct query
plan objects, as well as wrapping the storage layer methods.
"""
checkpointEncoding = "latin1"
checkpointFile = "db.catalog"
def __init__(self, **kwargs):
other = kwargs.get("other", None)
if other:
self.fromOther(other)
else:
storageArgs = {k:v for (k,v) in kwargs.items() \
if k in ["pageSize", "poolSize", "dataDir", "indexDir"]}
self.relationMap = kwargs.get("relations", {})
self.defaultPageSize = kwargs.get("pageSize", io.DEFAULT_BUFFER_SIZE)
self.storage = kwargs.get("storage", StorageEngine(**storageArgs))
checkpointFound = os.path.exists(os.path.join(self.storage.fileMgr.dataDir, Database.checkpointFile))
restoring = "restore" in kwargs
if not restoring and checkpointFound:
self.restore()
def fromOther(self, other):
self.relationMap = other.relationMap
self.defaultPageSize = other.defaultPageSize
self.storage = other.storage
def close(self):
if self.storage:
self.storage.close()
# Database internal components
def storageEngine(self):
return self.storage
def bufferPool(self):
return self.storage.bufferPool if self.storage else None
def fileManager(self):
return self.storage.fileMgr if self.storage else None
# User API
# Catalog methods
def relations(self):
return self.relationMap.keys()
def hasRelation(self, relationName):
return relationName in self.relationMap
def relationSchema(self, relationName):
if relationName in self.relationMap:
return self.relationMap[relationName]
# DDL statements
def createRelation(self, relationName, relationFields):
if relationName not in self.relationMap:
schema = DBSchema(relationName, relationFields)
self.relationMap[relationName] = schema
self.storage.createRelation(relationName, schema)
self.checkpoint()
else:
raise ValueError("Relation '" + relationName + "' already exists")
def removeRelation(self, relationName):
if relationName in self.relationMap:
del self.relationMap[relationName]
self.storage.removeRelation(relationName)
self.checkpoint()
else:
raise ValueError("No relation '" + relationName + "' found in database")
# DML statements
# Returns a tuple id for the newly inserted data.
def insertTuple(self, relationName, tupleData):
if relationName in self.relationMap:
return self.storage.insertTuple(relationName, tupleData)
else:
raise ValueError("Unknown relation '" + relationName + "' while inserting a tuple")
def deleteTuple(self, tupleId):
self.storage.deleteTuple(tupleId)
def updateTuple(self, tupleId, tupleData):
self.storage.updateTuple(tupleId, tupleData)
# Queries
# Returns an empty query builder that can access the current database.
def query(self):
return PlanBuilder(db=self)
# Returns an iterable for query results, after initializing the given plan.
def processQuery(self, queryPlan):
return queryPlan.prepare(self)
# Save the database internals to the data directory.
def checkpoint(self):
if self.storage:
dbcPath = os.path.join(self.storage.fileMgr.dataDir, Database.checkpointFile)
with open(dbcPath, 'w', encoding=Database.checkpointEncoding) as f:
f.write(self.pack())
# Load relations and schema from an existing data directory.
def restore(self):
if self.storage:
dbcPath = os.path.join(self.storage.fileMgr.dataDir, Database.checkpointFile)
with open(dbcPath, 'r', encoding=Database.checkpointEncoding) as f:
other = Database.unpack(f.read(), self.storage)
self.fromOther(other)
# Database schema catalog serialization
def pack(self):
if self.relationMap is not None:
return json.dumps([self.relationMap, self.defaultPageSize], cls=DBSchemaEncoder)
@classmethod
def unpack(cls, buffer, storageEngine):
(relationMap, pageSize) = json.loads(buffer, cls=DBSchemaDecoder)
return cls(relations=relationMap, pageSize=pageSize, storage=storageEngine, restore=True)
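# Illustrative usage sketch (not part of the original assignment code; the
# relation name, fields and PlanBuilder calls below are assumptions based on
# the methods defined above):
#
#   db = Database(dataDir="./data")
#   db.createRelation("employee", [("id", "int"), ("age", "int")])
#   schema = db.relationSchema("employee")
#   tid = db.insertTuple("employee", schema.pack(schema.instantiate(1, 25)))
#   plan = db.query().fromTable("employee").finalize()
#   results = db.processQuery(plan)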
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2.5625 | 3 |
tests/test_arr_add_value.py | dboyliao/TaipeiPy-pybind11-buffer-array | 1 | 5423 | <reponame>dboyliao/TaipeiPy-pybind11-buffer-array
import numpy as np
import mylib
def test_arr_add_value():
for _ in range(10):
shape = np.random.randint(1, 10, size=np.random.randint(3, 10)).tolist()
in_arr = np.random.rand(*shape).astype(np.double)
ok = np.allclose(mylib.array_add_value(in_arr, np.pi), in_arr + np.pi)
if not ok:
raise ValueError("incorrect result")
| 2.28125 | 2 |
moderngl_window/resources/data.py | DavideRuzza/moderngl-window | 142 | 5424 | <reponame>DavideRuzza/moderngl-window
"""
Registry general data files
"""
from typing import Any
from moderngl_window.resources.base import BaseRegistry
from moderngl_window.meta import DataDescription
class DataFiles(BaseRegistry):
"""Registry for requested data files"""
settings_attr = "DATA_LOADERS"
def load(self, meta: DataDescription) -> Any:
"""Load data file with the configured loaders.
Args:
meta (:py:class:`~moderngl_window.meta.data.DataDescription`): the resource description
Returns:
Any: The loaded resource
"""
return super().load(meta)
data = DataFiles()
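# Illustrative usage sketch (the path and kind below are assumptions, and the
# resource directories must already be registered in the window settings):
#
#   from moderngl_window.meta import DataDescription
#   settings_dict = data.load(DataDescription(path="config/settings.json", kind="json"))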
| 1.828125 | 2 |
tests/test_units/test_mapper_str.py | frewsxcv/routes | 1 | 5425 | import unittest
from routes import Mapper
class TestMapperStr(unittest.TestCase):
def test_str(self):
m = Mapper()
m.connect('/{controller}/{action}')
m.connect('entries', '/entries', controller='entry', action='index')
m.connect('entry', '/entries/{id}', controller='entry', action='show')
expected = """\
Route name Methods Path
/{controller}/{action}
entries /entries
entry /entries/{id}"""
for expected_line, actual_line in zip(expected.splitlines(), str(m).splitlines()):
assert expected_line == actual_line.rstrip()
| 3.375 | 3 |
quarkchain/tools/config_slave.py | HAOYUatHZ/pyquarkchain | 1 | 5426 | <gh_stars>1-10
"""
python config_slave.py 127.0.0.1 38000 38006 127.0.0.2 18999 18002
will generate 4 slave server configs accordingly. will be used in deployment automation to configure a cluster.
usage: python config_slave.py <host1> <port1> <port2> <host2> <port3> ...
"""
import argparse
import collections
import json
import os
FILE = "../../testnet/2/cluster_config_template.json"
if "QKC_CONFIG" in os.environ:
FILE = os.environ["QKC_CONFIG"]
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"hostports",
nargs="+",
metavar="hostports",
help="Host and ports for slave config",
)
args = parser.parse_args()
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
###############
# parse hosts and ports to form a slave list
###############
host_port_mapping = collections.defaultdict(list)
last_host = None
for host_or_port in args.hostports: # type: str
if not host_or_port.isdigit(): # host
last_host = host_or_port
else: # port
host_port_mapping[last_host].append(host_or_port)
assert None not in host_port_mapping
slave_num = sum(len(port_list) for port_list in host_port_mapping.values())
# make sure number of slaves is power of 2
assert slave_num > 0 and (slave_num & (slave_num - 1) == 0)
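    # For example, the invocation from the module docstring,
    #   python config_slave.py 127.0.0.1 38000 38006 127.0.0.2 18999 18002
    # produces host_port_mapping == {"127.0.0.1": ["38000", "38006"],
    # "127.0.0.2": ["18999", "18002"]} and slave_num == 4, which satisfies the
    # power-of-two check above.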
slave_servers, i = [], 0
for host, port_list in host_port_mapping.items():
for port in port_list:
s = {
"HOST": host,
"PORT": int(port),
"ID": "S%d" % i,
"CHAIN_MASK_LIST": [i | slave_num],
}
slave_servers.append(s)
i += 1
###############
# read config file and substitute with updated slave config
###############
with open(FILE, "r+") as f:
parsed_config = json.load(f)
parsed_config["SLAVE_LIST"] = slave_servers
f.seek(0)
f.truncate()
f.write(json.dumps(parsed_config, indent=4))
if __name__ == "__main__":
main()
| 2.484375 | 2 |
python-function-files-dictionaries/week4-assignment1.py | MauMendes/python3-programming-specialization | 0 | 5427 | <filename>python-function-files-dictionaries/week4-assignment1.py
#1) Write a function, sublist, that takes in a list of numbers as the parameter. In the function, use a while loop to return a sublist of the input list.
# The sublist should contain the same values of the original list up until it reaches the number 5 (it should not contain the number 5).
def sublist(input_lst):
out_lst = list()
number = 0
i = 0
print(input_lst)
print(len(input_lst))
length = len(input_lst)
while i<length:
number = input_lst[i]
i+=1
if number==5: break
else : out_lst.append(number)
print(out_lst)
return out_lst
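# For example (hypothetical call): sublist([1, 2, 3, 5, 6, 7]) returns
# [1, 2, 3], because the loop stops as soon as it reads the value 5.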
#2) Write a function called check_nums that takes a list as its parameter, and contains a while loop that only stops once the element of the
# list is the number 7. What is returned is a list of all of the numbers up until it reaches 7.def check_nums(input_lst):
def check_nums(input_lst):
out_lst = list()
number = 0
i = 0
print(input_lst)
print(len(input_lst))
length = len(input_lst)
while i<length:
number = input_lst[i]
i+=1
if number==7: break
else : out_lst.append(number)
print(out_lst)
return out_lst
#3) Write a function, sublist, that takes in a list of strings as the parameter. In the function, use a while loop to return a sublist of the input list.
# The sublist should contain the same values of the original list up until it reaches the string “STOP” (it should not contain the string “STOP”).
def sublist(in_lst):
out_list = list()
str = ""
i = 0
while str!="STOP":
str = in_lst[i]
i+=1
if str=="STOP": break
else: out_list.append(str)
return out_list
#4) Write a function called stop_at_z that iterates through a list of strings. Using a while loop, append each string to a new list until the string that
# appears is “z”. The function should return the new list.
def stop_at_z(in_lst):
out_list = list()
str = ""
i = 0
while str!="z":
str = in_lst[i]
i+=1
if str=="z": break
else: out_list.append(str)
return out_list
#5) Below is a for loop that works. Underneath the for loop, rewrite the problem so that it does the same thing, but using a while loop instead of a for loop.
# Assign the accumulated total in the while loop code to the variable sum2. Once complete, sum2 should equal sum1.
lst = [65, 78, 21, 33]
length = len(lst)
i = 0
sum2 = 0
while i<length:
sum2 += lst[i]
i+=1
#6) Challenge: Write a function called beginning that takes a list as input and contains a while loop that only stops once the element of the list is the string ‘bye’.
# What is returned is a list that contains up to the first 10 strings, regardless of where the loop stops. (i.e., if it stops on the 32nd element, the first 10 are
# returned. If “bye” is the 5th element, the first 4 are returned.) If you want to make this even more of a challenge, do this without slicing
def beginning(in_list):
length = len(in_list)
out_lst = list()
i = 0
str = ""
while i<length:
str = in_list[i]
i+=1
if str=="bye" or i>10:
break
out_lst.append(str)
return out_lst
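# For example (hypothetical call): beginning(["a", "b", "bye", "c"]) returns
# ["a", "b"], while a list without "bye" yields at most its first 10 strings.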
| 4.21875 | 4 |
saas/backend/apps/group/views.py | Canway-shiisa/bk-iam-saas | 0 | 5428 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from functools import wraps
from typing import List
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
from drf_yasg.utils import swagger_auto_schema
from pydantic.tools import parse_obj_as
from rest_framework import serializers, status, views
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, mixins
from backend.account.permissions import RolePermission, role_perm_class
from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ
from backend.apps.group import tasks # noqa
from backend.apps.group.models import Group
from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ
from backend.apps.template.models import PermTemplatePolicyAuthorized
from backend.audit.audit import audit_context_setter, view_audit_decorator
from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean
from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz
from backend.biz.policy_tag import ConditionTagBean, ConditionTagBiz
from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker
from backend.biz.template import TemplateBiz
from backend.common.error_codes import error_codes
from backend.common.filters import NoCheckModelFilterBackend
from backend.common.serializers import SystemQuerySLZ
from backend.common.time import PERMANENT_SECONDS
from backend.service.constants import PermissionCodeEnum, RoleType, SubjectType
from backend.service.models import Subject
from backend.trans.group import GroupTrans
from .audit import (
GroupCreateAuditProvider,
GroupDeleteAuditProvider,
GroupMemberCreateAuditProvider,
GroupMemberDeleteAuditProvider,
GroupMemberRenewAuditProvider,
GroupPolicyDeleteAuditProvider,
GroupPolicyUpdateAuditProvider,
GroupTemplateCreateAuditProvider,
GroupTransferAuditProvider,
GroupUpdateAuditProvider,
)
from .constants import OperateEnum
from .filters import GroupFilter, GroupTemplateSystemFilter
from .serializers import (
GroupAddMemberSLZ,
GroupAuthoriedConditionSLZ,
GroupAuthorizationSLZ,
GroupCreateSLZ,
GroupDeleteMemberSLZ,
GroupIdSLZ,
GroupMemberUpdateExpiredAtSLZ,
GroupPolicyUpdateSLZ,
GroupSLZ,
GroupTemplateDetailSchemaSLZ,
GroupTemplateDetailSLZ,
GroupTemplateSchemaSLZ,
GroupTemplateSLZ,
GroupTransferSLZ,
GroupUpdateSLZ,
MemberSLZ,
SearchMemberSLZ,
)
permission_logger = logging.getLogger("permission")
def check_readonly_group(operation):
    """Ensure the target user group is not read-only before allowing the operation."""
def decorate(func):
@wraps(func)
def wrapper(view, request, *args, **kwargs):
group = view.get_object()
readonly = group.readonly
if readonly:
raise error_codes.FORBIDDEN.format(
message=_("只读用户组({})无法进行({})操作!").format(group.id, operation), replace=True
)
response = func(view, request, *args, **kwargs)
return response
return wrapper
return decorate
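# Illustrative note (no new framework code): the decorator factory above is
# applied to the viewset methods further down, e.g.
#   @check_readonly_group(operation=OperateEnum.GROUP_UPDATE.label)
#   def update(self, request, *args, **kwargs): ...
# so requests against a read-only group are rejected with a FORBIDDEN error
# before the wrapped method runs.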
class GroupQueryMixin:
def get_queryset(self):
request = self.request
return RoleListQuery(request.role, request.user).query_group()
class GroupPermissionMixin:
def check_object_permissions(self, request, obj):
if not RoleObjectRelationChecker(request.role).check_group(obj):
self.permission_denied(request, message=f"{request.role.type} role can not access group {obj.id}")
class GroupViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {
"create": PermissionCodeEnum.MANAGE_GROUP.value,
"update": PermissionCodeEnum.MANAGE_GROUP.value,
"destroy": PermissionCodeEnum.MANAGE_GROUP.value,
}
queryset = Group.objects.all()
serializer_class = GroupSLZ
filterset_class = GroupFilter
lookup_field = "id"
group_biz = GroupBiz()
group_check_biz = GroupCheckBiz()
role_biz = RoleBiz()
group_trans = GroupTrans()
@swagger_auto_schema(
operation_description="创建用户组",
request_body=GroupCreateSLZ(label="用户组"),
responses={status.HTTP_201_CREATED: GroupIdSLZ(label="用户组ID")},
tags=["group"],
)
@view_audit_decorator(GroupCreateAuditProvider)
def create(self, request, *args, **kwargs):
"""
        Create a user group
"""
serializer = GroupCreateSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
user_id = request.user.username
data = serializer.validated_data
        # the group name must be unique within the role
self.group_check_biz.check_role_group_name_unique(request.role.id, data["name"])
        # check whether the number of groups in the role would exceed the limit
        number_of_new_group = 1  # the API creates a single group per call (no batch support), so the new-group count is 1
self.group_check_biz.check_role_group_limit(request.role, number_of_new_group)
        # check that the members fall within the role's authorization scope
members = parse_obj_as(List[Subject], data["members"])
self.group_check_biz.check_role_subject_scope(request.role, members)
group = self.group_biz.create_and_add_members(
request.role.id, data["name"], data["description"], user_id, members, data["expired_at"]
)
        # use a long-running task to grant multiple templates at the same time
if data["templates"]:
templates = self.group_trans.from_group_grant_data(data["templates"])
self.group_biz.grant(request.role, group, templates)
        # write the audit context
audit_context_setter(group=group)
return Response({"id": group.id}, status=status.HTTP_201_CREATED)
def get_queryset(self):
request = self.request
role = request.role
username = request.user.username
filter_role_id = request.query_params.get("role_id")
        # if the current role is staff and a filter role_id is provided
if role.type == RoleType.STAFF.value and filter_role_id:
            # check whether the user is within the authorization scope of that role
filter_role = self.role_biz.get_role_scope_include_user(filter_role_id, username)
if not filter_role:
return Group.objects.none()
            # return the group list of that role
return RoleListQuery(filter_role, request.user).query_group()
return RoleListQuery(role, request.user).query_group()
@swagger_auto_schema(
operation_description="用户组列表",
responses={status.HTTP_200_OK: GroupSLZ(label="用户组", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
@swagger_auto_schema(
operation_description="用户组详情",
responses={status.HTTP_200_OK: GroupSLZ(label="用户组")},
tags=["group"],
)
def retrieve(self, request, *args, **kwargs):
return super().retrieve(request, *args, **kwargs)
@swagger_auto_schema(
operation_description="修改用户组",
request_body=GroupUpdateSLZ(label="用户组"),
responses={status.HTTP_200_OK: GroupUpdateSLZ(label="用户组")},
tags=["group"],
)
@view_audit_decorator(GroupUpdateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_UPDATE.label)
def update(self, request, *args, **kwargs):
group = self.get_object()
serializer = GroupUpdateSLZ(group, data=request.data)
serializer.is_valid(raise_exception=True)
user_id = request.user.username
data = serializer.validated_data
        # the group name must be unique within the role
self.group_check_biz.check_role_group_name_unique(request.role.id, data["name"], group.id)
group = self.group_biz.update(group, data["name"], data["description"], user_id)
        # write the audit context
audit_context_setter(group=group)
return Response(serializer.data)
@swagger_auto_schema(
operation_description="删除用户组",
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupDeleteAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_DELETE.label)
def destroy(self, request, *args, **kwargs):
group = self.get_object()
self.group_biz.delete(group.id)
        # write the audit context
audit_context_setter(group=group)
return Response({})
class GroupMemberViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {
"list": PermissionCodeEnum.MANAGE_GROUP.value,
"create": PermissionCodeEnum.MANAGE_GROUP.value,
"destroy": PermissionCodeEnum.MANAGE_GROUP.value,
}
queryset = Group.objects.all()
lookup_field = "id"
biz = GroupBiz()
group_check_biz = GroupCheckBiz()
@swagger_auto_schema(
operation_description="用户组成员列表",
query_serializer=SearchMemberSLZ(label="keyword"),
responses={status.HTTP_200_OK: MemberSLZ(label="成员")},
tags=["group"],
)
def list(self, request, *args, **kwargs):
group = get_object_or_404(self.queryset, pk=kwargs["id"])
        # permission check
checker = RoleObjectRelationChecker(request.role)
if not checker.check_group(group):
raise error_codes.FORBIDDEN.format(message=_("用户组({})不在当前用户身份可访问的范围内").format(group.id), replace=True)
if request.query_params.get("keyword"):
slz = SearchMemberSLZ(data=request.query_params)
slz.is_valid(raise_exception=True)
keyword = slz.validated_data["keyword"].lower()
group_members = self.biz.search_member_by_keyword(group.id, keyword)
return Response({"results": [one.dict() for one in group_members]})
pagination = LimitOffsetPagination()
limit = pagination.get_limit(request)
offset = pagination.get_offset(request)
count, group_members = self.biz.list_paging_group_member(group.id, limit, offset)
return Response({"count": count, "results": [one.dict() for one in group_members]})
@swagger_auto_schema(
operation_description="用户组添加成员",
request_body=GroupAddMemberSLZ(label="成员"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupMemberCreateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_MEMBER_CREATE.label)
def create(self, request, *args, **kwargs):
serializer = GroupAddMemberSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
members_data = data["members"]
expired_at = data["expired_at"]
        # convert the member dicts into Subject objects and de-duplicate
members = list(set(parse_obj_as(List[Subject], members_data)))
        # check that the members fall within the role's authorization scope
self.group_check_biz.check_role_subject_scope(request.role, members)
self.group_check_biz.check_member_count(group.id, len(members))
permission_logger.info("group %s add members %s by user %s", group.id, members, request.user.username)
        # add the members
self.biz.add_members(group.id, members, expired_at)
        # write the audit context
audit_context_setter(group=group, members=[m.dict() for m in members])
return Response({}, status=status.HTTP_201_CREATED)
@swagger_auto_schema(
operation_description="用户组删除成员",
request_body=GroupDeleteMemberSLZ(label="成员"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupMemberDeleteAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_MEMBER_DELETE.label)
def destroy(self, request, *args, **kwargs):
serializer = GroupDeleteMemberSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
permission_logger.info(
"group %s delete members %s by user %s", group.id, data["members"], request.user.username
)
self.biz.remove_members(str(group.id), parse_obj_as(List[Subject], data["members"]))
        # write the audit context
audit_context_setter(group=group, members=data["members"])
return Response({})
class GroupMemberUpdateExpiredAtViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [role_perm_class(PermissionCodeEnum.MANAGE_GROUP.value)]
queryset = Group.objects.all()
lookup_field = "id"
# service
group_biz = GroupBiz()
@swagger_auto_schema(
operation_description="用户组成员续期",
request_body=GroupMemberUpdateExpiredAtSLZ(label="成员"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupMemberRenewAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_MEMBER_RENEW.label)
def create(self, request, *args, **kwargs):
serializer = GroupMemberUpdateExpiredAtSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
permission_logger.info(
"group %s update members %s expired_at by user %s", group.id, data["members"], request.user.username
)
for m in data["members"]:
m["policy_expired_at"] = m.pop("expired_at")
self.group_biz.update_members_expired_at(
group.id, parse_obj_as(List[GroupMemberExpiredAtBean], data["members"])
)
        # write the audit context
audit_context_setter(group=group, members=data["members"])
return Response({})
class GroupTemplateViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {"create": PermissionCodeEnum.MANAGE_GROUP.value}
    pagination_class = None  # drop the limit/offset params from the swagger docs
queryset = Group.objects.all()
filterset_class = GroupTemplateSystemFilter
filter_backends = [NoCheckModelFilterBackend]
lookup_field = "id"
template_biz = TemplateBiz()
@swagger_auto_schema(
operation_description="用户组拥有的权限模板列表",
responses={status.HTTP_200_OK: GroupTemplateSchemaSLZ(label="权限模板", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
group = get_object_or_404(self.queryset, pk=kwargs["id"])
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
queryset = PermTemplatePolicyAuthorized.objects.filter_by_subject(subject).defer("_data")
queryset = self.filter_queryset(queryset)
return Response(GroupTemplateSLZ(queryset, many=True).data)
@swagger_auto_schema(
operation_description="用户组权限模板授权信息",
responses={status.HTTP_200_OK: GroupTemplateDetailSchemaSLZ(label="授权信息")},
tags=["group"],
)
def retrieve(self, request, *args, **kwargs):
group = get_object_or_404(self.queryset, pk=kwargs["id"])
template_id = kwargs["template_id"]
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
authorized_template = PermTemplatePolicyAuthorized.objects.get_by_subject_template(subject, int(template_id))
return Response(GroupTemplateDetailSLZ(authorized_template).data)
class GroupPolicyViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {
"create": PermissionCodeEnum.MANAGE_GROUP.value,
"destroy": PermissionCodeEnum.MANAGE_GROUP.value,
"update": PermissionCodeEnum.MANAGE_GROUP.value,
}
    pagination_class = None  # drop the limit/offset params from the swagger docs
queryset = Group.objects.all()
lookup_field = "id"
policy_query_biz = PolicyQueryBiz()
policy_operation_biz = PolicyOperationBiz()
group_biz = GroupBiz()
group_trans = GroupTrans()
@swagger_auto_schema(
operation_description="用户组添加权限",
request_body=GroupAuthorizationSLZ(label="授权信息"),
responses={status.HTTP_201_CREATED: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupTemplateCreateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_POLICY_CREATE.label)
def create(self, request, *args, **kwargs):
serializer = GroupAuthorizationSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
templates = self.group_trans.from_group_grant_data(data["templates"])
self.group_biz.grant(request.role, group, templates)
        # write the audit context
audit_context_setter(
group=group,
templates=[{"system_id": t["system_id"], "template_id": t["template_id"]} for t in data["templates"]],
)
return Response({}, status=status.HTTP_201_CREATED)
@swagger_auto_schema(
operation_description="用户组自定义权限列表",
query_serializer=SystemQuerySLZ,
responses={status.HTTP_200_OK: PolicySLZ(label="策略", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
slz = SystemQuerySLZ(data=request.query_params)
slz.is_valid(raise_exception=True)
system_id = slz.validated_data["system_id"]
group = get_object_or_404(self.queryset, pk=kwargs["id"])
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
policies = self.policy_query_biz.list_by_subject(system_id, subject)
# ResourceNameAutoUpdate
updated_policies = self.policy_operation_biz.update_due_to_renamed_resource(system_id, subject, policies)
return Response([p.dict() for p in updated_policies])
@swagger_auto_schema(
operation_description="用户组删除自定义权限",
request_body=PolicyDeleteSLZ(label="ids"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupPolicyDeleteAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_POLICY_DELETE.label)
def destroy(self, request, *args, **kwargs):
slz = PolicyDeleteSLZ(data=request.data)
slz.is_valid(raise_exception=True)
system_id = slz.validated_data["system_id"]
ids = slz.validated_data["ids"]
group = self.get_object()
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
permission_logger.info(
"subject type=%s, id=%s policy deleted by user %s", subject.type, subject.id, request.user.username
)
policy_list = self.policy_query_biz.query_policy_list_by_policy_ids(system_id, subject, ids)
        # delete the policies
self.policy_operation_biz.delete_by_ids(system_id, subject, ids)
        # write the audit context
audit_context_setter(group=group, system_id=system_id, policies=policy_list.policies)
return Response()
@swagger_auto_schema(
operation_description="用户组权限修改",
request_body=GroupPolicyUpdateSLZ(label="修改策略"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupPolicyUpdateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_POLICY_UPDATE.label)
def update(self, request, *args, **kwargs):
group = self.get_object()
slz = GroupPolicyUpdateSLZ(data=request.data)
slz.is_valid(raise_exception=True)
data = slz.validated_data
system_id = data["system_id"]
template_id = data["template_id"]
policies = [PolicyBean(expired_at=PERMANENT_SECONDS, **action) for action in data["actions"]]
self.group_biz.update_policies(request.role, group.id, system_id, template_id, policies)
        # write the audit context
audit_context_setter(group=group, system_id=system_id, template_id=template_id, policies=policies)
return Response({})
class GroupSystemViewSet(GenericViewSet):
    pagination_class = None  # drop the limit/offset params from the swagger docs
queryset = Group.objects.all()
lookup_field = "id"
biz = GroupBiz()
@swagger_auto_schema(
operation_description="用户组有权限的所有系统列表",
responses={status.HTTP_200_OK: PolicySystemSLZ(label="系统", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
group = self.get_object()
data = self.biz.list_system_counter(group.id)
return Response([one.dict() for one in data])
class GroupTransferView(views.APIView):
"""
    Transfer user groups out to another role
"""
permission_classes = [role_perm_class(PermissionCodeEnum.TRANSFER_GROUP.value)]
role_biz = RoleBiz()
@swagger_auto_schema(
operation_description="用户组批量转出",
request_body=GroupTransferSLZ(label="用户转移"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupTransferAuditProvider)
def post(self, request, *args, **kwargs):
slz = GroupTransferSLZ(data=request.data, context={"role": request.role})
slz.is_valid(raise_exception=True)
group_ids = slz.validated_data["group_ids"]
role_id = slz.validated_data["role_id"]
self.role_biz.transfer_groups_role(group_ids, role_id)
audit_context_setter(group_ids=group_ids, role_id=role_id)
return Response({})
class GroupTemplateConditionCompareView(GroupPermissionMixin, GenericViewSet):
condition_biz = ConditionTagBiz()
template_biz = TemplateBiz()
queryset = Group.objects.all()
lookup_field = "id"
@swagger_auto_schema(
operation_description="权限模板操作条件对比",
request_body=GroupAuthoriedConditionSLZ(label="操作条件"),
responses={status.HTTP_200_OK: ConditionTagSLZ(label="条件差异", many=True)},
tags=["group"],
)
def create(self, request, *args, **kwargs):
serializer = GroupAuthoriedConditionSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
group = self.get_object()
action_id = data["action_id"]
resource_group_id = data["resource_group_id"]
related_resource_type = data["related_resource_type"]
new_condition = parse_obj_as(List[ConditionTagBean], related_resource_type["condition"])
        # find the matching action and resource-type conditions in the template data
template_id = kwargs["template_id"]
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
authorized_template = PermTemplatePolicyAuthorized.objects.get_by_subject_template(subject, int(template_id))
for action in authorized_template.data["actions"]:
policy = PolicyBean.parse_obj(action)
            # locate the corresponding action
if policy.action_id == action_id:
                # within the action, get the entry for this resource type
related_resource_type = policy.get_related_resource_type(
resource_group_id, related_resource_type["system_id"], related_resource_type["type"]
)
old_condition = related_resource_type.condition if related_resource_type else []
                # compare the group's existing conditions with the conditions submitted by the user
conditions = self.condition_biz.compare_and_tag(
new_condition, parse_obj_as(List[ConditionTagBean], old_condition), is_template=True
)
return Response([c.dict() for c in conditions])
raise error_codes.VALIDATE_ERROR.format(_("模板: {} 没有操作: {} 的权限").format(template_id, action_id))
class GroupCustomPolicyConditionCompareView(GroupPermissionMixin, GenericViewSet):
policy_biz = PolicyQueryBiz()
condition_biz = ConditionTagBiz()
queryset = Group.objects.all()
lookup_field = "id"
@swagger_auto_schema(
operation_description="条件差异对比",
request_body=ConditionCompareSLZ(label="资源条件"),
responses={status.HTTP_200_OK: ConditionTagSLZ(label="条件差异", many=True)},
tags=["group"],
)
def create(self, request, *args, **kwargs):
serializer = ConditionCompareSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
group = self.get_object()
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
        # 1. query the policy's conditions
related_resource_type = data["related_resource_type"]
old_condition = self.policy_biz.get_policy_resource_type_conditions(
subject,
data["policy_id"],
data["resource_group_id"],
related_resource_type["system_id"],
related_resource_type["type"],
)
        # 2. compare and merge the differences
conditions = self.condition_biz.compare_and_tag(
parse_obj_as(List[ConditionTagBean], related_resource_type["condition"]),
parse_obj_as(List[ConditionTagBean], old_condition),
is_template=True,
)
return Response([c.dict() for c in conditions])
| 1.351563 | 1 |
towers.py | fillest/7drl2013 | 1 | 5429 | <filename>towers.py<gh_stars>1-10
import util
import libtcodpy as tcod
import enemies
import operator
class Missile (util.Entity):
sym = '*'
color = tcod.white
class BasicMissile (Missile):
color = tcod.yellow
class IceMissile (Missile):
color = tcod.light_blue
class AoeMissile (Missile):
color = tcod.red
class Building (util.Entity):
sym = '@'
max_hp = 1
cost = 0
def __init__ (self, *args):
super(Building, self).__init__(*args)
self.hp = self.max_hp
def hurt (self, hp):
self.hp -= hp
if self.hp < 1:
self.die()
def hit (self, e):
if e in self.state.entities:
e.hurt(self.damage)
def die (self):
if self in self.state.entities:
self.delete()
def put (self):
assert self.state.energy > 0
self.state.entities.append(self)
self.state.energy -= self.cost
return self
def delete (self):
self.state.entities.remove(self)
self.state.energy += self.cost
return self
class Heart (Building):
sym = '&'
color = tcod.darker_red
max_hp = 20
def delete (self):
self.state.is_paused = True
return super(Heart, self).delete()
class Bait (Building):
sym = Heart.sym
color = tcod.pink
max_hp = 10
class Tower (Building):
radius = 15
max_hp = 10
damage = 1
missile = None
def __init__ (self, *args):
super(Tower, self).__init__(*args)
self.cooldown = False
def update (self):
if not self.cooldown:
# dist_min = None
# target = None
# for e in self.state.entities.enemies():
# d = util.dist(self.x, self.y, e.x, e.y)
# if d < (self.radius + 1) and ((dist_min is None) or (d < dist_min)):
# dist_min = d
# target = e
preferred_targets = []
other_targets = []
for e in self.state.entities.enemies():
d = util.dist(self.x, self.y, e.x, e.y)
if d < (self.radius + 1):
if e in self.state.targets_towers:
total_damage = sum([t.damage for t in self.state.targets_towers[e]])
if total_damage < e.hp:
preferred_targets.append((d, e))
else:
other_targets.append((d, e))
else:
preferred_targets.append((d, e))
target = None
if preferred_targets:
_d, target = sorted(preferred_targets, key = operator.itemgetter(0))[0]
elif other_targets:
_d, target = sorted(other_targets, key = operator.itemgetter(0))[0]
if target:
self.state.targets_towers[target].append(self)
self._shoot(target)
def render (self):
super(Tower, self).render()
if self.mouse_over:
# if True:
for x in range(self.x - (self.radius + 1), self.x + (self.radius + 1)):
for y in range(self.y - (self.radius + 1), self.y + (self.radius + 1)):
if util.dist(self.x, self.y, x, y) < (self.radius + 1):
tcod.console_set_char_background(0, x, y, tcod.Color(*[15]*3), flag = tcod.BKGND_SET)
def _shoot (self, e):
self.cooldown = True
def clear_cd ():
self.cooldown = False
self.state.timers.start_run_once(1000, clear_cd)
m = self.missile(self.state, self.x, self.y)
self.state.entities.append(m)
missile_speed = 20
self.state.timers.start(missile_speed, self.update_missile, [m, e])
def update_missile (self, m, e):
tcod.line_init(m.x, m.y, e.x, e.y)
x, y = tcod.line_step()
if x is None:
self.state.entities.remove(m)
self.hit(e)
return util.STOP
else:
m.x = x
m.y = y
class BasicTower (Tower):
color = tcod.dark_green
missile = BasicMissile
cost = 1
class ResearchBuilding (Building):
color = tcod.dark_sepia
cost = 1
def __init__ (self, *args):
super(ResearchBuilding, self).__init__(*args)
self.timer = self.state.timers.start(1000, self._research)
def _research (self):
pass
class AoeExplosion (util.Entity):
sym = '*'
color = tcod.dark_red
def __init__ (self, radius, *args):
super(AoeExplosion, self).__init__(*args)
self.radius = radius
def render (self):
for x in range(self.x - self.radius, self.x + self.radius):
for y in range(self.y - self.radius, self.y + self.radius):
tcod.console_put_char(0, x, y, self.sym, tcod.BKGND_NONE)
tcod.console_set_char_foreground(0, x, y, self.color)
class AoeTower (Tower):
color = tcod.dark_orange
missile = AoeMissile
cost = 2
def hit (self, target):
radius = 2
for x in range(target.x - radius, target.x + radius):
for y in range(target.y - radius, target.y + radius):
for e in self.state.entities.enemies():
if (e.x, e.y) == (x, y):
if e in self.state.entities: #TODO copypaste
e.hurt(self.damage)
e = AoeExplosion(radius, self.state, target.x, target.y)
self.state.entities.append(e)
self.state.timers.start_run_once(70, lambda: self.state.entities.remove(e))
class IceTower (Tower):
damage = 0.2
color = tcod.dark_blue
missile = IceMissile
cost = 1
def hit (self, target):
target.hurt(self.damage)
if not getattr(target, 'is_debuffed', False):
old_speed = target.timer.interval
target.timer.interval *= 3
target.timer.time_buf *= 3
target.is_debuffed = True
def rollback ():
target.timer.interval = old_speed
target.timer.time_buf /= 3
target.is_debuffed = False
self.rollback_timer = self.state.timers.start_run_once(1000, rollback)
elif getattr(self, 'rollback_timer', False):
self.rollback_timer.reset()
| 2.890625 | 3 |
python/mandelbrot.py | lukasjoc/random | 1 | 5430 | #!/usr/bin/python3
from PIL import Image
from numpy import array  # the builtin complex() is used below; numpy's deprecated complex alias is not needed
from tqdm import tqdm
import colorsys
W=512
#W=142
def mandelbrot(x, y):
def get_colors(i):
color = 255 * array(colorsys.hsv_to_rgb(i / 255.0, 1.0, 0.5))
return tuple(color.astype(int))
c, cc = 0, complex(x, y)
for i in range(1, 1000):
if abs(c) > 2:
return get_colors(i)
c = c * c + cc
return 0,0,0
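# For example, mandelbrot(0.0, 0.0) never escapes (|c| stays below 2) and
# returns black (0, 0, 0), while a point far outside the set escapes after a
# few iterations and gets a bright hue from get_colors.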
if __name__ == "__main__":
img = Image.new("RGB", (W, int(W / 2)))
pixels = img.load()
for x in tqdm(range(img.size[0])):
for y in tqdm(range(img.size[1])):
xx = (x - (0.75 * W)) / (W / 4)
yy = (y - (W / 4)) / (W / 4)
pixels[x, y] = mandelbrot(xx, yy)
img.show()
img.save("mandelbrot.jpg")
| 2.953125 | 3 |
tests/unit/commands/test_deploy.py | tonyreina/mlt | 1 | 5431 | <reponame>tonyreina/mlt
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
from __future__ import print_function
import uuid
import pytest
from mock import call, MagicMock
from mlt.commands.deploy import DeployCommand
from test_utils.io import catch_stdout
@pytest.fixture
def sleep(patch):
return patch('time.sleep')
@pytest.fixture
def fetch_action_arg(patch):
return patch('files.fetch_action_arg', MagicMock(return_value='output'))
@pytest.fixture
def kube_helpers(patch):
return patch('kubernetes_helpers')
@pytest.fixture
def json_mock(patch):
return patch('json')
@pytest.fixture
def open_mock(patch):
return patch('open')
@pytest.fixture
def popen_mock(patch):
popen_mock = MagicMock()
popen_mock.return_value.poll.return_value = 0
return patch('Popen', popen_mock)
@pytest.fixture
def process_helpers(patch):
return patch('process_helpers')
@pytest.fixture
def progress_bar(patch):
progress_mock = MagicMock()
progress_mock.duration_progress.side_effect = lambda x, y, z: print(
'Pushing ')
return patch('progress_bar', progress_mock)
@pytest.fixture
def template(patch):
return patch('Template')
@pytest.fixture
def verify_build(patch):
return patch('build_helpers.verify_build')
@pytest.fixture
def verify_init(patch):
return patch('config_helpers.load_config')
@pytest.fixture
def walk_mock(patch):
return patch('os.walk', MagicMock(return_value=['foo', 'bar']))
@pytest.fixture
def yaml(patch):
return patch('yaml.load')
def deploy(no_push, skip_crd_check, interactive, extra_config_args, retries=5):
deploy = DeployCommand(
{'deploy': True, '--no-push': no_push,
'--skip-crd-check': skip_crd_check,
'--interactive': interactive, '--retries': retries,
'--logs':False})
deploy.config = {'name': 'app', 'namespace': 'namespace'}
deploy.config.update(extra_config_args)
with catch_stdout() as caught_output:
deploy.action()
output = caught_output.getvalue()
return output
def verify_successful_deploy(output, did_push=True, interactive=False):
"""assert pushing, deploying, then objs created, then pushed"""
pushing = output.find('Pushing ')
push_skip = output.find('Skipping image push')
deploying = output.find('Deploying ')
inspecting = output.find('Inspect created objects by running:\n')
pushed = output.find('Pushed to ')
pod_connect = output.find('Connecting to pod...')
if did_push:
assert all(var >= 0 for var in (
deploying, inspecting, pushing, pushed))
assert deploying < inspecting, pushing < pushed
else:
assert all(var == -1 for var in (pushing, pushed))
assert all(var >= 0 for var in (deploying, inspecting, push_skip))
assert push_skip < deploying, deploying < inspecting
if interactive:
assert pod_connect > inspecting
def test_deploy_gce(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers, verify_build,
verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=False,
extra_config_args={'gceProject': 'gcr://projectfoo'})
verify_successful_deploy(output)
def test_deploy_docker(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers, verify_build,
verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=False,
extra_config_args={'registry': 'dockerhub'})
verify_successful_deploy(output)
def test_deploy_without_push(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers,
verify_build, verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
output = deploy(
no_push=True, skip_crd_check=True,
interactive=False,
extra_config_args={'gceProject': 'gcr://projectfoo'})
verify_successful_deploy(output, did_push=False)
def test_deploy_interactive_one_file(walk_mock, progress_bar, popen_mock,
open_mock, template, kube_helpers,
process_helpers, verify_build,
verify_init, fetch_action_arg, sleep,
yaml, json_mock):
walk_mock.return_value = ['foo']
yaml.return_value = {
'template': {'foo': 'bar'}, 'containers': [{'foo': 'bar'}]}
json_mock.loads.return_value = {'status': {'phase': 'Running'}}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=True,
extra_config_args={'registry': 'dockerhub'})
verify_successful_deploy(output, interactive=True)
# verify that kubectl commands are specifying namespace
for call_args in process_helpers.run_popen.call_args_list:
assert isinstance(call_args, type(call))
assert isinstance(call_args[0], tuple)
assert len(call_args[0]) > 0
command = call_args[0][0]
if command[0] == "kubectl":
assert "--namespace" in command
def test_deploy_interactive_two_files(walk_mock, progress_bar, popen_mock,
open_mock, template, kube_helpers,
process_helpers, verify_build,
verify_init, fetch_action_arg, sleep,
yaml, json_mock):
json_mock.loads.return_value = {'status': {'phase': 'Running'}}
yaml.return_value = {
'template': {'foo': 'bar'}, 'containers': [{'foo': 'bar'}]}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=True,
extra_config_args={'registry': 'dockerhub', '<kube_spec>': 'r'})
verify_successful_deploy(output, interactive=True)
def test_deploy_interactive_pod_not_run(walk_mock, progress_bar, popen_mock,
open_mock, template, kube_helpers,
process_helpers, verify_build,
verify_init, fetch_action_arg, sleep,
yaml, json_mock):
json_mock.loads.return_value = {'status': {'phase': 'Error'}}
yaml.return_value = {
'template': {'foo': 'bar'}, 'containers': [{'foo': 'bar'}]}
with pytest.raises(ValueError):
output = deploy(
no_push=False, skip_crd_check=True,
interactive=True,
extra_config_args={'registry': 'dockerhub', '<kube_spec>': 'r'})
def test_deploy_update_app_run_id(open_mock, json_mock):
run_id = str(uuid.uuid4())
json_mock_data = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
json_mock.load.return_value = json_mock_data
DeployCommand._update_app_run_id(run_id)
assert json_mock_data['app_run_id'] == run_id
def test_image_push_error(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers, verify_build,
verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
# setup mock to induce and error during the deploy
popen_mock.return_value.poll.return_value = 1
output_str = "normal output..."
error_str = "error message..."
build_output = MagicMock()
build_output.decode.return_value = output_str
error_output = MagicMock()
error_output.decode.return_value = error_str
popen_mock.return_value.communicate.return_value = (build_output,
error_output)
deploy_cmd = DeployCommand({'deploy': True,
'--skip-crd-check': True,
'--no-push': False})
deploy_cmd.config = {'name': 'app', 'namespace': 'namespace'}
deploy_cmd.config.update({'gceProject': 'gcr://projectfoo'})
with catch_stdout() as caught_output:
with pytest.raises(SystemExit):
deploy_cmd.action()
output = caught_output.getvalue()
# assert that we got the normal output, followed by the error message
output_location = output.find(output_str)
error_location = output.find(error_str)
assert all(var >= 0 for var in (output_location, error_location))
assert output_location < error_location
| 1.875 | 2 |
packages/mccomponents/tests/mccomponentsbpmodule/sample/Broadened_E_Q_Kernel_TestCase.py | mcvine/mcvine | 5 | 5432 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 2006-2010 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
standalone = True
import unittestX as unittest
import journal
debug = journal.debug( "Broadened_E_Q_Kernel_TestCase" )
warning = journal.warning( "Broadened_E_Q_Kernel_TestCase" )
import mcni
from mccomposite import mccompositebp
from mccomponents import mccomponentsbp
class TestCase(unittest.TestCase):
def test(self):
E_Q = "Q*Q/3."
S_Q = "1"
sigma_Q = "Q/2."
Qmin = 0; Qmax = 10
absorption_coefficient = scattering_coefficient = 1.
kernel = mccomponentsbp.create_Broadened_E_Q_Kernel(
E_Q, S_Q, sigma_Q,
Qmin, Qmax,
absorption_coefficient,
scattering_coefficient,
)
ei = 500 # meV
from mcni.utils import conversion
vil = conversion.e2v(ei)
vi = (0,0,vil)
import numpy.linalg as nl
import numpy as np
for i in range(10):
event = mcni.neutron(
r = (0,0,0), v = vi,
prob = 1, time = 0 )
kernel.scatter( event );
vf = np.array(event.state.velocity)
diffv = vi - vf
Q = conversion.v2k(nl.norm(diffv))
ef = conversion.v2e(nl.norm(vf))
E = ei - ef
# print E, Q, event
E1 = eval(E_Q)
continue
return
pass # end of TestCase
def main():
unittest.main()
return
if __name__ == "__main__":
main()
# version
__id__ = "$Id: TestCase.py 696 2010-11-09 06:23:06Z linjiao $"
# End of file
| 1.976563 | 2 |
baseline/ns-vqa/reason/options/test_options.py | robinzixuan/Video-Question-Answering-HRI | 52 | 5433 | from .base_options import BaseOptions
class TestOptions(BaseOptions):
"""Test Option Class"""
def __init__(self):
super(TestOptions, self).__init__()
self.parser.add_argument('--load_checkpoint_path', required=True, type=str, help='checkpoint path')
self.parser.add_argument('--save_result_path', required=True, type=str, help='save result path')
self.parser.add_argument('--max_val_samples', default=None, type=int, help='max val data')
self.parser.add_argument('--batch_size', default=256, type=int, help='batch_size')
self.is_train = False | 2.390625 | 2 |
Model_setup/NEISO_data_file/downsampling_generators_v1.py | keremakdemir/ISONE_UCED | 0 | 5434 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 18:45:34 2020
@author: kakdemi
"""
import pandas as pd
#importing generators
all_generators = pd.read_excel('generators2.xlsx', sheet_name='NEISO generators (dispatch)')
#getting all oil generators
all_oil = all_generators[all_generators['typ']=='oil'].copy()
#getting all generators in every zone
CT_oil = all_oil[all_oil['zone']=='CT'].copy()
ME_oil = all_oil[all_oil['zone']=='ME'].copy()
NEMA_oil = all_oil[all_oil['zone']=='NEMA'].copy()
NH_oil = all_oil[all_oil['zone']=='NH'].copy()
RI_oil = all_oil[all_oil['zone']=='RI'].copy()
SEMA_oil = all_oil[all_oil['zone']=='SEMA'].copy()
VT_oil = all_oil[all_oil['zone']=='VT'].copy()
WCMA_oil = all_oil[all_oil['zone']=='WCMA'].copy()
#defining zones
zones = ['CT','ME','NEMA','NH','RI','SEMA','VT','WCMA']
#getting all slack generators
all_slack = all_generators[all_generators['typ']=='slack'].copy()
#getting generators other than slack and oil
all_other = all_generators[(all_generators['typ']!='oil') & (all_generators['typ']!='slack')].copy()
#defining a function to downsample oil generators
def oil_downsampler(zone):
#copying the oil generators in that zone and sorting wrt to their seg1 heat rate
Selected_line_oil = globals()[zone+'_oil'].copy()
sorted_df = Selected_line_oil.sort_values(by=['seg1'])
sorted_df_reset = sorted_df.reset_index(drop=True)
#creating 3 chunks wrt their heatrates
heat_rate = list(sorted_df_reset.loc[:,'seg1'])
num = int(len(heat_rate)/3)
First_plant = sorted_df_reset.iloc[:num,:].copy()
Second_plant = sorted_df_reset.iloc[num:num*2,:].copy()
Third_plant = sorted_df_reset.iloc[num*2:,:].copy()
#finding the relevant parameters for the downsampled oil plants
First_cap = First_plant.loc[:,'netcap'].sum()
Second_cap = Second_plant.loc[:,'netcap'].sum()
Third_cap = Third_plant.loc[:,'netcap'].sum()
netcap = [First_cap, Second_cap, Third_cap]
ramp_1 = First_cap
ramp_2 = Second_cap
ramp_3 = Third_cap
ramp = [ramp_1, ramp_2, ramp_3]
First_min_cap = First_cap*0.35
Second_min_cap = Second_cap*0.35
Third_min_cap = Third_cap*0.35
min_cap = [First_min_cap, Second_min_cap, Third_min_cap]
Min_u = [1, 1, 1]
Min_d = [1, 1, 1]
zones = [zone, zone, zone]
types = ['oil', 'oil', 'oil']
seg_1_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg1']
seg_1_1_new = seg_1_1.sum()/First_plant.loc[:,'netcap'].sum()
seg_1_2 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg2']
seg_1_2_new = seg_1_2.sum()/First_plant.loc[:,'netcap'].sum()
seg_1_3 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg3']
seg_1_3_new = seg_1_3.sum()/First_plant.loc[:,'netcap'].sum()
seg_2_1 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg1']
seg_2_1_new = seg_2_1.sum()/Second_plant.loc[:,'netcap'].sum()
seg_2_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg2']
seg_2_2_new = seg_2_2.sum()/Second_plant.loc[:,'netcap'].sum()
seg_2_3 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg3']
seg_2_3_new = seg_2_3.sum()/Second_plant.loc[:,'netcap'].sum()
seg_3_1 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg1']
seg_3_1_new = seg_3_1.sum()/Third_plant.loc[:,'netcap'].sum()
seg_3_2 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg2']
seg_3_2_new = seg_3_2.sum()/Third_plant.loc[:,'netcap'].sum()
seg_3_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg3']
seg_3_3_new = seg_3_3.sum()/Third_plant.loc[:,'netcap'].sum()
seg_1 = [seg_1_1_new, seg_2_1_new, seg_3_1_new]
seg_2 = [seg_1_2_new, seg_2_2_new, seg_3_2_new]
seg_3 = [seg_1_3_new, seg_2_3_new, seg_3_3_new]
var_om_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'var_om']
var_om_1_new = var_om_1.sum()/First_plant.loc[:,'netcap'].sum()
var_om_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'var_om']
var_om_2_new = var_om_2.sum()/Second_plant.loc[:,'netcap'].sum()
var_om_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'var_om']
var_om_3_new = var_om_3.sum()/Third_plant.loc[:,'netcap'].sum()
var_om = [var_om_1_new, var_om_2_new, var_om_3_new]
no_load_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'no_load']
no_load_1_new = no_load_1.sum()/First_plant.loc[:,'netcap'].sum()
no_load_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'no_load']
no_load_2_new = no_load_2.sum()/Second_plant.loc[:,'netcap'].sum()
no_load_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'no_load']
no_load_3_new = no_load_3.sum()/Third_plant.loc[:,'netcap'].sum()
no_load = [no_load_1_new, no_load_2_new, no_load_3_new]
st_cost_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'st_cost']
st_cost_1_new = st_cost_1.sum()/First_plant.loc[:,'netcap'].sum()
st_cost_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'st_cost']
st_cost_2_new = st_cost_2.sum()/Second_plant.loc[:,'netcap'].sum()
st_cost_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'st_cost']
st_cost_3_new = st_cost_3.sum()/Third_plant.loc[:,'netcap'].sum()
st_cost = [st_cost_1_new, st_cost_2_new, st_cost_3_new]
name = [zone+'_agg_oil_1', zone+'_agg_oil_2', zone+'_agg_oil_3']
#creating a dataframe that includes downsampled oil generators
list_labels = list(WCMA_oil.columns)
list_columns = [name, types, zones, netcap, seg_1, seg_2, seg_3, min_cap, ramp, Min_u,
Min_d, var_om, no_load, st_cost]
zipped_list = list(zip(list_labels, list_columns))
gen_df = dict(zipped_list)
df_oils = pd.DataFrame(gen_df)
return df_oils
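# For example (hypothetical call): oil_downsampler('CT') collapses every oil
# unit in the CT zone into three aggregate plants named CT_agg_oil_1..3 with
# capacity-weighted heat rates, variable O&M, no-load and start-up costs.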
#downsampling oil generators in every zone by using the defined function
for z in zones:
globals()[z+'_agg_oil_df'] = oil_downsampler(z)
#adding downsampled oil generators to create a complete list of generators
final_generators = pd.concat([all_other, CT_agg_oil_df, ME_agg_oil_df, NEMA_agg_oil_df,
NH_agg_oil_df, RI_agg_oil_df, SEMA_agg_oil_df, VT_agg_oil_df,
WCMA_agg_oil_df, all_slack], ignore_index=True)
#exporting the generators as an Excel file
final_generators.to_excel('generators.xlsx', sheet_name='NEISO generators (dispatch)', index=False)
| 2.546875 | 3 |
GUI1.py | otmanabdoun/IHM-Python | 3 | 5435 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 16 19:47:41 2021
@author: User
"""
import tkinter as tk
racine = tk.Tk()
label = tk.Label(racine, text="J'adore Python !")
bouton = tk.Button(racine, text="Quitter", command=racine.destroy)
label.pack()
bouton.pack() | 2.71875 | 3 |
app/routes/v1/endpoints/clickup.py | ertyurk/bugme | 0 | 5436 | from fastapi import APIRouter, status, Body, HTTPException
from fastapi.encoders import jsonable_encoder
from starlette.responses import JSONResponse
from app.models.common import *
from app.models.clickup import *
from app.database.crud.clickup import *
router = APIRouter()
@router.get("/", response_description="Clickup integrations are retrieved.")
async def get_clickup_integrations():
clickups = await retrieve_clickups()
return (
ResponseModel(clickups, "Clickup integrations data retrieved successfully")
if len(clickups) > 0
else ResponseModel(clickups, "Empty list returned")
)
@router.post(
"/", response_description="Clickup integrations data added into the database."
)
async def add_clickup_a_integration(clickup: ClickupModel = Body(...)):
clickup = jsonable_encoder(clickup)
new_clickup = await add_new_clickup(clickup)
return ResponseModel(
new_clickup,
"clickup integration created successfully.",
status.HTTP_201_CREATED,
)
@router.get("/{id}/", response_description="Clickup data retrieved.")
async def find_clickup_integration(id):
clickup = await retrieve_clickup(id)
return (
ResponseModel(clickup, "Clickup integrations data retrieved successfully")
if clickup
else ErrorResponseModel(
"An error occured.", status.HTTP_404_NOT_FOUND, "Integration doesn't exist."
)
)
@router.put(
"/{id}/", response_description="Clickup integrations data updated in the database."
)
async def update_a_clickup_integration(
id: str, clickup: UpdateClickupModel = Body(...)
):
clickup = jsonable_encoder(clickup)
updated_clickup = await update_clickup_data(id, clickup)
return (
ResponseModel({"id": id}, "Clickup integration updated successfully")
if updated_clickup
else ErrorResponseModel(
"An error occurred",
status.HTTP_404_NOT_FOUND,
"There was an error updating the Clickup integration.",
)
)
@router.delete("/{id}/", response_description="Delete the integration")
async def delete_clickup_integration(id: str):
deleted_clickup = await delete_integration(id)
return (
ResponseModel(
"Integration with ID: {} removed".format(id),
"Integration deleted successfully",
)
if deleted_clickup
else ErrorResponseModel(
"An error occured",
status.HTTP_404_NOT_FOUND,
"Integration with id {0} doesn't exist".format(id),
)
)
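
# Minimal wiring sketch (assumption: illustrative only; the prefix and function name are made up,
# the real project mounts this router elsewhere).
def create_app():
    from fastapi import FastAPI

    app = FastAPI()
    app.include_router(router, prefix="/api/v1/clickup", tags=["clickup"])
    return app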
| 2.5625 | 3 |
cellfinder_core/main.py | npeschke/cellfinder-core | 5 | 5437 | <filename>cellfinder_core/main.py<gh_stars>1-10
"""
N.B. imports are within functions to prevent tensorflow being imported before
its warnings are silenced
"""
import os
import logging
from imlib.general.logging import suppress_specific_logs
tf_suppress_log_messages = [
"multiprocessing can interact badly with TensorFlow"
]
def main(
signal_array,
background_array,
voxel_sizes,
start_plane=0,
end_plane=-1,
trained_model=None,
model_weights=None,
model="resnet50_tv",
batch_size=32,
n_free_cpus=2,
network_voxel_sizes=[5, 1, 1],
soma_diameter=16,
ball_xy_size=6,
ball_z_size=15,
ball_overlap_fraction=0.6,
log_sigma_size=0.2,
n_sds_above_mean_thresh=10,
soma_spread_factor=1.4,
max_cluster_size=100000,
cube_width=50,
cube_height=50,
cube_depth=20,
network_depth="50",
):
suppress_tf_logging(tf_suppress_log_messages)
from cellfinder_core.detect import detect
from cellfinder_core.classify import classify
from cellfinder_core.tools import prep
from pathlib import Path
home = Path.home()
install_path = home / ".cellfinder"
logging.info("Detecting cell candidates")
points = detect.main(
signal_array,
start_plane,
end_plane,
voxel_sizes,
soma_diameter,
max_cluster_size,
ball_xy_size,
ball_z_size,
ball_overlap_fraction,
soma_spread_factor,
n_free_cpus,
log_sigma_size,
n_sds_above_mean_thresh,
)
model_weights = prep.prep_classification(
trained_model, model_weights, install_path, model, n_free_cpus
)
if len(points) > 0:
logging.info("Running classification")
points = classify.main(
points,
signal_array,
background_array,
n_free_cpus,
voxel_sizes,
network_voxel_sizes,
batch_size,
cube_height,
cube_width,
cube_depth,
trained_model,
model_weights,
network_depth,
)
else:
logging.info("No candidates, skipping classification")
return points
# logging.info("Saving classified cells")
# save_cells(points, classified_points_path)
def suppress_tf_logging(tf_suppress_log_messages):
"""
Prevents many lines of logs such as:
"2019-10-24 16:54:41.363978: I tensorflow/stream_executor/platform/default
/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1"
"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
for message in tf_suppress_log_messages:
suppress_specific_logs("tensorflow", message)
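
def _example_usage():
    """Minimal sketch (assumption: illustrative only; the array shapes and voxel sizes are made up)."""
    import numpy as np

    signal = np.random.random((30, 500, 500))
    background = np.random.random((30, 500, 500))
    # voxel_sizes follows the same ordering as network_voxel_sizes in main() above
    return main(signal, background, voxel_sizes=[5, 2, 2])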
| 1.71875 | 2 |
server.py | rezist-ro/rezistenta.tv | 0 | 5438 | # coding=utf-8
import dateutil.parser
import flask
import json
import os
import time
import urllib
import yaml
EPISODES = yaml.load(open("episodes.yaml").read())
app = flask.Flask(__name__,
static_path="/assets",
static_folder="assets")
app.jinja_env.filters["strftime"] = \
lambda str, fmt: dateutil.parser.parse(str).strftime(fmt)
app.jinja_env.filters["quote_plus"] = lambda u: urllib.quote_plus(u)
ASSETS = os.path.join(app.root_path, "assets")
@app.route("/favicon.ico")
def favicon():
return flask.send_from_directory(
ASSETS,
"favicon.ico",
mimetype="image/icon")
@app.route("/")
def home():
return flask.render_template("pages/home.html",
playlist=os.environ["PLAYLIST"],
episodes=EPISODES,
autoplay=not app.debug)
@app.route("/episod/<int:number>")
def episode(number):
if number < 1:
return "not found"
elif number > len(EPISODES):
return "coming soon"
else:
episode = EPISODES[len(EPISODES) - number]
template = "pages/episode/%s.html" % (
"youtube" if "yt" in episode else "facebook"
)
return flask.render_template(template,
number=number,
episode=episode,
episodes=EPISODES)
| 2.546875 | 3 |
problem020.py | mazayus/ProjectEuler | 0 | 5439 | #!/usr/bin/env python3
from functools import *
import operator
def factorial(number):
assert number >= 1
return reduce(operator.mul, range(1, number+1))
def digits(number):
yield from (int(digit) for digit in str(number))
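# Quick sanity check (assumption: added for illustration; not part of the original solution).
# factorial(10) == 3628800, whose digits sum to 27.
assert sum(digits(factorial(10))) == 27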
print(sum(digits(factorial(100))))
| 3.984375 | 4 |
transformer.py | ghafran/KerasPersonLab | 0 | 5440 | <reponame>ghafran/KerasPersonLab
import numpy as np
from math import cos, sin, pi
import cv2
import random
from config import config, TransformationParams
from data_prep import map_coco_to_personlab
class AugmentSelection:
def __init__(self, flip=False, degree = 0., crop = (0,0), scale = 1.):
self.flip = flip
self.degree = degree #rotate
self.crop = crop #shift actually
self.scale = scale
@staticmethod
def random():
flip = random.uniform(0.,1.) > TransformationParams.flip_prob
degree = random.uniform(-1.,1.) * TransformationParams.max_rotate_degree
scale = (TransformationParams.scale_max - TransformationParams.scale_min)*random.uniform(0.,1.)+TransformationParams.scale_min \
if random.uniform(0.,1.) < TransformationParams.scale_prob else 1.
x_offset = int(random.uniform(-1.,1.) * TransformationParams.center_perterb_max);
y_offset = int(random.uniform(-1.,1.) * TransformationParams.center_perterb_max);
return AugmentSelection(flip, degree, (x_offset,y_offset), scale)
@staticmethod
def unrandom():
flip = False
degree = 0.
scale = 1.
x_offset = 0
y_offset = 0
return AugmentSelection(flip, degree, (x_offset,y_offset), scale)
def affine(self, center=(config.IMAGE_SHAPE[1]//2, config.IMAGE_SHAPE[0]//2) , scale_self=1.):
# the main idea: we will do all image transformations with one affine matrix.
        # this saves a lot of CPU and makes the code significantly shorter
# same affine matrix could be used to transform joint coordinates afterwards
A = self.scale * cos(self.degree / 180. * pi )
B = self.scale * sin(self.degree / 180. * pi )
# scale_size = TransformationParams.target_dist / scale_self * self.scale
scale_size = TransformationParams.target_dist / self.scale
(width, height) = center
center_x = width + self.crop[0]
center_y = height + self.crop[1]
center2zero = np.array( [[ 1., 0., -center_x],
[ 0., 1., -center_y ],
[ 0., 0., 1. ]] )
rotate = np.array( [[ A, B, 0 ],
[ -B, A, 0 ],
[ 0, 0, 1. ] ])
scale = np.array( [[ scale_size, 0, 0 ],
[ 0, scale_size, 0 ],
[ 0, 0, 1. ] ])
flip = np.array( [[ -1 if self.flip else 1., 0., 0. ],
[ 0., 1., 0. ],
[ 0., 0., 1. ]] )
center2center = np.array( [[ 1., 0., config.IMAGE_SHAPE[1]//2],
[ 0., 1., config.IMAGE_SHAPE[0]//2 ],
[ 0., 0., 1. ]] )
# order of combination is reversed
combined = center2center.dot(flip).dot(scale).dot(rotate).dot(center2zero)
return combined[0:2]
class Transformer:
@staticmethod
def transform(img, masks, keypoints, aug=AugmentSelection.random()):
# warp picture and mask
M = aug.affine(center=(img.shape[1]//2, img.shape[0]//2))
cv_shape = (config.IMAGE_SHAPE[1], config.IMAGE_SHAPE[0])
        # TODO: need to understand this, scale_provided[0] is height of main person divided by 368, calculated in generate_hdf5.py
# print(img.shape)
# for i, img in enumerate(input_transform_targets):
img = cv2.warpAffine(img, M, cv_shape, flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=(127,127,127))
# concat = np.stack(output_transform_targets, axis=-1)
# fix from https://github.com/octiapp/KerasPersonLab/issues/2
# masks = cv2.warpAffine(masks, M, cv_shape, flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
out_masks = np.zeros(cv_shape[::-1]+(masks.shape[-1],))
for i in range(masks.shape[-1]):
out_masks[:,:,i] = cv2.warpAffine(masks[:,:,i], M, cv_shape, flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
masks = out_masks
# warp key points
        # TODO: a joint could be cropped out by augmentation; in that case we should mark it as invisible.
        # update: maybe we don't actually need that; the original code removed parts cropped by more than half, maybe we should keep that behaviour
keypoints = map_coco_to_personlab(keypoints)
original_points = keypoints.copy()
# print keypoints
original_points[:,:,2]=1 # we reuse 3rd column in completely different way here, it is hack
converted_points = np.matmul(M, original_points.transpose([0,2,1])).transpose([0,2,1])
keypoints[:,:,0:2]=converted_points
cropped_kp = keypoints[:,:,0] >= config.IMAGE_SHAPE[1]
cropped_kp = np.logical_or(cropped_kp, keypoints[:,:,1] >= config.IMAGE_SHAPE[0])
cropped_kp = np.logical_or(cropped_kp, keypoints[:,:,0] < 0)
cropped_kp = np.logical_or(cropped_kp, keypoints[:,:,1] < 0)
keypoints[cropped_kp, 2] = 0
# we just made image flip, i.e. right leg just became left leg, and vice versa
if aug.flip:
tmpLeft = keypoints[:, config.LEFT_KP, :]
tmpRight = keypoints[:, config.RIGHT_KP, :]
keypoints[:, config.LEFT_KP, :] = tmpRight
keypoints[:, config.RIGHT_KP, :] = tmpLeft
# print keypoints
return img, masks, keypoints
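
if __name__ == '__main__':
    # Minimal sketch (assumption: illustrative only; it uses plain numpy and does not touch
    # config or real images). Compose the same kind of homogeneous matrices used in
    # AugmentSelection.affine: move the center to the origin, rotate 90 degrees, move it back.
    center2zero = np.array([[1., 0., -10.], [0., 1., -10.], [0., 0., 1.]])
    rotate90 = np.array([[cos(pi / 2), sin(pi / 2), 0.], [-sin(pi / 2), cos(pi / 2), 0.], [0., 0., 1.]])
    center2center = np.array([[1., 0., 10.], [0., 1., 10.], [0., 0., 1.]])
    M = center2center.dot(rotate90).dot(center2zero)[0:2]   # 2x3, the shape cv2.warpAffine expects
    point = np.array([12., 10., 1.])                         # homogeneous (x, y, 1)
    print(M.dot(point))                                      # approximately [10., 8.]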
| 2.796875 | 3 |
enan/__init__.py | mizuno-group/enan | 0 | 5441 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 25 15:46:32 2019
@author: tadahaya
"""
from .binom import BT
from .connect import Connect
from .fet import FET
from .gsea import GSEA
from .ssgsea import ssGSEA
__copyright__ = 'Copyright (C) 2020 MIZUNO Tadahaya'
__version__ = '1.0.3'
__license__ = 'MIT'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>' | 1.25 | 1 |
app/helpers/__init__.py | Hacker-1202/Selfium | 14 | 5442 | <filename>app/helpers/__init__.py
"""
Selfium Helper Files
~~~~~~~~~~~~~~~~~~~
All Helper Files used in Selfium project;
:copyright: (c) 2021 - Caillou and ZeusHay;
:license: MIT, see LICENSE for more details.
"""
from .getUser import *
from .getGuild import *
from .params import *
from .notify import *
from .sendEmbed import *
from .isStaff import * | 1.070313 | 1 |
NLP/UNIMO/src/finetune/visual_entailment.py | zhangyimi/Research | 1,319 | 5443 | <gh_stars>1000+
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for visual_entailment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import time
import numpy as np
import paddle.fluid as fluid
from model.unimo_finetune import UNIMOModel
from eval import glue_eval
from collections import OrderedDict
from utils.utils import print_eval_log
def kl_divergence_with_logits(q_logits, p_logits):
"""
symmetric KL-divergence (See SMART, Sec 3.1)
q_logits: logits
p_logits: delta_logits
"""
q = fluid.layers.softmax(input=q_logits)
p = fluid.layers.softmax(input=p_logits)
kl_qp = fluid.layers.reduce_sum(q * (fluid.layers.log(q) - fluid.layers.log(p)), -1)
kl_pq = fluid.layers.reduce_sum(p * (fluid.layers.log(p) - fluid.layers.log(q)), -1)
vat_loss = fluid.layers.mean(x=kl_qp+kl_pq)
return vat_loss
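
def _symmetric_kl_numpy_sketch(q_logits, p_logits):
    """Minimal numpy sketch of the same symmetric KL (assumption: added for illustration only;
    it mirrors kl_divergence_with_logits but is not used by the training graph)."""
    def _softmax(x):
        e = np.exp(x - x.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)
    q = _softmax(q_logits)
    p = _softmax(p_logits)
    kl_qp = np.sum(q * (np.log(q) - np.log(p)), axis=-1)
    kl_pq = np.sum(p * (np.log(p) - np.log(q)), axis=-1)
    return np.mean(kl_qp + kl_pq)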
def create_model(args, config, pyreader_name="train_reader", is_train=True):
"""create_model"""
shapes = [[-1, args.max_seq_len, 1], # src_ids
[-1, args.max_seq_len, 1], # pos_ids
[-1, args.max_seq_len, 1], # sent_ids
[-1, args.max_img_len + args.max_seq_len, args.max_img_len + args.max_seq_len], # input_mask
[-1, args.max_img_len, 1], # v_mask
[-1, args.max_seq_len, 1], # t_mask
[-1, args.max_img_len, config["image_embedding_size"]], # image_embedding
[-1, args.max_img_len, 5], # image_loc
[-1, 1] # labels
]
dtypes = ['int64', 'int64', 'int64', 'float32', 'float32', 'float32', 'float32','float32', 'int64']
lod_levels = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pyreader = fluid.layers.py_reader(
capacity=70,
shapes=shapes,
dtypes=dtypes,
lod_levels=lod_levels,
name=pyreader_name,
use_double_buffer=True)
(src_ids, pos_ids, sent_ids, input_mask, v_mask, t_mask, image_embedding, image_loc, labels) \
= fluid.layers.read_file(pyreader)
emb_ids = {"word_embedding": src_ids, "sent_embedding": sent_ids, "pos_embedding": pos_ids}
image_input = {"image_embedding": image_embedding, "loc_embedding": image_loc}
adv_step, adv_lr, norm_type, adv_max_norm, adv_init_mag = \
args.adv_step, args.adv_lr, args.norm_type, args.adv_max_norm, args.adv_init_mag
assert adv_step > 0 and adv_init_mag > 0
def get_loss_and_logits(text_feats, image_feats):
feats = text_feats + image_feats
cls_params_name = ["cls_out_w_0", "cls_out_b_0"]
feats = fluid.layers.fc(
input=feats,
size=2048,
param_attr=fluid.ParamAttr(
name=cls_params_name[0],
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name=cls_params_name[1], initializer=fluid.initializer.Constant(0.)))
feats = fluid.layers.dropout(
x=feats,
dropout_prob=0.1,
dropout_implementation="upscale_in_train")
cls_params_name = ["cls_out_w_1", "cls_out_b_1"]
logits = fluid.layers.fc(
input=feats,
size=args.num_labels,
param_attr=fluid.ParamAttr(
name=cls_params_name[0],
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name=cls_params_name[1], initializer=fluid.initializer.Constant(0.)))
ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
logits=logits, label=labels, return_softmax=True)
loss = fluid.layers.mean(x=ce_loss) / adv_step
return loss, logits, probs
def init_delta(input, mask, shape, name='text'):
real_seq_len = fluid.layers.shape(input)[1]
fake = fluid.layers.data(name=name+"_fake", shape=shape, dtype='float32')
mask_slice = fluid.layers.slice(mask, axes=[1], starts=[0], ends=fluid.layers.shape(mask)[1])
length = fluid.layers.reduce_sum(mask_slice, dim=1, keep_dim=True) * shape[-1]
# l2 norm
delta = fluid.layers.uniform_random_batch_size_like(mask, shape=fake.shape, min=-1.0, max=1.0)
delta = fluid.layers.slice(delta, axes=[1], starts=[0], ends=real_seq_len)
delta = delta * mask_slice
mag = adv_init_mag / fluid.layers.sqrt(length)
delta = delta * mag
return delta
if is_train:
text_emb_shape = [-1, args.max_seq_len, config['hidden_size']]
text_delta = init_delta(src_ids, t_mask, text_emb_shape, name='text')
image_emb_shape = [-1, args.max_img_len, config['image_embedding_size']]
image_delta = init_delta(image_embedding, v_mask, image_emb_shape, name='img')
else:
text_delta, image_delta = None, None
def pgd_with_l2(loss, delta):
# grad
delta_grad = fluid.backward.gradients(loss, delta)[0]
# l2 norm
delta_norm = fluid.layers.sqrt(fluid.layers.reduce_sum(fluid.layers.pow(fluid.layers.reshape(delta_grad, \
[fluid.layers.shape(delta_grad)[0], -1]), factor=2), dim=1, keep_dim=True))
delta_norm = fluid.layers.clamp(delta_norm, min=float(1e-8))
# pgd
delta = delta + adv_lr * delta_grad / delta_norm
# projection
if adv_max_norm > 0:
exceed_mask = (delta_norm > adv_max_norm).astype('float32')
reweights = (adv_max_norm / delta_norm) * exceed_mask + (1 - exceed_mask)
delta = delta * reweights
delta_grad.stop_gradient=True
return delta
loss = None
for iter in range(adv_step):
vl_pure = UNIMOModel(
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
vl_text = UNIMOModel(
text_adv_delta=text_delta,
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
vl_image = UNIMOModel(
image_adv_delta=image_delta,
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
h_pure_text, h_pure_image = vl_pure.get_pooled_output()
h_text_text, h_text_image = vl_text.get_pooled_output()
h_image_text, h_image_image = vl_image.get_pooled_output()
loss_pure, logit_pure, probs_pure = get_loss_and_logits(h_pure_text, h_pure_image)
loss_text, logit_text, probs_text = get_loss_and_logits(h_text_text, h_text_image)
loss_image, logit_image, probs_image = get_loss_and_logits(h_image_text, h_image_image)
if is_train:
text_delta = pgd_with_l2(loss_text, text_delta)
image_delta = pgd_with_l2(loss_image, image_delta)
kl_adv_text_loss = kl_divergence_with_logits(logit_pure, logit_text)
kl_adv_image_loss = kl_divergence_with_logits(logit_pure, logit_image)
cur_loss = loss_pure + loss_text + loss_image + kl_adv_text_loss + kl_adv_image_loss
loss = cur_loss if loss is None else loss + cur_loss
num_seqs = fluid.layers.create_tensor(dtype='int64')
accuracy = fluid.layers.accuracy(input=probs_pure, label=labels, total=num_seqs)
graph_vars = {
"loss": loss,
"probs": probs_pure,
"accuracy": accuracy,
"labels": labels,
"num_seqs": num_seqs
}
for k, v in graph_vars.items():
v.persistable = False
return pyreader, graph_vars
def evaluate(args, exe, test_pyreader, graph_vars, eval_phase, dev_count=1, gpu_id=0):
"""evaluate"""
all_mat = []
test_pyreader.start()
time_begin = time.time()
fetch_list = [graph_vars["probs"].name, graph_vars["labels"].name]
while True:
try:
np_probs, np_labels = exe.run(fetch_list=fetch_list)
np_preds = np.argmax(np_probs, axis=1).reshape((-1, 1))
np_labels = np_labels.reshape((-1, 1))
mat = np.concatenate([np_preds, np_labels], axis=1)
all_mat.extend(mat.tolist())
except fluid.core.EOFException:
test_pyreader.reset()
break
all_mat = np.array(all_mat)
time_end = time.time()
save_file = "%s/%s.trainers_%d.part_%d.npy" % (args.eval_dir, eval_phase, dev_count, gpu_id)
np.save(save_file, all_mat)
tmp_file = "%s/%s.trainers_%d.part_%d.finish" % (args.eval_dir, eval_phase, dev_count, gpu_id)
tmp_writer = open(tmp_file, "w")
tmp_writer.close()
if gpu_id == 0:
while True:
ret = os.popen('find %s -maxdepth 1 -name "%s.trainers_%d.part_*.finish"' %
(args.eval_dir, eval_phase, dev_count)).readlines()
if len(ret) != dev_count:
time.sleep(1)
continue
else:
break
all_mats = []
save_files = glob.glob("%s/%s.trainers_%d.part_*.npy" % (args.eval_dir, eval_phase, dev_count))
for cur_save_file in save_files:
mat = np.load(cur_save_file).tolist()
all_mats.extend(mat)
all_mats = np.array(all_mats)
cur_time = str(int(time.time()))
os.system("mkdir %s/%s" % (args.eval_dir, cur_time))
os.system("mv %s/%s.trainers_%d.* %s/%s" % (args.eval_dir, eval_phase, dev_count, args.eval_dir, cur_time))
ret = OrderedDict()
ret['phase'] = eval_phase
ret['loss'] = -1
ret['data_num'] = all_mats.shape[0]
ret['used_time'] = round(time_end - time_begin, 4)
metrics = OrderedDict()
metrics["simple_accuracy"] = glue_eval.simple_accuracy
if args.eval_mertrics in metrics:
ret_metric = metrics[args.eval_mertrics](all_mats[:, 0], all_mats[:, 1])
ret.update(ret_metric)
print_eval_log(ret)
else:
raise ValueError('unsupported metric {}'.format(args.eval_mertrics))
return ret
else:
return None
| 1.679688 | 2 |
src/records.py | oth-datapipeline/ingestion-scripts | 0 | 5444 | <filename>src/records.py
from faust import Record
class RssFeed(Record, serializer='json'):
feed_source: str
title: str
link: str
published: str = None
author: str = None
summary: str = None
published_parsed: list = None
authors: list = None
tags: list = None
comments: str = None
content: list = None
source: dict = None
class TwitterTrend(Record, serializer='json'):
pass
class Tweet(Record, serializer="json"):
tweet_id: str
text: str
created_at: str
metrics: dict
author: dict
trend: str
place: str = None
hashtags: list = None
class RedditPost(Record, serializer='json'):
id: str
title: str
author: dict
created: str
score: int
upvote_ratio: float
reddit: dict
domain: str = None
url: str = None
comments: list = None
keywords: list = None
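
if __name__ == '__main__':
    # Minimal sketch (assumption: illustrative values; only the required fields are supplied and
    # the optional fields declared above fall back to their defaults).
    feed = RssFeed(feed_source='example-feed', title='Hello', link='https://example.com/post')
    print(feed)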
| 2.578125 | 3 |
leetcode/102-Medium-Binary-Tree-Level-Order-Traversal/answer.py | vaishali-bariwal/Practice-Coding-Questions | 25 | 5445 | #!/usr/bin/python3
#------------------------------------------------------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
stack = [(root, 0)]
result = []
while stack:
(node, level) = stack.pop(0)
if level == len(result):
result.append([])
result[level].append(node.val)
if node.left: stack.append((node.left, level+1))
if node.right: stack.append((node.right, level+1))
return result
#------------------------------------------------------------------------------
#Testing
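# Minimal sketch (assumption: TreeNode mirrors the commented definition at the top of the file;
# the tree below is made up for illustration).
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

if __name__ == '__main__':
    #     3
    #    / \
    #   9  20
    #      / \
    #     15  7
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    assert Solution().levelOrder(root) == [[3], [9, 20], [15, 7]]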
| 4.09375 | 4 |
setup.py | shirayu/fitbit-dumper | 0 | 5446 | <gh_stars>0
#!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name="",
version="0.01",
packages=find_packages(),
install_requires=[
"fitbit"
],
dependency_links=[
],
extras_require={
"tests": [
"flake8",
"autopep8",
]
}
)
| 1.109375 | 1 |
src/main.py | mtnmunuklu/SigmaToExcel | 10 | 5447 | <filename>src/main.py
import sys
sys.path.append("../")
from src.app.sigma import SigmaConverter
if __name__ == "__main__":
sigmaconverter = SigmaConverter()
sigmaconverter.read_from_file()
sigmaconverter.write_to_excel()
| 1.875 | 2 |
server/processes/migrations/0132_auto_20201108_0540.py | CloudReactor/task_manager | 0 | 5448 | # Generated by Django 2.2.14 on 2020-11-08 05:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('processes', '0131_auto_20201107_2316'),
]
operations = [
migrations.RunSQL(
"UPDATE processes_workflow SET run_environment_id = scheduling_run_environment_id WHERE run_environment_id IS NULL;",
reverse_sql='',
),
]
| 1.523438 | 2 |
sparsely_lstmvae_main.py | pengkangzaia/usad | 0 | 5449 | <gh_stars>0
from model.sparsely_lstm_vae import *
import torch.utils.data as data_utils
from sklearn import preprocessing
from utils.eval_methods import *
device = get_default_device()
# Read data
# normal = pd.read_csv("data/SWaT_Dataset_Normal_v1.csv") # , nrows=1000)
normal = pd.read_csv("data/SWaT/SWaT_Dataset_Normal_v1.csv", nrows=10000) # , nrows=1000)
normal = normal.drop(["Timestamp", "Normal/Attack"], axis=1)
# normal.shape
# Transform all columns into float64
for i in list(normal):
normal[i] = normal[i].apply(lambda x: str(x).replace(",", "."))
normal = normal.astype(float)
# Data preprocessing
min_max_scaler = preprocessing.MinMaxScaler()
x = normal.values
x_scaled = min_max_scaler.fit_transform(x)
normal = pd.DataFrame(x_scaled)
# Read data
# attack = pd.read_csv("data/SWaT_Dataset_Attack_v0.csv", sep=";") # , nrows=1000)
attack = pd.read_csv("data/SWaT/SWaT_Dataset_Attack_v0.csv", sep=";", nrows=10000) # , nrows=1000)
labels = [float(label != 'Normal') for label in attack["Normal/Attack"].values]
attack = attack.drop(["Timestamp", "Normal/Attack"], axis=1)
# Transform all columns into float64
for i in list(attack):
attack[i] = attack[i].apply(lambda x: str(x).replace(",", "."))
attack = attack.astype(float)
x = attack.values
x_scaled = min_max_scaler.transform(x)
attack = pd.DataFrame(x_scaled)
############## windows ###################
window_size = 12
# np.arange(window_size)[None, :] -> shape 1*12, a single row (0, 1, 2, ..., 11)
# np.arange(normal.shape[0] - window_size)[:, None] -> shape (n-12)*1, one window start index per row
# their broadcast sum -> shape (n-12)*12, where row i holds the indices of the i-th sliding window
windows_normal = normal.values[np.arange(window_size)[None, :] + np.arange(attack.shape[0] - window_size)[:, None]]
windows_attack = attack.values[np.arange(window_size)[None, :] + np.arange(attack.shape[0] - window_size)[:, None]]
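# Minimal sketch of the indexing trick above (assumption: illustrative only, tiny numbers made up):
# with window size 3 over 5 samples, the broadcast sum yields [[0, 1, 2], [1, 2, 3]],
# i.e. one row of indices per sliding window.
_demo_window_idx = np.arange(3)[None, :] + np.arange(5 - 3)[:, None]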
############## training ###################
# BATCH_SIZE = 7919
BATCH_SIZE = 200
N_EPOCHS = 100
hidden_size = 100
latent_size = 40
# w_size = windows_normal.shape[1] * windows_normal.shape[2] # window_size * feature_size
# z_size = windows_normal.shape[1] * hidden_size # window_size * hidden_size
windows_normal_train = windows_normal[:int(np.floor(.8 * windows_normal.shape[0]))]
windows_normal_val = windows_normal[int(np.floor(.8 * windows_normal.shape[0])):int(np.floor(windows_normal.shape[0]))]
train_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_normal_train).float().view(([windows_normal_train.shape[0], windows_normal_train.shape[1], windows_normal_train.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
val_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_normal_val).float().view(([windows_normal_val.shape[0], windows_normal_train.shape[1], windows_normal_train.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
test_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_attack).float().view(([windows_attack.shape[0], windows_attack.shape[1], windows_attack.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
model = SparselyLstmVae(BATCH_SIZE, window_size, windows_normal.shape[2], hidden_size, latent_size, former_step=3)
model = to_device(model, device)
val_loss, train_loss = training(N_EPOCHS, model, train_loader, val_loader)
plot_simple_history(val_loss)
plot_train_loss(train_loss)
torch.save({'ae': model.state_dict()}, "saved_model/model.pth")
############ testing #################
checkpoint = torch.load("saved_model/model.pth")  # match the path used by torch.save above
model.load_state_dict(checkpoint['ae'])
# Each batch yields one result; together they make up the results list
results = testing(model, test_loader)
windows_labels = []
for i in range(len(labels) - window_size):
windows_labels.append(list(np.int_(labels[i:i + window_size])))
# A window containing any attack point is labeled anomalous (1)
y_test = [1.0 if (np.sum(window) > 0) else 0 for window in windows_labels]
# With too few samples, the error will be large
y_pred = np.concatenate(
[torch.stack(results[:-1]).flatten().detach().cpu().numpy(),
results[-1].flatten().detach().cpu().numpy()])
y_pred = (y_pred - y_pred.min()) / (y_pred.max() - y_pred.min())
threshold = ROC(y_test, y_pred)
t, th = bf_search(y_pred, y_test, start=0, end=1, step_num=1000, display_freq=50)
| 2.375 | 2 |
src/demo/tasks.py | MexsonFernandes/AsynchronousTasks-Django-Celery-RabbitMQ-Redis | 1 | 5450 | from __future__ import absolute_import, unicode_literals
from dcs.celeryconf import app
import time
from django.core.mail import EmailMessage
@app.task(bind=True, ignore_result=False, max_retries=3)
def demo_task1(self):
result = {
'val1': 1,
'val2': 2,
'val3': 3,
}
print("hellp")
from_email = '<EMAIL>'
to_list = ['<EMAIL>',]
sendemail = EmailMessage("Message received!!!", "Hello test", str(from_email), to_list)
sendemail.send()
return result
| 1.945313 | 2 |
pytorch_translate/models/__init__.py | Ayansam1152/translate | 748 | 5451 | <reponame>Ayansam1152/translate
#!/usr/bin/env python3
import importlib
import os
# automatically import any Python files in the models/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module("pytorch_translate.models." + model_name)
| 2.28125 | 2 |
app/config/secure.py | mapeimapei/awesome-flask-webapp | 2 | 5452 | # -*- coding: utf-8 -*-
__author__ = '带土'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:[email protected]:3306/awesome'
SECRET_KEY = '<KEY>'
# Email configuration
MAIL_SERVER = 'smtp.exmail.qq.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TSL = False
MAIL_USERNAME = '<EMAIL>'
MAIL_PASSWORD = '<PASSWORD>'
MAIL_SUBJECT_PREFIX = '[鱼书]'
MAIL_SENDER = '鱼书 <<EMAIL>>'
# Enable recording of database query performance
SQLALCHEMY_RECORD_QUERIES = True
# Threshold for the query performance check
DATABASE_QUERY_TIMEOUT = 0.5
SQLALCHEMY_TRACK_MODIFICATIONS = True
WTF_CSRF_CHECK_DEFAULT = False
SQLALCHEMY_ECHO = True
from datetime import timedelta
REMEMBER_COOKIE_DURATION = timedelta(days=30)
PROXY_API = 'http://ip.yushu.im/get'
# PERMANENT_SESSION_LIFETIME = 3600
| 2 | 2 |
src/users/migrations/0014_auto_20200801_1008.py | aliharby12/Simple-vezeeta-project | 0 | 5453 | <reponame>aliharby12/Simple-vezeeta-project<gh_stars>0
# Generated by Django 2.2 on 2020-08-01 08:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0013_auto_20200731_1810'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='Specialist_doctor',
field=models.CharField(choices=[('جراحه اطفال', 'جراحه اططفال'), ('تخسيس', 'تخسيس'), ('عظام', 'عظام'), ('جراحه عامه', 'جراحه عامه'), ('اطفال', 'اطفال'), ('اورام', 'اورام'), ('مخ واعصاب', 'مخ واعصاب'), ('انف واذن', 'انف واذن'), ('امراض دم', 'امراض دم'), ('باطنة', 'باطنه'), ('اسنان', 'اسنان'), ('جراحه تجميل', 'جراحه تجميل'), ('حميات', 'حميات'), ('نسا وتوليد', 'نسا وتوليد')], default='باطنه', max_length=255, verbose_name='التخصص'),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='اسم صاحب التعليق')),
('email', models.EmailField(max_length=254, verbose_name='البريد الالكتروني')),
('body', models.TextField(verbose_name='محتوى التعليق')),
('comment_date', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='users.Profile')),
],
),
]
| 1.75 | 2 |
caller_v3/app/api/v1/docker.py | tienthegainz/pipeline_executor_docker_call | 0 | 5454 | from typing import Any, List, Callable
from fastapi import APIRouter, HTTPException, status, BackgroundTasks
from app import schemas
from app.core import docker_client
import json
from copy import deepcopy
router = APIRouter()
@router.get("/images", response_model=schemas.DockerImageRespond)
def get_docker_image() -> Any:
images_list = docker_client.images.list(all=True)
return {
'images': [{'id': image.short_id, 'tags': image.tags} for image in images_list if image.tags]
}
@router.get("/volumes", response_model=schemas.DockerVolumeRespond)
def get_docker_volume() -> Any:
volumes_list = docker_client.volumes.list()
return {
'volumes': [{'id': volume.short_id, 'name': volume.name} for volume in volumes_list]
}
| 2.3125 | 2 |
keras/models.py | kalyc/keras-apache-mxnet | 300 | 5455 | <filename>keras/models.py
"""Model-related utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import backend as K
from .utils.generic_utils import has_arg
from .utils.generic_utils import to_list
from .engine.input_layer import Input
from .engine.input_layer import InputLayer
from .engine.training import Model
from .engine.sequential import Sequential
from .engine.saving import save_model
from .engine.saving import load_model
from .engine.saving import model_from_config
from .engine.saving import model_from_yaml
from .engine.saving import model_from_json
from .engine.saving import save_mxnet_model
try:
import h5py
except ImportError:
h5py = None
def _clone_functional_model(model, input_tensors=None):
"""Clone a functional `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Model`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Model):
raise ValueError('Expected `model` argument '
'to be a `Model` instance, got ', model)
if isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a functional `Model` instance, '
'got a `Sequential` instance instead:', model)
layer_map = {} # Cache for created layers.
tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)}
if input_tensors is None:
# Create placeholders to build the model on top of.
input_layers = []
input_tensors = []
for layer in model._input_layers:
input_tensor = Input(batch_shape=layer.batch_input_shape,
dtype=layer.dtype,
sparse=layer.sparse,
name=layer.name)
input_tensors.append(input_tensor)
# Cache newly created input layer.
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[layer] = newly_created_input_layer
for _original, _cloned in zip(model._input_layers, input_layers):
layer_map[_original] = _cloned
else:
# Make sure that all input tensors come from a Keras layer.
# If tensor comes from an input layer: cache the input layer.
input_tensors = to_list(input_tensors)
_input_tensors = []
for i, x in enumerate(input_tensors):
if not K.is_keras_tensor(x):
name = model._input_layers[i].name
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + name)
_input_tensors.append(input_tensor)
# Cache newly created input layer.
original_input_layer = x._keras_history[0]
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[original_input_layer] = newly_created_input_layer
else:
_input_tensors.append(x)
input_tensors = _input_tensors
for x, y in zip(model.inputs, input_tensors):
tensor_map[x] = (y, None) # tensor, mask
    # Iterate over every node in the reference model, in depth order.
depth_keys = list(model._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = model._nodes_by_depth[depth]
for node in nodes:
# Recover the corresponding layer.
layer = node.outbound_layer
# Get or create layer.
if layer not in layer_map:
# Clone layer.
new_layer = layer.__class__.from_config(layer.get_config())
layer_map[layer] = new_layer
layer = new_layer
else:
# Reuse previously cloned layer.
layer = layer_map[layer]
# Don't call InputLayer multiple times.
if isinstance(layer, InputLayer):
continue
# Gather inputs to call the new layer.
reference_input_tensors = node.input_tensors
reference_output_tensors = node.output_tensors
# If all previous input tensors are available in tensor_map,
# then call node.inbound_layer on them.
computed_data = [] # List of tuples (input, mask).
for x in reference_input_tensors:
if x in tensor_map:
computed_data.append(tensor_map[x])
if len(computed_data) == len(reference_input_tensors):
# Call layer.
if node.arguments:
kwargs = node.arguments
else:
kwargs = {}
if len(computed_data) == 1:
computed_tensor, computed_mask = computed_data[0]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_mask
output_tensors = to_list(
layer(computed_tensor, **kwargs))
output_masks = to_list(
layer.compute_mask(computed_tensor,
computed_mask))
computed_tensors = [computed_tensor]
computed_masks = [computed_mask]
else:
computed_tensors = [x[0] for x in computed_data]
computed_masks = [x[1] for x in computed_data]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_masks
output_tensors = to_list(
layer(computed_tensors, **kwargs))
output_masks = to_list(
layer.compute_mask(computed_tensors,
computed_masks))
# Update tensor_map.
for x, y, mask in zip(reference_output_tensors,
output_tensors,
output_masks):
tensor_map[x] = (y, mask)
# Check that we did compute the model outputs,
# then instantiate a new model from inputs and outputs.
output_tensors = []
for x in model.outputs:
assert x in tensor_map, 'Could not compute output ' + str(x)
tensor, _ = tensor_map[x]
output_tensors.append(tensor)
return Model(input_tensors, output_tensors, name=model.name)
def _clone_sequential_model(model, input_tensors=None):
"""Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Sequential` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a `Sequential` model instance, '
'but got:', model)
def clone(layer):
return layer.__class__.from_config(layer.get_config())
layers = [clone(layer) for layer in model.layers]
if input_tensors is None:
return Sequential(layers=layers, name=model.name)
else:
if len(to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect '
' at most one tensor '
'as part of `input_tensors`.')
x = to_list(input_tensors)[0]
if K.is_keras_tensor(x):
origin_layer = x._keras_history[0]
if isinstance(origin_layer, InputLayer):
return Sequential(layers=[origin_layer] + layers,
name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top '
'of a tensor that comes from a Keras layer '
'other than an `InputLayer`. '
'Use the functional API instead.')
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history[0]
return Sequential(layers=[input_layer] + layers, name=model.name)
def clone_model(model, input_tensors=None):
"""Clone any `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Model`
(could be a functional model or a Sequential model).
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if isinstance(model, Sequential):
return _clone_sequential_model(model, input_tensors=input_tensors)
else:
return _clone_functional_model(model, input_tensors=input_tensors)
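
def _clone_model_example():
    """Minimal sketch (assumption: illustrative usage only, not part of the public API)."""
    from .layers import Dense

    inputs = Input(shape=(4,))
    outputs = Dense(2)(inputs)
    model = Model(inputs, outputs)
    clone = clone_model(model)                 # new layers, freshly initialized weights
    clone.set_weights(model.get_weights())     # optionally copy the original weights over
    return clone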
| 2.515625 | 3 |
pythonFiles/tests/testing_tools/adapter/test_functional.py | erinxocon/vscode-python | 0 | 5456 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import unicode_literals
import json
import os
import os.path
import subprocess
import sys
import unittest
import pytest
from ...__main__ import TESTING_TOOLS_ROOT
CWD = os.getcwd()
DATA_DIR = os.path.join(os.path.dirname(__file__), '.data')
SCRIPT = os.path.join(TESTING_TOOLS_ROOT, 'run_adapter.py')
def resolve_testroot(name):
projroot = os.path.join(DATA_DIR, name)
return projroot, os.path.join(projroot, 'tests')
def run_adapter(cmd, tool, *cliargs):
try:
return _run_adapter(cmd, tool, *cliargs)
except subprocess.CalledProcessError:
# Re-run pytest but print out stdout & stderr this time
try:
return _run_adapter(cmd, tool, *cliargs, hidestdio=False)
except subprocess.CalledProcessError as exc:
print(exc.output)
def _run_adapter(cmd, tool, *cliargs, **kwargs):
hidestdio = kwargs.pop('hidestdio', True)
assert not kwargs
kwds = {}
argv = [sys.executable, SCRIPT, cmd, tool, '--'] + list(cliargs)
if not hidestdio:
argv.insert(4, '--no-hide-stdio')
kwds['stderr'] = subprocess.STDOUT
argv.append('--cache-clear')
print('running {!r}'.format(' '.join(arg.rpartition(CWD + '/')[-1] for arg in argv)))
return subprocess.check_output(argv,
universal_newlines=True,
**kwds)
def fix_path(nodeid):
return nodeid.replace('/', os.path.sep)
def fix_test_order(tests):
if sys.version_info >= (3, 6):
return tests
fixed = []
curfile = None
group = []
for test in tests:
if (curfile or '???') not in test['id']:
fixed.extend(sorted(group, key=lambda t: t['id']))
group = []
curfile = test['id'].partition('.py::')[0] + '.py'
group.append(test)
fixed.extend(sorted(group, key=lambda t: t['id']))
return fixed
def fix_source(tests, testid, srcfile, lineno):
testid = fix_path(testid)
for test in tests:
if test['id'] == testid:
break
else:
raise KeyError('test {!r} not found'.format(testid))
if not srcfile:
srcfile = test['source'].rpartition(':')[0]
test['source'] = fix_path('{}:{}'.format(srcfile, lineno))
@pytest.mark.functional
class PytestTests(unittest.TestCase):
def complex(self, testroot):
results = COMPLEX.copy()
results['root'] = testroot
return [results]
def test_discover_simple(self):
projroot, testroot = resolve_testroot('simple')
out = run_adapter('discover', 'pytest',
'--rootdir', projroot,
testroot)
result = json.loads(out)
self.maxDiff = None
self.assertEqual(result, [{
'root': projroot,
'rootid': '.',
'parents': [
{'id': fix_path('./tests'),
'kind': 'folder',
'name': 'tests',
'parentid': '.',
},
{'id': fix_path('./tests/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests'),
},
],
'tests': [
{'id': fix_path('./tests/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/test_spam.py'),
},
],
}])
def test_discover_complex_default(self):
projroot, testroot = resolve_testroot('complex')
expected = self.complex(projroot)
expected[0]['tests'] = fix_test_order(expected[0]['tests'])
if sys.version_info < (3,):
decorated = [
'./tests/test_unittest.py::MyTests::test_skipped',
'./tests/test_unittest.py::MyTests::test_maybe_skipped',
'./tests/test_unittest.py::MyTests::test_maybe_not_skipped',
]
for testid in decorated:
fix_source(expected[0]['tests'], testid, None, 0)
out = run_adapter('discover', 'pytest',
'--rootdir', projroot,
testroot)
result = json.loads(out)
result[0]['tests'] = fix_test_order(result[0]['tests'])
self.maxDiff = None
self.assertEqual(result, expected)
def test_discover_complex_doctest(self):
projroot, _ = resolve_testroot('complex')
expected = self.complex(projroot)
# add in doctests from test suite
expected[0]['parents'].insert(3, {
'id': fix_path('./tests/test_doctest.py'),
'kind': 'file',
'name': 'test_doctest.py',
'parentid': fix_path('./tests'),
})
expected[0]['tests'].insert(2, {
'id': fix_path('./tests/test_doctest.py::tests.test_doctest'),
'name': 'tests.test_doctest',
'source': fix_path('./tests/test_doctest.py:1'),
'markers': [],
'parentid': fix_path('./tests/test_doctest.py'),
})
# add in doctests from non-test module
expected[0]['parents'].insert(0, {
'id': fix_path('./mod.py'),
'kind': 'file',
'name': 'mod.py',
'parentid': '.',
})
expected[0]['tests'] = [
{'id': fix_path('./mod.py::mod'),
'name': 'mod',
'source': fix_path('./mod.py:1'),
'markers': [],
'parentid': fix_path('./mod.py'),
},
{'id': fix_path('./mod.py::mod.Spam'),
'name': 'mod.Spam',
'source': fix_path('./mod.py:33'),
'markers': [],
'parentid': fix_path('./mod.py'),
},
{'id': fix_path('./mod.py::mod.Spam.eggs'),
'name': 'mod.Spam.eggs',
'source': fix_path('./mod.py:43'),
'markers': [],
'parentid': fix_path('./mod.py'),
},
{'id': fix_path('./mod.py::mod.square'),
'name': 'mod.square',
'source': fix_path('./mod.py:18'),
'markers': [],
'parentid': fix_path('./mod.py'),
},
] + expected[0]['tests']
expected[0]['tests'] = fix_test_order(expected[0]['tests'])
if sys.version_info < (3,):
decorated = [
'./tests/test_unittest.py::MyTests::test_skipped',
'./tests/test_unittest.py::MyTests::test_maybe_skipped',
'./tests/test_unittest.py::MyTests::test_maybe_not_skipped',
]
for testid in decorated:
fix_source(expected[0]['tests'], testid, None, 0)
out = run_adapter('discover', 'pytest',
'--rootdir', projroot,
'--doctest-modules',
projroot)
result = json.loads(out)
result[0]['tests'] = fix_test_order(result[0]['tests'])
self.maxDiff = None
self.assertEqual(result, expected)
def test_discover_not_found(self):
projroot, testroot = resolve_testroot('notests')
out = run_adapter('discover', 'pytest',
'--rootdir', projroot,
testroot)
result = json.loads(out)
self.maxDiff = None
self.assertEqual(result, [])
# TODO: Expect the following instead?
#self.assertEqual(result, [{
# 'root': projroot,
# 'rootid': '.',
# 'parents': [],
# 'tests': [],
# }])
COMPLEX = {
'root': None,
'rootid': '.',
'parents': [
#
{'id': fix_path('./tests'),
'kind': 'folder',
'name': 'tests',
'parentid': '.',
},
# +++
{'id': fix_path('./tests/test_42-43.py'),
'kind': 'file',
'name': 'test_42-43.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_42.py'),
'kind': 'file',
'name': 'test_42.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_doctest.txt'),
'kind': 'file',
'name': 'test_doctest.txt',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_foo.py'),
'kind': 'file',
'name': 'test_foo.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_mixed.py'),
'kind': 'file',
'name': 'test_mixed.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests'),
'kind': 'suite',
'name': 'MyTests',
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::TestMySuite'),
'kind': 'suite',
'name': 'TestMySuite',
'parentid': fix_path('./tests/test_mixed.py'),
},
# +++
{'id': fix_path('./tests/test_pytest.py'),
'kind': 'file',
'name': 'test_pytest.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_pytest.py::TestEggs'),
'kind': 'suite',
'name': 'TestEggs',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam'),
'kind': 'suite',
'name': 'TestParam',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py::TestParam'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll'),
'kind': 'suite',
'name': 'TestParamAll',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
'kind': 'function',
'name': 'test_spam_13',
'parentid': fix_path('./tests/test_pytest.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam'),
'kind': 'suite',
'name': 'TestSpam',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam'),
'kind': 'suite',
'name': 'TestHam',
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs'),
'kind': 'suite',
'name': 'TestEggs',
'parentid': fix_path('./tests/test_pytest.py::TestSpam::TestHam'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param'),
'kind': 'function',
'name': 'test_fixture_param',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_01'),
'kind': 'function',
'name': 'test_param_01',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_11'),
'kind': 'function',
'name': 'test_param_11',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers'),
'kind': 'function',
'name': 'test_param_13_markers',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
'kind': 'function',
'name': 'test_param_13_repeat',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
'kind': 'function',
'name': 'test_param_13_skipped',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13'),
'kind': 'function',
'name': 'test_param_23_13',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises'),
'kind': 'function',
'name': 'test_param_23_raises',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33'),
'kind': 'function',
'name': 'test_param_33',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids'),
'kind': 'function',
'name': 'test_param_33_ids',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture'),
'kind': 'function',
'name': 'test_param_fixture',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
'kind': 'function',
'name': 'test_param_mark_fixture',
'parentid': fix_path('./tests/test_pytest.py'),
},
# +++
{'id': fix_path('./tests/test_pytest_param.py'),
'kind': 'file',
'name': 'test_pytest_param.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll'),
'kind': 'suite',
'name': 'TestParamAll',
'parentid': fix_path('./tests/test_pytest_param.py'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
'kind': 'function',
'name': 'test_spam_13',
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest_param.py'),
},
# +++
{'id': fix_path('./tests/test_unittest.py'),
'kind': 'file',
'name': 'test_unittest.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests'),
'kind': 'suite',
'name': 'MyTests',
'parentid': fix_path('./tests/test_unittest.py'),
},
{'id': fix_path('./tests/test_unittest.py::OtherTests'),
'kind': 'suite',
'name': 'OtherTests',
'parentid': fix_path('./tests/test_unittest.py'),
},
##
{'id': fix_path('./tests/v'),
'kind': 'folder',
'name': 'v',
'parentid': fix_path('./tests'),
},
## +++
{'id': fix_path('./tests/v/test_eggs.py'),
'kind': 'file',
'name': 'test_eggs.py',
'parentid': fix_path('./tests/v'),
},
{'id': fix_path('./tests/v/test_eggs.py::TestSimple'),
'kind': 'suite',
'name': 'TestSimple',
'parentid': fix_path('./tests/v/test_eggs.py'),
},
## +++
{'id': fix_path('./tests/v/test_ham.py'),
'kind': 'file',
'name': 'test_ham.py',
'parentid': fix_path('./tests/v'),
},
## +++
{'id': fix_path('./tests/v/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/v'),
},
##
{'id': fix_path('./tests/w'),
'kind': 'folder',
'name': 'w',
'parentid': fix_path('./tests'),
},
## +++
{'id': fix_path('./tests/w/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/w'),
},
## +++
{'id': fix_path('./tests/w/test_spam_ex.py'),
'kind': 'file',
'name': 'test_spam_ex.py',
'parentid': fix_path('./tests/w'),
},
##
{'id': fix_path('./tests/x'),
'kind': 'folder',
'name': 'x',
'parentid': fix_path('./tests'),
},
###
{'id': fix_path('./tests/x/y'),
'kind': 'folder',
'name': 'y',
'parentid': fix_path('./tests/x'),
},
####
{'id': fix_path('./tests/x/y/z'),
'kind': 'folder',
'name': 'z',
'parentid': fix_path('./tests/x/y'),
},
#####
{'id': fix_path('./tests/x/y/z/a'),
'kind': 'folder',
'name': 'a',
'parentid': fix_path('./tests/x/y/z'),
},
##### +++
{'id': fix_path('./tests/x/y/z/a/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/x/y/z/a'),
},
#####
{'id': fix_path('./tests/x/y/z/b'),
'kind': 'folder',
'name': 'b',
'parentid': fix_path('./tests/x/y/z'),
},
##### +++
{'id': fix_path('./tests/x/y/z/b/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/x/y/z/b'),
},
#### +++
{'id': fix_path('./tests/x/y/z/test_ham.py'),
'kind': 'file',
'name': 'test_ham.py',
'parentid': fix_path('./tests/x/y/z'),
},
],
'tests': [
##########
{'id': fix_path('./tests/test_42-43.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_42-43.py:2'),
'markers': [],
'parentid': fix_path('./tests/test_42-43.py'),
},
#####
{'id': fix_path('./tests/test_42.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_42.py:2'),
'markers': [],
'parentid': fix_path('./tests/test_42.py'),
},
#####
{'id': fix_path('./tests/test_doctest.txt::test_doctest.txt'),
'name': 'test_doctest.txt',
'source': fix_path('./tests/test_doctest.txt:1'),
'markers': [],
'parentid': fix_path('./tests/test_doctest.txt'),
},
#####
{'id': fix_path('./tests/test_foo.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_foo.py:3'),
'markers': [],
'parentid': fix_path('./tests/test_foo.py'),
},
#####
{'id': fix_path('./tests/test_mixed.py::test_top_level'),
'name': 'test_top_level',
'source': fix_path('./tests/test_mixed.py:5'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_mixed.py:9'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::TestMySuite::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_mixed.py:16'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py::TestMySuite'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_mixed.py:22'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py::MyTests'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_mixed.py:25'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_mixed.py::MyTests'),
},
#####
{'id': fix_path('./tests/test_pytest.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:6'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_failure'),
'name': 'test_failure',
'source': fix_path('./tests/test_pytest.py:10'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_runtime_skipped'),
'name': 'test_runtime_skipped',
'source': fix_path('./tests/test_pytest.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_runtime_failed'),
'name': 'test_runtime_failed',
'source': fix_path('./tests/test_pytest.py:18'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_raises'),
'name': 'test_raises',
'source': fix_path('./tests/test_pytest.py:22'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_pytest.py:26'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_maybe_skipped'),
'name': 'test_maybe_skipped',
'source': fix_path('./tests/test_pytest.py:31'),
'markers': ['skip-if'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_known_failure'),
'name': 'test_known_failure',
'source': fix_path('./tests/test_pytest.py:36'),
'markers': ['expected-failure'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_warned'),
'name': 'test_warned',
'source': fix_path('./tests/test_pytest.py:41'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_custom_marker'),
'name': 'test_custom_marker',
'source': fix_path('./tests/test_pytest.py:46'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_multiple_markers'),
'name': 'test_multiple_markers',
'source': fix_path('./tests/test_pytest.py:51'),
'markers': ['expected-failure', 'skip', 'skip-if'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_1'),
'name': 'test_dynamic_1',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_2'),
'name': 'test_dynamic_2',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_3'),
'name': 'test_dynamic_3',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:70'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_pytest.py:73'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:81'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs'),
},
{'id': fix_path('./tests/test_pytest.py::TestEggs::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:93'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestEggs'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_01[]'),
'name': 'test_param_01[]',
'source': fix_path('./tests/test_pytest.py:103'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_01'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_11[x0]'),
'name': 'test_param_11[x0]',
'source': fix_path('./tests/test_pytest.py:108'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_11'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x0]'),
'name': 'test_param_13_repeat[x0]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x1]'),
'name': 'test_param_13_repeat[x1]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x2]'),
'name': 'test_param_13_repeat[x2]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[1-1-1]'),
'name': 'test_param_33[1-1-1]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[3-4-5]'),
'name': 'test_param_33[3-4-5]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[0-0-0]'),
'name': 'test_param_33[0-0-0]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v1]'),
'name': 'test_param_33_ids[v1]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v2]'),
'name': 'test_param_33_ids[v2]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v3]'),
'name': 'test_param_33_ids[v3]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z0]'),
'name': 'test_param_23_13[1-1-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z1]'),
'name': 'test_param_23_13[1-1-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z2]'),
'name': 'test_param_23_13[1-1-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z0]'),
'name': 'test_param_23_13[3-4-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z1]'),
'name': 'test_param_23_13[3-4-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z2]'),
'name': 'test_param_23_13[3-4-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z0]'),
'name': 'test_param_23_13[0-0-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z1]'),
'name': 'test_param_23_13[0-0-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z2]'),
'name': 'test_param_23_13[0-0-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[x0]'),
'name': 'test_param_13_markers[x0]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[???]'),
'name': 'test_param_13_markers[???]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[2]'),
'name': 'test_param_13_markers[2]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': ['expected-failure'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x0]'),
'name': 'test_param_13_skipped[x0]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x1]'),
'name': 'test_param_13_skipped[x1]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x2]'),
'name': 'test_param_13_skipped[x2]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[1-None]'),
'name': 'test_param_23_raises[1-None]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[1.0-None]'),
'name': 'test_param_23_raises[1.0-None]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[2-catch2]'),
'name': 'test_param_23_raises[2-catch2]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:164'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x0]'),
'name': 'test_spam_13[x0]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x1]'),
'name': 'test_spam_13[x1]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x2]'),
'name': 'test_spam_13[x2]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture'),
'name': 'test_fixture',
'source': fix_path('./tests/test_pytest.py:192'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_mark_fixture'),
'name': 'test_mark_fixture',
'source': fix_path('./tests/test_pytest.py:196'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x0]'),
'name': 'test_param_fixture[x0]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x1]'),
'name': 'test_param_fixture[x1]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x2]'),
'name': 'test_param_fixture[x2]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x0]'),
'name': 'test_param_mark_fixture[x0]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x1]'),
'name': 'test_param_mark_fixture[x1]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x2]'),
'name': 'test_param_mark_fixture[x2]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param[spam]'),
'name': 'test_fixture_param[spam]',
'source': fix_path('./tests/test_pytest.py:216'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_fixture_param'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param[eggs]'),
'name': 'test_fixture_param[eggs]',
'source': fix_path('./tests/test_pytest.py:216'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_fixture_param'),
},
######
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x0]'),
'name': 'test_spam_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x1]'),
'name': 'test_spam_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x2]'),
'name': 'test_spam_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
######
{'id': fix_path('./tests/test_unittest.py::MyTests::test_dynamic_'),
'name': 'test_dynamic_',
'source': fix_path('./tests/test_unittest.py:54'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_failure'),
'name': 'test_failure',
'source': fix_path('./tests/test_unittest.py:34'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_known_failure'),
'name': 'test_known_failure',
'source': fix_path('./tests/test_unittest.py:37'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_maybe_not_skipped'),
'name': 'test_maybe_not_skipped',
'source': fix_path('./tests/test_unittest.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_maybe_skipped'),
'name': 'test_maybe_skipped',
'source': fix_path('./tests/test_unittest.py:13'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_unittest.py:6'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_unittest.py:9'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_skipped_inside'),
'name': 'test_skipped_inside',
'source': fix_path('./tests/test_unittest.py:21'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_with_nested_subtests'),
'name': 'test_with_nested_subtests',
'source': fix_path('./tests/test_unittest.py:46'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_with_subtests'),
'name': 'test_with_subtests',
'source': fix_path('./tests/test_unittest.py:41'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::OtherTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_unittest.py:61'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::OtherTests'),
},
###########
{'id': fix_path('./tests/v/test_eggs.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_eggs.py'),
},
{'id': fix_path('./tests/v/test_eggs.py::TestSimple::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:8'),
'markers': [],
'parentid': fix_path('./tests/v/test_eggs.py::TestSimple'),
},
######
{'id': fix_path('./tests/v/test_ham.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_ham.py'),
},
{'id': fix_path('./tests/v/test_ham.py::test_not_hard'),
'name': 'test_not_hard',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_ham.py'),
},
######
{'id': fix_path('./tests/v/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_spam.py'),
},
{'id': fix_path('./tests/v/test_spam.py::test_simpler'),
'name': 'test_simpler',
'source': fix_path('./tests/v/test_spam.py:4'),
'markers': [],
'parentid': fix_path('./tests/v/test_spam.py'),
},
###########
{'id': fix_path('./tests/w/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/w/test_spam.py:4'),
'markers': [],
'parentid': fix_path('./tests/w/test_spam.py'),
},
{'id': fix_path('./tests/w/test_spam_ex.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/w/test_spam_ex.py:4'),
'markers': [],
'parentid': fix_path('./tests/w/test_spam_ex.py'),
},
###########
{'id': fix_path('./tests/x/y/z/test_ham.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/test_ham.py:2'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/test_ham.py'),
},
######
{'id': fix_path('./tests/x/y/z/a/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/a/test_spam.py:11'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/a/test_spam.py'),
},
{'id': fix_path('./tests/x/y/z/b/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/b/test_spam.py:7'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/b/test_spam.py'),
},
],
}
| 1.914063 | 2 |
mmgp/kernels/wavelet_slice.py | axdahl/SC-MMGP | 0 | 5457 | '''
Wavelet kernel.
Slicing allows the kernel to operate on a subset of features:
active_dims is an iterable of the feature dimensions to extract, and
input_dim must equal the number of dimensions selected by active_dims.
A short NumPy sketch of the covariance computation follows the class definition.
'''
import numpy as np
import tensorflow as tf
from .. import util
from . import kernel
from .kernel_extras import *
class WaveletSlice(kernel.Kernel):
def __init__(self, input_dim, active_dims=None, shift=0, scale = 0.01,
white=0.01, input_scaling=False):
if input_scaling:
self.shift = tf.Variable(shift * tf.ones([input_dim]))
self.scale = tf.Variable(scale * tf.ones([input_dim]))
else:
self.shift = tf.Variable([shift], dtype=tf.float32)
self.scale = tf.Variable([scale], dtype=tf.float32)
self.input_dim = input_dim
self.active_dims = active_dims
self.white = white
def kernel(self, points1, points2=None):
if points2 is None:
points2 = points1
white_noise = (self.white * util.eye(tf.shape(points1)[0]) +
0.1 * self.white * tf.ones( [tf.shape(points1)[0], tf.shape(points1)[0]]))
else:
white_noise = 0.01 * self.white * tf.ones( [tf.shape(points1)[0], tf.shape(points2)[0]] )
points1, points2 = dim_slice(self, points1, points2)
def h(x):
# Zhang wavelet
#return tf.cos(1.75*x)*tf.exp(-0.5*x**2)
# mexican hat wavelet
return (1-x**2)*tf.exp(-0.5*x**2)
kern1, kern2 = h((points1 - self.shift)/tf.exp(self.scale)), h((points2 - self.shift)/tf.exp(self.scale))
kern1, kern2 = tf.reduce_prod(kern1, axis=1), tf.reduce_prod(kern2, axis=1)
kern = tf.einsum('i,j->ij', kern1, kern2)
return kern + white_noise
def diag_kernel(self, points):
        def h(x):
            # Use the same mother wavelet as self.kernel so the diagonal is
            # consistent with the full covariance matrix.
            # Zhang wavelet
            #return tf.cos(1.75*x)*tf.exp(-0.5*x**2)
            # mexican hat wavelet
            return (1-x**2)*tf.exp(-0.5*x**2)
        points = dim_slice_diag(self, points)
        kern = tf.reduce_prod(h((points - self.shift)/tf.exp(self.scale)), axis=1) ** 2
        return kern + self.white
def get_params(self):
return [self.shift, self.scale]
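if __name__ == "__main__":
    # Illustrative sketch (added for clarity; an assumption, not part of the
    # original module). It mirrors in plain NumPy one covariance entry that
    # WaveletSlice.kernel builds with TensorFlow: a product of Mexican-hat
    # wavelet responses over the sliced feature dimensions of each point,
    # with the white-noise jitter omitted. All values below are arbitrary
    # demonstration values; the module itself is normally imported as part of
    # the mmgp package because of its relative imports.
    def mexican_hat(x):
        return (1 - x ** 2) * np.exp(-0.5 * x ** 2)
    shift_value, log_scale = 0.0, 0.0            # assumed demonstration parameters
    x1 = np.array([0.2, -0.5])                   # first input point (2 sliced features)
    x2 = np.array([0.1, 0.3])                    # second input point
    k1 = np.prod(mexican_hat((x1 - shift_value) / np.exp(log_scale)))
    k2 = np.prod(mexican_hat((x2 - shift_value) / np.exp(log_scale)))
    print("k(x1, x2) =", k1 * k2)                # one entry of tf.einsum('i,j->ij', kern1, kern2)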
| 2.6875 | 3 |
transform.py | latenite4/python3 | 0 | 5458 |
#!/usr/bin/python3
# program to parse PNG images and apply simple transformations (brightness, contrast, blur, edge detection)
# cmd: python3 transform.py
# you must have local input/ and output/ directories
#
# name: <NAME>
# date: 12/27/20
# cmdline: python transform.py cmd show image='city.png' --ulx=1 --uly=2 --brx=0 --bry=9
# python transform.py show city.png
# python transform.py blur city.png
from image import Image
import numpy as np
import time, os, argparse, string
#from tkinter import *
import imghdr
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def adjust_brightness(image,factor):
#scale each value by some amount
x_pixels, y_pixels,num_channels = image.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x,y,c] = image.array[x,y,c] * factor #non vectorized version
#vectorized version
# new_im.array = image.array * factor -# this is faster
return new_im
#adjust the contrast by increasing difference from user
#defined midpoint
def adjust_contrast(image, factor, mid=0.5):
x_pixels, y_pixels,num_channels = image.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x,y,c] = (image.array[x,y,c] -mid)* factor + mid #non vectorized version
#vectorized version
# new_im.array = (image.array - mid) * factor + mid
return new_im
# blur an image
def blur(image, k_size):
#k_size is the number of pixels to use when doing the blur
#k_size=3 would be above and below and left neighbor, right neighbor pixels, and diagonal
#neighbor pixels.
im = Image(filename = image)
x_pixels, y_pixels,num_channels = im.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
neighbor_range = k_size // 2
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
total = 0
for x_i in range(max(0,x-neighbor_range), min(new_im.x_pixels-1, x+neighbor_range)+1):
for y_i in range(max(0,y-neighbor_range), min(new_im.y_pixels-1, y+neighbor_range)+1):
            total += im.array[x_i, y_i, c]  # read pixel values from the loaded image
new_im.array[x,y,c] = total / (k_size **2) # average for kernel size in image
return new_im
def apply_kernel(image, kernel):
# the kernel should be a 2D array that represents the kernel we'll use!
    # for the sake of simplicity of this implementation, let's assume that the kernel is SQUARE
# for example the sobel x kernel (detecting horizontal edges) is as follows:
# [1 0 -1]
# [2 0 -2]
# [1 0 -1]
x_pixels, y_pixels, num_channels = image.array.shape # represents x, y pixels of image, # channels (R, G, B)
new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels) # making a new array to copy values to!
neighbor_range = kernel.shape[0] // 2 # this is a variable that tells us how many neighbors we actually look at (ie for a 3x3 kernel, this value should be 1)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
total = 0
for x_i in range(max(0,x-neighbor_range), min(new_im.x_pixels-1, x+neighbor_range)+1):
for y_i in range(max(0,y-neighbor_range), min(new_im.y_pixels-1, y+neighbor_range)+1):
x_k = x_i + neighbor_range - x
y_k = y_i + neighbor_range - y
kernel_val = kernel[x_k, y_k]
total += image.array[x_i, y_i, c] * kernel_val
new_im.array[x, y, c] = total
return new_im
def combine_images(image1, image2):
    # let's combine two images using the root of the sum of squares: value = sqrt(value_1**2 + value_2**2)
# size of image1 and image2 MUST be the same
x_pixels, y_pixels, num_channels = image1.array.shape # represents x, y pixels of image, # channels (R, G, B)
new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels) # making a new array to copy values to!
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x, y, c] = (image1.array[x, y, c]**2 + image2.array[x, y, c]**2)**0.5
return new_im
def show_image(in_image):
path="input/"
img = mpimg.imread(path+in_image)
imgplot = plt.imshow(img)
plt.show()
# check for necessary parts of the runtime environment
def check_env( in_image):
#check to verify that output/input dirs exist:
path = './output/'
is_path = os.path.isdir(path)
if not is_path:
print('local ./output dir must exist, cannot continue...')
print(quit)
quit()
#verify output is writeable
is_w = os.access(path, os.W_OK)
if not is_w:
print('local ./output dir must be writeable, cannot continue...')
print(quit)
quit()
path = './input/'
is_path = os.path.isdir(path)
if not is_path:
print('local ./input dir must exist, cannot continue...')
print(quit)
quit()
#verify input image
if in_image:
thefile = 'input/'+in_image
print('file path: '+thefile)
is_file = os.path.isfile(thefile)
if not is_file:
print(f'local ./input file {in_image} must exist, cannot continue...')
print(quit)
quit()
if imghdr.what(thefile) != 'png':
print('wrong image file type, cannot continue...')
print(quit)
quit()
def cmd():
print("routine cmd")
# setup command line args and parms
# optional args have --
# fixed (required args do not have --)
def arg_init():
parser = argparse.ArgumentParser(description='Process an image.')
parser.add_argument("cmd",help="command to this program",type=str)
parser.add_argument("image",help="input image name for the command",type=str)
parser.add_argument("--ulx",action='store_true',help="upperleft x in image")
parser.add_argument("--uly",action='store_true',help="upperleft y in image")
parser.add_argument("--brx",action='store_true',help="bottomright x in image")
parser.add_argument("--bry",action='store_true',help="bottomright y in image")
group = parser.add_mutually_exclusive_group()
group.add_argument('--v', action='store_true',help="add more text output")
group.add_argument('--q', action='store_true',help="minimal output")
args = parser.parse_args()
print(args.image)
#if args.cmd != "show" and args.cmd != "blur":
return args
#def show_image(filename):
if __name__ == '__main__':
args = arg_init()
check_env(args.image)
lake = Image(filename = 'lake.png')
city = Image(filename='city.png')
start_time = time.time()
# brightened_im = adjust_brightness(lake, 1.7)
# brightened_im.write_image('brightened.png')
# darkened_im = adjust_brightness(lake, 0.3)
# darkened_im.write_image('darkened.png')
# incr_contrast = adjust_contrast(lake, 2,0.5)
# incr_contrast.write_image('incr_contrast.png')
# decr_contrast = adjust_contrast(lake, 0.5,0.5)
# decr_contrast.write_image('decr_contrast.png')
# blur_3 = blur(city,3)
# blur_3.write_image('blur_k3.png')
# blur_15 = blur(city,15)
# blur_15.write_image('blur_k15.png')
# let's apply a sobel kernel on the x and y axis
# sobel_x = apply_kernel(city, np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
# sobel_x.write_image('edge_x.png')
# sobel_y = apply_kernel(city, np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]))
# sobel_y.write_image('edge_y.png')
# # this will show x and y edges
# sobel_xy = combine_images(sobel_x, sobel_y)
# sobel_xy.write_image('edge_xy.png')
if args.cmd == "show" and args.image:
show_image(args.image)
if args.cmd == "blur" and args.image:
      blur_15 = blur(args.image, 15)
      out_name = args.image + 'blur_k15.png'
      blur_15.write_image(out_name)
      show_image(out_name)
if args.v:
print(f'total execution duration: {time.time() - start_time}s')
| 3.5 | 4 |
plugins/wyr.py | Jeglet/pcbot | 0 | 5459 |
""" Would you rather? This plugin includes would you rather functionality
"""
import asyncio
import random
import re
import discord
import bot
import plugins
from pcbot import Config
client = plugins.client # type: bot.Client
db = Config("would-you-rather", data=dict(timeout=10, responses=["**{name}** would **{choice}**!"], questions=[]),
pretty=True)
command_pattern = re.compile(r"(.+)(?:\s+or|\s*,)\s+([^?]+)\?*")
sessions = set() # All running would you rather's are in this set
@plugins.argument("{open}option ...{close} or/, {open}other option ...{close}[?]", allow_spaces=True)
async def options(arg):
""" Command argument for receiving two options. """
match = command_pattern.match(arg)
assert match
assert not match.group(1).lower() == match.group(2).lower(), "**The choices cannot be the same.**"
return match.group(1), match.group(2)
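# Illustrative examples (not from the original source) of strings accepted by
# command_pattern above: "tea or coffee?" yields the options ("tea", "coffee"),
# and "sleep in, get up early" yields ("sleep in", "get up early").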
def get_choice(choices: list, choice: str):
""" Get the chosen option. This accept 1 and 2 as numbers. """
if choice == "1":
return 0
if choice == "2":
return 1
choices = list(map(str.lower, choices))
words = list(map(str.split, choices))
# Go through all words in the given message, and find any words unique to a choice
for word in choice.lower().split():
if word in words[0] and word not in words[1]:
return 0
elif word in words[1] and word not in words[0]:
return 1
# Invalid choice
return None
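# Illustrative examples (not from the original source): with choices
# ("lie", "be lied to"), get_choice returns 0 for the reply "1" or any reply
# containing the unique word "lie", 1 for "2" or a reply containing "lied" or
# "to", and None when the reply contains no word unique to either option.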
@plugins.command(aliases="wyr rather either")
async def wouldyourather(message: discord.Message, opt: options = None):
""" Ask the bot if he would rather, or have the bot ask you.
**Examples:**
Registering a choice: `!wouldyourather lie or be lied to`
Asking the bot: `!wouldyourather`"""
# If there are no options, the bot will ask the questions (if there are any to choose from)
if opt is None:
assert message.channel.id not in sessions, "**A would you rather session is already in progress.**"
sessions.add(message.channel.id)
assert db.data["questions"], "**There are ZERO questions saved. Ask me one!**"
question = random.choice(db.data["questions"])
choices = question["choices"]
await client.say(message, "Would you rather **{}** or **{}**?".format(*choices))
timeout = db.data["timeout"]
replied = []
# Wait for replies from anyone in the channel
while True:
def check(m):
return m.channel == message.channel and m.author not in replied
try:
reply = await client.wait_for_message(timeout=timeout, check=check)
# Break on timeout
except asyncio.TimeoutError:
break
# Check if the choice is valid
choice = get_choice(choices, reply.content)
if choice is None:
continue
# Register that this author has replied
replied.append(reply.author)
# Update the answers in the DB
# We don't care about multiples, just the amount (yes it will probably be biased)
question["answers"][choice] += 1
name = reply.author.display_name
response = random.choice(db.data["responses"]).format(name=name, NAME=name.upper(),
choice=choices[choice])
await client.say(message, response)
# Say the total tallies
await client.say(message, "A total of {0} would **{2}**, while {1} would **{3}**!".format(
*question["answers"], *choices))
await db.asyncsave()
sessions.remove(message.channel.id)
# Otherwise, the member asked a question to the bot
else:
db.data["questions"].append(dict(
choices=list(opt),
answers=[0, 0]
))
await db.asyncsave()
answer = random.choice(opt)
await client.say(message, "**I would {}**!".format(answer))
@wouldyourather.command(aliases="delete", owner=True)
async def remove(message: discord.Message, opt: options):
""" Remove a wouldyourather question with the given options. """
for q in db.data["questions"]:
if q["choices"][0] == opt[0] and q["choices"][1] == opt[1]:
db.data["questions"].remove(q)
await db.asyncsave()
await client.say(message, "**Entry removed.**")
break
else:
await client.say(message, "**Could not find the question.**")
| 3.265625 | 3 |
suit_tool/argparser.py | bergzand/suit-manifest-generator | 16 | 5460 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright 2019-2020 ARM Limited or its affiliates
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import sys, argparse, os
from suit_tool import __version__
from suit_tool import keygen
from suit_tool import get_pubkey
import json
import re
def str_to_component(s):
types = {
'file' : ('file', lambda x : str(x.strip('"'))),
# 'desc' : ('component-description', lambda x : str(x.strip('"'))),
'inst' : ('install-id', lambda x : [ str(y) for y in eval(x) ]),
'uri' : ('uri', lambda x : str(x.strip('"')))
}
d = {types[k][0]:types[k][1](v) for k,v in [ re.split(r'=',e, maxsplit=1) for e in re.split(r''',\s*(?=["']?[a-zA-Z0-9_-]+["']?=)''', s)]}
return d
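# Illustrative example (not from the original source): calling str_to_component
# on 'file="fw.bin",inst=["0", "fw"],uri="http://example.com/fw.bin"' yields
# {'file': 'fw.bin', 'install-id': ['0', 'fw'], 'uri': 'http://example.com/fw.bin'}.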
class MainArgumentParser(object):
def __init__(self):
self.parser = self._make_parser()
def _make_parser(self):
parser = argparse.ArgumentParser(description = 'Create or transform a manifest.'
' Use {} [command] -h for help on each command.'.format(sys.argv[0]))
# Add all top-level commands
parser.add_argument('-l', '--log-level', choices=['debug','info','warning','exception'], default='info',
help='Set the verbosity level of console output.')
parser.add_argument('--version', action='version', version=__version__,
help='display the version'
)
subparsers = parser.add_subparsers(dest="action")
subparsers.required = True
create_parser = subparsers.add_parser('create', help='Create a new manifest')
# create_parser.add_argument('-v', '--manifest-version', choices=['1'], default='1')
create_parser.add_argument('-i', '--input-file', metavar='FILE', type=argparse.FileType('r'),
            help='An input file describing the update. The file must be formatted as JSON. The overall structure is described in README.')
create_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
create_parser.add_argument('-f', '--format', metavar='FMT', choices=['suit', 'suit-debug', 'json'], default='suit')
create_parser.add_argument('-s', '--severable', action='store_true', help='Convert large elements to severable fields.')
create_parser.add_argument('-c', '--add-component', action='append', type=str_to_component, dest='components', default=[])
sign_parser = subparsers.add_parser('sign', help='Sign a manifest')
sign_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
sign_parser.add_argument('-k', '--private-key', metavar='FILE', type=argparse.FileType('rb'), required=True)
sign_parser.add_argument('-i', '--key-id', metavar='ID', type=str)
sign_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
parse_parser = subparsers.add_parser('parse', help='Parse a manifest')
parse_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
parse_parser.add_argument('-j', '--json-output', default=False, action='store_true', dest='json')
get_pubkey_parser = subparsers.add_parser('pubkey', help='Get the public key for a supplied private key.')
get_pubkey_parser.add_argument('-k', '--private-key', metavar='FILE', type=argparse.FileType('rb'), required=True)
get_pubkey_parser.add_argument('-f', '--output-format', choices=get_pubkey.OutputFormaters.keys(), default='pem')
get_pubkey_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), default=sys.stdout)
keygen_parser = subparsers.add_parser('keygen', help='Create a signing key. Not for production use')
keygen_parser.add_argument('-t', '--type', choices=keygen.KeyGenerators.keys(),
default='secp256r1', help='The type of the key to generate')
keygen_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), default=sys.stdout)
keygen_parser.add_argument('-f', '--output-format', choices=keygen.OutputFormaters.keys(), default='pem')
keygen_parser.add_argument('-l', '--levels', help='The number of hss-lms levels', type=int, default=2)
sever_parser = subparsers.add_parser('sever', help='Remove one or more severable elements from the manifest, if present.')
sever_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
sever_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
sever_parser.add_argument('-e', '--element', action='append', type=str, dest='elements', default=[])
sever_parser.add_argument('-a', '--all', action='store_true', default=False)
return parser
def parse_args(self, args=None):
self.options = self.parser.parse_args(args)
return self
| 2.015625 | 2 |
python/process/process_pool.py | y2ghost/study | 0 | 5461 |
import random
import time
from multiprocessing import Pool
def worker(name: str) -> None:
print(f'Started worker {name}')
worker_time = random.choice(range(1, 5))
time.sleep(worker_time)
print(f'{name} worker finished in {worker_time} seconds')
if __name__ == '__main__':
process_names = [f'computer_{i}' for i in range(15)]
    pool = Pool(processes=5)
    pool.map(worker, process_names)   # blocks until every worker name has been processed
    pool.close()                      # no more tasks will be submitted to the pool
    pool.join()                       # wait for the worker processes to exit
| 3.375 | 3 |
Project/Support-NotSourced/generic_pydicom_ns.py | mazalgarab-git/OSICpypy | 1 | 5462 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 11:48:59 2020
@author: mazal
"""
"""
=========================================
Support functions of pydicom (Not sourced)
=========================================
Purpose: Create support functions for the pydicom project
"""
"""
Test mode 1 | Basics
testMode = True
reportMode = False
Test mode 2 | Function Report
testMode = False
reportMode = True
Commissioning mode
testMode = False
reportMode = False
"""
testMode = False
reportMode = False
"""
=========================================
Function 1: Aleatory Sampling
=========================================
Purpose: Build an aleatory (random) sample of the Kaggle competition train dataset, given a sampling size
Raw code reference (see Tester.py): Test 5
"""
def trainDatasetSampler(samplingSize,testMode,reportMode):
    # Sampling size (% of the train population) is taken from the samplingSize argument
# Build a Sampling dataset | Phase 1: Determine: (1) the source path of the train data; (2) the location path of the sampling
import os
import pandas as pd
path_source = 'Y:/Kaggle_OSIC/2-Data/train/'
path_source_test = 'Y:/Kaggle_OSIC/2-Data/test/'
path_destination = 'Y:/Kaggle_OSIC/4-Data (Sampling)/train/'
path_destination_test = 'Y:/Kaggle_OSIC/4-Data (Sampling)/test/'
path_destination_outcome = 'Y:/Kaggle_OSIC/4-Data (Sampling)/outcome/'
# Build a Sampling dataset | Phase 2: Build dataset using the following features from train data: (1) ID; (2) # of DICOM files per ID (including percentage).
## Improvement: (3) # of other registers (not related to DICOM files)
os.chdir(path_source)
ID_list = os.listdir(path_source)
ID_list_range = len(ID_list)
DICOMFile_list = []
DICOMFileNumber_list = []
for i in range(0,ID_list_range):
path_ID = path_source + ID_list[i] + '/'
DICOMFile_list_unitary = os.listdir(path_ID)
DICOMFile_list = DICOMFile_list + [DICOMFile_list_unitary]
DICOMFileNumber_list_unitary = len(DICOMFile_list_unitary)
DICOMFileNumber_list = DICOMFileNumber_list + [DICOMFileNumber_list_unitary]
Population_Dictionary = {'ID':ID_list,'NumberDicomFiles':DICOMFileNumber_list,'DicomFIles':DICOMFile_list}
Population_DataFrame = pd.DataFrame(data = Population_Dictionary)
DICOMFilePercentage_list = []
TotalNumberDicomFiles = sum(Population_DataFrame.NumberDicomFiles)
for j in range(0,ID_list_range):
Percentage = Population_DataFrame['NumberDicomFiles'][j] / TotalNumberDicomFiles * 100
Percentage = round(Percentage,6)
DICOMFilePercentage_list = DICOMFilePercentage_list + [Percentage]
Population_Percentage_Dictionary = {'Percentage':DICOMFilePercentage_list}
Population_Percentage_DataFrame = pd.DataFrame(data=Population_Percentage_Dictionary)
Population_DataFrame = pd.concat([Population_DataFrame, Population_Percentage_DataFrame],axis=1, sort=False)
filename_population = 'populationDataset.csv'
path_population = path_destination_outcome
Population_DataFrame.to_csv(path_population+filename_population)
# Build a Sampling dataset | Phase 3: Get an aleatory grouping of IDs (just tags)
import random
Population_DataFrame_IndexToSample=[]
Population_DataFrame_IDToSample=[]
Population_DataFrame_PercentageToSample=[]
samplingSizeGoal = 0
while (samplingSizeGoal <= samplingSize):
randomNumberTermination = len(Population_DataFrame.ID)
randomNumber = random.randrange(0,randomNumberTermination,1)
if (randomNumber not in Population_DataFrame_IndexToSample):
Population_DataFrame_IndexToSample = Population_DataFrame_IndexToSample + [randomNumber]
ID_unitary = Population_DataFrame.ID[randomNumber]
Population_DataFrame_IDToSample = Population_DataFrame_IDToSample + [ID_unitary]
Percentage_unitary = Population_DataFrame.Percentage[randomNumber]
Population_DataFrame_PercentageToSample = Population_DataFrame_PercentageToSample + [Percentage_unitary]
samplingSize_unitary = Population_DataFrame.Percentage[randomNumber]
samplingSizeGoal = samplingSizeGoal + samplingSize_unitary
samplingDataset_Dictionary = {'Index':Population_DataFrame_IndexToSample,'ID':Population_DataFrame_IDToSample,'Percentage':Population_DataFrame_PercentageToSample}
samplingDataset_DataFrame = pd.DataFrame(data=samplingDataset_Dictionary)
filename_sampling = 'samplingDataset.csv'
path_sampling = path_destination_outcome
samplingDataset_DataFrame.to_csv(path_sampling+filename_sampling)
# Build a Sampling dataset | Phase 3: Get train dataset (an aleatory grouping of IDs; tree-copy task)
from distutils.dir_util import create_tree
from distutils.dir_util import remove_tree
from distutils.dir_util import copy_tree
remove_tree(path_destination)
create_tree(path_destination,[])
if testMode == True:
print("=========================================")
print("Building the Sampling Dataset given the Train Dataset of Kaggle for competition")
print("=========================================")
for k in Population_DataFrame_IDToSample:
path_source_unitary = path_source + k + '/'
path_destination_unitary = path_destination + k + '/'
create_tree(path_destination_unitary,[])
copy_tree(path_source_unitary,path_destination_unitary)
if testMode == True: print("ID tree copied: ",k)
# Build a Sampling dataset | Phase 4: Get test dataset (tree-copy task)
## Assumption: The complete test dataset is copied.
from distutils.dir_util import create_tree
from distutils.dir_util import remove_tree
from distutils.dir_util import copy_tree
remove_tree(path_destination_test)
create_tree(path_destination_test,[])
if testMode == True:
print("=========================================")
print("Building the Test Dataset given the Test Dataset of Kaggle for competition")
print("=========================================")
IDList_test = os.listdir(path_source_test)
for l in IDList_test:
path_source_unitary = path_source + l + '/'
path_destination_unitary = path_destination_test + l + '/'
create_tree(path_destination_unitary,[])
copy_tree(path_source_unitary,path_destination_unitary)
if testMode == True: print("ID tree copied: ",l)
if (testMode == False and reportMode == True):
from datetime import date
reportDate = date.today()
print("=========================================")
print("Function Report | Date:",reportDate.year,'/',reportDate.month,'/',reportDate.day,'/' )
print("=========================================")
print("Function: trainDatasetSampler(samplingSize,testMode)")
print("=========================================")
print("(1) Inputs")
print("=========================================")
print("-Sampling Size :", samplingSize, "%")
print("-Test Mode : False")
print("=========================================")
print("(2) Outputs")
print("=========================================")
print("-Type of sample: Aleatory based on IDs")
print("-Train dataset percentage to sample (base): ", round(abs(samplingSize),6),"%")
print("-Train dataset percentage to sample (adjustment): ", round(abs(samplingSizeGoal-samplingSize),6),"%")
print("-Train dataset percentage to sample (fitted): ", round(samplingSizeGoal,6),"%")
print("-Population of Train dataset (just information) available in file: ", filename_population)
print("-Sample of Train dataset (just information) available in file: ", filename_sampling)
print("=========================================")
print("(2) Outcomes:")
print("=========================================")
print("Being the outcome expressed under the variable result, outcomes are as follows:")
print("result[0] -> Dataframe for Population")
print("result[1] -> Dataframe for Sample")
print("result[2] -> Test Mode")
print("result[3] -> Rerport Mode")
print("=========================================")
return Population_DataFrame, samplingDataset_DataFrame, testMode, reportMode
if testMode == True:
samplingSize = 5
resultFunction1 = trainDatasetSampler(samplingSize,testMode,reportMode)
print("=========================================")
print("Population dataset:")
print("=========================================")
print(resultFunction1[0])
print("=========================================")
print("Population dataset:")
print("=========================================")
print(resultFunction1[1])
print("=========================================")
print("Test result Function 1: Success")
print("=========================================")
"""
=========================================
Function 2: Submission Builder
=========================================
Purpose: Build a submission CSV file
Raw code reference (see Tester.py): Test 8
"""
def SubmissionBuilder(ProductType,filename,testMode):
import os
import pandas as pd
# Set ProductType
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
# Set productType and splitType
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set outcome
path_outcome = path_ProductType + 'outcome/'
# Get raw data as a DataFrame
os.chdir(path_outcome)
    rawFile_DataFrame = pd.read_csv(filename)  # raw results file name supplied by the caller
# Get submission file template as a DataFrame
os.chdir(path_ProductType)
submissionFile_DataFrame = pd.read_csv('sample_submission.csv')
# Get submission data as required in submission file
submissionNumber_range = len(rawFile_DataFrame.index)
IDcases_List = submissionFile_DataFrame.Patient_Week.copy()
IDcases_List = IDcases_List[0:5]
IDcases_List_range = len(IDcases_List)
for i in range (0,IDcases_List_range):
IDcases_List[i] = IDcases_List[i][:-4]
# Get submission data as required in submission file | FVC
FVCDataList = []
for k in range(0,submissionNumber_range):
for j in IDcases_List:
# Get datum in raw data
IDlabel_rawFile = str(j)+str('_FVC')
datum = rawFile_DataFrame[IDlabel_rawFile][k]
datum = round(datum,0)
# Set datum in submission file
FVCDataList = FVCDataList + [datum]
submissionFile_DataFrame['FVC'] = FVCDataList
# Get submission data as required in submission file | Confidence
CONDataList = []
for k in range(0,submissionNumber_range):
for j in IDcases_List:
# Get datum in raw data
IDlabel_rawFile = str(j)+str('_CON')
datum = rawFile_DataFrame[IDlabel_rawFile][k]
datum = round(datum,0)
# Set datum in submission file
CONDataList = CONDataList + [datum]
submissionFile_DataFrame['Confidence'] = CONDataList
# Save file | Get directory
path_destination = path_outcome+'submissions/'
try:
os.chdir(path_destination)
GetCreation = True
except FileNotFoundError:
GetCreation = False
if GetCreation == False:
from distutils.dir_util import mkpath
mkpath(path_destination)
os.chdir(path_destination)
submissionList = os.listdir(path_destination)
number = len(submissionList)
filename = 'submission_'+str(number+1)+'.csv'
submissionFile_DataFrame.to_csv(filename, index=False)
return submissionFile_DataFrame, filename, testMode
if testMode == True:
ProductType = 'population'
filename = 'submissionRawFile_2020_09_19.csv'
resultFunction2 = SubmissionBuilder(ProductType,filename,testMode)
print("=========================================")
print("Product Type:")
print("=========================================")
print(ProductType)
print("=========================================")
print("Submission File saved as:")
print("=========================================")
print(resultFunction2[1])
print("=========================================")
print("Test result Function 2: Success")
print("=========================================")
"""
=========================================
Function 3: Dataset builder (Stacking solution case) to process with ML models
=========================================
Purpose: Build an input dataset to be processed with a stacking solution
Raw code reference (see Tester.py): Test 15
"""
def stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode):
# Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set working directory
import os
os.chdir(path_ProductType)
# Get train dataset and test dataset
import pandas as pd
filename_trainDataset = 'train.csv'
train_dataset = pd.read_csv(path_ProductType+filename_trainDataset)
filename_testDataset = 'test.csv'
test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
# Get submission dataset (template)
import numpy as np
path_resources = 'Y:/Kaggle_OSIC/3-Data (Prototype)/resources/'
if (PydicomMode == False):
filename_submissionDataset = 'submissionInputDataset.csv'
else:
filename_submissionDataset = 'submissionInputDataset_pydicom.csv'
submission_dataset = pd.read_csv(path_resources+filename_submissionDataset)
submission_dataset = submission_dataset.replace(np.nan,'iNaN')
# Adjust train dataset | Phase 1: Get ID list of the test dataset
IDList = list(test_dataset.Patient)
# Adjust train dataset | Phase 2: Get submission instances from train dataset
instancesPopulation = len(train_dataset.Patient)
indexList = []
for i in IDList:
for j in range(0,instancesPopulation):
if i == train_dataset.Patient[j]:
indexToInclude = train_dataset.index[j]
indexList = indexList + [indexToInclude]
# Adjust train dataset | Phase 3: Create an adjusted train dataset | a. Remove test instances from train dataset and reset index
    train_dataset_adjusted = train_dataset.drop(indexList)
    train_dataset_adjusted = train_dataset_adjusted.reset_index(drop=True)
# Adjust train dataset | Phase 3: Create an adjusted train dataset | b. Get Transferring data from train dataset
instanceToTrasferList_index = []
for k in range(0,instancesPopulation):
for l in IDList:
if train_dataset.Patient[k] == l:
instanceToTransfer_Index = train_dataset.index[k]
instanceToTrasferList_index = instanceToTrasferList_index + [instanceToTransfer_Index]
train_dataset_instancesToTransfer = train_dataset.take(instanceToTrasferList_index)
    train_dataset_instancesToTransfer = train_dataset_instancesToTransfer.reset_index(drop=True)
# Adjust train dataset | Phase 3: Create an adjusted train dataset | c. Update the submission dataset with the transferring data in b.
submission_dataset_range = len(submission_dataset.Patient)
train_dataset_instancesToTransfer_range = len(train_dataset_instancesToTransfer.Patient)
Patient_List = []
Week_List = []
FVC_List = []
Percent_List = []
Age_List = []
Sex_List = []
SmokingStatus_List = []
for m in range (0,submission_dataset_range):
timesCopy = 0
if(submission_dataset.Patient[m] in IDList):
referenceWeek = submission_dataset.Weeks[m]
for n in range (0,train_dataset_instancesToTransfer_range):
if(train_dataset_instancesToTransfer.Patient[n] == submission_dataset.Patient[m] and train_dataset_instancesToTransfer.Weeks[n] == referenceWeek):
if (timesCopy == 0):
submission_dataset.FVC[m] = train_dataset_instancesToTransfer.FVC[n]
submission_dataset.Percent[m] = train_dataset_instancesToTransfer.Percent[n]
submission_dataset.Age[m] = train_dataset_instancesToTransfer.Age[n]
submission_dataset.Sex[m] = train_dataset_instancesToTransfer.Sex[n]
submission_dataset.SmokingStatus[m] = train_dataset_instancesToTransfer.SmokingStatus[n]
timesCopy = timesCopy + 1
else:
# Additional instances to include
Patient_List = Patient_List + [train_dataset_instancesToTransfer.Patient[n]]
Week_List = Week_List + [train_dataset_instancesToTransfer.Weeks[n]]
FVC_List = FVC_List + [train_dataset_instancesToTransfer.FVC[n]]
Percent_List = Percent_List + [train_dataset_instancesToTransfer.Percent[n]]
Age_List = Age_List + [train_dataset_instancesToTransfer.Age[n]]
Sex_List = Sex_List + [train_dataset_instancesToTransfer.Sex[n]]
SmokingStatus_List = SmokingStatus_List + [train_dataset_instancesToTransfer.SmokingStatus[n]]
# Adjust train dataset | Phase 3: Create an adjusted train dataset | d. Add common values to submission dataset given those from the test dataset (Features: Age, Sex, SmokingStatus)
submission_dataset_range = len(submission_dataset.Patient)
for o in range(0,submission_dataset_range):
if(submission_dataset.Patient[o] in IDList):
for p in range(0,train_dataset_instancesToTransfer_range):
if(submission_dataset.Patient[o] == train_dataset_instancesToTransfer.Patient[p]):
submission_dataset.Age[o] = train_dataset_instancesToTransfer.Age[p]
submission_dataset.Sex[o] = train_dataset_instancesToTransfer.Sex[p]
submission_dataset.SmokingStatus[o] = train_dataset_instancesToTransfer.SmokingStatus[p]
# Scenario to replace NaN values: Average FVC for a given Patient
averageFVC = train_dataset_instancesToTransfer.FVC[train_dataset_instancesToTransfer.Patient == train_dataset_instancesToTransfer.Patient[p]].mean()
submission_dataset.FVC[o] = averageFVC
# Adjust train dataset | Phase 4: Create an adjusted train dataset | e. Concatenate the submission dataset (and additional instance) and the adjusted train dataset
additionalDictionary = {submission_dataset.columns[0]:Patient_List,
submission_dataset.columns[1]:Week_List,
submission_dataset.columns[2]:FVC_List,
submission_dataset.columns[3]:Percent_List,
submission_dataset.columns[4]:Age_List,
submission_dataset.columns[5]:Sex_List,
submission_dataset.columns[6]:SmokingStatus_List}
additional_dataset = pd.DataFrame(data=additionalDictionary)
frames = [train_dataset_adjusted,submission_dataset,additional_dataset]
train_dataset_adjusted = pd.concat(frames)
train_dataset_adjusted = train_dataset_adjusted.reset_index()
train_dataset_adjusted = train_dataset_adjusted.drop(columns='index')
    # Adjust train dataset (with pydicom train dataset) | Phase 1: Get pydicom train dataset
if(PydicomMode == True):
filename_pydicom = 'train_pydicom.csv'
path_ProductType_pydicom = path_ProductType + 'outcome/'
train_dataset_pydicom = pd.read_csv(path_ProductType_pydicom + filename_pydicom)
    # Adjust train dataset (with pydicom train dataset) | Phase 2: Include values from train_adjusted_pydicom.py into adjusted train dataset
if(PydicomMode == True):
instancesToInclude_List = list(train_dataset_pydicom.Patient)
InstanceToInclude_Patient = i
newIndex = len(train_dataset_adjusted.Patient)
for i in instancesToInclude_List:
# Get instance to transfer
InstanceToInclude_Patient = i
InstanceToInclude_Week = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].Weeks)[0]
InstanceToInclude_indexType1_Exhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Exhalation)[0]
InstanceToInclude_indexType1_Inhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Inhalation)[0]
InstanceToInclude_ImageType = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].ImageType)[0]
# Put instance into train_dataset_adjusted DataFrame
if (0 in list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Weeks)):
# Get index
indexToComplete = list(train_dataset_adjusted[train_dataset_adjusted.Weeks == 0].Patient[train_dataset_adjusted.Patient == i].index)
# Complete instance
train_dataset_adjusted.indexType1_Exhalation[indexToComplete] = InstanceToInclude_indexType1_Exhalation
train_dataset_adjusted.indexType1_Inhalation[indexToComplete] = InstanceToInclude_indexType1_Inhalation
train_dataset_adjusted.ImageType[indexToComplete] = str(InstanceToInclude_ImageType)
else:
# Add new instance
## Get repeatable instances
repeatableInstance1 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].FVC)[0]
repeatableInstance2 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Percent)[0]
repeatableInstance3 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Age)[0]
repeatableInstance4 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Sex)[0]
repeatableInstance5 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].SmokingStatus)[0]
## Get Dictionary
DictionaryToInclude = {}
DictionaryToInclude['Patient'] = InstanceToInclude_Patient
DictionaryToInclude['Weeks'] = InstanceToInclude_Week
DictionaryToInclude['FVC'] = repeatableInstance1
DictionaryToInclude['Percent'] = repeatableInstance2
DictionaryToInclude['Age'] = repeatableInstance3
DictionaryToInclude['Sex'] = repeatableInstance4
DictionaryToInclude['SmokingStatus'] = repeatableInstance5
DictionaryToInclude['indexType1_Exhalation'] = InstanceToInclude_indexType1_Exhalation
DictionaryToInclude['indexType1_Inhalation'] = InstanceToInclude_indexType1_Inhalation
DictionaryToInclude['ImageType'] = str(InstanceToInclude_ImageType)
## Get DataFrame
DataFrameToInclude = pd.DataFrame(data = DictionaryToInclude, index=[newIndex])
newIndex = newIndex + 1
## Concatenate DataFrame
train_dataset_adjusted = pd.concat([train_dataset_adjusted, DataFrameToInclude])
# nan filling
train_dataset_adjusted = train_dataset_adjusted.replace('iNaN',np.nan)
    # Specifying dtype
    train_dataset_adjusted = train_dataset_adjusted.astype({'Patient': 'O', 'Weeks': 'float64',
                                                            'Percent': 'float64', 'Age': 'float64',
                                                            'Sex': 'O', 'SmokingStatus': 'O',
                                                            'FVC': 'float64'})
    if(PydicomMode == True):
        train_dataset_adjusted = train_dataset_adjusted.astype({'indexType1_Exhalation': 'float64',
                                                                'indexType1_Inhalation': 'float64',
                                                                'ImageType': 'O'})
# Get CSV file
path_output = path_ProductType +'outcome/'
if(PydicomMode == False):
filename_output = 'train_adjusted.csv'
else:
filename_output = 'train_adjusted_pydicom.csv'
train_dataset_adjusted.to_csv(path_output+filename_output)
# Function Result
resultFunction = train_dataset_adjusted,path_output,filename_output
# Report Mode
if reportMode == True:
print("=========================================")
print("Function Report")
print("=========================================")
print("DataFrame")
print("=========================================")
print(resultFunction[0])
print("=========================================")
print("Product Type: ", ProductType)
print("=========================================")
print("Pydicom Mode: ", PydicomMode)
print("=========================================")
print("Location of Input File:", resultFunction[1])
print("=========================================")
print("Input File saved as:", resultFunction[2])
print("=========================================")
print("Data type of the dataset")
print("=========================================")
print(resultFunction[0].dtypes)
print("=========================================")
print("Test result Function 3: Success")
print("=========================================")
return resultFunction
if testMode == True:
ProductType = 'prototype'
PydicomMode = True
reportMode = False
resultFunction3 = stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode)
print("=========================================")
print("Function Report")
print("=========================================")
print("DataFrame")
print("=========================================")
print(resultFunction3[0])
print("=========================================")
print("=========================================")
print("Product Type: ", ProductType)
print("=========================================")
print("Pydicom Mode: ", PydicomMode)
print("=========================================")
print("Location of Input File:", resultFunction3[1])
print("=========================================")
print("Input File saved as:", resultFunction3[2])
print("=========================================")
print("Data type of the dataset")
print("=========================================")
print(resultFunction3[0].dtypes)
print("=========================================")
print("Test result Function 3: Success")
print("=========================================")
"""
=========================================
Function 4: Submission dataset builder (Stacking solution case) after ML outcome
=========================================
Purpose: Build a submission CSV file (Stacking solution case)
Raw code reference (see Tester.py): Test 17
About the Shape Parameter: It amounts to c = 0.12607421874999922 for every instance in the object of concern. The c value has been computed
deeming the following data fitting scope: (1) Data: FVC predictions; (2) Probability density function as follows (statistical function
in scipy known as scipy.stats.loglaplace): loglaplace.pdf(x, c, loc=0, scale=1).
"""
def Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode):
# Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set working directory
import os
os.chdir(path_ProductType + 'outcome/')
# Get result data and test dataset
import pandas as pd
if(pydicomMode == True):
filename_resultDataset = 'result_pydicom.csv'
else:
filename_resultDataset = 'result.csv'
result_dataset = pd.read_csv(path_ProductType+'outcome/'+filename_resultDataset)
filename_testDataset = 'test.csv'
test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
# Get submission instances | Phase 1: Index
IDList = list(test_dataset.Patient)
IDList_index_dictionary = {}
for i in IDList:
itemToInclude = result_dataset.Patient[result_dataset.Patient==i].index
IDList_index_dictionary[i] = itemToInclude
# Get submission instances | Phase 2: Extract submission instances from result dataset
IDList_index = []
IDList_columns = ['Patient', 'Weeks', 'Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
for j in IDList: IDList_index = IDList_index + list(IDList_index_dictionary[j])
submission_dataset = result_dataset.loc[IDList_index]
# Get submission instances | Phase 3: Extract duplicated instances
submission_dataset = submission_dataset.drop_duplicates(subset=['Patient','Weeks'])
# Get submission instances | Phase 4: Sort submission instances by Weeks (ascending) and reset index
submission_dataset = submission_dataset.sort_values(by=['Weeks','Patient'])
submission_dataset = submission_dataset.reset_index()
submission_dataset = submission_dataset.drop(columns=['Unnamed: 0','index'])
# Get confidence measure | Phase 1: Get shape Parameter DataFrame by default
## When shapeParameter_DataFrame==[], parameter c = 0.126074 is assigned by default per model and ID
    if isinstance(shapeParameter_DataFrame, list) and len(shapeParameter_DataFrame) == 0:
shapeParameter_dictionary = {}
shapeParameter = 0.126074
MLModelList = IDList_columns[2:]
for l in MLModelList:
keyShapeParameter = 'c Parameter_'+l
shapeParameter_dictionary[keyShapeParameter] = [shapeParameter,shapeParameter,shapeParameter,shapeParameter,shapeParameter]
shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = IDList)
# Get confidence measure | Phase 2: Get standard-deviation-clipped per instance
## Metric - Part 1: standard_deviation_clipped = max(standard_deviation, 70)
## Build a DataFrame with standard-deviation-clipped values given an ID and a ML Model: standardDeviationClipped_DataFrame
standardDeviationClipped_DataFrame = shapeParameter_DataFrame.copy()
columnLabels = list(standardDeviationClipped_DataFrame.columns)
columnLabels_SDC_dictionary = {}
for i in columnLabels:
columnLabels_item ='SD_Clipped'+i[11:]
columnLabels_SDC_dictionary[i]=columnLabels_item
standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.rename(columns=columnLabels_SDC_dictionary)
import numpy as np
standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.replace(3,np.nan)
ID_List = list(standardDeviationClipped_DataFrame.index)
SDModel_List = list(standardDeviationClipped_DataFrame.columns)
CParameter_List = list(shapeParameter_DataFrame.columns)
numy = 0
from scipy.stats import loglaplace
for j in ID_List:
for k in SDModel_List:
itemToInclude = CParameter_List[numy]
c = shapeParameter_DataFrame[itemToInclude][j]
sd_LL = loglaplace.std(c, loc=0, scale=100)
standardDeviationClipped_DataFrame[k][j] = max(70,sd_LL) # j: index is ID | k: SD_Clipped_(ML Model)
numy = numy + 1
numy = 0
# Get confidence measure | Phase 3: Get metric axe per model: |FVC_true - FVC_predicted|
## Metric - Part 1: |FVC_true - FVC_pred|
if(pydicomMode == True):
variableNumber = 10
else:
variableNumber = 7
MLModelList = list(submission_dataset.columns[variableNumber:])
metric_dictionary = {}
for j in MLModelList:
metric_differential = abs(submission_dataset.FVC - submission_dataset[j])
metric_differential = list(metric_differential)
keyToInclude = 'metric_'+j
metric_dictionary[keyToInclude] = metric_differential
metric_DataFrame = pd.DataFrame(data=metric_dictionary)
# Get confidence measure | Phase 4: Get metric axe per model: min(|FVC_true - FVC_predicted|, 1000)
## metric per instance
## Metric - Part 2: min(|FVC_true - FVC_pred|,1000)
metricLabels = list(metric_DataFrame.columns)
instancesNumber = len(submission_dataset.index)
for i in metricLabels:
j = 0
while (j<instancesNumber):
metric_DataFrame[i][j] = min(metric_DataFrame[i][j],1000)
j = j+1
submission_dataset = submission_dataset.join(metric_DataFrame)
    # Get confidence measure | Phase 5: Get metric axe per model: (-1 * differential * 2^0.5 / SDC ) - ln(2^0.5 * SDC)
## metric per instance
## differential = min(|FVC_true - FVC_predicted|, 1000)
## SDC: Standard Deviation Clipped
## Metric - Part 2: min(|FVC_true - FVC_pred|,1000)
IDList = list(test_dataset.Patient)
SDModel_List = list(standardDeviationClipped_DataFrame.columns)
SDModel_index_List = list(standardDeviationClipped_DataFrame.index)
metric_lists = list(metric_DataFrame.columns)
metric_index_lists = list(metric_DataFrame.index)
submission_dataset_index_List = list(submission_dataset.index)
instancesNumber = len(submission_dataset_index_List)
indexPerID_dictionary = {}
### Step 1: Get index per ID to compute
for i in IDList:
listToInclude = list(submission_dataset.Patient[submission_dataset.Patient == i].index)
indexPerID_dictionary[i] = listToInclude
indexPerID_DataFrame = pd.DataFrame(data=indexPerID_dictionary)
### Step 3: Compute metric
import math
from math import log1p
for k in IDList:
for i in metric_lists:
for j in list(indexPerID_DataFrame[k]):
differential = submission_dataset[i][j]
SDC_Label = 'SD_Clipped_' + i[7:]
SDC = standardDeviationClipped_DataFrame[SDC_Label][k]
metric_part1 = -1* 2**0.5 * differential / SDC
                metric_part2 = -1 * math.log(2**0.5 * SDC)
metric = metric_part1 + metric_part2
submission_dataset[i][j] = metric
# Result function specification
resultFunction = submission_dataset,shapeParameter_DataFrame,standardDeviationClipped_DataFrame
# Get submission files | Phase 1: Get submission file template
filename = 'sample_submission.csv'
submissionFile = pd.read_csv(path_ProductType+filename)
## Get submission files | Phase 2: Create directory
try:
path_output = path_ProductType + 'submission/'
os.chdir(path_output)
except FileNotFoundError:
        import distutils.dir_util
path_output = path_ProductType + 'submission/'
distutils.dir_util.mkpath(path_output)
## Get submission files | Phase 3: Get correlative
files_list = os.listdir(path_output)
    try:
        numberList = [int(f[:-4]) for f in files_list if f.endswith('.csv') and f[:-4].isdigit()]
        nextNumber = max(numberList) + 1
    except ValueError:
        nextNumber = 0
## Get submission files | Phase 4: Get models to include and their corresponding metrics
ModelToInclude = IDList_columns[2:]
## Get submission files | Phase 5: Build Files
for i in ModelToInclude:
filename = 'sample_submission.csv'
submissionFile = pd.read_csv(path_ProductType+filename)
submissionFile_columns = list(submissionFile.columns)
fvc_array = np.array(submission_dataset[i])
confidence_array = np.array(submission_dataset['metric_'+i])
submissionFile['FVC'] = fvc_array
submissionFile['Confidence'] = confidence_array
filename_output = str(nextNumber)+'.csv'
path_output = path_ProductType +'submission/'
submissionFile.to_csv(path_output+filename_output,columns=submissionFile_columns,index=False)
nextNumber = nextNumber + 1
return resultFunction
if testMode == True:
# Set Product type
ProductType = 'prototype'
# ShapeParameter_Dataframe
example = False
if (example == True):
import pandas as pd
shapeParameter_IDList = ['ID00419637202311204720264','ID00421637202311550012437','ID00422637202311677017371','ID00423637202312137826377','ID00426637202313170790466']
c_List1 = [3,3,3,3,3]
c_List2 = [3,3,3,3,3]
c_List3 = [3,3,3,3,3]
c_List4 = [3,3,3,3,3]
shapeParameter_dictionary = {'Random Forest':c_List1, 'Lasso':c_List2, 'Gradient Boosting':c_List3, 'Stacking Regressor':c_List4}
shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = shapeParameter_IDList)
else:
shapeParameter_DataFrame = []
# Set Pydicom mode
pydicomMode = True
resultFunction4 = Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode)
print("=========================================")
print("Shape Parameter - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction4[1])
print("Standard Deviation Clipped - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction4[2])
print("=========================================")
print("Test result Function 4: Success")
print("=========================================")
"""
=========================================
Function 5: Get parameters given a must-usage of a log-laplace distribution (i.e. Laplace Log Likelihood)
=========================================
Purpose: Get shape parameter visualization for loglaplace
Raw code reference (see Tester.py): Test 17
"""
def shapeParameter_visualizer(ProductType,testMode):
import numpy as np
from scipy.stats import loglaplace
import matplotlib.pyplot as plt
fig, ax = plt.subplots(4, 5, sharex=False, sharey=False, figsize=(32, 24))
## Get IDs to test
import os
import pandas as pd
## Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
## Get probabilities from predicted values grouping by ID and Model
path = path_ProductType + 'outcome/'
filename = 'result.csv'
y_pred = pd.read_csv(path+filename)
## Get IDs to test
path = path_ProductType
filename = 'test.csv'
test_dataset = pd.read_csv(path+filename)
ID_List = list(test_dataset.Patient)
## Get models
model_List = ['Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
## Grouping task
k = 0
l = 0
for i in ID_List:
k = 0
for j in model_List:
# Data Fit task
#r = y_pred[y_pred.Patient==i][j]/sum(y_pred[y_pred.Patient==i][j])
r = y_pred[y_pred.Patient==i][j]
r = np.array(r)
c1, loc1, scale1 = loglaplace.fit(r,floc=0,fscale=1)
c = c1
# # Calculate a few first moments
# mean, var, skew, kurt = loglaplace.stats(c, moments='mvsk')
# Display the probability density function (pdf):
x = np.linspace(loglaplace.ppf(0.01, c), loglaplace.ppf(0.99, c), num=100)
ax[k,l].plot(x, loglaplace.pdf(x, c),'r-', lw=5, alpha=0.6, label='loglaplace pdf')
# Freeze the distribution and display the frozen pdf:
rv = loglaplace(c)
ax[k,l].plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Generate random numbers:
r = loglaplace.rvs(c1, loc=0, scale=1, size=1000)
# And compare the histogram:
#ax[k,l].hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax[k,l].legend(loc='best', frameon=False)
# Set limits
#ax[k,l].set_xlim(0,0.1)
#ax[k,l].set_ylim(0,4)
ax[k,l].set_xlabel('x')
ax[k,l].set_ylabel('f(x,c)')
# Check Accuracy
vals = loglaplace.ppf([0.001, 0.5, 0.999], c)
accuracy = np.allclose([0.001, 0.5, 0.999], loglaplace.cdf(vals, c))
# Returns True if two arrays are element-wise equal within a tolerance.
if(accuracy == True):
accuracy = 'Equal case'
else:
accuracy = 'Unequal case'
# Set title
title = str('Probability density function for loglaplace'+'\n'+i + '\n' + j + ' | Accuracy:'+accuracy)
ax[k,l].set_title(title)
k = k + 1
l = l + 1
plt.tight_layout()
plt.show()
resultFunction = c
return resultFunction
if testMode == True:
# Set Product type
ProductType = 'prototype'
# ShapeParameter_Dataframe
resultFunction5 = shapeParameter_visualizer(ProductType, testMode = True)
print("=========================================")
print("Shape Parameter - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction5)
print("=========================================")
print("Test result Function 4: Success")
print("=========================================")
# """
# =========================================
# Function : Dataset builder 2 (Stacking solution case) to process with ML models
# =========================================
# Purpose: Build an input dataset to be processed with an stacking solution but including Pydicom image-processing solution
# Raw code reference (see Tester.py): 15
# """
# def stacking_Dataset_Builder_PydicomSolution(productType, testMode):
# # Set Product Type and its corresponding path
# if ProductType == 'population':
# path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
# if ProductType == 'prototype':
# path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
# if ProductType == 'sampling':
# path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
| 2.484375 | 2 |
paperscraper/scrapers/keywords.py | ahmed-shariff/scraper | 1 | 5463 | <gh_stars>1-10
import re
regex = re.compile(r'[\n\r\t]')
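# Used by the scrapers below to strip embedded newlines/carriage returns/tabs from
# scraped keyword strings, e.g. regex.sub("", "Image\nProcessing") -> "ImageProcessing".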
def acm_digital_library(soup):
try:
keywords = set()
keywords_parent_ol = soup.find('ol', class_="rlist organizational-chart")
keywords_divs = keywords_parent_ol.findChildren('div', recursive=True)
for kw_parent in keywords_divs:
kw = kw_parent.text
keywords.add(regex.sub("", kw.split(",")[0]))
return list(keywords)
except Exception as e:
print(e)
return None
def graphics_interface_proceedings(soup):
return None
def ieee_explore(soup):
try:
keywords = set()
ggp_ul = soup.find('ul', class_="doc-keywords-list stats-keywords-list")
gp_li = ggp_ul.findChildren("li", class_="doc-keywords-list-item", recursive=False)
for p_li in gp_li:
if p_li.find('strong').text in ["IEEE Keywords", "INSPEC: Controlled Indexing", "INSPEC: Non-Controlled Indexing", "MeSH Terms"]:
for keywords_l in p_li.find('ul').findChildren("li", recursive=False):
a_tag = keywords_l.find("a", class_="stats-keywords-list-item")
if a_tag is not None:
keywords.add(str(regex.sub("", a_tag.text.split(",")[0])))
else:
keywords.add(str(regex.sub("", str(keywords_l.text).split(",")[0])))
return list(keywords)
except Exception as e:
print(e)
return None
def eurographics_digital_library(soup):
try:
keywords_set = set()
p_tablebody = soup.find('table', class_="detailtable").find("tbody")
p_trs = p_tablebody.findChildren('tr')
for tr in p_trs:
label = tr.find("td", class_="label-cell")
if label.text == "dc.subject":
keywords = tr.find("td", class_="word-break")
# e.g. CASE 1: ['Categories and Subject Descriptors (according to ACM CCS): I.4.1 [Image Processing and Computer Vision]: Enhancement-Filtering I.3.3 [Computer Graphics]: Picture/Image Generation-Bitmap and framebuffer operations']
# e.g. CASE 2 [TODO: Not taken care of yet] Categories and Subject Descriptors (according to ACM CCS): Information Interfaces And Presentation (e.g., HCI) [H.5.2]: User Interfaces-Graphical user interfaces (GUI)
# Step 1: Remove annoying substrings
# Step 2: Choose to take ONLY Categories, not the Subject Descriptors > Write a REGEX to take substrings between [].
# Step 3: Split the string by , or ; or :
to_replaces = ["CCS Concepts", "Categories and Subject Descriptors", "Categories and subject descriptors", "Categories and Subject Descriptors (according to ACM CCS)", "according to ACM CCS"]
keywords_str = keywords.text
for to_replace in to_replaces:
keywords_str = keywords_str.replace(to_replace, "")
keywords_extracted = re.findall(r'\[(.*?)\]', keywords_str)
if keywords_extracted:
keywords_set.update(keywords_extracted)
else:
keywords_set.update(re.split(',|:|;', keywords_str))
return list(keywords_set)
except Exception as e:
print(e)
return None
def springer_v2(soup):
try:
keywords = set()
keywords_parent_div = soup.find('div', class_="KeywordGroup")
keywords_span = keywords_parent_div.findChildren("span", class_="Keyword")
for k in keywords_span:
keywords.add(k.text)
return list(keywords)
except Exception as e:
print(e)
return None
def dagstuhl(soup):
try:
keywords_label = soup.find('b', text="Keywords:")
keywords_parent_font = keywords_label.parent
keywords_parent_td = keywords_parent_font.parent
keywords_font = keywords_parent_td.find_next('td').find_next('td').find("font")
if keywords_font is not None:
return re.split(',', keywords_font.text)
except Exception as e:
print(e)
return None
def springer_v1(soup):
try:
keywords = set()
keywords_parent_section = soup.find('ul', class_="c-article-subject-list")
keywords_li = keywords_parent_section.findChildren("li", class_="c-article-subject-list__subject")
for k in keywords_li:
kw = k.find("span").text
keywords.add(str(regex.sub("", kw)).strip())
return list(keywords)
except Exception as e:
print(e)
return None
def wiley_online_library(soup):
try:
keywords_parent_section = soup.find('section', class_="keywords")
keywords_ul = keywords_parent_section.find('ul')
keywords_lis = keywords_ul.findChildren("li")
keywords_set = set()
for keywords_li in keywords_lis:
# e.g. Case 1: "[3.1.1] Human-Centered Computing" and so on
# e.g. Case 2: CCS Concepts don't have '[' and ']' but they have strings such as "• Human‐centered computing → Graph drawings"
# Step 1: Remove annoying substrings
# Step 2: Choose to take ONLY Categories, not the Subject Descriptors > Write a REGEX to take substrings between [].
# Step 3: Split the string by , or ; or :
to_replaces = ["CCS Concepts", "Categories and Subject Descriptors", "Categories and subject descriptors", "Categories and Subject Descriptors (according to ACM CCS)", "according to ACM CCS"]
keywords_str = keywords_li.find("a").text
for to_replace in to_replaces:
keywords_str = keywords_str.replace(to_replace, "")
keywords_extracted = re.findall(r'\[(.*?)\]', keywords_str)
if keywords_extracted:
keywords_set.update(keywords_extracted)
else:
# CCS Concepts don't have '[' and ']' but they have strings such as "• Human‐centered computing → Graph drawings"
regex_find = r'•(.*)→(.*)'
regex_replace = r'\1;\2' # set the delimiter to either , : ; (as is used below to split)
keywords_str = re.sub(regex_find, regex_replace, keywords_str)
keywords_set.update(re.split(',|:|;', keywords_str))
return list(keywords_set)
except Exception as e:
print(e)
return None
def cogsci(soup):
return None
def scitepress(soup):
try:
keywords_set = set()
keywords_span = soup.find('span', id="ContentPlaceHolder1_LinkPaperPage_LinkPaperContent_LabelPublicationDetailKeywords")
for kw in keywords_span.text.split(","):
keywords_set.add(kw)
return list(keywords_set)
except Exception as e:
print(e)
return None
def scienceopen(soup):
try:
keywords_set = set()
for span_label in soup.find_all('span', class_="so-metadata-label"):
if "Keywords" in span_label.text:
for keyword_a in span_label.find_next_siblings('a'):
keywords_set.add(keyword_a.text)
return list(keywords_set)
except Exception as e:
pass
return None
def aaai(soup):
return None
def get_keywords(publisher, soup):
keywords_list = None
if publisher == "acm_digital_library":
keywords_list = acm_digital_library(soup)
elif publisher == "graphics_interface_proceedings":
keywords_list = graphics_interface_proceedings(soup)
elif publisher == "ieee_explore":
keywords_list = ieee_explore(soup)
elif publisher == "cogsci":
keywords_list = cogsci(soup)
elif publisher == "springer_v1":
keywords_list = springer_v1(soup)
elif publisher == "springer_v2":
keywords_list = springer_v2(soup)
elif publisher == "scitepress":
keywords_list = scitepress(soup)
elif publisher == "scienceopen":
keywords_list = scienceopen(soup)
elif publisher == "eurographics_digital_library":
keywords_list = eurographics_digital_library(soup)
elif publisher == "wiley_online_library":
keywords_list = wiley_online_library(soup)
elif publisher == "dagstuhl":
keywords_list = dagstuhl(soup)
elif publisher == "aaai":
keywords_list = aaai(soup)
    return None if not keywords_list else keywords_list
| 2.703125 | 3 |
topobank/publication/models.py | ContactEngineering/TopoBank | 3 | 5464 | from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.conf import settings
MAX_LEN_AUTHORS_FIELD = 512
CITATION_FORMAT_FLAVORS = ['html', 'ris', 'bibtex', 'biblatex']
DEFAULT_KEYWORDS = ['surface', 'topography']
class UnknownCitationFormat(Exception):
def __init__(self, flavor):
self._flavor = flavor
def __str__(self):
return f"Unknown citation format flavor '{self._flavor}'."
class Publication(models.Model):
LICENSE_CHOICES = [(k, settings.CC_LICENSE_INFOS[k]['option_name'])
for k in ['cc0-1.0', 'ccby-4.0', 'ccbysa-4.0']]
short_url = models.CharField(max_length=10, unique=True, null=True)
surface = models.OneToOneField("manager.Surface", on_delete=models.PROTECT, related_name='publication')
original_surface = models.ForeignKey("manager.Surface", on_delete=models.SET_NULL,
null=True, related_name='derived_publications')
publisher = models.ForeignKey("users.User", on_delete=models.PROTECT)
publisher_orcid_id = models.CharField(max_length=19, default='') # 16 digits including 3 dashes
version = models.PositiveIntegerField(default=1)
datetime = models.DateTimeField(auto_now_add=True)
license = models.CharField(max_length=12, choices=LICENSE_CHOICES, blank=False, default='')
authors = models.CharField(max_length=MAX_LEN_AUTHORS_FIELD)
container = models.FileField(max_length=50, default='')
def get_absolute_url(self):
return reverse('publication:go', args=[self.short_url])
def get_full_url(self, request):
return request.build_absolute_uri(self.get_absolute_url())
def get_citation(self, flavor, request):
if flavor not in CITATION_FORMAT_FLAVORS:
raise UnknownCitationFormat(flavor)
method_name = '_get_citation_as_'+flavor
return getattr(self, method_name)(request)
def _get_citation_as_html(self, request):
s = '{authors}. ({year}). contact.engineering. <em>{surface.name} (Version {version})</em>.'
s += ' <a href="{publication_url}">{publication_url}</a>'
s = s.format(
authors=self.authors,
year=self.datetime.year,
version=self.version,
surface=self.surface,
publication_url=self.get_full_url(request),
)
return mark_safe(s)
def _get_citation_as_ris(self, request):
# see http://refdb.sourceforge.net/manual-0.9.6/sect1-ris-format.html
# or https://en.wikipedia.org/wiki/RIS_(file_format)
# or https://web.archive.org/web/20120526103719/http://refman.com/support/risformat_intro.asp
# https://web.archive.org/web/20120717122530/http://refman.com/support/direct%20export.zip
s = ""
def add(key, value):
nonlocal s
s += f"{key} - {value}\n"
# Electronic citation / Website
add('TY', 'ELEC')
# Title
add('TI', f"{self.surface.name} (Version {self.version})")
# Authors
for author in self.authors.split(','):
add('AU', author.strip())
# Publication Year
add('PY', format(self.datetime, '%Y/%m/%d/'))
# URL
add('UR', self.get_full_url(request))
# Name of Database
add('DB', 'contact.engineering')
# Notes
add('N1', self.surface.description)
# add keywords, defaults ones and tags
for kw in DEFAULT_KEYWORDS:
add('KW', kw)
for t in self.surface.tags.all():
add('KW', t.name)
# End of record, must be empty and last tag
add('ER', '')
return s.strip()
def _get_citation_as_bibtex(self, request):
title = f"{self.surface.name} (Version {self.version})"
shortname = f"{self.surface.name}_v{self.version}".lower().replace(' ','_')
keywords = ",".join(DEFAULT_KEYWORDS)
if self.surface.tags.count()>0:
keywords += ","+",".join(t.name for t in self.surface.tags.all())
s = """
@misc{{
{shortname},
title = {{{title}}},
author = {{{author}}},
year = {{{year}}},
note = {{{note}}},
keywords = {{{keywords}}},
howpublished = {{{publication_url}}},
}}
""".format(title=title,
author=self.authors.replace(', ', ' and '),
year=self.datetime.year,
note=self.surface.description,
publication_url=self.get_full_url(request),
keywords=keywords,
shortname=shortname,
)
return s.strip()
def _get_citation_as_biblatex(self, request):
shortname = f"{self.surface.name}_v{self.version}".lower().replace(' ','_')
keywords = ",".join(DEFAULT_KEYWORDS)
if self.surface.tags.count()>0:
keywords += ","+",".join(t.name for t in self.surface.tags.all())
s = """
@online{{
{shortname},
title = {{{title}}},
version = {{{version}}},
author = {{{author}}},
year = {{{year}}},
month = {{{month}}},
date = {{{date}}},
note = {{{note}}},
keywords = {{{keywords}}},
url = {{{url}}},
urldate = {{{urldate}}}
}}
""".format(title=self.surface.name,
version=self.version,
author=self.authors.replace(', ', ' and '),
year=self.datetime.year,
month=self.datetime.month,
date=format(self.datetime, "%Y-%m-%d"),
note=self.surface.description,
url=self.get_full_url(request),
urldate=format(timezone.now(), "%Y-%m-%d"),
keywords=keywords,
shortname=shortname,
)
return s.strip()
@property
def storage_prefix(self):
"""Return prefix used for storage.
https://docs.djangoproject.com/en/2.2/ref/models/fields/#django.db.models.FileField.upload_to
Looks like a relative path to a directory.
If storage is on filesystem, the prefix should correspond
to a real directory.
"""
return "publications/{}/".format(self.short_url)
@property
def container_storage_path(self):
"""Return relative path of container in storage."""
return f"{self.storage_prefix}container.zip"
| 2.109375 | 2 |
vendor/migrations/0003_store_password.py | rayhu-osu/vcube | 1 | 5465 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-24 19:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendor', '0002_store_image'),
]
operations = [
migrations.AddField(
model_name='store',
name='password',
field=models.CharField(default=1, max_length=30),
preserve_default=False,
),
]
| 1.570313 | 2 |
lec2.py | widnerlr/isat252 | 0 | 5466 | <gh_stars>0
"""
Your module description
"""
"""
this is my second py code
for my second lecture
"""
#print ('hello world') # this is a single line commment
# this is my second line comment
#print(type("123."))
#print ("Hello World".upper())
#print("Hello World".lower())
#print("hello" + "world" + ".")
#print(2**3)
#my_str = "hello world"
#print(my_str)
#my_str = "Tom"
#print(my_str)
my_int = 2
my_float = 3.0
print(my_int + my_float) | 3.3125 | 3 |
day_22_b.py | Gyaha/AOC2020 | 0 | 5467 | <reponame>Gyaha/AOC2020<filename>day_22_b.py
def play_recursively_combat(p1: list, p2: list) -> bool:
rounds = set()
winner = None
while len(p1) > 0 and len(p2) > 0:
r = tuple(p1 + [-1] + p2)
if r in rounds:
return True
else:
rounds.add(r)
c1 = p1.pop(0)
c2 = p2.pop(0)
if c1 <= len(p1) and c2 <= len(p2):
winner = play_recursively_combat(p1[:c1], p2[:c2])
else:
winner = c1 > c2
if winner:
p1.append(c1)
p1.append(c2)
else:
p2.append(c2)
p2.append(c1)
return winner
def play_combat(s: str):
p1, p2 = s.strip().split("\n\n")
p1, p2 = convert_cards(p1), convert_cards(p2)
winner = play_recursively_combat(p1, p2)
w = p1 if winner else p2
s = 0
for i, c in enumerate(reversed(w), 1):
s += c * i
return s
def convert_cards(s: str) -> list:
c = []
for p in s.splitlines()[1:]:
c.append(int(p))
return c
def run_tests():
test_input = """Player 1:
9
2
6
3
1
Player 2:
5
8
4
7
10"""
test_output = 291
assert play_combat(test_input) == test_output
test_input = """Player 1:
43
19
Player 2:
2
29
14"""
assert play_combat(test_input)
def run() -> int:
with open("inputs/input_22.txt") as file:
data = file.read()
return play_combat(data)
if __name__ == "__main__":
run_tests()
import time
time_start = time.perf_counter()
print(run())
time_end = time.perf_counter() - time_start
print(f"Time: {time_end:0.4f} sec")
| 3.375 | 3 |
Auth/Constants/LoginOpCode.py | sundayz/idewave-core | 0 | 5468 | <reponame>sundayz/idewave-core
from enum import Enum
class LoginOpCode(Enum):
''' Opcodes during login process '''
LOGIN_CHALL = 0x00
LOGIN_PROOF = 0x01
RECON_CHALL = 0x02 # currently do not in use
RECON_PROOF = 0x03 # currently do not in use
REALMLIST = 0x10
class LoginResult(Enum):
''' Error codes '''
SUCCESS = 0x00
| 2.203125 | 2 |
LINETOKEN/__init__.py | pratannaimjoi/tokenIpad | 0 | 5469 | <reponame>pratannaimjoi/tokenIpad
# -*- coding: utf-8 -*-
from .LineApi import LINE
from .lib.Gen.ttypes import *
| 0.839844 | 1 |
main.py | seton-develops/PDF-Camelot-Folder-Executable | 0 | 5470 | <filename>main.py
'''
Created on Jun 17, 2021
@author: Sean
'''
import PDF2CSV_GUI
def main():
j = PDF2CSV_GUI.Convert_GUI()
if __name__ == "__main__":
main() | 1.9375 | 2 |
Part1/bot_read.py | Mildlyoffbeat/RedditBot-1 | 0 | 5471 | #!/usr/bin/python
import praw
reddit = praw.Reddit('mob-secondbot')
subreddit = reddit.subreddit("learnpython")
for submission in subreddit.hot(limit=5):
print("Title: ", submission.title)
print("Text: ", submission.selftext)
print("Score: ", submission.score)
print("---------------------------------\n")
| 2.90625 | 3 |
17/kazuate_liar.cpp.py | Siketyan/Programming-I | 0 | 5472 | <filename>17/kazuate_liar.cpp.py
from subprocess import Popen, PIPE, call
name = "kazuate_liar.o"
src = """
#include <iostream>
#include <random>
using namespace std;
int main()
{
random_device rd;
mt19937 mt(rd());
uniform_int_distribution<int> randfive(0, 4);
uniform_int_distribution<int> randint(1, 100);
int count = 0;
int num = randint(mt);
while (1)
{
int i;
cout << "数を当ててみて ";
cin >> i;
if (i < 1 || i > 100)
{
cout << "不正な入力です。" << endl;
continue;
}
count++;
bool liar = randfive(mt) == 0;
if (i == num)
{
cout << "正解です。おめでとう。 (" << count << " 回目)" << endl;
break;
}
else if ((liar && i > num) || i < num)
{
cout << "もっと大きいよ。" << endl;
}
else
{
cout << "もっと小さいよ。" << endl;
}
}
return 0;
}
""";
proc = Popen(["g++", "-o", name, "-x", "c++", "-"], stdin = PIPE);
proc.communicate(src.encode());
call(["./" + name]);
| 2.671875 | 3 |
src/terrafort/main.py | silvercar/terrafort | 1 | 5473 | <gh_stars>1-10
"""
Terrafort
Generate terraform templates for specific resources
"""
import click
from .providers.aws import Aws
@click.group()
@click.option('--commands',
is_flag=True,
help="Output import commands instead of a terraform template")
@click.version_option()
@click.pass_context
def cli(ctx, commands=False):
ctx.obj = {'commands': commands}
cli.add_command(Aws.aws_db_instance)
cli.add_command(Aws.aws_iam_instance_profile)
cli.add_command(Aws.aws_instance)
cli.add_command(Aws.aws_security_group)
if __name__ == "__main__":
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
cli(obj={})
| 1.960938 | 2 |
src/ping.py | jnsougata/rich-embed | 0 | 5474 | <gh_stars>0
import discord
import app_util
class Ping(app_util.Cog):
def __init__(self, bot: app_util.Bot):
self.bot = bot
@app_util.Cog.command(
command=app_util.SlashCommand(
name='ping', description='shows avg ping of client'
),
guild_id=877399405056102431
)
async def command(self, ctx: app_util.Context):
await ctx.send_response(embed=discord.Embed(title=f'{self.bot.latency * 1000:.2f}ms'))
def setup(bot: app_util.Bot):
bot.add_application_cog(Ping(bot))
| 2.53125 | 3 |
2020/24/visualization.py | AlbertVeli/AdventOfCode | 0 | 5475 | #!/usr/bin/env python3
import sys
import re
import numpy as np
from PIL import Image
moves = { 'e': (2, 0), 'se': (1, 2), 'sw': (-1, 2), 'w': (-2, 0), 'nw': (-1, -2), 'ne': (1, -2) }
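# Hex directions mapped onto an integer grid: east/west shift x by 2, the four diagonal
# neighbours shift x by 1 and y by 2, so every hex tile gets a unique (x, y) pair.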
# Save (x, y): True/False in tiles. True = black, False = white.
tiles = {}
for line in open(sys.argv[1]).read().splitlines():
pos = np.array((0, 0))
for d in re.findall(r'e|se|sw|w|nw|ne', line):
pos += moves[d]
t = tuple(pos)
if t in tiles:
tiles[t] = not tiles[t]
else:
tiles[t] = True
# Part 1
print('black:', sum(val == True for val in tiles.values()))
# -- Part 2 --
# take a chance on how wide it needs to be
width = 300
heigth = 300
board = np.zeros(width * heigth, dtype=np.int8)
board = board.reshape(heigth, width)
# Fill in tiles, move to center
for key, value in tiles.items():
x, y = key
x += width // 2
y += heigth // 2
board[y][x] = value
def black_neighbours(y, x, b):
num = 0
for m in moves.values():
num += b[(y + m[1], x + m[0])]
return num
def game():
board_copy = np.copy(board)
w, h = board.shape
# Don't do outer edge (to avoid special cases)
for y in range(2, h - 2):
for x in range(2, w - 2):
tile = board_copy[(y, x)]
n = black_neighbours(y, x, board_copy)
if tile:
# black
if n == 0 or n > 2:
board[(y, x)] = False
else:
# white
if n == 2:
board[(y, x)] = True
def save_image(day):
colours = [(0, 0, 0), (255, 255, 255)]
im = Image.new('RGB', (width, heigth))
for y in range(heigth):
for x in range(width):
c = colours[board[y][x]]
im.putpixel((x, y), c)
im.save('img%03d.png' % (day))
save_image(0)
for day in range(1, 101):
game()
save_image(day)
print('Day %d: %d' % (day, len(np.where(board == True)[0])))
ys, xs = np.where(board)
print(min(ys), max(ys), min(xs), max(xs))
| 3.0625 | 3 |
experimental/tracing/bin/diff_heap_profiler.py | BearerPipelineTest/catapult | 1,894 | 5476 | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import print_function
import argparse
import gzip
import json
import os
import shutil
import six
from six.moves import zip
_OUTPUT_DIR = 'output'
_OUTPUT_GRAPH_DIR = os.path.join(_OUTPUT_DIR, 'graph')
class Process(object):
def __init__(self):
self.pid = None
self.name = None
self.labels = None
self.types = {}
self.strings = {}
self.stackframes = {}
self.allocators = None
self.version = None
class Entry(object):
def __init__(self):
self.count = None
self.size = None
self.type = None
self.stackframe = None
class GraphDump(object):
def __init__(self):
self.pid = None
self.name = None
self.labels = None
self.heap = None
self.root = ''
self.leaks = ''
self.leak_stackframes = 0
self.leak_objects = 0
def OpenTraceFile(file_path, mode):
if file_path.endswith('.gz'):
return gzip.open(file_path, mode + 'b')
return open(file_path, mode + 't')
def FindMemoryDumps(filename):
processes = {}
with OpenTraceFile(filename, 'r') as f:
data = json.loads(f.read().decode('utf-8'))
for event in data['traceEvents']:
pid = event['pid']
if pid not in processes:
processes[pid] = Process()
processes[pid].pid = pid
process = processes[pid]
# Retrieve process informations.
if event['ph'] == 'M':
if event['name'] == 'process_name' and 'name' in event['args']:
process.name = event['args']['name']
if event['name'] == 'process_labels' and 'labels' in event['args']:
process.labels = event['args']['labels']
if event['name'] == 'typeNames':
process.types = {}
for type_id, t in six.iteritems(event['args']['typeNames']):
process.types[int(type_id)] = t
if event['name'] == 'stackFrames':
process.stackframes = {}
for stack_id, s in six.iteritems(event['args']['stackFrames']):
new_stackframe = {}
new_stackframe['name'] = s['name']
if 'parent' in s:
new_stackframe['parent'] = int(s['parent'])
process.stackframes[int(stack_id)] = new_stackframe
# Look for a detailed memory dump event.
if not ((event['name'] == 'periodic_interval' or
event['name'] == 'explicitly_triggered') and
event['args']['dumps']['level_of_detail'] == 'detailed'):
continue
# Check for a memory dump V1.
if u'heaps' in event['args']['dumps']:
# Get the first memory dump.
if not process.allocators:
process.version = 1
process.allocators = event['args']['dumps']['heaps']
# Check for a memory dump V2.
# See format: [chromium] src/base/trace_event/heap_profiler_event_writer.h
if u'heaps_v2' in event['args']['dumps']:
# Memory dump format V2 is dumping information incrementally. Update
# the cumulated indexes.
maps = event['args']['dumps']['heaps_v2']['maps']
for string in maps['strings']:
process.strings[string['id']] = string['string']
for node in maps['nodes']:
node_v1 = {}
node_v1['name'] = process.strings[node['name_sid']]
if 'parent' in node:
node_v1['parent'] = node['parent']
process.stackframes[node['id']] = node_v1
for t in maps['types']:
process.types[t['id']] = process.strings[t['name_sid']]
# Get the first memory dump.
if not process.allocators:
dump = event['args']['dumps']
process.version = 2
process.allocators = dump['heaps_v2']['allocators']
# Remove processes with incomplete memory dump.
  for pid, process in list(processes.items()):
if not (process.allocators and process.stackframes and process.types):
del processes[pid]
return processes
def ResolveMemoryDumpFields(entries, stackframes, types):
def ResolveStackTrace(stack_id, stackframes):
stackframe = stackframes[stack_id]
tail = ()
if 'parent' in stackframe:
tail = ResolveStackTrace(stackframe['parent'], stackframes)
name = stackframe['name'].replace('\r', '').replace('\n', '')
return (name,) + tail
def ResolveType(type_id, types):
return types[type_id]
for entry in entries:
# Stackframe may be -1 (18446744073709551615L) when not stackframe are
# available.
if entry.stackframe not in stackframes:
entry.stackframe = []
else:
entry.stackframe = ResolveStackTrace(entry.stackframe, stackframes)
entry.type = ResolveType(entry.type, types)
def IncrementHeapEntry(stack, count, size, typename, root):
if not stack:
root['count'] += count
root['size'] += size
if typename not in root['count_by_type']:
root['count_by_type'][typename] = 0
root['count_by_type'][typename] += count
else:
top = stack[-1]
tail = stack[:-1]
if top not in root['children']:
new_node = {}
new_node['count'] = 0
new_node['size'] = 0
new_node['children'] = {}
new_node['count_by_type'] = {}
root['children'][top] = new_node
IncrementHeapEntry(tail, count, size, typename, root['children'][top])
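# Note: resolved stacks are ordered leaf-first (outermost caller last) and
# IncrementHeapEntry consumes them from the end, so the tree is rooted at the
# outermost frame and the leaf allocation site ends up deepest.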
def CanonicalHeapEntries(root):
total_count = 0
total_size = 0
for child in six.itervalues(root['children']):
total_count += child['count']
total_size += child['size']
root['count'] -= total_count
root['size'] -= total_size
for typename in root['count_by_type']:
total_count_for_type = 0
for child in six.itervalues(root['children']):
if typename in child['count_by_type']:
total_count_for_type += child['count_by_type'][typename]
root['count_by_type'][typename] -= total_count_for_type
for child in six.itervalues(root['children']):
CanonicalHeapEntries(child)
def FindLeaks(root, stack, leaks, threshold, size_threshold):
for frame in root['children']:
FindLeaks(root['children'][frame], [frame] + stack, leaks, threshold,
size_threshold)
if root['count'] > threshold and root['size'] > size_threshold:
leaks.append({'count': root['count'],
'size': root['size'],
'count_by_type': root['count_by_type'],
'stackframes': stack})
def DumpTree(root, frame, output, threshold, size_threshold):
output.write('\n{ \"name\": \"%s\",' % frame)
  if root['count'] > threshold and root['size'] > size_threshold:
output.write(' \"size\": \"%s\",' % root['size'])
output.write(' \"count\": \"%s\",' % root['count'])
output.write(' \"children\": [')
is_first = True
for child_frame, child in root['children'].items():
if is_first:
is_first = False
else:
output.write(',')
DumpTree(child, child_frame, output, threshold, size_threshold)
output.write(']')
output.write('}')
def GetEntries(heap, process):
"""
Returns all entries in a heap, after filtering out unknown entries, and doing
some post processing to extract the relevant fields.
"""
if not process:
return []
entries = []
if process.version == 1:
for raw_entry in process.allocators[heap]['entries']:
# Cumulative sizes and types are skipped. see:
# https://chromium.googlesource.com/chromium/src/+/a990af190304be5bf38b120799c594df5a293518/base/trace_event/heap_profiler_heap_dump_writer.cc#294
if 'type' not in raw_entry or not raw_entry['bt']:
continue
entry = Entry()
entry.count = int(raw_entry['count'], 16)
entry.size = int(raw_entry['size'], 16)
entry.type = int(raw_entry['type'])
entry.stackframe = int(raw_entry['bt'])
entries.append(entry)
elif process.version == 2:
raw_entries = list(zip(process.allocators[heap]['counts'],
process.allocators[heap]['sizes'],
process.allocators[heap]['types'],
process.allocators[heap]['nodes']))
for (raw_count, raw_size, raw_type, raw_stackframe) in raw_entries:
entry = Entry()
entry.count = raw_count
entry.size = raw_size
entry.type = raw_type
entry.stackframe = raw_stackframe
entries.append(entry)
# Resolve fields by looking into indexes
ResolveMemoryDumpFields(entries, process.stackframes, process.types)
return entries
def FilterProcesses(processes, filter_by_name, filter_by_labels):
remaining_processes = {}
for pid, process in six.iteritems(processes):
if filter_by_name and process.name != filter_by_name:
continue
if (filter_by_labels and
(not process.labels or filter_by_labels not in process.labels)):
continue
remaining_processes[pid] = process
return remaining_processes
def FindRelevantProcesses(start_trace, end_trace,
filter_by_name,
filter_by_labels,
match_by_labels):
# Retrieve the processes and the associated memory dump.
end_processes = FindMemoryDumps(end_trace)
end_processes = FilterProcesses(end_processes, filter_by_name,
filter_by_labels)
start_processes = None
if start_trace:
start_processes = FindMemoryDumps(start_trace)
start_processes = FilterProcesses(start_processes, filter_by_name,
filter_by_labels)
# Build a sequence of pair of processes to be compared.
processes = []
if not start_processes:
# Only keep end-processes.
for _, end_process in six.iteritems(end_processes):
processes.append((None, end_process))
elif match_by_labels:
# Processes are paired based on name/labels.
for _, end_process in six.iteritems(end_processes):
matching_start_process = None
for _, start_process in six.iteritems(start_processes):
if (start_process.name == end_process.name and
(start_process.name in ['Browser', 'GPU'] or
start_process.labels == end_process.labels)):
matching_start_process = start_process
if matching_start_process:
processes.append((matching_start_process, end_process))
else:
# Processes are paired based on their PID.
relevant_pids = set(end_processes.keys()) & set(start_processes.keys())
for pid in relevant_pids:
start_process = start_processes[pid]
end_process = end_processes[pid]
processes.append((start_process, end_process))
return processes
def BuildGraphDumps(processes, threshold, size_threshold):
"""
Build graph for a sequence of pair of processes.
If start_process is None, counts objects in end_trace.
Otherwise, counts objects present in end_trace, but not in start_process.
"""
graph_dumps = []
for (start_process, end_process) in processes:
pid = end_process.pid
name = end_process.name if end_process.name else ''
labels = end_process.labels if end_process.labels else ''
print('Process[%d] %s: %s' % (pid, name, labels))
for heap in end_process.allocators:
start_entries = GetEntries(heap, start_process)
end_entries = GetEntries(heap, end_process)
graph = GraphDump()
graph.pid = pid
graph.name = name
graph.labels = labels
graph.heap = heap
graph_dumps.append(graph)
# Do the math: diffing start and end memory dumps.
root = {}
root['count'] = 0
root['size'] = 0
root['children'] = {}
root['count_by_type'] = {}
for entry in start_entries:
if entry.type:
IncrementHeapEntry(entry.stackframe, - entry.count, - entry.size,
entry.type, root)
for entry in end_entries:
if entry.type:
IncrementHeapEntry(entry.stackframe, entry.count, entry.size,
entry.type, root)
CanonicalHeapEntries(root)
graph.root = root
# Find leaks
leaks = []
FindLeaks(root, [], leaks, threshold, size_threshold)
leaks.sort(reverse=True, key=lambda k: k['size'])
if leaks:
print(' %s: %d potential leaks found.' % (heap, len(leaks)))
graph.leaks = leaks
graph.leak_stackframes = len(leaks)
for leak in leaks:
graph.leak_objects += leak['count']
return graph_dumps
def WritePotentialLeaks(graph_dumps):
for graph in graph_dumps:
if graph.leaks:
filename = 'process_%d_%s-leaks.json' % (graph.pid, graph.heap)
output_filename = os.path.join(_OUTPUT_DIR, filename)
with open(output_filename, 'w') as output:
json.dump(graph.leaks, output)
def WriteGrahDumps(graph_dumps, threshold, size_threshold):
for graph in graph_dumps:
# Dump the remaining allocated objects tree.
filename = 'process_%d_%s-objects.json' % (graph.pid, graph.heap)
output_filename = os.path.join(_OUTPUT_GRAPH_DIR, filename)
if graph.root:
with open(output_filename, 'w') as output:
DumpTree(graph.root, '.', output, threshold, size_threshold)
graph.root = filename
def WriteIndex(graph_dumps):
output_filename = os.path.join(_OUTPUT_GRAPH_DIR, 'index.json')
with open(output_filename, 'w') as output:
json.dump([
{'pid': graph.pid,
'heap': graph.heap,
'name': graph.name,
'labels': graph.labels,
'objects': graph.root,
'potential leaks': graph.leak_stackframes,
'objects leaked': graph.leak_objects,
}
for graph in graph_dumps], output)
def WriteHTML():
# Copy the HTML page.
source = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'diff_heap_profiler.html')
destination = os.path.join(_OUTPUT_GRAPH_DIR, 'index.html')
shutil.copyfile(source, destination)
# Copy the D3 library file.
source = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir,
os.path.pardir,
os.path.pardir,
'tracing',
'third_party',
'd3',
'd3.min.js')
destination = os.path.join(_OUTPUT_GRAPH_DIR, 'd3.min.js')
shutil.copyfile(source, destination)
def Main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--flame-graph',
action='store_true',
help='Output a flame graph based on stackframe allocations')
parser.add_argument(
'--threshold',
type=int,
default=0,
help='Objects threshold for being a potential memory leak')
parser.add_argument(
'--size-threshold',
type=int,
default=0,
help='Size threshold for being a potential memory leak')
parser.add_argument(
'--filter-by-name',
type=str,
help='Only keep processes with name (i.e. Browser, Renderer, ...)')
parser.add_argument(
'--filter-by-labels',
type=str,
help='Only keep processes with matching labels')
parser.add_argument(
'--match-by-labels',
action='store_true',
help='Match processes between runs by labels')
parser.add_argument(
'trace',
nargs='+',
help='Trace files to be processed')
options = parser.parse_args()
if options.threshold == 0 and options.size_threshold == 0:
options.threshold = 1000
if len(options.trace) == 1:
end_trace = options.trace[0]
start_trace = None
else:
start_trace = options.trace[0]
end_trace = options.trace[1]
if not os.path.exists(_OUTPUT_DIR):
os.makedirs(_OUTPUT_DIR)
# Find relevant processes to be processed.
processes = FindRelevantProcesses(start_trace, end_trace,
options.filter_by_name,
options.filter_by_labels,
options.match_by_labels)
graph_dumps = BuildGraphDumps(processes, options.threshold,
options.size_threshold)
WritePotentialLeaks(graph_dumps)
if options.flame_graph:
if not os.path.exists(_OUTPUT_GRAPH_DIR):
os.makedirs(_OUTPUT_GRAPH_DIR)
WriteGrahDumps(graph_dumps, options.threshold, options.size_threshold)
WriteIndex(graph_dumps)
WriteHTML()
if __name__ == '__main__':
Main()
| 2.203125 | 2 |
mne_bids/commands/mne_bids_raw_to_bids.py | kingjr/mne-bids | 0 | 5477 | """Write raw files to BIDS format.
example usage: $ mne_bids raw_to_bids --subject_id sub01 --task rest
--raw data.edf --bids_root new_path
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import mne_bids
from mne_bids import write_raw_bids, BIDSPath
from mne_bids.read import _read_raw
def run():
"""Run the raw_to_bids command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__, usage="usage: %prog options args",
prog_prefix='mne_bids',
version=mne_bids.__version__)
parser.add_option('--subject_id', dest='subject_id',
help=('subject name in BIDS compatible format '
'(01, 02, etc.)'))
parser.add_option('--task', dest='task',
help='name of the task the data is based on')
parser.add_option('--raw', dest='raw_fname',
help='path to the raw MEG file')
parser.add_option('--bids_root', dest='bids_root',
help='The path of the BIDS compatible folder.')
parser.add_option('--session_id', dest='session_id',
help='session name in BIDS compatible format')
parser.add_option('--run', dest='run',
help='run number for this dataset')
parser.add_option('--acq', dest='acq',
help='acquisition parameter for this dataset')
parser.add_option('--events_data', dest='events_data',
help='events file (events.tsv)')
parser.add_option('--event_id', dest='event_id',
help='event id dict', metavar='eid')
parser.add_option('--hpi', dest='hpi',
help='path to the MEG marker points')
parser.add_option('--electrode', dest='electrode',
help='path to head-native digitizer points')
parser.add_option('--hsp', dest='hsp',
help='path to headshape points')
parser.add_option('--config', dest='config',
help='path to the configuration file')
parser.add_option('--overwrite', dest='overwrite',
help="whether to overwrite existing data (BOOLEAN)")
parser.add_option('--line_freq', dest='line_freq',
help="The frequency of the line noise in Hz "
"(e.g. 50 or 60). If unknown, pass None")
opt, args = parser.parse_args()
if len(args) > 0:
parser.print_help()
parser.error('Do not specify arguments without flags. Found: "{}".\n'
.format(args))
if not all([opt.subject_id, opt.task, opt.raw_fname, opt.bids_root]):
parser.print_help()
        parser.error('Arguments missing. You need to specify at least the '
'following: --subject_id, --task, --raw, --bids_root.')
bids_path = BIDSPath(
subject=opt.subject_id, session=opt.session_id, run=opt.run,
acquisition=opt.acq, task=opt.task, root=opt.bids_root)
allow_maxshield = False
if opt.raw_fname.endswith('.fif'):
allow_maxshield = True
raw = _read_raw(opt.raw_fname, hpi=opt.hpi, electrode=opt.electrode,
hsp=opt.hsp, config=opt.config,
allow_maxshield=allow_maxshield)
if opt.line_freq is not None:
line_freq = None if opt.line_freq == "None" else opt.line_freq
raw.info['line_freq'] = line_freq
write_raw_bids(raw, bids_path, event_id=opt.event_id,
events_data=opt.events_data, overwrite=opt.overwrite,
verbose=True)
if __name__ == '__main__':
run()
| 2.390625 | 2 |
lab1oop.py | NastiaK/NewRepository | 0 | 5478 | <gh_stars>0
class Calculations:
def __init__(self, first, second):
self.first = first
self.second = second
def add(self):
print(self.first + self.second)
def subtract(self):
print(self.first - self.second)
def multiply(self):
print(self.first * self.second)
def divide(self):
        if self.second == 0:
print("Can't divide by zero")
else:
print(self.first / self.second)
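# Usage sketch (illustration only, not part of the original lab exercise): the
# class can also be used directly, without the interactive loop in main().
#
#     calc = Calculations(10.0, 4.0)
#     calc.add()        # prints 14.0
#     calc.subtract()   # prints 6.0
#     calc.divide()     # prints 2.5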
def main():
print("Calculator has started")
while True:
a = float(input("Enter first number "))
b = float(input("Enter second number "))
chooseop = 1
calc=Calculations(a, b)
while (chooseop == 1) | (chooseop == 2) | (chooseop == 3) | (chooseop == 4):
chooseop = int(input("Enter 1 for addition, 2 for subtraction, 3 for multiplication and 4 for division "))
print(chooseop)
if chooseop == 1:
calc.add()
break
elif chooseop == 2:
calc.subtract()
break
elif chooseop == 3:
calc.multiply()
break
elif chooseop == 4:
calc.divide()
break
elif (chooseop != 1) & (chooseop != 2) & (chooseop != 3) & (chooseop != 4):
print("Invalid operation number")
if __name__ == "__main__":
main()
| 3.984375 | 4 |
Arrays/cyclic_rotation.py | Jeans212/codility-dev-training | 0 | 5479 | <reponame>Jeans212/codility-dev-training
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
'''
Rotate an array A to the right by a given number of steps K.
Convert the array to a deque
Apply the rotate() method to rotate the deque right by K steps
Convert the deque back to an array
'''
from collections import deque
def solution(A, K):
# write your code in Python 3.6
deq_A = deque(A)
deq_A.rotate(K)
return list(deq_A)
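# Illustrative usage (not part of the original Codility solution; the sample
# values below are assumptions for demonstration only).
if __name__ == '__main__':
    # Rotating [3, 8, 9, 7, 6] right by 3 steps moves the last three elements
    # to the front.
    print(solution([3, 8, 9, 7, 6], 3))  # expected: [9, 7, 6, 3, 8]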
| 3.859375 | 4 |
tests/test_apis.py | hatzel/markdown-spoilers | 2 | 5480 | # -*- coding: utf-8 -*-
"""
Python Markdown
A Python implementation of <NAME>'s Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by <NAME> (http://www.dwerg.net/).
Maintained for a few years by <NAME> (http://www.freewisdom.org).
Currently maintained by <NAME> (https://github.com/waylan),
<NAME> (https://github.com/mitya57) and <NAME> (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 <NAME> (v. 0.2-1.6b)
Copyright 2004 <NAME> (the original version)
License: BSD (see LICENSE.md for details).
Python-Markdown Regression Tests
================================
Tests of the various APIs with the python markdown lib.
"""
from __future__ import unicode_literals
import unittest
import sys
import os
import markdown
import warnings
from markdown.__main__ import parse_options
from logging import DEBUG, WARNING, CRITICAL
import yaml
import tempfile
from io import BytesIO
from xml.etree.ElementTree import ProcessingInstruction
PY3 = sys.version_info[0] == 3
if not PY3:
def bytes(string, encoding):
return string.encode(encoding)
class TestMarkdownBasics(unittest.TestCase):
""" Tests basics of the Markdown class. """
def setUp(self):
""" Create instance of Markdown. """
self.md = markdown.Markdown()
def testBlankInput(self):
""" Test blank input. """
self.assertEqual(self.md.convert(''), '')
def testWhitespaceOnly(self):
""" Test input of only whitespace. """
self.assertEqual(self.md.convert(' '), '')
def testSimpleInput(self):
""" Test simple input. """
self.assertEqual(self.md.convert('foo'), '<p>foo</p>')
def testInstanceExtension(self):
""" Test Extension loading with a class instance. """
from markdown.extensions.footnotes import FootnoteExtension
markdown.Markdown(extensions=[FootnoteExtension()])
def testEntryPointExtension(self):
""" Test Extension loading with an entry point. """
markdown.Markdown(extensions=['footnotes'])
def testDotNotationExtension(self):
""" Test Extension loading with Name (`path.to.module`). """
markdown.Markdown(extensions=['markdown.extensions.footnotes'])
def testDotNotationExtensionWithClass(self):
""" Test Extension loading with class name (`path.to.module:Class`). """
markdown.Markdown(extensions=['markdown.extensions.footnotes:FootnoteExtension'])
class TestConvertFile(unittest.TestCase):
""" Tests of ConvertFile. """
def setUp(self):
self.saved = sys.stdin, sys.stdout
sys.stdin = BytesIO(bytes('foo', encoding='utf-8'))
sys.stdout = BytesIO()
def tearDown(self):
sys.stdin, sys.stdout = self.saved
def getTempFiles(self, src):
""" Return the file names for two temp files. """
infd, infile = tempfile.mkstemp(suffix='.txt')
with os.fdopen(infd, 'w') as fp:
fp.write(src)
outfd, outfile = tempfile.mkstemp(suffix='.html')
return infile, outfile, outfd
def testFileNames(self):
infile, outfile, outfd = self.getTempFiles('foo')
markdown.markdownFromFile(input=infile, output=outfile)
with os.fdopen(outfd, 'r') as fp:
output = fp.read()
self.assertEqual(output, '<p>foo</p>')
def testFileObjects(self):
infile = BytesIO(bytes('foo', encoding='utf-8'))
outfile = BytesIO()
markdown.markdownFromFile(input=infile, output=outfile)
outfile.seek(0)
self.assertEqual(outfile.read().decode('utf-8'), '<p>foo</p>')
def testStdinStdout(self):
markdown.markdownFromFile()
sys.stdout.seek(0)
self.assertEqual(sys.stdout.read().decode('utf-8'), '<p>foo</p>')
class TestBlockParser(unittest.TestCase):
""" Tests of the BlockParser class. """
def setUp(self):
""" Create instance of BlockParser. """
self.parser = markdown.Markdown().parser
def testParseChunk(self):
""" Test BlockParser.parseChunk. """
root = markdown.util.etree.Element("div")
text = 'foo'
self.parser.parseChunk(root, text)
self.assertEqual(
markdown.serializers.to_xhtml_string(root),
"<div><p>foo</p></div>"
)
def testParseDocument(self):
""" Test BlockParser.parseDocument. """
lines = ['#foo', '', 'bar', '', ' baz']
tree = self.parser.parseDocument(lines)
self.assertIsInstance(tree, markdown.util.etree.ElementTree)
self.assertIs(markdown.util.etree.iselement(tree.getroot()), True)
self.assertEqual(
markdown.serializers.to_xhtml_string(tree.getroot()),
"<div><h1>foo</h1><p>bar</p><pre><code>baz\n</code></pre></div>"
)
class TestBlockParserState(unittest.TestCase):
""" Tests of the State class for BlockParser. """
def setUp(self):
self.state = markdown.blockparser.State()
def testBlankState(self):
""" Test State when empty. """
self.assertEqual(self.state, [])
    def testSetState(self):
""" Test State.set(). """
self.state.set('a_state')
self.assertEqual(self.state, ['a_state'])
self.state.set('state2')
self.assertEqual(self.state, ['a_state', 'state2'])
    def testIsState(self):
""" Test State.isstate(). """
self.assertEqual(self.state.isstate('anything'), False)
self.state.set('a_state')
self.assertEqual(self.state.isstate('a_state'), True)
self.state.set('state2')
self.assertEqual(self.state.isstate('state2'), True)
self.assertEqual(self.state.isstate('a_state'), False)
self.assertEqual(self.state.isstate('missing'), False)
def testReset(self):
""" Test State.reset(). """
self.state.set('a_state')
self.state.reset()
self.assertEqual(self.state, [])
self.state.set('state1')
self.state.set('state2')
self.state.reset()
self.assertEqual(self.state, ['state1'])
class TestHtmlStash(unittest.TestCase):
""" Test Markdown's HtmlStash. """
def setUp(self):
self.stash = markdown.util.HtmlStash()
self.placeholder = self.stash.store('foo')
def testSimpleStore(self):
""" Test HtmlStash.store. """
self.assertEqual(self.placeholder, self.stash.get_placeholder(0))
self.assertEqual(self.stash.html_counter, 1)
self.assertEqual(self.stash.rawHtmlBlocks, ['foo'])
def testStoreMore(self):
""" Test HtmlStash.store with additional blocks. """
placeholder = self.stash.store('bar')
self.assertEqual(placeholder, self.stash.get_placeholder(1))
self.assertEqual(self.stash.html_counter, 2)
self.assertEqual(
self.stash.rawHtmlBlocks,
['foo', 'bar']
)
def testReset(self):
""" Test HtmlStash.reset. """
self.stash.reset()
self.assertEqual(self.stash.html_counter, 0)
self.assertEqual(self.stash.rawHtmlBlocks, [])
class Item(object):
""" A dummy Registry item object for testing. """
def __init__(self, data):
self.data = data
def __repr__(self):
return repr(self.data)
def __eq__(self, other):
return self.data == other
class RegistryTests(unittest.TestCase):
""" Test the processor registry. """
def testCreateRegistry(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
self.assertEqual(len(r), 1)
self.assertIsInstance(r, markdown.util.Registry)
def testRegisterWithoutPriority(self):
r = markdown.util.Registry()
with self.assertRaises(TypeError):
r.register(Item('a'))
def testSortRegistry(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 21)
r.register(Item('c'), 'c', 20.5)
self.assertEqual(len(r), 3)
self.assertEqual(list(r), ['b', 'c', 'a'])
def testIsSorted(self):
r = markdown.util.Registry()
self.assertIs(r._is_sorted, False)
r.register(Item('a'), 'a', 20)
list(r)
self.assertIs(r._is_sorted, True)
r.register(Item('b'), 'b', 21)
self.assertIs(r._is_sorted, False)
r['a']
self.assertIs(r._is_sorted, True)
r._is_sorted = False
r.get_index_for_name('a')
self.assertIs(r._is_sorted, True)
r._is_sorted = False
repr(r)
self.assertIs(r._is_sorted, True)
def testDeregister(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
r.register(Item('c'), 'c', 40)
self.assertEqual(len(r), 3)
r.deregister('b')
self.assertEqual(len(r), 2)
r.deregister('c', strict=False)
self.assertEqual(len(r), 1)
        # deregister non-existent item with strict=False
r.deregister('d', strict=False)
self.assertEqual(len(r), 1)
with self.assertRaises(ValueError):
            # deregister non-existent item with strict=True
r.deregister('e')
self.assertEqual(list(r), ['a'])
def testRegistryContains(self):
r = markdown.util.Registry()
item = Item('a')
r.register(item, 'a', 20)
self.assertIs('a' in r, True)
self.assertIn(item, r)
self.assertNotIn('b', r)
def testRegistryIter(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(list(r), ['b', 'a'])
def testRegistryGetItemByIndex(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r[0], 'b')
self.assertEqual(r[1], 'a')
with self.assertRaises(IndexError):
r[3]
def testRegistryGetItemByItem(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r['a'], 'a')
self.assertEqual(r['b'], 'b')
with self.assertRaises(KeyError):
r['c']
def testRegistrySetItem(self):
r = markdown.util.Registry()
with self.assertRaises(TypeError):
r[0] = 'a'
# TODO: restore this when deprecated __setitem__ is removed.
# with self.assertRaises(TypeError):
# r['a'] = 'a'
# TODO: remove this when deprecated __setitem__ is removed.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
r['a'] = Item('a')
self.assertEqual(list(r), ['a'])
r['b'] = Item('b')
self.assertEqual(list(r), ['a', 'b'])
r['a'] = Item('a1')
self.assertEqual(list(r), ['a1', 'b'])
# Check the warnings
self.assertEqual(len(w), 3)
self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
def testRegistryDelItem(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
with self.assertRaises(TypeError):
del r[0]
# TODO: restore this when deprecated __del__ is removed.
# with self.assertRaises(TypeError):
# del r['a']
# TODO: remove this when deprecated __del__ is removed.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
r.register(Item('b'), 'b', 15)
r.register(Item('c'), 'c', 10)
del r['b']
self.assertEqual(list(r), ['a', 'c'])
del r['a']
self.assertEqual(list(r), ['c'])
with self.assertRaises(TypeError):
del r['badname']
del r['c']
self.assertEqual(list(r), [])
# Check the warnings
self.assertEqual(len(w), 3)
self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
def testRegistrySlice(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
r.register(Item('c'), 'c', 40)
slc = r[1:]
self.assertEqual(len(slc), 2)
self.assertIsInstance(slc, markdown.util.Registry)
self.assertEqual(list(slc), ['b', 'a'])
def testGetIndexForName(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r.get_index_for_name('a'), 1)
self.assertEqual(r.get_index_for_name('b'), 0)
with self.assertRaises(ValueError):
r.get_index_for_name('c')
def testRegisterDupplicate(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b1'), 'b', 10)
self.assertEqual(list(r), ['a', 'b1'])
self.assertEqual(len(r), 2)
r.register(Item('b2'), 'b', 30)
self.assertEqual(len(r), 2)
self.assertEqual(list(r), ['b2', 'a'])
def testRegistryDeprecatedAdd(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
r = markdown.util.Registry()
# Add first item
r.add('c', Item('c'), '_begin')
self.assertEqual(list(r), ['c'])
# Added to beginning
r.add('b', Item('b'), '_begin')
self.assertEqual(list(r), ['b', 'c'])
# Add before first item
r.add('a', Item('a'), '<b')
self.assertEqual(list(r), ['a', 'b', 'c'])
# Add before non-first item
r.add('a1', Item('a1'), '<b')
self.assertEqual(list(r), ['a', 'a1', 'b', 'c'])
# Add after non-last item
r.add('b1', Item('b1'), '>b')
self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c'])
# Add after last item
r.add('d', Item('d'), '>c')
self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c', 'd'])
# Add to end
r.add('e', Item('e'), '_end')
self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c', 'd', 'e'])
with self.assertRaises(ValueError):
r.add('f', Item('f'), 'badlocation')
# Check the warnings
self.assertEqual(len(w), 7)
self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
class TestErrors(unittest.TestCase):
""" Test Error Reporting. """
def setUp(self):
# Set warnings to be raised as errors
warnings.simplefilter('error')
def tearDown(self):
# Reset warning behavior back to default
warnings.simplefilter('default')
def testNonUnicodeSource(self):
""" Test falure on non-unicode source text. """
if not PY3:
source = "foo".encode('utf-16')
self.assertRaises(UnicodeDecodeError, markdown.markdown, source)
def testBadOutputFormat(self):
""" Test failure on bad output_format. """
self.assertRaises(KeyError, markdown.Markdown, output_format='invalid')
def testLoadExtensionFailure(self):
""" Test failure of an extension to load. """
self.assertRaises(
ImportError,
markdown.Markdown, extensions=['non_existant_ext']
)
def testLoadBadExtension(self):
""" Test loading of an Extension with no makeExtension function. """
self.assertRaises(AttributeError, markdown.Markdown, extensions=['markdown.util'])
def testNonExtension(self):
""" Test loading a non Extension object as an extension. """
self.assertRaises(TypeError, markdown.Markdown, extensions=[object])
def testDotNotationExtensionWithBadClass(self):
""" Test Extension loading with non-existant class name (`path.to.module:Class`). """
self.assertRaises(
AttributeError,
markdown.Markdown,
extensions=['markdown.extensions.footnotes:MissingExtension']
)
    def testBaseExtension(self):
        """ Test that the base Extension class will raise NotImplementedError. """
self.assertRaises(
NotImplementedError,
markdown.Markdown, extensions=[markdown.extensions.Extension()]
)
class testETreeComments(unittest.TestCase):
"""
Test that ElementTree Comments work.
These tests should only be a concern when using cElementTree with third
party serializers (including markdown's (x)html serializer). While markdown
doesn't use ElementTree.Comment itself, we should certainly support any
third party extensions which may. Therefore, these tests are included to
ensure such support is maintained.
"""
def setUp(self):
# Create comment node
self.comment = markdown.util.etree.Comment('foo')
if hasattr(markdown.util.etree, 'test_comment'):
self.test_comment = markdown.util.etree.test_comment
else:
self.test_comment = markdown.util.etree.Comment
def testCommentIsComment(self):
""" Test that an ElementTree Comment passes the `is Comment` test. """
self.assertIs(self.comment.tag, markdown.util.etree.test_comment)
def testCommentIsBlockLevel(self):
""" Test that an ElementTree Comment is recognized as BlockLevel. """
md = markdown.Markdown()
self.assertIs(md.is_block_level(self.comment.tag), False)
def testCommentSerialization(self):
""" Test that an ElementTree Comment serializes properly. """
self.assertEqual(
markdown.serializers.to_html_string(self.comment),
'<!--foo-->'
)
def testCommentPrettify(self):
""" Test that an ElementTree Comment is prettified properly. """
pretty = markdown.treeprocessors.PrettifyTreeprocessor(markdown.Markdown())
pretty.run(self.comment)
self.assertEqual(
markdown.serializers.to_html_string(self.comment),
'<!--foo-->\n'
)
class testElementTailTests(unittest.TestCase):
""" Element Tail Tests """
def setUp(self):
self.pretty = markdown.treeprocessors.PrettifyTreeprocessor(markdown.Markdown())
def testBrTailNoNewline(self):
""" Test that last <br> in tree has a new line tail """
root = markdown.util.etree.Element('root')
br = markdown.util.etree.SubElement(root, 'br')
self.assertEqual(br.tail, None)
self.pretty.run(root)
self.assertEqual(br.tail, "\n")
class testSerializers(unittest.TestCase):
""" Test the html and xhtml serializers. """
def testHtml(self):
""" Test HTML serialization. """
el = markdown.util.etree.Element('div')
el.set('id', 'foo<&">')
p = markdown.util.etree.SubElement(el, 'p')
p.text = 'foo <&escaped>'
p.set('hidden', 'hidden')
markdown.util.etree.SubElement(el, 'hr')
non_element = markdown.util.etree.SubElement(el, None)
non_element.text = 'non-element text'
script = markdown.util.etree.SubElement(non_element, 'script')
script.text = '<&"test\nescaping">'
el.tail = "tail text"
self.assertEqual(
markdown.serializers.to_html_string(el),
'<div id="foo<&">">'
'<p hidden>foo <&escaped></p>'
'<hr>'
'non-element text'
'<script><&"test\nescaping"></script>'
'</div>tail text'
)
def testXhtml(self):
"""" Test XHTML serialization. """
el = markdown.util.etree.Element('div')
el.set('id', 'foo<&">')
p = markdown.util.etree.SubElement(el, 'p')
p.text = 'foo<&escaped>'
p.set('hidden', 'hidden')
markdown.util.etree.SubElement(el, 'hr')
non_element = markdown.util.etree.SubElement(el, None)
non_element.text = 'non-element text'
script = markdown.util.etree.SubElement(non_element, 'script')
script.text = '<&"test\nescaping">'
el.tail = "tail text"
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div id="foo<&">">'
'<p hidden="hidden">foo<&escaped></p>'
'<hr />'
'non-element text'
'<script><&"test\nescaping"></script>'
'</div>tail text'
)
def testMixedCaseTags(self):
"""" Test preservation of tag case. """
el = markdown.util.etree.Element('MixedCase')
el.text = 'not valid '
em = markdown.util.etree.SubElement(el, 'EMPHASIS')
em.text = 'html'
markdown.util.etree.SubElement(el, 'HR')
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<MixedCase>not valid <EMPHASIS>html</EMPHASIS><HR /></MixedCase>'
)
    def testProcessingInstruction(self):
        """ Test serialization of ProcessingInstruction. """
pi = ProcessingInstruction('foo', text='<&"test\nescaping">')
self.assertIs(pi.tag, ProcessingInstruction)
self.assertEqual(
markdown.serializers.to_xhtml_string(pi),
'<?foo <&"test\nescaping">?>'
)
def testQNameTag(self):
""" Test serialization of QName tag. """
div = markdown.util.etree.Element('div')
qname = markdown.util.etree.QName('http://www.w3.org/1998/Math/MathML', 'math')
math = markdown.util.etree.SubElement(div, qname)
math.set('display', 'block')
sem = markdown.util.etree.SubElement(math, 'semantics')
msup = markdown.util.etree.SubElement(sem, 'msup')
mi = markdown.util.etree.SubElement(msup, 'mi')
mi.text = 'x'
mn = markdown.util.etree.SubElement(msup, 'mn')
mn.text = '2'
ann = markdown.util.etree.SubElement(sem, 'annotations')
ann.text = 'x^2'
self.assertEqual(
markdown.serializers.to_xhtml_string(div),
'<div>'
'<math display="block" xmlns="http://www.w3.org/1998/Math/MathML">'
'<semantics>'
'<msup>'
'<mi>x</mi>'
'<mn>2</mn>'
'</msup>'
'<annotations>x^2</annotations>'
'</semantics>'
'</math>'
'</div>'
)
def testQNameAttribute(self):
""" Test serialization of QName attribute. """
div = markdown.util.etree.Element('div')
div.set(markdown.util.etree.QName('foo'), markdown.util.etree.QName('bar'))
self.assertEqual(
markdown.serializers.to_xhtml_string(div),
'<div foo="bar"></div>'
)
def testBadQNameTag(self):
""" Test serialization of QName with no tag. """
qname = markdown.util.etree.QName('http://www.w3.org/1998/Math/MathML')
el = markdown.util.etree.Element(qname)
self.assertRaises(ValueError, markdown.serializers.to_xhtml_string, el)
def testQNameEscaping(self):
""" Test QName escaping. """
qname = markdown.util.etree.QName('<&"test\nescaping">', 'div')
el = markdown.util.etree.Element(qname)
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div xmlns="<&"test escaping">"></div>'
)
def testQNamePreEscaping(self):
""" Test QName that is already partially escaped. """
qname = markdown.util.etree.QName('<&"test escaping">', 'div')
el = markdown.util.etree.Element(qname)
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div xmlns="<&"test escaping">"></div>'
)
def buildExtension(self):
""" Build an extension which registers fakeSerializer. """
def fakeSerializer(elem):
# Ignore input and return hardcoded output
return '<div><p>foo</p></div>'
class registerFakeSerializer(markdown.extensions.Extension):
def extendMarkdown(self, md):
md.output_formats['fake'] = fakeSerializer
return registerFakeSerializer()
def testRegisterSerializer(self):
self.assertEqual(
markdown.markdown(
'baz', extensions=[self.buildExtension()], output_format='fake'
),
'<p>foo</p>'
)
def testXHTMLOutput(self):
self.assertEqual(
markdown.markdown('foo \nbar', output_format='xhtml'),
'<p>foo<br />\nbar</p>'
)
def testHTMLOutput(self):
self.assertEqual(
markdown.markdown('foo \nbar', output_format='html'),
'<p>foo<br>\nbar</p>'
)
class testAtomicString(unittest.TestCase):
""" Test that AtomicStrings are honored (not parsed). """
def setUp(self):
md = markdown.Markdown()
self.inlineprocessor = md.treeprocessors['inline']
def testString(self):
""" Test that a regular string is parsed. """
tree = markdown.util.etree.Element('div')
p = markdown.util.etree.SubElement(tree, 'p')
p.text = 'some *text*'
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>some <em>text</em></p></div>'
)
def testSimpleAtomicString(self):
""" Test that a simple AtomicString is not parsed. """
tree = markdown.util.etree.Element('div')
p = markdown.util.etree.SubElement(tree, 'p')
p.text = markdown.util.AtomicString('some *text*')
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>some *text*</p></div>'
)
def testNestedAtomicString(self):
""" Test that a nested AtomicString is not parsed. """
tree = markdown.util.etree.Element('div')
p = markdown.util.etree.SubElement(tree, 'p')
p.text = markdown.util.AtomicString('*some* ')
span1 = markdown.util.etree.SubElement(p, 'span')
span1.text = markdown.util.AtomicString('*more* ')
span2 = markdown.util.etree.SubElement(span1, 'span')
span2.text = markdown.util.AtomicString('*text* ')
span3 = markdown.util.etree.SubElement(span2, 'span')
span3.text = markdown.util.AtomicString('*here*')
span3.tail = markdown.util.AtomicString(' *to*')
span2.tail = markdown.util.AtomicString(' *test*')
span1.tail = markdown.util.AtomicString(' *with*')
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>*some* <span>*more* <span>*text* <span>*here*</span> '
'*to*</span> *test*</span> *with*</p></div>'
)
class TestConfigParsing(unittest.TestCase):
def assertParses(self, value, result):
self.assertIs(markdown.util.parseBoolValue(value, False), result)
def testBooleansParsing(self):
self.assertParses(True, True)
self.assertParses('novalue', None)
self.assertParses('yES', True)
self.assertParses('FALSE', False)
self.assertParses(0., False)
self.assertParses('none', False)
def testPreserveNone(self):
self.assertIsNone(markdown.util.parseBoolValue('None', preserve_none=True))
self.assertIsNone(markdown.util.parseBoolValue(None, preserve_none=True))
def testInvalidBooleansParsing(self):
self.assertRaises(ValueError, markdown.util.parseBoolValue, 'novalue')
class TestCliOptionParsing(unittest.TestCase):
""" Test parsing of Command Line Interface Options. """
def setUp(self):
self.default_options = {
'input': None,
'output': None,
'encoding': None,
'output_format': 'xhtml',
'lazy_ol': True,
'extensions': [],
'extension_configs': {},
}
self.tempfile = ''
def tearDown(self):
if os.path.isfile(self.tempfile):
os.remove(self.tempfile)
def testNoOptions(self):
options, logging_level = parse_options([])
self.assertEqual(options, self.default_options)
self.assertEqual(logging_level, CRITICAL)
def testQuietOption(self):
options, logging_level = parse_options(['-q'])
self.assertGreater(logging_level, CRITICAL)
def testVerboseOption(self):
options, logging_level = parse_options(['-v'])
self.assertEqual(logging_level, WARNING)
def testNoisyOption(self):
options, logging_level = parse_options(['--noisy'])
self.assertEqual(logging_level, DEBUG)
def testInputFileOption(self):
options, logging_level = parse_options(['foo.txt'])
self.default_options['input'] = 'foo.txt'
self.assertEqual(options, self.default_options)
def testOutputFileOption(self):
options, logging_level = parse_options(['-f', 'foo.html'])
self.default_options['output'] = 'foo.html'
self.assertEqual(options, self.default_options)
def testInputAndOutputFileOptions(self):
options, logging_level = parse_options(['-f', 'foo.html', 'foo.txt'])
self.default_options['output'] = 'foo.html'
self.default_options['input'] = 'foo.txt'
self.assertEqual(options, self.default_options)
def testEncodingOption(self):
options, logging_level = parse_options(['-e', 'utf-8'])
self.default_options['encoding'] = 'utf-8'
self.assertEqual(options, self.default_options)
def testOutputFormatOption(self):
options, logging_level = parse_options(['-o', 'html'])
self.default_options['output_format'] = 'html'
self.assertEqual(options, self.default_options)
def testNoLazyOlOption(self):
options, logging_level = parse_options(['-n'])
self.default_options['lazy_ol'] = False
self.assertEqual(options, self.default_options)
def testExtensionOption(self):
options, logging_level = parse_options(['-x', 'markdown.extensions.footnotes'])
self.default_options['extensions'] = ['markdown.extensions.footnotes']
self.assertEqual(options, self.default_options)
def testMultipleExtensionOptions(self):
options, logging_level = parse_options([
'-x', 'markdown.extensions.footnotes',
'-x', 'markdown.extensions.smarty'
])
self.default_options['extensions'] = [
'markdown.extensions.footnotes',
'markdown.extensions.smarty'
]
self.assertEqual(options, self.default_options)
def create_config_file(self, config):
""" Helper to create temp config files. """
if not isinstance(config, markdown.util.string_type):
# convert to string
config = yaml.dump(config)
fd, self.tempfile = tempfile.mkstemp('.yml')
with os.fdopen(fd, 'w') as fp:
fp.write(config)
def testExtensionConfigOption(self):
config = {
'markdown.extensions.wikilinks': {
'base_url': 'http://example.com/',
'end_url': '.html',
'html_class': 'test',
},
'markdown.extensions.footnotes:FootnotesExtension': {
'PLACE_MARKER': '~~~footnotes~~~'
}
}
self.create_config_file(config)
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
    def testBoolExtensionConfigOption(self):
config = {
'markdown.extensions.toc': {
'title': 'Some Title',
'anchorlink': True,
'permalink': True
}
}
self.create_config_file(config)
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
def testExtensionConfigOptionAsJSON(self):
config = {
'markdown.extensions.wikilinks': {
'base_url': 'http://example.com/',
'end_url': '.html',
'html_class': 'test',
},
'markdown.extensions.footnotes:FootnotesExtension': {
'PLACE_MARKER': '~~~footnotes~~~'
}
}
import json
self.create_config_file(json.dumps(config))
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
def testExtensionConfigOptionMissingFile(self):
self.assertRaises(IOError, parse_options, ['-c', 'missing_file.yaml'])
def testExtensionConfigOptionBadFormat(self):
config = """
[footnotes]
PLACE_MARKER= ~~~footnotes~~~
"""
self.create_config_file(config)
self.assertRaises(yaml.YAMLError, parse_options, ['-c', self.tempfile])
class TestEscapeAppend(unittest.TestCase):
""" Tests escape character append. """
def testAppend(self):
""" Test that appended escapes are only in the current instance. """
md = markdown.Markdown()
md.ESCAPED_CHARS.append('|')
self.assertEqual('|' in md.ESCAPED_CHARS, True)
md2 = markdown.Markdown()
self.assertEqual('|' not in md2.ESCAPED_CHARS, True)
class TestBlockAppend(unittest.TestCase):
""" Tests block kHTML append. """
def testBlockAppend(self):
""" Test that appended escapes are only in the current instance. """
md = markdown.Markdown()
md.block_level_elements.append('test')
self.assertEqual('test' in md.block_level_elements, True)
md2 = markdown.Markdown()
self.assertEqual('test' not in md2.block_level_elements, True)
class TestAncestorExclusion(unittest.TestCase):
""" Tests exclusion of tags in ancestor list. """
class AncestorExample(markdown.inlinepatterns.SimpleTagInlineProcessor):
""" Ancestor Test. """
ANCESTOR_EXCLUDES = ('a',)
def handleMatch(self, m, data):
""" Handle match. """
el = markdown.util.etree.Element(self.tag)
el.text = m.group(2)
return el, m.start(0), m.end(0)
class AncestorExtension(markdown.Extension):
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {}
def extendMarkdown(self, md):
"""Modify inline patterns."""
pattern = r'(\+)([^\+]+)\1'
md.inlinePatterns.register(TestAncestorExclusion.AncestorExample(pattern, 'strong'), 'ancestor-test', 0)
def setUp(self):
"""Setup markdown object."""
self.md = markdown.Markdown(extensions=[TestAncestorExclusion.AncestorExtension()])
def test_ancestors(self):
""" Test that an extension can exclude parent tags. """
test = """
Some +test+ and a [+link+](http://test.com)
"""
result = """<p>Some <strong>test</strong> and a <a href="http://test.com">+link+</a></p>"""
self.md.reset()
self.assertEqual(self.md.convert(test), result)
def test_ancestors_tail(self):
""" Test that an extension can exclude parent tags when dealing with a tail. """
test = """
[***+em+*+strong+**](http://test.com)
"""
result = """<p><a href="http://test.com"><strong><em>+em+</em>+strong+</strong></a></p>"""
self.md.reset()
self.assertEqual(self.md.convert(test), result)
class TestGeneralDeprecations(unittest.TestCase):
"""Test general deprecations."""
def test_version_deprecation(self):
"""Test that version is deprecated."""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
version = markdown.version
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
self.assertEqual(version, markdown.__version__)
def test_version_info_deprecation(self):
"""Test that version info is deprecated."""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
version_info = markdown.version_info
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
self.assertEqual(version_info, markdown.__version_info__)
def test_deprecation_wrapper_dir(self):
"""Tests the `__dir__` attribute of the class as it replaces the module's."""
dir_attr = dir(markdown)
self.assertNotIn('version', dir_attr)
self.assertIn('__version__', dir_attr)
self.assertNotIn('version_info', dir_attr)
self.assertIn('__version_info__', dir_attr)
| 2.1875 | 2 |
nervous/utility/config.py | csxeba/nervous | 1 | 5481 | import os
class StressedNetConfig:
def __init__(self,
synaptic_environmental_constraint=0.8,
group_environmental_constraint=0.6,
stress_factor=0.8,
save_folder=os.path.expanduser("~/.nervous/models/")):
self._synaptic_environmental_constraint = synaptic_environmental_constraint
self._group_environmental_constraint = group_environmental_constraint
self._stress_factor = stress_factor
self._save_folder = save_folder
self._sanitize()
def _sanitize(self):
        if not 0. <= self._group_environmental_constraint < 1.:
            raise ValueError("Group environmental constraint has to be in the range [0. - 1.)")
        if not 0. <= self._synaptic_environmental_constraint < 1.:
            raise ValueError("Synaptic environmental constraint has to be in the range [0. - 1.)")
        if not 0. <= self._stress_factor < 1.:
            raise ValueError("Stress factor has to be in the range [0. - 1.)")
if not os.path.exists(self._save_folder):
os.makedirs(self._save_folder)
@property
def synaptic_environmental_constraint(self):
return self._synaptic_environmental_constraint
@synaptic_environmental_constraint.setter
def synaptic_environmental_constraint(self, value):
self._synaptic_environmental_constraint = value
self._sanitize()
@property
def group_environmental_constraint(self):
return self._group_environmental_constraint
@group_environmental_constraint.setter
def group_environmental_constraint(self, value):
self._group_environmental_constraint = value
self._sanitize()
@property
def stress_factor(self):
return self._stress_factor
@stress_factor.setter
def stress_factor(self, value):
self._stress_factor = value
self._sanitize()
@property
def save_folder(self):
return self._save_folder
@save_folder.setter
def save_folder(self, value):
self._save_folder = value
self._sanitize()
def __getitem__(self, item):
if item == "self":
raise ValueError("Hahaha")
return self.__dict__[item]
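# Minimal usage sketch (illustration only; the values below are arbitrary
# assumptions, not defaults taken from the nervous project).
if __name__ == "__main__":
    cfg = StressedNetConfig(stress_factor=0.5)
    print(cfg.stress_factor)   # 0.5
    print(cfg.save_folder)     # expanded ~/.nervous/models/ path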
| 2.5625 | 3 |
mindspore/nn/optim/ftrl.py | XinYao1994/mindspore | 2 | 5482 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FTRL"""
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.parameter import Parameter
from mindspore.common import Tensor
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from .optimizer import Optimizer, apply_decay, grad_scale
ftrl_opt = C.MultitypeFuncGraph("ftrl_opt")
@ftrl_opt.register("Function", "Tensor", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt(opt, learning_rate, l1, l2, lr_power, linear, gradient, weight, moment):
"""Apply ftrl optimizer to the weight parameter."""
success = True
success = F.depend(success, opt(weight, moment, linear, gradient, learning_rate, l1, l2, lr_power))
return success
def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale=1.0, weight_decay=0.0,
prim_name=None):
"""Check param."""
validator.check_value_type("initial_accum", initial_accum, [float], prim_name)
validator.check_number("initial_accum", initial_accum, 0.0, Rel.GE, prim_name)
validator.check_value_type("learning_rate", learning_rate, [float], prim_name)
validator.check_number("learning_rate", learning_rate, 0.0, Rel.GT, prim_name)
validator.check_value_type("lr_power", lr_power, [float], prim_name)
validator.check_number("lr_power", lr_power, 0.0, Rel.LE, prim_name)
validator.check_value_type("l1", l1, [float], prim_name)
validator.check_number("l1", l1, 0.0, Rel.GE, prim_name)
validator.check_value_type("l2", l2, [float], prim_name)
validator.check_number("l2", l2, 0.0, Rel.GE, prim_name)
validator.check_value_type("use_locking", use_locking, [bool], prim_name)
validator.check_value_type("loss_scale", loss_scale, [float], prim_name)
validator.check_number("loss_scale", loss_scale, 1.0, Rel.GE, prim_name)
validator.check_value_type("weight_decay", weight_decay, [float], prim_name)
validator.check_number("weight_decay", weight_decay, 0.0, Rel.GE, prim_name)
class FTRL(Optimizer):
"""
Implement the FTRL algorithm with ApplyFtrl Operator.
FTRL is an online convex optimization algorithm that adaptively chooses its regularization function
based on the loss functions. Refer to paper `Adaptive Bound Optimization for Online Convex Optimization
<https://arxiv.org/abs/1002.4908>`_. Refer to paper `Ad Click Prediction: a View from the Trenches
<https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf>`_ for engineering document.
Args:
params (list[Parameter]): A list of parameter, which will be updated. The element in `params`
should be Parameter.
initial_accum (float): The starting value for accumulators, must be zero or positive values. Default: 0.1.
learning_rate (float): The learning rate value, should be positive. Default: 0.001.
lr_power (float): Learning rate power controls how the learning rate decreases during training, must be less
than or equal to zero. Use fixed learning rate if lr_power is zero. Default: -0.5.
l1 (float): l1 regularization strength, must be greater than or equal to zero. Default: 0.0.
l2 (float): l2 regularization strength, must be greater than or equal to zero. Default: 0.0.
use_locking (bool): If True use locks for update operation. Default: False.
loss_scale (float): Value for the loss scale. It should be equal to or greater than 1.0. Default: 1.0.
        weight_decay (float): Weight decay value to multiply the weight by, must be zero or a positive value. Default: 0.0.
Inputs:
- **grads** (tuple[Tensor]) - The gradients of `params` in optimizer, the shape is as same as the `params`
in optimizer.
Outputs:
tuple[Parameter], the updated parameters, the shape is the same as `params`.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> opt = nn.FTRL(net.trainable_params())
>>> model = Model(net, loss_fn=loss, optimizer=opt, metrics=None)
"""
def __init__(self, params, initial_accum=0.1, learning_rate=0.001, lr_power=-0.5, l1=0.0, l2=0.0,
use_locking=False, loss_scale=1.0, weight_decay=0.0):
super(FTRL, self).__init__(learning_rate, params)
_check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale, weight_decay,
self.cls_name)
self.moments = self.parameters.clone(prefix="moments", init=initial_accum)
self.linear = self.parameters.clone(prefix="linear", init='zeros')
self.l1 = l1
self.l2 = l2
self.lr_power = lr_power
self.reciprocal_scale = 1.0 / loss_scale
self.weight_decay = weight_decay
self.decay_tf = tuple((lambda: True)() for x in self.parameters)
self.hyper_map = C.HyperMap()
self.opt = P.ApplyFtrl(use_locking=use_locking)
self.one = Tensor(1, mstype.int32)
def construct(self, grads):
params = self.parameters
moments = self.moments
linear = self.linear
if self.weight_decay > 0.0:
grads = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, grads)
if self.reciprocal_scale != 1.0:
grads = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), grads)
lr = self.learning_rate
success = self.hyper_map(F.partial(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power),
linear, grads, params, moments)
return success
| 1.929688 | 2 |
aws_utils/region_selector.py | skimhub/aws-utils | 0 | 5483 | <gh_stars>0
import datetime
import boto3
US_EAST_REGION = {'us-east-1'}
US_EAST_AVAILABILITY_ZONES = {'us-east-1a', 'us-east-1b', 'us-east-1c', 'us-east-1e'} # note d is missing
INSTANCE_VERSION = 'Linux/UNIX (Amazon VPC)'
def fetch_spot_prices(region, start_time, end_time, instance_type, instance_version=INSTANCE_VERSION):
"""Fetches prices of EC2 spot instances from AWS.
Args:
region (str): region to look for instances in
start_time (datetime.datetime):
end_time (datetime.datetime):
instance_type (str):
instance_version (str): the types of instances that we wish to return prices for.
Returns:
        yield str, float: yields tuples of (availability_zone, price) over the period
Raises: ValueError,
raised in the event that the boto3 response is empty.
"""
conn = boto3.client('ec2', region_name=region)
res = conn.describe_spot_price_history(StartTime=start_time,
EndTime=end_time,
InstanceTypes=[instance_type],
ProductDescriptions=[instance_version])
for item in res['SpotPriceHistory']:
yield item['AvailabilityZone'], float(item['SpotPrice'])
token = res['NextToken']
while token:
res = conn.describe_spot_price_history(StartTime=start_time,
EndTime=end_time,
InstanceTypes=[instance_type],
ProductDescriptions=[instance_version],
NextToken=token)
for item in res['SpotPriceHistory']:
yield item['AvailabilityZone'], float(item['SpotPrice'])
token = res['NextToken']
def fetch_price_stats_per_availability_zone(region, start_time, end_time, instance_type, instance_version=INSTANCE_VERSION,
filter_availability_zones=None):
"""Groups raw prices by region, returns min, max and avg price.
Args:
region (str): region to look for instances in
start_time (datetime.datetime):
end_time (datetime.datetime):
instance_type (str):
instance_version (str): the types of instances that we wish to return prices for.
filter_availability_zones ({str}): if set then we only return a price if the availability zone is in this list
Returns: dict,
{'us-east-1b': {'min': 2.01, 'max': 3.53,'avg':2.8, 'latest':3.0}}
"""
by_zone = {}
for zone, price in fetch_spot_prices(region, start_time, end_time, instance_type, instance_version):
by_zone.setdefault(zone, []).append(price)
prices_per_region = {}
for zone, prices in by_zone.iteritems():
if filter_availability_zones is None or zone in filter_availability_zones:
region_prices = {'min': min(prices),
'max': max(prices),
'avg': sum(prices) / float(len(prices)),
'latest': prices[0]}
prices_per_region[zone] = region_prices
return prices_per_region
def get_cheapest_availability_zone(instance_type, search_regions=US_EAST_REGION,
filter_availability_zones=US_EAST_AVAILABILITY_ZONES, expected_job_length=datetime.timedelta(days=1)):
"""Get the cheapest availability zone from a set of regions. Cheapest is determined by 'latest price + average price'
over the duration that the job is expected to run for
Args:
filter_availability_zones ({str}): We only return results for zones in this set
instance_type (str): Type of aws instance e.g. "m2.4xlarge"
search_regions ({str}): Set of regions we want to look for availability zones in.
expected_job_length (datetime.timedelta): The period we expect the job to run this is used as the amount of time to look back over
for the average
Returns:
(str, {}) : e.g. ('us-east-1b': {'min': 2.01, 'max': 3.53,'avg':2.8, 'latest':3.0})
"""
if isinstance(search_regions, str):
search_regions = {search_regions}
aggregated_prices = {}
for region in search_regions:
result_stats = fetch_price_stats_per_availability_zone(region,
datetime.datetime.utcnow() - expected_job_length,
datetime.datetime.utcnow(),
instance_type,
filter_availability_zones=filter_availability_zones)
if not len(result_stats):
raise Exception("No valid avialability zones found for region %s" % (region,))
aggregated_prices.update(result_stats)
cheapest_availability_zone, stats = min(aggregated_prices.iteritems(), key=lambda x: x[1]['avg'] + x[1]['latest'])
return cheapest_availability_zone, stats
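# Usage sketch (illustration only): requires AWS credentials allowed to call
# DescribeSpotPriceHistory; the instance type below is an assumption.
if __name__ == '__main__':
    zone, stats = get_cheapest_availability_zone('m4.xlarge')
    print(zone, stats)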
| 2.5 | 2 |
pynn/__init__.py | jkae/knn-exercise | 0 | 5484 |
from .nearest_neighbor_index import NearestNeighborIndex
from .kd_tree import *
| 1.039063 | 1 |
tests/test_try.py | threecifanggen/python-functional-programming | 3 | 5485 | '''
Author: huangbaochen<<EMAIL>>
Date: 2021-12-11 20:04:19
LastEditTime: 2021-12-11 21:46:16
LastEditors: huangbaochen<<EMAIL>>
Description: Tests for the Try monad
No MERCY
'''
import pytest
from fppy.try_monad import Try, Success, Fail
from fppy.option import Just, Nothing
@pytest.mark.try_monad
def test_try_apply():
assert Try.apply(1) == Success(1)
assert Try(1) == Success(1)
@pytest.mark.try_monad
def test_try_unapply():
assert Success.unapply(Success(1)) == Just(1)
assert Fail.unapply(Fail(TypeError(), 1)) == Nothing()
with pytest.raises(TypeError):
Fail.unapply(1)
with pytest.raises(TypeError):
Fail.unapply(Success(1))
with pytest.raises(TypeError):
Success.unapply(1)
with pytest.raises(TypeError):
Success.unapply(Fail(Exception(), 1))
def test_try_monad_map():
assert Success(1).map(lambda x: x + 1) == Success(2)
assert Success(1).map(lambda x: x / 0) ==\
Fail(ZeroDivisionError('division by zero'), 1)
assert Fail(ZeroDivisionError('division by zero'), 1)\
.map(lambda x: x + 1) ==\
Fail(ZeroDivisionError('division by zero'), 1)
@pytest.mark.try_monad
def test_try_monad_flat_map():
assert Success(1).flat_map(lambda x: Success(2)) == Success(2)
assert Fail(ZeroDivisionError('division by zero'), 1)\
.flat_map(lambda x: Success(1)) ==\
Fail(ZeroDivisionError('division by zero'), 1)
with pytest.raises(TypeError):
Success(1).flat_map(lambda x: x + 1)
@pytest.mark.try_monad
def test_try_monad_eq():
assert Fail(ZeroDivisionError('division by zero'), 1) ==\
Fail(ZeroDivisionError('division by zero'), 1)
assert Fail(ZeroDivisionError('division by'), 1) !=\
Fail(ZeroDivisionError('division by zero'), 1)
assert Fail(ZeroDivisionError('division by zero'), 0) !=\
Fail(ZeroDivisionError('division by zero'), 1)
@pytest.mark.try_monad
def test_try_monad_get():
assert Fail(ZeroDivisionError('division by zero'), 1)\
.get().args ==\
ZeroDivisionError('division by zero').args
assert Success(1).get() == 1
# pylint: disable=no-member
assert Try("s").get() == "s"
@pytest.mark.try_monad
def test_try_monad_get_or_else():
assert Fail(ZeroDivisionError('division by zero'), 1)\
.get_or_else(2) == 2
assert Success(1).get_or_else(2) == 1
@pytest.mark.try_monad
def test_try_monad_get_error_input():
assert Fail(ZeroDivisionError('division by zero'), 1)\
.get_error_input() == 1
| 2.703125 | 3 |
app/internal/daily_quotes.py | yammesicka/calendar | 0 | 5486 | <reponame>yammesicka/calendar<filename>app/internal/daily_quotes.py
from datetime import date
from typing import Dict, Optional
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import func
from app.database.models import Quote
TOTAL_DAYS = 366
def create_quote_object(quotes_fields: Dict[str, Optional[str]]) -> Quote:
"""This function create a quote object from given fields dictionary.
It is used for adding the data from the json into the db"""
return Quote(
text=quotes_fields['text'],
author=quotes_fields['author']
)
def quote_per_day(
session: Session, date: date = date.today()
) -> Optional[Quote]:
"""This function provides a daily quote, relevant to the current
day of the year. The quote is randomally selected from a set
of quotes matching to the given day"""
day_num = date.timetuple().tm_yday
quote = session.query(Quote).filter(
Quote.id % TOTAL_DAYS == day_num).order_by(func.random()).first()
return quote
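def example_print_daily_quote(session: Session) -> None:
    """Usage sketch (illustration only): `session` is assumed to be an
    SQLAlchemy Session bound to the application's database."""
    quote = quote_per_day(session)
    if quote is not None:
        print(f"{quote.text} -- {quote.author}")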
| 3.078125 | 3 |
src/789A.py | viing937/codeforces | 2 | 5487 | n, k = map(int, input().split())
w = list(map(int, input().split()))
r = sum(map(lambda x: (x+k-1)//k, w))
print((r+1)//2)
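# Worked example (illustration only; the input values below are assumed):
# for k = 2 and w = [2, 3, 4] the per-type pocket counts are 1, 2 and 2,
# so r = 5 and the printed answer is (5 + 1) // 2 = 3.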
| 2.65625 | 3 |
platform/server/detect.py | leyyin/godot | 24 | 5488 | <reponame>leyyin/godot<gh_stars>10-100
import os
import sys
def is_active():
return True
def get_name():
return "Server"
def can_build():
if (os.name!="posix"):
return False
return True # enabled
def get_opts():
return [
('use_llvm','Use llvm compiler','no'),
('force_32_bits','Force 32 bits binary','no')
]
def get_flags():
return [
('builtin_zlib', 'no'),
]
def configure(env):
env.Append(CPPPATH=['#platform/server'])
if (env["use_llvm"]=="yes"):
env["CC"]="clang"
env["CXX"]="clang++"
env["LD"]="clang++"
if (env["colored"]=="yes"):
if sys.stdout.isatty():
env.Append(CXXFLAGS=["-fcolor-diagnostics"])
is64=sys.maxsize > 2**32
if (env["bits"]=="default"):
if (is64):
env["bits"]="64"
else:
env["bits"]="32"
#if (env["tools"]=="no"):
# #no tools suffix
# env['OBJSUFFIX'] = ".nt"+env['OBJSUFFIX']
# env['LIBSUFFIX'] = ".nt"+env['LIBSUFFIX']
if (env["target"]=="release"):
env.Append(CCFLAGS=['-O2','-ffast-math','-fomit-frame-pointer'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['-O2','-ffast-math','-DDEBUG_ENABLED'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['-g2', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
env.Append(CPPFLAGS=['-DSERVER_ENABLED','-DUNIX_ENABLED'])
env.Append(LIBS=['pthread','z']) #TODO detect linux/BSD!
if (env["CXX"]=="clang++"):
env.Append(CPPFLAGS=['-DTYPED_METHOD_BIND'])
env["CC"]="clang"
env["LD"]="clang++"
| 2.1875 | 2 |
telemetry/Truck.py | SnipsMine/ETS2-Speedrun-Tool | 0 | 5489 | <reponame>SnipsMine/ETS2-Speedrun-Tool<filename>telemetry/Truck.py
from telemetry.TruckConstants import ConstantValues
from telemetry.TruckCurrent import CurrentValues
from telemetry.TruckPositioning import Positioning
class TruckValues:
constant_values = None
current_values = None
positioning = None
def __init__(self):
self.current_values = CurrentValues()
self.constant_values = ConstantValues()
self.positioning = Positioning()
| 2.234375 | 2 |
IntroToSpark/Assign4_Q1-6_action.py | petersontylerd/spark-courses | 0 | 5490 |
import csv
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType
spark = SparkSession.builder.appName("Assignment4").getOrCreate()
sc = spark.sparkContext
# load data to dataframe
path = 'fake_data.csv'
df = spark.read.format('csv').option('header','true').load(path)
# cast income as an integer
df = df.withColumn('Income', df['Income'].cast(IntegerType()))
# Question 1
print('*' * 30)
print('\nQuestion 1\n')
print(df.rdd.map(lambda x: (x[1], x[0])).groupByKey().mapValues(lambda vals: len(set(vals))).sortBy(lambda a: a[1], ascending = False).take(1))
print('\n\n')
# Question 2
print('*' * 30)
print('\nQuestion 2\n')
print(df.rdd.filter(lambda v: v[1] == 'United States of America').map(lambda x: (x[1], x[4])).groupByKey().mapValues(lambda x: sum(x) / len(x)).collect())
print('\n\n')
# Question 3
print('*' * 30)
print('\nQuestion 3\n')
print(df.rdd.filter(lambda v: v[4] > 100000).filter(lambda v: v[7] == 'FALSE').count())
print('\n\n')
# Question 4
print('*' * 30)
print('\nQuestion 4\n')
print(df.rdd.filter(lambda v: v[1] == 'United States of America').sortBy(lambda x: x[4], ascending = False).map(lambda x: (x[3], x[6], x[4], x[5])).take(10))
print('\n\n')
# Question 5
print('*' * 30)
print('\nQuestion 5\n')
print(df.rdd.groupBy(lambda x: x[5]).count())
print('\n\n')
# Question 6
print('*' * 30)
print('\nQuestion 6\n')
print(df.rdd.filter(lambda v: v[5] == 'Writer').filter(lambda x: x[4] < 100000).count())
print('\n\n')
| 3.484375 | 3 |
src/firebot/tests/factories.py | zipmex/fire | 52 | 5491 | <reponame>zipmex/fire<gh_stars>10-100
import factory
from django.contrib.auth import get_user_model
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = get_user_model()
first_name = factory.Faker('name')
last_name = factory.Faker('name')
email = factory.Faker('email')
| 1.992188 | 2 |
reamber/o2jam/O2JHold.py | Bestfast/reamberPy | 0 | 5492 | <filename>reamber/o2jam/O2JHold.py
from dataclasses import dataclass, field
from reamber.base.Hold import Hold, HoldTail
from reamber.o2jam.O2JNoteMeta import O2JNoteMeta
@dataclass
class O2JHoldTail(HoldTail, O2JNoteMeta):
pass
@dataclass
class O2JHold(Hold, O2JNoteMeta):
""" Defines the O2Jam Bpm Object
The O2Jam Bpm Object is stored in binary file .ojn
"""
_tail: O2JHoldTail = field(init=False)
def _upcastTail(self, **kwargs) -> O2JHoldTail:
return O2JHoldTail(**kwargs)
| 2.796875 | 3 |
peacebot/core/plugins/Miscellaneous/__init__.py | Peacebot-Development/Peacebot-v2 | 3 | 5493 | <filename>peacebot/core/plugins/Miscellaneous/__init__.py<gh_stars>1-10
import lightbulb
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from peacebot.core.utils.time import TimeConverter
def fetch_scheduler(ctx: lightbulb.Context) -> AsyncIOScheduler:
return ctx.bot.d.scheduler
async def convert_time(ctx: lightbulb.Context, time: str) -> float:
seconds = await TimeConverter.convert(TimeConverter, ctx, time)
return seconds
async def send_remainder(ctx: lightbulb.Context, text: str) -> None:
await ctx.respond(
f"{ctx.author.mention} Remainder: `{text}`",
user_mentions=True,
)
| 2.0625 | 2 |
example/android/python/msite_simple_default_browser.py | laichimirum/docker-appium-emulator | 8 | 5494 | import unittest
from appium import webdriver
class MSiteDefaultBrowserAndroidUITests(unittest.TestCase):
def setUp(self):
# Default browser does not exist for android >= 6.0
desired_caps = {
'platformName': 'Android',
'deviceName': 'Android Emulator',
'appPackage': 'com.android.browser',
'appActivity': 'com.android.browser.BrowserActivity',
'avd': 'samsung_galaxy_s6_6.0'
}
self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
def test_open_url(self):
self.driver.get('http://targeturl.com')
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(MSiteDefaultBrowserAndroidUITests)
unittest.TextTestRunner(verbosity=2).run(suite)
| 2.71875 | 3 |
src/nn/dataset_utils/types_processing.py | sola-st/Nalin | 0 | 5495 | <gh_stars>0
"""
Created on 17-June-2020
@author <NAME>
The types extracted at runtime usually look something like --> <class 'numpy.ndarray'> or
<class 'seaborn.palettes._ColorPalette'>; change them to --> ndarray, ColorPalette
"""
import re
remove_chars = re.compile(r'>|\'|<|(class )|_|(type)')
def process_types(tp: str) -> str:
cleaned_type = remove_chars.sub('', tp)
cleaned_type = cleaned_type.split('.')[-1].strip()
return cleaned_type
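# Usage sketch (illustration only):
if __name__ == '__main__':
    print(process_types("<class 'numpy.ndarray'>"))                    # ndarray
    print(process_types("<class 'seaborn.palettes._ColorPalette'>"))   # ColorPalette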
| 2.640625 | 3 |
src/canvas.py | soootaleb/spare | 1 | 5496 | <filename>src/canvas.py<gh_stars>1-10
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.ticker as ticker
import numpy as np
import random, matplotlib.pyplot as plt
class PlotCanvas(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.plot()
def plot(self):
data = [random.random() for i in range(25)]
ax = self.figure.add_subplot(111)
ax.plot(data, 'r-')
ax.set_title('PyQt Matplotlib Example')
class ImageCanvas(FigureCanvas):
def __init__(self, parent = None, width = 5, height = 4, dpi=100):
fig = Figure(figsize = (width, height), dpi = dpi, frameon = False)
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
self.axes = fig.add_subplot(111)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def plot(self, image):
self.axes.axis('off')
display = image.image
display[display == 1] = 255
if image.color:
self.axes.imshow(image.image)
else :
self.axes.imshow(display, cmap = "gray")
self.show()
class HistogramCanvas(FigureCanvas):
    '''
    This class is used to plot the histogram of the two objects in the main module.
    The values are computed in one of the descriptors.
    '''
def __init__(self, parent = None, is_polar = True, width = 8, height = 5, dpi = 100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
FigureCanvas.__init__(self, self.fig)
self.is_polar = is_polar
self.setParent(parent)
if self.is_polar:
self.axes = self.fig.add_subplot(111, projection='polar')
else :
self.axes = self.fig.add_subplot(111)
self.axes.grid(True)
        #TODO : Add the names of the objects (fname - extension?)
FigureCanvas.updateGeometry(self)
def plot(self, histogram, color = None):
self.axes.set_title("Spatial relations between A and B", va='bottom')
if self.is_polar:
self.axes.set_rlim(0,1)
theta = [float(k)/ 180 * np.pi for k in histogram.values.keys()]
            #TODO : refactor this ugly-but-working code
if len(theta) > 16:
i = 0
theta_major_name = []
for k in histogram.values.keys():
if i % 3 == 0:
theta_major_name.append(float(k)/ 180 * np.pi)
i+=1
self.axes.xaxis.set_major_locator(ticker.FixedLocator(theta_major_name))
else :
self.axes.xaxis.set_major_locator(ticker.LinearLocator(len(theta)))
self.axes.xaxis.set_minor_locator(ticker.LinearLocator(len(theta)))
self.axes.grid(b = True, which='major', linestyle='-')
self.axes.grid(b = True, which='minor', linestyle='--')
self.axes.plot(theta, list(histogram.values.values()))
else:
self.axes.plot(list(histogram.values.keys()), list(histogram.values.values()))
# self.axes.plot(list(histogram.values.keys()), list(histogram.gaussian), color="red", ls='--')
self.draw()
def clear(self):
self.axes.clear()
def lin_or_polar(self, new_value : bool):
'''
set the type of the histogram to be polar or linear.
'''
self.is_polar = new_value
self.fig.clear()
if self.is_polar:
self.axes = self.fig.add_subplot(111, projection='polar')
else :
self.axes = self.fig.add_subplot(111)
FigureCanvas.updateGeometry(self)
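# Minimal embedding sketch (an addition, not part of the original module; the
# window setup below is hypothetical and only shows how a canvas is attached):
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    window = QMainWindow()
    # Any of the canvases above can be used as a central widget.
    window.setCentralWidget(PlotCanvas(window, width=5, height=4))
    window.show()
    sys.exit(app.exec_())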
| 2.515625 | 3 |
src/cpg_scpi/test/__init__.py | GeorgBraun/cpg_scpi_python | 0 | 5497 | '''Functional tests for CPG'''
from .. import CircuitPlayground
from .. import __version__ as CircuitPlaygroundVersion
import time
def funcTest(timestamps: bool = False) -> None:
cpg = CircuitPlayground()
if timestamps:
_printFuncTestHeadingWithDeliLine(f'cpg_scpi v{CircuitPlaygroundVersion}\nRUNNING SOME FUNCTIONAL-TESTS WITH THE CPG with timestamps ...\n')
else:
_printFuncTestHeadingWithDeliLine(f'cpg_scpi v{CircuitPlaygroundVersion}\nRUNNING SOME FUNCTIONAL-TESTS WITH THE CPG without timestamps ...\n')
# test_led(cpg)
# test_buttonAny(cpg, timestamps)
# test_switch(cpg, timestamps)
test_temp(cpg, timestamps)
test_light(cpg, timestamps)
test_acc(cpg, timestamps)
test_touch(cpg, timestamps)
_printFuncTestHeadingWithDeliLine('DONE WITH FUNCTIONAL-TESTS')
_printFuncTestDeliLine()
def _printCountdown(start: int = 3) -> None:
for i in range(start, 0, -1):
print(i, end=" ", flush=True)
time.sleep(1)
print('', flush=True)
def _printFuncTestDeliLine() -> None:
print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
def _printFuncTestHeadingWithDeliLine(heading) -> None:
_printFuncTestDeliLine()
print(heading)
def test_buttonAny(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | any button |'
outFormat = '| {:5} | {:12.3f} | {!s:10} |'
else:
outHeading = '| count | any button |'
outFormat = '| {:5} | {!s:10} |'
_printFuncTestHeadingWithDeliLine('Button-Test: Press left or right button...')
print(outHeading)
_printCountdown(3)
count = 10
for i in range(count):
result = (count-i, *cpg.buttonAny_wts()) if timestamps else (count-i, cpg.buttonAny())
print(outFormat.format(*result))
cpg.wait(0.5)
def test_switch(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | switch |'
outFormat = '| {:5} | {:12.3f} | {!s:6} |'
else:
outHeading = '| count | switch |'
outFormat = '| {:5} | {!s:6} |'
_printFuncTestHeadingWithDeliLine('Switch-Test: Change slider switch position...')
print(outHeading)
_printCountdown(3)
count = 10
for i in range(count):
result = (count-i, *cpg.switch_wts()) if timestamps else (count-i, cpg.switch())
print(outFormat.format(*result))
cpg.wait(0.5)
def test_temp(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | temp °C |'
outFormat = '| {:5} | {:12.3f} | {:7.2f} |'
else:
outHeading = '| count | temp °C |'
outFormat = '| {:5} | {:7.2f} |'
_printFuncTestHeadingWithDeliLine('Temp-Sensor-Test ...')
print(outHeading)
_printCountdown(3)
count = 20
for i in range(count):
result = (count-i, *cpg.temp_wts()) if timestamps else (count-i, cpg.temp())
print(outFormat.format(*result))
cpg.wait(0.5)
def test_light(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | light |'
outFormat = '| {:5} | {:12.3f} | {:5} |'
else:
outHeading = '| count | light |'
outFormat = '| {:5} | {:5} |'
_printFuncTestHeadingWithDeliLine('Light-Sensor-Test: Move hand over light sensor...')
print(outHeading)
_printCountdown(3)
count = 20
for i in range(count):
result = (count-i, *cpg.light_wts()) if timestamps else (count-i, cpg.light())
print(outFormat.format(*result))
cpg.wait(0.5)
def test_acc(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | x m/s^2 | y m/s^2 | z m/s^2 |'
outFormat = '| {:5} | {:12.3f} | {:7.2f} | {:7.2f} | {:7.2f} |'
testFunction = cpg.acc_wts
else:
outHeading = '| count | x m/s^2 | y m/s^2 | z m/s^2 |'
outFormat = '| {:5} | {:7.2f} | {:7.2f} | {:7.2f} |'
testFunction = cpg.acc
_printFuncTestHeadingWithDeliLine('Accelerometer-Test: Tilt the CPG board...')
print(outHeading)
_printCountdown(3)
count = 60
for i in range(count):
print(outFormat.format(count-i, *testFunction()))
cpg.wait(0.2)
def test_touch(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | touch | binary |'
outFormat = '| {0:5} | {1:12.3f} | {2:5} | {2:08b} |'
else:
outHeading = '| count | touch | binary |'
outFormat = '| {0:5} | {1:5} | {1:08b} |'
_printFuncTestHeadingWithDeliLine('Touch-Sensor-Test: Touch capacitive sensor pads...')
print(outHeading)
_printCountdown(3)
count = 30
for i in range(count):
result = (count-i, *cpg.touch_wts()) if timestamps else (count-i, cpg.touch())
print(outFormat.format(*result))
cpg.wait(0.5)
def test_led(cpg) -> None:
'''Flash LEDs and run a short chasing light.'''
_printFuncTestHeadingWithDeliLine('LED-Test: Flash LEDs and run a short chasing light...')
print('flashing LEDs...')
test_ledDemo(cpg)
value=1
# print('| val | LEDs |')
for i in range(10):
# print(f'| {value:4} | {value:010b} |')
cpg.led(value)
cpg.wait(0.2)
value <<= 1 # shift 1 bit to the left
for i in range(10):
value >>= 1 # shift 1 bit to the right
# print(f'| {value:4} | {value:010b} |')
cpg.led(value)
cpg.wait(0.2)
print('flashing LEDs...')
test_ledDemo(cpg)
def test_ledDemo(cpg) -> None:
'''Flash LEDs three times.'''
for i in range(3):
cpg.ledDemo()
cpg.wait(0.2)
def testAccSpeed(cpg, iterations: int = 100) -> None:
'''Measure how long it takes to do an accelerometer measurement.'''
print(f'Testing acc measurement speed with {iterations} iterations. Please wait ...')
import timeit
result = timeit.Timer(stmt=lambda: cpg.acc(), setup='pass').timeit(number=iterations)
print(f'Total time: {result:.1f} seconds.')
print(f'On average {(result*1000/iterations):.1f} ms per measurement.')
def testLightSpeed(cpg, iterations: int = 100) -> None:
'''Measure how long it takes to do a light sensor measurement.'''
print(f'Testing light measurement speed with {iterations} iterations. Please wait ...')
import timeit
result = timeit.Timer(stmt=lambda: cpg.light(), setup='pass').timeit(number=iterations)
print(f'Total time: {result:.1f} seconds.')
print(f'On average {(result*1000/iterations):.1f} ms per measurement.')
def _testResponseWaitTime(cpg, iterations: int = 10000) -> None:
    '''Test whether the wait time for additional, unexpected responses is long enough.'''
print(f'Testing Response-Wait-Time with {iterations} iterations ...')
for i in range(iterations):
if i%100==0: print('try-count', i)
try:
# Request acc measurement values, but do not expect any response, even if the CPG will send one.
cpg._query('MEAS:ACC?', 0)
# If we are still here, we did not get a response. This is bad.
print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
print('ERROR in testResponseWaitTime(): CPG-Response was too late.')
print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
except Exception:
# The normal behavior is a response, resulting in an exception.
# This is what we expected. Therefore, just continue.
pass
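if __name__ == '__main__':
    # Illustrative entry point (an addition, not part of the original file):
    # running the module directly starts the functional tests, which requires
    # a Circuit Playground board to be connected via USB.
    funcTest(timestamps=True)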
| 2.609375 | 3 |
main/models.py | yejun1060/SbjctSclctn | 0 | 5498 | from django.db import models
class Account(models.Model):
clsNb = models.IntegerField()
Name = models.CharField(max_length=10)
pw = models.IntegerField()
def __str__(self):
return self.Name | 2.421875 | 2 |
test/utils/test_geodesic.py | shrey-bansal/pytorch_geometric | 2 | 5499 | from math import sqrt
import torch
from torch_geometric.utils import geodesic_distance
def test_geodesic_distance():
pos = torch.Tensor([[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]])
face = torch.tensor([[0, 1, 3], [0, 2, 3]]).t()
out = geodesic_distance(pos, face)
expected = [
[0, 1, 1, sqrt(2)],
[1, 0, sqrt(2), 1],
[1, sqrt(2), 0, 1],
[sqrt(2), 1, 1, 0],
]
assert torch.allclose(out, torch.tensor(expected))
assert torch.allclose(out, geodesic_distance(pos, face, num_workers=-1))
out = geodesic_distance(pos, face, norm=False)
expected = [
[0, 2, 2, 2 * sqrt(2)],
[2, 0, 2 * sqrt(2), 2],
[2, 2 * sqrt(2), 0, 2],
[2 * sqrt(2), 2, 2, 0],
]
assert torch.allclose(out, torch.tensor(expected))
src = torch.tensor([0, 0, 0, 0])
dest = torch.tensor([0, 1, 2, 3])
out = geodesic_distance(pos, face, src=src, dest=dest)
expected = [0, 1, 1, sqrt(2)]
assert torch.allclose(out, torch.tensor(expected))
out = geodesic_distance(pos, face, src=src[0:1])
expected = [0, 1, 1, sqrt(2)]
assert torch.allclose(out, torch.tensor(expected))
out = geodesic_distance(pos, face, dest=dest)
expected = [0, 0, 0, 0]
assert torch.allclose(out, torch.Tensor(expected))
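if __name__ == '__main__':
    # Convenience entry point (an addition, not part of the original test file);
    # note that geodesic_distance needs the optional `gdist` package installed.
    test_geodesic_distance()
    print('test_geodesic_distance passed')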
| 2.5 | 2 |