content (string, 7-928k) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104) | path (string, 4-230) | size (int64, 7-928k) | lang (string, 1 class) |
---|---|---|---|---|---|---|---|---|
# Generated by Django 2.2.13 on 2020-09-16 14:47
# Third-party
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("licenses", "0004_auto_20200902_1302"),
]
operations = [
migrations.AlterUniqueTogether(
name="translatedlicensename",
unique_together=None,
),
migrations.RemoveField(
model_name="translatedlicensename",
name="license",
),
migrations.DeleteModel(
name="LicenseLogo",
),
migrations.DeleteModel(
name="TranslatedLicenseName",
),
]
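# Hedged usage note (not part of the generated migration): a migration like
# this is applied with Django's standard management command, e.g.
#
#   python manage.py migrate licenses
#
# and can be rolled back to the previous state of the app with
#
#   python manage.py migrate licenses 0004_auto_20200902_1302
#
# Reversing it re-creates the deleted models' schema, but not the data that
# the DeleteModel operations dropped.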
| 22.310345 | 48 | 0.582689 | ["MIT"] | Cronus1007/cc-licenses | licenses/migrations/0005_auto_20200916_1047.py | 647 | Python |
from backpack.core.derivatives.linear import LinearDerivatives
from backpack.extensions.curvmatprod.hmp.hmpbase import HMPBase
class HMPLinear(HMPBase):
def __init__(self):
super().__init__(derivatives=LinearDerivatives(), params=["weight", "bias"])
def weight(self, ext, module, g_inp, g_out, backproped):
h_out_mat_prod = backproped
def weight_hmp(mat):
result = self.derivatives.weight_jac_mat_prod(module, g_inp, g_out, mat)
result = h_out_mat_prod(result)
result = self.derivatives.param_mjp("weight", module, g_inp, g_out, result)
return result
return weight_hmp
def bias(self, ext, module, g_inp, g_out, backproped):
h_out_mat_prod = backproped
def bias_hmp(mat):
result = self.derivatives.bias_jac_mat_prod(module, g_inp, g_out, mat)
result = h_out_mat_prod(result)
result = self.derivatives.param_mjp("bias", module, g_inp, g_out, result)
return result
return bias_hmp
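# --- Hedged usage sketch (not part of the original module) ---
# The closures above are consumed through BackPACK's HMP extension: extend the
# model and loss, call backward() inside the backpack(HMP()) context, then call
# each parameter's `hmp` attribute with a stack of vectors. The model, data
# shapes and the number of vectors below are illustrative assumptions.
if __name__ == "__main__":
    import torch
    from backpack import backpack, extend
    from backpack.extensions import HMP

    model = extend(torch.nn.Linear(10, 3))
    lossfunc = extend(torch.nn.CrossEntropyLoss())

    X, y = torch.randn(8, 10), torch.randint(0, 3, (8,))
    loss = lossfunc(model(X), y)

    with backpack(HMP()):
        loss.backward()

    for name, param in model.named_parameters():
        vecs = torch.randn(5, *param.shape)   # 5 stacked vectors
        hessian_vecs = param.hmp(vecs)        # block Hessian-matrix product
        print(name, tuple(hessian_vecs.shape))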
| 32.875 | 87 | 0.66635 | ["MIT"] | f-dangel/backpack | backpack/extensions/curvmatprod/hmp/linear.py | 1,052 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AgentPoolsOperations:
"""AgentPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2019_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
registry_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPool":
"""Gets the detailed information for a given agent pool.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPool, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
registry_name: str,
agent_pool_name: str,
agent_pool: "_models.AgentPool",
**kwargs: Any
) -> "_models.AgentPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(agent_pool, 'AgentPool')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AgentPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
agent_pool_name: str,
agent_pool: "_models.AgentPool",
**kwargs: Any
) -> AsyncLROPoller["_models.AgentPool"]:
"""Creates an agent pool for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:param agent_pool: The parameters of an agent pool that needs to be scheduled.
:type agent_pool: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
agent_pool_name=agent_pool_name,
agent_pool=agent_pool,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
registry_name: str,
agent_pool_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
registry_name: str,
agent_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a specified agent pool resource.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
registry_name: str,
agent_pool_name: str,
update_parameters: "_models.AgentPoolUpdateParameters",
**kwargs: Any
) -> "_models.AgentPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(update_parameters, 'AgentPoolUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AgentPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
registry_name: str,
agent_pool_name: str,
update_parameters: "_models.AgentPoolUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.AgentPool"]:
"""Updates an agent pool with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:param update_parameters: The parameters for updating an agent pool.
:type update_parameters: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
agent_pool_name=agent_pool_name,
update_parameters=update_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AgentPoolListResult"]:
"""Lists all the agent pools for a specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AgentPoolListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AgentPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools'} # type: ignore
async def get_queue_status(
self,
resource_group_name: str,
registry_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPoolQueueStatus":
"""Gets the count of queued runs for a given agent pool.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolQueueStatus, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolQueueStatus
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolQueueStatus"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_queue_status.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolQueueStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_queue_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}/listQueueStatus'} # type: ignore
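# --- Hedged usage sketch (not generated code) ---
# Operations classes like the one above are not instantiated directly; they are
# reached through the service client's `agent_pools` attribute. The resource
# names and credential setup below are illustrative assumptions.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.containerregistry.v2019_06_01_preview.aio import ContainerRegistryManagementClient

    async def main():
        async with DefaultAzureCredential() as credential:
            async with ContainerRegistryManagementClient(credential, "<subscription-id>") as client:
                # Point read, then page through every agent pool of one registry.
                pool = await client.agent_pools.get("my-rg", "myregistry", "mypool")
                print(pool.name)
                async for p in client.agent_pools.list("my-rg", "myregistry"):
                    print(p.name)

    asyncio.run(main())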
| 52.675 | 232 | 0.67427 | ["MIT"] | AFengKK/azure-sdk-for-python | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py | 33,712 | Python |
import dis
import unittest
from test.support.bytecode_helper import BytecodeTestCase
def count_instr_recursively(f, opname):
count = 0
for instr in dis.get_instructions(f):
if instr.opname == opname:
count += 1
if hasattr(f, '__code__'):
f = f.__code__
for c in f.co_consts:
if hasattr(c, 'co_code'):
count += count_instr_recursively(c, opname)
return count
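# Hedged illustration (not part of the original test suite): the helper above
# can be used directly to confirm what the optimizer folded away, e.g. a
# constant addition compiles without BINARY_ADD while a runtime one keeps it.
def _count_instr_example():
    def folded():
        return 2 + 3          # folded to LOAD_CONST 5 at compile time
    def not_folded(a, b):
        return a + b          # still a runtime BINARY_ADD
    return (count_instr_recursively(folded, 'BINARY_ADD'),
            count_instr_recursively(not_folded, 'BINARY_ADD'))  # (0, 1)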
class TestTranforms(BytecodeTestCase):
def check_jump_targets(self, code):
instructions = list(dis.get_instructions(code))
targets = {instr.offset: instr for instr in instructions}
for instr in instructions:
if 'JUMP_' not in instr.opname:
continue
tgt = targets[instr.argval]
# jump to unconditional jump
if tgt.opname in ('JUMP_ABSOLUTE', 'JUMP_FORWARD'):
self.fail(f'{instr.opname} at {instr.offset} '
f'jumps to {tgt.opname} at {tgt.offset}')
# unconditional jump to RETURN_VALUE
if (instr.opname in ('JUMP_ABSOLUTE', 'JUMP_FORWARD') and
tgt.opname == 'RETURN_VALUE'):
self.fail(f'{instr.opname} at {instr.offset} '
f'jumps to {tgt.opname} at {tgt.offset}')
# JUMP_IF_*_OR_POP jump to conditional jump
if '_OR_POP' in instr.opname and 'JUMP_IF_' in tgt.opname:
self.fail(f'{instr.opname} at {instr.offset} '
f'jumps to {tgt.opname} at {tgt.offset}')
def check_lnotab(self, code):
"Check that the lnotab byte offsets are sensible."
code = dis._get_code_object(code)
lnotab = list(dis.findlinestarts(code))
# Don't bother checking if the line info is sensible, because
# most of the line info we can get at comes from lnotab.
min_bytecode = min(t[0] for t in lnotab)
max_bytecode = max(t[0] for t in lnotab)
self.assertGreaterEqual(min_bytecode, 0)
self.assertLess(max_bytecode, len(code.co_code))
# This could conceivably test more (and probably should, as there
# aren't very many tests of lnotab), if peepholer wasn't scheduled
# to be replaced anyway.
def test_unot(self):
# UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE'
def unot(x):
if not x == 2:
del x
self.assertNotInBytecode(unot, 'UNARY_NOT')
self.assertNotInBytecode(unot, 'POP_JUMP_IF_FALSE')
self.assertInBytecode(unot, 'POP_JUMP_IF_TRUE')
self.check_lnotab(unot)
def test_elim_inversion_of_is_or_in(self):
for line, cmp_op, invert in (
('not a is b', 'IS_OP', 1,),
('not a is not b', 'IS_OP', 0,),
('not a in b', 'CONTAINS_OP', 1,),
('not a not in b', 'CONTAINS_OP', 0,),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, cmp_op, invert)
self.check_lnotab(code)
def test_global_as_constant(self):
# LOAD_GLOBAL None/True/False --> LOAD_CONST None/True/False
def f():
x = None
x = None
return x
def g():
x = True
return x
def h():
x = False
return x
for func, elem in ((f, None), (g, True), (h, False)):
self.assertNotInBytecode(func, 'LOAD_GLOBAL')
self.assertInBytecode(func, 'LOAD_CONST', elem)
self.check_lnotab(func)
def f():
'Adding a docstring made this test fail in Py2.5.0'
return None
self.assertNotInBytecode(f, 'LOAD_GLOBAL')
self.assertInBytecode(f, 'LOAD_CONST', None)
self.check_lnotab(f)
def test_while_one(self):
# Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx
def f():
while 1:
pass
return list
for elem in ('LOAD_CONST', 'POP_JUMP_IF_FALSE'):
self.assertNotInBytecode(f, elem)
for elem in ('JUMP_ABSOLUTE',):
self.assertInBytecode(f, elem)
self.check_lnotab(f)
def test_pack_unpack(self):
for line, elem in (
('a, = a,', 'LOAD_CONST',),
('a, b = a, b', 'ROT_TWO',),
('a, b, c = a, b, c', 'ROT_THREE',),
):
code = compile(line,'','single')
self.assertInBytecode(code, elem)
self.assertNotInBytecode(code, 'BUILD_TUPLE')
self.assertNotInBytecode(code, 'UNPACK_TUPLE')
self.check_lnotab(code)
def test_folding_of_tuples_of_constants(self):
for line, elem in (
('a = 1,2,3', (1, 2, 3)),
('("a","b","c")', ('a', 'b', 'c')),
('a,b,c = 1,2,3', (1, 2, 3)),
('(None, 1, None)', (None, 1, None)),
('((1, 2), 3, 4)', ((1, 2), 3, 4)),
):
code = compile(line,'','single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertNotInBytecode(code, 'BUILD_TUPLE')
self.check_lnotab(code)
# Long tuples should be folded too.
code = compile(repr(tuple(range(10000))),'','single')
self.assertNotInBytecode(code, 'BUILD_TUPLE')
# One LOAD_CONST for the tuple, one for the None return value
load_consts = [instr for instr in dis.get_instructions(code)
if instr.opname == 'LOAD_CONST']
self.assertEqual(len(load_consts), 2)
self.check_lnotab(code)
# Bug 1053819: Tuple of constants misidentified when presented with:
# . . . opcode_with_arg 100 unary_opcode BUILD_TUPLE 1 . . .
# The following would segfault upon compilation
def crater():
(~[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
],)
self.check_lnotab(crater)
def test_folding_of_lists_of_constants(self):
for line, elem in (
# in/not in constants with BUILD_LIST should be folded to a tuple:
('a in [1,2,3]', (1, 2, 3)),
('a not in ["a","b","c"]', ('a', 'b', 'c')),
('a in [None, 1, None]', (None, 1, None)),
('a not in [(1, 2), 3, 4]', ((1, 2), 3, 4)),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertNotInBytecode(code, 'BUILD_LIST')
self.check_lnotab(code)
def test_folding_of_sets_of_constants(self):
for line, elem in (
# in/not in constants with BUILD_SET should be folded to a frozenset:
('a in {1,2,3}', frozenset({1, 2, 3})),
('a not in {"a","b","c"}', frozenset({'a', 'c', 'b'})),
('a in {None, 1, None}', frozenset({1, None})),
('a not in {(1, 2), 3, 4}', frozenset({(1, 2), 3, 4})),
('a in {1, 2, 3, 3, 2, 1}', frozenset({1, 2, 3})),
):
code = compile(line, '', 'single')
self.assertNotInBytecode(code, 'BUILD_SET')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.check_lnotab(code)
# Ensure that the resulting code actually works:
def f(a):
return a in {1, 2, 3}
def g(a):
return a not in {1, 2, 3}
self.assertTrue(f(3))
self.assertTrue(not f(4))
self.check_lnotab(f)
self.assertTrue(not g(3))
self.assertTrue(g(4))
self.check_lnotab(g)
def test_folding_of_binops_on_constants(self):
for line, elem in (
('a = 2+3+4', 9), # chained fold
('"@"*4', '@@@@'), # check string ops
('a="abc" + "def"', 'abcdef'), # check string ops
('a = 3**4', 81), # binary power
('a = 3*4', 12), # binary multiply
('a = 13//4', 3), # binary floor divide
('a = 14%4', 2), # binary modulo
('a = 2+3', 5), # binary add
('a = 13-4', 9), # binary subtract
('a = (12,13)[1]', 13), # binary subscr
('a = 13 << 2', 52), # binary lshift
('a = 13 >> 2', 3), # binary rshift
('a = 13 & 7', 5), # binary and
('a = 13 ^ 7', 10), # binary xor
('a = 13 | 7', 15), # binary or
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('BINARY_'))
self.check_lnotab(code)
# Verify that unfoldables are skipped
code = compile('a=2+"b"', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 2)
self.assertInBytecode(code, 'LOAD_CONST', 'b')
self.check_lnotab(code)
# Verify that large sequences do not result from folding
code = compile('a="x"*10000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 10000)
self.assertNotIn("x"*10000, code.co_consts)
self.check_lnotab(code)
code = compile('a=1<<1000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 1000)
self.assertNotIn(1<<1000, code.co_consts)
self.check_lnotab(code)
code = compile('a=2**1000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 1000)
self.assertNotIn(2**1000, code.co_consts)
self.check_lnotab(code)
def test_binary_subscr_on_unicode(self):
# valid code get optimized
code = compile('"foo"[0]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 'f')
self.assertNotInBytecode(code, 'BINARY_SUBSCR')
self.check_lnotab(code)
code = compile('"\u0061\uffff"[1]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', '\uffff')
self.assertNotInBytecode(code,'BINARY_SUBSCR')
self.check_lnotab(code)
# With PEP 393, non-BMP char get optimized
code = compile('"\U00012345"[0]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', '\U00012345')
self.assertNotInBytecode(code, 'BINARY_SUBSCR')
self.check_lnotab(code)
# invalid code doesn't get optimized
# out of range
code = compile('"fuu"[10]', '', 'single')
self.assertInBytecode(code, 'BINARY_SUBSCR')
self.check_lnotab(code)
def test_folding_of_unaryops_on_constants(self):
for line, elem in (
('-0.5', -0.5), # unary negative
('-0.0', -0.0), # -0.0
('-(1.0-1.0)', -0.0), # -0.0 after folding
('-0', 0), # -0
('~-2', 1), # unary invert
('+1', 1), # unary positive
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
self.check_lnotab(code)
# Check that -0.0 works after marshaling
def negzero():
return -(1.0-1.0)
for instr in dis.get_instructions(negzero):
self.assertFalse(instr.opname.startswith('UNARY_'))
self.check_lnotab(negzero)
# Verify that unfoldables are skipped
for line, elem, opname in (
('-"abc"', 'abc', 'UNARY_NEGATIVE'),
('~"abc"', 'abc', 'UNARY_INVERT'),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertInBytecode(code, opname)
self.check_lnotab(code)
def test_elim_extra_return(self):
# RETURN LOAD_CONST None RETURN --> RETURN
def f(x):
return x
self.assertNotInBytecode(f, 'LOAD_CONST', None)
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 1)
self.check_lnotab(f)
def test_elim_jump_to_return(self):
# JUMP_FORWARD to RETURN --> RETURN
def f(cond, true_value, false_value):
# Intentionally use two-line expression to test issue37213.
return (true_value if cond
else false_value)
self.check_jump_targets(f)
self.assertNotInBytecode(f, 'JUMP_FORWARD')
self.assertNotInBytecode(f, 'JUMP_ABSOLUTE')
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 2)
self.check_lnotab(f)
def test_elim_jump_to_uncond_jump(self):
# POP_JUMP_IF_FALSE to JUMP_FORWARD --> POP_JUMP_IF_FALSE to non-jump
def f():
if a:
# Intentionally use two-line expression to test issue37213.
if (c
or d):
foo()
else:
baz()
self.check_jump_targets(f)
self.check_lnotab(f)
def test_elim_jump_to_uncond_jump2(self):
# POP_JUMP_IF_FALSE to JUMP_ABSOLUTE --> POP_JUMP_IF_FALSE to non-jump
def f():
while a:
# Intentionally use two-line expression to test issue37213.
if (c
or d):
a = foo()
self.check_jump_targets(f)
self.check_lnotab(f)
def test_elim_jump_to_uncond_jump3(self):
# Intentionally use two-line expressions to test issue37213.
# JUMP_IF_FALSE_OR_POP to JUMP_IF_FALSE_OR_POP --> JUMP_IF_FALSE_OR_POP to non-jump
def f(a, b, c):
return ((a and b)
and c)
self.check_jump_targets(f)
self.check_lnotab(f)
self.assertEqual(count_instr_recursively(f, 'JUMP_IF_FALSE_OR_POP'), 2)
# JUMP_IF_TRUE_OR_POP to JUMP_IF_TRUE_OR_POP --> JUMP_IF_TRUE_OR_POP to non-jump
def f(a, b, c):
return ((a or b)
or c)
self.check_jump_targets(f)
self.check_lnotab(f)
self.assertEqual(count_instr_recursively(f, 'JUMP_IF_TRUE_OR_POP'), 2)
# JUMP_IF_FALSE_OR_POP to JUMP_IF_TRUE_OR_POP --> POP_JUMP_IF_FALSE to non-jump
def f(a, b, c):
return ((a and b)
or c)
self.check_jump_targets(f)
self.check_lnotab(f)
self.assertNotInBytecode(f, 'JUMP_IF_FALSE_OR_POP')
self.assertInBytecode(f, 'JUMP_IF_TRUE_OR_POP')
self.assertInBytecode(f, 'POP_JUMP_IF_FALSE')
# JUMP_IF_TRUE_OR_POP to JUMP_IF_FALSE_OR_POP --> POP_JUMP_IF_TRUE to non-jump
def f(a, b, c):
return ((a or b)
and c)
self.check_jump_targets(f)
self.check_lnotab(f)
self.assertNotInBytecode(f, 'JUMP_IF_TRUE_OR_POP')
self.assertInBytecode(f, 'JUMP_IF_FALSE_OR_POP')
self.assertInBytecode(f, 'POP_JUMP_IF_TRUE')
def test_elim_jump_after_return1(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
if cond1: return 1
if cond2: return 2
while 1:
return 3
while 1:
if cond1: return 4
return 5
return 6
self.assertNotInBytecode(f, 'JUMP_FORWARD')
self.assertNotInBytecode(f, 'JUMP_ABSOLUTE')
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertLessEqual(len(returns), 6)
self.check_lnotab(f)
def test_elim_jump_after_return2(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
while 1:
if cond1: return 4
self.assertNotInBytecode(f, 'JUMP_FORWARD')
# There should be one jump for the while loop.
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'JUMP_ABSOLUTE']
self.assertEqual(len(returns), 1)
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertLessEqual(len(returns), 2)
self.check_lnotab(f)
def test_make_function_doesnt_bail(self):
def f():
def g()->1+1:
pass
return g
self.assertNotInBytecode(f, 'BINARY_ADD')
self.check_lnotab(f)
def test_constant_folding(self):
# Issue #11244: aggressive constant folding.
exprs = [
'3 * -5',
'-3 * 5',
'2 * (3 * 4)',
'(2 * 3) * 4',
'(-1, 2, 3)',
'(1, -2, 3)',
'(1, 2, -3)',
'(1, 2, -3) * 6',
'lambda x: x in {(3 * -5) + (-1 - 6), (1, -2, 3) * 2, None}',
]
for e in exprs:
code = compile(e, '', 'single')
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
self.assertFalse(instr.opname.startswith('BINARY_'))
self.assertFalse(instr.opname.startswith('BUILD_'))
self.check_lnotab(code)
def test_in_literal_list(self):
def containtest():
return x in [a, b]
self.assertEqual(count_instr_recursively(containtest, 'BUILD_LIST'), 0)
self.check_lnotab(containtest)
def test_iterate_literal_list(self):
def forloop():
for x in [a, b]:
pass
self.assertEqual(count_instr_recursively(forloop, 'BUILD_LIST'), 0)
self.check_lnotab(forloop)
def test_condition_with_binop_with_bools(self):
def f():
if True or False:
return 1
return 0
self.assertEqual(f(), 1)
self.check_lnotab(f)
def test_if_with_if_expression(self):
# Check bpo-37289
def f(x):
if (True if x else False):
return True
return False
self.assertTrue(f(True))
self.check_lnotab(f)
def test_trailing_nops(self):
# Check the lnotab of a function that even after trivial
# optimization has trailing nops, which the lnotab adjustment has to
# handle properly (bpo-38115).
def f(x):
while 1:
return 3
while 1:
return 5
return 6
self.check_lnotab(f)
def test_assignment_idiom_in_comprehensions(self):
def listcomp():
return [y for x in a for y in [f(x)]]
self.assertEqual(count_instr_recursively(listcomp, 'FOR_ITER'), 1)
def setcomp():
return {y for x in a for y in [f(x)]}
self.assertEqual(count_instr_recursively(setcomp, 'FOR_ITER'), 1)
def dictcomp():
return {y: y for x in a for y in [f(x)]}
self.assertEqual(count_instr_recursively(dictcomp, 'FOR_ITER'), 1)
def genexpr():
return (y for x in a for y in [f(x)])
self.assertEqual(count_instr_recursively(genexpr, 'FOR_ITER'), 1)
class TestBuglets(unittest.TestCase):
def test_bug_11510(self):
# folded constant set optimization was commingled with the tuple
# unpacking optimization which would fail if the set had duplicate
# elements so that the set length was unexpected
def f():
x, y = {1, 1}
return x, y
with self.assertRaises(ValueError):
f()
if __name__ == "__main__":
unittest.main()
| 38.88447 | 91 | 0.534217 | ["BSD-3-Clause"] | Froggo8311/brython | www/src/Lib/test/test_peepholer.py | 20,531 | Python |
import pytest
import subprocess
from Browser.assertion_engine import AssertionOperator
@pytest.fixture()
def application_server():
process = subprocess.Popen(
["node", "./node/dynamic-test-app/dist/server.js", "7272"]
)
yield
process.terminate()
@pytest.fixture()
def browser(monkeypatch):
import Browser
browser = Browser.Browser()
yield browser
browser.close_browser("ALL")
def test_open_page_get_text(application_server, browser):
browser.new_page("localhost:7272/dist/")
text = browser.get_text("h1", AssertionOperator["=="], "Login Page")
assert text == "Login Page"
def test_readme_example(browser):
browser.new_page("https://playwright.dev")
assert browser.get_text("h1") == "🎭 Playwright"
def test_new_browser_and_close(browser):
browser.new_browser()
browser.close_browser()
| 22.153846 | 72 | 0.710648 | ["Apache-2.0"] | emanlove/robotframework-browser | utest/test_python_usage.py | 867 | Python |
import sys
import os
import shutil
import zipfile
'''
Author: Benny Megidish
Description: This program extracts all the drawing, image and 3D design files out of a 123dx file
Argument naming conventions follow Java style (camelCase)
'''
numOfFileExtracted = 0
def _extract3d(zipFileDir, destDirectory, outputFileName):
''' a wrapper function for the recursive file extraction function '''
with zipfile.ZipFile(zipFileDir) as zipFile:
_extract3dRecursively(zipFile.namelist(), zipFile, destDirectory, outputFileName)
def _extract3dRecursively(fileList, baseZipFile, destDirectory, outputFileName, numOfFileExtracted=0):
''' extracts all the illustrations and models from the 123dx file recursively '''
imageExtList = ['.jpg', '.png']
fusionExtList = ['.smt', '.smb', '.sat', '.igs', '.dxf', '.stp', '.stl']
for member in fileList:
if os.path.isdir(member):
# traverse zip
_extract3dRecursively(os.listdir(member), baseZipFile, destDirectory, outputFileName)
else:
fileExt = os.path.splitext(member)[1]
fileName = os.path.splitext(os.path.basename(member))[0]
# extract only drawing images and 3D files
if fileExt in (fusionExtList + imageExtList):
fullFileName = ''.join([outputFileName, "_", fileName, fileExt])
# find unique file name
while os.path.exists(os.path.join(destDirectory, fullFileName)):
fileName += "#"
fullFileName = ''.join([outputFileName, "_", fileName, fileExt])
# copy file (taken from zipfile's extract)
source = baseZipFile.open(member)
target = open(os.path.join(destDirectory, fullFileName), "wb") # was file() / test for exceptions
with source, target:
shutil.copyfileobj(source, target)
numOfFileExtracted += 1
def _execute(srcDirectory, destDirectory, filename):
''' converts the file into a Fusion 360 file (this file might be usable in other CAD software as well) '''
outputFileName = os.path.splitext(os.path.basename(filename))[0]
newFileName = outputFileName + '.zip'
oldFilePath = os.path.join(srcDirectory, filename)
newFilePath = os.path.join(srcDirectory, newFileName)
# covert to zip
os.rename(oldFilePath, newFilePath)
# extract files
print('Extracting %s' % oldFilePath)
_extract3d(newFilePath, destDirectory, outputFileName)
# covert back to 123dx
os.rename(newFilePath, oldFilePath)
# delete zip
# os.remove(newFilePath)
def convert(filepath=None):
args = sys.argv
usage = 'USAGE: %s [123D FILE PATH OR DIRECTORY]' % args[0]
directory = os.path.dirname(os.path.realpath(__file__))
succeeded = False
# define working directory and file path
if filepath:
directory = os.path.dirname(filepath)
elif len(args) == 2:
directory = os.path.dirname(args[1])
filepath = args[1]
else:
print(usage)
print('Using current directory..')
extractDirectory = os.path.join(directory, '3DFiles')
# ensure all the variables defined correctly
if os.path.isdir(directory) or (filepath and filepath.endswith(".123dx")):
# create output dir if needed
if not os.path.exists(extractDirectory):
os.makedirs(extractDirectory)
else:
print(usage)
# exit(-1) # in case we are running as a script, exit it
return False
# start the conversion process
if filepath and filepath.endswith(".123dx"):
# single file
if os.path.exists(filepath):
_execute(directory, extractDirectory, filepath)
succeeded = True
else:
print('Failed, %s does not exist' % filepath)
elif os.path.isdir(directory):
# directory
for filename in os.listdir(directory):
if filename.endswith(".123dx"):
_execute(directory, extractDirectory, filename)
succeeded = True
if not succeeded:
print('Failed, could not find a *.123dx file in %s' % directory)
if succeeded:
print('Succeeded, you can find your model files inside the 3DFiles folder')
return succeeded
if __name__ == '__main__':
convert()
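# Hedged usage note (not part of the original script): the converter can be
# driven from the command line or imported; the file name below is an
# illustrative assumption.
#
#   python converter.py my_design.123dx   # extract a single archive
#   python converter.py                   # scan the script's own directory
#
# or programmatically:
#
#   from fusion123.converter import convert
#   if convert("my_design.123dx"):
#       print("extracted to ./3DFiles")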
| 34.325581 | 113 | 0.634598 | ["MIT"] | bennymeg/123-Fusion | fusion123/converter.py | 4,428 | Python |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
if root.left and not root.left.left and not root.left.right:
return root.left.val + self.sumOfLeftLeaves(root.right)
return self.sumOfLeftLeaves(root.left) + self.sumOfLeftLeaves(root.right)
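# Hedged self-check (not part of the LeetCode submission): builds the sample
# tree [3, 9, 20, null, null, 15, 7]; its left leaves are 9 and 15, so the
# expected sum is 24. TreeNode is declared here because the judge normally
# supplies it.
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    assert Solution().sumOfLeftLeaves(root) == 24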
| 29.105263 | 81 | 0.587703 | ["MIT"] | Apocrypse/LeetCode | Python/404sum_of_left_leaves.py | 553 | Python |
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import cgi
from boto.compat import six, StringIO
class ResponseGroup(xml.sax.ContentHandler):
"""A Generic "Response Group", which can
be anything from the entire list of Items to
specific response elements within an item"""
def __init__(self, connection=None, nodename=None):
"""Initialize this Item"""
self._connection = connection
self._nodename = nodename
self._nodepath = []
self._curobj = None
self._xml = StringIO()
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.__dict__)
#
# Attribute Functions
#
def get(self, name):
return self.__dict__.get(name)
def set(self, name, value):
self.__dict__[name] = value
def to_xml(self):
return "<%s>%s</%s>" % (self._nodename, self._xml.getvalue(), self._nodename)
#
# XML Parser functions
#
def startElement(self, name, attrs, connection):
self._xml.write("<%s>" % name)
self._nodepath.append(name)
if len(self._nodepath) == 1:
obj = ResponseGroup(self._connection)
self.set(name, obj)
self._curobj = obj
elif self._curobj:
self._curobj.startElement(name, attrs, connection)
return None
def endElement(self, name, value, connection):
self._xml.write("%s</%s>" % (cgi.escape(value).replace("&amp;", "&"), name))
if len(self._nodepath) == 0:
return
obj = None
curval = self.get(name)
if len(self._nodepath) == 1:
if value or not curval:
self.set(name, value)
if self._curobj:
self._curobj = None
#elif len(self._nodepath) == 2:
#self._curobj = None
elif self._curobj:
self._curobj.endElement(name, value, connection)
self._nodepath.pop()
return None
class Item(ResponseGroup):
"""A single Item"""
def __init__(self, connection=None):
"""Initialize this Item"""
ResponseGroup.__init__(self, connection, "Item")
class ItemSet(ResponseGroup):
"""A special ResponseGroup that has built-in paging, and
only creates new Items on the "Item" tag"""
def __init__(self, connection, action, params, page=0):
ResponseGroup.__init__(self, connection, "Items")
self.objs = []
self.iter = None
self.page = page
self.action = action
self.params = params
self.curItem = None
self.total_results = 0
self.total_pages = 0
self.is_valid = False
self.errors = []
def startElement(self, name, attrs, connection):
if name == "Item":
self.curItem = Item(self._connection)
elif self.curItem is not None:
self.curItem.startElement(name, attrs, connection)
return None
def endElement(self, name, value, connection):
if name == 'TotalResults':
self.total_results = value
elif name == 'TotalPages':
self.total_pages = value
elif name == 'IsValid':
if value == 'True':
self.is_valid = True
elif name == 'Code':
self.errors.append({'Code': value, 'Message': None})
elif name == 'Message':
self.errors[-1]['Message'] = value
elif name == 'Item':
self.objs.append(self.curItem)
self._xml.write(self.curItem.to_xml())
self.curItem = None
elif self.curItem is not None:
self.curItem.endElement(name, value, connection)
return None
def __next__(self):
"""Special paging functionality"""
if self.iter is None:
self.iter = iter(self.objs)
try:
return next(self.iter)
except StopIteration:
self.iter = None
self.objs = []
if int(self.page) < int(self.total_pages):
self.page += 1
self._connection.get_response(self.action, self.params, self.page, self)
return next(self)
else:
raise
next = __next__
def __iter__(self):
return self
def to_xml(self):
"""Override to first fetch everything"""
for item in self:
pass
return ResponseGroup.to_xml(self)
| 33.393939 | 92 | 0.609256 | ["Apache-2.0"] | 10088/hue | desktop/core/ext-py/boto-2.46.1/boto/ecs/item.py | 5,510 | Python |
# -*- coding: utf-8 -*-
"""DNACenterAPI topology API fixtures and tests.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from tests.environment import DNA_CENTER_VERSION
from tests.models.schema_validator import json_schema_validate
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '1.3.1', reason='version does not match')
def is_valid_get_vlan_details(obj):
json_schema_validate('jsd_6284db4649aa8d31_v1_3_1').validate(obj)
return True
def get_vlan_details(api):
endpoint_result = api.topology.get_vlan_details(
)
return endpoint_result
@pytest.mark.topology
def test_get_vlan_details(api):
assert is_valid_get_vlan_details(
get_vlan_details(api)
)
def get_vlan_details_default(api):
endpoint_result = api.topology.get_vlan_details(
)
return endpoint_result
@pytest.mark.topology
def test_get_vlan_details_default(api):
try:
assert is_valid_get_vlan_details(
get_vlan_details_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_site_topology(obj):
json_schema_validate('jsd_9ba14a9e441b8a60_v1_3_1').validate(obj)
return True
def get_site_topology(api):
endpoint_result = api.topology.get_site_topology(
)
return endpoint_result
@pytest.mark.topology
def test_get_site_topology(api):
assert is_valid_get_site_topology(
get_site_topology(api)
)
def get_site_topology_default(api):
endpoint_result = api.topology.get_site_topology(
)
return endpoint_result
@pytest.mark.topology
def test_get_site_topology_default(api):
try:
assert is_valid_get_site_topology(
get_site_topology_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_physical_topology(obj):
json_schema_validate('jsd_b2b8cb91459aa58f_v1_3_1').validate(obj)
return True
def get_physical_topology(api):
endpoint_result = api.topology.get_physical_topology(
node_type='string'
)
return endpoint_result
@pytest.mark.topology
def test_get_physical_topology(api):
assert is_valid_get_physical_topology(
get_physical_topology(api)
)
def get_physical_topology_default(api):
endpoint_result = api.topology.get_physical_topology(
node_type=None
)
return endpoint_result
@pytest.mark.topology
def test_get_physical_topology_default(api):
try:
assert is_valid_get_physical_topology(
get_physical_topology_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_topology_details(obj):
json_schema_validate('jsd_b9b48ac8463a8aba_v1_3_1').validate(obj)
return True
def get_topology_details(api):
endpoint_result = api.topology.get_topology_details(
vlan_id='string'
)
return endpoint_result
@pytest.mark.topology
def test_get_topology_details(api):
assert is_valid_get_topology_details(
get_topology_details(api)
)
def get_topology_details_default(api):
endpoint_result = api.topology.get_topology_details(
vlan_id='string'
)
return endpoint_result
@pytest.mark.topology
def test_get_topology_details_default(api):
try:
assert is_valid_get_topology_details(
get_topology_details_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_l3_topology_details(obj):
json_schema_validate('jsd_c2b5fb764d888375_v1_3_1').validate(obj)
return True
def get_l3_topology_details(api):
endpoint_result = api.topology.get_l3_topology_details(
topology_type='string'
)
return endpoint_result
@pytest.mark.topology
def test_get_l3_topology_details(api):
assert is_valid_get_l3_topology_details(
get_l3_topology_details(api)
)
def get_l3_topology_details_default(api):
endpoint_result = api.topology.get_l3_topology_details(
topology_type='string'
)
return endpoint_result
@pytest.mark.topology
def test_get_l3_topology_details_default(api):
try:
assert is_valid_get_l3_topology_details(
get_l3_topology_details_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_overall_network_health(obj):
json_schema_validate('jsd_ca91da84401abba1_v1_3_1').validate(obj)
return True
def get_overall_network_health(api):
endpoint_result = api.topology.get_overall_network_health(
timestamp=0
)
return endpoint_result
@pytest.mark.topology
def test_get_overall_network_health(api):
assert is_valid_get_overall_network_health(
get_overall_network_health(api)
)
def get_overall_network_health_default(api):
endpoint_result = api.topology.get_overall_network_health(
timestamp=None
)
return endpoint_result
@pytest.mark.topology
def test_get_overall_network_health_default(api):
try:
assert is_valid_get_overall_network_health(
get_overall_network_health_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
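# Illustrative usage sketch (an assumption, not part of the generated test suite):
# outside pytest the same endpoints are reached through a DNACenterAPI client, e.g.
#
#   from dnacentersdk import DNACenterAPI
#   api = DNACenterAPI(base_url='https://dnac.example.com', version='1.3.1',
#                      username='user', password='pass')   # hypothetical credentials
#   topo = api.topology.get_physical_topology(node_type='device')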
 | 26.531746 | 95 | 0.747981 | ["MIT"] | wastorga/dnacentersdk | tests/api/v1_3_1/test_topology.py | 6,686 | Python |
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestTril(TestCase):
def test_tril(self, device):
dtype_list = [np.float32, np.float16]
format_list = [0, 3, 4]
shape_list = [[5, 5],[4, 5, 6]]
diagonal_list = [-1, 0, 1]
shape_format = [
[i, j, k, l] for i in dtype_list for j in format_list for k in shape_list for l in diagonal_list
]
for item in shape_format:
cpu_input, npu_input = create_common_tensor(item[:-1], 0, 100)
cpu_output = self.cpu_op_exec(cpu_input, item[-1])
npu_output = self.npu_op_exec(npu_input, item[-1])
self.assertRtolEqual(cpu_output, npu_output)
def test_tril_inplace(self, device):
dtype_list = [np.float32, np.float16]
format_list = [0, 3, 4]
shape_list = [[5, 5], [4, 5, 6]]
diagonal_list = [-1, 0, 1]
shape_format = [
[i, j, k, l] for i in dtype_list for j in format_list for k in shape_list for l in diagonal_list
]
for item in shape_format:
cpu_input, npu_input = create_common_tensor(item[:-1], 0, 100)
cpu_output = self.cpu_op_inplace_exec(cpu_input, item[-1])
npu_output = self.npu_op_inplace_exec(npu_input, item[-1])
self.assertRtolEqual(cpu_output, npu_output)
def cpu_op_exec(self, input, diagonal=0):
output = torch.tril(input, diagonal)
output = output.numpy()
return output
def npu_op_exec(self, input, diagonal=0):
output = torch.tril(input, diagonal)
output = output.to("cpu")
output = output.numpy()
return output
def cpu_op_inplace_exec(self, input, diagonal=0):
output = input.tril_(diagonal)
output = output.numpy()
return output
def npu_op_inplace_exec(self, input, diagonal=0):
output = input.tril_(diagonal)
output = output.to("cpu")
output = output.numpy()
return output
instantiate_device_type_tests(TestTril, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
 | 37.474359 | 108 | 0.660965 | ["BSD-3-Clause"] | Ascend/pytorch | test/test_npu/test_network_ops/test_tril.py | 2,923 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class PublicIPPrefixesOperations(object):
"""PublicIPPrefixesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-10-01"
self.config = config
def _delete_initial(
self, resource_group_name, public_ip_prefix_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, public_ip_prefix_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the PublicIpPrefix.
:type public_ip_prefix_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def get(
self, resource_group_name, public_ip_prefix_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified public IP prefix in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the PublicIPPrefx.
:type public_ip_prefix_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PublicIPPrefix or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PublicIPPrefix', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def _create_or_update_initial(
self, resource_group_name, public_ip_prefix_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PublicIPPrefix')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PublicIPPrefix', response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPPrefix', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, public_ip_prefix_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a static or dynamic public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to the create or update public
IP prefix operation.
:type parameters:
~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns PublicIPPrefix or
ClientRawResponse<PublicIPPrefix> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('PublicIPPrefix', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def _update_tags_initial(
self, resource_group_name, public_ip_prefix_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PublicIPPrefix', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, public_ip_prefix_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates public IP prefix tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns PublicIPPrefix or
ClientRawResponse<PublicIPPrefix> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('PublicIPPrefix', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all the public IP prefixes in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PublicIPPrefix
:rtype:
~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all public IP prefixes in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PublicIPPrefix
:rtype:
~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'}
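# Illustrative usage sketch (an assumption, not part of the generated client):
# callers normally reach these operations through a NetworkManagementClient, and the
# long-running calls documented above return an LROPoller whose .result() blocks
# until the ARM operation completes.
#
#   from azure.common.credentials import ServicePrincipalCredentials
#   from azure.mgmt.network import NetworkManagementClient
#   credentials = ServicePrincipalCredentials(client_id='...', secret='...', tenant='...')
#   client = NetworkManagementClient(credentials, '<subscription-id>')
#   poller = client.public_ip_prefixes.create_or_update(
#       'my-rg', 'my-prefix', parameters)   # parameters: PublicIPPrefix model or dict
#   prefix = poller.result()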
 | 46.82218 | 175 | 0.671798 | ["MIT"] | acured/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py | 24,488 | Python |
# -*- coding:utf8 -*-
# File : env.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 12/29/16
#
# This file is part of TensorArtist.
from ...core import get_logger
from ...core.event import EventManager, register_event, trigger_event
from ...core.utils.meta import notnone_property
from ..graph.env import Env
from ..graph.node import as_tftensor
logger = get_logger(__file__)
__all__ = ['TrainerEnvBase', 'SimpleTrainerEnv']
class TrainerEnvBase(Env):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._snapshot_parts = dict()
self._runtime = dict()
self.add_snapshot_part('variables', self.__dump_network_variable, self.__load_network_variable)
self.add_snapshot_part('runtime', self.__dump_runtime, self.__load_runtime)
def __dump_network_variable(self):
return self.network.fetch_all_variables_dict()
def __load_network_variable(self, variables):
self.network.assign_all_variables_dict(variables)
def __dump_runtime(self):
return self._runtime.copy()
def __load_runtime(self, runtime):
self._runtime = runtime
@property
def runtime(self):
return self._runtime
def add_snapshot_part(self, identifier, dump, load):
self._snapshot_parts[identifier] = (dump, load)
def get_snapshot_parts_ref(self):
return self._snapshot_parts
def load_snapshot(self, snapshot):
for k, v in snapshot.items():
if k not in self._snapshot_parts:
logger.warning('Ignored snapshot part: {}.'.format(k))
else:
loader = self._snapshot_parts[k][1]
loader(v)
return self
def dump_snapshot(self):
snapshot = dict()
for identifier, (d, l) in self._snapshot_parts.items():
snapshot[identifier] = d()
return snapshot
def register_event(self, name, callback, *args, priority=EventManager.DEF_PRIORITY, **kwargs):
register_event(self, name, callback, *args, priority=priority, **kwargs)
return self
def trigger_event(self, name, *args, **kwargs):
trigger_event(self, name, self, *args, **kwargs)
return self
class SimpleTrainerEnv(TrainerEnvBase):
_optimizer = None
@notnone_property
def optimizer(self):
return self._optimizer
def set_optimizer(self, opt):
self._optimizer = opt
return self
def make_optimizable_func(self, loss=None):
loss = loss or self.network.loss
loss = as_tftensor(loss)
func = self.make_func()
func.add_extra_op(self.optimizer.minimize(loss))
return func
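# Illustrative usage sketch (an assumption; the optimizer, network and any flags the
# base Env constructor expects are placeholders): SimpleTrainerEnv couples a graph
# Env with an optimizer, and snapshots bundle network variables plus runtime state.
#
#   env = SimpleTrainerEnv()                   # base Env may require extra ctor args
#   env.set_optimizer(my_tf_optimizer)
#   train_func = env.make_optimizable_func()   # minimizes env.network.loss
#   snapshot = env.dump_snapshot()             # {'variables': ..., 'runtime': ...}
#   env.load_snapshot(snapshot)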
 | 29.096774 | 103 | 0.661863 | ["MIT"] | cosmic119/DiscoGAN | TensorArtist/tartist/nn/train/env.py | 2,706 | Python |
"""Tcp client for synchronous uhd message tcp port"""
import threading
import Queue
import time
import socket
import struct
import traceback  # used by the socket.timeout handler below
import numpy as np
class _TcpSyncClient(threading.Thread):
"""Thead for message polling"""
queue = Queue.Queue()
q_quit = Queue.Queue()
ip_address = None
port = None
def __init__(self, ip_address, port, packet_size, packet_type):
super(_TcpSyncClient, self).__init__()
self.ip_address = ip_address
self.port = port
self.packet_size = packet_size
self.packet_type = packet_type
    def __exit__(self, exc_type, exc_value, tb):
        self.stop()
def run(self):
"""connect and poll messages to queue"""
#Establish connection
sock = None
print("Connecting to synchronous uhd message tcp port " + str(self.port))
while self.q_quit.empty():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.ip_address, self.port))
break
except socket.error:
print("connecting to synchronous uhd message tcp port " + str(self.port))
#traceback.print_exc()
sock.close()
time.sleep(0.5)
print("Connected to synchronous uhd message tcp port " + str(self.port))
#Read messages
sock.settimeout(None)
s = ""
while self.q_quit.empty():
try:
                # concatenate successive recv() chunks into one complete packet
while self.q_quit.empty():
s += sock.recv(self.packet_size)
if (len(s)) >= self.packet_size:
break
res_tuple = struct.unpack( self.packet_type, s[:self.packet_size])
s = s[self.packet_size:]
self.queue.put(res_tuple)
except socket.timeout:
self.stop()
traceback.print_exc()
pass
sock.close()
def stop(self):
"""stop thread"""
print("stop tcp_sync uhd message tcp thread")
self.q_quit.put("end")
class UhdSyncMsg(object):
"""Creates a thread to connect to the synchronous uhd messages tcp port"""
def __init__(self, ip_address = "127.0.0.1", port = 47009, packet_size = 3, packet_type = "fff"):
self.tcpa = _TcpSyncClient(ip_address, port, packet_size, packet_type)
self.tcpa.start()
    def __exit__(self, exc_type, exc_value, tb):
        self.tcpa.stop()
def stop(self):
"""stop tcp thread"""
self.tcpa.stop()
def get_msgs(self, num):
"""get received messages as string of integer"""
out = []
while len(out) < num:
out.append(self.tcpa.queue.get())
return out
def get_msgs_fft(self, num):
"""
get received messages as string of integer
apply fftshift to message
"""
out = []
while len(out) < num:
out.append(self.tcpa.queue.get())
return [np.fft.fftshift(np.array(o)) for o in out]
def get_res(self):
"""get received messages as string of integer"""
out = []
while not self.tcpa.queue.empty():
out.append(self.tcpa.queue.get())
return out
    def has_msg(self):
        """Checks if one or more messages were received and empties the message queue"""
        return len(self.get_res()) > 0
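# Illustrative usage sketch (grounded in the class above; the port and format are the
# module defaults): packet_size should match the struct format, e.g. "fff" is three
# 4-byte floats, i.e. 12 bytes.
#
#   msgs = UhdSyncMsg(ip_address="127.0.0.1", port=47009,
#                     packet_size=12, packet_type="fff")
#   first_ten = msgs.get_msgs(10)   # blocks until ten (f, f, f) tuples arrive
#   msgs.stop()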
 | 29.850877 | 101 | 0.566265 | ["MIT"] | Opendigitalradio/ODR-StaticPrecorrection | src/tcp_sync.py | 3,403 | Python |
import tensorflow as tf
def dense_value_graph(inputs, activation_fn=tf.nn.tanh, scope='value_graph', reuse=None):
with tf.variable_scope(scope, reuse=reuse):
net = inputs
net = tf.contrib.layers.flatten(net)
net = tf.layers.dense(net, 64, activation=activation_fn)
net = tf.layers.dense(net, 64, activation=activation_fn)
state_value = tf.layers.dense(net, 1)
return tf.squeeze(state_value)
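# Illustrative usage sketch (an assumption, TF1 graph mode; the placeholder shape is
# arbitrary): dense_value_graph flattens the input, applies two 64-unit tanh layers
# and returns a squeezed scalar state-value per batch element.
#
#   obs = tf.placeholder(tf.float32, shape=[None, 4])
#   value = dense_value_graph(obs)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       v = sess.run(value, feed_dict={obs: [[0.1, 0.2, 0.3, 0.4]]})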
 | 34.307692 | 89 | 0.686099 | ["MIT"] | apparatusbox/rlbox | rlbox/models/value_graphs.py | 446 | Python |
#!/usr/bin/env python3
# Packet MAC Sniffer
# Author Yehia Elghaly
import socket
import textwrap
import struct
from colorama import Fore, Back, Style
def main():
connection = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3))
while True:
read_data, addr = connection.recvfrom(65536)
send_mac, recv_mac, protocol, packet_data = ethernet(read_data)
print ('\nEthernet Data:')
print (Fore.GREEN + 'Destination: {}, Source: {}, Protocol: {}'. format (send_mac, recv_mac, protocol))
def ethernet(packet_data):
send_mac, recv_mac, protocol = struct.unpack('!6s 6s H', packet_data[:14])
return read_mac_addr(send_mac), read_mac_addr(recv_mac), socket.htons(protocol), packet_data[:14]
def read_mac_addr(bytes):
bytes_s = map('{:02x}'.format, bytes)
return ':'.join(bytes_s).upper()
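# Note on the unpacking above: in an Ethernet frame the first six bytes are the
# destination MAC and the next six the source, so the printed labels in main() are
# correct even though the local names read send/recv. For example,
# read_mac_addr(b'\x00\x1a\x2b\x3c\x4d\x5e') returns '00:1A:2B:3C:4D:5E'.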
main()
 | 31.423077 | 105 | 0.734394 | ["MIT"] | bpbpublications/Learn-Penetration-Testing-with-Python-3.x | Chapter 06/Packet-Sniffer-MAC.py | 817 | Python |
# -*- coding: utf-8 -*-
from .domainconfig import DomainConfig # noqa
from .resourceconfig import ResourceConfig # noqa
 | 24.6 | 50 | 0.731707 | ["BSD-3-Clause"] | Alan01252/eve-sqlalchemy | eve_sqlalchemy/config/__init__.py | 123 | Python |
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""sparse softmax cross entropy with logits"""
from akg.ops.nn import sparse_softmax_cross_entropy_with_logits as loss
from akg.ops.nn import sparse_softmax_cross_entropy_with_logits_ad as loss_ad
def SparseSoftmaxCrossEntropyWithLogits(features, labels, is_grad=False, sens=1.0):
"""sparse softmax cross entropy with logits"""
if is_grad:
return loss_ad.sparse_softmax_cross_entropy_with_logits_ad(labels, features, reduction='mean', grad_scale=sens)
return loss.sparse_softmax_cross_entropy_with_logits(labels, features, reduction='mean')
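# Illustrative usage sketch (an assumption; `features` and `labels` are akg tensors
# supplied by the caller): is_grad selects between the mean loss and its AD variant.
#
#   loss_op = SparseSoftmaxCrossEntropyWithLogits(features, labels)
#   grad_op = SparseSoftmaxCrossEntropyWithLogits(features, labels, is_grad=True, sens=1.0)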
 | 45.769231 | 119 | 0.785714 | ["Apache-2.0"] | Kiike5/akg | python/akg/ms/cce/sparse_softmax_cross_entropy_with_logits.py | 1,190 | Python |
import ast
import copy
import json
import os
import re
from collections import OrderedDict
from dataclasses import fields
from urllib.parse import urlparse
import supervisely_lib as sly
import sly_globals as g
from functools import lru_cache
def camel_to_snake(string_to_process):
return re.sub(r'(?<!^)(?=[A-Z])', ' ', string_to_process).lower()
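# For example, camel_to_snake("startStationName") returns "start station name":
# a space is inserted before every capital letter that is not at the start, then the
# whole string is lower-cased, which process_info_for_showing() relies on to turn
# API-style keys into readable titles.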
def process_info_for_showing(info_data):
processed_info = {}
for key, value in info_data.items():
processed_info[camel_to_snake(key).title()] = value
return processed_info
def remove_keys_from_dict(keys_to_remove, data):
for key in keys_to_remove:
data.pop(key, None)
def sly_annotation_to_bbox(sly_label):
rect: sly.Rectangle = sly_label.geometry.to_bbox()
return [rect.top, rect.left,
rect.bottom - rect.top,
rect.right - rect.left]
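# The list returned above is [top, left, height, width] in pixels, e.g. a label whose
# bounding box has top=10, left=20, bottom=50, right=60 yields [10, 20, 40, 40]
# (presumably the crop format the serving NN app expects).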
def generate_data_for_nn_app(images_ids, figures_ids, annotations, padding):
data_for_inference = []
for index, (image_id, figure_id, label) in enumerate(zip(images_ids, figures_ids, annotations)):
if label is None:
raise ValueError(
f"Label with id={figure_id} not found. Maybe cached annotation differs from the actual one. "
f"Please clear cache on settings tab")
image_info = g.spawn_api.image.get_info_by_id(image_id)
image_url = image_info.full_storage_url
bbox = sly_annotation_to_bbox(label)
data_for_inference.append(
{
'index': index,
'url': image_url,
'bbox': bbox,
'figure_id': figure_id
}
)
return data_for_inference
def generate_data_for_calculator_app(embeddings_by_indexes, top_n):
data_for_calculator = {
'embeddings': [current_row['embedding'] for current_row in embeddings_by_indexes],
'top_k': top_n
}
return data_for_calculator
def add_embeddings_to_cache_by_figures(embeddings_by_indexes, data_for_nn):
for current_embedding in embeddings_by_indexes:
current_figure_id = data_for_nn[current_embedding['index']]['figure_id']
g.figures2embeddings[current_figure_id] = current_embedding['embedding']
def calculate_nearest_labels(images_ids, annotations, figures_ids, top_n=5, padding=0):
data_for_nn = generate_data_for_nn_app(images_ids=images_ids, annotations=annotations,
figures_ids=figures_ids, padding=padding)
response = g.api.task.send_request(g.nn_session_id, "inference", data={
'input_data': data_for_nn
}, timeout=99999)
embeddings_by_indexes = ast.literal_eval(json.loads(response)) # [{'index': 0, 'embedding': [...], ..}, ..]
if len(embeddings_by_indexes) != len(data_for_nn):
raise ValueError(f'Data error. Check that the label is selected correctly.')
add_embeddings_to_cache_by_figures(embeddings_by_indexes, data_for_nn)
data_for_calculator = generate_data_for_calculator_app(embeddings_by_indexes, top_n)
response = g.api.task.send_request(g.calculator_session_id, "calculate_similarity", data={
'input_data': data_for_calculator
}, timeout=99999)
nearest_labels = ast.literal_eval(json.loads(response))
# {
# 'pred_dist': [[1.0, ..], ..],
# 'pred_labels': [['label1', ..], ..],
# 'pred_urls': [['image_url1', ..], ..],
# }
return nearest_labels
def get_resized_image(image_storage_url, height):
parsed_link = urlparse(image_storage_url)
return f'{parsed_link.scheme}://{parsed_link.netloc}' \
f'/previews/q/ext:jpeg/resize:fill:0:{height}:0/q:0/plain{parsed_link.path}'
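# For example (assuming a Supervisely-style storage URL), height=150 gives
#   get_resized_image('https://app.supervise.ly/abc/image.jpg', 150)
#   -> 'https://app.supervise.ly/previews/q/ext:jpeg/resize:fill:0:150:0/q:0/plain/abc/image.jpg'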
def get_unique_elements(elements_list):
used = set()
return [x for x in elements_list if x not in used and (used.add(x) or True)]
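# For example, get_unique_elements(['cat', 'dog', 'cat', 'bird']) returns
# ['cat', 'dog', 'bird'] -- order-preserving de-duplication, unlike a plain set().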
def generate_data_to_show(nearest_labels):
unique_labels = get_unique_elements(nearest_labels['pred_labels'])
data_to_show = {pred_label: {} for pred_label in unique_labels}
data_to_show = OrderedDict(data_to_show)
for dist, label in zip(nearest_labels['pred_dist'],
nearest_labels['pred_labels']):
data_to_show[label]['dist'] = data_to_show[label].get('dist', 0) + dist
if data_to_show[label].get('url', None) is None:
data_to_show[label]['url'] = get_urls_by_label(label)
if data_to_show[label].get('description', None) is None:
data_to_show[label]['description'] = get_item_description_by_label(label)
return dict(data_to_show)
def add_info_to_disable_buttons(data_to_show, assigned_tags, fields, state):
reference_disabled = True
selected_figure_id = fields.get('state.selectedFigureId', -1)
if selected_figure_id not in g.figures_in_reference:
reference_disabled = False
data_to_show = OrderedDict(data_to_show)
for label, data in data_to_show.items():
if label in assigned_tags or (len(assigned_tags) > 0 and state['tagPerImage']):
data_to_show[label].update({'assignDisabled': True,
'referenceDisabled': reference_disabled})
else:
data_to_show[label].update({'assignDisabled': False,
'referenceDisabled': reference_disabled})
return dict(data_to_show)
def get_meta(project_id, from_server=False):
if from_server is True or project_id not in g.project2meta:
meta_json = g.spawn_api.project.get_meta(project_id)
meta = sly.ProjectMeta.from_json(meta_json)
g.project2meta[project_id] = meta
else:
meta = g.project2meta[project_id]
return meta
def update_project_meta(project_id, project_meta: sly.ProjectMeta):
sly.logger.info(f'update_project_meta: {project_id=}, {g.spawn_user_login=}')
g.spawn_api.project.update_meta(project_id, project_meta.to_json())
def _get_or_create_tag_meta(project_id, tag_meta):
for get_from_server_flag in [False, True]: # check tag in local and remote metas
project_meta = get_meta(project_id, from_server=get_from_server_flag)
project_tag_meta: sly.TagMeta = project_meta.get_tag_meta(tag_meta.name)
sly.logger.info(f'_get_or_create_tag_meta: {project_tag_meta is None=}, {get_from_server_flag=}')
if project_tag_meta is not None:
break
if project_tag_meta is None:
project_meta = project_meta.add_tag_meta(tag_meta) # add tag to newest meta
update_project_meta(project_id, project_meta)
project_meta = get_meta(project_id, from_server=True)
project_tag_meta = project_meta.get_tag_meta(tag_meta.name)
return project_tag_meta
def _assign_tag_to_object(project_id, figure_id, tag_meta):
project_tag_meta: sly.TagMeta = _get_or_create_tag_meta(project_id, tag_meta)
g.api.advanced.add_tag_to_object(project_tag_meta.sly_id, figure_id)
def assign_to_object(project_id, figure_id, class_name):
sly.logger.info(f'assign_to_object: {project_id=}, {figure_id=}, {class_name=}')
tag_meta = sly.TagMeta(class_name, sly.TagValueType.NONE)
_assign_tag_to_object(project_id, figure_id, tag_meta)
def get_image_path(image_id):
info = get_image_info(image_id)
local_path = os.path.join(g.cache_path, f"{info.id}{sly.fs.get_file_name_with_ext(info.name)}")
if not sly.fs.file_exists(local_path):
g.spawn_api.image.download_path(image_id, local_path)
return local_path
# @lru_cache(maxsize=10)
def get_annotation(project_id, image_id, optimize=False):
if image_id not in g.image2ann or not optimize:
ann_json = g.spawn_api.annotation.download(image_id).annotation
ann = sly.Annotation.from_json(ann_json, get_meta(project_id))
g.image2ann[image_id] = ann
else:
ann = g.image2ann[image_id]
g.figures_on_frame_count = len(ann.labels)
return ann
def get_image_info(image_id):
info = None
if image_id not in g.image2info:
info = g.spawn_api.image.get_info_by_id(image_id)
g.image2info[image_id] = info
else:
info = g.image2info[image_id]
return info
def clear():
g.project2meta.clear()
# image2info.clear()
g.image2ann.clear()
def convert_dict_to_list(data_to_show):
data_to_show_list = []
for key, value in data_to_show.items():
value['current_label'] = key
data_to_show_list.append(value)
return data_to_show_list
def get_assigned_tags_names_by_label_annotation(label_annotation):
assigned_tags = label_annotation.tags.to_json()
return [assigned_tag.get('name', None) for assigned_tag in assigned_tags
if assigned_tag.get('name', None) is not None]
def get_tag_id_by_tag_name(label_annotation, tag_name):
assigned_tags = label_annotation.tags
for current_tag in assigned_tags:
if current_tag.name == tag_name:
return current_tag.sly_id
# return None
return None
def sort_by_dist(data_to_show):
sorted_predictions_by_dist = sorted(data_to_show, key=lambda d: d['dist'], reverse=True)
for index, row in enumerate(sorted_predictions_by_dist):
row['index'] = index
sorted_predictions_by_dist[index] = row
return sorted_predictions_by_dist
def get_item_description_by_label(current_label):
item = copy.deepcopy(g.items_database.get(current_label, {}))
keys_to_clear = ['url']
for current_key in keys_to_clear:
try:
item.pop(current_key)
except:
pass
return item
def update_review_tags_tab(assigned_tags, fields):
items_for_review = []
for current_tag in assigned_tags:
items_for_review.append({
'current_label': current_tag,
'url': get_urls_by_label(current_tag),
'removingDisabled': False,
'description': get_item_description_by_label(current_tag)
})
if len(items_for_review) == 0:
fields['state.tagsForReview'] = None
else:
fields['state.tagsForReview'] = items_for_review
def update_card_buttons(card_name, assigned_tags, fields, state):
current_card = fields.get(f"state.{card_name}", None)
if current_card is None:
current_card = g.api.task.get_field(g.task_id, f"state.{card_name}")
if current_card:
assign_disabled = True
reference_disabled = True
if current_card.get('current_label', '') not in assigned_tags and not (
len(assigned_tags) > 0 and state['tagPerImage']):
assign_disabled = False
selected_figure_id = fields.get('state.selectedFigureId', -1)
if selected_figure_id not in g.figures_in_reference:
reference_disabled = False
set_buttons(assign_disabled=assign_disabled, reference_disabled=reference_disabled, card_name=card_name,
fields=fields)
def upload_data_to_tabs(nearest_labels, label_annotation, fields, state):
assigned_tags = get_assigned_tags_names_by_label_annotation(label_annotation)
update_review_tags_tab(assigned_tags, fields) # Review tags tab
update_card_buttons('lastAssignedTag', assigned_tags, fields, state) # Last assigned tab
update_card_buttons('selectedDatabaseItem', assigned_tags, fields, state) # Database tab
nearest_labels = {key: value[0] for key, value in nearest_labels.items()} # NN Prediction tab
data_to_show = generate_data_to_show(nearest_labels)
data_to_show = add_info_to_disable_buttons(data_to_show, assigned_tags, fields, state)
data_to_show = convert_dict_to_list(data_to_show)
data_to_show = sort_by_dist(data_to_show)
fields['data.predicted'] = data_to_show
def get_urls_by_label(selected_label):
label_info = g.items_database[selected_label]
return [{'preview': get_resized_image(current_url, g.items_preview_size)}
for current_url in label_info['url']][:g.items_preview_count]
def remove_from_object(project_id, figure_id, tag_name, tag_id):
project_meta = get_meta(project_id)
project_tag_meta: sly.TagMeta = project_meta.get_tag_meta(tag_name)
if project_tag_meta is None:
raise RuntimeError(f"Tag {tag_name} not found in project meta")
g.api.advanced.remove_tag_from_object(project_tag_meta.sly_id, figure_id, tag_id)
def set_button_flag(card_name, flag_name, flag_value, fields):
current_card = g.api.task.get_field(g.task_id, f"state.{card_name}")
if current_card:
fields[f"state.{card_name}.{flag_name}"] = flag_value
def set_buttons(assign_disabled, reference_disabled, card_name, fields):
set_button_flag(flag_name='assignDisabled', flag_value=assign_disabled, card_name=card_name, fields=fields)
set_button_flag(flag_name='referenceDisabled', flag_value=reference_disabled, card_name=card_name, fields=fields)
def get_tagged_objects_count_on_frame(annotation):
tagged_objects = 0
for label in annotation.labels:
if len(label.tags) > 0:
tagged_objects += 1
return tagged_objects
 | 35.183288 | 117 | 0.706274 | ["MIT"] | supervisely-ecosystem/gl-metric-learning | supervisely/labeling-tool/src/sly_functions.py | 13,053 | Python |
from matplotlib.pyplot import axis
from utils import *
def unify_features(csv_path):
file_name = os.path.basename(csv_path)
file_date = file_name.split("-")[0]
file_year = int(file_date[0:4])
file_date = int(file_date)
col_2020 = ['starttime', 'stoptime', 'start station id',
'start station name', 'start station latitude',
'start station longitude', 'end station id', 'end station name',
'end station latitude', 'end station longitude', 'bikeid']
col_2021 = ['bikeid', 'starttime', 'stoptime', 'start station name',
'start station id', 'end station name', 'end station id',
'start station latitude', 'start station longitude',
'end station latitude', 'end station longitude']
data = pd.read_csv(csv_path, low_memory=False)
if file_date <= 202101:
cols_to_drop = ["birth year", "gender", "tripduration", "usertype"]
for col in cols_to_drop:
data = data.drop(col, axis=1)
data.columns = col_2020
elif file_date > 202101:
cols_to_drop = ["member_casual", "rideable_type"]
for col in cols_to_drop:
data = data.drop(col, axis=1)
data.columns = col_2021
## Export
data.to_csv(f"{csv_path}.csv", index = False)
if __name__ == "__main__":
dataset_path = "./Dataset"
csvs_path = get_csvs_path(dataset_path)
    execute_on_dataset(csvs_path, unify_features)
 | 36.5 | 75 | 0.633562 | ["MIT"] | SajjadPSavoji/CitiBikeNYC | preprocess_data.py | 1,460 | Python |
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Andrew Dykstra <[email protected]>
# Mads Jensen <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
import warnings
import numpy as np
from scipy import fftpack
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds,
grand_average, combine_evoked)
from mne.evoked import _get_peak, EvokedArray
from mne.epochs import EpochsArray
from mne.utils import _TempDir, requires_pandas, slow_test, requires_version
from mne.io.meas_info import create_info
from mne.externals.six.moves import cPickle as pickle
warnings.simplefilter('always')
fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
'test-ave.fif')
fname_gz = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
'test-ave.fif.gz')
@requires_version('scipy', '0.14')
def test_savgol_filter():
"""Test savgol filtering
"""
h_freq = 10.
evoked = read_evokeds(fname, 0)
freqs = fftpack.fftfreq(len(evoked.times), 1. / evoked.info['sfreq'])
data = np.abs(fftpack.fft(evoked.data))
match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
assert_raises(ValueError, evoked.savgol_filter, evoked.info['sfreq'])
evoked.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(evoked.data))
# decent in pass-band
assert_allclose(np.mean(data[:, match_mask], 0),
np.mean(data_filt[:, match_mask], 0),
rtol=1e-4, atol=1e-2)
# suppression in stop-band
assert_true(np.mean(data[:, mismatch_mask]) >
np.mean(data_filt[:, mismatch_mask]) * 5)
def test_hash_evoked():
"""Test evoked hashing
"""
ave = read_evokeds(fname, 0)
ave_2 = read_evokeds(fname, 0)
assert_equal(hash(ave), hash(ave_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(ave) == pickle.dumps(ave_2))
ave_2.data[0, 0] -= 1
assert_not_equal(hash(ave), hash(ave_2))
@slow_test
def test_io_evoked():
"""Test IO for evoked data (fif + gz) with integer and str args
"""
tempdir = _TempDir()
ave = read_evokeds(fname, 0)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
# This not being assert_array_equal due to windows rounding
assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
assert_true(repr(ave))
# test compressed i/o
ave2 = read_evokeds(fname_gz, 0)
assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
# test str access
condition = 'Left Auditory'
assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
assert_raises(ValueError, read_evokeds, fname, condition,
kind='standard_error')
ave3 = read_evokeds(fname, condition)
assert_array_almost_equal(ave.data, ave3.data, 19)
# test read_evokeds and write_evokeds
types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
aves1 = read_evokeds(fname)
aves2 = read_evokeds(fname, [0, 1, 2, 3])
aves3 = read_evokeds(fname, types)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
for aves in [aves2, aves3, aves4]:
for [av1, av2] in zip(aves1, aves):
assert_array_almost_equal(av1.data, av2.data)
assert_array_almost_equal(av1.times, av2.times)
assert_equal(av1.nave, av2.nave)
assert_equal(av1.kind, av2.kind)
assert_equal(av1._aspect_kind, av2._aspect_kind)
assert_equal(av1.last, av2.last)
assert_equal(av1.first, av2.first)
assert_equal(av1.comment, av2.comment)
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fname2 = op.join(tempdir, 'test-bad-name.fif')
write_evokeds(fname2, ave)
read_evokeds(fname2)
assert_true(len(w) == 2)
def test_shift_time_evoked():
""" Test for shifting of time scale
"""
tempdir = _TempDir()
# Shift backward
ave = read_evokeds(fname, 0)
ave.shift_time(-0.1, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
# Shift forward twice the amount
ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_bshift.shift_time(0.2, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
# Shift backward again
ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_fshift.shift_time(-0.1, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
ave_normal = read_evokeds(fname, 0)
ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
assert_true(np.allclose(ave_normal.data, ave_relative.data,
atol=1e-16, rtol=1e-3))
assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
assert_equal(ave_normal.last, ave_relative.last)
assert_equal(ave_normal.first, ave_relative.first)
# Absolute time shift
ave = read_evokeds(fname, 0)
ave.shift_time(-0.3, relative=False)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
assert_true(np.allclose(ave_normal.data, ave_absolute.data,
atol=1e-16, rtol=1e-3))
assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
def test_evoked_resample():
"""Test for resampling of evoked data
"""
tempdir = _TempDir()
# upsample, write it out, read it in
ave = read_evokeds(fname, 0)
sfreq_normal = ave.info['sfreq']
ave.resample(2 * sfreq_normal)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
# compare it to the original
ave_normal = read_evokeds(fname, 0)
# and compare the original to the downsampled upsampled version
ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_new.resample(sfreq_normal)
assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
assert_array_almost_equal(ave_normal.times, ave_new.times)
assert_equal(ave_normal.nave, ave_new.nave)
assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
assert_equal(ave_normal.kind, ave_new.kind)
assert_equal(ave_normal.last, ave_new.last)
assert_equal(ave_normal.first, ave_new.first)
# for the above to work, the upsampling just about had to, but
# we'll add a couple extra checks anyway
assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
def test_evoked_detrend():
"""Test for detrending evoked data
"""
ave = read_evokeds(fname, 0)
ave_normal = read_evokeds(fname, 0)
ave.detrend(0)
ave_normal.data -= np.mean(ave_normal.data, axis=1)[:, np.newaxis]
picks = pick_types(ave.info, meg=True, eeg=True, exclude='bads')
assert_true(np.allclose(ave.data[picks], ave_normal.data[picks],
rtol=1e-8, atol=1e-16))
@requires_pandas
def test_to_data_frame():
"""Test evoked Pandas exporter"""
ave = read_evokeds(fname, 0)
assert_raises(ValueError, ave.to_data_frame, picks=np.arange(400))
df = ave.to_data_frame()
assert_true((df.columns == ave.ch_names).all())
df = ave.to_data_frame(index=None).reset_index('time')
assert_true('time' in df.columns)
assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
def test_evoked_proj():
"""Test SSP proj operations
"""
for proj in [True, False]:
ave = read_evokeds(fname, condition=0, proj=proj)
assert_true(all(p['active'] == proj for p in ave.info['projs']))
# test adding / deleting proj
if proj:
assert_raises(ValueError, ave.add_proj, [],
{'remove_existing': True})
assert_raises(ValueError, ave.del_proj, 0)
else:
projs = deepcopy(ave.info['projs'])
n_proj = len(ave.info['projs'])
ave.del_proj(0)
assert_true(len(ave.info['projs']) == n_proj - 1)
ave.add_proj(projs, remove_existing=False)
assert_true(len(ave.info['projs']) == 2 * n_proj - 1)
ave.add_proj(projs, remove_existing=True)
assert_true(len(ave.info['projs']) == n_proj)
ave = read_evokeds(fname, condition=0, proj=False)
data = ave.data.copy()
ave.apply_proj()
assert_allclose(np.dot(ave._projector, data), ave.data)
def test_get_peak():
"""Test peak getter
"""
evoked = read_evokeds(fname, condition=0, proj=True)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02,
tmax=0.01)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
assert_raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
assert_raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
ch_idx, time_idx = evoked.get_peak(ch_type='mag')
assert_true(ch_idx in evoked.ch_names)
assert_true(time_idx in evoked.times)
ch_idx, time_idx = evoked.get_peak(ch_type='mag',
time_as_index=True)
assert_true(time_idx < len(evoked.times))
data = np.array([[0., 1., 2.],
[0., -3., 0]])
times = np.array([.1, .2, .3])
ch_idx, time_idx = _get_peak(data, times, mode='abs')
assert_equal(ch_idx, 1)
assert_equal(time_idx, 1)
ch_idx, time_idx = _get_peak(data * -1, times, mode='neg')
assert_equal(ch_idx, 0)
assert_equal(time_idx, 2)
ch_idx, time_idx = _get_peak(data, times, mode='pos')
assert_equal(ch_idx, 0)
assert_equal(time_idx, 2)
assert_raises(ValueError, _get_peak, data + 1e3, times, mode='neg')
assert_raises(ValueError, _get_peak, data - 1e3, times, mode='pos')
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
evoked = read_evokeds(fname, condition=0, proj=True)
drop_ch = evoked.ch_names[:3]
ch_names = evoked.ch_names[3:]
ch_names_orig = evoked.ch_names
dummy = evoked.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, evoked.ch_names)
assert_equal(len(ch_names_orig), len(evoked.data))
evoked.drop_channels(drop_ch)
assert_equal(ch_names, evoked.ch_names)
assert_equal(len(ch_names), len(evoked.data))
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
evoked = read_evokeds(fname, condition=0, proj=True)
ch_names = evoked.ch_names[:3]
ch_names_orig = evoked.ch_names
dummy = evoked.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, evoked.ch_names)
assert_equal(len(ch_names_orig), len(evoked.data))
evoked.pick_channels(ch_names)
assert_equal(ch_names, evoked.ch_names)
assert_equal(len(ch_names), len(evoked.data))
evoked = read_evokeds(fname, condition=0, proj=True)
assert_true('meg' in evoked)
assert_true('eeg' in evoked)
evoked.pick_types(meg=False, eeg=True)
assert_true('meg' not in evoked)
assert_true('eeg' in evoked)
assert_true(len(evoked.ch_names) == 60)
def test_equalize_channels():
"""Test equalization of channels
"""
evoked1 = read_evokeds(fname, condition=0, proj=True)
evoked2 = evoked1.copy()
ch_names = evoked1.ch_names[2:]
evoked1.drop_channels(evoked1.ch_names[:1])
evoked2.drop_channels(evoked2.ch_names[1:2])
my_comparison = [evoked1, evoked2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
def test_evoked_arithmetic():
"""Test evoked arithmetic
"""
ev = read_evokeds(fname, condition=0)
ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)
# combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
# data should be added according to their `nave` weights
# nave = ev1.nave + ev2.nave
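    # Worked example of the nave weighting (values taken from ev1/ev2 above):
    # ev1.data == +1 with nave=20, ev2.data == -1 with nave=10, so the
    # weighted average is (20 * (+1) + 10 * (-1)) / (20 + 10) = 1/3,
    # which is what the assertion below checks.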
ev = ev1 + ev2
assert_equal(ev.nave, ev1.nave + ev2.nave)
assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
ev = ev1 - ev2
assert_equal(ev.nave, ev1.nave + ev2.nave)
assert_equal(ev.comment, ev1.comment + ' - ' + ev2.comment)
assert_allclose(ev.data, np.ones_like(ev1.data))
# default comment behavior if evoked.comment is None
old_comment1 = ev1.comment
old_comment2 = ev2.comment
ev1.comment = None
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
ev = ev1 - ev2
assert_equal(ev.comment, 'unknown')
ev1.comment = old_comment1
ev2.comment = old_comment2
# equal weighting
ev = combine_evoked([ev1, ev2], weights='equal')
assert_allclose(ev.data, np.zeros_like(ev1.data))
# combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
ev = combine_evoked([ev1, ev2], weights=[1, 0])
assert_equal(ev.nave, ev1.nave)
assert_allclose(ev.data, ev1.data)
# simple subtraction (like in oddball)
ev = combine_evoked([ev1, ev2], weights=[1, -1])
assert_allclose(ev.data, 2 * np.ones_like(ev1.data))
assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
# grand average
evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
ch_names = evoked1.ch_names[2:]
evoked1.info['bads'] = ['EEG 008'] # test interpolation
evoked1.drop_channels(evoked1.ch_names[:1])
evoked2.drop_channels(evoked2.ch_names[1:2])
gave = grand_average([evoked1, evoked2])
assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
assert_equal(ch_names, gave.ch_names)
assert_equal(gave.nave, 2)
def test_array_epochs():
"""Test creating evoked from array
"""
tempdir = _TempDir()
# creating
rng = np.random.RandomState(42)
data1 = rng.randn(20, 60)
sfreq = 1e3
ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
types = ['eeg'] * 20
info = create_info(ch_names, sfreq, types)
evoked1 = EvokedArray(data1, info, tmin=-0.01)
# save, read, and compare evokeds
tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
evoked1.save(tmp_fname)
evoked2 = read_evokeds(tmp_fname)[0]
data2 = evoked2.data
assert_allclose(data1, data2)
assert_allclose(evoked1.times, evoked2.times)
assert_equal(evoked1.first, evoked2.first)
assert_equal(evoked1.last, evoked2.last)
assert_equal(evoked1.kind, evoked2.kind)
assert_equal(evoked1.nave, evoked2.nave)
# now compare with EpochsArray (with single epoch)
data3 = data1[np.newaxis, :, :]
events = np.c_[10, 0, 1]
evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
assert_allclose(evoked1.data, evoked3.data)
assert_allclose(evoked1.times, evoked3.times)
assert_equal(evoked1.first, evoked3.first)
assert_equal(evoked1.last, evoked3.last)
assert_equal(evoked1.kind, evoked3.kind)
assert_equal(evoked1.nave, evoked3.nave)
# test match between channels info and data
ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
types = ['eeg'] * 19
info = create_info(ch_names, sfreq, types)
assert_raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
def test_add_channels():
"""Test evoked splitting / re-appending channel types
"""
evoked = read_evokeds(fname, condition=0)
evoked.info['buffer_size_sec'] = None
evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
evoked_meg = evoked.pick_types(meg=True, copy=True)
evoked_stim = evoked.pick_types(meg=False, stim=True, copy=True)
evoked_eeg_meg = evoked.pick_types(meg=True, eeg=True, copy=True)
evoked_new = evoked_meg.add_channels([evoked_eeg, evoked_stim], copy=True)
assert_true(all(ch in evoked_new.ch_names
for ch in evoked_stim.ch_names + evoked_meg.ch_names))
evoked_new = evoked_meg.add_channels([evoked_eeg], copy=True)
    assert_true(all(ch in evoked_new.ch_names
                    for ch in evoked_eeg.ch_names + evoked_meg.ch_names))
assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
assert_true(all(ch not in evoked_new.ch_names
for ch in evoked_stim.ch_names))
# Now test errors
evoked_badsf = evoked_eeg.copy()
evoked_badsf.info['sfreq'] = 3.1415927
evoked_eeg = evoked_eeg.crop(-.1, .1)
assert_raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
assert_raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
assert_raises(ValueError, evoked_meg.add_channels, [evoked_meg])
assert_raises(AssertionError, evoked_meg.add_channels, evoked_badsf)
| 37.070248 | 78 | 0.677684 | ["BSD-3-Clause"] | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/tests/test_evoked.py | 17,942 | Python
import sqlite3
import mmap
import os
import sys
import copy
import math
import tempfile
from tqdm import tqdm
from scipy.stats import norm
from expiringdict import ExpiringDict
cache = ExpiringDict(max_len=100000,max_age_seconds=600)
def get_num_lines(file_path):
fp = open(file_path, "r+")
buf = mmap.mmap(fp.fileno(), 0)
lines = 0
while buf.readline():
lines += 1
return lines
class ExtendedNaiveBayes:
def __init__(self,family,path_to_data="."):
self.family = family
self.db_name = os.path.join(path_to_data,family+".nb.db")
def fit(self,csv_file):
db = sqlite3.connect(":memory:")
c = db.cursor()
try:
c.execute('''CREATE TABLE data (loc TEXT, mac TEXT, val INTEGER, count INTEGER)''')
db.commit()
except sqlite3.OperationalError:
pass
headers = []
with open(csv_file,"r") as f:
for i,line in enumerate(tqdm(f, total=get_num_lines(csv_file))):
line = line.strip()
if i == 0:
headers = line.split(",")
continue
loc = ""
for j,signal in enumerate(line.split(",")):
if j == 0:
loc = signal
continue
if signal.strip() == "":
continue
mac = headers[j]
val = int(round(float(signal.strip())))
c.execute('''SELECT count FROM data WHERE loc = ? AND mac = ? AND val = ?''',(loc, mac, val
))
count = c.fetchone()
if count == None:
c.execute('''INSERT INTO data(loc,mac,val,count)
VALUES(?,?,?,?)''', (loc,mac,val,1))
else:
c.execute('''UPDATE data SET count = ? WHERE loc = ? AND mac = ? AND val = ?''',(count[0]+1,loc,mac,val))
db.commit()
# with open("dump.sql","w") as f:
# for line in db.iterdump():
# f.write('%s\n' % line)
f = tempfile.TemporaryFile()
for line in db.iterdump():
f.write('{}\n'.format(line).encode('utf-8'))
db.close()
# Write disk to file
try:
os.remove(self.db_name)
except:
pass
db = sqlite3.connect(self.db_name)
c = db.cursor()
f.seek(0)
c.executescript(f.read().decode('utf-8'))
f.close()
db.commit()
db.close()
# os.remove("dump.sql")
def get_locations(self):
db = sqlite3.connect(self.db_name)
c = db.cursor()
c.execute('''SELECT loc FROM data GROUP BY loc''')
locs = c.fetchall()
db.close()
locations = []
for l in locs:
locations.append(l[0])
return locations
def prob_mac_given_loc(self,mac,val,loc,positive):
"""
Determine the P(mac=val | loc) (positive)
Determine the P(mac=val | ~loc) (not positive)
"""
name = "{}{}{}{}".format(mac,val,loc,positive)
cached = cache.get(name)
if cached != None:
return cached
P = 0.005
nameData = "{}{}{}".format(mac,loc,positive)
cached = cache.get(nameData)
if cached != None:
if val in cached:
P = cached[val]
return P
# First find all the values for mac at loc
db = sqlite3.connect(self.db_name)
c = db.cursor()
if positive:
c.execute('''SELECT val,count FROM data WHERE loc = ? AND mac = ?''',(loc,mac))
else:
c.execute('''SELECT val,count FROM data WHERE loc != ? AND mac = ?''',(loc,mac))
val_to_count = {}
for row in c.fetchall():
val_to_count[row[0]] = row[1]
db.close()
# apply gaussian filter
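        # (The smoothing below spreads each observed count over neighbouring
        # signal values with a normal kernel of scale `width`, so values that
        # were never seen but lie close to observed ones still receive a
        # non-zero probability; counts are then re-normalised to sum to 1.)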
new_val_to_count = copy.deepcopy(val_to_count)
width = 3
for v in val_to_count:
for x in range(-1*width**3,width**3+1):
addend = int(round(100*norm.pdf(0,loc=x,scale=width)))
if addend <= 0 :
continue
if v+x not in new_val_to_count:
new_val_to_count[v+x] = 0
new_val_to_count[v+x] = new_val_to_count[v+x]+addend
total = 0
for v in new_val_to_count:
total += new_val_to_count[v]
for v in new_val_to_count:
new_val_to_count[v] = new_val_to_count[v] / total
# 0.5% chance for anything
P = 0.005
if val in new_val_to_count:
P = new_val_to_count[val]
cache[name] = P
cache[nameData] = new_val_to_count
return P
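    # The method below combines the per-(mac, val) evidence with a naive-Bayes
    # update: P(loc | mac=val) = PA*NA / (PA*NA + PnotA*NnotA), with a uniform
    # prior NA = 1/num_locations. Log-probabilities are summed (and
    # exponentiated at the end) to avoid floating-point underflow before the
    # final normalisation across locations.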
def predict_proba(self,header_unfiltered,csv_data_unfiltered):
header = []
csv_data = []
for i,dat in enumerate(csv_data_unfiltered):
if dat == 0:
continue
csv_data.append(dat)
header.append(header_unfiltered[i])
locations = self.get_locations()
num_locations = len(locations)
NA = 1/num_locations
NnotA = 1-NA
Ps = {}
for i,mac in enumerate(header):
val = int(round(float(csv_data[i])))
for location in locations:
if location not in Ps:
Ps[location] = []
PA = self.prob_mac_given_loc(mac,val,location,True)
PnotA = self.prob_mac_given_loc(mac,val,location,False)
P = PA*NA / (PA*NA + PnotA*NnotA)
Ps[location].append(math.log(P))
P_sum = 0
for location in Ps:
P_sum += math.exp(sum(Ps[location]))
d = {}
for location in Ps:
d[location] = math.exp(sum(Ps[location]))/P_sum
return [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)]
def testit():
a =ExtendedNaiveBayes("testing1")
print("fitting data")
file_to_test = "reverse.csv"
a.fit(file_to_test)
print("done")
with open(file_to_test,"r") as f:
for i,line in enumerate(f):
line = line.strip()
if i == 0:
headers = line.split(",")
continue
headers_submit = []
csv_data_submit = []
loc = ""
for j,signal in enumerate(line.split(",")):
if j == 0:
loc = signal
continue
if signal.strip() == "":
continue
headers_submit.append(headers[j])
csv_data_submit.append(int(round(float(signal.strip()))))
print(loc)
a.predict_proba(headers_submit,csv_data_submit)
| 32.415094 | 129 | 0.501892 | ["MIT"] | ChuVal/Respaldo2 | server/ai/src/naive_bayes.py | 6,872 | Python
from pathlib import Path
import tvm
from tvm import autotvm
from tvm import relay
from tvm.autotvm.tuner import GATuner
from tvm.autotvm.tuner import GridSearchTuner
from tvm.autotvm.tuner import RandomTuner
from tvm.autotvm.tuner import XGBTuner
from rl_tuner.ga_dqn_tuner import GADQNTuner
from rl_tuner.ga_dqn_tuner_debug import GADQNTuner as GADQNTunerDebug
from .get_model import get_model
def tune_model(mod, params, tune_settings, target, model_name):
"""
Tune a model for a specified number of trials along with other tune settings.
Tune settings are specified using a json configuration, as per the TVM tools readme.
"""
early_stopping = tune_settings['early_stopping']
number = tune_settings["number"]
save_path = tune_settings["save_path"]
save_name = tune_settings["save_name"]
repeat = tune_settings["repeat"]
debug = tune_settings.get("debug_gadqn") or False
trials = tune_settings["trials"]
tuner = tune_settings["tuner"]
target = tvm.target.Target(target)
tasks = autotvm.task.extract_from_program(
mod["main"],
target=target,
target_host="llvm",
params=params)
runner = autotvm.LocalRunner(
number=number,
repeat=repeat)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(build_func="default"), runner=runner)
for i, tsk in enumerate(tasks):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
# Create a tuner
if tuner in ("xgb", "xgb-rank"):
tuner_obj = XGBTuner(tsk, loss_type="rank")
elif tuner == "xgb_knob":
tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob")
elif tuner == "ga":
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == "random":
tuner_obj = RandomTuner(tsk)
elif tuner == "gridsearch":
tuner_obj = GridSearchTuner(tsk)
elif tuner == "ga-dqn" and debug:
tuner_obj = GADQNTunerDebug(tsk)
elif tuner == "ga-dqn":
tuner_obj = GADQNTuner(tsk)
else:
raise ValueError("invalid tuner: %s " % tuner)
abs_path = Path(save_path + save_name).resolve()
abs_path.mkdir(exist_ok=True, parents=True)
abs_path_str = str(abs_path)
tuner_obj.tune(
n_trial=min(trials, len(tsk.config_space)),
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(trials, prefix=prefix),
autotvm.callback.log_to_file(abs_path_str + f"/tuning_record_model={model_name}.json"),
],
)
# Save debug info for rl tuner only
if tuner == "ga-dqn" and debug:
tuner_obj.save_model(save_path, save_name + f"_model={model_name}_layer={i}")
del tuner_obj
def tune_models(data):
"""
Auto tune all models referenced in the json configuration.
"""
target_string = data['target']
tune_settings = data['autotuner_settings']
for model in data['models']:
trace, input_shapes = get_model(model['name'], model['type'])
mod, params = relay.frontend.from_pytorch(trace, input_shapes)
print(f"Tuning model {model['name']}, using strategy {tune_settings['tuner']}")
tune_model(mod, params, tune_settings, target_string, model['name'])
| 35 | 103 | 0.649854 | ["Apache-2.0"] | lhutton1/benchmark-tvm | tools/tune_model.py | 3,430 | Python
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from application.models import Corpus
def layout():
corpus_options = []
corpora = Corpus.query.filter(Corpus.status == 'ready').all()
for corpus in corpora:
corpus_options.append({
'label': '{} (ID: {})'.format(corpus.name, corpus.id),
'value': corpus.id
})
return html.Div(
html.Div([
html.Div([
html.H1(
children='Archetype Clustering',
style={
'marginTop': 20,
'marginRight': 20
},
),
html.P('Perform archetypal analysis on a corpus:'),
html.Ol([
html.Li([
html.Strong('Natural Language Understanding'),
html.Ul([
html.Li(
'Select a corpus that has been analyzed by '
'IBM Watson Natural Language Understanding.'
),
html.Li(
'Select what output variables to discover '
'archetypes from (i.e. concepts, keywords, '
'and entities)'
),
]),
]),
html.Li([
html.Strong('Archetypal Analysis'),
html.Ul([
html.Li(
'From the selected corpus data, archetypes '
'will be created by clustering data over '
'the selected variable using NMF '
'(Non-Negative Matrix Factorization).'
),
html.Li(
'Variables are mapped onto the '
'archetypes/clusters.'
),
]),
]),
]),
]),
html.Div([
dbc.Row([
dbc.Col([
html.Label('Corpus', style={'fontWeight': 'bold'}),
dcc.Dropdown(
id='corpus-dropdown',
options=corpus_options,
value=(corpus_options[0]['value']
if len(corpus_options) else None)
)
], lg=4, xs=12)
], className='mb-4'),
dbc.Row([
dbc.Col([
html.Label('Variables', style={'fontWeight': 'bold'}),
dcc.Dropdown(
id='Var',
options=[
{'label': 'Concepts', 'value': 'concepts'},
{'label': 'Keywords', 'value': 'keywords'},
{'label': 'Entities', 'value': 'entities'},
],
value='concepts',
)
], xs=12, md=4),
dbc.Col([
html.Label('#Archetypes',
style={'fontWeight': 'bold'}),
dcc.Dropdown(
id='NoA',
options=[{'label': k, 'value': k}
for k in range(2, 16)],
value=6,
multi=False
)
], xs=12, md=4),
dbc.Col([
html.Label('Cut at', style={'fontWeight': 'bold'}),
dcc.Dropdown(
id='Threshold',
options=[{'label': str(k)+'%', 'value': k/100}
for k in range(1, 99)],
value=0.1,
multi=False
)
], xs=12, md=4)
])
]),
html.Div([
html.Div([
dcc.Graph(
id='variables-heatmap'
)
])
]),
])
)
| 39.474138 | 78 | 0.320594 | ["Apache-2.0"] | Bhaskers-Blu-Org1/discover-archetype | web-app/application/pages/dashboard/layout.py | 4,579 | Python
#!/usr/bin/env python
# Author: Nick Zwart
# Date: 2016jun01
# Backup all the projects of a git-hub style website via git mirroring.
# https://www.garron.me/en/bits/backup-git-bare-repo.html
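# Illustrative invocation (flag names come from the option parser below; the
# host and token are placeholders, not real values):
#   python BackupHub.py --gitlab --website https://gitlab.example.com \
#       --token <API_TOKEN> --path ~/backup --ignore-errors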
import os
import sys
import time
import gitlab # external GitLab API
import github # external GitHub API
import shutil
import hashlib
import optparse
import subprocess
class GitWebsiteTypeAPI:
'''The abstract class to template each git-based website api.
'''
def __init__(self, token, url):
self._token = token
self._url = url
def numProjects(self):
# return the number of projects
pass
def projectPath(self, index):
# return the full path for each project including group i.e.
# <user/group-directory>/<repository-name>
# e.g.
# nckz/BackupHub
pass
def projectURL(self, index):
# return the ssh-url that assumes ssh-keys have been distributed e.g.
# git@git<lab/hub>.com:<user/group>/<repo-name>.git
# e.g.
# [email protected]:nckz/BackupHub.git
pass
class GitLabAPI(GitWebsiteTypeAPI):
def __init__(self, token, url):
GitWebsiteTypeAPI.__init__(self, token, url)
# authenticate a gitlab session
self._gl = gitlab.Gitlab(self._url, self._token)
self._gl.auth()
# list all projects
self._projects = self._gl.projects.list(all=True)
def numProjects(self):
return len(self._projects)
def projectPath(self, index):
return self._projects[index].path_with_namespace
def projectURL(self, index):
return self._projects[index].ssh_url_to_repo
class GitHubAPI(GitWebsiteTypeAPI):
def __init__(self, token, url=''):
GitWebsiteTypeAPI.__init__(self, token, url)
# authenticate a gitlab session
self._gh = github.Github(self._token)
# list all projects
self._projects = self._gh.get_user().get_repos()
def numProjects(self):
return len([i for i in self._projects])
def projectPath(self, index):
return self._projects[index].full_name
def projectURL(self, index):
return self._projects[index].ssh_url
class GitBareMirror:
    '''A simple git interface for managing bare-mirrored repos that back up URL-
accessible upstream repos.
'''
def __init__(self, path, url, overwrite=False, moveAside=False):
self._path = path
self._origin_url = url
self._overwrite = overwrite
self._moveAside = moveAside
if self.validExistingRepo():
self.update()
else:
self.createMirroredRepo()
def validExistingRepo(self):
try:
assert os.path.isdir(self._path), ('The supplied directory '
'does not exist.')
# move to the existing repo and check if its bare
os.chdir(self._path)
cmd = subprocess.Popen('git rev-parse --is-bare-repository',
shell=True, stdout=subprocess.PIPE)
cmd.wait()
# Error checking
assert cmd.returncode != 128, ('The supplied directory '
'exists, but is not a git repo.')
assert cmd.returncode == 0, 'There was an unhandled git error.'
firstline = cmd.stdout.readlines()[0].decode('utf8')
assert 'false' not in firstline, ('The supplied directory '
'is NOT a bare repo.')
assert 'true' in firstline, ('Unable to verify that the repo is '
'bare.')
# check if the existing repo has the same origin url
# -prevent name collision if group/org namespace isn't used
cmd = subprocess.Popen('git config --get remote.origin.url',
shell=True, stdout=subprocess.PIPE)
cmd.wait()
firstline = cmd.stdout.readlines()[0].decode('utf8')
assert self._origin_url in firstline, ('The existing repo '
'has a url that differs from the supplied origin url.')
return True
except AssertionError as err:
            print('The given path does not contain a valid repo:', err)
return False
def update(self):
cmd = subprocess.Popen('git remote update', shell=True,
stdout=subprocess.PIPE)
cmd.wait()
assert cmd.returncode == 0, 'ERROR: git error'
print('SUCCESS (updated)')
def createMirroredRepo(self):
# Handle existing directories based on user options:
# move the dir to a unique name, remove it, or fail w/ exception
if self._moveAside and os.path.exists(self._path):
parentPath = os.path.dirname(self._path)
dirContents = str(os.listdir(parentPath)).encode('utf8')
newNameExt = hashlib.md5(dirContents).hexdigest()
newName = self._path+'_'+newNameExt+'_bu'
print('MOVING PATH', self._path, newName)
shutil.move(self._path, newName)
elif self._overwrite and os.path.exists(self._path):
print('REMOVING PATH', self._path)
shutil.rmtree(self._path)
else:
assert not os.path.exists(self._path), ('ERROR: the supplied path '
'already exists, unable to create mirror.')
os.makedirs(self._path)
os.chdir(self._path)
cmd = subprocess.Popen('git clone --mirror ' + str(self._origin_url)
+ ' .', shell=True, stdout=subprocess.PIPE)
cmd.wait()
print('SUCCESS (new mirror)')
if __name__ == '__main__':
# parse input args
parser = optparse.OptionParser()
parser.add_option('--path', dest='backupPath', action='store',
type='string', default=os.path.expanduser('~/backup'),
help='The directory to store the backups.')
parser.add_option('--ignore-errors', dest='ignoreErrors',
action='store_true', default=False,
help='Continue to backup other repos if one has failed.')
parser.add_option('--overwrite', dest='overwrite',
action='store_true', default=False,
help='Overwrite existing directories.')
parser.add_option('--move-aside', dest='moveAside',
action='store_true', default=False,
help='Move existing directories aside with a tempfile extension.')
parser.add_option('--token', dest='token', action='store',
type='string', default=None,
help='The token required to access the target git web api.')
parser.add_option('--website', dest='website', action='store',
type='string', default=None,
help='The hub website where the git repos are stored.')
parser.add_option('--github', dest='github',
action='store_true', default=False,
help='Connect to GitHub.')
parser.add_option('--gitlab', dest='gitlab',
action='store_true', default=True,
help='Connect to GitLab (default).')
options, args = parser.parse_args(sys.argv)
localtime = time.asctime( time.localtime(time.time()) )
print("BackupHub Start:", localtime)
assert options.token is not None
if options.github:
options.gitlab = False
if options.gitlab:
assert options.website is not None
# Check for existing backup directory and make one if it doesn't exist.
if not os.path.isdir(options.backupPath):
print('The specified backup path doesn\'t exist.')
sys.exit(1)
# Get the repository info from the git web api.
if options.github:
webapi = GitHubAPI(options.token)
elif options.gitlab:
webapi = GitLabAPI(options.token, options.website)
# Display whats going on as the repos get either updated or newly mirrored.
print('Repository:')
for i in range(webapi.numProjects()):
try:
curPath = os.path.join(options.backupPath, webapi.projectPath(i))
curURL = webapi.projectURL(i)
print('\nSyncing: ', curURL, curPath)
repo = GitBareMirror(curPath, curURL, overwrite=options.overwrite,
moveAside=options.moveAside)
except Exception as err:
if options.ignoreErrors:
print(err)
else:
raise
localtime = time.asctime( time.localtime(time.time()) )
print("BackupHub Finished:", localtime)
| 35.57563 | 79 | 0.615803 | ["MIT"] | nckz/BackupHub | BackupHub.py | 8,467 | Python
import os
from src.multi_site_inputs_parser import multi_site_csv_parser
from src.parse_api_responses_to_csv import parse_responses_to_csv_with_template
from src.post_and_poll import get_api_results
from src.parse_api_responses_to_excel import parse_api_responses_to_excel
"""
Change these values
"""
##############################################################################################################
API_KEY = 'DEMO KEY' # REPLACE WITH YOUR API KEY
inputs_path = os.path.join('inputs')
outputs_path = os.path.join('Ref_Profile_Full_Service_Restaurant')
output_template = os.path.join(outputs_path, 'results_template.csv')
output_file = os.path.join(outputs_path, 'results_summary.csv')
##############################################################################################################
server = 'https://developer.nrel.gov/api/reopt/v1'
path_to_inputs = os.path.join(inputs_path, 'baseline_scenarios_full_service_restaurant.csv')
list_of_posts = multi_site_csv_parser(path_to_inputs, api_url=server, API_KEY=API_KEY)
responses = []
for post in list_of_posts:
responses.append(get_api_results(
post, results_file=os.path.join(outputs_path, post['Scenario']['description'] + '.json'),
api_url=server, API_KEY=API_KEY)
)
"""
Two options for making a summary of scenarios:
1. Write to a csv using a template with column headers for desired summary keys (scalar values only)
2. Write all inputs, outputs, and dispatch to an Excel spreadsheet
"""
parse_responses_to_csv_with_template(csv_template=output_template, responses=responses, output_csv=output_file, input_csv=path_to_inputs,
n_custom_columns=2)
parse_api_responses_to_excel(responses, spreadsheet='results_summary.xlsx')
| 44.175 | 137 | 0.68987 | ["BSD-3-Clause"] | pragadeeshm/REopt-API-Analysis | multi_site/baseline_scenario_full_service_restaurant.py | 1,767 | Python
import math
import sys
def example_1():
"""
THIS IS A LONG COMMENT AND should be wrapped to fit within a 72
character limit
"""
long_1 = """LONG CODE LINES should be wrapped within 79 character to
prevent page cutoff stuff"""
long_2 = """This IS a long string that looks gross and goes beyond
what it should"""
some_tuple =(1, 2, 3, 'a')
some_variable={"long": long_1,
'other':[math.pi, 100,200, 300, 9999292929292, long_2],
"more": {"inner": "THIS whole logical line should be wrapped"},
"data": [444,5555,222,3,3,4,4,5,5,5,5,5,5,5]}
return (some_tuple, some_variable)
def example_2():
return {"has_key() is deprecated": True}
class Example_3(object):
def __init__(self, bar):
self.bar = bar
def bar_func(self):
if self.bar:
self.bar += 1
self.bar = self.bar * self.bar
return self.bar
else:
some_string = """
INDENTATION IN MULTIPLE STRINGS SHOULD NOT BE TOUCHED only
actual code should be reindented, THIS IS MORE CODE
"""
            return (sys.path, some_string)
| 31.615385 | 80 | 0.561233 | ["MIT"] | DevinJMantz/lambdata-25 | lambdata/code_review.py | 1,233 | Python
from rest_framework.test import APIRequestFactory
from rest_framework import status
from django.test import TestCase
from django.urls import reverse
from ..models import User
from ..serializer import UserSerializer
from ..views import UserViewSet
import ipapi
class UsersApiRootTestCase(TestCase):
def test_api_root_should_reply_200(self):
""" GET /api/v1/ should return an hyperlink to the users view and return a successful status 200 OK.
"""
request = APIRequestFactory().get("/api/v1/")
user_list_view = UserViewSet.as_view({"get": "list"})
response = user_list_view(request)
self.assertEqual(status.HTTP_200_OK, response.status_code)
class UsersApiTestCase(TestCase):
""" Factorize the tests setup to use a pool of existing users. """
def setUp(self):
self.factory = APIRequestFactory()
self.users = [
User.objects.create(
first_name="Riri", last_name="Duck", email="[email protected]", password="dummy"),
User.objects.create(
first_name="Fifi", last_name="Duck", email="[email protected]", password="dummy"),
User.objects.create(
first_name="Loulou", last_name="Duck", email="[email protected]", password="dummy")
]
class GetAllUsersTest(UsersApiTestCase):
""" Test GET /api/v1/users """
def test_list_all_users_should_retrieve_all_users_and_reply_200(self):
""" GET /api/v1/users should return all the users (or empty if no users found)
and return a successful status 200 OK.
"""
users = User.objects.all().order_by("id")
request = self.factory.get(reverse("v1:user-list"))
serializer = UserSerializer(users, many=True, context={'request': request})
user_list_view = UserViewSet.as_view({"get": "list"})
response = user_list_view(request)
self.assertEqual(len(self.users), len(response.data["results"]))
self.assertEqual(serializer.data, response.data["results"])
self.assertEqual(status.HTTP_200_OK, response.status_code)
class GetSingleUserTest(UsersApiTestCase):
""" Test GET /api/v1/users/:id """
def test_get_user_when_id_valid_should_retrieve_user_and_reply_200(self):
riri = User.objects.create(
first_name="Riri", last_name="Duck", email="[email protected]", password="dummy")
user = User.objects.get(pk=riri.pk)
request = self.factory.get(reverse("v1:user-detail", kwargs={"pk": riri.pk}))
serializer = UserSerializer(user, context={'request': request})
user_detail_view = UserViewSet.as_view({"get": "retrieve"})
response = user_detail_view(request, pk=riri.pk)
self.assertEqual(serializer.data, response.data)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_get_user_when_id_invalid_should_reply_404(self):
request = self.factory.get(reverse("v1:user-detail", kwargs={"pk": 100}))
user_detail_view = UserViewSet.as_view({"get": "retrieve"})
response = user_detail_view(request, pk=100)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
class CreateNewUserTest(UsersApiTestCase):
""" Test POST /api/v1/users
    Override 'REMOTE_ADDR' to set the client IP address to Switzerland or another country for testing purposes.
"""
def test_post_user_when_from_Switzerland_and_data_valid_should_create_user_and_reply_201(self):
initial_users_count = len(self.users)
valid_data = {
"first_name": "Casper",
"last_name": "Canterville",
"email": "[email protected]",
"password": "dummy",
}
request = self.factory.post(
reverse("v1:user-list"),
data=valid_data,
REMOTE_ADDR='2.16.92.0'
)
user_detail_view = UserViewSet.as_view({"post": "create"})
response = user_detail_view(request)
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
new_users_count = User.objects.count()
self.assertEqual(initial_users_count+1, new_users_count)
def test_post_user_when_id_invalid_should_not_create_user_and_reply_400(self):
initial_users_count = len(self.users)
invalid_data = {
"first_name": "Casper",
"last_name": "Canterville",
"email": "",
"password": "dummy",
}
request = self.factory.post(
reverse("v1:user-list"),
data=invalid_data,
REMOTE_ADDR='2.16.92.0'
)
user_detail_view = UserViewSet.as_view({"post": "create"})
response = user_detail_view(request)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
users_count = User.objects.count()
self.assertEqual(initial_users_count, users_count)
def test_post_user_when_data_valid_but_email_already_used_should_not_create_user_and_reply_400(self):
initial_users_count = len(self.users)
valid_data_with_used_email = {
"first_name": "Casper",
"last_name": "Canterville",
"email": "[email protected]",
"password": "dummy",
}
request = self.factory.post(
reverse("v1:user-list"),
data=valid_data_with_used_email,
REMOTE_ADDR='2.16.92.0'
)
user_detail_view = UserViewSet.as_view({"post": "create"})
response = user_detail_view(request)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
new_users_count = User.objects.count()
self.assertEqual(initial_users_count, new_users_count)
def test_post_user_when_IP_not_in_Switzerland_should_not_create_user_and_reply_403(self):
initial_users_count = len(self.users)
valid_data = {
"first_name": "Casper",
"last_name": "Canterville",
"email": "[email protected]",
"password": "dummy",
}
request = self.factory.post(
reverse("v1:user-list"),
data=valid_data,
REMOTE_ADDR='2.16.8.0' # Spain
)
user_detail_view = UserViewSet.as_view({"post": "create"})
response = user_detail_view(request)
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertTrue(len(response.data['detail']) > 0)
users_count = User.objects.count()
self.assertEqual(initial_users_count, users_count)
class UpdateSingleUserTest(UsersApiTestCase):
""" Test PUT|PATCH /api/v1/user/:id """
def test_patch_user_when_id_valid_should_patch_user_and_reply_200(self):
riri = User.objects.create(
first_name="Riri", last_name="Duck", email="[email protected]", password="dummy")
request = self.factory.patch(
reverse("v1:user-detail", kwargs={"pk": riri.pk}),
data={"email": "[email protected]"}
)
user_detail_view = UserViewSet.as_view({"patch": "partial_update"})
response = user_detail_view(request, pk=riri.pk)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_patch_user_when_id_invalid_should_not_patch_user_and_reply_404(self):
riri = User.objects.create(
first_name="Riri", last_name="Duck", email="[email protected]", password="dummy")
request = self.factory.patch(
reverse("v1:user-detail", kwargs={"pk": 100}),
data={"email": "[email protected]"}
)
user_detail_view = UserViewSet.as_view({"patch": "partial_update"})
response = user_detail_view(request, pk=100)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
def test_put_when_invalid_data_should_not_update_user_and_reply_400(self):
riri = User.objects.create(
first_name="Riri", last_name="Duck", email="[email protected]", password="dummy")
invalid_payload = {
"first_name": "",
"last_name": "Duck",
"email": "[email protected]"
}
request = self.factory.put(
reverse("v1:user-detail", kwargs={"pk": riri.pk}),
data=invalid_payload
)
user_detail_view = UserViewSet.as_view({"put": "update"})
response = user_detail_view(request, pk=riri.pk)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
class DeleteSingleUserTest(UsersApiTestCase):
""" Test DELETE /api/v1/user/:id """
def test_delete_user_when_id_valid_should_delete_user_and_reply_204(self):
initial_users_count = len(self.users)
user_to_delete = self.users[0]
request = self.factory.delete(reverse("v1:user-detail", kwargs={"pk": user_to_delete.pk}))
user_detail_view = UserViewSet.as_view({"delete": "destroy"})
response = user_detail_view(request, pk=user_to_delete.pk)
self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code)
new_users_count = User.objects.count()
self.assertEqual(initial_users_count-1, new_users_count)
def test_delete_user_when_id_invalid_should_reply_404(self):
request = self.factory.delete(reverse("v1:user-detail", kwargs={"pk": 100}))
user_detail_view = UserViewSet.as_view({"delete": "destroy"})
response = user_detail_view(request, pk=100)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
| 39.052846 | 108 | 0.65192 | ["MIT"] | r-o-main/users-exercise | users_django/users/tests/test_views.py | 9,607 | Python
# Credits to Ozan Sener
# https://github.com/intel-isl/MultiObjectiveOptimization
import numpy as np
import torch
class MGDASolver:
MAX_ITER = 250
STOP_CRIT = 1e-5
@staticmethod
def _min_norm_element_from2(v1v1, v1v2, v2v2):
"""
Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
        d is the distance (objective) optimized
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2>
"""
if v1v2 >= v1v1:
# Case: Fig 1, third column
gamma = 0.999
cost = v1v1
return gamma, cost
if v1v2 >= v2v2:
# Case: Fig 1, first column
gamma = 0.001
cost = v2v2
return gamma, cost
# Case: Fig 1, second column
gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))
cost = v2v2 + gamma * (v1v2 - v2v2)
return gamma, cost
@staticmethod
def _min_norm_2d(vecs: list, dps):
"""
Find the minimum norm solution as combination of two points
This is correct only in 2D
ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0
for all i, c_i + c_j = 1.0 for some i, j
"""
dmin = 1e8
sol = 0
for i in range(len(vecs)):
for j in range(i + 1, len(vecs)):
if (i, j) not in dps:
dps[(i, j)] = 0.0
for k in range(len(vecs[i])):
dps[(i, j)] += torch.dot(vecs[i][k].view(-1),
vecs[j][k].view(-1)).detach()
dps[(j, i)] = dps[(i, j)]
if (i, i) not in dps:
dps[(i, i)] = 0.0
for k in range(len(vecs[i])):
dps[(i, i)] += torch.dot(vecs[i][k].view(-1),
vecs[i][k].view(-1)).detach()
if (j, j) not in dps:
dps[(j, j)] = 0.0
for k in range(len(vecs[i])):
dps[(j, j)] += torch.dot(vecs[j][k].view(-1),
vecs[j][k].view(-1)).detach()
c, d = MGDASolver._min_norm_element_from2(dps[(i, i)],
dps[(i, j)],
dps[(j, j)])
if d < dmin:
dmin = d
sol = [(i, j), c, d]
return sol, dps
@staticmethod
def _projection2simplex(y):
"""
Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
"""
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = (np.sum(y) - 1.0) / m
for i in range(m - 1):
tmpsum += sorted_y[i]
tmax = (tmpsum - 1) / (i + 1.0)
if tmax > sorted_y[i + 1]:
tmax_f = tmax
break
return np.maximum(y - tmax_f, np.zeros(y.shape))
@staticmethod
def _next_point(cur_val, grad, n):
proj_grad = grad - (np.sum(grad) / n)
tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]
tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])
skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7)
t = 1
if len(tm1[tm1 > 1e-7]) > 0:
t = np.min(tm1[tm1 > 1e-7])
if len(tm2[tm2 > 1e-7]) > 0:
t = min(t, np.min(tm2[tm2 > 1e-7]))
next_point = proj_grad * t + cur_val
next_point = MGDASolver._projection2simplex(next_point)
return next_point
@staticmethod
def find_min_norm_element(vecs: list):
"""
Given a list of vectors (vecs), this method finds the minimum norm
element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i]
and \sum c_i = 1. It is quite geometric, and the main idea is the
fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution
lies in (0, d_{i,j})Hence, we find the best 2-task solution , and
then run the projected gradient descent until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MGDASolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
while iter_count < MGDASolver.MAX_ITER:
grad_dir = -1.0 * np.dot(grad_mat, sol_vec)
new_point = MGDASolver._next_point(sol_vec, grad_dir, n)
# Re-compute the inner products for line search
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)]
v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)]
v2v2 += new_point[i] * new_point[j] * dps[(i, j)]
nc, nd = MGDASolver._min_norm_element_from2(v1v1.item(),
v1v2.item(),
v2v2.item())
# try:
new_sol_vec = nc * sol_vec + (1 - nc) * new_point
# except AttributeError:
# print(sol_vec)
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MGDASolver.STOP_CRIT:
return sol_vec, nd
sol_vec = new_sol_vec
@staticmethod
def find_min_norm_element_FW(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm
element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if
d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies
in (0, d_{i,j})Hence, we find the best 2-task solution, and then
run the Frank Wolfe until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MGDASolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
while iter_count < MGDASolver.MAX_ITER:
t_iter = np.argmin(np.dot(grad_mat, sol_vec))
v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
v2v2 = grad_mat[t_iter, t_iter]
nc, nd = MGDASolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc * sol_vec
new_sol_vec[t_iter] += 1 - nc
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MGDASolver.STOP_CRIT:
return sol_vec, nd
sol_vec = new_sol_vec
@classmethod
def get_scales(cls, grads, losses, normalization_type, tasks):
scale = {}
gn = gradient_normalizers(grads, losses, normalization_type)
for t in tasks:
for gr_i in range(len(grads[t])):
grads[t][gr_i] = grads[t][gr_i] / (gn[t] + 1e-5)
sol, min_norm = cls.find_min_norm_element([grads[t] for t in tasks])
for zi, t in enumerate(tasks):
scale[t] = float(sol[zi])
return scale
def gradient_normalizers(grads, losses, normalization_type):
gn = {}
if normalization_type == 'l2':
for t in grads:
gn[t] = torch.sqrt(
torch.stack([gr.pow(2).sum().data for gr in grads[t]]).sum())
elif normalization_type == 'loss':
for t in grads:
gn[t] = min(losses[t].mean(), 10.0)
elif normalization_type == 'loss+':
for t in grads:
gn[t] = min(losses[t].mean() * torch.sqrt(
torch.stack([gr.pow(2).sum().data for gr in grads[t]]).sum()),
10)
elif normalization_type == 'none' or normalization_type == 'eq':
for t in grads:
gn[t] = 1.0
else:
raise ValueError('ERROR: Invalid Normalization Type')
return gn
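# Minimal usage sketch (an assumption about the calling convention, not part of
# the original module): gradients are given per task as lists of tensors, and
# `get_scales` returns one scaling coefficient per task. With two orthogonal
# unit gradients and no normalisation, both tasks get a weight of ~0.5.
if __name__ == "__main__":
    example_grads = {
        "task_a": [torch.tensor([1.0, 0.0])],
        "task_b": [torch.tensor([0.0, 1.0])],
    }
    example_losses = {"task_a": torch.tensor(1.0), "task_b": torch.tensor(1.0)}
    print(MGDASolver.get_scales(example_grads, example_losses, "none",
                                ["task_a", "task_b"]))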
| 36.599174 | 83 | 0.489669 | ["MIT"] | DavidHidde/backdoors101 | utils/min_norm_solvers.py | 8,857 | Python
from .main import Controller
__all__ = ['Controller']
| 18 | 28 | 0.759259 | ["MIT"] | Grosse-pasteque/CPT | CPT/__init__.py | 54 | Python
"""This module contains logic for refreshing materialized views.
Materialized views don't get refreshed automatically after a bucardo initial
sync. This module detects them and refreshes them.
Classes exported:
MatViews: Identify materialized views and refresh them on the secondary database.
"""
import psycopg2
from psycopg2 import sql
from plugins import Plugin
class MatViews(Plugin):
"""Identify materialized views and refresh them on the secondary database.
Materialized views are identified based on the namespaces specified in the
config.
Methods exported:
refresh: find and refresh materialized views
"""
def __init__(self, cfg):
"""Create configuration settings that may not already be set.
The user can either define the relevant namespaces specifically for the
mat_views plugin, or the mat_views plugin can draw on the settings in the
bucardo section of the config. If neither exists, the script will throw an
error.
Keyword arguments:
cfg: contents of the config file as a dictionary
"""
super(MatViews, self).__init__(cfg)
# Override or inherit certain params from the parent, depending on the config.
self._set_inheritable_params('mat_views')
def refresh(self):
"""Refresh materialized views.
First, this method finds the namespaces being replicated, by referring to the
config for schemas and tables.
Then it finds any materialized views in the namespaces.
Then it refreshes the materialized views.
"""
print('Finding materialized views.')
# 'm' is for "materialized view".
views = self._find_objects('m', self.repl_objects)
if views:
conn = psycopg2.connect(self.secondary_schema_owner_conn_pg_format)
for view in views:
print(f'Refreshing {view[0]}.{view[1]}')
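                # Compose the statement with sql.Identifier so the schema and
                # view names are safely quoted rather than interpolated as raw
                # strings.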
query = sql.SQL('REFRESH MATERIALIZED VIEW {schema}.{table}').format(
schema=sql.Identifier(view[0]),
table=sql.Identifier(view[1])
)
try:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
except Exception:
conn.close()
raise
conn.close()
print('Done refreshing views.')
else:
print('No materialized views found.')
| 34.027027 | 86 | 0.629468 | ["MIT"] | emmadev/bucardo_wrapper | plugins/mat_views/__init__.py | 2,518 | Python
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: list_translate_rule.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from monitor_sdk.model.monitor import translate_rule_pb2 as monitor__sdk_dot_model_dot_monitor_dot_translate__rule__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='list_translate_rule.proto',
package='translate',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x19list_translate_rule.proto\x12\ttranslate\x1a.monitor_sdk/model/monitor/translate_rule.proto\";\n\x18ListTranslateRuleRequest\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\"\x8c\x01\n\x19ListTranslateRuleResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0b\n\x03msg\x18\x02 \x01(\t\x12\r\n\x05total\x18\x03 \x01(\x05\x12\x0c\n\x04page\x18\x04 \x01(\x05\x12\x11\n\tpage_size\x18\x05 \x01(\x05\x12$\n\x04\x64\x61ta\x18\x06 \x03(\x0b\x32\x16.monitor.TransalteRule\"\x88\x01\n ListTranslateRuleResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x32\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32$.translate.ListTranslateRuleResponseb\x06proto3')
,
dependencies=[monitor__sdk_dot_model_dot_monitor_dot_translate__rule__pb2.DESCRIPTOR,])
_LISTTRANSLATERULEREQUEST = _descriptor.Descriptor(
name='ListTranslateRuleRequest',
full_name='translate.ListTranslateRuleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='translate.ListTranslateRuleRequest.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='translate.ListTranslateRuleRequest.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=147,
)
_LISTTRANSLATERULERESPONSE = _descriptor.Descriptor(
name='ListTranslateRuleResponse',
full_name='translate.ListTranslateRuleResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='translate.ListTranslateRuleResponse.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='msg', full_name='translate.ListTranslateRuleResponse.msg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total', full_name='translate.ListTranslateRuleResponse.total', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page', full_name='translate.ListTranslateRuleResponse.page', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='translate.ListTranslateRuleResponse.page_size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='translate.ListTranslateRuleResponse.data', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=150,
serialized_end=290,
)
_LISTTRANSLATERULERESPONSEWRAPPER = _descriptor.Descriptor(
name='ListTranslateRuleResponseWrapper',
full_name='translate.ListTranslateRuleResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='translate.ListTranslateRuleResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='translate.ListTranslateRuleResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='translate.ListTranslateRuleResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='translate.ListTranslateRuleResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=293,
serialized_end=429,
)
_LISTTRANSLATERULERESPONSE.fields_by_name['data'].message_type = monitor__sdk_dot_model_dot_monitor_dot_translate__rule__pb2._TRANSALTERULE
_LISTTRANSLATERULERESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTRANSLATERULERESPONSE
DESCRIPTOR.message_types_by_name['ListTranslateRuleRequest'] = _LISTTRANSLATERULEREQUEST
DESCRIPTOR.message_types_by_name['ListTranslateRuleResponse'] = _LISTTRANSLATERULERESPONSE
DESCRIPTOR.message_types_by_name['ListTranslateRuleResponseWrapper'] = _LISTTRANSLATERULERESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListTranslateRuleRequest = _reflection.GeneratedProtocolMessageType('ListTranslateRuleRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTTRANSLATERULEREQUEST,
'__module__' : 'list_translate_rule_pb2'
# @@protoc_insertion_point(class_scope:translate.ListTranslateRuleRequest)
})
_sym_db.RegisterMessage(ListTranslateRuleRequest)
ListTranslateRuleResponse = _reflection.GeneratedProtocolMessageType('ListTranslateRuleResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTTRANSLATERULERESPONSE,
'__module__' : 'list_translate_rule_pb2'
# @@protoc_insertion_point(class_scope:translate.ListTranslateRuleResponse)
})
_sym_db.RegisterMessage(ListTranslateRuleResponse)
ListTranslateRuleResponseWrapper = _reflection.GeneratedProtocolMessageType('ListTranslateRuleResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _LISTTRANSLATERULERESPONSEWRAPPER,
'__module__' : 'list_translate_rule_pb2'
# @@protoc_insertion_point(class_scope:translate.ListTranslateRuleResponseWrapper)
})
_sym_db.RegisterMessage(ListTranslateRuleResponseWrapper)
# @@protoc_insertion_point(module_scope)
| 42.509259 | 777 | 0.766935 | ["Apache-2.0"] | easyopsapis/easyops-api-python | monitor_sdk/api/translate/list_translate_rule_pb2.py | 9,182 | Python
"""NAO robot class"""
from .robot import Robot
import torch
class Pose_Assumption(Robot):
def __init__(self, env_params):
super(Pose_Assumption, self).__init__(env_params)
env_params = self.ingest_params2(env_params)
self.target = env_params["target error"]
self.joints = env_params["joints to move"]
self.target_angles = env_params["target angles"]
self.default_pose = "LyingBack"
self.penalty = 0 # State
self.error = float('inf') # Initial state
self.assume_pose(self.default_pose)
self.set_stiffness()
def ingest_params2(self, env_params):
if "target error" not in env_params:
env_params["target error"] = 0.1
if "joints to move" not in env_params:
env_params["joints to move"] = ["HeadYaw", "HeadPitch",
"RShoulderPitch","RShoulderRoll",
"RElbowYaw", "RElbowRoll",
"RWristYaw",
"RHipYawPitch",
"RHipRoll", "RHipPitch", "RKneePitch",
"RAnklePitch", "RAnkleRoll",
"LShoulderPitch","LShoulderRoll",
"LElbowYaw", "LElbowRoll",
"LWristYaw",
"LHipYawPitch",
"LHipRoll", "LHipPitch", "LKneePitch",
"LAnklePitch", "LAnkleRoll"
]
# NOTE: joints must be named individually
if "target angles" not in env_params:
env_params["target angles"] = [0.0, 0.153,
0.66, 0.914,
0.994, 0.721,
0.08432,
-0.512, -0.04,
-0.8299, 0.317,
0.288, -0.268, 0.99, 0.175, -1.234,
-0.819, -1.286, -0.58287, 0.118,
0.2899, -0.09, 0.6, -0.046
]
return env_params
def set_stiffness(self):
time = 1.0 # Seconds
value = 0.7 # Stiffness (max 1/min 0, higher is looser)
self.motion.stiffnessInterpolation(self.joints, value, time)
def step(self):
"""In this function the robot will return to default pose, to
be ready for the new command.
"""
origin = [0.4] # Arbitrary input
self.observation = torch.tensor(origin,
dtype=self.precision,
device = self.device)
def evaluate(self, inference):
"""Evaluates the predicted pose."""
self.reset_state()
values = self.process_inference(inference)
self.apply(values)
angles = self.get_joints()
self.calc_error(angles)
return self.error
def reset_state(self):
self.penalty = 0
self.error = float('inf')
def process_inference(self, inference):
"""Ensures safety of the predicted angles."""
values = [a.item() for a in inference]
for idx, value in enumerate(values):
name = self.joints[idx]
limits = self.motion.getLimits(name)
min_angle = limits[0][0]
max_angle = limits[0][1]
max_vel = limits[0][2] # Unenforced
max_tor = limits[0][3] # Unenforced
value = self.cap_angle(value, min_angle, max_angle)
values[idx] = [value]
return values
def apply(self, angles):
"""Applies the pose to the robot."""
self.set_joints(angles)
def cap_angle(self, x, a, b):
penalty = 10 # Safety penalty
if x<a:
self.penalty += penalty
x = a
elif x>b:
self.penalty += penalty
x = b
return x
def calc_error(self, angles):
"""Calculate the error between predicted and target angles, and
add the safety penalties.
"""
errors = [abs(x-y) for x,y in zip(angles, self.target_angles)]
error = sum(errors)
error += self.penalty
self.error = torch.tensor(error)
#
| 35.128788 | 82 | 0.463015 | [
"MIT"
] | AroMorin/DNNOP | environments/nao/pose_assumption.py | 4,637 | Python |
__package__ = "blackhat.bin.installable"
from ...helpers import Result
from ...lib.input import ArgParser
from ...lib.output import output
from ...lib.ifaddrs import getifaddrs
__COMMAND__ = "ifconfig"
__DESCRIPTION__ = ""
__DESCRIPTION_LONG__ = ""
__VERSION__ = "1.2"
def parse_args(args=[], doc=False):
"""
Handle parsing of arguments and flags. Generates docs using help from `ArgParser`
Args:
args (list): argv passed to the binary
doc (bool): If the function should generate and return manpage
Returns:
Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage
"""
parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}")
parser.add_argument("--version", action="store_true", help=f"output version information and exit")
args = parser.parse_args(args)
arg_helps_with_dups = parser._actions
arg_helps = []
[arg_helps.append(x) for x in arg_helps_with_dups if x not in arg_helps]
NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}"
SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... "
DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION__}\n\n"
for item in arg_helps:
        # It's a positional argument
if len(item.option_strings) == 0:
# If the argument is optional:
if item.nargs == "?":
SYNOPSIS += f"[{item.dest.upper()}] "
else:
SYNOPSIS += f"{item.dest.upper()} "
else:
# Boolean flag
if item.nargs == 0:
if len(item.option_strings) == 1:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\t{item.help}\n\n"
else:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\n\t\t{item.help}\n\n"
elif item.nargs == "+":
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n"
else:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/={item.dest.upper()}\n\t\t{item.help}\n\n"
if doc:
return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n"
else:
return args, parser
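# Usage note (illustrative): parse_args(["--version"]) returns the parsed args
# together with the parser, while parse_args(doc=True) returns the generated
# manpage text instead.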
def main(args: list, pipe: bool) -> Result:
"""
# TODO: Add docstring for manpage
"""
args, parser = parse_args(args)
if parser.error_message:
if not args.version:
return output(f"{__COMMAND__}: {parser.error_message}", pipe, success=False)
if args.version:
return output(f"ifconfig (blackhat netutils) {__VERSION__}", pipe)
    # If we specify -h/--help, args will be empty, so exit gracefully
if not args:
return output("", pipe)
else:
result = getifaddrs()
return output(result.data.ifa_addr, pipe)
| 34.853659 | 121 | 0.602519 | [
"MIT"
] | stautonico/blackhat-simulator | client/blackhat/bin/installable/ifconfig.py | 2,858 | Python |
# -*- coding: utf-8 -*-
import numpy as np
import astropy.units as u
import pkg_resources
from astropy.io import ascii
from astropy.modeling.tabular import tabular_model
from .baseclasses import BaseAtttauVModel
from .helpers import _test_valid_x_range
__all__ = ["WG00"]
x_range_WG00 = [0.1, 3.0001]
class WG00(BaseAtttauVModel):
r"""
Attenuation curve of Witt & Gordon (2000)
Parameters
----------
    tau_V: float
optical depth in V band
Raises
------
InputParameterError
        Input tau_V values outside of defined range
Notes
-----
From Witt & Gordon (2000, ApJ, Volume 528, pp. 799-816)
Example:
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from dust_attenuation.radiative_transfer import WG00
fig, ax = plt.subplots(1,2, figsize=(10,6))
# generate the curves and plot them
# Use 1/microns for a better sampling
x = np.arange(0.35,10.0,0.1)/u.micron
x_Vband = 0.55 # microns
tau_Vs = [0.25,0.4,1.1,17.0,46.0]
for tau_V in tau_Vs[::-1]:
att_model = WG00(tau_V = tau_V, geometry = 'cloudy',
dust_type = 'mw', dust_distribution = 'clumpy')
ax[0].plot(x,att_model(1/x),label=r'$\tau_V$ = %.2f mag' % (tau_V))
ax[1].plot(x,att_model(1/x)/att_model(x_Vband),
label=r'$\tau_V$ = %.2f mag' % (tau_V))
ax[0].set_xlabel(r'$x$ [$\mu m^{-1}$]')
ax[0].set_ylabel(r'$Att(x)$ [mag]')
ax[1].set_xlabel(r'$x$ [$\mu m^{-1}$]')
ax[1].set_ylabel(r'$Att(x)/Att_V$')
ax[0].legend(loc='best')
ax[1].legend(loc='best')
fig.suptitle(r'CLOUDY / MW / clumpy model',size=15)
plt.tight_layout()
fig.subplots_adjust(top=0.88)
plt.show()
"""
tau_V_range = [0.25, 50.0]
x_range = x_range_WG00
def __init__(
self, tau_V, geometry="dusty", dust_type="mw", dust_distribution="clumpy"
):
"""
Load the attenuation curves for a given geometry, dust type and
dust distribution.
Parameters
----------
tau_V: float
optical depth in V band
geometry: string
'shell', 'cloudy' or 'dusty'
dust_type: string
'mw' or 'smc'
dust_distribution: string
'homogeneous' or 'clumpy'
Returns
-------
Attx: np array (float)
Att(x) attenuation curve [mag]
"""
# Ensure strings are lower cases
self.geometry = geometry.lower()
self.dust_type = dust_type.lower()
self.dust_distribution = dust_distribution.lower()
data_path = pkg_resources.resource_filename("dust_attenuation", "data/WG00/")
data = ascii.read(data_path + self.geometry + ".txt", header_start=0)
if self.dust_type == "mw":
start = 0
elif self.dust_type == "smc":
start = 25
# Column names
tau_colname = "tau"
tau_att_colname = "tau_att"
fsca_colname = "f(sca)"
fdir_colname = "f(dir)"
fesc_colname = "f(esc)"
if self.dust_distribution == "clumpy":
tau_att_colname += "_c"
fsca_colname += "_c"
fdir_colname += "_c"
fesc_colname += "_c"
elif self.dust_distribution == "homogeneous":
tau_att_colname += "_h"
fsca_colname += "_h"
fdir_colname += "_h"
fesc_colname += "_h"
tau_att_list = []
tau_list = []
fsca_list = []
fdir_list = []
fesc_list = []
len_data = len(data["lambda"])
# number of lines between 2 models
steps = 25
counter = start
while counter < len_data:
tau_att_list.append(
np.array(data[tau_att_colname][counter : counter + steps])
)
tau_list.append(np.array(data[tau_colname][counter : counter + steps]))
fsca_list.append(np.array(data[fsca_colname][counter : counter + steps]))
fdir_list.append(np.array(data[fdir_colname][counter : counter + steps]))
fesc_list.append(np.array(data[fesc_colname][counter : counter + steps]))
counter += int(2 * steps)
# Convert to np.array and take transpose to have (wvl, tau_V)
tau_att_table = np.array(tau_att_list).T
tau_table = np.array(tau_list).T
fsca_table = np.array(fsca_list).T
fdir_table = np.array(fdir_list).T
fesc_table = np.array(fesc_list).T
# wavelength grid. It is the same for all the models
wvl = np.array(data["lambda"][0:25])
self.wvl_grid = wvl
# Grid for the optical depth
tau_V_grid = np.array(
[
0.25,
0.5,
0.75,
1.0,
1.5,
2.0,
2.5,
3.0,
3.5,
4.0,
4.5,
5.0,
5.5,
6.0,
7.0,
8.0,
9.0,
10.0,
15.0,
20.0,
25.0,
30.0,
35.0,
40.0,
45.0,
50.0,
]
)
# Create a 2D tabular model for tau_att and all flux fraction
tab = tabular_model(2, name="2D_table")
# Values corresponding to the x and y grid points
gridpoints = (wvl, tau_V_grid)
self.model = tab(
gridpoints,
lookup_table=tau_att_table,
name="tau_att_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.tau = tab(
gridpoints,
lookup_table=tau_table,
name="tau_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.fsca = tab(
gridpoints,
lookup_table=fsca_table,
name="fsca_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.fdir = tab(
gridpoints,
lookup_table=fdir_table,
name="fdir_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.fesc = tab(
gridpoints,
lookup_table=fesc_table,
name="fesc_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
# In Python 2: super(WG00, self)
# In Python 3: super() but super(WG00, self) still works
super(WG00, self).__init__(tau_V=tau_V)
def evaluate(self, x, tau_V):
"""
WG00 function
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
Attx: np array (float)
Att(x) attenuation curve [mag]
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
        # polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
taux = self.model(xinterp, yinterp)
# Convert optical depth to attenuation
Attx = 1.086 * taux
return Attx
def get_extinction(self, x, tau_V):
"""
Return the extinction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
ext: np array (float)
ext(x) extinction curve [mag]
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
        # polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.tau(xinterp, yinterp) * 1.086
def get_fsca(self, x, tau_V):
"""
Return the scattered flux fraction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fsca: np array (float)
fsca(x) scattered flux fraction
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
        # polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.fsca(xinterp, yinterp)
def get_fdir(self, x, tau_V):
"""
Return the direct attenuated stellar flux fraction at a given
wavelength and V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fsca: np array (float)
fsca(x) scattered flux fraction
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
        # polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.fdir(xinterp, yinterp)
def get_fesc(self, x, tau_V):
"""
Return the total escaping flux fraction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fsca: np array (float)
fsca(x) scattered flux fraction
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
        # polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.fesc(xinterp, yinterp)
def get_albedo(self, x):
"""
Return the albedo in function of wavelength for the corresponding
dust type (SMC or MW). The albedo gives the probability a photon
is scattered from a dust grain.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
Returns
-------
albedo: np array (float)
alb(x) albedo
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomical coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
alb_MW = np.array(
[
0.320,
0.409,
0.481,
0.526,
0.542,
0.536,
0.503,
0.432,
0.371,
0.389,
0.437,
0.470,
0.486,
0.499,
0.506,
0.498,
0.502,
0.491,
0.481,
0.500,
0.473,
0.457,
0.448,
0.424,
0.400,
]
)
alb_SMC = np.array(
[
0.400,
0.449,
0.473,
0.494,
0.508,
0.524,
0.529,
0.528,
0.523,
0.520,
0.516,
0.511,
0.505,
0.513,
0.515,
0.498,
0.494,
0.489,
0.484,
0.493,
0.475,
0.465,
0.439,
0.417,
0.400,
]
)
if self.dust_type == "smc":
albedo = alb_SMC
elif self.dust_type == "mw":
albedo = alb_MW
tab = tabular_model(1, name="Tabular1D")
alb_fit = tab(
self.wvl_grid,
lookup_table=albedo,
name="albedo",
bounds_error=False,
fill_value=None,
method="linear",
)
xinterp = 1e4 * x
return alb_fit(xinterp)
def get_scattering_phase_function(self, x):
"""
Return the scattering phase function in function of wavelength for the
corresponding dust type (SMC or MW). The scattering phase
function gives the angle at which the photon scatters.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
Returns
-------
g: np array (float)
g(x) scattering phase function
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
        # polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
g_MW = np.array(
[
0.800,
0.783,
0.767,
0.756,
0.745,
0.736,
0.727,
0.720,
0.712,
0.707,
0.702,
0.697,
0.691,
0.685,
0.678,
0.646,
0.624,
0.597,
0.563,
0.545,
0.533,
0.511,
0.480,
0.445,
0.420,
]
)
g_SMC = np.array(
[
0.800,
0.783,
0.767,
0.756,
0.745,
0.736,
0.727,
0.720,
0.712,
0.707,
0.702,
0.697,
0.691,
0.685,
0.678,
0.646,
0.624,
0.597,
0.563,
0.545,
0.533,
0.511,
0.480,
0.445,
0.420,
]
)
if self.dust_type == "smc":
g = g_SMC
elif self.dust_type == "mw":
g = g_MW
tab = tabular_model(1, name="Tabular1D")
g_fit = tab(
self.wvl_grid,
lookup_table=g,
name="albedo",
bounds_error=False,
fill_value=None,
method="linear",
)
xinterp = 1e4 * x
return g_fit(xinterp)
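# Minimal usage sketch (mirrors the class docstring example above; assumes the
# bundled WG00 data files ship with the package):
#
#   att_model = WG00(tau_V=1.1, geometry='cloudy', dust_type='mw',
#                    dust_distribution='clumpy')
#   att_v = att_model(0.55)  # attenuation at 0.55 micron, in mag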
| 26.47962 | 85 | 0.494535 | [
"BSD-3-Clause"
] | gbrammer/dust_attenuation | dust_attenuation/radiative_transfer.py | 19,491 | Python |
"""
Extract CLOS / NLOS lookup.
Written by Ed Oughton.
March 2021
"""
import os
import configparser
import json
import math
import glob
import random
import numpy as np
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.geometry import Point, Polygon, box, LineString
from shapely.ops import transform
import rasterio
# import networkx as nx
from rasterio.warp import calculate_default_transform, reproject, Resampling
from rasterio.mask import mask
from rasterstats import zonal_stats, gen_zonal_stats
from tqdm import tqdm
grass7bin = r'"C:\Program Files\GRASS GIS 7.8\grass78.bat"'
os.environ['GRASSBIN'] = grass7bin
os.environ['PATH'] += ';' + r"C:\Program Files\GRASS GIS 7.8\lib"
from grass_session import Session
from grass.script import core as gcore
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), "script_config.ini"))
BASE_PATH = CONFIG["file_locations"]["base_path"]
DATA_RAW = os.path.join(BASE_PATH, "raw")
DATA_INTERMEDIATE = os.path.join(BASE_PATH, "intermediate")
DATA_PROCESSED = os.path.join(BASE_PATH, "processed")
def load_raster_tile_lookup(iso3):
"""
Load in the preprocessed raster tile lookup.
Parameters
----------
iso3 : string
Country iso3 code.
Returns
-------
lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'raster_lookup.csv')
data = pd.read_csv(path)
    data = data.to_dict('records')
lookup = {}
for item in data:
coords = (item['x1'], item['y1'], item['x2'], item['y2'])
lookup[coords] = item['path']
return lookup
def generate_grid(iso3, side_length):
"""
Generate a spatial grid for the chosen country.
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
if not os.path.exists(directory):
os.makedirs(directory)
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
return
filename = 'national_outline.shp'
path = os.path.join(DATA_INTERMEDIATE, iso3, filename)
country_outline = gpd.read_file(path, crs="epsg:4326")
country_outline.crs = "epsg:4326"
country_outline = country_outline.to_crs("epsg:3857")
xmin, ymin, xmax, ymax = country_outline.total_bounds
polygons = manually_create_grid(
xmin, ymin, xmax, ymax, side_length, side_length
)
grid = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")#[:100]
intersection = gpd.overlay(grid, country_outline, how='intersection')
intersection.crs = "epsg:3857"
intersection['area_km2'] = intersection['geometry'].area / 1e6
intersection = intersection.to_crs("epsg:4326")
intersection.to_file(path_output, crs="epsg:4326")
return intersection
def manually_create_grid(xmin, ymin, xmax, ymax, length, wide):
"""
"""
cols = list(range(int(np.floor(xmin)), int(np.ceil(xmax - int(wide))), int(wide)))
rows = list(range(int(np.floor(ymin)), int(np.ceil(ymax)), int(length)))
polygons = []
for x in cols:
for y in rows:
polygons.append(
Polygon([(x, y), (x+wide, y), (x+wide, y-length), (x, y-length)])
)
return polygons
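def _example_manually_create_grid():
    """
    Illustrative sketch only (not called by the pipeline): splitting a
    0-100 x 0-100 box into 50 m cells yields square 50 x 50 polygons; the
    cells are later clipped to the country outline with gpd.overlay, so
    exact edge alignment is not required here.
    """
    cells = manually_create_grid(0, 0, 100, 100, 50, 50)
    assert all(abs(cell.area - 2500.0) < 1e-6 for cell in cells)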
def find_tile(polygon, tile_lookup):
"""
Parameters
----------
polygon : tuple
The bounds of the modeling region.
tile_lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
Return
------
    output : string
        The file path to the matching raster tile. If more than one tile
        matches, a warning is printed and only the first match is returned.
"""
output = []
poly_bbox = box(polygon[0], polygon[1], polygon[2], polygon[3])
for key, value in tile_lookup.items():
bbox = box(key[0], key[1], key[2], key[3])
if bbox.intersects(poly_bbox):
output.append(value)
if len(output) == 1:
return output[0]
elif len(output) > 1:
print('Problem with find_tile returning more than 1 path')
return output[0]
else:
print('Problem with find_tile: Unable to find raster path')
def add_id_range_data_to_grid(iso3, tile_lookup, side_length):
"""
Query the Digital Elevation Model to get an estimated interdecile
range for each grid square.
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
filename = 'grid_final.shp'
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
        return gpd.read_file(path_output, crs='epsg:4326')
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path = os.path.join(directory, filename)
    grid = gpd.read_file(path, crs='epsg:4326')
output = []
for idx, grid_tile in grid.iterrows():
path_input = find_tile(
grid_tile['geometry'].bounds,
tile_lookup
)
stats = next(gen_zonal_stats(
grid_tile['geometry'],
path_input,
add_stats={
'interdecile_range': interdecile_range
},
nodata=0
))
id_range_m = stats['interdecile_range']
output.append({
'type': 'Feature',
'geometry': grid_tile['geometry'],
'properties': {
'id_range_m': id_range_m,
'area_km2': grid_tile['area_km2'],
# 'pop_density_km2': grid_tile['pop_densit'],
# 'population': grid_tile['population'],
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
output = output.replace([np.inf, -np.inf], np.nan)
output = output[output.geometry.notnull()]
output.to_file(path_output, crs="epsg:4326")
return output
def interdecile_range(x):
"""
Get range between bottom 10% and top 10% of values.
This is from the Longley-Rice Irregular Terrain Model.
Code here: https://github.com/edwardoughton/itmlogic
Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf
Parameters
----------
x : list
Terrain profile values.
Returns
-------
interdecile_range : int
The terrain irregularity parameter.
"""
q90, q10 = np.percentile(x, [90, 10])
interdecile_range = int(round(q90 - q10, 0))
return interdecile_range
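def _example_interdecile_range():
    """
    Illustrative check only (not used by the pipeline): a flat profile has an
    interdecile range of 0, while a 0..100 ramp gives 90 - 10 = 80.
    """
    assert interdecile_range(np.zeros(100)) == 0
    assert interdecile_range(np.arange(101)) == 80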
def estimate_terrain_deciles(grid):
"""
"""
# terrain_lookup = grid.loc[grid['area_km2'] > 1000].reset_index()
terrain_lookup = grid
terrain_lookup['decile'] = pd.qcut(terrain_lookup['id_range_m'], 10, labels=False)
terrain_lookup = terrain_lookup[['decile', 'id_range_m']]
terrain_lookup = terrain_lookup.groupby(['decile']).min()
terrain_lookup = terrain_lookup['id_range_m'].to_list()
return terrain_lookup
def select_grid_sampling_areas(iso3, grid, lut):
"""
"""
for i in range(1, 11):
if i == 1:
grid.loc[(grid['id_range_m'] < lut[1]), 'decile'] = str(i)
value_name = '0-{}'.format(str(lut[1]))
grid.loc[(grid['id_range_m'] < lut[1]), 'value'] = value_name
elif i <= 9:
grid.loc[(
grid['id_range_m'] >= lut[i-1]) &
(grid['id_range_m'] <= lut[i]), 'decile'] = str(i)
value_name = '{}-{}'.format(str(lut[i-1]), str(lut[i]))
grid.loc[(
grid['id_range_m'] >= lut[i-1]) &
(grid['id_range_m'] <= lut[i]), 'value'] = value_name
elif i == 10:
grid.loc[(grid['id_range_m'] > lut[i-1]), 'decile'] = str(i)
value_name = '>{}'.format(str(lut[i-1]))
grid.loc[(grid['id_range_m'] > lut[i-1]), 'value'] = value_name
else:
continue
np.random.seed(2)
grid = grid.loc[grid['area_km2'] > 2400].reset_index()
sampling_areas = grid.groupby(['decile']).apply(lambda x: x.sample(1)).reset_index(drop=True)
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_area')
if not os.path.exists(directory):
os.makedirs(directory)
sampling_areas.to_file(os.path.join(directory, 'sampling_areas.shp'))
sampling_areas.crs = 'epsg:4326'
return sampling_areas
def get_points(iso3, sampling_areas, tile_lookup, point_spacing):
"""
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
if not os.path.exists(directory):
os.makedirs(directory)
sampling_areas = sampling_areas.to_crs("epsg:3857")
for idx, sampling_area in sampling_areas.iterrows():
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
filename = "{}-{}".format(lon, lat)
xmin, ymin, xmax, ymax = sampling_area['geometry'].bounds
polygons = manually_create_grid(xmin, ymin, xmax, ymax, point_spacing, point_spacing)
#make geopandas dataframes
grid_sample = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")
boundary = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},
crs="epsg:3857", index=[0])
#only get points within the tile boundary
grid_sample = gpd.overlay(grid_sample, boundary, how='intersection')
grid_sample = grid_sample.to_crs("epsg:4326") #convert to lon lat
        ##get a random sampling point in each grid sample tile
sampling_points = find_points(iso3, grid_sample, tile_lookup, filename)#[:1]
        ##convert back to lon/lat (epsg:4326) for viewshedding
sampling_points = sampling_points.to_crs("epsg:4326")
path_output = os.path.join(directory, filename + '.shp')
sampling_points.to_file(path_output)
return sampling_points
def find_points(iso3, grid_sample, tile_lookup, filename):
"""
"""
filename_2 = filename + '.shp'
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
path_output = os.path.join(directory, filename_2)
if os.path.exists(path_output):
return gpd.read_file(path_output, crs='epsg:4326')
output = []
for idx, grid_tile in grid_sample.iterrows():
min_x, min_y, max_x, max_y = grid_tile['geometry'].bounds
geom = Point(random.uniform(min_x, max_x), random.uniform(min_y, max_y))
output.append({
'type': 'Feature',
'geometry': geom,
'properties': {
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
return output
def generate_viewsheds(iso3, sampling_areas, sampling_points):
"""
"""
sampling_areas = sampling_areas.to_crs("epsg:3857")
#set output folder
folder_out_viewsheds = os.path.join(DATA_INTERMEDIATE, iso3, 'viewsheds')
if not os.path.exists(folder_out_viewsheds):
os.makedirs(folder_out_viewsheds)
for idx, sampling_area in tqdm(sampling_areas.iterrows(),
total=sampling_areas.shape[0]):
output = []
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
area_filename = "{}-{}".format(lon, lat)
print('--Working on {}'.format(area_filename))
##load sampling points
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
points = gpd.read_file(os.path.join(directory, area_filename + '.shp'))#[:2]
##convert to lon lat to get correct raster tile
sampling_area_df = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},
crs="epsg:3857", index=[0])
sampling_area_df = sampling_area_df.to_crs("epsg:4326")
for idx, item in sampling_area_df.iterrows():
#needs a loop because the data structure needs a series
path_input = find_tile(item['geometry'].bounds, tile_lookup)
for idx, point in tqdm(points.iterrows(), total=points.shape[0]):
results = []
lon = point['geometry'].representative_point().coords[0][0]
lat = point['geometry'].representative_point().coords[0][1]
filename2 = "{}-{}".format(lon, lat)
path_output = os.path.join(folder_out_viewsheds, filename2)
file_path = os.path.join(path_output, 'location', 'PERMANENT',
'viewsheds', filename2 + '.tif')
x = point['geometry'].coords[0][0]
y = point['geometry'].coords[0][1]
if not os.path.exists(file_path):
try:
viewshed((x, y), path_input, path_output, filename2, 45000, 'epsg:4326')
except:
print('--Viewshed already exists')
seen = set()
for idx, node in tqdm(points.iterrows(), total=points.shape[0]):
x2 = node['geometry'].coords[0][0]
y2 = node['geometry'].coords[0][1]
link = '{}_{}_{}_{}'.format(x, y, x2, y2)
if link in seen:
continue
dist = find_distance((x, y), (x2, y2))
if dist < 10:
continue
los = check_los(file_path, (x2, y2))
results.append({
'sampling_area': area_filename,
'point_id': filename2,
'node_id': '{}_{}'.format(x2, y2),
'distance': dist,
'id_range_m': sampling_area['id_range_m'],
'decile': sampling_area['decile'],
'los': los,
})
seen.add('{}_{}_{}_{}'.format(x, y, x2, y2))
seen.add('{}_{}_{}_{}'.format(x2, y2, x, y))
output = output + results
output = pd.DataFrame(output)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')
if not os.path.exists(folder):
os.makedirs(folder)
output.to_csv(os.path.join(folder, area_filename + '.csv'), index=False)
def viewshed(point, path_input, path_output, tile_name, max_distance, crs):
"""
Perform a viewshed using GRASS.
Parameters
---------
point : tuple
The point being queried.
    path_input : string
        The file path of the digital elevation raster tile that is
        registered with GRASS and used as the viewshed input.
path_output : string
The directory path for the output folder.
tile_name : string
The name allocated to the viewshed tile.
max_distance : int
The maximum distance a path can be.
crs : string
The coordinate reference system in use.
Returns
-------
    None
        The computed viewshed raster is written out as a GeoTIFF inside the
        GRASS location created at ``path_output``.
"""
with Session(gisdb=path_output, location="location", create_opts=crs):
# print('parse command')
# print(gcore.parse_command("g.gisenv", flags="s"))#, set="DEBUG=3"
# print('r.external')
# now link a GDAL supported raster file to a binary raster map layer,
# from any GDAL supported raster map format, with an optional title.
# The file is not imported but just registered as GRASS raster map.
gcore.run_command('r.external', input=path_input, output=tile_name, overwrite=True)
# print('r.external.out')
#write out as geotiff
gcore.run_command('r.external.out', directory='viewsheds', format="GTiff")
# print('r.region')
#manage the settings of the current geographic region
gcore.run_command('g.region', raster=tile_name)
# print('r.viewshed')
#for each point in the output that is NULL: No LOS
gcore.run_command('r.viewshed', #flags='e',
input=tile_name,
output='{}.tif'.format(tile_name),
coordinate= [point[0], point[1]],
observer_elevation=30,
target_elevation=30,
memory=5000,
overwrite=True,
quiet=True,
max_distance=max_distance,
# verbose=True
)
def check_los(path_input, point):
"""
Find potential LOS high points.
Parameters
----------
path_input : string
File path for the digital elevation raster tile.
point : tuple
Coordinate point being queried.
Returns
-------
los : string
The Line of Sight (los) of the path queried.
"""
with rasterio.open(path_input) as src:
x = point[0]
y = point[1]
for val in src.sample([(x, y)]):
if np.isnan(val):
# print('is nan: {} therefore nlos'.format(val))
los = 'nlos'
return los
else:
# print('is not nan: {} therefore los'.format(val))
los ='clos'
return los
def find_distance(point1, point2):
"""
"""
point1 = Point(point1)
point1 = gpd.GeoDataFrame({'geometry': [point1]}, index=[0])
point1 = point1.set_crs('epsg:4326')
point1 = point1.to_crs('epsg:3857')
point2 = Point(point2)
point2 = gpd.GeoDataFrame({'geometry': [point2]}, index=[0])
point2 = point2.set_crs('epsg:4326')
point2 = point2.to_crs('epsg:3857')
dist = LineString([
(point1['geometry'][0].coords[0][0], point1['geometry'][0].coords[0][1]),
(point2['geometry'][0].coords[0][0], point2['geometry'][0].coords[0][1])
]).length
return dist
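def _example_find_distance():
    """
    Hedged sanity check (illustrative only): one degree of longitude at the
    equator is roughly 111.32 km once projected to EPSG:3857, which is the
    planar distance this helper reports.
    """
    dist = find_distance((0, 0), (1, 0))
    assert abs(dist - 111319.49) < 1.0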
def collect_results(iso3, sampling_areas):
"""
"""
sampling_areas = sampling_areas.to_crs("epsg:3857")#[:1]
output = []
#set output folder
for idx, sampling_area in sampling_areas.iterrows():
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
filename = "{}-{}".format(lon, lat)
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')
data = pd.read_csv(os.path.join(directory, filename + '.csv'))
seen = set()
interval_size = 2500
for distance_lower in range(0, 45000, interval_size):
distance_upper = distance_lower + interval_size
clos = 0
nlos = 0
for idx, item in data.iterrows():
path_id = '{}_{}_{}'.format(
item['point_id'],
item['node_id'],
item['distance']
)
if not path_id in seen:
if item['distance'] < distance_upper:
if item['los'] == 'clos':
clos += 1
elif item['los'] == 'nlos':
nlos += 1
else:
print('Did not recognize los')
seen.add(path_id)
if clos > 0:
clos_probability = (clos / (clos + nlos))
else:
clos_probability = 'no data'
if nlos > 0:
nlos_probability = (nlos / (clos + nlos))
else:
nlos_probability = 'no data'
output.append({
'decile': item['decile'],
'id_range_m': item['id_range_m'],
'distance_lower': distance_lower,
'distance_upper': distance_upper,
'total_samples': clos + nlos,
'clos_probability': clos_probability,
'nlos_probability': nlos_probability,
})
output = pd.DataFrame(output)
folder = os.path.join(DATA_INTERMEDIATE, iso3)
output.to_csv(os.path.join(folder, 'los_lookup.csv'), index=False)
if __name__ == "__main__":
countries = [
("PER", 5e4, 25e2),
("IDN", 5e4, 25e2),
]
for country in countries:
iso3 = country[0]
side_length = country[1]
point_spacing = country[2]
##Load the raster tile lookup
tile_lookup = load_raster_tile_lookup(iso3)
##Generate grids
generate_grid(iso3, side_length) #1e5
# ##Add interdecile range to grid
grid = add_id_range_data_to_grid(iso3, tile_lookup, side_length)
##Get the terrain deciles
terrain_values = estimate_terrain_deciles(grid)
##Get the grid tile samples
sampling_areas = select_grid_sampling_areas(iso3, grid, terrain_values)#[:1]
##Generate the terrain lookup
sampling_points = get_points(iso3, sampling_areas, tile_lookup, point_spacing)#[:1]
##Process viewsheds
generate_viewsheds(iso3, sampling_areas, sampling_points)
## Collect results
collect_results(iso3, sampling_areas)
| 29.416435 | 97 | 0.593675 | [
"MIT"
] | edwardoughton/e3nb | scripts/los.py | 21,121 | Python |
# Generated by Django 3.1.4 on 2021-01-03 18:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Attribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Training',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('result', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.lab')),
],
),
migrations.CreateModel(
name='TrainingValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.IntegerField(null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('attribute_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.attribute')),
('training_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.training')),
],
),
]
| 41.238095 | 130 | 0.592379 | [
"MIT"
] | darkun7/Evaluate-CS-Student-as-Lecture-Assistant | home/migrations/0002_attribute_training_trainingvalue.py | 1,732 | Python |
# This script is used to parse BOOST special function test data into something
# we can easily import in numpy.
import re
import os
# Where to put the data (directory will be created)
DATA_DIR = 'scipy/special/tests/data/boost'
# Where to pull out boost data
BOOST_SRC = "boostmath/test"
CXX_COMMENT = re.compile(r'^\s+//')
DATA_REGEX = re.compile(r'^\s*/*\{*\s*SC_')
ITEM_REGEX = re.compile(r'[+-]?\d*\.?\d+(?:[eE][+-]?\d+)?')
HEADER_REGEX = re.compile(
r'const boost::array\<boost::array\<.*, (\d+)\>, (\d+)\> ([a-zA-Z_\d]+)')
IGNORE_PATTERNS = [
# Makes use of ldexp and casts
"hypergeometric_1F1_big_double_limited.ipp",
"hypergeometric_1F1_big_unsolved.ipp",
# Makes use of numeric_limits and ternary operator
"beta_small_data.ipp",
# Doesn't contain any data
"almost_equal.ipp",
# Derivatives functions don't exist
"bessel_y01_prime_data.ipp",
"bessel_yn_prime_data.ipp",
"sph_bessel_prime_data.ipp",
"sph_neumann_prime_data.ipp",
# Data files not needed by scipy special tests.
"ibeta_derivative_",
r"ellint_r[cdfjg]_[^d]",
r"ellint_d2?_",
"jacobi_",
"heuman_lambda_",
"hypergeometric_",
"nct_",
r".*gammap1m1_",
"trig_",
"powm1_data.ipp",
]
def _raw_data(line):
items = line.split(',')
l = []
for item in items:
m = ITEM_REGEX.search(item)
if m:
q = m.group(0)
l.append(q)
return l
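def _example_raw_data():
    """
    Illustrative only (the row format is assumed, not taken from the Boost
    sources): a C++ data row is reduced to its numeric literals as strings.
    """
    assert _raw_data("  {{ SC_(0.5), SC_(1.25e-3) }},") == ['0.5', '1.25e-3']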
def parse_ipp_file(filename):
print(filename)
a = open(filename, 'r')
lines = a.readlines()
data = {}
i = 0
while (i < len(lines)):
line = lines[i]
m = HEADER_REGEX.search(line)
if m:
d = int(m.group(1))
n = int(m.group(2))
print(f"d = {d}, n = {n}")
cdata = []
i += 1
line = lines[i]
# Skip comments
while CXX_COMMENT.match(line):
i += 1
line = lines[i]
while DATA_REGEX.match(line):
cdata.append(_raw_data(line))
i += 1
line = lines[i]
# Skip comments
while CXX_COMMENT.match(line):
i += 1
line = lines[i]
if not len(cdata) == n:
raise ValueError("parsed data: %d, expected %d" % (len(cdata), n))
data[m.group(3)] = cdata
else:
i += 1
return data
def dump_dataset(filename, data):
fid = open(filename, 'w')
try:
for line in data:
fid.write(f"{' '.join(line)}\n")
finally:
fid.close()
def dump_datasets(filename):
base, ext = os.path.splitext(os.path.basename(filename))
base += f'_{ext[1:]}'
datadir = os.path.join(DATA_DIR, base)
os.makedirs(datadir)
datasets = parse_ipp_file(filename)
for k, d in datasets.items():
print(k, len(d))
dfilename = os.path.join(datadir, k) + '.txt'
dump_dataset(dfilename, d)
if __name__ == '__main__':
for filename in sorted(os.listdir(BOOST_SRC)):
# Note: Misses data in hpp files (e.x. powm1_sqrtp1m1_test.hpp)
if filename.endswith(".ipp"):
if any(re.match(pattern, filename) for pattern in IGNORE_PATTERNS):
continue
path = os.path.join(BOOST_SRC, filename)
print(f"================= {path} ===============")
dump_datasets(path)
| 27.515873 | 82 | 0.552062 | [
"BSD-3-Clause"
] | ikamensh/scipy | scipy/special/utils/convert.py | 3,467 | Python |
import json
import os
from time import sleep
import requests
import pyrominfo.pyrominfo.snes as snes
from shutil import copy
from pyrominfo.pyrominfo import nintendo64
def n64_info(filename):
n64_parser = nintendo64.Nintendo64Parser()
props = n64_parser.parse(filename)
return props
def snes_info(filename):
snes_parser = snes.SNESParser()
props = snes_parser.parse(filename)
return props
def get_console(argument):
switcher = {
'sfc': 'SNES',
'smc': 'SNES',
'md': '',
'bin': '',
'gb': 'GB',
'gbc': 'GBC',
'nes': 'NES',
'z64': 'N64',
}
return switcher.get(argument)
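# Example (illustrative): get_console('z64') returns 'N64', while an unknown
# extension returns None because the switcher dict has no default value.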
def giant_bomb_request(title, api_key):
headers = {'User-Agent': 'gripper'}
params = {
'resources': 'game',
'query': title,
'api_key': api_key,
'format': 'json'
}
response = requests.get(url='http://www.giantbomb.com/api/search/', headers=headers, params=params)
return json.loads(response.text)
def rip_game():
while True:
path = '/RETRODE'
api_key = os.environ['api-key']
files = os.listdir(path)
files.remove('RETRODE.CFG')
breakout = False
console = get_console(files[0].split('.')[-1])
filename = f'{path}/{files[0]}'
if console == 'N64':
rom_info = n64_info(filename)
if console == 'SNES':
rom_info = snes_info(filename)
title = rom_info["title"]
search_results = giant_bomb_request(title, api_key)
for results in search_results['results']:
if breakout is True:
break
aliases = str(results.get('aliases')).lower().splitlines()
            if title.lower() in aliases or title.lower() == results['name'].lower():
for platform in results['platforms']:
if platform['abbreviation'] == 'SNES':
                        if not os.path.exists(f'./{title} - {rom_info["region"]}'):
os.mkdir(f'./{title} - {rom_info["region"]}')
for file in files:
destination_file = f'./{title} - {rom_info["region"]}/{title}.{file.split(".")[-1]}'
if not os.path.exists(destination_file):
copy(filename, destination_file)
breakout = True
break
sleep(5)
# don't run code while testing the container
if __name__ == '__main__':
sleep(900)
#rip_game()
| 29.522727 | 116 | 0.531178 | [
"MIT"
] | mugenoesis/Gripper | main.py | 2,598 | Python |
import numpy as np
import os
import time
import argparse
import PruneAndSearch as algs
def get_args():
parser = argparse.ArgumentParser (
prog='PruneAndSearch',
description='Implementation of the Prune and Search Algorithm. ',
usage='python main.py { --rand RAND | --file FILE | --list LIST | --test [--trial TRIAL] [--vals VALS] [--verb] } [--seed SEED]'
)
parser.add_argument('-n', '--small', default=None, type=int, help='The N-th smallest element to find in the values. (default: {})'.format('MEDIAN'))
parser.add_argument('-r', '--rand', default=None, type=int, help='Generate N random numbers in range 1 - 10,000. (default: {})'.format('DISABLED'))
parser.add_argument('-f', '--file', default=None, help='Read in a list from a text file. (default: {})'.format('DISABLED'))
parser.add_argument('-l', '--list', default=None, type=int, nargs='+', help='Provide input as a list from the command line. (default: {})'.format('DISABLED'))
parser.add_argument('-x', '--seed', default=123, type=int, help='Seed for Numpy RNG. (default: {})'.format(123))
parser.add_argument('-t', '--test', default=False, action='store_true', help='Perform a timed test, random trials T times. (default: {})'.format('DISABLED'))
parser.add_argument('-T', '--trial', default=1000, type=int, help='Number of timed trials to conduct. (default: {})'.format(1000))
parser.add_argument('-v', '--vals', default=100, type=int, help='Number of random values to during testing. (default: {})'.format(100))
parser.add_argument('-V', '--verb', default=False, action='store_true', help='Verbose output. (default: {})'.format('DISABLED'))
args = parser.parse_args()
count = 0
if args.rand != None: count += 1
if args.file != None: count += 1
if args.list != None: count += 1
if args.test: count += 1
if count > 1: print("\n[ERROR] Too many arguments provided!!\n")
if count == 0: print("\n[ERROR] No arguments provided!!\n")
if count != 1:
parser.print_help()
print("\n Please provide the program with an argument using one of the following:\n")
print("\t python main.py --rand 20")
print("\t python main.py --file a.data")
print("\t python main.py --list 1 2 3 4 5 6 7 8")
print("\t python main.py --test --trial 300 --vals 100 --verb --seed 123")
print(" ")
exit()
return args
def get_list(args):
# Simple getter function to get some list
# based on the arguments passed in.
if args.rand != None:
values = np.random.randint(1, 10000, size=args.rand)
print("Generated {} random values between 1 - 10,000.".format(args.rand))
return values
if args.file != None:
if not os.path.exists(args.file):
print("[ERROR] File ``{}`` does not exist!!".format(args.file))
print("\t Please provide the path to a file.")
exit()
values = np.loadtxt(args.file, dtype=np.int32)
return values
if args.list != None:
values = np.asarray(args.list, dtype=np.int32)
return values
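def reference_nth_smallest(values, n):
    """
    Hedged helper (not part of the original algorithms module): an O(n log n)
    reference answer obtained by sorting, useful for spot-checking either
    search. Note that n is 1-indexed, matching how findVal is used below.
    """
    return sorted(values)[n - 1]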
def test_algorithm(seed, numTrials=1000, numVals=100, maxVal=10000, verbose=True):
# Run a series of trials on both algorithms.
numVals = int(numVals) # 1e6
maxVal = int(maxVal) # 1e10
if verbose:
print("\n")
print(" -- Prune and Search Algorithm -- ")
print(" ================================ ")
print(" Random Numbers Seed = {} ".format(seed) )
print(" Number of Trials = {} ".format(numTrials))
print(" Number of Values in List = {} ".format(numVals) )
print(" Maximum Value in List = {} ".format(maxVal) )
print("\n")
# Seed The first trial for consistency.
np.random.seed( seed )
# Keep a buffer of the returned finds for later comparison.
SortAndSearchAnsBuffer = []
SortAndSearchTimeBuffer = []
# Begin the trials!
print("Beginning {} Trial on {} elements for Sort And Search . . . ".format(numTrials, numVals), end='', flush=True)
for _ in range(numTrials):
randomList = np.random.randint(maxVal, size=numVals)
findVal = np.random.randint(1, numVals+1)
startTime = time.time()
ansVal = algs.SortAndSearch(randomList, findVal)
endTime = time.time()
SortAndSearchAnsBuffer.append(ansVal)
SortAndSearchTimeBuffer.append( endTime - startTime )
print("\u0394 : {:.4f}, \u03bc : {:.6f} \u00B1 {:.6f} ".format(
np.sum( SortAndSearchTimeBuffer ),
np.mean( SortAndSearchTimeBuffer ),
np.std( SortAndSearchTimeBuffer )
))
# Seed The first trial for consistency.
np.random.seed( seed )
# Keep a buffer of the returned finds for later comparison.
PruneAndSearchAnsBuffer = []
PruneAndSearchTimeBuffer = []
# Begin the trials!
print("Beginning {} Trial on {} elements for Prune And Search . . . ".format(numTrials, numVals), end='', flush=True)
for _ in range(numTrials):
randomList = np.random.randint(maxVal, size=numVals)
findVal = np.random.randint(1, numVals+1)
startTime = time.time()
ansVal = algs.PruneAndSearch(randomList, findVal)
endTime = time.time()
PruneAndSearchAnsBuffer.append(ansVal)
PruneAndSearchTimeBuffer.append( endTime - startTime )
print("\u0394 : {:.4f}, \u03bc : {:.6f} \u00B1 {:.6f} ".format(
np.sum( PruneAndSearchTimeBuffer ),
np.mean( PruneAndSearchTimeBuffer ),
np.std( PruneAndSearchTimeBuffer )
))
#for a,b in zip(SortAndSearchAnsBuffer, PruneAndSearchAnsBuffer):
# print(a, b, " " if a == b else "\t!!X!!")
print("\nDid the Algorithms find the same solutions? ==> {}\n".format(PruneAndSearchAnsBuffer == SortAndSearchAnsBuffer))
return
def main():
# Fetch Arguments.
args = get_args()
# Seed the RNG.
np.random.seed(args.seed)
# Perform a timed trial and return.
if args.test:
test_algorithm(args.seed, numTrials=args.trial, numVals=args.vals, verbose=args.verb)
return
# From the args get the list.
values = get_list(args)
    # Set the n-value to find; default to the median if --small was not given.
findVal = args.small if args.small != None else len(values) // 2
print("\n")
print(" -- Prune and Search Algorithm -- ")
print(" ================================ ")
print(" Find The {}-Smallest Value ".format(findVal))
print(" In The List = ")
elPerRow = 5
for idx in range(0, len(values), elPerRow):
print(" ", *values[ idx : idx+elPerRow ])
print("\n")
# Naive solution in O( n log n ).
print("Beginning Sort And Search . . . ", end='', flush=True)
startTime = time.time()
ansVal_A = algs.SortAndSearch(values, findVal)
endTime = time.time()
print("\u0394 : {:.6f}".format( endTime - startTime ))
print("Beginning Prune And Search . . . ", end='', flush=True)
startTime = time.time()
ansVal_B = algs.PruneAndSearch(values, findVal)
endTime = time.time()
print("\u0394 : {:.6f}".format( endTime - startTime ))
print("\nDid the Algorithms find the same solutions? ==> {}\n".format(ansVal_A == ansVal_B))
print("The {}-Smallest Value is {}".format(findVal, ansVal_A))
return
if __name__ == '__main__':
main()
| 37.870813 | 169 | 0.576374 | [
"Apache-2.0"
] | kothiga/N-Smallest | main.py | 7,915 | Python |
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
self.assertTrue(lock.release())
mock_exists.return_value = True
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
| 31.141243 | 119 | 0.623458 | [
"Apache-2.0"
] | Bhav97/pylink | tests/unit/test_jlock.py | 11,024 | Python |
from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-hexBinary-maxLength-1-NS"
@dataclass
class NistschemaSvIvListHexBinaryMaxLength1:
class Meta:
name = "NISTSchema-SV-IV-list-hexBinary-maxLength-1"
namespace = "NISTSchema-SV-IV-list-hexBinary-maxLength-1-NS"
value: List[bytes] = field(
default_factory=list,
metadata={
"max_length": 5,
"tokens": True,
"format": "base16",
}
)
| 24.857143 | 68 | 0.641762 | [
"MIT"
] | tefra/xsdata-w3c-tests | output/models/nist_data/list_pkg/hex_binary/schema_instance/nistschema_sv_iv_list_hex_binary_max_length_1_xsd/nistschema_sv_iv_list_hex_binary_max_length_1.py | 522 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayUserMpointPreconsultModel import AlipayUserMpointPreconsultModel
class AlipayUserMpointPreconsultRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayUserMpointPreconsultModel):
self._biz_content = value
else:
self._biz_content = AlipayUserMpointPreconsultModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.user.mpoint.preconsult'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
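# Illustrative construction only (values are placeholders; the available fields
# depend on the Alipay API definition of the model class):
#
#   request = AlipayUserMpointPreconsultRequest()
#   request.biz_content = AlipayUserMpointPreconsultModel()
#   request.notify_url = 'https://example.com/notify'
#   params = request.get_params()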
| 27.268966 | 148 | 0.643399 | [
"Apache-2.0"
] | antopen/alipay-sdk-python-all | alipay/aop/api/request/AlipayUserMpointPreconsultRequest.py | 3,954 | Python |
##########################################################################
#
# Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
class TestTurbulence( unittest.TestCase ) :
def testConstructors( self ) :
t = IECore.TurbulenceV2ff()
self.assertEqual( t.octaves, 4 )
self.assertEqual( t.gain, 0.5 )
self.assertEqual( t.lacunarity, 2 )
self.assertEqual( t.turbulent, True )
t = IECore.TurbulenceV2ff( 2, 1, 3, False )
self.assertEqual( t.octaves, 2 )
self.assertEqual( t.gain, 1 )
self.assertEqual( t.lacunarity, 3 )
self.assertEqual( t.turbulent, False )
t = IECore.TurbulenceV2ff(
octaves = 3,
gain = 1.4,
lacunarity = 3,
turbulent = False
)
self.assertEqual( t.octaves, 3 )
self.assertAlmostEqual( t.gain, 1.4 )
self.assertEqual( t.lacunarity, 3 )
self.assertEqual( t.turbulent, False )
def test2d( self ) :
t = IECore.TurbulenceV2ff(
octaves = 4,
gain = 0.35,
lacunarity = 2,
turbulent = False
)
width = 400
height = 400
f = IECore.FloatVectorData( width * height )
o = 0
for i in range( 0, height ) :
for j in range( 0, width ) :
f[o] = 0.5 + t.turbulence( IECore.V2f( i/50.0, j/50.0 ) )
o += 1
b = IECore.Box2i( IECore.V2i( 0, 0 ), IECore.V2i( width-1, height-1 ) )
i = IECore.ImagePrimitive( b, b )
i["r"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, f )
i["g"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, f )
i["b"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, f )
e = IECore.Reader.create( "test/IECore/data/expectedResults/turbulence2d.exr" ).read()
op = IECore.ImageDiffOp()
res = op(
imageA = i,
imageB = e,
maxError = 0.0005
)
self.failIf( res.value )
def testNaN( self ) :
t = IECore.TurbulenceV2ff(
octaves = 28,
gain = 0.35,
lacunarity = 2,
turbulent = True
)
f = t.turbulence( IECore.V2f( 21.3, 51.2 ) )
self.assert_( f == f )
if __name__ == "__main__":
unittest.main()
| 31.352941 | 88 | 0.670598 | [
"BSD-3-Clause"
] | PaulDoessel/cortex | test/IECore/Turbulence.py | 3,731 | Python |
# -*- coding: utf-8 -*-
from .deprecated_code import (chi2_bin, best_ks_bin, make_bin, feature_analysis, calc_bin_cond)
| 30.25 | 95 | 0.752066 | [
"Apache-2.0"
] | longxl87/TimeDragon | __init__.py | 121 | Python |
#!/usr/bin/env python3
import ast
from collections import namedtuple
from functools import partial
import itertools
import logging
import os
from pathlib import Path
import re
from tempfile import NamedTemporaryFile, TemporaryDirectory
import time
import traceback
from typing import (
Any,
Iterator,
List,
Optional,
Pattern,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
import attr
import mypy.api
if TYPE_CHECKING:
import flake8.options.manager.OptionManager # noqa
__version__ = '17.8.0'
noqa = re.compile(r'# noqa\b', re.I).search
Error = namedtuple('Error', 'lineno col message type vars')
def make_arguments(**kwargs: Union[str, bool]) -> List[str]:
result = []
for k, v in kwargs.items():
k = k.replace('_', '-')
if v is True:
result.append('--' + k)
elif v is False:
continue
else:
result.append('--{}={}'.format(k, v))
return result
def calculate_mypypath() -> List[str]:
"""Return MYPYPATH so that stubs have precedence over local sources."""
typeshed_root = None
count = 0
started = time.time()
for parent in itertools.chain(
# Look in current script's parents, useful for zipapps.
Path(__file__).parents,
# Look around site-packages, useful for virtualenvs.
Path(mypy.api.__file__).parents,
# Look in global paths, useful for globally installed.
Path(os.__file__).parents,
):
count += 1
candidate = parent / 'lib' / 'mypy' / 'typeshed'
if candidate.is_dir():
typeshed_root = candidate
break
# Also check the non-installed path, useful for `setup.py develop`.
candidate = parent / 'typeshed'
if candidate.is_dir():
typeshed_root = candidate
break
LOG.debug(
'Checked %d paths in %.2fs looking for typeshed. Found %s',
count,
time.time() - started,
typeshed_root,
)
if not typeshed_root:
return []
stdlib_dirs = ('3.7', '3.6', '3.5', '3.4', '3.3', '3.2', '3', '2and3')
stdlib_stubs = [
typeshed_root / 'stdlib' / stdlib_dir
for stdlib_dir in stdlib_dirs
]
third_party_dirs = ('3.7', '3.6', '3', '2and3')
third_party_stubs = [
typeshed_root / 'third_party' / tp_dir
for tp_dir in third_party_dirs
]
return [
str(p) for p in stdlib_stubs + third_party_stubs
]
# invalid_types.py:5: error: Missing return statement
MYPY_ERROR_TEMPLATE = r"""
^
.* # whatever at the beginning
{filename}: # this needs to be provided in run()
(?P<lineno>\d+) # necessary for the match
(:(?P<column>\d+))? # optional but useful column info
:[ ] # ends the preamble
((?P<class>error|warning|note):)? # optional class
[ ](?P<message>.*) # the rest
$"""
LOG = logging.getLogger('flake8.mypy')
DEFAULT_ARGUMENTS = make_arguments(
platform='linux',
# flake8-mypy expects the two following for sensible formatting
show_column_numbers=True,
show_error_context=False,
# suppress error messages from unrelated files
follow_imports='skip',
# since we're ignoring imports, writing .mypy_cache doesn't make any sense
cache_dir=os.devnull,
# suppress errors about unsatisfied imports
ignore_missing_imports=True,
# allow untyped calls as a consequence of the options above
disallow_untyped_calls=False,
# allow returning Any as a consequence of the options above
warn_return_any=False,
# treat Optional per PEP 484
strict_optional=True,
# ensure all execution paths are returning
warn_no_return=True,
# lint-style cleanliness for typing needs to be disabled; returns more errors
# than the full run.
warn_redundant_casts=False,
warn_unused_ignores=False,
# The following are off by default. Flip them on if you feel
# adventurous.
disallow_untyped_defs=False,
check_untyped_defs=False,
)
_Flake8Error = Tuple[int, int, str, Type['MypyChecker']]
@attr.s(hash=False)
class MypyChecker:
name = 'flake8-mypy'
version = __version__
tree = attr.ib(default=None)
filename = attr.ib(default='(none)')
lines = attr.ib(default=[]) # type: List[int]
options = attr.ib(default=None)
visitor = attr.ib(default=attr.Factory(lambda: TypingVisitor))
def run(self) -> Iterator[_Flake8Error]:
if not self.lines:
return # empty file, no need checking.
visitor = self.visitor()
visitor.visit(self.tree)
if not visitor.should_type_check:
return # typing not used in the module
if not self.options.mypy_config and 'MYPYPATH' not in os.environ:
os.environ['MYPYPATH'] = ':'.join(calculate_mypypath())
# Always put the file in a separate temporary directory to avoid
# unexpected clashes with other .py and .pyi files in the same original
# directory.
with TemporaryDirectory(prefix='flake8mypy_') as d:
file = NamedTemporaryFile(
'w',
encoding='utf8',
prefix='tmpmypy_',
suffix='.py',
dir=d,
delete=False,
)
try:
self.filename = file.name
for line in self.lines:
file.write(line)
file.close()
yield from self._run()
finally:
os.remove(file.name)
def _run(self) -> Iterator[_Flake8Error]:
mypy_cmdline = self.build_mypy_cmdline(self.filename, self.options.mypy_config)
mypy_re = self.build_mypy_re(self.filename)
last_t499 = 0
try:
stdout, stderr, returncode = mypy.api.run(mypy_cmdline)
except Exception as exc:
# Pokémon exception handling to guard against mypy's internal errors
last_t499 += 1
yield self.adapt_error(T498(last_t499, 0, vars=(type(exc), str(exc))))
for line in traceback.format_exc().splitlines():
last_t499 += 1
yield self.adapt_error(T499(last_t499, 0, vars=(line,)))
else:
# FIXME: should we make any decision based on `returncode`?
for line in stdout.splitlines():
try:
e = self.make_error(line, mypy_re)
except ValueError:
# unmatched line
last_t499 += 1
yield self.adapt_error(T499(last_t499, 0, vars=(line,)))
continue
if self.omit_error(e):
continue
yield self.adapt_error(e)
for line in stderr.splitlines():
last_t499 += 1
yield self.adapt_error(T499(last_t499, 0, vars=(line,)))
@classmethod
def adapt_error(cls, e: Any) -> _Flake8Error:
"""Adapts the extended error namedtuple to be compatible with Flake8."""
return e._replace(message=e.message.format(*e.vars))[:4]
def omit_error(self, e: Error) -> bool:
"""Returns True if error should be ignored."""
if (
e.vars and
e.vars[0] == 'No parent module -- cannot perform relative import'
):
return True
return bool(noqa(self.lines[e.lineno - 1]))
@classmethod
def add_options(cls, parser: 'flake8.options.manager.OptionManager') -> None:
parser.add_option(
'--mypy-config',
parse_from_config=True,
help="path to a custom mypy configuration file",
)
def make_error(self, line: str, regex: Pattern) -> Error:
m = regex.match(line)
if not m:
raise ValueError("unmatched line")
lineno = int(m.group('lineno'))
column = int(m.group('column') or 0)
message = m.group('message').strip()
if m.group('class') == 'note':
return T400(lineno, column, vars=(message,))
return T484(lineno, column, vars=(message,))
def build_mypy_cmdline(
self, filename: str, mypy_config: Optional[str]
) -> List[str]:
if mypy_config:
return ['--config-file=' + mypy_config, filename]
return DEFAULT_ARGUMENTS + [filename]
def build_mypy_re(self, filename: Union[str, Path]) -> Pattern:
filename = Path(filename)
if filename.is_absolute():
prefix = Path('.').absolute()
try:
filename = filename.relative_to(prefix)
except ValueError:
pass # not relative to the cwd
re_filename = re.escape(str(filename))
if re_filename.startswith(r'\./'):
re_filename = re_filename[3:]
return re.compile(
MYPY_ERROR_TEMPLATE.format(filename=re_filename),
re.VERBOSE,
)
@attr.s
class TypingVisitor(ast.NodeVisitor):
"""Used to determine if the file is using annotations at all."""
should_type_check = attr.ib(default=False)
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
if node.returns:
self.should_type_check = True
return
for arg in itertools.chain(node.args.args, node.args.kwonlyargs):
if arg.annotation:
self.should_type_check = True
return
va = node.args.vararg
kw = node.args.kwarg
if (va and va.annotation) or (kw and kw.annotation):
self.should_type_check = True
def visit_Import(self, node: ast.Import) -> None:
for name in node.names:
if (
isinstance(name, ast.alias) and
name.name == 'typing' or
name.name.startswith('typing.')
):
self.should_type_check = True
break
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
if (
node.level == 0 and
node.module == 'typing' or
node.module and node.module.startswith('typing.')
):
self.should_type_check = True
def generic_visit(self, node: ast.AST) -> None:
"""Called if no explicit visitor function exists for a node."""
for _field, value in ast.iter_fields(node):
if self.should_type_check:
break
if isinstance(value, list):
for item in value:
if self.should_type_check:
break
if isinstance(item, ast.AST):
self.visit(item)
elif isinstance(value, ast.AST):
self.visit(value)
# Generic mypy error
T484 = partial(
Error,
message="T484 {}",
type=MypyChecker,
vars=(),
)
# Generic mypy note
T400 = partial(
Error,
message="T400 note: {}",
type=MypyChecker,
vars=(),
)
# Internal mypy error (summary)
T498 = partial(
Error,
message="T498 Internal mypy error '{}': {}",
type=MypyChecker,
vars=(),
)
# Internal mypy error (traceback, stderr, unmatched line)
T499 = partial(
Error,
message="T499 {}",
type=MypyChecker,
vars=(),
)
| 29.830287 | 87 | 0.579519 | [
"MIT"
] | ambv/flake8-mypy | flake8_mypy.py | 11,426 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
class LineageBackend(object):
def send_lineage(self,
operator=None, inlets=None, outlets=None, context=None):
"""
Sends lineage metadata to a backend
:param operator: the operator executing a transformation on the inlets and outlets
:param inlets: the inlets to this operator
:param outlets: the outlets from this operator
:param context: the current context of the task instance
"""
raise NotImplementedError()
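# Illustrative sketch only (not part of Airflow's API): a minimal backend that
# simply logs whatever lineage it receives. A real backend would forward the
# metadata to an external system instead.
class _ExampleLoggingLineageBackend(LineageBackend):
    def send_lineage(self,
                     operator=None, inlets=None, outlets=None, context=None):
        import logging
        logging.getLogger(__name__).info(
            "lineage from %s: inlets=%s outlets=%s", operator, inlets, outlets)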
| 40.375 | 90 | 0.724458 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1010data/incubator-airflow | airflow/lineage/backend/__init__.py | 1,292 | Python |
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbAddProjectTags(BaseType):
def __init__(self, id=None, tags=None):
required = {
"id": False,
"tags": False,
}
self.id = id
self.tags = tags
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
tmp = d.get('id', None)
if tmp is not None:
d['id'] = tmp
tmp = d.get('tags', None)
if tmp is not None:
d['tags'] = [tmp for tmp in tmp]
return ModeldbAddProjectTags(**d)
| 21.8 | 62 | 0.597859 | [
"Apache-2.0"
] | Atharex/modeldb | client/verta/verta/_swagger/_public/modeldb/model/ModeldbAddProjectTags.py | 654 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='AllFieldsModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('char_field', models.CharField(help_text=b'write something', max_length=500)),
('int_field', models.IntegerField(help_text=b'Put a number, magic number')),
('text_field', models.TextField(help_text=b'Put a large test here')),
('big_integer_field', models.BigIntegerField(help_text=b'An big integer field')),
('binary_field', models.BinaryField(help_text=b'A binary field')),
('date_field', models.DateField(help_text=b'A date field')),
('datetime_field', models.DateTimeField(help_text=b'A datetime field')),
('boolean_field', models.BooleanField(help_text=b'A boolean field')),
('comma_separated_integer_field', models.CommaSeparatedIntegerField(help_text=b'A comma sepparated integer field', max_length=200)),
('decimal_field', models.DecimalField(help_text=b'A decimal field', max_digits=100, decimal_places=10)),
('duration_field', models.DurationField(help_text=b'A duration field')),
('email_field', models.EmailField(help_text=b'A email field', max_length=254)),
('file_field', models.FileField(help_text=b'A file field', upload_to=b'')),
('file_path_field', models.FilePathField(help_text=b'A file path field')),
('float_field', models.FloatField(help_text=b'A float field')),
('generic_ip_addr_field', models.GenericIPAddressField(help_text=b'A generic ip addr field')),
('image_field', models.ImageField(help_text=b'A image field', upload_to=b'')),
('null_boolean_field', models.NullBooleanField(help_text=b'A null boolean field')),
('positive_integer_field', models.PositiveIntegerField(help_text=b'A positive integer')),
('positive_small_integer_field', models.PositiveSmallIntegerField(help_text=b'A positive small integer field')),
('slug_field', models.SlugField(help_text=b'A slug field')),
('small_integer_field', models.SmallIntegerField(help_text=b'A small integer field')),
('time_field', models.TimeField(help_text=b'A time field')),
('url_field', models.URLField(help_text=b'A url field')),
('uuid_field', models.UUIDField(help_text=b'A uuid field')),
],
),
migrations.CreateModel(
name='ForeingModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b'write something', max_length=500)),
('age', models.PositiveSmallIntegerField()),
('birthday', models.DateField()),
('foreign_key_field', models.ForeignKey(help_text=b'A foreign_key field', to='second_app.AllFieldsModel')),
],
),
migrations.CreateModel(
name='ManyToManyModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b'write something', max_length=500)),
],
),
migrations.AddField(
model_name='allfieldsmodel',
name='many_to_many_field',
field=models.ManyToManyField(help_text=b'A many to many field', to='second_app.ManyToManyModel'),
),
]
| 58.313433 | 148 | 0.622217 | [
"BSD-3-Clause"
] | lumasepa/clean_admin | example/second_app/migrations/0001_initial.py | 3,907 | Python |
import time
import random
import numpy as np
import gym
from rlkit.scripted_experts.scripted_policy import ScriptedPolicy
ACT_MAG = 0.275
ACT_NOISE_SCALE = 0.1
ACT_SLOW_NOISE_SCALE = 0.05
SLOW_DOWN_RADIUS = 0.01
def get_linear_pos_act(cur_pos, reach_pos):
    """Return a position action of magnitude ACT_MAG pointing from cur_pos toward reach_pos
    (assumes the two positions are not identical)."""
cur_pos = cur_pos.copy()
reach_pos = reach_pos.copy()
move_dir = reach_pos - cur_pos
dist = np.linalg.norm(move_dir, axis=-1)
# if dist > ACT_MAG:
# if dist < ACT_MAG:
# move_dir = move_dir
# else:
move_dir *= (ACT_MAG / dist)
return move_dir
class ScriptedLinearFewShotReachPolicy(ScriptedPolicy):
def __init__(self):
super().__init__()
def reset(self, env):
# # first make the gripper go slightly above the object
self.correct_obj_idx = env.correct_obj_idx
if self.correct_obj_idx == 0:
self.correct_obj_abs_pos = env.sim.data.get_site_xpos('object0')
else:
self.correct_obj_abs_pos = env.sim.data.get_site_xpos('object1')
self.init_grip_pos = env.sim.data.get_site_xpos('robot0:grip')
X_Y_FRAC = np.random.uniform(0.7, 0.8)
Z_FRAC = np.random.uniform(0.2, 0.3)
self.waypoint = np.zeros(3)
self.waypoint[:2] = (self.correct_obj_abs_pos[:2] - self.init_grip_pos[:2]) * X_Y_FRAC
self.waypoint[2] = (self.correct_obj_abs_pos[2] - self.init_grip_pos[2]) * Z_FRAC
self.waypoint += self.init_grip_pos
self.waypoint += np.random.uniform(-0.01, 0.01, 3)
# first go to a way-point
def cond_0(obs):
grip_pos = env.sim.data.get_site_xpos('robot0:grip')
return 0.01 > np.linalg.norm(grip_pos - self.waypoint, axis=-1)
self.milestone_0_cond = cond_0
# now actually go to the object
def cond_1(obs):
grip_pos = env.sim.data.get_site_xpos('robot0:grip')
goal = env.goal
return 0.01 > np.linalg.norm(grip_pos - goal)
self.milestone_1_cond = cond_1
# reset the milestones
self.milestone_0_complete = False
self.milestone_1_complete = False
self.first_time_all_complete = -1
def get_action(self, obs, env, timestep):
# first find out what stage we are in and update milestone info
cur_stage = -1
if not self.milestone_0_complete:
# check if milestone 0 was completed by the last step action
if self.milestone_0_cond(obs):
self.milestone_0_complete = True
cur_stage = 1
else:
cur_stage = 0
else:
if not self.milestone_1_complete:
# check if milestone 1 was completed by the last step action
if self.milestone_1_cond(obs):
self.milestone_1_complete = True
self.first_time_all_complete = timestep
print('solved')
cur_stage = 1
# now perform the action corresponding to the current stage
if cur_stage == 0:
grip_pos = env.sim.data.get_site_xpos('robot0:grip')
action = [0, 0, 0, 0]
pos_act = get_linear_pos_act(grip_pos, self.waypoint)
pos_act += np.random.uniform(0.0, ACT_NOISE_SCALE, 3)
for i in range(len(pos_act)):
action[i] = pos_act[i]
action[len(action)-1] = np.random.uniform(-0.005, -0.015) # close
else:
action = [0, 0, 0, 0]
# print(np.linalg.norm(correct_obj_rel_target, axis=-1))
grip_pos = env.sim.data.get_site_xpos('robot0:grip')
target_rel_pos = env.goal - grip_pos
if np.linalg.norm(target_rel_pos, axis=-1) < SLOW_DOWN_RADIUS:
# pos_act = ACT_MAG*target_rel_pos*10
pos_act = 0.25*get_linear_pos_act(np.zeros(3), target_rel_pos)
pos_act += np.random.uniform(0.0, ACT_SLOW_NOISE_SCALE, 3)
# print(pos_act)
else:
pos_act = get_linear_pos_act(np.zeros(3), target_rel_pos)
pos_act += np.random.uniform(0.0, ACT_NOISE_SCALE, 3)
# pos_act = get_linear_pos_act(np.zeros(3), correct_obj_rel_target)
for i in range(len(pos_act)):
action[i] = pos_act[i]
action[len(action)-1] = np.random.uniform(-0.005, -0.015) # close
action = np.clip(action, -1.0, 1.0)
return action, {}
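# Illustrative rollout sketch (the ``env`` object is hypothetical; it must be a
# Fetch-style gym env exposing the 'robot0:grip' and object sites used above):
#
#   policy = ScriptedLinearFewShotReachPolicy()
#   obs = env.reset()
#   policy.reset(env)
#   for t in range(50):
#       action, _ = policy.get_action(obs, env, t)
#       obs, reward, done, info = env.step(action)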
| 37.495798 | 94 | 0.600628 | [
"MIT"
] | KamyarGh/rl_swiss | rlkit/scripted_experts/linear_few_shot_reach_env_expert.py | 4,462 | Python |
# -*- coding: utf-8 -*-
def calculate_map(gt_path, my_path):
    """Compute the mean average precision (mAP) of the retrieval results in my_path
    against the ground-truth id-to-videos mapping in gt_path."""
id2videos = dict()
with open(gt_path, 'r') as fin:
lines = fin.readlines()
for line in lines:
terms = line.strip().split(' ')
id2videos[terms[0]] = terms[1:]
id_num = len(lines)
my_id2videos = dict()
with open(my_path, 'r') as fin:
lines = fin.readlines()
assert (len(lines) <= id_num)
for line in lines:
terms = line.strip().split(' ')
tmp_list = []
for video in terms[1:]:
if video not in tmp_list:
tmp_list.append(video)
my_id2videos[terms[0]] = tmp_list
ap_total = 0.
for cid in id2videos:
videos = id2videos[cid]
if cid not in my_id2videos:
continue
my_videos = my_id2videos[cid]
# recall number upper bound
assert (len(my_videos) <= 100)
ap = 0.
ind = 0.
for ind_video, my_video in enumerate(my_videos):
if my_video in videos:
ind += 1
ap += ind / (ind_video + 1)
ap_total += ap / len(videos)
return ap_total / id_num
if __name__ == '__main__':
gt_val_path = '/data/materials/val_gt.txt'
my_val_path = '/data/result/result.txt'
print('mAP: {}'.format(calculate_map(gt_val_path, my_val_path)))
| 30.108696 | 68 | 0.540794 | [
"MIT"
] | LegenDong/IQIYI_VID_FACE_2019 | evaluation_map.py | 1,385 | Python |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-bad-import-order,redefined-builtin
"""APIs to train an image classification model.
Task guide:
https://www.tensorflow.org/lite/tutorials/model_maker_image_classification.
"""
from tensorflow_examples.lite.model_maker.core.data_util.image_dataloader import ImageClassifierDataLoader as DataLoader
from tensorflow_examples.lite.model_maker.core.task.image_classifier import create
from tensorflow_examples.lite.model_maker.core.task.image_classifier import ImageClassifier
from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import efficientnet_lite0_spec as EfficientNetLite0Spec
from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import efficientnet_lite1_spec as EfficientNetLite1Spec
from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import efficientnet_lite2_spec as EfficientNetLite2Spec
from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import efficientnet_lite3_spec as EfficientNetLite3Spec
from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import efficientnet_lite4_spec as EfficientNetLite4Spec
from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import ImageModelSpec as ModelSpec
from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import mobilenet_v2_spec as MobileNetV2Spec
from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import resnet_50_spec as Resnet50Spec
| 65.3125 | 129 | 0.852632 | [
"Apache-2.0"
] | 22SWU/FlowerIdentify | tensorflow_examples/lite/model_maker/public/image_classifier/__init__.py | 2,090 | Python |
from .DTM import DTM | 20 | 20 | 0.8 | [
"MIT"
] | TNAlotaibi/DTM-crypt | src/DTM_crypt/__init__.py | 20 | Python |
import os
import random
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from datasets.data_io import get_transform, read_all_lines, pfm_imread
class PicoStereoDataset(Dataset):
def __init__(self, datapath, list_filename, training):
self.datapath = datapath
self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename)
self.training = training
def load_path(self, list_filename):
lines = read_all_lines(list_filename)
splits = [line.split() for line in lines]
left_images = [x[0] for x in splits]
right_images = [x[1] for x in splits]
if len(splits[0]) == 2: # ground truth not available
return left_images, right_images, None
else:
disp_images = [x[2] for x in splits]
return left_images, right_images, disp_images
def load_image(self, filename):
return Image.open(filename).convert('RGB')
def load_disp(self, filename):
data, scale = pfm_imread(filename)
data = np.ascontiguousarray(data, dtype=np.float32)
return data
def __len__(self):
return len(self.left_filenames)
def __getitem__(self, index):
left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index]))
right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index]))
if self.disp_filenames: # has disparity ground truth
disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index]))
else:
disparity = None
# to tensor, normalize
processed = get_transform()
left_img = processed(left_img)
right_img = processed(right_img)
return {"left": left_img,
"right": right_img,
# "disparity": disparity,
"top_pad": 0,
"right_pad": 0,
"left_filename": self.left_filenames[index]}
class SceneFlowDataset(Dataset):
def __init__(self, datapath, list_filename, training):
self.datapath = datapath
self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename)
self.training = training
def load_path(self, list_filename):
lines = read_all_lines(list_filename)
splits = [line.split() for line in lines]
left_images = [x[0] for x in splits]
right_images = [x[1] for x in splits]
disp_images = [x[2] for x in splits]
return left_images, right_images, disp_images
def load_image(self, filename):
return Image.open(filename).convert('RGB')
def load_disp(self, filename):
data, scale = pfm_imread(filename)
data = np.ascontiguousarray(data, dtype=np.float32)
return data
def __len__(self):
return len(self.left_filenames)
def __getitem__(self, index):
left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index]))
right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index]))
disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index]))
if self.training:
w, h = left_img.size
crop_w, crop_h = 512, 256
x1 = random.randint(0, w - crop_w)
y1 = random.randint(0, h - crop_h)
# random crop
left_img = left_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))
right_img = right_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))
disparity = disparity[y1:y1 + crop_h, x1:x1 + crop_w]
# to tensor, normalize
processed = get_transform()
left_img = processed(left_img)
right_img = processed(right_img)
return {"left": left_img,
"right": right_img,
"disparity": disparity}
else:
w, h = left_img.size
crop_w, crop_h = 960, 512
left_img = left_img.crop((w - crop_w, h - crop_h, w, h))
right_img = right_img.crop((w - crop_w, h - crop_h, w, h))
disparity = disparity[h - crop_h:h, w - crop_w: w]
processed = get_transform()
left_img = processed(left_img)
right_img = processed(right_img)
return {"left": left_img,
"right": right_img,
"disparity": disparity,
"top_pad": 0,
"right_pad": 0,
"left_filename": self.left_filenames[index]}
class KITTIDataset(Dataset):
def __init__(self, datapath, list_filename, training):
self.datapath = datapath
self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename)
self.training = training
if self.training:
assert self.disp_filenames is not None
def load_path(self, list_filename):
lines = read_all_lines(list_filename)
splits = [line.split() for line in lines]
left_images = [x[0] for x in splits]
right_images = [x[1] for x in splits]
if len(splits[0]) == 2: # ground truth not available
return left_images, right_images, None
else:
disp_images = [x[2] for x in splits]
return left_images, right_images, disp_images
def load_image(self, filename):
return Image.open(filename).convert('RGB')
def load_disp(self, filename):
data = Image.open(filename)
data = np.array(data, dtype=np.float32) / 256.
return data
def __len__(self):
return len(self.left_filenames)
def __getitem__(self, index):
left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index]))
right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index]))
if self.disp_filenames: # has disparity ground truth
disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index]))
else:
disparity = None
if self.training:
w, h = left_img.size
crop_w, crop_h = 512, 256
x1 = random.randint(0, w - crop_w)
y1 = random.randint(0, h - crop_h)
# random crop
left_img = left_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))
right_img = right_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))
disparity = disparity[y1:y1 + crop_h, x1:x1 + crop_w]
# to tensor, normalize
processed = get_transform()
left_img = processed(left_img)
right_img = processed(right_img)
return {"left": left_img,
"right": right_img,
"disparity": disparity}
else:
w, h = left_img.size
# normalize
processed = get_transform()
left_img = processed(left_img).numpy()
right_img = processed(right_img).numpy()
# pad to size 1248x384
top_pad = 384 - h
right_pad = 1248 - w
assert top_pad > 0 and right_pad > 0
# pad images
left_img = np.lib.pad(left_img, ((0, 0), (top_pad, 0), (0, right_pad)), mode='constant', constant_values=0)
right_img = np.lib.pad(right_img, ((0, 0), (top_pad, 0), (0, right_pad)), mode='constant',
constant_values=0)
# pad disparity gt
if disparity is not None:
assert len(disparity.shape) == 2
disparity = np.lib.pad(disparity, ((top_pad, 0), (0, right_pad)), mode='constant', constant_values=0)
if disparity is not None:
return {"left": left_img,
"right": right_img,
"disparity": disparity,
"top_pad": top_pad,
"right_pad": right_pad,
"left_filename": self.left_filenames[index]}
else:
return {"left": left_img,
"right": right_img,
"top_pad": top_pad,
"right_pad": right_pad,
"left_filename": self.left_filenames[index],
"right_filename": self.right_filenames[index]}
class DrivingStereoDataset(Dataset):
def __init__(self, datapath, list_filename, training):
self.datapath = datapath
self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename)
self.training = training
def load_path(self, list_filename):
lines = read_all_lines(list_filename)
splits = [line.split() for line in lines]
left_images = [x[0] for x in splits]
right_images = [x[1] for x in splits]
disp_images = [x[2] for x in splits]
return left_images, right_images, disp_images
def load_image(self, filename):
return Image.open(filename).convert('RGB')
def load_disp(self, filename):
data = Image.open(filename)
data = np.array(data, dtype=np.float32) / 256.
return data
def __len__(self):
return len(self.left_filenames)
def __getitem__(self, index):
left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index]))
right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index]))
disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index]))
if self.training:
w, h = left_img.size # (881, 400)
crop_w, crop_h = 512, 256
x1 = random.randint(0, w - crop_w)
y1 = random.randint(0, h - crop_h)
# random crop
left_img = left_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))
right_img = right_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))
disparity = disparity[y1:y1 + crop_h, x1:x1 + crop_w]
# to tensor, normalize
processed = get_transform()
left_img = processed(left_img)
right_img = processed(right_img)
return {"left": left_img,
"right": right_img,
"disparity": disparity}
else:
w, h = left_img.size
crop_w, crop_h = 880, 400
left_img = left_img.crop((w - crop_w, h - crop_h, w, h))
right_img = right_img.crop((w - crop_w, h - crop_h, w, h))
disparity = disparity[h - crop_h:h, w - crop_w: w]
processed = get_transform()
left_img = processed(left_img)
right_img = processed(right_img)
return {"left": left_img,
"right": right_img,
"disparity": disparity,
"top_pad": 0,
"right_pad": 0,
"left_filename": self.left_filenames[index]}
| 37.740614 | 119 | 0.579942 | [
"Apache-2.0"
] | skylook/mobilestereonet | datasets/dataset.py | 11,058 | Python |
from unittest.mock import MagicMock
import pytest
from click.testing import CliRunner
from prefect.cli.register import register
def test_register_init():
runner = CliRunner()
result = runner.invoke(register)
assert result.exit_code == 0
assert "Register flows" in result.output
def test_register_help():
runner = CliRunner()
result = runner.invoke(register, ["--help"])
assert result.exit_code == 0
assert "Register flows" in result.output
def test_register_flow_help():
runner = CliRunner()
result = runner.invoke(register, ["flow", "--help"])
assert result.exit_code == 0
assert "Register a flow" in result.output
@pytest.mark.parametrize("labels", [[], ["b", "c"]])
@pytest.mark.parametrize("kind", ["run_config", "environment"])
def test_register_flow_call(monkeypatch, tmpdir, kind, labels):
client = MagicMock()
monkeypatch.setattr("prefect.Client", MagicMock(return_value=client))
if kind == "environment":
contents = (
"from prefect import Flow\n"
"from prefect.environments.execution import LocalEnvironment\n"
"from prefect.environments.storage import Local\n"
"f = Flow('test-flow', environment=LocalEnvironment(labels=['a']),\n"
" storage=Local(add_default_labels=False))"
)
else:
contents = (
"from prefect import Flow\n"
"from prefect.run_configs import KubernetesRun\n"
"from prefect.environments.storage import Local\n"
"f = Flow('test-flow', run_config=KubernetesRun(labels=['a']),\n"
" storage=Local(add_default_labels=False))"
)
full_path = str(tmpdir.join("flow.py"))
with open(full_path, "w") as f:
f.write(contents)
args = ["flow", "--file", full_path, "--name", "test-flow", "--project", "project"]
for l in labels:
args.extend(["-l", l])
runner = CliRunner()
result = runner.invoke(register, args)
assert client.register.called
assert client.register.call_args[1]["project_name"] == "project"
# Check additional labels are set if specified
flow = client.register.call_args[1]["flow"]
if kind == "run_config":
assert flow.run_config.labels == {"a", *labels}
else:
assert flow.environment.labels == {"a", *labels}
assert result.exit_code == 0
| 32.22973 | 87 | 0.644025 | [
"Apache-2.0"
] | BluePoof/prefect | tests/cli/test_register.py | 2,385 | Python |
# Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
This module contains pretty-print/formatting utilities.
"""
from dataclasses import dataclass
from typing import Optional
@dataclass(frozen=True)
class PrettyOptions:
"""
Display options for pretty-printing DAML ASTs.
Instance attributes:
.. attribute:: PrettyOptions.column_width
The maximum number of columns to use when rendering text, or ``None`` if lines should not
wrap.
.. attribute:: PrettyOptions.show_hidden_types
``True`` to render built-in DAML types defined in ``DA.Internal`` or ``GHC`` and specially
generated names.
.. attribute:: PrettyOptions.format
A string that identifies the target language to render.
"""
column_width: Optional[int] = None
show_hidden_types: bool = False
format: str = "daml"
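# Illustrative usage (field values are arbitrary examples):
#
#   options = PrettyOptions(column_width=80, show_hidden_types=True, format="daml")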
| 25.756757 | 102 | 0.701994 | [
"Apache-2.0"
] | digital-asset/dazl-client | python/dazl/pretty/options.py | 953 | Python |
import logging
import numpy as np
import trimesh
from src.common import compute_iou
# from scipy.spatial import cKDTree
from src.utils.libkdtree import KDTree
from src.utils.libmesh import check_mesh_contains
# Maximum values for bounding box [-0.5, 0.5]^3
EMPTY_PCL_DICT = {
'completeness': np.sqrt(3),
'accuracy': np.sqrt(3),
'completeness2': 3,
'accuracy2': 3,
'chamfer': 6,
}
EMPTY_PCL_DICT_NORMALS = {
'normals completeness': -1.,
'normals accuracy': -1.,
'normals': -1.,
}
logger = logging.getLogger(__name__)
class MeshEvaluator(object):
""" Mesh evaluation class.
It handles the mesh evaluation process.
Args:
n_points (int): number of points to be used for evaluation
"""
def __init__(self, n_points=100000):
self.n_points = n_points
def eval_mesh(self,
mesh,
pointcloud_tgt,
normals_tgt,
points_iou,
occ_tgt,
remove_wall=False):
""" Evaluates a mesh.
Args:
mesh (trimesh): mesh which should be evaluated
pointcloud_tgt (numpy array): target point cloud
normals_tgt (numpy array): target normals
            points_iou (numpy array): points tensor for IoU evaluation
            occ_tgt (numpy array): GT occupancy values for IoU points
            remove_wall (bool): whether to crop away walls and floors from the
                sampled points (for scene-level data)
        """
if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
if remove_wall: # ! Remove walls and floors
pointcloud, idx = mesh.sample(2 * self.n_points, return_index=True)
eps = 0.007
x_max, x_min = pointcloud_tgt[:, 0].max(), pointcloud_tgt[:, 0].min()
y_max, y_min = pointcloud_tgt[:, 1].max(), pointcloud_tgt[:, 1].min()
z_max, z_min = pointcloud_tgt[:, 2].max(), pointcloud_tgt[:, 2].min()
# add small offsets
x_max, x_min = x_max + eps, x_min - eps
y_max, y_min = y_max + eps, y_min - eps
z_max, z_min = z_max + eps, z_min - eps
mask_x = (pointcloud[:, 0] <= x_max) & (pointcloud[:, 0] >= x_min)
mask_y = (pointcloud[:, 1] >= y_min) # floor
mask_z = (pointcloud[:, 2] <= z_max) & (pointcloud[:, 2] >= z_min)
mask = mask_x & mask_y & mask_z
pointcloud_new = pointcloud[mask]
# Subsample
idx_new = np.random.randint(pointcloud_new.shape[0], size=self.n_points)
pointcloud = pointcloud_new[idx_new]
idx = idx[mask][idx_new]
else:
pointcloud, idx = mesh.sample(self.n_points, return_index=True)
pointcloud = pointcloud.astype(np.float32)
normals = mesh.face_normals[idx]
else:
pointcloud = np.empty((0, 3))
normals = np.empty((0, 3))
out_dict = self.eval_pointcloud(pointcloud, pointcloud_tgt, normals, normals_tgt)
if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
occ = check_mesh_contains(mesh, points_iou)
if occ_tgt.min() < 0:
occ_tgt = (occ_tgt <= 0).astype(np.float32)
out_dict['iou'] = compute_iou(occ, occ_tgt)
else:
out_dict['iou'] = 0.
return out_dict
@staticmethod
def eval_pointcloud(pointcloud,
pointcloud_tgt,
normals=None,
normals_tgt=None,
thresholds=np.linspace(1. / 1000, 1, 1000)):
""" Evaluates a point cloud.
Args:
pointcloud (numpy array): predicted point cloud
pointcloud_tgt (numpy array): target point cloud
normals (numpy array): predicted normals
normals_tgt (numpy array): target normals
thresholds (numpy array): threshold values for the F-score calculation
"""
# Return maximum losses if pointcloud is empty
if pointcloud.shape[0] == 0:
logger.warning('Empty pointcloud / mesh detected!')
out_dict = EMPTY_PCL_DICT.copy()
if normals is not None and normals_tgt is not None:
out_dict.update(EMPTY_PCL_DICT_NORMALS)
return out_dict
pointcloud = np.asarray(pointcloud)
pointcloud_tgt = np.asarray(pointcloud_tgt)
# Completeness: how far are the points of the target point cloud from the predicted point cloud
completeness, completeness_normals = distance_p2p(pointcloud_tgt, normals_tgt, pointcloud, normals)
recall = get_threshold_percentage(completeness, thresholds)
completeness2 = completeness ** 2
completeness = completeness.mean()
completeness2 = completeness2.mean()
completeness_normals = completeness_normals.mean()
# Accuracy: how far are the points of the predicted pointcloud from the target pointcloud
accuracy, accuracy_normals = distance_p2p(pointcloud, normals, pointcloud_tgt, normals_tgt)
precision = get_threshold_percentage(accuracy, thresholds)
accuracy2 = accuracy ** 2
accuracy = accuracy.mean()
accuracy2 = accuracy2.mean()
accuracy_normals = accuracy_normals.mean()
# Chamfer distance
chamferL2 = 0.5 * (completeness2 + accuracy2)
normals_correctness = (0.5 * completeness_normals + 0.5 * accuracy_normals)
chamferL1 = 0.5 * (completeness + accuracy)
# F-Score
F = [2 * precision[i] * recall[i] / (precision[i] + recall[i]) for i in range(len(precision))]
out_dict = {
'completeness': completeness,
'accuracy': accuracy,
'normals completeness': completeness_normals,
'normals accuracy': accuracy_normals,
'normals': normals_correctness,
'completeness2': completeness2,
'accuracy2': accuracy2,
'chamfer-L2': chamferL2,
'chamfer-L1': chamferL1,
'f-score': F[9], # threshold = 1.0%
'f-score-15': F[14], # threshold = 1.5%
'f-score-20': F[19], # threshold = 2.0%
}
return out_dict
def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
""" Computes minimal distances of each point in points_src to points_tgt.
Args:
points_src (numpy array): source points
normals_src (numpy array): source normals
points_tgt (numpy array): target points
normals_tgt (numpy array): target normals
"""
kdtree = KDTree(points_tgt)
dist, idx = kdtree.query(points_src)
if normals_src is not None and normals_tgt is not None:
normals_src = normals_src / np.linalg.norm(normals_src, axis=-1, keepdims=True)
normals_tgt = normals_tgt / np.linalg.norm(normals_tgt, axis=-1, keepdims=True)
normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=-1)
# Handle normals that point into wrong direction gracefully (mostly due to method not caring about this in generation)
normals_dot_product = np.abs(normals_dot_product)
else:
normals_dot_product = np.array([np.nan] * points_src.shape[0], dtype=np.float32)
return dist, normals_dot_product
def distance_p2m(points, mesh):
""" Compute minimal distances of each point in points to mesh.
Args:
points (numpy array): points array
mesh (trimesh): mesh
"""
_, dist, _ = trimesh.proximity.closest_point(mesh, points)
return dist
def get_threshold_percentage(dist, thresholds):
""" Evaluates a point cloud.
Args:
dist (numpy array): calculated distance
thresholds (numpy array): threshold values for the F-score calculation
"""
in_threshold = [(dist <= t).mean() for t in thresholds]
return in_threshold
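# Minimal usage sketch (illustrative only; random clouds stand in for real data):
#
#   evaluator = MeshEvaluator(n_points=100000)
#   pred = np.random.rand(1000, 3).astype(np.float32)
#   gt = np.random.rand(1000, 3).astype(np.float32)
#   metrics = evaluator.eval_pointcloud(pred, gt)
#   print(metrics['chamfer-L1'], metrics['f-score'])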
| 36.293578 | 126 | 0.605536 | [
"MIT"
] | hummat/convolutional_occupancy_networks | src/eval.py | 7,912 | Python |
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, SetEnvironmentVariable
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from nav2_common.launch import RewrittenYaml
def generate_launch_description():
# Get the launch directory
bringup_dir = get_package_share_directory('kohm_gazebo')
namespace = LaunchConfiguration('namespace')
use_sim_time = LaunchConfiguration('use_sim_time', default=True)
autostart = LaunchConfiguration('autostart')
params_file = LaunchConfiguration('config')
default_bt_xml_filename = LaunchConfiguration('default_bt_xml_filename')
map_subscribe_transient_local = LaunchConfiguration(
'map_subscribe_transient_local')
lifecycle_nodes = [
'controller_server', 'planner_server', 'recoveries_server',
'bt_navigator', 'waypoint_follower'
]
remappings = [('/tf', 'tf'), ('/tf_static', 'tf_static'), ('/cmd_vel', '/nav_vel'), ('/odom', '/kohm/odom')]
param_substitutions = {
'use_sim_time': use_sim_time,
'default_bt_xml_filename': default_bt_xml_filename,
'autostart': autostart,
'map_subscribe_transient_local': map_subscribe_transient_local
}
configured_params = RewrittenYaml(source_file=params_file,
root_key=namespace,
param_rewrites=param_substitutions,
convert_types=True)
return LaunchDescription([
# Set env var to print messages to stdout immediately
SetEnvironmentVariable('RCUTILS_LOGGING_BUFFERED_STREAM', '1'),
DeclareLaunchArgument('namespace',
default_value='',
description='Top-level namespace'),
DeclareLaunchArgument(
'use_sim_time',
default_value='false',
description='Use simulation (Gazebo) clock if true'),
DeclareLaunchArgument(
'autostart',
default_value='true',
description='Automatically startup the nav2 stack'),
DeclareLaunchArgument(
'config',
default_value=os.path.join(bringup_dir, 'config/navigation',
'nav2_params.yaml'),
description='Full path to the ROS2 parameters file to use'),
DeclareLaunchArgument(
'default_bt_xml_filename',
default_value=os.path.join(
get_package_share_directory('nav2_bt_navigator'),
'behavior_trees', 'navigate_w_replanning_and_recovery.xml'),
description='Full path to the behavior tree xml file to use'),
DeclareLaunchArgument(
'map_subscribe_transient_local',
default_value='false',
description='Whether to set the map subscriber QoS to transient local'),
Node(package='nav2_controller',
executable='controller_server',
output='screen',
parameters=[configured_params],
remappings=remappings),
Node(package='nav2_planner',
executable='planner_server',
name='planner_server',
output='screen',
parameters=[configured_params],
remappings=remappings),
Node(package='nav2_recoveries',
executable='recoveries_server',
name='recoveries_server',
output='screen',
parameters=[configured_params],
remappings=remappings),
Node(package='nav2_bt_navigator',
executable='bt_navigator',
name='bt_navigator',
output='screen',
parameters=[configured_params],
remappings=remappings),
Node(package='nav2_waypoint_follower',
executable='waypoint_follower',
name='waypoint_follower',
output='screen',
parameters=[configured_params],
remappings=remappings),
Node(package='nav2_lifecycle_manager',
executable='lifecycle_manager',
name='lifecycle_manager_navigation',
output='screen',
parameters=[{
'use_sim_time': use_sim_time
}, {
'autostart': autostart
}, {
'node_names': lifecycle_nodes
}]),
])
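# Illustrative invocation (package and launch-file names follow this repository's
# layout; the exact command may differ):
#
#   ros2 launch kohm_gazebo nav.launch.py use_sim_time:=true autostart:=true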
| 40.330709 | 112 | 0.632175 | [
"MIT"
] | iscumd/KiloOhm | kohm_gazebo/launch/include/navigation/nav2/nav.launch.py | 5,122 | Python |
import json
json_settings = json.dumps([
{
"type": "numeric",
"title": "Lower Limit",
"desc": "Lowest number to be used when asking questions",
"section": "General",
"key": "lower_num"
},
{
"type": "numeric",
"title": "Upper Limit",
"desc": "Highest number to be used when asking questions",
"section": "General",
"key": "upper_num"
}
])
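# Illustrative use inside a Kivy App (assumes the app overrides build_settings and
# its ConfigParser has a [General] section with lower_num/upper_num keys):
#
#   def build_settings(self, settings):
#       settings.add_json_panel('General', self.config, data=json_settings)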
| 22.894737 | 66 | 0.512644 | [
"MIT"
] | gopar/Kivy-Tutor | json_settings.py | 435 | Python |
from .mlp_q_function import MLPQFunction
from .min_q_function import MinQFunction
from .zero_q_function import ZeroQFunction
__all__ = ['MinQFunction', 'MLPQFunction', 'ZeroQFunction']
| 31 | 59 | 0.827957 | [
"MIT"
] | roosephu/boots | boots/q_function/__init__.py | 186 | Python |
from setuptools import setup
# def readme():
#     with open('README.md') as f:
#         return f.read()
setup(
name = 'cypher',
version = '0.2',
author = 'shashi',
author_email = '[email protected]',
    description = 'Password encryptor that suggests whether a password is strong or not',
#long_description = readme(),
long_description_content_type = 'text/markdown',
url = "https://github.com/walkershashi/Cypher",
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
    license = 'MIT',
packages = ['cypher'],
) | 28.25 | 90 | 0.610619 | [
"MIT"
] | walkershashi/Cypher | setup.py | 678 | Python |
# Generated by Django 3.0.8 on 2020-07-01 19:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics/')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 29.923077 | 121 | 0.645244 | [
"MIT"
] | arthtyagi/Geddit | users/migrations/0001_initial.py | 778 | Python |
"""Source URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
path('admin/', admin.site.urls),
path("", include("ColorDetection.urls")),
]
# urlpatterns += staticfiles_urlpatterns()
| 35.269231 | 77 | 0.720829 | [
"Apache-2.0"
] | normalclone/color_detection | Source/urls.py | 917 | Python |
# Lab 2 Linear Regression
import tensorflow as tf
tf.set_random_seed(777)  # set the random seed
# training data
x_train = [1, 2, 3]
y_train = [1, 2, 3]
# we already know the regression result should be W = 1, b = 0
# but let's train it with tensorflow and see!!
# how will W and b come out?
# tf.Variable() : a variable that tensorflow uses (trainable variable)
# tf.random_normal([1]) : draw 1 random number from a normal dist
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# Linear regression model
hypothesis = x_train * W + b
# cost/loss function (MSE)
# tf.square() : tf function that squares its input
# tf.reduce_mean() : tf function that computes the mean
# hypothesis(y_hat), y_train(true value)
cost = tf.reduce_mean(tf.square(hypothesis - y_train))
# GradientDescent
# Minimize
# train with learning rate=0.01 => gradient descent moves W and b gradually toward the true values
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
# create a session object (tf graph object)
sess = tf.Session()
# initialize all tf variables
sess.run(tf.global_variables_initializer())
# Fit
# run the optimization 2001 times!!!
for step in range(2001):
sess.run(train)
    if step % 20 == 0: # print only every 20 steps; printing all of them would be too much
        # step (iteration number), cost (mse), W (weight), b (bias)
print(step, sess.run(cost), sess.run(W), sess.run(b))
# Learns best fit W:[ 1.], b:[ 0.]
'''
0 2.82329 [ 2.12867713] [-0.85235667]
20 0.190351 [ 1.53392804] [-1.05059612]
40 0.151357 [ 1.45725465] [-1.02391243]
...
1920 1.77484e-05 [ 1.00489295] [-0.01112291]
1940 1.61197e-05 [ 1.00466311] [-0.01060018]
1960 1.46397e-05 [ 1.004444] [-0.01010205]
1980 1.32962e-05 [ 1.00423515] [-0.00962736]
2000 1.20761e-05 [ 1.00403607] [-0.00917497]
''' | 27.983051 | 72 | 0.677771 | [
"MIT"
] | statKim/TIL | Python/tensorflow/DeepLearningZeroToAll/ver.py/Lab02-1-linear_regression.py | 1,879 | Python |
import torch.nn as nn
from .registry import Registry
COMPRESSION_MODULES = Registry('compression modules')
class ProxyModule:
    """Lightweight proxy that forwards attribute access to the wrapped module."""
def __init__(self, module):
self._module = module
def __getattr__(self, name):
return getattr(self._module, name)
class _NNCFModuleMixin:
    """Mixin that adds registrable pre-/post-forward compression operations to an nn.Module subclass."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
_NNCFModuleMixin.add_mixin_fields(self)
@staticmethod
def add_mixin_fields(obj):
obj.pre_ops = nn.ModuleDict()
obj.post_ops = nn.ModuleDict()
def get_pre_op(self, key):
return self.pre_ops[key]
def get_post_op(self, key):
return self.post_ops[key]
def register_pre_forward_operation(self, op):
key = str(len(self.pre_ops))
self.pre_ops[key] = op
return key
def remove_pre_forward_operation(self, key):
return self.pre_ops.pop(key)
def register_post_forward_operation(self, op):
key = str(len(self.post_ops))
self.post_ops[key] = op
return key
def remove_post_forward_operation(self, key):
return self.post_ops.pop(key)
def forward(self, *args):
proxy_module = ProxyModule(self)
for op in self.pre_ops.values():
op_args = op(proxy_module, args)
if op_args is not None:
if not isinstance(op_args, tuple):
op_args = tuple([op_args])
args = op_args
results = super().forward.__func__(proxy_module, *args)
for op in self.post_ops.values():
op_results = op(proxy_module, results)
if op_results is not None:
results = op_results
return results
| 27.725806 | 63 | 0.623618 | [
"Apache-2.0"
] | AbraInsight/nncf_pytorch | nncf/layer_utils.py | 1,719 | Python |
"""
"""
import unittest
from unittest.mock import Mock, patch
from wheezy.core import __version__, httpclient
from wheezy.core.gzip import compress
class HTTPClientTestCase(unittest.TestCase):
def setUp(self):
self.patcher = patch.object(httpclient, "HTTPConnection")
self.mock_c_class = self.patcher.start()
self.headers = [("date", "Sat, 12 Oct 2013 18:29:13 GMT")]
self.mock_response = Mock()
self.mock_response.getheaders.return_value = self.headers
self.mock_response.read.return_value = "".encode("utf-8")
self.mock_c = Mock()
self.mock_c.getresponse.return_value = self.mock_response
self.mock_c_class.return_value = self.mock_c
self.client = httpclient.HTTPClient(
"http://localhost:8080/api/v1/",
headers={"User-Agent": "wheezy/%s" % __version__},
)
def tearDown(self):
self.patcher.stop()
def test_init(self):
self.mock_c_class.assert_called_once_with("localhost:8080")
assert "/api/v1/" == self.client.path
assert {} == self.client.cookies
assert self.client.headers is None
def test_get(self):
self.mock_response.status = 200
assert 200 == self.client.get("auth/token")
assert self.mock_c.connect.called
assert self.mock_c.request.called
method, path, body, headers = self.mock_c.request.call_args[0]
assert "GET" == method
assert "/api/v1/auth/token" == path
assert "" == body
assert self.client.default_headers == headers
assert "gzip" == headers["Accept-Encoding"]
assert "close" == headers["Connection"]
assert 3 == len(headers)
def test_ajax_get(self):
self.client.ajax_get("auth/token")
method, path, body, headers = self.mock_c.request.call_args[0]
assert "XMLHttpRequest" == headers["X-Requested-With"]
def test_get_query(self):
self.client.get("auth/token", params={"a": ["1"]})
method, path, body, headers = self.mock_c.request.call_args[0]
assert "/api/v1/auth/token?a=1" == path
def test_head(self):
self.client.head("auth/token")
method, path, body, headers = self.mock_c.request.call_args[0]
assert "HEAD" == method
def test_post(self):
self.client.post(
"auth/token",
params={
"a": ["1"],
},
)
method, path, body, headers = self.mock_c.request.call_args[0]
assert "POST" == method
assert "/api/v1/auth/token" == path
assert "a=1" == body
assert "application/x-www-form-urlencoded" == headers["Content-Type"]
def test_ajax_post(self):
self.client.ajax_post("auth/token", params={"a": ["1"]})
assert self.mock_c.request.called
method, path, body, headers = self.mock_c.request.call_args[0]
assert "XMLHttpRequest" == headers["X-Requested-With"]
def test_post_content(self):
self.client.ajax_post(
"auth/token", content_type="application/json", body='{"a":1}'
)
assert self.mock_c.request.called
method, path, body, headers = self.mock_c.request.call_args[0]
assert "application/json" == headers["Content-Type"]
assert '{"a":1}' == body
def test_follow(self):
self.mock_response.status = 303
self.headers.append(("location", "http://localhost:8080/error/401"))
assert 303 == self.client.get("auth/token")
self.client.follow()
method, path, body, headers = self.mock_c.request.call_args[0]
assert "GET" == method
assert "/error/401" == path
def test_cookies(self):
self.headers.append(("set-cookie", "_x=1; path=/; httponly"))
self.client.get("auth/token")
assert self.client.cookies
assert "1" == self.client.cookies["_x"]
self.headers.append(("set-cookie", "_x=; path=/; httponly"))
self.client.get("auth/token")
assert not self.client.cookies
def test_assert_json(self):
"""Expecting json response but content type is not valid."""
self.headers.append(("content-type", "text/html; charset=UTF-8"))
self.client.get("auth/token")
self.assertRaises(AssertionError, lambda: self.client.json)
def test_json(self):
"""json response."""
patcher = patch.object(httpclient, "json_loads")
mock_json_loads = patcher.start()
mock_json_loads.return_value = {}
self.headers.append(
("content-type", "application/json; charset=UTF-8")
)
self.mock_response.read.return_value = "{}".encode("utf-8")
self.client.get("auth/token")
assert {} == self.client.json
patcher.stop()
def test_gzip(self):
"""Ensure gzip decompression."""
self.headers.append(("content-encoding", "gzip"))
self.mock_response.read.return_value = compress("test".encode("utf-8"))
self.client.get("auth/token")
assert "test" == self.client.content
def test_etag(self):
"""ETag processing."""
self.headers.append(("etag", '"ca231fbc"'))
self.client.get("auth/token")
method, path, body, headers = self.mock_c.request.call_args[0]
assert "If-None-Match" not in headers
assert '"ca231fbc"' == self.client.etags["/api/v1/auth/token"]
self.client.get("auth/token")
method, path, body, headers = self.mock_c.request.call_args[0]
assert '"ca231fbc"' == headers["If-None-Match"]
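# --- Illustrative sketch (added; not a unit test): typical non-mocked use of
# HTTPClient, mirroring only the calls exercised by the tests above. The base
# URL is hypothetical and no request is made unless this helper is called.
def example_client_usage():  # pragma: no cover
    client = httpclient.HTTPClient(
        "http://localhost:8080/api/v1/",
        headers={"User-Agent": "wheezy/%s" % __version__},
    )
    status = client.get("auth/token", params={"a": ["1"]})
    # cookies and etags captured from the response are reused automatically
    # on subsequent calls (see the cookie and etag tests above).
    return status, client.content, dict(client.cookies)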
| 37.844595 | 79 | 0.612926 | [
"MIT"
] | akornatskyy/wheezy.core | src/wheezy/core/tests/test_httpclient.py | 5,601 | Python |
# Generic CNN classifier that uses a geojson file and gbdx imagery to classify chips
import numpy as np
import os, random
import json, geojson
from mltools import geojson_tools as gt
from mltools.data_extractors import get_data_from_polygon_list as get_chips
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.models import Sequential, model_from_json
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
class PoolNet(object):
'''
Convolutional Neural Network model to classify chips as pool/no pool
INPUT classes (list [str]): Classes to train model on, exactly as they appear in
the properties of any geojsons used for training. Defaults to pool
classes: ['No swimming pool', 'Swimming pool'].
batch_size (int): Amount of images to use for each batch during training.
Defaults to 32.
input_shape (tuple[int]): Shape of input chips with theano dimensional
ordering (n_channels, height, width). Height and width must be equal. If
            an old model is loaded (model_name is not None), input shape will be
automatically set from the architecture and does not need to be specified.
Defaults to (3,125,125).
        model_name (str): Name of previous model to load (not including file
extension). There should be a json architecture file and HDF5 ('.h5')
weights file in the working directory under this name. If None, a new
model will be compiled for training. Defaults to None.
        learning_rate (float): Learning rate for the first round of training. Defaults
to 0.001
small_model (bool): Use a model with nine layers instead of 16. Will train
faster but may be less accurate and cannot be used with large chips.
Defaults to False.
kernel_size (int): Size (in pixels) of the kernels to use at each
convolutional layer of the network. Defaults to 3 (standard for VGGNet).
'''
def __init__(self, classes=['No swimming pool', 'Swimming pool'], batch_size=32,
input_shape=(3, 125, 125), small_model=False, model_name=None,
learning_rate = 0.001, kernel_size=3):
self.nb_classes = len(classes)
self.classes = classes
self.batch_size = batch_size
self.small_model = small_model
self.input_shape = input_shape
self.lr = learning_rate
self.kernel_size = kernel_size
self.cls_dict = {classes[i]: i for i in xrange(len(self.classes))}
if model_name:
self.model_name = model_name
self.model = self._load_model_architecture(model_name)
self.model.load_weights(model_name + '.h5')
self.input_shape = self.model.input_shape
elif self.small_model:
self.model = self._small_model()
else:
self.model = self._VGG_16()
self.model_layer_names = [self.model.layers[i].get_config()['name']
for i in range(len(self.model.layers))]
def _VGG_16(self):
'''
Implementation of VGG 16-layer net.
'''
print 'Compiling VGG Net...'
model = Sequential()
model.add(ZeroPadding2D((1,1), input_shape=self.input_shape))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,activation='relu',
input_shape=self.input_shape))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(self.nb_classes, activation='softmax'))
sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='categorical_crossentropy')
return model
def _small_model(self):
'''
Alternative model architecture with fewer layers for computationally expensive
training datasets
'''
print 'Compiling Small Net...'
model = Sequential()
model.add(ZeroPadding2D((1,1), input_shape=self.input_shape))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,activation='relu',
input_shape=self.input_shape))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(self.nb_classes, activation='softmax'))
sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='categorical_crossentropy')
return model
def _load_model_architecture(self, model_name):
'''
        Load a model architecture from a json file
INPUT model_name (str): Name of model to load
OUTPUT Loaded model architecture
'''
print 'Loading model {}'.format(self.model_name)
#load model
with open(model_name + '.json') as f:
mod = model_from_json(json.load(f))
return mod
def save_model(self, model_name):
'''
        Saves model architecture as a json file and current weights as an HDF5 file
        INPUT   model_name (str): Name under which to save the architecture and weights.
This should not include the file extension.
'''
# Save architecture
arch, arch_json = '{}.json'.format(model_name), self.model.to_json()
with open(arch, 'w') as f:
json.dump(arch_json, f)
# Save weights
weights = '{}.h5'.format(model_name)
self.model.save_weights(weights)
def fit_from_geojson(self, train_geojson, max_side_dim=None, min_side_dim=0,
chips_per_batch=5000, train_size=10000, validation_split=0.1,
bit_depth=8, save_model=None, nb_epoch=10,
shuffle_btwn_epochs=True, return_history=False,
save_all_weights=True, retrain=False, learning_rate_2=0.01):
'''
Fit a model from a geojson file with training data. This method iteratively
yields large batches of chips to train on for each epoch. Please ensure that
your current working directory contains all imagery referenced in the
image_id property in train_geojson, and are named as follows: <image_id>.tif,
where image_id is the catalog id of the image.
INPUT train_geojson (string): Filename for the training data (must be a
geojson). The geojson must be filtered such that all polygons are of
valid size (as defined by max_side_dim and min_side_dim)
max_side_dim (int): Maximum acceptable side dimension (in pixels) for a
chip. If None, defaults to input_shape[-1]. If larger than the
input shape the chips extracted will be downsampled to match the
input shape. Defaults to None.
min_side_dim (int): Minimum acceptable side dimension (in pixels) for a
chip. Defaults to 0.
chips_per_batch (int): Number of chips to yield per batch. Must be small
enough to fit into memory. Defaults to 5000 (decrease for larger
input sizes).
train_size (int): Number of chips to use for training data.
validation_split (float): Proportion of training chips to use as validation
data. Defaults to 0.1.
bit_depth (int): Bit depth of the image strips from which training chips
are extracted. Defaults to 8 (standard for DRA'ed imagery).
save_model (string): Name of model for saving. if None, does not save
model to disk. Defaults to None
nb_epoch (int): Number of epochs to train for. Each epoch will be trained
on batches * batches_per_epoch chips. Defaults to 10.
shuffle_btwn_epochs (bool): Shuffle the features in train_geojson
between each epoch. Defaults to True.
return_history (bool): Return a list containing metrics from past epochs.
Defaults to False.
save_all_weights (bool): Save model weights after each epoch. A directory
called models will be created in the working directory. Defaults to
True.
retrain (bool): Freeze all layers except final softmax to retrain only
the final weights of the model. Defaults to False
learning_rate_2 (float): Learning rate for the second round of training.
Only relevant if retrain is True. Defaults to 0.01.
OUTPUT trained model, history
'''
resize_dim, validation_data, full_hist = None, None, []
# load geojson training polygons
with open(train_geojson) as f:
polygons = geojson.load(f)['features'][:train_size]
if len(polygons) < train_size:
raise Exception('Not enough polygons to train on. Please add more training ' \
'data or decrease train_size.')
# Determine size of chips to extract and resize dimension
if not max_side_dim:
max_side_dim = self.input_shape[-1]
elif max_side_dim != self.input_shape[-1]:
resize_dim = self.input_shape # resize chips to match input shape
# Recompile model with retrain params
if retrain:
for i in xrange(len(self.model.layers[:-1])):
self.model.layers[i].trainable = False
sgd = SGD(lr=learning_rate_2, momentum=0.9, nesterov=True)
            self.model.compile(loss='categorical_crossentropy', optimizer=sgd)
# Set aside validation data
if validation_split > 0:
val_size = int(validation_split * train_size)
val_data, polygons = polygons[: val_size], polygons[val_size: ]
train_size = len(polygons)
# extract validation chips
print 'Getting validation data...\n'
valX, valY = get_chips(val_data, min_side_dim=min_side_dim,
max_side_dim=max_side_dim, classes=self.classes,
normalize=True, return_labels=True, mask=True,
bit_depth=bit_depth, show_percentage=True,
assert_all_valid=True, resize_dim=resize_dim)
validation_data = (valX, valY)
# Train model
for e in range(nb_epoch):
print 'Epoch {}/{}'.format(e + 1, nb_epoch)
            # Make callback and directory for saved weights
if save_all_weights:
chk = ModelCheckpoint(filepath="./models/epoch" + str(e) + \
"_{val_loss:.2f}.h5", verbose=1,
save_weights_only=True)
if 'models' not in os.listdir('.'):
os.makedirs('models')
if shuffle_btwn_epochs:
np.random.shuffle(polygons)
# Cycle through batches of chips and train
for batch_start in range(0, train_size, chips_per_batch):
callbacks = []
this_batch = polygons[batch_start: batch_start + chips_per_batch]
# Get chips from batch
X, Y = get_chips(this_batch, min_side_dim=min_side_dim,
max_side_dim=max_side_dim, classes=self.classes,
normalize=True, return_labels=True, mask=True,
bit_depth=bit_depth, show_percentage=False,
assert_all_valid=True, resize_dim=resize_dim)
# Save weights if this is the final batch in the epoch
if batch_start == range(0, train_size, chips_per_batch)[-1]:
callbacks = [chk]
# Fit the model on this batch
hist = self.model.fit(X, Y, batch_size=self.batch_size, nb_epoch=1,
validation_data=validation_data,
callbacks=callbacks)
# Dict recording loss and val_loss after each epoch
full_hist.append(hist.history)
if save_model:
self.save_model(save_model)
if return_history:
return full_hist
def fit_xy(self, X_train, Y_train, validation_split=0.1, save_model=None,
nb_epoch=10, shuffle_btwn_epochs=True, return_history=False,
save_all_weights=True, retrain=False, learning_rate_2=0.01):
'''
Fit model on training chips already loaded into memory
INPUT X_train (array): Training chips with the following dimensions:
(train_size, num_channels, rows, cols). Dimensions of each chip
should match the input_size to the model.
Y_train (list): One-hot encoded labels to X_train with dimensions as
follows: (train_size, n_classes)
validation_split (float): Proportion of X_train to validate on while
training.
save_model (string): Name under which to save model. if None, does not
                save model. Defaults to None.
nb_epoch (int): Number of training epochs to complete
shuffle_btwn_epochs (bool): Shuffle the features in train_geojson
between each epoch. Defaults to True.
return_history (bool): Return a list containing metrics from past epochs.
Defaults to False.
save_all_weights (bool): Save model weights after each epoch. A directory
called models will be created in the working directory. Defaults to
True.
retrain (bool): Freeze all layers except final softmax to retrain only
the final weights of the model. Defaults to False
learning_rate_2 (float): Learning rate for the second round of training.
Only relevant if retrain is True. Defaults to 0.01.
OUTPUT trained Keras model.
'''
callbacks = []
# Recompile model with retrain params
if retrain:
for i in xrange(len(self.model.layers[:-1])):
self.model.layers[i].trainable = False
sgd = SGD(lr=learning_rate_2, momentum=0.9, nesterov=True)
            self.model.compile(loss='categorical_crossentropy', optimizer=sgd)
# Define callback to save weights after each epoch
if save_all_weights:
chk = ModelCheckpoint(filepath="./models/ch_{epoch:02d}-{val_loss:.2f}.h5",
verbose=1, save_weights_only=True)
callbacks = [chk]
# Fit model
hist = self.model.fit(X_train, Y_train, validation_split=validation_split,
callbacks=callbacks, nb_epoch=nb_epoch,
shuffle=shuffle_btwn_epochs)
if save_model:
self.save_model(save_model)
if return_history:
return hist
def classify_geojson(self, target_geojson, output_name, max_side_dim=None,
min_side_dim=0, numerical_classes=True, chips_in_mem=5000,
bit_depth=8):
'''
Use the current model and weights to classify all polygons in target_geojson. The
output file will have a 'CNN_class' property with the net's classification
result, and a 'certainty' property with the net's certainty in the assigned
classification.
Please ensure that your current working directory contains all imagery referenced
in the image_id property in target_geojson, and are named as follows:
<image_id>.tif, where image_id is the catalog id of the image.
INPUT target_geojson (string): Name of the geojson to classify. This file
should only contain chips with side dimensions between min_side_dim
and max_side_dim (see below).
output_name (string): Name under which to save the classified geojson.
max_side_dim (int): Maximum acceptable side dimension (in pixels) for a
chip. If None, defaults to input_shape[-1]. If larger than the
input shape the chips extracted will be downsampled to match the
input shape. Defaults to None.
min_side_dim (int): Minimum acceptable side dimension (in pixels) for a
chip. Defaults to 0.
numerical_classes (bool): Make output classifications correspond to the
indicies (base 0) of the 'classes' attribute. If False, 'CNN_class'
is a string with the class name. Defaults to True.
chips_in_mem (int): Number of chips to load in memory at once. Decrease
this parameter for larger chip sizes. Defaults to 5000.
bit_depth (int): Bit depth of the image strips from which training chips
are extracted. Defaults to 8 (standard for DRA'ed imagery).
'''
resize_dim, yprob, ytrue = None, [], []
# Determine size of chips to extract and resize dimension
if not max_side_dim:
max_side_dim = self.input_shape[-1]
elif max_side_dim != self.input_shape[-1]:
resize_dim = self.input_shape # resize chips to match input shape
# Format output filename
if not output_name.endswith('.geojson'):
output_name = '{}.geojson'.format(output_name)
# Get polygon list from geojson
with open(target_geojson) as f:
features = geojson.load(f)['features']
        # Classify in batches of chips_in_mem
for ix in xrange(0, len(features), chips_in_mem):
this_batch = features[ix: (ix + chips_in_mem)]
try:
X = get_chips(this_batch, min_side_dim=min_side_dim,
max_side_dim=max_side_dim, classes=self.classes,
normalize=True, return_labels=False,
bit_depth=bit_depth, mask=True, show_percentage=False,
assert_all_valid=True, resize_dim=resize_dim)
except (AssertionError):
raise ValueError('Please filter the input geojson file using ' \
                                 'geojson_tools.filter_geojson() and ensure all ' \
'polygons are valid before using this method.')
# Predict classes of test data
yprob += list(self.model.predict_proba(X))
# Get predicted classes and certainty
yhat = [np.argmax(i) for i in yprob]
ycert = [str(np.max(j)) for j in yprob]
if not numerical_classes:
yhat = [self.classes[i] for i in yhat]
# Update geojson, save as output_name
data = zip(yhat, ycert)
property_names = ['CNN_class', 'certainty']
gt.write_properties_to(data, property_names=property_names,
input_file=target_geojson, output_file=output_name)
# Tools for analyzing network performance
def x_to_rgb(X):
'''
Transform a normalized (3,h,w) image (theano ordering) to a (h,w,3) rgb image
(tensor flow).
Use this to view or save rgb polygons as images.
    INPUT   (1) 3d array 'X': original chip with theano dimensional ordering (3, h, w)
    OUTPUT  (1) 3d array: rgb image in tensorflow dim-ordering (h,w,3)
'''
rgb_array = np.zeros((X.shape[1], X.shape[2], 3), 'uint8')
rgb_array[...,0] = X[0] * 255
rgb_array[...,1] = X[1] * 255
rgb_array[...,2] = X[2] * 255
return rgb_array
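# --- Illustrative end-to-end sketch (added for clarity; filenames are
# hypothetical). Assumes 'train.geojson' / 'target.geojson' are pre-filtered
# with mltools.geojson_tools and that the referenced <image_id>.tif strips sit
# in the working directory, as required by the docstrings above.
def example_pool_net_workflow():
    net = PoolNet(classes=['No swimming pool', 'Swimming pool'],
                  batch_size=32, input_shape=(3, 125, 125))
    # Train straight from the geojson, saving architecture + weights to disk
    net.fit_from_geojson('train.geojson', train_size=5000, nb_epoch=2,
                         save_model='pool_net_v1')
    # Classify unseen polygons; writes CNN_class / certainty properties
    net.classify_geojson('target.geojson', output_name='classified_pools')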
| 46.923541 | 91 | 0.595086 | [
"MIT"
] | DigitalGlobe/mltools | examples/polygon_classify_cnn/pool_net.py | 23,321 | Python |
import six
from smqtk.representation import DescriptorIndex, get_data_element_impls
from smqtk.utils import merge_dict, plugin, SimpleTimer
try:
from six.moves import cPickle as pickle
except ImportError:
import pickle
class MemoryDescriptorIndex (DescriptorIndex):
"""
In-memory descriptor index with file caching.
Stored descriptor elements are all held in memory in a uuid-to-element
dictionary (hash table).
If the path to a file cache is provided, it is loaded at construction if it
exists. When elements are added to the index, the in-memory table is dumped
to the cache.
"""
@classmethod
def is_usable(cls):
"""
Check whether this class is available for use.
:return: Boolean determination of whether this implementation is usable.
:rtype: bool
"""
# no dependencies
return True
@classmethod
def get_default_config(cls):
"""
Generate and return a default configuration dictionary for this class.
This will be primarily used for generating what the configuration
dictionary would look like for this class without instantiating it.
By default, we observe what this class's constructor takes as arguments,
turning those argument names into configuration dictionary keys. If any
of those arguments have defaults, we will add those values into the
configuration dictionary appropriately. The dictionary returned should
only contain JSON compliant value types.
        It is not guaranteed that the configuration dictionary returned
from this method is valid for construction of an instance of this class.
:return: Default configuration dictionary for the class.
:rtype: dict
"""
c = super(MemoryDescriptorIndex, cls).get_default_config()
c['cache_element'] = plugin.make_config(get_data_element_impls())
return c
@classmethod
def from_config(cls, config_dict, merge_default=True):
"""
Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
:param config_dict: JSON compliant dictionary encapsulating
a configuration.
:type config_dict: dict
:param merge_default: Merge the given configuration on top of the
default provided by ``get_default_config``.
:type merge_default: bool
:return: Constructed instance from the provided config.
:rtype: MemoryDescriptorIndex
"""
if merge_default:
config_dict = merge_dict(cls.get_default_config(), config_dict)
# Optionally construct cache element from sub-config.
if config_dict['cache_element'] \
and config_dict['cache_element']['type']:
e = plugin.from_plugin_config(config_dict['cache_element'],
get_data_element_impls())
config_dict['cache_element'] = e
else:
config_dict['cache_element'] = None
return super(MemoryDescriptorIndex, cls).from_config(config_dict, False)
def __init__(self, cache_element=None, pickle_protocol=-1):
"""
Initialize a new in-memory descriptor index, or reload one from a
cache.
:param cache_element: Optional data element cache, loading an existing
index if the element has bytes. If the given element is writable,
new descriptors added to this index are cached to the element.
:type cache_element: None | smqtk.representation.DataElement
:param pickle_protocol: Pickling protocol to use when serializing index
table to the optionally provided, writable cache element. We will
use -1 by default (latest version, probably a binary form).
:type pickle_protocol: int
"""
super(MemoryDescriptorIndex, self).__init__()
# Mapping of descriptor UUID to the DescriptorElement instance.
#: :type: dict[collections.Hashable, smqtk.representation.DescriptorElement]
self._table = {}
# Record of optional file cache we're using
self.cache_element = cache_element
self.pickle_protocol = pickle_protocol
if cache_element and not cache_element.is_empty():
self._log.debug("Loading cached descriptor index table from %s "
"element.", cache_element.__class__.__name__)
self._table = pickle.loads(cache_element.get_bytes())
def get_config(self):
c = merge_dict(self.get_default_config(), {
"pickle_protocol": self.pickle_protocol,
})
if self.cache_element:
merge_dict(c['cache_element'],
plugin.to_plugin_config(self.cache_element))
return c
def cache_table(self):
if self.cache_element and self.cache_element.writable():
with SimpleTimer("Caching descriptor table", self._log.debug):
self.cache_element.set_bytes(pickle.dumps(self._table,
self.pickle_protocol))
def count(self):
return len(self._table)
def clear(self):
"""
Clear this descriptor index's entries.
"""
self._table = {}
self.cache_table()
def has_descriptor(self, uuid):
"""
Check if a DescriptorElement with the given UUID exists in this index.
:param uuid: UUID to query for
:type uuid: collections.Hashable
:return: True if a DescriptorElement with the given UUID exists in this
index, or False if not.
:rtype: bool
"""
return uuid in self._table
def add_descriptor(self, descriptor, no_cache=False):
"""
Add a descriptor to this index.
Adding the same descriptor multiple times should not add multiple
copies of the descriptor in the index.
:param descriptor: Descriptor to index.
:type descriptor: smqtk.representation.DescriptorElement
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
"""
self._table[descriptor.uuid()] = descriptor
if not no_cache:
self.cache_table()
def add_many_descriptors(self, descriptors):
"""
Add multiple descriptors at one time.
:param descriptors: Iterable of descriptor instances to add to this
index.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement]
"""
added_something = False
for d in descriptors:
# using no-cache so we don't trigger multiple file writes
self.add_descriptor(d, no_cache=True)
added_something = True
if added_something:
self.cache_table()
def get_descriptor(self, uuid):
"""
Get the descriptor in this index that is associated with the given UUID.
:param uuid: UUID of the DescriptorElement to get.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:return: DescriptorElement associated with the queried UUID.
:rtype: smqtk.representation.DescriptorElement
"""
return self._table[uuid]
def get_many_descriptors(self, uuids):
"""
Get an iterator over descriptors associated to given descriptor UUIDs.
:param uuids: Iterable of descriptor UUIDs to query for.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
:return: Iterator of descriptors associated to given uuid values.
:rtype: __generator[smqtk.representation.DescriptorElement]
"""
for uid in uuids:
yield self._table[uid]
def remove_descriptor(self, uuid, no_cache=False):
"""
Remove a descriptor from this index by the given UUID.
:param uuid: UUID of the DescriptorElement to remove.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
"""
del self._table[uuid]
if not no_cache:
self.cache_table()
def remove_many_descriptors(self, uuids):
"""
Remove descriptors associated to given descriptor UUIDs from this
index.
:param uuids: Iterable of descriptor UUIDs to remove.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
"""
for uid in uuids:
# using no-cache so we don't trigger multiple file writes
self.remove_descriptor(uid, no_cache=True)
self.cache_table()
def iterkeys(self):
return six.iterkeys(self._table)
def iterdescriptors(self):
return six.itervalues(self._table)
def iteritems(self):
return six.iteritems(self._table)
DESCRIPTOR_INDEX_CLASS = MemoryDescriptorIndex
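# --- Illustrative usage sketch (added for clarity). `some_descriptor` stands
# in for any smqtk DescriptorElement instance (e.g. one produced by a
# DescriptorGenerator); no particular element implementation is assumed.
def _example_memory_index_usage(some_descriptor):
    index = MemoryDescriptorIndex()                # purely in-memory, no cache element
    index.add_descriptor(some_descriptor)
    assert index.has_descriptor(some_descriptor.uuid())
    fetched = index.get_descriptor(some_descriptor.uuid())
    index.remove_descriptor(some_descriptor.uuid())
    return fetched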
| 34.992857 | 84 | 0.651051 | [
"BSD-3-Clause"
] | cdeepakroy/SMQTK | python/smqtk/representation/descriptor_index/memory.py | 9,798 | Python |
import cv2
import glob
import os
if __name__ == "__main__":
img_path = "/workspace/mnt/storage/yankai/test_cephfs/YOLOX/datasets/ducha_det/labels/val"
save_size_path = "/workspace/mnt/storage/yankai/test_cephfs/YOLOX/datasets/ducha_det/sizes/val"
if not os.path.exists(save_size_path):
os.makedirs(save_size_path)
img_labels = glob.glob(img_path + "/*.txt")
for i, img_label in enumerate(img_labels):
if i % 100 == 0:
print(i)
        # try each possible extension; the original chained .replace() calls could only ever match '.jpg'
        img_base = img_label.replace('labels', 'images')
        for ext in ('.jpg', '.png', '.jpeg'):
            img_path = img_base.replace('.txt', ext)
            if os.path.exists(img_path):
                break
assert os.path.exists(img_path)
img = cv2.imread(img_path)
assert img is not None
h, w, _ = img.shape
save_size_path = img_label.replace('labels', 'sizes')
with open(save_size_path, 'w') as txt_write:
txt_write.writelines("{} {}".format(h, w))
| 39.434783 | 125 | 0.643881 | [
"Apache-2.0"
] | yankai317/YOLOX | tools/add_img_size.py | 907 | Python |
from typing import Optional
from pydantic import AnyHttpUrl, Field, IPvAnyAddress
from app.schemas.base import AbstractParentResourceModel
from app.schemas.mixin import TimestampMixin
class Script(AbstractParentResourceModel, TimestampMixin):
"""Script"""
url: AnyHttpUrl = Field(...)
ip_address: Optional[IPvAnyAddress] = Field(None)
| 25.142857 | 58 | 0.784091 | [
"MIT"
] | ninoseki/uzen | app/schemas/script.py | 352 | Python |
def conv(T,taille):
    # conv (list(list(bool)) * int -> list(list(int)))
    # Converts a 2-dimensional array of booleans into a 2-dimensional array of integers such that True = 1 and False = 0
    # T (list(list(bool))) : 2-dimensional array containing booleans
    # taille (int) : size of the 2-dimensional array
    # Initialisation and processing
    # tableau (list(list(int))) : 2-dimensional array containing integers
    # While traversing the array T, we build the array tableau following the rule True = 1 and False = 0
tableau = [[0 if T[i][j] == False else 1 for j in range(taille)] for i in range(taille)]
return tableau
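# Illustrative check (added): a 2x2 boolean grid maps to 0/1 entries.
if __name__ == '__main__':
    assert conv([[True, False], [False, True]], 2) == [[1, 0], [0, 1]]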
| 54.538462 | 142 | 0.682652 | [
"MIT"
] | Erwanexyz/Python | conv_tableau_2_dimensions_bool_int.py | 719 | Python |
"""Definitions for all core text instructions."""
from pyshgp.push.type_library import PushTypeLibrary
from pyshgp.push.instruction import SimpleInstruction, ProducesManyOfTypeInstruction
from pyshgp.push.types import Char
from pyshgp.utils import Token
def _concat(a, b):
return str(b) + str(a),
def _first_char(s):
if len(s) == 0:
return Token.revert
return s[0],
def _last_char(s):
if len(s) == 0:
return Token.revert
return s[-1],
def _nth_char(s, ndx):
if len(s) == 0:
return Token.revert
return s[ndx % len(s)],
def _contains(s, x):
return x in s,
def _p_index(s, substr):
try:
return s.index(substr),
except ValueError:
return -1,
def _head(s, i):
if len(s) == 0:
return "",
return s[:i % len(s)],
def _tail(s, i):
if len(s) == 0:
return "",
return s[i % len(s):],
def _rest(s):
if len(s) < 2:
return "",
return s[1:],
def _but_last(s):
if len(s) < 2:
return "",
return s[:-1],
def _drop(s, i):
if len(s) == 0:
return "",
return s[i % len(s):],
def _but_last_n(s, i):
if len(s) == 0:
return "",
return s[:-(i % len(s))],
def _split_on(s, x):
if x == "":
return []
return s.split(x)
def _replace_n(s, old, new, n=1):
return s.replace(str(old), str(new), n),
def _replace_all(s, old, new):
return s.replace(str(old), str(new)),
def _remove_n(s, x, n=1):
return _replace_n(s, x, "", n)
def _remove_all(s, x):
return _replace_all(s, x, "")
def _len(s):
return len(s),
def _reverse(s):
return s[::-1],
def _make_empty():
return "",
def _is_empty(s):
return s == "",
def _occurrences_of(s, x):
return s.count(str(x)),
def _remove_nth(s, ndx):
return s[:ndx] + s[ndx + 1:],
def _set_nth(s, c, ndx):
return s[:ndx] + str(c) + s[ndx + 1:],
def _insert(s, x, ndx):
return s[:ndx] + str(x) + s[ndx:],
def _strip_whitespace(s):
return s.strip(),
# @TODO: Implement exec_string_iterate instruction.
def _is_whitespace(c):
return str(c).isspace(),
def _is_letter(c):
return str(c).isalpha(),
def _is_digit(c):
return str(c).isdigit(),
def _str_from_thing(thing):
return str(thing),
def _char_from_bool(b):
if b:
return Char("T"),
return Char("F"),
def _char_from_ascii(i):
return Char(chr(i % 128)),
def _char_from_float(f):
return _char_from_ascii(int(f))
def _all_chars(s):
return [Char(c) for c in list(s)[::-1]]
def instructions(type_library: PushTypeLibrary):
"""Return all core text instructions."""
i = []
for push_type in ["str", "char"]:
i.append(SimpleInstruction(
"{t}_concat".format(t=push_type),
_concat,
input_stacks=[push_type, push_type],
output_stacks=["str"],
code_blocks=0,
docstring="Concatenates the top two {t}s and pushes the resulting string.".format(t=push_type)
))
i.append(SimpleInstruction(
"str_insert_{t}".format(t=push_type),
_insert,
input_stacks=["str", push_type, "int"],
output_stacks=["str"],
code_blocks=0,
docstring="""Inserts {t} into the top str at index `n` and pushes
the resulting string. The value for `n` is taken from the int stack.""".format(t=push_type)
))
# Getting Characters
i.append(SimpleInstruction(
"{t}_from_first_char".format(t=push_type),
_first_char,
input_stacks=["str"],
output_stacks=[push_type],
code_blocks=0,
docstring="Pushes a {t} of the first character of the top string.".format(t=push_type)
))
i.append(SimpleInstruction(
"{t}_from_last_char".format(t=push_type),
_last_char,
input_stacks=["str"],
output_stacks=[push_type],
code_blocks=0,
docstring="Pushes a {t} of the last character of the top string.".format(t=push_type)
))
i.append(SimpleInstruction(
"{t}_from_nth_char".format(t=push_type),
_nth_char,
input_stacks=["str", "int"],
output_stacks=[push_type],
code_blocks=0,
            docstring="Pushes a {t} of the nth character of the top string. The top integer denotes the nth position.".format(t=push_type)
))
# Checking string contents
i.append(SimpleInstruction(
"str_contains_{t}".format(t=push_type),
_contains,
input_stacks=["str", push_type],
output_stacks=["bool"],
code_blocks=0,
docstring="Pushes true if the next {t} is in the top string. Pushes false otherwise.".format(t=push_type)
))
i.append(SimpleInstruction(
"str_index_of_{t}".format(t=push_type),
_p_index,
input_stacks=["str", push_type],
output_stacks=["int"],
code_blocks=0,
docstring="Pushes the index of the next {t} in the top string. If not found, pushes -1.".format(t=push_type)
))
# Splitting
    # @TODO: str_split_on_space instruction
i.append(ProducesManyOfTypeInstruction(
"str_split_on_{t}".format(t=push_type),
_split_on,
input_stacks=["str", push_type],
output_stack="str",
code_blocks=0,
docstring="Pushes multiple strs produced by splitting the top str on the top {t}.".format(t=push_type)
))
# Replacements
i.append(SimpleInstruction(
"str_replace_first_{t}".format(t=push_type),
_replace_n,
input_stacks=["str", push_type, push_type],
output_stacks=["str"],
code_blocks=0,
            docstring="""Pushes the str produced by replacing the first occurrence of the
top {t} with the second {t}.""".format(t=push_type)
))
i.append(SimpleInstruction(
"str_replace_n_{t}".format(t=push_type),
_replace_n,
input_stacks=["str", push_type, push_type, "int"],
output_stacks=["str"],
code_blocks=0,
            docstring="""Pushes the str produced by replacing the first `n` occurrences of the
top {t} with the second {t}. The value for `n` is the top int.""".format(t=push_type)
))
i.append(SimpleInstruction(
"str_replace_all_{t}".format(t=push_type),
_replace_all,
input_stacks=["str", push_type, push_type],
output_stacks=["str"],
code_blocks=0,
            docstring="""Pushes the str produced by replacing all occurrences of the
top {t} with the second {t}.""".format(t=push_type)
))
# Removals
i.append(SimpleInstruction(
"str_remove_first_{t}".format(t=push_type),
_remove_n,
input_stacks=["str", push_type],
output_stacks=["str"],
code_blocks=0,
docstring="Pushes the str produced by removing the first occurrence of the top {t}.".format(t=push_type)
))
i.append(SimpleInstruction(
"str_remove_n_{t}".format(t=push_type),
_remove_n,
input_stacks=["str", push_type, "int"],
output_stacks=["str"],
code_blocks=0,
            docstring="""Pushes the str produced by removing the first `n` occurrences of the
top {t}. The value for `n` is the top int.""".format(t=push_type)
))
i.append(SimpleInstruction(
"str_remove_all_{t}".format(t=push_type),
_remove_all,
input_stacks=["str", push_type],
output_stacks=["str"],
code_blocks=0,
docstring="Pushes the str produced by removing all occurrences of the top {t}.".format(t=push_type)
))
# Misc
i.append(SimpleInstruction(
"str_occurrences_of_{t}".format(t=push_type),
_occurrences_of,
input_stacks=["str", push_type],
output_stacks=["int"],
code_blocks=0,
docstring="Pushes the number of times the top {t} occurs in the top str to the int stack.".format(t=push_type)
))
i.append(SimpleInstruction(
"str_reverse",
_reverse,
input_stacks=["str"],
output_stacks=["str"],
code_blocks=0,
docstring="""Takes the top string and pushes it reversed."""
))
i.append(SimpleInstruction(
"str_head",
_head,
input_stacks=["str", "int"],
output_stacks=["str"],
code_blocks=0,
docstring="""Pushes a string of the first `n` characters from the top string. The value
for `n` is the top int mod the length of the string."""
))
i.append(SimpleInstruction(
"str_tail",
_tail,
input_stacks=["str", "int"],
output_stacks=["str"],
code_blocks=0,
docstring="""Pushes a string of the last `n` characters from the top string. The value
for `n` is the top int mod the length of the string."""
))
i.append(SimpleInstruction(
"str_append_char",
_concat,
input_stacks=["str", "char"],
output_stacks=["str"],
code_blocks=0,
docstring="Appends the top char to the top string pushes the resulting string."
))
i.append(SimpleInstruction(
"str_rest",
_rest,
input_stacks=["str"],
output_stacks=["str"],
code_blocks=0,
docstring="Pushes the top str without its first character."
))
i.append(SimpleInstruction(
"str_but_last",
_but_last,
input_stacks=["str"],
output_stacks=["str"],
code_blocks=0,
docstring="Pushes the top str without its last character."
))
i.append(SimpleInstruction(
"str_drop",
_drop,
input_stacks=["str", "int"],
output_stacks=["str"],
code_blocks=0,
docstring="""Pushes the top str without its first `n` character. The value for `n`
is the top int mod the length of the string."""
))
i.append(SimpleInstruction(
"str_but_last_n",
_but_last_n,
input_stacks=["str", "int"],
output_stacks=["str"],
code_blocks=0,
docstring="""Pushes the top str without its last `n` character. The value for `n`
is the top int mod the length of the string."""
))
i.append(SimpleInstruction(
"str_length",
_len,
input_stacks=["str"],
output_stacks=["int"],
code_blocks=0,
docstring="Pushes the length of the top str to the int stack."
))
i.append(SimpleInstruction(
"str_make_empty",
_make_empty,
input_stacks=[],
output_stacks=["str"],
code_blocks=0,
docstring="Pushes an empty string."
))
i.append(SimpleInstruction(
"str_is_empty_string",
_is_empty,
input_stacks=["str"],
output_stacks=["bool"],
code_blocks=0,
docstring="Pushes True if top string is empty. Pushes False otherwise."
))
i.append(SimpleInstruction(
"str_remove_nth",
_remove_nth,
input_stacks=["str", "int"],
output_stacks=["str"],
code_blocks=0,
docstring="Pushes the top str with the nth character removed."
))
i.append(SimpleInstruction(
"str_set_nth",
_set_nth,
input_stacks=["str", "char", "int"],
output_stacks=["str"],
code_blocks=0,
docstring="Pushes the top str with the nth character set to the top character."
))
i.append(SimpleInstruction(
"str_strip_whitespace",
_strip_whitespace,
input_stacks=["str"],
output_stacks=["str"],
code_blocks=0,
docstring="Pushes the top str with trailing and leading whitespace stripped."
))
# @TODO: Instructions for trim_left and trim_right
# @TODO: Instructions for pad_left and pad_right
# CHARACTER INSTRUCTIONS
i.append(SimpleInstruction(
"char_is_whitespace",
_is_whitespace,
input_stacks=["char"],
output_stacks=["bool"],
code_blocks=0,
docstring="Pushes True if the top Char is whitespace. Pushes False otherwise."
))
i.append(SimpleInstruction(
"char_is_letter",
_is_letter,
input_stacks=["char"],
output_stacks=["bool"],
code_blocks=0,
docstring="Pushes True if the top Char is a letter. Pushes False otherwise."
))
i.append(SimpleInstruction(
"char_is_digit",
_is_digit,
input_stacks=["char"],
output_stacks=["bool"],
code_blocks=0,
docstring="Pushes True if the top Char is a numeric digit. Pushes False otherwise."
))
# TYPE CONVERTING
for push_type in ["bool", "int", "float", "char"]:
i.append(SimpleInstruction(
"str_from_{t}".format(t=push_type),
_str_from_thing,
input_stacks=[push_type],
output_stacks=["str"],
code_blocks=0,
docstring="Pushes the top {t} converted into a str.".format(t=push_type)
))
i.append(SimpleInstruction(
"char_from_bool",
_char_from_bool,
input_stacks=["bool"],
output_stacks=["char"],
code_blocks=0,
docstring="""Pushes the char \"T\" if the top bool is True. If the top
bool is False, pushes the char \"F\"."""
))
i.append(SimpleInstruction(
"char_from_ascii_int",
_char_from_ascii,
input_stacks=["int"],
output_stacks=["char"],
code_blocks=0,
docstring="Pushes the top int converted into a Character by using the int mod 128 as an ascii value."
))
i.append(SimpleInstruction(
"char_from_float",
_char_from_float,
input_stacks=["float"],
output_stacks=["char"],
code_blocks=0,
docstring="""Pushes the top float converted into a Character by flooring
the float to an int, taking the int mod 128, and using it as an ascii value."""
))
i.append(ProducesManyOfTypeInstruction(
"chars_from_str",
_all_chars,
input_stacks=["str"],
output_stack="char",
code_blocks=0,
docstring="""Pushes each character of the top str to the char stack in reverse order."""
))
return i
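# --- Illustrative sketch (added for clarity): building the core text
# instruction set. Assumes the default PushTypeLibrary constructor registers
# the core types; `instructions` itself only needs the library object here.
if __name__ == "__main__":
    core_text_instructions = instructions(PushTypeLibrary())
    print(len(core_text_instructions), "text/char instructions defined")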
| 27.012844 | 134 | 0.577299 | [
"MIT"
] | RedBeansAndRice/pyshgp | pyshgp/push/instructions/text.py | 14,722 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK for Python setup file."""
from __future__ import absolute_import
from __future__ import print_function
import os
import platform
import sys
import warnings
from distutils.version import StrictVersion
# Pylint and isort disagree here.
# pylint: disable=ungrouped-imports
import setuptools
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
from setuptools.command.build_py import build_py
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from setuptools.command.sdist import sdist
from setuptools.command.test import test
def get_version():
global_names = {}
exec( # pylint: disable=exec-used
open(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'apache_beam/version.py')
).read(),
global_names
)
return global_names['__version__']
PACKAGE_NAME = 'apache-beam'
PACKAGE_VERSION = get_version()
PACKAGE_DESCRIPTION = 'Apache Beam SDK for Python'
PACKAGE_URL = 'https://beam.apache.org'
PACKAGE_DOWNLOAD_URL = 'https://pypi.python.org/pypi/apache-beam'
PACKAGE_AUTHOR = 'Apache Software Foundation'
PACKAGE_EMAIL = '[email protected]'
PACKAGE_KEYWORDS = 'apache beam'
PACKAGE_LONG_DESCRIPTION = '''
Apache Beam is a unified programming model for both batch and streaming
data processing, enabling efficient execution across diverse distributed
execution engines and providing extensibility points for connecting to
different technologies and user communities.
'''
REQUIRED_PIP_VERSION = '7.0.0'
_PIP_VERSION = get_distribution('pip').version
if StrictVersion(_PIP_VERSION) < StrictVersion(REQUIRED_PIP_VERSION):
warnings.warn(
"You are using version {0} of pip. " \
"However, version {1} is recommended.".format(
_PIP_VERSION, REQUIRED_PIP_VERSION
)
)
REQUIRED_CYTHON_VERSION = '0.28.1'
try:
_CYTHON_VERSION = get_distribution('cython').version
if StrictVersion(_CYTHON_VERSION) < StrictVersion(REQUIRED_CYTHON_VERSION):
warnings.warn(
"You are using version {0} of cython. " \
"However, version {1} is recommended.".format(
_CYTHON_VERSION, REQUIRED_CYTHON_VERSION
)
)
except DistributionNotFound:
# do nothing if Cython is not installed
pass
# Currently all compiled modules are optional (for performance only).
if platform.system() == 'Windows':
# Windows doesn't always provide int64_t.
cythonize = lambda *args, **kwargs: []
else:
try:
# pylint: disable=wrong-import-position
from Cython.Build import cythonize
except ImportError:
cythonize = lambda *args, **kwargs: []
REQUIRED_PACKAGES = [
'avro>=1.8.1,<2.0.0; python_version < "3.0"',
'avro-python3>=1.8.1,<2.0.0; python_version >= "3.0"',
'crcmod>=1.7,<2.0',
'dill>=0.2.9,<0.2.10',
'fastavro>=0.21.4,<0.22',
'future>=0.16.0,<1.0.0',
'futures>=3.2.0,<4.0.0; python_version < "3.0"',
'grpcio>=1.8,<2',
'hdfs>=2.1.0,<3.0.0',
'httplib2>=0.8,<=0.12.0',
'mock>=1.0.1,<3.0.0',
'oauth2client>=2.0.1,<4',
# grpcio 1.8.1 and above requires protobuf 3.5.0.post1.
'protobuf>=3.5.0.post1,<4',
# [BEAM-6287] pyarrow is not supported on Windows for Python 2
('pyarrow>=0.11.1,<0.14.0; python_version >= "3.0" or '
'platform_system != "Windows"'),
'pydot>=1.2.0,<1.3',
'pytz>=2018.3',
# [BEAM-5628] Beam VCF IO is not supported in Python 3.
'pyvcf>=0.6.8,<0.7.0; python_version < "3.0"',
'pyyaml>=3.12,<4.0.0',
'typing>=3.6.0,<3.7.0; python_version < "3.5.0"',
]
REQUIRED_TEST_PACKAGES = [
'nose>=1.3.7',
'numpy>=1.14.3,<2',
'pandas>=0.23.4,<0.24',
'parameterized>=0.6.0,<0.7.0',
'pyhamcrest>=1.9,<2.0',
'tenacity>=5.0.2,<6.0',
]
GCP_REQUIREMENTS = [
'cachetools>=3.1.0,<4',
'google-apitools>=0.5.28,<0.5.29',
# [BEAM-4543] googledatastore is not supported in Python 3.
'proto-google-cloud-datastore-v1>=0.90.0,<=0.90.4; python_version < "3.0"',
# [BEAM-4543] googledatastore is not supported in Python 3.
'googledatastore>=7.0.1,<7.1; python_version < "3.0"',
'google-cloud-datastore>=1.7.1,<2.0.0',
'google-cloud-pubsub>=0.39.0,<0.40.0',
# GCP packages required by tests
'google-cloud-bigquery>=1.6.0,<1.7.0',
'google-cloud-core>=0.28.1,<0.30.0',
'google-cloud-bigtable>=0.31.1,<0.33.0',
]
# We must generate protos after setup_requires are installed.
def generate_protos_first(original_cmd):
try:
# See https://issues.apache.org/jira/browse/BEAM-2366
# pylint: disable=wrong-import-position
import gen_protos
class cmd(original_cmd, object):
def run(self):
gen_protos.generate_proto_files()
super(cmd, self).run()
return cmd
except ImportError:
warnings.warn("Could not import gen_protos, skipping proto generation.")
return original_cmd
python_requires = '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*'
if sys.version_info[0] == 3:
warnings.warn(
'Python 3 support for the Apache Beam SDK is not yet fully supported. '
'You may encounter buggy behavior or missing features.')
setuptools.setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description=PACKAGE_DESCRIPTION,
long_description=PACKAGE_LONG_DESCRIPTION,
url=PACKAGE_URL,
download_url=PACKAGE_DOWNLOAD_URL,
author=PACKAGE_AUTHOR,
author_email=PACKAGE_EMAIL,
packages=setuptools.find_packages(),
package_data={'apache_beam': [
'*/*.pyx', '*/*/*.pyx', '*/*.pxd', '*/*/*.pxd', 'testing/data/*.yaml',
'portability/api/*.yaml']},
ext_modules=cythonize([
'apache_beam/**/*.pyx',
'apache_beam/coders/coder_impl.py',
'apache_beam/metrics/execution.py',
'apache_beam/runners/common.py',
'apache_beam/runners/worker/logger.py',
'apache_beam/runners/worker/opcounters.py',
'apache_beam/runners/worker/operations.py',
'apache_beam/transforms/cy_combiners.py',
'apache_beam/utils/counters.py',
'apache_beam/utils/windowed_value.py',
]),
install_requires=REQUIRED_PACKAGES,
python_requires=python_requires,
test_suite='nose.collector',
tests_require=REQUIRED_TEST_PACKAGES,
extras_require={
'docs': ['Sphinx>=1.5.2,<2.0'],
'test': REQUIRED_TEST_PACKAGES,
'gcp': GCP_REQUIREMENTS,
},
zip_safe=False,
# PyPI package information.
classifiers=[
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache License, Version 2.0',
keywords=PACKAGE_KEYWORDS,
entry_points={
'nose.plugins.0.10': [
'beam_test_plugin = test_config:BeamTestPlugin',
]},
cmdclass={
'build_py': generate_protos_first(build_py),
'develop': generate_protos_first(develop),
'egg_info': generate_protos_first(egg_info),
'sdist': generate_protos_first(sdist),
'test': generate_protos_first(test),
},
)
| 34.084034 | 79 | 0.677885 | [
"Apache-2.0"
] | elwinarens/beam | sdks/python/setup.py | 8,112 | Python |
import os
import requests
# configurations to be adjusted
# 1. put here URL (see textfile)
base_url = "https://data-dataref.ifremer.fr/stereo/AA_2015/2015-03-05_10-35-00_12Hz/input/cam1/"
# 2. decide which (range of) images
start = 0
end = 149
# 3. name folder to save images to, best take from url (change "/" to "_")
download_folder = 'AA_2015_2015-03-05_10-35-00_12Hz'
img_appendix = "_01" # as the dataset provides stereo, we only need mono, not to be changed
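# Illustrative check (added): how one frame's filename and URL are assembled
# from the settings above (frame 0 of camera 1). Purely string concatenation.
example_name = f'{start:06d}' + img_appendix + '.tif'   # -> "000000_01.tif"
example_url = base_url + example_name                   # full download URL for frame 0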
#create a download folder if not yet existing
current_directory = os.getcwd()
final_directory = os.path.join(current_directory, download_folder)
if not os.path.exists(final_directory):
os.makedirs(final_directory)
# run through all url to download images individually, same file name as in original dataset
#start to uncomment
# while start <= end:
# img_name = f'{start:06d}' + img_appendix + '.tif'
# #print(f"image_name is: " + img_name)
# url = base_url + img_name
# r = requests.get(url, allow_redirects=True)
# # print(f"loading url: " + url)
#
# # save image in download_folder
# path_dest = os.path.join(final_directory, img_name)
# open(path_dest, 'wb').write(r.content)
#
# start += 1
#
# print("Done")
#end to uncomment
#Alternative with .txt list (AA-Videos need it)
with open("/Users/rueskamp/Documents/Studium SE/05_WS21/Projekt_See/codebase/dataset_preparation/AA_2015_2015-03-05_10-35-00_12Hz.txt", "r") as f:
list2 = []
for item in f:
one, two = item.split(">", 1)
img_name = one
# #print(f"image_name is: " + img_name)
url = base_url + img_name
r = requests.get(url, allow_redirects=True)
# print(f"loading url: " + url)
#
# save image in download_folder
path_dest = os.path.join(final_directory, img_name)
open(path_dest, 'wb').write(r.content)
print("Done") | 34.236364 | 146 | 0.680297 | [
"MIT"
] | sea-state-estimation/sea-state-estimation | dataset_preparation/downloading.py | 1,883 | Python |
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import os
import uuid
import tempfile
import pytest
import ftrack_api.cache
@pytest.fixture(params=['proxy', 'layered', 'memory', 'file', 'serialised'])
def cache(request):
'''Return cache.'''
if request.param == 'proxy':
cache = ftrack_api.cache.ProxyCache(
ftrack_api.cache.MemoryCache()
)
elif request.param == 'layered':
cache = ftrack_api.cache.LayeredCache(
[ftrack_api.cache.MemoryCache()]
)
elif request.param == 'memory':
cache = ftrack_api.cache.MemoryCache()
elif request.param == 'file':
cache_path = os.path.join(
tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex)
)
cache = ftrack_api.cache.FileCache(cache_path)
def cleanup():
'''Cleanup.'''
try:
os.remove(cache_path)
except OSError:
# BSD DB (Mac OSX) implementation of the interface will append
# a .db extension.
os.remove(cache_path + '.db')
request.addfinalizer(cleanup)
elif request.param == 'serialised':
cache = ftrack_api.cache.SerialisedCache(
ftrack_api.cache.MemoryCache(),
encode=lambda value: value,
decode=lambda value: value
)
else:
raise ValueError(
'Unrecognised cache fixture type {0!r}'.format(request.param)
)
return cache
class Class(object):
'''Class for testing.'''
def method(self, key):
'''Method for testing.'''
def function(mutable, x, y=2):
'''Function for testing.'''
mutable['called'] = True
return {'result': x + y}
def assert_memoised_call(
memoiser, function, expected, args=None, kw=None, memoised=True
):
'''Assert *function* call via *memoiser* was *memoised*.'''
mapping = {'called': False}
if args is not None:
args = (mapping,) + args
else:
args = (mapping,)
result = memoiser.call(function, args, kw)
assert result == expected
assert mapping['called'] is not memoised
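# --- Illustrative sketch (added; not a test): memoising `function` the same
# way assert_memoised_call does. The no-argument Memoiser constructor is
# assumed here to create its own in-memory cache.
def example_memoiser_usage():
    memoiser = ftrack_api.cache.Memoiser()
    state = {'called': False}
    first = memoiser.call(function, (state, 1), {'y': 3})   # computes {'result': 4}
    state['called'] = False
    second = memoiser.call(function, (state, 1), {'y': 3})  # served from cache
    assert first == second and state['called'] is False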
def test_get(cache):
'''Retrieve item from cache.'''
cache.set('key', 'value')
assert cache.get('key') == 'value'
def test_get_missing_key(cache):
'''Fail to retrieve missing item from cache.'''
with pytest.raises(KeyError):
cache.get('key')
def test_set(cache):
'''Set item in cache.'''
with pytest.raises(KeyError):
cache.get('key')
cache.set('key', 'value')
assert cache.get('key') == 'value'
def test_remove(cache):
'''Remove item from cache.'''
cache.set('key', 'value')
cache.remove('key')
with pytest.raises(KeyError):
cache.get('key')
def test_remove_missing_key(cache):
'''Fail to remove missing key.'''
with pytest.raises(KeyError):
cache.remove('key')
def test_keys(cache):
'''Retrieve keys of items in cache.'''
assert cache.keys() == []
cache.set('a', 'a_value')
cache.set('b', 'b_value')
cache.set('c', 'c_value')
assert sorted(cache.keys()) == sorted(['a', 'b', 'c'])
def test_clear(cache):
'''Remove items from cache.'''
cache.set('a', 'a_value')
cache.set('b', 'b_value')
cache.set('c', 'c_value')
assert cache.keys()
cache.clear()
assert not cache.keys()
def test_clear_using_pattern(cache):
'''Remove items that match pattern from cache.'''
cache.set('matching_key', 'value')
cache.set('another_matching_key', 'value')
cache.set('key_not_matching', 'value')
assert cache.keys()
cache.clear(pattern='.*matching_key$')
assert cache.keys() == ['key_not_matching']
def test_clear_encountering_missing_key(cache, mocker):
'''Clear missing key.'''
# Force reporting keys that are not actually valid for test purposes.
mocker.patch.object(cache, 'keys', lambda: ['missing'])
assert cache.keys() == ['missing']
# Should not error even though key not valid.
cache.clear()
# The key was not successfully removed so should still be present.
assert cache.keys() == ['missing']
def test_layered_cache_propagates_value_on_get():
'''Layered cache propagates value on get.'''
caches = [
ftrack_api.cache.MemoryCache(),
ftrack_api.cache.MemoryCache(),
ftrack_api.cache.MemoryCache()
]
cache = ftrack_api.cache.LayeredCache(caches)
# Set item on second level cache only.
caches[1].set('key', 'value')
# Retrieving key via layered cache should propagate it automatically to
# higher level caches only.
assert cache.get('key') == 'value'
assert caches[0].get('key') == 'value'
with pytest.raises(KeyError):
caches[2].get('key')
def test_layered_cache_remove_at_depth():
'''Remove key that only exists at depth in LayeredCache.'''
caches = [
ftrack_api.cache.MemoryCache(),
ftrack_api.cache.MemoryCache()
]
cache = ftrack_api.cache.LayeredCache(caches)
# Set item on second level cache only.
caches[1].set('key', 'value')
# Removing key that only exists at depth should not raise key error.
cache.remove('key')
# Ensure key was removed.
assert not cache.keys()
def test_expand_references():
'''Test that references are expanded from serialized cache.'''
cache_path = os.path.join(
tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex)
)
def make_cache(session, cache_path):
'''Create a serialised file cache.'''
serialized_file_cache = ftrack_api.cache.SerialisedCache(
ftrack_api.cache.FileCache(cache_path),
encode=session.encode,
decode=session.decode
)
return serialized_file_cache
# Populate the serialized file cache.
session = ftrack_api.Session(
cache=lambda session, cache_path=cache_path:make_cache(
session, cache_path
)
)
expanded_results = dict()
query_string = 'select asset.parent from AssetVersion where asset is_not None limit 10'
for sequence in session.query(query_string):
asset = sequence.get('asset')
expanded_results.setdefault(
asset.get('id'), asset.get('parent')
)
# Fetch the data from cache.
new_session = ftrack_api.Session(
cache=lambda session, cache_path=cache_path:make_cache(
session, cache_path
)
)
new_session_two = ftrack_api.Session(
cache=lambda session, cache_path=cache_path:make_cache(
session, cache_path
)
)
# Make sure references are merged.
for sequence in new_session.query(query_string):
asset = sequence.get('asset')
assert (
asset.get('parent') == expanded_results[asset.get('id')]
)
# Use for fetching directly using get.
assert (
new_session_two.get(asset.entity_type, asset.get('id')).get('parent') ==
expanded_results[asset.get('id')]
)
@pytest.mark.parametrize('items, key', [
(({},), '{}'),
(({}, {}), '{}{}')
], ids=[
'single object',
'multiple objects'
])
def test_string_key_maker_key(items, key):
'''Generate key using string key maker.'''
key_maker = ftrack_api.cache.StringKeyMaker()
assert key_maker.key(*items) == key
@pytest.mark.parametrize('items, key', [
(
({},),
'\x01\x01'
),
(
({'a': 'b'}, [1, 2]),
'\x01'
'\x80\x02U\x01a.' '\x02' '\x80\x02U\x01b.'
'\x01'
'\x00'
'\x03'
'\x80\x02K\x01.' '\x00' '\x80\x02K\x02.'
'\x03'
),
(
(function,),
'\x04function\x00unit.test_cache'
),
(
(Class,),
'\x04Class\x00unit.test_cache'
),
(
(Class.method,),
'\x04method\x00Class\x00unit.test_cache'
),
(
(callable,),
'\x04callable'
)
], ids=[
'single mapping',
'multiple objects',
'function',
'class',
'method',
'builtin'
])
def test_object_key_maker_key(items, key):
    '''Generate key using object key maker.'''
key_maker = ftrack_api.cache.ObjectKeyMaker()
assert key_maker.key(*items) == key
def test_memoised_call():
'''Call memoised function.'''
memoiser = ftrack_api.cache.Memoiser()
# Initial call should not be memoised so function is executed.
assert_memoised_call(
memoiser, function, args=(1,), expected={'result': 3}, memoised=False
)
# Identical call should be memoised so function is not executed again.
assert_memoised_call(
memoiser, function, args=(1,), expected={'result': 3}, memoised=True
)
# Differing call is not memoised so function is executed.
assert_memoised_call(
memoiser, function, args=(3,), expected={'result': 5}, memoised=False
)
def test_memoised_call_variations():
    '''Call memoised function with identical arguments given in varying forms.'''
memoiser = ftrack_api.cache.Memoiser()
expected = {'result': 3}
# Call function once to ensure is memoised.
assert_memoised_call(
memoiser, function, args=(1,), expected=expected, memoised=False
)
# Each of the following calls should equate to the same key and make
# use of the memoised value.
for args, kw in [
((), {'x': 1}),
((), {'x': 1, 'y': 2}),
((1,), {'y': 2}),
((1,), {})
]:
assert_memoised_call(
memoiser, function, args=args, kw=kw, expected=expected
)
# The following calls should all be treated as new variations and so
# not use any memoised value.
assert_memoised_call(
memoiser, function, kw={'x': 2}, expected={'result': 4}, memoised=False
)
assert_memoised_call(
memoiser, function, kw={'x': 3, 'y': 2}, expected={'result': 5},
memoised=False
)
assert_memoised_call(
memoiser, function, args=(4, ), kw={'y': 2}, expected={'result': 6},
memoised=False
)
assert_memoised_call(
memoiser, function, args=(5, ), expected={'result': 7}, memoised=False
)
def test_memoised_mutable_return_value():
'''Avoid side effects for returned mutable arguments when memoising.'''
memoiser = ftrack_api.cache.Memoiser()
arguments = ({'called': False}, 1)
result_a = memoiser.call(function, arguments)
assert result_a == {'result': 3}
assert arguments[0]['called']
# Modify mutable externally and check that stored memoised value is
# unchanged.
del result_a['result']
arguments[0]['called'] = False
result_b = memoiser.call(function, arguments)
assert result_b == {'result': 3}
assert not arguments[0]['called']
| 26.01199 | 91 | 0.608648 | [
"MIT"
] | Mikfr83/OpenPype | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | 10,847 | Python |
import sys
import time
import math
import psutil
import pytest
import threading
from loky import TimeoutError
from loky import get_reusable_executor
from loky.backend import get_context
# Set a large timeout as it should only be reached in case of deadlocks
TIMEOUT = 40
_test_event = None
def initializer_event(event):
"""Initializer that set a global test event for test synchronization"""
global _test_event
_test_event = event
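# Illustrative sketch (not part of the original module): ExecutorMixin below
# wires this initializer into the executor under test, roughly as
#
#     ctx = get_context()              # tests normally supply cls.context
#     event = ctx.Event()
#     executor = executor_type(        # executor_type is set by the test class
#         max_workers=5, context=ctx,
#         initializer=initializer_event, initargs=(event,))
#
# so every worker process receives the event through the global _test_event.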
def _direct_children_with_cmdline(p):
"""Helper to fetch cmdline from children process list"""
children_with_cmdline = []
for c in p.children():
try:
cmdline = " ".join(c.cmdline())
if not c.is_running() or not cmdline:
# Under linux is_running() can return True even though
# the command line data can no longer be read from
# /proc/<pid>/cmdline. This looks like a race condition
# between /proc/<pid>/stat and /proc/<pid>/cmdline
# when the process is being terminated by the OS.
continue
children_with_cmdline.append((c, cmdline))
except (OSError, psutil.NoSuchProcess, psutil.AccessDenied):
# These errors indicate that the process has terminated while
# we were processing the info. Just discard it.
pass
return children_with_cmdline
def _running_children_with_cmdline(p):
all_children = _direct_children_with_cmdline(p)
workers = [(c, cmdline) for c, cmdline in all_children
if ('semaphore_tracker' not in cmdline and
'resource_tracker' not in cmdline and
'multiprocessing.forkserver' not in cmdline)]
forkservers = [c for c, cmdline in all_children
if 'multiprocessing.forkserver' in cmdline]
for fs in forkservers:
workers.extend(_direct_children_with_cmdline(fs))
return workers
def _check_subprocesses_number(executor, expected_process_number=None,
expected_max_process_number=None, patience=100):
# Wait for terminating processes to disappear
children_cmdlines = _running_children_with_cmdline(psutil.Process())
pids_cmdlines = [(c.pid, cmdline) for c, cmdline in children_cmdlines]
children_pids = {pid for pid, _ in pids_cmdlines}
if executor is not None:
worker_pids = set(executor._processes.keys())
else:
# Bypass pids checks when executor has been garbage
# collected
worker_pids = children_pids
if expected_process_number is not None:
try:
assert len(children_pids) == expected_process_number, pids_cmdlines
assert len(worker_pids) == expected_process_number, pids_cmdlines
assert worker_pids == children_pids, pids_cmdlines
except AssertionError:
if expected_process_number != 0:
raise
# there is a race condition with the /proc/<pid>/ system clean up
# and our utilization of psutil. The Process is considered alive by
# psutil even though it have been terminated. Wait for the system
# clean up in this case.
for _ in range(patience):
if not _running_children_with_cmdline(psutil.Process()):
break
time.sleep(.1)
else:
raise
if expected_max_process_number is not None:
assert len(children_pids) <= expected_max_process_number, pids_cmdlines
assert len(worker_pids) <= expected_max_process_number, pids_cmdlines
def _check_executor_started(executor):
# Submit a small job to make sure that the pool is an working state
res = executor.submit(id, None)
try:
res.result(timeout=TIMEOUT)
except TimeoutError:
print('\n' * 3, res.done(), executor._call_queue.empty(),
executor._result_queue.empty())
print(executor._processes)
print(threading.enumerate())
from faulthandler import dump_traceback
dump_traceback()
executor.submit(dump_traceback).result(TIMEOUT)
raise RuntimeError("Executor took too long to run basic task.")
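# Illustrative sketch (not part of the original module): the essence of the
# check above is a bounded round-trip through the worker pool, e.g.
#
#     future = executor.submit(id, None)
#     future.result(timeout=TIMEOUT)   # a TimeoutError here suggests a deadlock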
class ExecutorMixin:
worker_count = 5
@classmethod
def setup_class(cls):
print(f"setup class with {cls.context}")
global _test_event
if _test_event is None:
_test_event = cls.context.Event()
@classmethod
def teardown_class(cls):
print(f"teardown class with {cls.context}")
global _test_event
if _test_event is not None:
_test_event = None
@pytest.fixture(autouse=True)
def setup_method(self):
global _test_event
assert _test_event is not None
try:
self.executor = self.executor_type(
max_workers=self.worker_count, context=self.context,
initializer=initializer_event, initargs=(_test_event,))
except NotImplementedError as e:
            pytest.skip(str(e))
_check_executor_started(self.executor)
_check_subprocesses_number(self.executor, self.worker_count)
def teardown_method(self, method):
# Make sure executor is not broken if it should not be
executor = getattr(self, 'executor', None)
if executor is not None:
expect_broken_pool = hasattr(method, "broken_pool") # old pytest
for mark in getattr(method, "pytestmark", []):
if mark.name == "broken_pool":
expect_broken_pool = True
is_actually_broken = executor._flags.broken is not None
assert is_actually_broken == expect_broken_pool
t_start = time.time()
executor.shutdown(wait=True, kill_workers=True)
dt = time.time() - t_start
assert dt < 10, "Executor took too long to shutdown"
_check_subprocesses_number(executor, 0)
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
@classmethod
def check_no_running_workers(cls, patience=5, sleep_duration=0.01):
deadline = time.time() + patience
while time.time() <= deadline:
time.sleep(sleep_duration)
p = psutil.Process()
workers = _running_children_with_cmdline(p)
if not workers:
return
# Patience exhausted: log the remaining workers command line and
# raise error.
print("Remaining worker processes command lines:", file=sys.stderr)
for w, cmdline in workers:
print(w.pid, w.status(), end='\n', file=sys.stderr)
print(cmdline, end='\n\n', file=sys.stderr)
raise AssertionError(
f'Expected no more running worker processes but got {len(workers)}'
f' after waiting {patience:0.3f}s.'
)
class ReusableExecutorMixin:
def setup_method(self, method):
default_start_method = get_context().get_start_method()
assert default_start_method == "loky", default_start_method
executor = get_reusable_executor(max_workers=2)
_check_executor_started(executor)
# There can be less than 2 workers because of the worker timeout
_check_subprocesses_number(executor, expected_max_process_number=2)
def teardown_method(self, method):
"""Make sure the executor can be recovered after the tests"""
executor = get_reusable_executor(max_workers=2)
assert executor.submit(math.sqrt, 1).result() == 1
# There can be less than 2 workers because of the worker timeout
_check_subprocesses_number(executor, expected_max_process_number=2)
@classmethod
def teardown_class(cls):
executor = get_reusable_executor(max_workers=2)
executor.shutdown(wait=True)
| 38.526066 | 79 | 0.651126 | [
"BSD-3-Clause"
] | pombredanne/loky | tests/_executor_mixin.py | 8,129 | Python |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
unicode
except NameError:
unicode = str
unichr = chr
import bisect
import os
import sys
import types
import curses
import app.config
import app.controller
import app.cu_editor
import app.em_editor
import app.string
import app.text_buffer
import app.vi_editor
# The terminal area that the curses can draw to.
mainCursesWindow = None
class ViewWindow:
"""A view window is a base window that does not get focus or have
TextBuffer.
See class ActiveWindow for a window that can get focus. See class Window for
a window that can get focus and have a TextBuffer.
"""
def __init__(self, program, parent):
"""
Args:
parent is responsible for the order in which this window is updated,
relative to its siblings.
"""
if app.config.strict_debug:
assert issubclass(self.__class__, ViewWindow), self
assert issubclass(program.__class__, app.ci_program.CiProgram), self
if parent is not None:
assert issubclass(parent.__class__, ViewWindow), parent
self.program = program
self.parent = parent
self.isFocusable = False
self.top = 0
self.left = 0
self.rows = 1
self.cols = 1
self.scrollRow = 0
self.scrollCol = 0
self.showCursor = True
self.writeLineRow = 0
self.zOrder = []
def addStr(self, row, col, text, colorPair):
"""Overwrite text at row, column with text.
The caller is responsible for avoiding overdraw.
"""
if app.config.strict_debug:
app.log.check_le(row, self.rows)
app.log.check_le(col, self.cols)
self.program.backgroundFrame.addStr(self.top + row, self.left + col,
text.encode('utf-8'), colorPair)
def reattach(self):
self.setParent(self.parent)
def blank(self, colorPair):
"""Clear the window."""
for i in range(self.rows):
self.addStr(i, 0, ' ' * self.cols, colorPair)
def bringChildToFront(self, child):
"""Bring it to the top layer."""
try:
self.zOrder.remove(child)
except ValueError:
pass
self.zOrder.append(child)
def bringToFront(self):
"""Bring it to the top layer."""
self.parent.bringChildToFront(self)
def changeFocusTo(self, changeTo):
if app.config.strict_debug:
assert issubclass(self.__class__, ViewWindow), self
assert issubclass(changeTo.__class__, ViewWindow), changeTo
topWindow = self
while topWindow.parent:
topWindow = topWindow.parent
topWindow.changeFocusTo(changeTo)
def colorPref(self, colorType, delta=0):
return self.program.color.get(colorType, delta)
def contains(self, row, col):
"""Determine whether the position at row, col lay within this window."""
for i in self.zOrder:
if i.contains(row, col):
return i
return (self.top <= row < self.top + self.rows and
self.left <= col < self.left + self.cols and self)
def debugDraw(self):
programWindow = self
while programWindow.parent is not None:
programWindow = programWindow.parent
programWindow.debugDraw(self)
def deselect(self):
pass
def detach(self):
"""Hide the window by removing self from parents' children, but keep
same parent to be reattached later."""
try:
self.parent.zOrder.remove(self)
except ValueError:
pass
def layoutHorizontally(self, children, separation=0):
left = self.left
cols = self.cols
for view in children:
preferredCols = view.preferredSize(self.rows, max(0, cols))[1]
view.reshape(self.top, left, self.rows,
max(0, min(cols, preferredCols)))
delta = view.cols + separation
left += delta
cols -= delta
def layoutVertically(self, children, separation=0):
top = self.top
rows = self.rows
for view in children:
preferredRows = view.preferredSize(max(0, rows), self.cols)[0]
view.reshape(top, self.left, max(0, min(rows, preferredRows)),
self.cols)
delta = view.rows + separation
top += delta
rows -= delta
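    # Illustrative sketch (not part of the original class): with a 10-row
    # parent, one child whose preferredSize() reports a single row and one
    # that accepts the full row limit,
    #
    #     parent.layoutVertically([child_a, child_b])
    #     # child_a -> top=parent.top,     rows=1
    #     # child_b -> top=parent.top + 1, rows=9 (the remaining space)
    #
    # with the optional separation inserted between consecutive children.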
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseDoubleClick(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseMoved(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseRelease(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseTripleClick(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseWheelDown(self, shift, ctrl, alt):
pass
def mouseWheelUp(self, shift, ctrl, alt):
pass
def moveTo(self, top, left):
self.top = top
self.left = left
def moveBy(self, top, left):
self.top += top
self.left += left
def _childFocusableWindow(self, reverse=False):
windows = self.zOrder[:]
if reverse:
windows.reverse()
for i in windows:
if i.isFocusable:
return i
else:
r = i._childFocusableWindow(reverse)
if r is not None:
return r
def nextFocusableWindow(self, start, reverse=False):
"""Windows without |isFocusable| are skipped. Ignore (skip) |start| when
searching.
Args:
start (window): the child window to start from. If |start| is not
found, start from the first child window.
reverse (bool): if True, find the prior focusable window.
Returns:
A window that should be focused.
See also: showFullWindowHierarchy() which can help in debugging.
"""
windows = self.parent.zOrder[:]
if reverse:
windows.reverse()
try:
found = windows.index(start)
except ValueError:
found = -1
windows = windows[found + 1:]
for i in windows:
if i.isFocusable:
return i
else:
r = i._childFocusableWindow(reverse)
if r is not None:
return r
r = self.parent.nextFocusableWindow(self.parent, reverse)
if r is not None:
return r
return self._childFocusableWindow(reverse)
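    # Illustrative sketch (not part of the original class): given a parent
    # whose z-order is [label, findLine, gotoLine] and only the last two are
    # focusable, the method above behaves roughly as
    #
    #     findLine.nextFocusableWindow(findLine)   # -> gotoLine
    #     gotoLine.priorFocusableWindow(gotoLine)  # -> findLine
    #
    # Non-focusable siblings are searched for focusable descendants, and the
    # search climbs to the parent once the siblings are exhausted.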
def normalize(self):
self.parent.normalize()
def onPrefChanged(self, category, name):
self.parent.onPrefChanged(category, name)
def paint(self, row, col, count, colorPair):
"""Paint text a row, column with colorPair.
fyi, I thought this may be faster than using addStr to paint over the
text with a different colorPair. It looks like there isn't a significant
performance difference between chgat and addstr.
"""
mainCursesWindow.chgat(self.top + row, self.left + col, count,
colorPair)
def preferredSize(self, rowLimit, colLimit):
# Derived classes should override this.
return rowLimit, colLimit
def presentModal(self, changeTo, paneRow, paneCol):
self.parent.presentModal(changeTo, paneRow, paneCol)
def priorFocusableWindow(self, start):
return self.nextFocusableWindow(start, True)
def quitNow(self):
self.program.quitNow()
def render(self):
"""Redraw window."""
for child in self.zOrder:
child.render()
def showWindowHierarchy(self, indent=' '):
"""For debugging."""
focus = u'[f]' if self.isFocusable else u'[ ]'
extra = u''
if hasattr(self, 'label'):
extra += u' "' + self.label + u'"'
app.log.info("%s%s%s%s" % (indent, focus, self, extra))
for child in self.zOrder:
child.showWindowHierarchy(indent + u' ')
def showFullWindowHierarchy(self, indent=u' '):
"""For debugging."""
f = self
while f.parent is not None:
f = f.parent
assert f
f.showWindowHierarchy()
def doPreCommand(self):
pass
def longTimeSlice(self):
"""returns whether work is finished (no need to call again)."""
return True
def shortTimeSlice(self):
"""returns whether work is finished (no need to call again)."""
return True
def reshape(self, top, left, rows, cols):
self.moveTo(top, left)
self.resizeTo(rows, cols)
#app.log.debug(self, top, left, rows, cols)
def resizeBottomBy(self, rows):
self.rows += rows
def resizeBy(self, rows, cols):
self.rows += rows
self.cols += cols
def resizeTo(self, rows, cols):
#app.log.detail(rows, cols, self)
if app.config.strict_debug:
assert rows >= 0, rows
assert cols >= 0, cols
self.rows = rows
self.cols = cols
def resizeTopBy(self, rows):
self.top += rows
self.rows -= rows
def setParent(self, parent, layerIndex=sys.maxsize):
"""Setting the parent will cause the the window to refresh (i.e. if self
was hidden with detach() it will no longer be hidden)."""
if app.config.strict_debug:
assert issubclass(self.__class__, ViewWindow), self
assert issubclass(parent.__class__, ViewWindow), parent
if self.parent:
try:
self.parent.zOrder.remove(self)
except ValueError:
pass
self.parent = parent
if parent:
self.parent.zOrder.insert(layerIndex, self)
def writeLine(self, text, color):
"""Simple line writer for static windows."""
if app.config.strict_debug:
assert isinstance(text, unicode)
text = text[:self.cols]
text = text + u' ' * max(0, self.cols - len(text))
self.program.backgroundFrame.addStr(self.top + self.writeLineRow, self.left,
text.encode(u'utf-8'), color)
self.writeLineRow += 1
def getProgram(self):
return self.program
class ActiveWindow(ViewWindow):
"""An ActiveWindow may have focus and a controller."""
def __init__(self, program, parent):
if app.config.strict_debug:
assert issubclass(self.__class__, ActiveWindow), self
assert issubclass(program.__class__,
app.ci_program.CiProgram), repr(program)
if parent is not None:
assert issubclass(parent.__class__, ViewWindow), parent
ViewWindow.__init__(self, program, parent)
self.controller = None
self.hasFocus = False
self.isFocusable = True
def focus(self):
"""
Note: to focus a view it must have a controller. Focusing a view without
a controller would make the program appear to freeze since nothing
would be responding to user input.
"""
self.hasFocus = True
self.controller.focus()
def setController(self, controller):
if app.config.strict_debug:
assert issubclass(self.__class__, Window), self
self.controller = controller(self)
def unfocus(self):
self.hasFocus = False
self.controller.unfocus()
class Window(ActiveWindow):
"""A Window holds a TextBuffer and a controller that operates on the
TextBuffer."""
def __init__(self, program, parent):
if app.config.strict_debug:
assert issubclass(self.__class__, Window), self
assert issubclass(program.__class__, app.ci_program.CiProgram), self
assert issubclass(parent.__class__, ViewWindow), parent
ActiveWindow.__init__(self, program, parent)
self.hasCaptiveCursor = self.program.prefs.editor['captiveCursor']
self.textBuffer = None
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseClick(paneRow, paneCol, shift, ctrl, alt)
def mouseDoubleClick(self, paneRow, paneCol, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseDoubleClick(paneRow, paneCol, shift, ctrl, alt)
def mouseMoved(self, paneRow, paneCol, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseMoved(paneRow, paneCol, shift, ctrl, alt)
def mouseRelease(self, paneRow, paneCol, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseRelease(paneRow, paneCol, shift, ctrl, alt)
def mouseTripleClick(self, paneRow, paneCol, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseTripleClick(paneRow, paneCol, shift, ctrl, alt)
def mouseWheelDown(self, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseWheelDown(shift, ctrl, alt)
def mouseWheelUp(self, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseWheelUp(shift, ctrl, alt)
def preferredSize(self, rowLimit, colLimit):
return min(rowLimit, len(self.textBuffer.lines)), colLimit
def render(self):
if self.textBuffer:
self.textBuffer.draw(self)
ViewWindow.render(self)
def setController(self, controller):
ActiveWindow.setController(self, controller)
self.controller.setTextBuffer(self.textBuffer)
def setTextBuffer(self, textBuffer):
textBuffer.setView(self)
self.textBuffer = textBuffer
def doPreCommand(self):
if self.textBuffer is not None:
self.textBuffer.setMessage()
def longTimeSlice(self):
"""returns whether work is finished (no need to call again)."""
finished = True
tb = self.textBuffer
if tb is not None and tb.parser.resumeAtRow < len(tb.lines):
tb.parseDocument()
# If a user event came in while parsing, the parsing will be paused
# (to be resumed after handling the event).
finished = tb.parser.resumeAtRow >= len(tb.lines)
for child in self.zOrder:
finished = finished and child.longTimeSlice()
return finished
def shortTimeSlice(self):
"""returns whether work is finished (no need to call again)."""
tb = self.textBuffer
if tb is not None:
tb.parseScreenMaybe()
return tb.parser.resumeAtRow >= len(tb.lines)
return True
class LabelWindow(ViewWindow):
"""A text label.
The label is inert, it will pass events to its parent.
"""
def __init__(self,
program,
parent,
label,
preferredWidth=None,
align=u'left'):
if app.config.strict_debug:
assert issubclass(program.__class__, app.ci_program.CiProgram), self
assert issubclass(parent.__class__, ViewWindow), parent
assert isinstance(label, unicode)
assert preferredWidth is None or isinstance(preferredWidth, int)
assert isinstance(align, unicode)
ViewWindow.__init__(self, program, parent)
self.label = label
self.preferredWidth = preferredWidth
self.align = -1 if align == u'left' else 1
self.color = self.program.color.get(u'keyword')
def preferredSize(self, rowLimit, colLimit):
if app.config.strict_debug:
assert self.parent
assert rowLimit >= 0
assert colLimit >= 0
preferredWidth = (self.preferredWidth if self.preferredWidth is not None
else len(self.label))
return (min(rowLimit, 1), min(colLimit, preferredWidth))
def render(self):
if self.rows <= 0:
return
line = self.label[:self.cols]
line = u"%*s" % (self.cols * self.align, line)
self.addStr(0, 0, line, self.color)
ViewWindow.render(self)
class LabeledLine(Window):
"""A single line with a label.
This is akin to a line prompt or gui modal dialog. It's used for things like
'find' and 'goto line'.
"""
def __init__(self, program, parent, label):
if app.config.strict_debug:
assert issubclass(self.__class__, LabeledLine), self
assert issubclass(program.__class__, app.ci_program.CiProgram), self
assert issubclass(parent.__class__, ViewWindow), parent
Window.__init__(self, program, parent)
self.host = parent
tb = app.text_buffer.TextBuffer(self.program)
tb.rootGrammar = self.program.prefs.grammars[u'none']
self.setTextBuffer(tb)
self.label = label
self.leftColumn = ViewWindow(self.program, self)
# TODO(dschuyler) Add self.rightColumn.
def focus(self):
self.bringToFront()
if not self.controller:
app.log.info(self, repr(self.label))
Window.focus(self)
def preferredSize(self, rowLimit, colLimit):
return min(rowLimit, 1), colLimit
def render(self):
#app.log.info('LabeledLine', self.label, self.rows, self.cols)
if self.rows <= 0:
return
self.leftColumn.addStr(0, 0, self.label,
self.program.color.get(u'keyword'))
Window.render(self)
def reshape(self, top, left, rows, cols):
labelWidth = len(self.label)
Window.reshape(self, top, left + labelWidth, rows,
max(0, cols - labelWidth))
self.leftColumn.reshape(top, left, rows, labelWidth)
def setLabel(self, label):
self.label = label
self.reshape(self.top, self.left, self.rows, self.cols)
class Menu(ViewWindow):
"""Work in progress on a context menu."""
def __init__(self, program, host):
if app.config.strict_debug:
assert issubclass(self.__class__, Menu), self
assert issubclass(host.__class__, ActiveWindow)
ViewWindow.__init__(self, program, host)
self.host = host
self.label = u''
self.lines = []
self.commands = []
def addItem(self, label, command):
self.lines.append(label)
self.commands.append(command)
def clear(self):
self.lines = []
self.commands = []
def moveSizeToFit(self, left, top):
self.clear()
self.addItem(u'some menu', None)
#self.addItem('sort', self.host.textBuffer.sortSelection)
self.addItem(u'cut', self.host.textBuffer.editCut)
self.addItem(u'paste', self.host.textBuffer.editPaste)
longest = 0
for i in self.lines:
if len(i) > longest:
longest = len(i)
self.reshape(left, top, len(self.lines), longest + 2)
def render(self):
color = self.program.color.get(u'context_menu')
self.writeLineRow = 0
for i in self.lines[:self.rows]:
self.writeLine(" " + i, color)
ViewWindow.render(self)
class LineNumbers(ViewWindow):
def __init__(self, program, host):
ViewWindow.__init__(self, program, host)
self.host = host
def drawLineNumbers(self):
limit = min(self.rows,
len(self.host.textBuffer.lines) - self.host.scrollRow)
cursorBookmarkColorIndex = None
visibleBookmarks = self.getVisibleBookmarks(self.host.scrollRow,
self.host.scrollRow + limit)
currentBookmarkIndex = 0
colorPrefs = self.program.color
for i in range(limit):
color = colorPrefs.get(u'line_number')
currentRow = self.host.scrollRow + i
if currentBookmarkIndex < len(visibleBookmarks):
currentBookmark = visibleBookmarks[currentBookmarkIndex]
else:
currentBookmark = None
# Use a different color if the row is associated with a bookmark.
if currentBookmark:
if (currentRow >= currentBookmark.begin and
currentRow <= currentBookmark.end):
color = colorPrefs.get(
currentBookmark.data.get(u'colorIndex'))
if self.host.textBuffer.penRow == currentRow:
cursorBookmarkColorIndex = currentBookmark.data.get(
u'colorIndex')
if currentRow + 1 > currentBookmark.end:
currentBookmarkIndex += 1
self.addStr(i, 0, u' %5d ' % (currentRow + 1), color)
# Draw indicators for text off of the left edge.
if self.host.scrollCol > 0:
color = colorPrefs.get(u'line_overflow')
for i in range(limit):
if len(self.host.textBuffer.lines[self.host.scrollRow + i]) > 0:
self.addStr(i, 6, u' ', color)
# Draw blank line number rows past the end of the document.
color = colorPrefs.get(u'outside_document')
for i in range(limit, self.rows):
self.addStr(i, 0, u' ', color)
# Highlight the line numbers for the current cursor line.
cursorAt = self.host.textBuffer.penRow - self.host.scrollRow
if 0 <= cursorAt < limit:
if cursorBookmarkColorIndex:
if self.program.prefs.startup[u'numColors'] == 8:
color = colorPrefs.get(cursorBookmarkColorIndex)
else:
color = colorPrefs.get(cursorBookmarkColorIndex % 32 + 128)
else:
color = colorPrefs.get(u'line_number_current')
self.addStr(cursorAt, 1, u'%5d' % (self.host.textBuffer.penRow + 1),
color)
def getVisibleBookmarks(self, beginRow, endRow):
"""
Args:
beginRow (int): the index of the line number that you want the list of
bookmarks to start from.
endRow (int): the index of the line number that you want the list of
bookmarks to end at (exclusive).
Returns:
A list containing the bookmarks that are displayed on the screen. If
there are no bookmarks, returns an empty list.
"""
bookmarkList = self.host.textBuffer.bookmarks
beginIndex = endIndex = 0
if len(bookmarkList):
needle = app.bookmark.Bookmark(beginRow, beginRow, {})
beginIndex = bisect.bisect_left(bookmarkList, needle)
if beginIndex > 0 and bookmarkList[beginIndex - 1].end >= beginRow:
beginIndex -= 1
needle.range = (endRow, endRow)
endIndex = bisect.bisect_left(bookmarkList, needle)
return bookmarkList[beginIndex:endIndex]
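    # Illustrative sketch (not part of the original class): with bookmarks
    # covering rows 2-4 and 10-12, a screen showing rows 0-8 yields only the
    # first one, e.g.
    #
    #     visible = lineNumbers.getVisibleBookmarks(0, 9)
    #     # -> [Bookmark(2, 4, ...)]
    #
    # The bisect calls keep the lookup logarithmic in the number of bookmarks.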
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
if ctrl:
app.log.info(u'click at', paneRow, paneCol)
return
self.host.changeFocusTo(self.host)
tb = self.host.textBuffer
if self.host.scrollRow + paneRow >= len(tb.lines):
tb.selectionNone()
return
if shift:
if tb.selectionMode == app.selectable.kSelectionNone:
tb.selectionLine()
self.mouseRelease(paneRow, paneCol, shift, ctrl, alt)
else:
tb.cursorMoveAndMark(
self.host.scrollRow + paneRow - tb.penRow, 0,
self.host.scrollRow + paneRow - tb.markerRow, 0,
app.selectable.kSelectionNone - tb.selectionMode)
self.mouseRelease(paneRow, paneCol, shift, ctrl, alt)
def mouseDoubleClick(self, paneRow, paneCol, shift, ctrl, alt):
self.host.textBuffer.selectionAll()
def mouseMoved(self, paneRow, paneCol, shift, ctrl, alt):
app.log.info(paneRow, paneCol, shift)
self.host.textBuffer.mouseClick(paneRow, paneCol - self.cols, True,
ctrl, alt)
def mouseRelease(self, paneRow, paneCol, shift, ctrl, alt):
app.log.info(paneRow, paneCol, shift)
tb = self.host.textBuffer
tb.selectLineAt(self.host.scrollRow + paneRow)
def mouseTripleClick(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseWheelDown(self, shift, ctrl, alt):
self.host.mouseWheelDown(shift, ctrl, alt)
def mouseWheelUp(self, shift, ctrl, alt):
self.host.mouseWheelUp(shift, ctrl, alt)
def render(self):
self.drawLineNumbers()
class LogWindow(ViewWindow):
def __init__(self, program, parent):
ViewWindow.__init__(self, program, parent)
self.lines = app.log.getLines()
self.renderCounter = 0
def render(self):
self.renderCounter += 1
app.log.meta(u" " * 10, self.renderCounter, u"- screen render -")
self.writeLineRow = 0
colorPrefs = self.program.color
colorA = colorPrefs.get(u'default')
colorB = colorPrefs.get(u'highlight')
for i in self.lines[-self.rows:]:
color = colorA
if len(i) and i[-1] == u'-':
color = colorB
self.writeLine(i, color)
ViewWindow.render(self)
class InteractiveFind(Window):
def __init__(self, program, host):
Window.__init__(self, program, host)
self.host = host
self.expanded = False
self.setController(app.cu_editor.InteractiveFind)
indent = u' '
self.findLine = LabeledLine(self.program, self, u'Find: ')
self.findLine.setController(app.cu_editor.InteractiveFindInput)
self.findLine.setParent(self)
self.replaceLine = LabeledLine(self.program, self, u'Replace: ')
self.replaceLine.setController(app.cu_editor.InteractiveReplaceInput)
self.replaceLine.setParent(self)
self.matchOptionsRow = RowWindow(self.program, self, 2)
self.matchOptionsRow.setParent(self)
# If findUseRegex is false, re.escape the search.
OptionsToggle(self.program, self.matchOptionsRow, u'regex', u'editor',
u'findUseRegex')
# If findWholeWord, wrap with \b.
OptionsToggle(self.program, self.matchOptionsRow, u'wholeWord',
u'editor', u'findWholeWord')
# If findIgnoreCase, pass ignore case flag to regex.
OptionsToggle(self.program, self.matchOptionsRow, u'ignoreCase',
u'editor', u'findIgnoreCase')
if 0:
# Use locale.
OptionsToggle(self.program, self.matchOptionsRow, u'locale',
u'editor', u'findLocale')
# Span lines.
OptionsToggle(self.program, self.matchOptionsRow, u'multiline',
u'editor', u'findMultiline')
# Dot matches anything (even \n).
OptionsToggle(self.program, self.matchOptionsRow, u'dotAll',
u'editor', u'findDotAll')
# Unicode match.
OptionsToggle(self.program, self.matchOptionsRow, u'unicode',
u'editor', u'findUnicode')
# Replace uppercase with upper and lowercase with lower.
OptionsToggle(self.program, self.matchOptionsRow, u'smartCaps',
u'editor', u'findReplaceSmartCaps')
if 0:
self.scopeOptions, self.scopeRow = self.addSelectOptionsRow(
indent + u'scope ',
[u'file', u'directory', u'openFiles', u'project'])
(self.changeCaseOptions,
self.changeCaseRow) = self.addSelectOptionsRow(
indent + u'changeCase',
[u'none', u'smart', u'upper', u'lower'])
(self.withinOptions,
self.withinOptionsRow) = self.addSelectOptionsRow(
indent + u'within ',
[
u'any',
u'code',
u'comment',
u'error',
u'markup',
u'misspelled', # Find in misspelled words.
u'quoted', # Find in strings.
])
(self.searchSelectionOption,
self.searchSelectionRow) = self.addSelectOptionsRow(
indent + u'selection ', [u'any', u'yes', u'no'])
(self.searchChangedOption,
self.searchChangedRow) = self.addSelectOptionsRow(
indent + u'changed ', [u'any', u'yes', u'no'])
self.pathsLine = LabeledLine(self.program, self, u'Paths: ')
self.pathsLine.setController(app.cu_editor.InteractiveFindInput)
self.pathsLine.setParent(self)
def reattach(self):
Window.reattach(self)
# TODO(dschuyler): consider removing expanded control.
# See https://github.com/google/ci_edit/issues/170
self.expanded = True
self.parent.layout()
def detach(self):
Window.detach(self)
self.parent.layout()
def addSelectOptionsRow(self, label, optionsList):
"""Such as a radio group."""
optionsRow = OptionsRow(self.program, self)
optionsRow.color = self.program.color.get(u'keyword')
optionsRow.addLabel(label)
optionsDict = {}
optionsRow.beginGroup()
for key in optionsList:
optionsDict[key] = False
optionsRow.addSelection(key, optionsDict)
optionsRow.endGroup()
optionsDict[optionsList[0]] = True
optionsRow.setParent(self)
return optionsDict, optionsRow
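    # Illustrative sketch (not part of the original class): the helper above
    # builds a mutually exclusive group such as
    #
    #     scopeOptions, scopeRow = self.addSelectOptionsRow(
    #         u'scope ', [u'file', u'directory', u'openFiles', u'project'])
    #     # scopeOptions == {u'file': True, u'directory': False,
    #     #                  u'openFiles': False, u'project': False}
    #
    # Clicking another entry in the rendered row moves the True flag to it.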
def bringChildToFront(self, child):
# The find window doesn't reorder children.
pass
def focus(self):
self.reattach()
if app.config.strict_debug:
assert self.parent
assert self.findLine.parent
assert self.rows > 0, self.rows
assert self.findLine.rows > 0, self.findLine.rows
self.controller.focus()
self.changeFocusTo(self.findLine)
def preferredSize(self, rowLimit, colLimit):
if app.config.strict_debug:
assert self.parent
assert rowLimit >= 0
assert colLimit >= 0
if self.parent and self in self.parent.zOrder and self.expanded:
return (min(rowLimit, len(self.zOrder)), colLimit)
return (1, -1)
def expandFindWindow(self, expanded):
self.expanded = expanded
self.parent.layout()
def reshape(self, top, left, rows, cols):
Window.reshape(self, top, left, rows, cols)
self.layoutVertically(self.zOrder)
def unfocus(self):
self.detach()
Window.unfocus(self)
class MessageLine(ViewWindow):
"""The message line appears at the bottom of the screen."""
def __init__(self, program, host):
ViewWindow.__init__(self, program, host)
self.host = host
self.message = None
self.renderedMessage = None
def render(self):
colorPrefs = self.program.color
if self.message:
if self.message != self.renderedMessage:
self.writeLineRow = 0
self.writeLine(self.message, colorPrefs.get(u'message_line'))
else:
self.blank(colorPrefs.get(u'message_line'))
class StatusLine(ViewWindow):
"""The status line appears at the bottom of the screen.
It shows the current line and column the cursor is on.
"""
def __init__(self, program, host):
ViewWindow.__init__(self, program, host)
self.host = host
def render(self):
tb = self.host.textBuffer
colorPrefs = self.program.color
color = colorPrefs.get(u'status_line')
if self.host.showTips:
tipRows = app.help.docs[u'tips']
if len(tipRows) + 1 < self.rows:
for i in range(self.rows):
self.addStr(i, 0, u' ' * self.cols, color)
for i, k in enumerate(tipRows):
self.addStr(i + 1, 4, k, color)
self.addStr(1, 40, u"(Press F1 to show/hide tips)",
color | curses.A_REVERSE)
statusLine = u''
if tb.message:
statusLine = tb.message[0]
color = (tb.message[1] if tb.message[1] is not None else
colorPrefs.get(u'status_line'))
if 0:
if tb.isDirty():
statusLine += u' * '
else:
statusLine += u' . '
# Percentages.
rowPercentage = 0
colPercentage = 0
lineCount = len(tb.lines)
if lineCount:
rowPercentage = self.host.textBuffer.penRow * 100 // lineCount
charCount = len(tb.lines[self.host.textBuffer.penRow])
if charCount and self.host.textBuffer.penCol != 0:
colPercentage = self.host.textBuffer.penCol * 100 // charCount
# Format.
rightSide = u''
if len(statusLine):
rightSide += u' |'
if self.program.prefs.startup.get('showLogWindow'):
rightSide += u' %s | %s |' % (tb.cursorGrammarName(),
tb.selectionModeName())
rightSide += u' %4d,%2d | %3d%%,%3d%%' % (
self.host.textBuffer.penRow + 1, self.host.textBuffer.penCol + 1,
rowPercentage, colPercentage)
statusLine += \
u' ' * (self.cols - len(statusLine) - len(rightSide)) + rightSide
self.addStr(self.rows - 1, 0, statusLine[:self.cols], color)
class TopInfo(ViewWindow):
def __init__(self, program, host):
ViewWindow.__init__(self, program, host)
self.host = host
self.borrowedRows = 0
self.lines = []
self.mode = 2
def onChange(self):
if self.mode == 0:
return
tb = self.host.textBuffer
lines = []
# TODO: Make dynamic topInfo work properly
if len(tb.lines):
lineCursor = self.host.scrollRow
line = ""
# Check for extremely small window.
if len(tb.lines) > lineCursor:
while len(line) == 0 and lineCursor > 0:
line = tb.lines[lineCursor]
lineCursor -= 1
if len(line):
indent = len(line) - len(line.lstrip(u' '))
lineCursor += 1
while lineCursor < len(tb.lines):
line = tb.lines[lineCursor]
                        if not len(line):
                            # Advance past blank lines to avoid looping forever.
                            lineCursor += 1
                            continue
z = len(line) - len(line.lstrip(u' '))
if z > indent:
indent = z
lineCursor += 1
else:
break
while indent and lineCursor > 0:
line = tb.lines[lineCursor]
if len(line):
z = len(line) - len(line.lstrip(u' '))
if z < indent:
indent = z
lines.append(line)
lineCursor -= 1
pathLine = app.string.pathEncode(self.host.textBuffer.fullPath)
if 1:
if tb.isReadOnly:
pathLine += u' [RO]'
if 1:
if tb.isDirty():
pathLine += u' * '
else:
pathLine += u' . '
lines.append(pathLine[-self.cols:])
self.lines = lines
infoRows = len(self.lines)
if self.mode > 0:
infoRows = self.mode
if self.borrowedRows != infoRows:
self.host.topRows = infoRows
self.host.layout()
self.borrowedRows = infoRows
def render(self):
"""Render the context information at the top of the window."""
lines = self.lines[-self.mode:]
lines.reverse()
color = self.program.color.get('top_info')
for i, line in enumerate(lines):
self.addStr(i, 0,
(line + u' ' * (self.cols - len(line)))[:self.cols],
color)
for i in range(len(lines), self.rows):
self.addStr(i, 0, u' ' * self.cols, color)
def reshape(self, top, left, rows, cols):
self.borrowedRows = 0
ViewWindow.reshape(self, top, left, rows, cols)
class InputWindow(Window):
"""This is the main content window.
Often the largest pane displayed.
"""
def __init__(self, program, host):
if app.config.strict_debug:
assert host
Window.__init__(self, program, host)
self.host = host
self.showFooter = True
self.savedScrollPositions = {}
self.showLineNumbers = self.program.prefs.editor.get(
'showLineNumbers', True)
self.showMessageLine = True
self.showRightColumn = True
self.showTopInfo = True
self.statusLineCount = 0 if self.program.prefs.status.get(
'seenTips') else 8
self.topRows = 2 # Number of lines in default TopInfo status.
self.controller = app.controller.MainController(self)
self.controller.add(app.em_editor.EmacsEdit(self))
self.controller.add(app.vi_editor.ViEdit(self))
self.controller.add(app.cu_editor.CuaPlusEdit(self))
# What does the user appear to want: edit, quit, or something else?
self.userIntent = 'edit'
if 1:
self.confirmClose = LabeledLine(
self.program, self, "Save changes? (yes, no, or cancel): ")
self.confirmClose.setController(app.cu_editor.ConfirmClose)
if 1:
self.confirmOverwrite = LabeledLine(
self.program, self, "Overwrite exiting file? (yes or no): ")
self.confirmOverwrite.setController(app.cu_editor.ConfirmOverwrite)
self.contextMenu = Menu(self.program, self)
if 1: # wip on multi-line interactive find.
self.interactiveFind = InteractiveFind(self.program, self)
self.interactiveFind.setParent(self, 0)
else:
self.interactiveFind = LabeledLine(self.program, self, u'find: ')
self.interactiveFind.setController(app.cu_editor.InteractiveFind)
if 1:
self.interactiveGoto = LabeledLine(self.program, self, u'goto: ')
self.interactiveGoto.setController(app.cu_editor.InteractiveGoto)
if 1:
self.interactivePrediction = LabeledLine(self.program, self, u'p: ')
self.interactivePrediction.setController(
app.cu_editor.InteractivePrediction)
if 1:
self.interactivePrompt = LabeledLine(self.program, self, u"e: ")
self.interactivePrompt.setController(
app.cu_editor.InteractivePrompt)
if 1:
self.interactiveQuit = LabeledLine(
self.program, self, u"Save changes? (yes, no, or cancel): ")
self.interactiveQuit.setController(app.cu_editor.InteractiveQuit)
if 1:
self.topInfo = TopInfo(self.program, self)
self.topInfo.setParent(self, 0)
if not self.showTopInfo:
self.topInfo.detach()
if 1:
self.statusLine = StatusLine(self.program, self)
self.statusLine.setParent(self, 0)
if not self.showFooter:
self.statusLine.detach()
if 1:
self.lineNumberColumn = LineNumbers(self.program, self)
self.lineNumberColumn.setParent(self, 0)
if not self.showLineNumbers:
self.lineNumberColumn.detach()
if 1:
self.logoCorner = ViewWindow(self.program, self)
self.logoCorner.name = u'Logo'
self.logoCorner.setParent(self, 0)
if 1:
self.rightColumn = ViewWindow(self.program, self)
self.rightColumn.name = u'Right'
self.rightColumn.setParent(self, 0)
if not self.showRightColumn:
self.rightColumn.detach()
if 1:
self.popupWindow = PopupWindow(self.program, self)
if self.showMessageLine:
self.messageLine = MessageLine(self.program, self)
self.messageLine.setParent(self, 0)
self.showTips = self.program.prefs.status.get(u'showTips')
self.statusLineCount = 8 if self.showTips else 1
if 0:
def splitWindow(self):
"""Experimental."""
app.log.info()
other = InputWindow(self.prg, self)
other.setTextBuffer(self.textBuffer)
app.log.info()
self.prg.zOrder.append(other)
self.prg.layout()
app.log.info()
def layout(self):
"""Change self and sub-windows to fit within the given rectangle."""
top, left, rows, cols = self.outerShape
lineNumbersCols = 7
topRows = self.topRows
bottomRows = max(1, self.interactiveFind.preferredSize(rows, cols)[0])
# The top, left of the main window is the rows, cols of the logo corner.
self.logoCorner.reshape(top, left, 2, lineNumbersCols)
if self.showTopInfo and rows > topRows and cols > lineNumbersCols:
self.topInfo.reshape(top, left + lineNumbersCols, topRows,
cols - lineNumbersCols)
top += topRows
rows -= topRows
rows -= bottomRows
bottomFirstRow = top + rows
self.confirmClose.reshape(bottomFirstRow, left, bottomRows, cols)
self.confirmOverwrite.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactivePrediction.reshape(bottomFirstRow, left, bottomRows,
cols)
self.interactivePrompt.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactiveQuit.reshape(bottomFirstRow, left, bottomRows, cols)
if self.showMessageLine:
self.messageLine.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactiveFind.reshape(bottomFirstRow, left, bottomRows, cols)
if 1:
self.interactiveGoto.reshape(bottomFirstRow, left, bottomRows, cols)
if self.showFooter and rows > 0:
self.statusLine.reshape(bottomFirstRow - self.statusLineCount, left,
self.statusLineCount, cols)
rows -= self.statusLineCount
if self.showLineNumbers and cols > lineNumbersCols:
self.lineNumberColumn.reshape(top, left, rows, lineNumbersCols)
cols -= lineNumbersCols
left += lineNumbersCols
if self.showRightColumn and cols > 0:
self.rightColumn.reshape(top, left + cols - 1, rows, 1)
cols -= 1
Window.reshape(self, top, left, rows, cols)
def drawLogoCorner(self):
"""."""
logo = self.logoCorner
if logo.rows <= 0 or logo.cols <= 0:
return
color = self.program.color.get('logo')
for i in range(logo.rows):
logo.addStr(i, 0, u' ' * logo.cols, color)
        logo.addStr(0, 1, u'ci'[:self.cols], color)
logo.render()
def drawRightEdge(self):
"""Draw makers to indicate text extending past the right edge of the
window."""
maxRow, maxCol = self.rows, self.cols
limit = min(maxRow, len(self.textBuffer.lines) - self.scrollRow)
colorPrefs = self.program.color
for i in range(limit):
color = colorPrefs.get('right_column')
if len(self.textBuffer.lines[
i + self.scrollRow]) - self.scrollCol > maxCol:
color = colorPrefs.get('line_overflow')
self.rightColumn.addStr(i, 0, u' ', color)
color = colorPrefs.get('outside_document')
for i in range(limit, maxRow):
self.rightColumn.addStr(i, 0, u' ', color)
def focus(self):
self.layout()
if self.showMessageLine:
self.messageLine.bringToFront()
Window.focus(self)
def nextFocusableWindow(self, start, reverse=False):
# Keep the tab focus in the child branch. (The child view will call
# this, tell the child there is nothing to tab to up here).
return None
def render(self):
self.topInfo.onChange()
self.drawLogoCorner()
self.drawRightEdge()
Window.render(self)
def reshape(self, top, left, rows, cols):
"""Change self and sub-windows to fit within the given rectangle."""
app.log.detail(top, left, rows, cols)
Window.reshape(self, top, left, rows, cols)
self.outerShape = (top, left, rows, cols)
self.layout()
def setTextBuffer(self, textBuffer):
if app.config.strict_debug:
assert issubclass(
textBuffer.__class__, app.text_buffer.TextBuffer), \
repr(textBuffer)
app.log.info('setTextBuffer')
if self.textBuffer is not None:
self.savedScrollPositions[self.textBuffer.fullPath] = (
self.scrollRow, self.scrollCol)
#self.normalize()
textBuffer.lineLimitIndicator = self.program.prefs.editor[
'lineLimitIndicator']
textBuffer.debugRedo = self.program.prefs.startup.get('debugRedo')
Window.setTextBuffer(self, textBuffer)
self.controller.setTextBuffer(textBuffer)
savedScroll = self.savedScrollPositions.get(self.textBuffer.fullPath)
if savedScroll is not None:
self.scrollRow, self.scrollCol = savedScroll
else:
historyScroll = self.textBuffer.fileHistory.get('scroll')
if historyScroll is not None:
self.scrollRow, self.scrollCol = historyScroll
else:
self.textBuffer.scrollToOptimalScrollPosition()
def startup(self):
bufferManager = self.program.bufferManager
for f in self.program.prefs.startup.get('cliFiles', []):
tb = bufferManager.loadTextBuffer(f['path'])
if tb is None:
# app.log.info('failed to load', repr(f["path"]))
continue
tb.parseDocument()
if f['row'] is not None:
if f['col'] is not None:
tb.selectText(f['row'], f['col'], 0,
app.selectable.kSelectionNone)
else:
tb.selectText(f['row'], 0, 0, app.selectable.kSelectionNone)
if self.program.prefs.startup.get('readStdin'):
bufferManager.readStdin()
bufferManager.buffers.reverse()
tb = bufferManager.topBuffer()
if not tb:
tb = bufferManager.newTextBuffer()
self.setTextBuffer(tb)
# Should parsing the document be a standard part of setTextBuffer? TBD.
self.textBuffer.parseDocument()
openToLine = self.program.prefs.startup.get('openToLine')
if openToLine is not None:
self.textBuffer.selectText(openToLine - 1, 0, 0,
app.selectable.kSelectionNone)
def toggleShowTips(self):
self.showTips = not self.showTips
self.statusLineCount = 8 if self.showTips else 1
self.layout()
self.program.prefs.save('status', 'showTips', self.showTips)
def unfocus(self):
if self.showMessageLine:
self.messageLine.detach()
Window.unfocus(self)
class OptionsSelectionWindow(ViewWindow):
"""Mutex window."""
def __init__(self, program, parent):
if app.config.strict_debug:
assert parent is not None
ViewWindow.__init__(self, program, parent)
self.color = self.program.color.get('top_info')
def reshape(self, top, left, rows, cols):
ViewWindow.reshape(self, top, left, rows, cols)
self.layoutHorizontally(self.zOrder)
def childSelected(self, selectedChild):
app.log.info(self.zOrder)
for child in self.zOrder:
if child is not selectedChild:
child.deselect()
def render(self):
self.blank(self.color)
ViewWindow.render(self)
class OptionsTrinaryStateWindow(Window):
def __init__(self, program, parent, label, prefCategory, prefName):
if app.config.strict_debug:
assert isinstance(label, unicode)
assert isinstance(prefCategory, unicode)
assert isinstance(prefName, unicode)
Window.__init__(self, program, parent)
# TODO(dschuyler): Creating a text buffer is rather heavy for a toggle
# control. This should get some optimization.
self.setTextBuffer(app.text_buffer.TextBuffer(self.program))
self.setController(app.cu_editor.ToggleController)
self.setParent(parent)
self.name = label
self.prefCategory = prefCategory
self.prefName = prefName
colorPrefs = self.program.color
self.color = colorPrefs.get('keyword')
self.focusColor = colorPrefs.get('selected')
self.textBuffer.view.showCursor = False
def focus(self):
Window.focus(self)
def setUp(self, toggleOn, toggleOff, toggleUndefined, width=None):
if app.config.strict_debug:
assert isinstance(toggleOn, unicode)
assert isinstance(toggleOff, unicode)
assert isinstance(toggleUndefined, unicode)
assert width is None or isinstance(width, int)
self.toggleOn = toggleOn
self.toggleOff = toggleOff
self.toggleUndefined = toggleUndefined
longest = max(len(toggleOn), len(toggleOff), len(toggleUndefined))
self.width = width if width is not None else longest
self.updateLabel()
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
self.controller.toggleValue()
def onPrefChanged(self, category, name):
Window.onPrefChanged(self, category, name)
if category != self.prefCategory or name != self.prefName:
return
self.updateLabel()
def updateLabel(self):
pref = self.program.prefs.category(self.prefCategory)[self.prefName]
if pref is None:
label = self.toggleUndefined
else:
label = self.toggleOn if pref else self.toggleOff
self.label = u'%*s' % (self.width, label)
def preferredSize(self, rowLimit, colLimit):
return min(rowLimit, 1), min(colLimit, abs(self.width))
def render(self):
Window.render(self)
if self.rows <= 0:
return
self.writeLineRow = 0
color = self.focusColor if self.hasFocus else self.color
self.writeLine(self.label[:self.cols], color)
class OptionsToggle(OptionsTrinaryStateWindow):
def __init__(self,
program,
parent,
label,
prefCategory,
prefName,
width=None):
if app.config.strict_debug:
assert isinstance(label, unicode)
assert isinstance(prefCategory, unicode)
assert isinstance(prefName, unicode)
OptionsTrinaryStateWindow.__init__(self, program, parent, label,
prefCategory, prefName)
# I considered these unicode characters, but [x] looks clearer to me.
# toggleOn = unichr(0x2612) + ' ' + control['name']
# toggleOff = unichr(0x2610) + ' ' + control['name']
OptionsTrinaryStateWindow.setUp(self, u'[x]' + label, u'[ ]' + label,
u'[-]' + label, width)
class RowWindow(ViewWindow):
def __init__(self, program, host, separator):
if app.config.strict_debug:
assert host
ViewWindow.__init__(self, program, host)
self.color = self.program.color.get('keyword')
self.separator = separator
def preferredSize(self, rowLimit, colLimit):
return min(rowLimit, 1), colLimit
def render(self):
self.blank(self.color)
ViewWindow.render(self)
def reshape(self, top, left, rows, cols):
ViewWindow.reshape(self, top, left, rows, cols)
#app.log.info(top, left, rows, cols, self)
self.layoutHorizontally(self.zOrder, self.separator)
class OptionsRow(ViewWindow):
class ControlElement:
def __init__(self, elementType, name, reference, width=None, sep=" "):
self.type = elementType
self.name = name
self.reference = reference
self.width = width if width is not None else len(name)
self.sep = sep
def __init__(self, program, host):
if app.config.strict_debug:
assert host
ViewWindow.__init__(self, program, host)
self.host = host
self.color = self.program.color.get('top_info')
self.controlList = []
self.group = None
def addElement(self, draw, kind, name, reference, width, sep, extraWidth=0):
if app.config.strict_debug:
assert isinstance(name, unicode)
assert isinstance(sep, unicode)
assert width is None or isinstance(width, int)
assert isinstance(extraWidth, int)
if reference is not None:
assert isinstance(reference, dict)
assert name in reference
if self.group is not None:
self.group.append(len(self.controlList))
element = {
'dict': reference,
'draw': draw,
'name': name,
'sep': sep,
'type': kind,
'width': width if width is not None else len(name) + extraWidth
}
self.controlList.append(element)
return element
def addLabel(self, name, width=None, sep=u" "):
def draw(control):
return control[u'name']
return self.addElement(draw, u'label', name, None, width, sep)
def addSortHeader(self, name, reference, width=None, sep=u" |"):
def draw(control):
decoration = u'v' if control[u'dict'][control[u'name']] else u'^'
if control[u'dict'][control[u'name']] is None:
decoration = u'-'
if control[u'width'] < 0:
return u'%s %s' % (control[u'name'], decoration)
return u'%s %s' % (decoration, control[u'name'])
self.addElement(draw, u'sort', name, reference, width, sep, len(u' v'))
def addSelection(self, name, reference, width=None, sep=u" "):
if app.config.strict_debug:
assert isinstance(name, unicode)
if 1:
toggleOn = u'(*)' + name
toggleOff = u'( )' + name
def draw(control):
return toggleOn if control[u'dict'][control[u'name']] else toggleOff
        # width may be None; fall back to the label width in that case.
        width = max(width or 0, min(len(toggleOn), len(toggleOff)))
self.addElement(draw, u'selection', name, reference, width, sep,
len(u'(*)'))
def removeThis_addToggle(self, name, reference, width=None, sep=u" "):
if app.config.strict_debug:
assert isinstance(name, unicode)
if 1:
toggleOn = u'[x]' + name
toggleOff = u'[ ]' + name
if 0:
toggleOn = unichr(0x2612) + ' ' + control['name']
toggleOff = unichr(0x2610) + ' ' + control['name']
if 0:
toggleOn = '[+' + control['name'] + ']'
toggleOff = '[-' + control['name'] + ']'
def draw(control):
return toggleOn if control['dict'][control['name']] else toggleOff
        # width may be None; fall back to the label width in that case.
        width = max(width or 0, min(len(toggleOn), len(toggleOff)))
self.addElement(draw, u'toggle', name, reference, width, sep,
len('[-]'))
def beginGroup(self):
"""Like a radio group, or column sort headers."""
self.group = []
def endGroup(self):
"""Like a radio group, or column sort headers."""
pass
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
#row = self.scrollRow + paneRow
col = self.scrollCol + paneCol
offset = 0
for index, control in enumerate(self.controlList):
width = abs(control['width'])
if offset <= col < offset + width:
if control['type'] == 'selection':
name = control['name']
for element in self.group:
elementName = self.controlList[element]['name']
self.controlList[element]['dict'][elementName] = False
control['dict'][name] = True
self.host.controller.optionChanged(name,
control['dict'][name])
break
if control['type'] == 'sort':
name = control['name']
newValue = not control['dict'][name]
if index in self.group:
for element in self.group:
elementName = self.controlList[element]['name']
self.controlList[element]['dict'][
elementName] = None
control['dict'][name] = newValue
self.host.controller.optionChanged(name,
control['dict'][name])
break
if control['type'] == 'toggle':
name = control['name']
control['dict'][name] = not control['dict'][name]
self.host.controller.optionChanged(name,
control['dict'][name])
break
offset += width + len(control['sep'])
def preferredSize(self, rowLimit, colLimit):
return min(rowLimit, 1), colLimit
def render(self):
if self.rows <= 0:
return
line = u''
for control in self.controlList:
label = control['draw'](control)
line += u'%*s%s' % (control['width'], label, control['sep'])
if len(line) >= self.cols:
break
self.writeLineRow = 0
self.writeLine(line[:self.cols], self.color)
class PopupWindow(Window):
def __init__(self, program, host):
if app.config.strict_debug:
assert host
Window.__init__(self, program, host)
self.host = host
self.controller = app.cu_editor.PopupController(self)
self.setTextBuffer(app.text_buffer.TextBuffer(self.program))
self.longestLineLength = 0
self.__message = []
self.showOptions = True
# This will be displayed and should contain the keys that respond to
# user input. This should be updated if you change the controller's
# command set.
self.options = []
def render(self):
"""Display a box of text in the center of the window."""
maxRows, maxCols = self.host.rows, self.host.cols
cols = min(self.longestLineLength + 6, maxCols)
rows = min(len(self.__message) + 4, maxRows)
self.resizeTo(rows, cols)
self.moveTo(maxRows // 2 - rows // 2, maxCols // 2 - cols // 2)
color = self.program.color.get('popup_window')
for row in range(rows):
if row == rows - 2 and self.showOptions:
message = '/'.join(self.options)
elif row == 0 or row >= rows - 3:
self.addStr(row, 0, ' ' * cols, color)
continue
else:
message = self.__message[row - 1]
lineLength = len(message)
spacing1 = (cols - lineLength) // 2
spacing2 = cols - lineLength - spacing1
self.addStr(row, 0, ' ' * spacing1 + message + ' ' * spacing2,
color)
def setMessage(self, message):
"""Sets the Popup window's message to the given message.
message (str): A string that you want to display.
Returns:
None.
"""
self.__message = message.split("\n")
self.longestLineLength = max([len(line) for line in self.__message])
def setOptionsToDisplay(self, options):
"""
This function is used to change the options that are displayed in the
popup window. They will be separated by a '/' character when displayed.
Args:
options (list): A list of possible keys which the user can press and
should be responded to by the controller.
"""
self.options = options
def setTextBuffer(self, textBuffer):
Window.setTextBuffer(self, textBuffer)
self.controller.setTextBuffer(textBuffer)
def unfocus(self):
self.detach()
Window.unfocus(self)
class PaletteWindow(Window):
"""A window with example foreground and background text colors."""
def __init__(self, prg, host):
Window.__init__(self, prg, host)
self.prg = prg
self.resizeTo(16, 16 * 5)
self.moveTo(8, 8)
self.controller = app.cu_editor.PaletteDialogController(self)
self.setTextBuffer(app.text_buffer.TextBuffer(self.program))
def render(self):
width = 16
rows = 16
colorPrefs = self.program.color
for i in range(width):
for k in range(rows):
self.addStr(k, i * 5, ' %3d ' % (i + k * width,),
colorPrefs.get(i + k * width))
def setTextBuffer(self, textBuffer):
Window.setTextBuffer(self, textBuffer)
self.controller.setTextBuffer(textBuffer)
def unfocus(self):
self.detach()
Window.unfocus(self)
class SortableHeaderWindow(OptionsTrinaryStateWindow):
def __init__(self,
program,
parent,
label,
prefCategory,
prefName,
width=None):
if app.config.strict_debug:
assert issubclass(program.__class__,
app.ci_program.CiProgram), program
assert isinstance(label, unicode)
assert isinstance(prefCategory, unicode)
assert isinstance(prefName, unicode)
OptionsTrinaryStateWindow.__init__(self, program, parent, label,
prefCategory, prefName)
self.color = self.program.color.get(u'top_info')
def draw(label, decoration, width):
if width < 0:
x = u'%s %s' % (label, decoration)
else:
x = u'%s %s' % (decoration, label)
return u'%*s' % (width, x)
OptionsTrinaryStateWindow.setUp(self, draw(label, u'v', width),
draw(label, u'^', width),
draw(label, u'-', width))
def deselect(self):
self.controller.clearValue()
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
self.parent.childSelected(self)
self.controller.toggleValue()
| 37.032929 | 84 | 0.580494 | ["Apache-2.0"] | fsx950223/ci_edit | app/window.py | 64,104 | Python |
import os
import sys
import pefile  # used by dump_file() below; this import was missing
def dump_output(path, data):
f = open(path, 'wb')
if data is None:
f.write(b'\0')
else:
f.write(data)
f.close()
def get_path_list(directory):
paths = []
if not os.path.exists(directory):
raise FileNotFoundError('No such directory: {}'.format(directory))
for root, dirs, files in os.walk(directory):
for file in files:
paths.append(os.path.join(root, file))
return paths
def pad_or_trunc(data, size):
if data is None:
return b'~' * size
if len(data) < size:
return data + b'~' * (size - len(data))
else:
return data[:size]
def dump_file(path, **feature_dict):
result = {}
try:
pe = pefile.PE(path, fast_load=True)
except FileNotFoundError as e:
print("%s: %s" % (path, e))
return result
except pefile.PEFormatError as e:
print("%s: %s" % (path, e))
return result
pe.parse_data_directories(directories=[
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'],
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_RESOURCE']])
for name, size in feature_dict.items():
if name == 'imports':
data = combine_imports(extract_imports(pe), size)
elif name == 'exports':
data = extract_exports(pe, size)
elif name == 'entry_point':
data = extract_entry_point(pe, size)
elif name == 'code_entropy':
data = extract_code_entropy(pe, size)
elif name == 'entropies':
data = extract_entropies(pe, size)
elif name == 'icon':
data = get_main_icon(pe, path, size)
elif name == 'resources':
data = extract_resources(pe, path, size)
elif name == 'security':
data = extract_security_attrs(pe, path, size)
elif name == 'header':
data = extract_header(pe, path, size)
elif name == 'size':
data = extract_size(pe, path, size)
else:
raise AttributeError("Unkown option: %s" % (name))
data = pad_or_trunc(data, size)
result[name] = (data, size)
return result
def dump_files(directory, output, **feature_dict):
paths = get_path_list(directory)
total = len(paths)
for i, path in enumerate(paths):
sys.stdout.write("\r{}: {}/{}".format(directory, i + 1, total))
sys.stdout.flush()
output_path = os.path.join(output, path)
if not os.path.exists(output_path):
os.makedirs(output_path)
features = dump_file(path, **feature_dict)
for name, data in features.items():
dump_output(os.path.join(output, path, name), data[0])
def extract_features(directories, output='out', **feature_dict):
for d in directories:
dump_files(d, output, **feature_dict)
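# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming the extract_* helpers referenced by dump_file()
# are available in this package; the directory names and per-feature byte
# sizes below are hypothetical placeholders.
if __name__ == '__main__':
    extract_features(
        ['samples/benign', 'samples/malware'],  # directories of PE files to walk
        output='out',                           # feature dumps mirror the input paths here
        imports=512,                            # bytes kept per feature (padded/truncated)
        exports=128,
        entry_point=64,
    )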
| 28.754717 | 75 | 0.57185 | ["MIT"] | Zemana/utils | pe_extractor/extractor.py | 3,048 | Python |
import functools
import typing
from aws_cdk import core
from cdk_resources.utils import (
app_context,
ALLOWED_ENVIRONMENTS,
get_environment,
)
__all__ = ["ResourceStack", "register_stacks"]
class ResourceStack(core.Stack):
""" """
EXISTING_RESOURCES = None
RESOURCES = None
def __init__(
self, scope: core.App, stack_id: str, **kwargs
) -> None:
super().__init__(scope, stack_id, **kwargs)
# Update Context
app_context.update(app=scope, current_stack=self)
if self.is_valid_environment is False:
raise Exception(
f"`{get_environment()}` must be a valid environment allowed "
f"values {ALLOWED_ENVIRONMENTS}"
)
# Existing resources
for resources in self.EXISTING_RESOURCES or []:
resource_name, Resource, resource_attrs = (
self.get_resource_name(resources[0]),
resources[1],
(resources[2] if len(resources) == 3 else {}),
)
setattr(
self,
resource_name,
Resource(
scope=self,
construct_id=resources[0],
**resource_attrs,
),
)
# Own Resources
for resources in self.RESOURCES or []:
resource_name, Resource, resource_attrs = (
self.get_resource_name(resources[0]),
resources[1],
(resources[2] if len(resources) == 3 else {}),
)
            resource = Resource(scope=self, construct_id=resource_name, **resource_attrs)
setattr(self, resource_name, resource)
@staticmethod
def get_resource_name(value: typing.Union[str, typing.Callable]) -> str:
return value() if hasattr(value, "__call__") else value
@property
@functools.lru_cache(maxsize=None)
def is_valid_environment(self) -> bool:
if len(ALLOWED_ENVIRONMENTS) == 0:
return True
environment = get_environment()
return environment is not None and environment in ALLOWED_ENVIRONMENTS
def register_stacks(
app: core.App, aws_env: core.Environment, stacks: list
) -> None:
# Create Stacks
for stack in stacks:
stack_id, stack_class, stack_kwargs = (
stack[0],
stack[1],
(stack[2] if len(stack) == 3 else {}),
)
stack_class(app, stack_id, env=aws_env, **stack_kwargs)
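# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of wiring stacks at synth time; the account id, region and
# stack ids are hypothetical placeholders, and it assumes no environment
# restriction is configured via ALLOWED_ENVIRONMENTS.
if __name__ == "__main__":
    class DemoResourceStack(ResourceStack):
        # No resources declared; only demonstrates the (stack_id, stack_class,
        # optional_kwargs) tuples accepted by register_stacks().
        RESOURCES = []
    demo_app = core.App()
    demo_env = core.Environment(account="123456789012", region="us-east-1")
    register_stacks(
        demo_app,
        demo_env,
        stacks=[
            ("demo-core", DemoResourceStack),
            ("demo-extra", DemoResourceStack, {"description": "Second demo stack"}),
        ],
    )
    demo_app.synth()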
| 29.541176 | 78 | 0.573875 | ["MIT"] | sfernandezf/cdk-resources | cdk_resources/stacks.py | 2,511 | Python |
# Generated by Django 3.1.1 on 2020-09-08 18:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('grocery', '0003_auto_20200908_1417'),
]
operations = [
migrations.AlterField(
model_name='item',
name='list',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item', to='grocery.list'),
),
migrations.AlterField(
model_name='list',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='list', to=settings.AUTH_USER_MODEL),
),
]
| 31.037037 | 142 | 0.656325 | ["MIT"] | MeRichard123/LifeExpenses | ExpenseTracker/grocery/migrations/0004_auto_20200908_1918.py | 838 | Python |
import numpy as np
from scipy.io import loadmat
m = loadmat('test.mat', squeeze_me=True, struct_as_record=True,
mat_dtype=True)
np.savez('test.npz', **m)
| 23.285714 | 63 | 0.711656 | ["BSD-3-Clause"] | 0x0L/scipy | scipy/fftpack/tests/gendata.py | 163 | Python |
import re
#
# Module 2 of the homework assignment for webinar 4.
#
# The user enters arbitrary numbers separated by commas.
# Store the numbers in a list.
# Build a new list containing only the unique elements of the original.
# Print it to the screen.
#
s_input = input("Enter the list elements separated by [,:/]: ")
l_numbers = list(map(int, re.split(",|:|/", s_input)))
print([x for x in l_numbers if l_numbers.count(x) == 1])
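# Example (illustrative): the input "1,2:2/3" yields [1, 3], i.e. only values
# that occur exactly once in the original list are kept.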
| 30.714286 | 78 | 0.718605 | ["MIT"] | andreylrr/PythonDeveloperHW4 | 2seq.py | 645 | Python |
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode BASE58, P2PKH and P2SH addresses."""
from .script import hash256, hash160, sha256, CScript, OP_0
from .util import bytes_to_hex_str, hex_str_to_bytes
from . import segwit_addr
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = bytes_to_hex_str(b)
str = bytes_to_hex_str(chr(version).encode('latin-1')) + str
checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str)))
str += checksum[:8]
value = int('0x'+str,0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
# TODO: def base58_decode
def keyhash_to_p2pkh(hash, main = False):
assert (len(hash) == 20)
version = 76 if main else 138
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main = False):
assert (len(hash) == 20)
version = 28 if main else 88
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main = False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return segwit_addr.encode("xpc" if main else "xpcrt", version, program)
def script_to_p2wsh(script, main = False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main = False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert(False)
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert(False)
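def demo_addresses():
    """Illustrative only (not part of the original framework helpers).
    Derives addresses from a made-up 33-byte compressed public key; the key
    below is a placeholder, not a real key.
    """
    dummy_pubkey = '02' + '11' * 32  # hex for a 33-byte compressed pubkey
    return (key_to_p2pkh(dummy_pubkey),
            key_to_p2sh_p2wpkh(dummy_pubkey),
            key_to_p2wpkh(dummy_pubkey))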
| 32.420455 | 75 | 0.687697 | ["MIT"] | Falconno7/xpchain | test/functional/test_framework/address.py | 2,853 | Python |
from django import forms
class PaymentForm(forms.Form):
stripeTokenType = forms.CharField(required=False)
stripeEmail = forms.EmailField(required=False)
stripeToken = forms.CharField(required=True)
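# Illustrative usage in a view (not part of the original module); assumes a
# standard Django request carrying the Stripe checkout POST data:
#
#     form = PaymentForm(request.POST)
#     if form.is_valid():
#         token = form.cleaned_data['stripeToken']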
| 23.666667 | 53 | 0.769953 | ["MIT"] | phildini/logtacts | payments/forms.py | 213 | Python |
from __future__ import absolute_import, division, unicode_literals
import sys
import numpy as np
import pytest
try:
import holoviews as hv
except Exception:
hv = None
hv_available = pytest.mark.skipif(hv is None, reason="requires holoviews")
try:
import matplotlib as mpl
mpl.use('Agg')
except Exception:
mpl = None
mpl_available = pytest.mark.skipif(mpl is None, reason="requires matplotlib")
try:
import pandas as pd
except Exception:
pd = None
pd_available = pytest.mark.skipif(pd is None, reason="requires pandas")
try:
import streamz
except Exception:
streamz = None
streamz_available = pytest.mark.skipif(streamz is None, reason="requires streamz")
try:
import jupyter_bokeh
except Exception:
jupyter_bokeh = None
jb_available = pytest.mark.skipif(jupyter_bokeh is None, reason="requires jupyter_bokeh")
py3_only = pytest.mark.skipif(sys.version_info.major < 3, reason="requires Python 3")
def mpl_figure():
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(np.random.rand(10, 2))
plt.close(fig)
return fig
def check_layoutable_properties(layoutable, model):
layoutable.background = '#ffffff'
assert model.background == '#ffffff'
layoutable.css_classes = ['custom_class']
assert model.css_classes == ['custom_class']
layoutable.width = 500
assert model.width == 500
layoutable.height = 450
assert model.height == 450
layoutable.min_height = 300
assert model.min_height == 300
layoutable.min_width = 250
assert model.min_width == 250
layoutable.max_height = 600
assert model.max_height == 600
layoutable.max_width = 550
assert model.max_width == 550
layoutable.margin = 10
assert model.margin == (10, 10, 10, 10)
layoutable.sizing_mode = 'stretch_width'
assert model.sizing_mode == 'stretch_width'
layoutable.width_policy = 'max'
assert model.width_policy == 'max'
layoutable.height_policy = 'min'
assert model.height_policy == 'min'
| 23.689655 | 89 | 0.711305 | ["BSD-3-Clause"] | bstadlbauer/panel | panel/tests/util.py | 2,061 | Python |
# Copyright (c) 2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/micro-ROS/system_modes.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import launch
import launch.actions
from launch.substitutions import LaunchConfiguration
import launch_ros.actions
logger = launch.substitutions.LaunchConfiguration("log_level")
def generate_launch_description():
return launch.LaunchDescription([
launch.actions.DeclareLaunchArgument(
'lookup_table',
description='Path to lookup table'),
launch.actions.DeclareLaunchArgument(
'log_level',
default_value=["info"],
description='Logging level'),
launch_ros.actions.Node(
package='micro_ros_diagnostic_bridge',
executable='diagnostic_bridge',
parameters=[{'lookup_table': LaunchConfiguration('lookup_table')}],
output='screen',
arguments=['--ros-args', '--log-level', logger])
])
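# Illustrative invocation (not part of the original file), assuming standard
# ROS 2 launch argument syntax and that this launch file is installed with the
# package:
#
#   ros2 launch micro_ros_diagnostic_bridge diagnostic_bridge.launch.py \
#       lookup_table:=/path/to/lookup_table.yaml log_level:=debug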
| 37.634146 | 86 | 0.705768 | ["Apache-2.0"] | bjv-capra/micro_ros_diagnostics | micro_ros_diagnostic_bridge/launch/diagnostic_bridge.launch.py | 1,543 | Python |
"""
DriverFactory class
Note: Change this class as you add support for:
1. SauceLabs/BrowserStack
2. More browsers like Opera
"""
import dotenv,os,sys,requests,json
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome import service
from selenium.webdriver.remote.webdriver import RemoteConnection
from conf import opera_browser_conf
class DriverFactory():
def __init__(self,browser='ff',browser_version=None,os_name=None):
"Constructor for the Driver factory"
self.browser=browser
self.browser_version=browser_version
self.os_name=os_name
def get_web_driver(self,remote_flag,os_name,os_version,browser,browser_version):
"Return the appropriate driver"
if (remote_flag.lower() == 'n'):
web_driver = self.run_local(os_name,os_version,browser,browser_version)
else:
print("DriverFactory does not know the browser: ",browser)
web_driver = None
return web_driver
def run_local(self,os_name,os_version,browser,browser_version):
"Return the local driver"
local_driver = None
if browser.lower() == "ff" or browser.lower() == 'firefox':
local_driver = webdriver.Firefox()
elif browser.lower() == "ie":
local_driver = webdriver.Ie()
elif browser.lower() == "chrome":
local_driver = webdriver.Chrome()
elif browser.lower() == "opera":
opera_options = None
try:
opera_browser_location = opera_browser_conf.location
options = webdriver.ChromeOptions()
options.binary_location = opera_browser_location # path to opera executable
local_driver = webdriver.Opera(options=options)
except Exception as e:
print("\nException when trying to get remote webdriver:%s"%sys.modules[__name__])
print("Python says:%s"%str(e))
if 'no Opera binary' in str(e):
print("SOLUTION: It looks like you are trying to use Opera Browser. Please update Opera Browser location under conf/opera_browser_conf.\n")
elif browser.lower() == "safari":
local_driver = webdriver.Safari()
return local_driver
def get_firefox_driver(self):
"Return the Firefox driver"
driver = webdriver.Firefox(firefox_profile=self.get_firefox_profile())
return driver
def get_firefox_profile(self):
"Return a firefox profile"
return self.set_firefox_profile()
def set_firefox_profile(self):
"Setup firefox with the right preferences and return a profile"
try:
self.download_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','downloads'))
if not os.path.exists(self.download_dir):
os.makedirs(self.download_dir)
except Exception as e:
print("Exception when trying to set directory structure")
print(str(e))
profile = webdriver.firefox.firefox_profile.FirefoxProfile()
set_pref = profile.set_preference
set_pref('browser.download.folderList', 2)
set_pref('browser.download.dir', self.download_dir)
set_pref('browser.download.useDownloadDir', True)
set_pref('browser.helperApps.alwaysAsk.force', False)
set_pref('browser.helperApps.neverAsk.openFile', 'text/csv,application/octet-stream,application/pdf')
set_pref('browser.helperApps.neverAsk.saveToDisk', 'text/csv,application/vnd.ms-excel,application/pdf,application/csv,application/octet-stream')
set_pref('plugin.disable_full_page_plugin_for_types', 'application/pdf')
set_pref('pdfjs.disabled',True)
return profile
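# --- Illustrative usage (not part of the original module) ---
# A minimal sketch; assumes the matching browser driver binary (e.g.
# chromedriver) is installed and on PATH.
if __name__ == '__main__':
    factory = DriverFactory(browser='chrome')
    driver = factory.get_web_driver(remote_flag='n',
                                    os_name=None,
                                    os_version=None,
                                    browser='chrome',
                                    browser_version=None)
    if driver is not None:
        driver.get('https://example.com')
        driver.quit()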
| 38.634615 | 160 | 0.659781 | ["MIT"] | akkuldn/interview-scheduler | QA/page_objects/DriverFactory.py | 4,018 | Python |
from __future__ import unicode_literals
import unittest
from torrent_parser import encode
class TestEncode(unittest.TestCase):
def test_encode(self):
self.assertEqual(encode(12345), b'i12345e')
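    def test_encode_list(self):
        # Illustrative extra case (not in the original file); assumes encode()
        # follows standard bencode rules for a list of integers.
        self.assertEqual(encode([1, 2, 3]), b'li1ei2ei3ee')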
| 17.583333 | 51 | 0.772512 | ["MIT"] | 7sDream/torrent_parser | tests/test_encode.py | 211 | Python |
"""
*Element Height*
"""
from strism._geoshape import Pixel
from ._dimension import Dimension
__all__ = ["ElementHeight"]
class ElementHeight(
Pixel,
Dimension,
):
def __init__(
self,
height: int,
):
super(ElementHeight, self).__init__(
height,
)
| 12.72 | 44 | 0.578616 | ["Apache-2.0"] | jedhsu/text | text/_shape/height.py | 318 | Python |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Define a class for creating the jailed context."""
import os
import shutil
from subprocess import run, PIPE
from retry.api import retry_call
from framework.defs import API_USOCKET_NAME, FC_BINARY_NAME, \
JAILER_DEFAULT_CHROOT
class JailerContext:
"""Represents jailer configuration and contains jailer helper functions.
Each microvm will have a jailer configuration associated with it.
"""
# Keep in sync with parameters from code base.
jailer_id = None
exec_file = None
numa_node = None
uid = None
gid = None
chroot_base = None
netns = None
daemonize = None
seccomp_level = None
def __init__(
self,
jailer_id,
exec_file,
numa_node=0,
uid=1234,
gid=1234,
chroot_base=JAILER_DEFAULT_CHROOT,
netns=None,
daemonize=True,
seccomp_level=2
):
"""Set up jailer fields.
This plays the role of a default constructor as it populates
the jailer's fields with some default values. Each field can be
further adjusted by each test even with None values.
"""
self.jailer_id = jailer_id
self.exec_file = exec_file
self.numa_node = numa_node
self.uid = uid
self.gid = gid
self.chroot_base = chroot_base
self.netns = netns if netns is not None else jailer_id
self.daemonize = daemonize
self.seccomp_level = seccomp_level
def __del__(self):
"""Cleanup this jailer context."""
self.cleanup()
def construct_param_list(self):
"""Create the list of parameters we want the jailer to start with.
We want to be able to vary any parameter even the required ones as we
might want to add integration tests that validate the enforcement of
mandatory arguments.
"""
jailer_param_list = []
# Pretty please, try to keep the same order as in the code base.
if self.jailer_id is not None:
jailer_param_list.extend(['--id', str(self.jailer_id)])
if self.exec_file is not None:
jailer_param_list.extend(['--exec-file', str(self.exec_file)])
if self.numa_node is not None:
jailer_param_list.extend(['--node', str(self.numa_node)])
if self.uid is not None:
jailer_param_list.extend(['--uid', str(self.uid)])
if self.gid is not None:
jailer_param_list.extend(['--gid', str(self.gid)])
if self.chroot_base is not None:
jailer_param_list.extend(
['--chroot-base-dir', str(self.chroot_base)]
)
if self.netns is not None:
jailer_param_list.extend(['--netns', str(self.netns_file_path())])
if self.daemonize:
jailer_param_list.append('--daemonize')
if self.seccomp_level is not None:
jailer_param_list.extend(
['--seccomp-level', str(self.seccomp_level)]
)
return jailer_param_list
def chroot_base_with_id(self):
"""Return the MicroVM chroot base + MicroVM ID."""
return os.path.join(
self.chroot_base if self.chroot_base is not None
else JAILER_DEFAULT_CHROOT,
FC_BINARY_NAME,
self.jailer_id
)
def api_socket_path(self):
"""Return the MicroVM API socket path."""
return os.path.join(self.chroot_path(), API_USOCKET_NAME)
def chroot_path(self):
"""Return the MicroVM chroot path."""
return os.path.join(self.chroot_base_with_id(), 'root')
def jailed_path(self, file_path, create=False):
"""Create a hard link owned by uid:gid.
Create a hard link to the specified file, changes the owner to
uid:gid, and returns a path to the link which is valid within the jail.
"""
file_name = os.path.basename(file_path)
global_p = os.path.join(self.chroot_path(), file_name)
jailed_p = os.path.join("/", file_name)
if create:
cmd = 'ln -f {} {}'.format(file_path, global_p)
run(cmd, shell=True, check=True)
cmd = 'chown {}:{} {}'.format(self.uid, self.gid, global_p)
run(cmd, shell=True, check=True)
return jailed_p
def netns_file_path(self):
"""Get the host netns file path for a jailer context.
Returns the path on the host to the file which represents the netns,
and which must be passed to the jailer as the value of the --netns
parameter, when in use.
"""
if self.netns:
return '/var/run/netns/{}'.format(self.netns)
return None
def netns_cmd_prefix(self):
"""Return the jailer context netns file prefix."""
if self.netns:
return 'ip netns exec {} '.format(self.netns)
return ''
def setup(self):
"""Set up this jailer context."""
os.makedirs(
self.chroot_base if self.chroot_base is not None
else JAILER_DEFAULT_CHROOT,
exist_ok=True
)
if self.netns:
run('ip netns add {}'.format(self.netns), shell=True, check=True)
def cleanup(self):
"""Clean up this jailer context."""
shutil.rmtree(self.chroot_base_with_id(), ignore_errors=True)
if self.netns:
_ = run(
'ip netns del {}'.format(self.netns),
shell=True,
stderr=PIPE
)
# Remove the cgroup folders associated with this microvm.
# The base /sys/fs/cgroup/<controller>/firecracker folder will remain,
# because we can't remove it unless we're sure there's no other running
# microVM.
# Firecracker is interested in these 3 cgroups for the moment.
controllers = ('cpu', 'cpuset', 'pids')
for controller in controllers:
# Obtain the tasks from each cgroup and wait on them before
# removing the microvm's associated cgroup folder.
try:
retry_call(
f=self._kill_crgoup_tasks,
fargs=[controller],
exceptions=TimeoutError,
max_delay=5
)
except TimeoutError:
pass
            # The files inside a cgroup aren't real files and don't need to be
            # removed; that is why 'rm -rf' and 'rmdir' fail on them.
# We only need to remove the cgroup directories. The "-depth"
# argument tells find to do a depth first recursion, so that
# we remove any sub cgroups first if they are there.
back_cmd = r'-depth -type d -exec rmdir {} \;'
cmd = 'find /sys/fs/cgroup/{}/{}/{} {}'.format(
controller,
FC_BINARY_NAME,
self.jailer_id,
back_cmd
)
            # We do not need to know if it succeeded or not; after all, we are
# trying to clean up resources created by the jailer itself not
# the testing system.
_ = run(cmd, shell=True, stderr=PIPE)
def _kill_crgoup_tasks(self, controller):
"""Simulate wait on pid.
Read the tasks file and stay there until /proc/{pid}
disappears. The retry function that calls this code makes
sure we do not timeout.
"""
tasks_file = '/sys/fs/cgroup/{}/{}/{}/tasks'.format(
controller,
FC_BINARY_NAME,
self.jailer_id
)
# If tests do not call start on machines, the cgroups will not be
# created.
if not os.path.exists(tasks_file):
return True
cmd = 'cat {}'.format(tasks_file)
tasks = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')
tasks_split = tasks.splitlines()
for task in tasks_split:
if os.path.exists("/proc/{}".format(task)):
raise TimeoutError
return True
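# --- Illustrative usage (not part of the original module) ---
# A minimal sketch; the id and binary path are hypothetical placeholders, and
# setup()/cleanup() need root privileges plus the `ip` tool to manage netns.
def demo_jailer_context():
    context = JailerContext(jailer_id='demo-vm',
                            exec_file='/usr/local/bin/firecracker',
                            daemonize=False)
    return context.construct_param_list()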
| 35.004274 | 79 | 0.588084 | ["Apache-2.0"] | Pennyzct/firecracker | tests/framework/jailer.py | 8,191 | Python |
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from myuw.dao.calendar import api_request
from myuw.views.api import ProtectedAPI
from myuw.views.error import handle_exception
from myuw.views import prefetch_resources
from myuw.logger.timer import Timer
from myuw.logger.logresp import log_api_call
logger = logging.getLogger(__name__)
class DepartmentalCalendar(ProtectedAPI):
def get(self, request, *args, **kwargs):
timer = Timer()
try:
prefetch_resources(request,
prefetch_group=True,
prefetch_enrollment=True)
response = api_request(request)
log_api_call(timer, request, "Get DepartmentalCalendar")
return self.json_response(response)
except Exception:
return handle_exception(logger, timer, traceback)
| 33.321429 | 68 | 0.69239 | ["Apache-2.0"] | timtim17/myuw | myuw/views/api/calendar.py | 933 | Python |
from django import forms
from django.forms import ModelForm
from .models import Review
class ReviewForm(ModelForm):
required_css_class = 'required'
def __init__(self, *args, **kwargs):
"""
user object is passed to the form in kwargs in the view
the user objected is removed from kwargs and then the
super class form object is instantiated. This is because
our form needs the user object not its super class.
"""
self.user = kwargs.pop('user', None)
super(ReviewForm, self).__init__(*args, **kwargs)
class Meta:
model = Review
fields = [
'title', 'review', 'type', 'book'
]
def clean_book(self, *args, **kwargs):
"""
This method checks if a user has already reviewed
the selected book. As per django docs exists() is
an efficient way of checking this.
"""
book = self.cleaned_data.get("book")
if Review.objects.filter(book=book, author=self.user).exists():
raise forms.ValidationError("Book already reviewed by user {}".format(self.user))
else:
return book
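# Illustrative usage in a view (not part of the original module); assumes an
# authenticated Django request:
#
#     form = ReviewForm(request.POST, user=request.user)
#     if form.is_valid():
#         review = form.save(commit=False)
#         review.author = request.user
#         review.save()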
| 31.513514 | 93 | 0.616638 | ["Apache-2.0"] | mohammadasim/online-bookstore | reviews/forms.py | 1,166 | Python |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
        kwargs
            Passed to the constructor.
Returns
-------
        Instance of :class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
        Instance of :class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
        :class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
This parameter only useful if `missing_rank` is True
kwargs
Compatibility.
Returns
-------
        :class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
        :class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True removes feature with single occurrence of missing rank.
If False all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
        :class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
        :class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Format in which to generate the taxonomy (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError("Only 'lineage' export is currently supported.")
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
            The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get the first lineage sample for notation testing, assuming the rest have the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
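# --- Illustrative usage (not part of the original module) ---
# A minimal sketch; the lineage strings are made-up examples in the default
# 'greengenes' notation that the constructor parses.
def demo_rep_taxonomy():
    lineages = pd.Series(
        {
            1: "k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales; "
               "f__Lactobacillaceae; g__Lactobacillus; s__",
            2: "k__Bacteria; p__Proteobacteria; c__Gammaproteobacteria; o__; f__; g__; s__",
        }
    )
    taxonomy = RepTaxonomy(lineages)
    return taxonomy.get_lineage_by_id(missing_rank=True)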
| 37.081016 | 133 | 0.559903 | ["BSD-3-Clause"] | mmtechslv/PhyloMAF | pmaf/biome/essentials/_taxonomy.py | 30,666 | Python |
import pickle
import numpy as np
# pickle_file = 'experiment_pickle_12_0.15_5_0.075.p'
pickle_file = 'experiment_pickle_12_0.1_5_0.075.p'
content = pickle.load(open(pickle_file))
familys = content.keys()
for family in familys:
collected = []
measurements = content[family]
for measurement in measurements:
collected.append(np.mean(measurement[1]))
print family, ':', round(np.median(collected), 3), '+-', round(np.percentile(collected, 75) - np.percentile(collected, 25), 3)
| 29.588235 | 130 | 0.717694 | ["Unlicense"] | ai-se/BEETLE__FSE_2018 | src/RQ4_exp/run_pickle.py | 503 | Python |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from numpy.testing import assert_equal, assert_almost_equal
from qf_lib.backtesting.events.time_event.regular_time_event.market_close_event import MarketCloseEvent
from qf_lib.backtesting.events.time_event.regular_time_event.market_open_event import MarketOpenEvent
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.utils.dateutils.date_format import DateFormat
from qf_lib.common.utils.dateutils.string_to_date import str_to_date
from qf_lib.containers.qf_data_array import QFDataArray
from qf_lib_tests.integration_tests.backtesting.alpha_model_strategy_testers.test_alpha_model_strategy_for_stop_losses import \
TestAlphaModelStrategy
class TestAlphaModelIntradayStrategy(TestAlphaModelStrategy):
data_start_date = str_to_date("2014-12-25 00:00:00.00", DateFormat.FULL_ISO)
data_end_date = str_to_date("2015-02-28 23:59:59.00", DateFormat.FULL_ISO)
end_date = str_to_date("2015-02-28 13:30:00.00", DateFormat.FULL_ISO)
frequency = Frequency.MIN_1
def test_stop_losses(self):
expected_transactions_quantities = \
[8130, -127, 1, -8004, 7454, -58, -7396, 6900, -6900, 6390, -44, -6346, 5718, -36]
result_transactions_quantities = [t.quantity for t in self.transactions]
assert_equal(expected_transactions_quantities, result_transactions_quantities)
expected_transactions_prices = [125, 130, 135, 235.6, 255, 260, 259.35, 280, 264.1, 285, 290, 282, 315, 320]
result_transactions_prices = [t.price for t in self.transactions]
assert_almost_equal(expected_transactions_prices, result_transactions_prices)
expected_portfolio_values = [1024390, 1064659, 1064659, 1064659, 1104677, 1144697, 1184717, 1224737, 1264757,
1264757, 1264757, 1304777, 1344797, 1384817, 1424837, 1464857, 1464857, 1464857,
1504877, 1544897, 1584917, 1624937, 1664957, 1664957, 1664957, 1704977, 1744997,
1785017, 1825037, 1865057, 1865057, 1865057, 1905077, 1945097, 1985117, 1885867.4,
1908229.4, 1908229.4, 1908229.4, 1945325.4, 1982305.4, 2019285.4, 1918330, 1808620,
1808620, 1808620, 1827790, 1859608, 1891338, 1923068, 1954798, 1954798, 1954798,
1789802, 1806956, 1835438, 1863848, 1892258, 1892258]
assert_almost_equal(expected_portfolio_values, list(self.portfolio.portfolio_eod_series()))
def _make_mock_data_array(self, tickers, fields):
all_dates_market_open = pd.date_range(start=self.data_start_date + MarketOpenEvent.trigger_time(),
end=self.data_end_date + MarketOpenEvent.trigger_time(), freq="B")
all_dates_market_close = pd.date_range(start=self.data_start_date + MarketCloseEvent.trigger_time() - Frequency.MIN_1.time_delta(),
end=self.data_end_date + MarketCloseEvent.trigger_time() - Frequency.MIN_1.time_delta(), freq="B")
num_of_dates = len(all_dates_market_open)
num_of_tickers = len(tickers)
num_of_fields = len(fields)
start_value = 100.0
values = np.arange(start_value, num_of_dates * num_of_tickers * num_of_fields + start_value)
reshaped_values = np.reshape(values, (num_of_dates, num_of_tickers, num_of_fields))
mocked_result_market_open = QFDataArray.create(all_dates_market_open, tickers, fields, data=reshaped_values)
mocked_result_market_close = QFDataArray.create(all_dates_market_close, tickers, fields, data=reshaped_values)
mocked_result_market_close.loc[:, :, PriceField.Low] -= 5.0
mocked_result_market_close.loc[:, :, PriceField.High] += 5.0
all_dates = all_dates_market_open.union(all_dates_market_close)
mocked_result = QFDataArray.create(all_dates, tickers, fields)
mocked_result.loc[all_dates_market_open, :, :] = mocked_result_market_open.loc[:, :, :]
mocked_result.loc[all_dates_market_close, :, :] = mocked_result_market_close.loc[:, :, :]
self._add_test_cases(mocked_result, tickers)
return mocked_result
def _add_test_cases(self, mocked_result, tickers):
# single low price breaking the stop level
mocked_result.loc[
str_to_date('2015-02-05 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0
# two consecutive low prices breaking the stop level
mocked_result.loc[
str_to_date('2015-02-12 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0
mocked_result.loc[
str_to_date('2015-02-13 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0
# single open price breaking the stop level
mocked_result.loc[
str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 25.0
mocked_result.loc[str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Open] = \
mocked_result.loc[str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low]
| 60.373737 | 145 | 0.704367 | [
"Apache-2.0"
] | ajmal017/qf-lib | qf_lib_tests/integration_tests/backtesting/alpha_model_strategy_testers/test_alpha_model_strategy_for_stop_losses_intraday.py | 5,979 | Python |
import json
import os
from djoser.conf import settings as djoser_settings
from djoser.compat import get_user_email
from django.utils.timezone import now
from django.http import HttpResponse
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes, action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from django.db import transaction
from django.conf import settings
from .authentication import WebpageTokenAuth
from .models import AHJUserMaintains, AHJ, User, APIToken, Contact, PreferredContactMethod
from .permissions import IsSuperuser
from .serializers import UserSerializer
from djoser.views import UserViewSet
from .utils import get_enum_value_row, filter_dict_keys, ENUM_FIELDS
@authentication_classes([WebpageTokenAuth])
@permission_classes([IsAuthenticated])
class ConfirmPasswordReset(UserViewSet):
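    """
    Overrides djoser's password reset confirmation so that a successful reset also marks the user as active.
    """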
@action(["post"], detail=False)
def reset_password_confirm(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.user.set_password(serializer.data["new_password"])
if hasattr(serializer.user, "last_login"):
serializer.user.last_login = now()
serializer.user.is_active = True # The purpose of overwriting this endpoint is to set users as active if performing password reset confirm.
serializer.user.save() # The user had to access their email account to perform a password reset.
if djoser_settings.PASSWORD_CHANGED_EMAIL_CONFIRMATION:
context = {"user": serializer.user}
to = [get_user_email(serializer.user)]
djoser_settings.EMAIL.password_changed_confirmation(self.request, context).send(to)
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET'])
@authentication_classes([WebpageTokenAuth])
@permission_classes([IsAuthenticated])
def get_active_user(request):
"""
Endpoint for getting the active user
through the authtoken
"""
return Response(UserSerializer(request.user, context={'is_public_view': False}).data, status=status.HTTP_200_OK)
@api_view(['GET'])
def get_single_user(request, username):
"""
Function view for getting a single user with the specified Username = username
"""
context = {'is_public_view': True}
if request.auth is not None and request.user.Username == username:
context['is_public_view'] = False
try:
user = User.objects.get(Username=username)
return Response(UserSerializer(user, context=context).data, status=status.HTTP_200_OK)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@authentication_classes([WebpageTokenAuth])
@permission_classes([IsAuthenticated])
def user_update(request):
"""
Update the user profile associated with the requesting user.
"""
changeable_user_fields = {'Username', 'PersonalBio', 'URL', 'CompanyAffiliation'}
changeable_contact_fields = {'FirstName', 'LastName', 'URL', 'WorkPhone', 'PreferredContactMethod', 'Title'}
user_data = filter_dict_keys(request.data, changeable_user_fields)
contact_data = filter_dict_keys(request.data, changeable_contact_fields)
for field in ENUM_FIELDS.intersection(contact_data.keys()):
contact_data[field] = get_enum_value_row(field, contact_data[field])
user = request.user
User.objects.filter(UserID=user.UserID).update(**user_data)
Contact.objects.filter(ContactID=user.ContactID.ContactID).update(**contact_data)
return Response('Success', status=status.HTTP_200_OK)
@api_view(['GET'])
@authentication_classes([WebpageTokenAuth])
@permission_classes([IsAuthenticated, IsSuperuser])
def create_api_token(request):
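    """
    Generate a new API token for the requesting superuser, deleting any existing token in the same transaction.
    """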
try:
user = request.user
with transaction.atomic():
APIToken.objects.filter(user=user).delete()
api_token = APIToken.objects.create(user=user)
return Response({'auth_token': api_token.key}, status=status.HTTP_201_CREATED)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@authentication_classes([WebpageTokenAuth])
@permission_classes([IsAuthenticated, IsSuperuser])
def set_ahj_maintainer(request):
"""
View to assign a user as a data maintainer of an AHJ
    Expects a Username and the primary key of an AHJ (AHJPK)
"""
try:
username = request.data['Username']
ahjpk = request.data['AHJPK']
user = User.objects.get(Username=username)
ahj = AHJ.objects.get(AHJPK=ahjpk)
maintainer_record = AHJUserMaintains.objects.filter(AHJPK=ahj, UserID=user)
if maintainer_record.exists():
maintainer_record.update(MaintainerStatus=True)
else:
AHJUserMaintains.objects.create(UserID=user, AHJPK=ahj, MaintainerStatus=True)
return Response(UserSerializer(user).data, status=status.HTTP_200_OK)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@authentication_classes([WebpageTokenAuth])
@permission_classes([IsAuthenticated, IsSuperuser])
def remove_ahj_maintainer(request):
"""
View to revoke a user as a data maintainer of an AHJ
    Expects a Username and the primary key of an AHJ (AHJPK)
"""
try:
username = request.data['Username']
ahjpk = request.data['AHJPK']
user = User.objects.get(Username=username)
ahj = AHJ.objects.get(AHJPK=ahjpk)
AHJUserMaintains.objects.filter(AHJPK=ahj, UserID=user).update(MaintainerStatus=False)
return Response(UserSerializer(user).data, status=status.HTTP_200_OK)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
| 41.194444 | 147 | 0.736514 | [
"MIT"
] | SunSpecOrangeButton/ahj-registry | server/ahj_app/views_users.py | 5,932 | Python |
#!/usr/bin/env python
import os
import sys
basepath = os.path.dirname(os.path.abspath(__file__))
basepath = os.path.abspath(os.path.join(basepath, os.pardir))
wavpath = os.path.join(basepath, "spectro", "tchaikovsky.wav")
sys.path.append(basepath)
from utils import PhysicalKeyboard
from spectro import Spectro
kbd = PhysicalKeyboard()
spectro = Spectro(kbd)
spectro.play(wavpath)
| 19.4 | 62 | 0.765464 | [
"MIT"
] | valschneider/lauzhack2017 | demos/spectro_tchai.py | 388 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetShareResult',
'AwaitableGetShareResult',
'get_share',
]
@pulumi.output_type
class GetShareResult:
"""
Represents a share on the Data Box Edge/Gateway device.
"""
def __init__(__self__, access_protocol=None, azure_container_info=None, client_access_rights=None, data_policy=None, description=None, id=None, monitoring_status=None, name=None, refresh_details=None, share_mappings=None, share_status=None, system_data=None, type=None, user_access_rights=None):
if access_protocol and not isinstance(access_protocol, str):
raise TypeError("Expected argument 'access_protocol' to be a str")
pulumi.set(__self__, "access_protocol", access_protocol)
if azure_container_info and not isinstance(azure_container_info, dict):
raise TypeError("Expected argument 'azure_container_info' to be a dict")
pulumi.set(__self__, "azure_container_info", azure_container_info)
if client_access_rights and not isinstance(client_access_rights, list):
raise TypeError("Expected argument 'client_access_rights' to be a list")
pulumi.set(__self__, "client_access_rights", client_access_rights)
if data_policy and not isinstance(data_policy, str):
raise TypeError("Expected argument 'data_policy' to be a str")
pulumi.set(__self__, "data_policy", data_policy)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if monitoring_status and not isinstance(monitoring_status, str):
raise TypeError("Expected argument 'monitoring_status' to be a str")
pulumi.set(__self__, "monitoring_status", monitoring_status)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if refresh_details and not isinstance(refresh_details, dict):
raise TypeError("Expected argument 'refresh_details' to be a dict")
pulumi.set(__self__, "refresh_details", refresh_details)
if share_mappings and not isinstance(share_mappings, list):
raise TypeError("Expected argument 'share_mappings' to be a list")
pulumi.set(__self__, "share_mappings", share_mappings)
if share_status and not isinstance(share_status, str):
raise TypeError("Expected argument 'share_status' to be a str")
pulumi.set(__self__, "share_status", share_status)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if user_access_rights and not isinstance(user_access_rights, list):
raise TypeError("Expected argument 'user_access_rights' to be a list")
pulumi.set(__self__, "user_access_rights", user_access_rights)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> str:
"""
Access protocol to be used by the share.
"""
return pulumi.get(self, "access_protocol")
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> Optional['outputs.AzureContainerInfoResponse']:
"""
Azure container mapping for the share.
"""
return pulumi.get(self, "azure_container_info")
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> Optional[Sequence['outputs.ClientAccessRightResponse']]:
"""
        List of IP addresses and corresponding access rights on the share (required for NFS protocol).
"""
return pulumi.get(self, "client_access_rights")
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> Optional[str]:
"""
Data policy of the share.
"""
return pulumi.get(self, "data_policy")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description for the share.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
The path ID that uniquely identifies the object.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> str:
"""
Current monitoring status of the share.
"""
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> str:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> Optional['outputs.RefreshDetailsResponse']:
"""
Details of the refresh job on this share.
"""
return pulumi.get(self, "refresh_details")
@property
@pulumi.getter(name="shareMappings")
def share_mappings(self) -> Sequence['outputs.MountPointMapResponse']:
"""
Share mount point to the role.
"""
return pulumi.get(self, "share_mappings")
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> str:
"""
Current status of the share.
"""
return pulumi.get(self, "share_status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Share on ASE device
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> Optional[Sequence['outputs.UserAccessRightResponse']]:
"""
Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
return pulumi.get(self, "user_access_rights")
class AwaitableGetShareResult(GetShareResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetShareResult(
access_protocol=self.access_protocol,
azure_container_info=self.azure_container_info,
client_access_rights=self.client_access_rights,
data_policy=self.data_policy,
description=self.description,
id=self.id,
monitoring_status=self.monitoring_status,
name=self.name,
refresh_details=self.refresh_details,
share_mappings=self.share_mappings,
share_status=self.share_status,
system_data=self.system_data,
type=self.type,
user_access_rights=self.user_access_rights)
def get_share(device_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetShareResult:
"""
Represents a share on the Data Box Edge/Gateway device.
:param str device_name: The device name.
:param str name: The share name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['deviceName'] = device_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20201201:getShare', __args__, opts=opts, typ=GetShareResult).value
return AwaitableGetShareResult(
access_protocol=__ret__.access_protocol,
azure_container_info=__ret__.azure_container_info,
client_access_rights=__ret__.client_access_rights,
data_policy=__ret__.data_policy,
description=__ret__.description,
id=__ret__.id,
monitoring_status=__ret__.monitoring_status,
name=__ret__.name,
refresh_details=__ret__.refresh_details,
share_mappings=__ret__.share_mappings,
share_status=__ret__.share_status,
system_data=__ret__.system_data,
type=__ret__.type,
user_access_rights=__ret__.user_access_rights)
| 38.389121 | 299 | 0.663215 | [
"Apache-2.0"
] | polivbr/pulumi-azure-native | sdk/python/pulumi_azure_native/databoxedge/v20201201/get_share.py | 9,175 | Python |
#!/usr/bin/env python
import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
with open(
os.path.join(here, "requirements.txt"), encoding="utf-8"
) as requirements_file:
requirements = requirements_file.read().splitlines()
with open(
os.path.join(here, "requirements_dev.txt"), encoding="utf-8"
) as requirements_dev_file:
requirements_dev = requirements_dev_file.read().splitlines()
# split the developer requirements into setup and test requirements
if not requirements_dev.count("") == 1 or requirements_dev.index("") == 0:
raise SyntaxError(
"requirements_dev.txt has the wrong format: setup and test "
"requirements have to be separated by one blank line."
)
requirements_dev_split = requirements_dev.index("")
test_requirements = requirements_dev[
requirements_dev_split + 1 :
] # +1: skip empty line
setup(
project_urls={
"Documentation": "https://zfit-interface.readthedocs.io/",
"Changelog": "https://zfit-interface.readthedocs.io/en/latest/changelog.html",
"Issue Tracker": "https://github.com/zfit/zfit-interface/issues",
},
install_requires=requirements,
test_requirements=test_requirements,
extras_require={"dev": requirements_dev},
use_scm_version=True,
)
| 31.780488 | 86 | 0.71911 | [
"BSD-3-Clause"
] | zfit/zfit-interface | setup.py | 1,303 | Python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 15:13:33 2019
@author: ifenty
"""
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
from .llc_array_conversion import llc_compact_to_tiles
from .llc_array_conversion import llc_compact_to_faces
from .llc_array_conversion import llc_faces_to_tiles
from .llc_array_conversion import llc_faces_to_compact
from .llc_array_conversion import llc_tiles_to_faces
from .llc_array_conversion import llc_tiles_to_compact
from .read_bin_llc import read_llc_to_compact, read_llc_to_faces, read_llc_to_tiles
from .tile_plot import plot_tiles
# Tests the read_bin_llc and llc_array_conversion routines
# %%
### Load model grid coordinates (longitude, latitude)
def run_read_bin_and_llc_conversion_test(llc_grid_dir, llc_lons_fname='XC.data',
llc_hfacc_fname='hFacC.data', llc=90,
llc_grid_filetype = '>f',
make_plots=False):
"""
Runs test on the read_bin_llc and llc_conversion routines
Parameters
----------
llc_grid_dir : string
A string with the directory of the binary file to open
llc_lons_fname : string
A string with the name of the XC grid file [XC.data]
llc_hfacc_fname : string
A string with the name of the hfacC grid file [hFacC.data]
llc : int
the size of the llc grid. For ECCO v4, we use the llc90 domain
so `llc` would be `90`.
Default: 90
llc_grid_filetype: string
the file type, default is big endian (>) 32 bit float (f)
alternatively, ('<d') would be little endian (<) 64 bit float (d)
        Default: '>f'
    make_plots : boolean
        A boolean specifying whether or not to make plots
        Default: False
Returns
-------
1 : all tests passed
0 : at least one test failed
"""
# SET TEST RESULT = 1 TO START
TEST_RESULT = 1
# %% ----------- TEST 1: 2D field XC FOM GRID FILE
#%% 1a LOAD COMPACT
tmpXC_c = read_llc_to_compact(llc_grid_dir, llc_lons_fname, llc=llc,
filetype=llc_grid_filetype)
tmpXC_f = read_llc_to_faces(llc_grid_dir, llc_lons_fname, llc=llc,
filetype=llc_grid_filetype)
tmpXC_t = read_llc_to_tiles(llc_grid_dir, llc_lons_fname, llc=llc,
filetype=llc_grid_filetype)
if make_plots:
#plt.close('all')
for f in range(1,6):
plt.figure()
plt.imshow(tmpXC_f[f]);plt.colorbar()
plot_tiles(tmpXC_t)
plt.draw()
raw_input("Press Enter to continue...")
#%% 1b CONVERT COMPACT TO FACES, TILES
tmpXC_cf = llc_compact_to_faces(tmpXC_c)
tmpXC_ct = llc_compact_to_tiles(tmpXC_c)
for f in range(1,6):
tmp = np.unique(tmpXC_f[f] - tmpXC_cf[f])
print ('unique diffs CF ', f, tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 1b-1')
return TEST_RESULT
tmp = np.unique(tmpXC_ct - tmpXC_t)
print ('unique diffs for CT ', tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 1b-2')
return TEST_RESULT
#%% 1c CONVERT FACES TO TILES, COMPACT
tmpXC_ft = llc_faces_to_tiles(tmpXC_f)
tmpXC_fc = llc_faces_to_compact(tmpXC_f)
# unique diff tests
tmp = np.unique(tmpXC_t - tmpXC_ft)
print ('unique diffs for FT ', tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 1c-1')
return TEST_RESULT
tmp = np.unique(tmpXC_fc - tmpXC_c)
print ('unique diffs FC', tmp )
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 1c-2')
return TEST_RESULT
#%% 1d CONVERT TILES to FACES, COMPACT
tmpXC_tf = llc_tiles_to_faces(tmpXC_t)
tmpXC_tc = llc_tiles_to_compact(tmpXC_t)
# unique diff tests
for f in range(1,6):
tmp = np.unique(tmpXC_f[f] - tmpXC_tf[f])
print ('unique diffs for TF ', f, tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 1d-1')
return TEST_RESULT
tmp = np.unique(tmpXC_tc - tmpXC_c)
print ('unique diffs TC', tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 1d-2')
return TEST_RESULT
#%% 1e CONVERT COMPACT TO FACES TO TILES TO FACES TO COMPACT
tmpXC_cftfc = llc_faces_to_compact(llc_tiles_to_faces(llc_faces_to_tiles(llc_compact_to_faces(tmpXC_c))))
tmp = np.unique(tmpXC_cftfc - tmpXC_c)
print ('unique diffs CFTFC', tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 1e')
return TEST_RESULT
# %% ----------- TEST 2: 3D fields HFACC FOM GRID FILE
#%% 2a LOAD COMPACT
tmpHF_c = read_llc_to_compact(llc_grid_dir, llc_hfacc_fname, llc=llc,nk=50,
filetype=llc_grid_filetype)
tmpHF_f = read_llc_to_faces(llc_grid_dir, llc_hfacc_fname, llc=llc, nk=50,
filetype=llc_grid_filetype)
tmpHF_t = read_llc_to_tiles(llc_grid_dir, llc_hfacc_fname, llc=llc, nk=50,
filetype=llc_grid_filetype)
tmpHF_c.shape
if make_plots:
#plt.close('all')
plt.imshow(tmpHF_c[0,:]);plt.colorbar()
plot_tiles(tmpHF_t[:,0,:])
plot_tiles(tmpHF_t[:,20,:])
plt.draw()
raw_input("Press Enter to continue...")
#%% 2b CONVERT COMPACT TO FACES, TILES
tmpHF_cf = llc_compact_to_faces(tmpHF_c)
tmpHF_ct = llc_compact_to_tiles(tmpHF_c)
# unique diff tests
for f in range(1,6):
tmp = np.unique(tmpHF_f[f] - tmpHF_cf[f])
print ('unique diffs CF ', f, tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 2b-1')
return TEST_RESULT
tmp = np.unique(tmpHF_ct - tmpHF_t)
print ('unique diffs CT ', tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 2b-2')
return TEST_RESULT
if make_plots:
for k in [0, 20]:
for f in range(1,6):
plt.figure()
plt.imshow(tmpHF_cf[f][k,:], origin='lower');plt.colorbar()
plt.draw()
raw_input("Press Enter to continue...")
#%% 2c CONVERT FACES TO TILES, COMPACT
tmpHF_ft = llc_faces_to_tiles(tmpHF_f)
tmpHF_fc = llc_faces_to_compact(tmpHF_f)
if make_plots:
#plt.close('all')
plot_tiles(tmpHF_ft[:,0,:])
plot_tiles(tmpHF_ft[:,20,:])
plt.draw()
raw_input("Press Enter to continue...")
# unique diff tests
tmp = np.unique(tmpHF_t - tmpHF_ft)
print ('unique diffs FT ', tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 2c-1')
return TEST_RESULT
tmp = np.unique(tmpHF_fc - tmpHF_c)
print ('unique diffs FC', tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 2c-2')
return TEST_RESULT
#%% 2d CONVERT TILES to FACES, COMPACT
tmpHF_tf = llc_tiles_to_faces(tmpHF_t)
tmpHF_tc = llc_tiles_to_compact(tmpHF_t)
if make_plots:
#plt.close('all')
for k in [0, 20]:
for f in range(1,6):
plt.figure()
plt.imshow(tmpHF_tf[f][k,:], origin='lower');plt.colorbar()
plt.draw()
raw_input("Press Enter to continue...")
# unique diff tests
for f in range(1,6):
tmp = np.unique(tmpHF_f[f] - tmpHF_tf[f])
print ('unique diffs TF ', f, tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 2d-1')
return TEST_RESULT
tmp = np.unique(tmpHF_tc - tmpHF_c)
print ('unique diffs TC ', tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
        print ('failed on 2d-2')
return TEST_RESULT
#%% 2e CONVERT COMPACT TO FACES TO TILES TO FACES TO COMPACT
tmpHF_cftfc = llc_faces_to_compact(llc_tiles_to_faces(
llc_faces_to_tiles(llc_compact_to_faces(tmpHF_c))))
tmp = np.unique(tmpHF_cftfc - tmpHF_c)
print ('unique diffs CFTFC ', tmp)
if len(tmp) != 1 or tmp[0] != 0:
TEST_RESULT = 0
print ('failed on 2e')
return TEST_RESULT
print ('YOU MADE IT THIS FAR, TESTS PASSED!')
return TEST_RESULT
####################### ###########################
#%%
if __name__ == "__main__":
import sys
import matplotlib
sys.path.append('/Users/ifenty/ECCOv4-py/')
import ecco_v4_py as ecco
import matplotlib.pylab as plt
llc_grid_dir = '/Volumes/ECCO_BASE/ECCO_v4r3/grid_llc90/'
llc_lons_fname='XC.data'
    llc_hfacc_fname = 'hFacC.data'
    llc = 90
    llc_grid_filetype = '>f'
make_plots=False
#%%
TEST_RESULT = ecco.run_read_bin_and_llc_conversion_test(llc_grid_dir, make_plots=True)
print(TEST_RESULT)
| 28.567976 | 109 | 0.577834 | [
"MIT"
] | cpatrizio88/ECCO_tools | ecco_v4_py/test_llc_array_loading_and_conversion.py | 9,456 | Python |
import requests
import json
from pybliometrics.scopus import AbstractRetrieval
arr_authors = [
'55949131000', #EG
'56344636600', #MF
'6602888121', #MG
'7005314544' #SR
]
MY_API_KEY = 'afd5bb57359cd0e85670e92a9a282d48'
from pybliometrics.scopus.utils import config
#config['Authentication']['APIKey'] = 'afd5bb57359cd0e85670e92a9a282d48'
bib = set()
def get_scopus_info(SCOPUS_ID):
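    """Fetch selected metadata fields for a Scopus ID from the Elsevier Abstract Retrieval REST endpoint."""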
url = ("http://api.elsevier.com/content/abstract/scopus_id/"
+ SCOPUS_ID
+ "?field=authors,title,publicationName,volume,issueIdentifier,"
+ "prism:pageRange,coverDate,article-number,doi,citedby-count,prism:aggregationType")
resp = requests.get(url,
headers={'Accept':'application/json',
'X-ELS-APIKey': MY_API_KEY})
return json.loads(resp.text.encode('utf-8'))
for author in arr_authors:
resp = requests.get("http://api.elsevier.com/content/search/scopus?query=AU-ID(" + author + ")&field=dc:identifier",
headers={'Accept':'application/json',
'X-ELS-APIKey': MY_API_KEY})
results = resp.json()
#print(results)
i = 0
for r in results['search-results']["entry"]:
sid = [str(r['dc:identifier'])]
# some entries seem to have json parse errors, so we catch those
print(sid[0].replace('SCOPUS_ID:',''))
ab = AbstractRetrieval(sid[0].replace('SCOPUS_ID:',''))
bib.add(str(ab.get_html()))
break
break
with open('bib.bib', 'w') as file:
for bibitem in bib:
file.write(bibitem)
file.write('\n') | 30.462963 | 120 | 0.619453 | [
"MIT"
] | big-unibo/big-website | script/bib_script2_not_working.py | 1,645 | Python |