from recyclus import Client
import time


def load(job):
    print('saving cyclus.sqlite...')
    client.save('cyclus.sqlite', job.jobid)


def wait_for_completion(job):
    while True:
        time.sleep(2)
        resp = job.status()
        if resp['status'] != 'ok':
            print('Error:', resp['message'])
            return
        info = resp['info']
        print(f"\tStatus: {info['status']}")
        if info['status'] in ['done', 'error', 'failed', 'unknown job']:
            if info['status'] == 'done':
                load(job)
                # job.delete()
            print('done')
            return


client = Client()
job = client.run(scenario='./scenario.xml', project='demo')
print('job submitted:', job.jobid)
wait_for_completion(job)
print('files:', job.files())
print('list:')
job.list()

# Source: yarden-livnat/ReCyclus, tests/run.py (BSD-3-Clause)

from flask import Flask

from app.settings import conf
from app.ext import init_ext
from app.views import init_blue


def create_app(env_name):
    # validate the environment name
    if env_name not in conf:
        raise Exception('Invalid environment name')
    app = Flask(__name__)
    # apply the configuration for this environment
    app.config.from_object(conf.get(env_name))
    # register third-party extensions
    init_ext(app)
    # register blueprints
    init_blue(app)
    return app


# Source: General-ITer/Flask-Introduction, day02split/app/__init__.py (Apache-2.0)

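# A minimal, hypothetical entry point for the factory above. The environment name
# 'develop' is an assumption standing in for whatever keys app.settings.conf actually
# defines, and the host/port values are illustrative defaults.
from app import create_app

app = create_app('develop')

if __name__ == '__main__':
    # Flask's built-in development server, for local use only.
    app.run(host='127.0.0.1', port=5000, debug=True)
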
"""
Area Weighted Interpolation
"""
import numpy as np
import geopandas as gpd
from ._vectorized_raster_interpolation import _fast_append_profile_in_gdf
import warnings
from scipy.sparse import dok_matrix, diags, coo_matrix
import pandas as pd
import os
from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs
def _chunk_dfs(geoms_to_chunk, geoms_full, n_jobs):
chunk_size = geoms_to_chunk.shape[0] // n_jobs + 1
for i in range(n_jobs):
start = i * chunk_size
yield geoms_to_chunk.iloc[start : start + chunk_size], geoms_full
def _index_n_query(geoms1, geoms2):
# Pick largest for STRTree, query_bulk the smallest
if geoms1.shape[0] > geoms2.shape[0]:
large = geoms1
small = geoms2
else:
large = geoms2
small = geoms1
# Build tree + query
qry_polyIDs, tree_polyIDs = large.sindex.query_bulk(small, predicate="intersects")
# Remap IDs to global
large_global_ids = large.iloc[tree_polyIDs].index.values
small_global_ids = small.iloc[qry_polyIDs].index.values
# Return always global IDs for geoms1, geoms2
if geoms1.shape[0] > geoms2.shape[0]:
return np.array([large_global_ids, small_global_ids]).T
else:
return np.array([small_global_ids, large_global_ids]).T
def _chunk_polys(id_pairs, geoms_left, geoms_right, n_jobs):
chunk_size = id_pairs.shape[0] // n_jobs + 1
for i in range(n_jobs):
start = i * chunk_size
chunk1 = geoms_left.values.data[id_pairs[start : start + chunk_size, 0]]
chunk2 = geoms_right.values.data[id_pairs[start : start + chunk_size, 1]]
yield chunk1, chunk2
def _intersect_area_on_chunk(geoms1, geoms2):
import pygeos
areas = pygeos.area(pygeos.intersection(geoms1, geoms2))
return areas
def _area_tables_binning_parallel(source_df, target_df, n_jobs=-1):
"""Construct area allocation and source-target correspondence tables using
a parallel spatial indexing approach
...
NOTE: currently, the largest df is chunked and the other one is shipped in
full to each core; within each process, the spatial index is built for the
largest set of geometries, and the other one used for `query_bulk`
Parameters
----------
source_df : geopandas.GeoDataFrame
GeoDataFrame containing input data and polygons
target_df : geopandas.GeoDataFrame
GeoDataFrame defining the output geometries
n_jobs : int
[Optional. Default=-1] Number of processes to run in parallel. If -1,
this is set to the number of CPUs available
Returns
-------
tables : scipy.sparse.dok_matrix
"""
from joblib import Parallel, delayed, parallel_backend
if _check_crs(source_df, target_df):
pass
else:
return None
if n_jobs == -1:
n_jobs = os.cpu_count()
df1 = source_df.copy()
df2 = target_df.copy()
# Chunk the largest, ship the smallest in full
if df1.shape[0] > df2.shape[0]:
to_chunk = df1
df_full = df2
else:
to_chunk = df2
df_full = df1
# Spatial index query
## Reindex on positional IDs
to_workers = _chunk_dfs(
gpd.GeoSeries(to_chunk.geometry.values, crs=to_chunk.crs),
gpd.GeoSeries(df_full.geometry.values, crs=df_full.crs),
n_jobs,
)
with parallel_backend("loky", inner_max_num_threads=1):
worker_out = Parallel(n_jobs=n_jobs)(
delayed(_index_n_query)(*chunk_pair) for chunk_pair in to_workers
)
ids_src, ids_tgt = np.concatenate(worker_out).T
# Intersection + area calculation
chunks_to_intersection = _chunk_polys(
np.vstack([ids_src, ids_tgt]).T, df1.geometry, df2.geometry, n_jobs
)
with parallel_backend("loky", inner_max_num_threads=1):
worker_out = Parallel(n_jobs=n_jobs)(
delayed(_intersect_area_on_chunk)(*chunk_pair)
for chunk_pair in chunks_to_intersection
)
areas = np.concatenate(worker_out)
# Build DOK table
table = coo_matrix(
(areas, (ids_src, ids_tgt),),
shape=(df1.shape[0], df2.shape[0]),
dtype=np.float32,
)
table = table.todok()
return table
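# The parallel variant above is meant as a drop-in for _area_tables_binning when both
# frames are large; a hedged example call (the frame names are assumptions):
#
#   table = _area_tables_binning_parallel(tracts, hexgrid, n_jobs=-1)   # use all CPUs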
def _area_tables_binning(source_df, target_df, spatial_index):
"""Construct area allocation and source-target correspondence tables using a spatial indexing approach
...
NOTE: this currently relies on Geopandas' spatial index machinery
Parameters
----------
source_df : geopandas.GeoDataFrame
GeoDataFrame containing input data and polygons
target_df : geopandas.GeoDataFrame
GeoDataFrame defining the output geometries
spatial_index : str
Spatial index to use to build the allocation of area from source to
target tables. It currently supports the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
tables : scipy.sparse.dok_matrix
"""
if _check_crs(source_df, target_df):
pass
else:
return None
df1 = source_df.copy()
df2 = target_df.copy()
# it is generally more performant to use the longer df as spatial index
if spatial_index == "auto":
if df1.shape[0] > df2.shape[0]:
spatial_index = "source"
else:
spatial_index = "target"
if spatial_index == "source":
ids_tgt, ids_src = df1.sindex.query_bulk(df2.geometry, predicate="intersects")
elif spatial_index == "target":
ids_src, ids_tgt = df2.sindex.query_bulk(df1.geometry, predicate="intersects")
else:
raise ValueError(
f"'{spatial_index}' is not a valid option. Use 'auto', 'source' or 'target'."
)
areas = df1.geometry.values[ids_src].intersection(df2.geometry.values[ids_tgt]).area
table = coo_matrix(
(areas, (ids_src, ids_tgt),),
shape=(df1.shape[0], df2.shape[0]),
dtype=np.float32,
)
table = table.todok()
return table
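# A toy, hedged illustration (not part of tobler) of what the sparse table built above
# holds: two unit squares as sources, one square straddling them as target. It assumes a
# geopandas version that still exposes sindex.query_bulk, as the code above does.
#
#   import geopandas as gpd
#   from shapely.geometry import box
#
#   src = gpd.GeoDataFrame(geometry=[box(0, 0, 1, 1), box(1, 0, 2, 1)], crs="EPSG:3857")
#   tgt = gpd.GeoDataFrame(geometry=[box(0.5, 0, 1.5, 1)], crs="EPSG:3857")
#   table = _area_tables_binning(src, tgt, spatial_index="auto")
#   table.toarray()   # [[0.5], [0.5]] -> area of source i intersecting target j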
def _area_tables(source_df, target_df):
"""
Construct area allocation and source-target correspondence tables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
Returns
-------
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union = gpd.overlay(source_df, target_df, how="union")
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row[row.geometry.name].area
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
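# Sketch of how the two dense tables returned above combine (names from this module):
#
#   SU, UT = _area_tables(source_df, target_df)
#   ST = SU @ UT                                  # ST[i, t]: area of source i in target t
#   den = ST.sum(axis=1, keepdims=True)
#   weights = ST / np.where(den == 0, 1, den)     # row-standardised weights, mirroring
#                                                 # the allocate_total=True case used below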
def _area_interpolate_binning(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
table=None,
allocate_total=True,
spatial_index="auto",
n_jobs=1,
categorical_variables=None,
):
"""
Area interpolation for extensive, intensive and categorical variables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
extensive_variables : list
[Optional. Default=None] Columns in dataframes for extensive variables
intensive_variables : list
[Optional. Default=None] Columns in dataframes for intensive variables
table : scipy.sparse.dok_matrix
[Optional. Default=None] Area allocation source-target correspondence
table. If not provided, it will be built from `source_df` and
`target_df` using `tobler.area_interpolate._area_tables_binning`
allocate_total : boolean
[Optional. Default=True] True if total value of source area should be
allocated. False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is exhausted by
intersections. See Notes for more details.
spatial_index : str
[Optional. Default="auto"] Spatial index to use to build the
allocation of area from source to target tables. It currently supports
the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
This argument is ignored if n_jobs>1 (or n_jobs=-1).
n_jobs : int
[Optional. Default=1] Number of processes to run in parallel to
generate the area allocation. If -1, this is set to the number of CPUs
available. If `table` is passed, this is ignored.
NOTE: as of Jan'21 multi-core functionality requires master versions
of `pygeos` and `geopandas`.
categorical_variables : list
[Optional. Default=None] Columns in dataframes for categorical variables
Returns
-------
estimates : geopandas.GeoDataFrame
new GeoDataFrame with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{k,j}
For categorical variables, the estimate returns ratio of presence of each
unique category.
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if table is None:
if n_jobs == 1:
table = _area_tables_binning(source_df, target_df, spatial_index)
else:
table = _area_tables_binning_parallel(source_df, target_df, n_jobs=n_jobs)
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = np.asarray(table.sum(axis=1))
den = den + (den == 0)
den = 1.0 / den
n = den.shape[0]
den = den.reshape((n,))
den = diags([den], [0])
weights = den.dot(table) # row standardize table
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = diags([vals], [0]).dot(weights)
estimates = estimates.sum(axis=0)
extensive.append(estimates.tolist()[0])
extensive = np.asarray(extensive)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
area = np.asarray(table.sum(axis=0))
den = 1.0 / (area + (area == 0))
n, k = den.shape
den = den.reshape((k,))
den = diags([den], [0])
weights = table.dot(den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
n = vals.shape[0]
vals = vals.reshape((n,))
estimates = diags([vals], [0])
estimates = estimates.dot(weights).sum(axis=0)
intensive.append(estimates.tolist()[0])
intensive = np.asarray(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if categorical_variables:
categorical = {}
for variable in categorical_variables:
unique = source_df[variable].unique()
for value in unique:
mask = source_df[variable] == value
categorical[f"{variable}_{value}"] = np.asarray(
table[mask].sum(axis=0)
)[0]
categorical = pd.DataFrame(categorical)
categorical = categorical.div(target_df.area, axis="rows")
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
if categorical_variables:
dfs.append(categorical)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
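# Hypothetical call to the function above; the frame and column names are assumptions,
# and both frames must share the same (ideally projected) CRS for the area math to hold.
#
#   estimates = _area_interpolate_binning(
#       source_df=tracts,                        # polygons carrying the known values
#       target_df=hexgrid,                       # polygons to interpolate onto
#       extensive_variables=["population"],      # counts, allocated by intersection area
#       intensive_variables=["median_income"],   # averages, area-weighted
#       categorical_variables=["land_use"],      # per-category area shares
#   )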
def _area_interpolate(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
tables=None,
allocate_total=True,
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
target_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
extensive_variables : list, (optional)
columns in dataframes for extensive variables
intensive_variables : list, (optional)
columns in dataframes for intensive variables
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
allocate_total : boolean
True if total value of source area should be allocated.
False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is
exhausted by intersections. See Notes for more details.
Returns
-------
estimates : geopandas.GeoDataFrame
new GeoDataFrame with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if tables is None:
SU, UT = _area_tables(source_df, target_df)
else:
SU, UT = tables
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = SU.sum(axis=1)
den = den + (den == 0)
weights = np.dot(np.diag(1 / den), SU)
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = np.dot(np.diag(vals), weights)
estimates = np.dot(estimates, UT)
estimates = estimates.sum(axis=0)
extensive.append(estimates)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
ST = np.dot(SU, UT)
area = ST.sum(axis=0)
den = np.diag(1.0 / (area + (area == 0)))
weights = np.dot(ST, den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
vals.shape = (len(vals), 1)
est = (vals * weights).sum(axis=0)
intensive.append(est)
intensive = np.array(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
def _area_tables_raster(
source_df, target_df, raster_path, codes=[21, 22, 23, 24], force_crs_match=True
):
"""
Construct area allocation and source-target correspondence tables according to a raster 'populated' areas
Parameters
----------
source_df : geopandas.GeoDataFrame
geodataframe with geometry column of polygon type
target_df : geopandas.GeoDataFrame
geodataframe with geometry column of polygon type
raster_path : str
the path to the associated raster image.
codes : list
list of integer code values that should be considered as 'populated'.
Since this draws inspiration from the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity).
The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html
Only taken into account for raster-based harmonization.
force_crs_match : bool (default is True)
Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file.
It is recommended to leave this argument as True.
Returns
-------
tables: tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union_pre = gpd.overlay(source_df, target_df, how="union")
# Establishing a CRS for the generated union
warnings.warn(
"The CRS for the generated union will be set to be the same as source_df."
)
res_union_pre.crs = source_df.crs
# The 'append_profile_in_gdf' function is present in nlcd.py script
res_union = _fast_append_profile_in_gdf(
res_union_pre, raster_path, force_crs_match=force_crs_match
)
str_codes = [str(i) for i in codes]
str_list = ["Type_" + i for i in str_codes]
# Extract list of code names that actually appear in the appended dataset
str_list_ok = [col for col in res_union.columns if col in str_list]
res_union["Populated_Pixels"] = res_union[str_list_ok].sum(axis=1)
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row["Populated_Pixels"]
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
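# Hedged example of the raster-constrained tables above; the raster path is an
# assumption, and the codes shown are the NLCD "developed" classes used as the default:
#
#   SU, UT = _area_tables_raster(tracts, hexgrid,
#                                raster_path="nlcd_2019.tif",
#                                codes=[21, 22, 23, 24])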

# Source: AnGWar26/tobler, tobler/area_weighted/area_interpolate.py (BSD-3-Clause)

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ReplicationJobsOperations:
"""ReplicationJobsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicessiterecovery.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.JobCollection"]:
"""Gets the list of jobs.
Gets the list of Azure Site Recovery Jobs for the vault.
:param filter: OData filter options.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.JobCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('JobCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationJobs'} # type: ignore
async def get(
self,
job_name: str,
**kwargs: Any
) -> "_models.Job":
"""Gets the job details.
Get the details of an Azure Site Recovery job.
:param job_name: Job identifier.
:type job_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Job, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicessiterecovery.models.Job
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationJobs/{jobName}'} # type: ignore
async def _cancel_initial(
self,
job_name: str,
**kwargs: Any
) -> Optional["_models.Job"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Job"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self._cancel_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationJobs/{jobName}/cancel'} # type: ignore
async def begin_cancel(
self,
job_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.Job"]:
"""Cancels the specified job.
The operation to cancel an Azure Site Recovery job.
:param job_name: Job identifier.
:type job_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._cancel_initial(
job_name=job_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationJobs/{jobName}/cancel'} # type: ignore
async def _restart_initial(
self,
job_name: str,
**kwargs: Any
) -> Optional["_models.Job"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Job"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self._restart_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationJobs/{jobName}/restart'} # type: ignore
async def begin_restart(
self,
job_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.Job"]:
"""Restarts the specified job.
The operation to restart an Azure Site Recovery job.
:param job_name: Job identifier.
:type job_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._restart_initial(
job_name=job_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationJobs/{jobName}/restart'} # type: ignore
async def _resume_initial(
self,
job_name: str,
resume_job_params: "_models.ResumeJobParams",
**kwargs: Any
) -> Optional["_models.Job"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Job"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._resume_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(resume_job_params, 'ResumeJobParams')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_resume_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationJobs/{jobName}/resume'} # type: ignore
async def begin_resume(
self,
job_name: str,
resume_job_params: "_models.ResumeJobParams",
**kwargs: Any
) -> AsyncLROPoller["_models.Job"]:
"""Resumes the specified job.
The operation to resume an Azure Site Recovery job.
:param job_name: Job identifier.
:type job_name: str
:param resume_job_params: Resume job comments.
:type resume_job_params: ~azure.mgmt.recoveryservicessiterecovery.models.ResumeJobParams
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._resume_initial(
job_name=job_name,
resume_job_params=resume_job_params,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_resume.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationJobs/{jobName}/resume'} # type: ignore
async def _export_initial(
self,
job_query_parameter: "_models.JobQueryParameter",
**kwargs: Any
) -> Optional["_models.Job"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Job"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._export_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(job_query_parameter, 'JobQueryParameter')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_export_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationJobs/export'} # type: ignore
async def begin_export(
self,
job_query_parameter: "_models.JobQueryParameter",
**kwargs: Any
) -> AsyncLROPoller["_models.Job"]:
"""Exports the details of the Azure Site Recovery jobs of the vault.
The operation to export the details of the Azure Site Recovery jobs of the vault.
:param job_query_parameter: job query filter.
:type job_query_parameter: ~azure.mgmt.recoveryservicessiterecovery.models.JobQueryParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._export_initial(
job_query_parameter=job_query_parameter,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_export.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationJobs/export'} # type: ignore

# Source: AFengKK/azure-sdk-for-python (MIT)
# sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/operations/_replication_jobs_operations.py

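# A hedged usage sketch for the operations class above. The client class name, its
# constructor arguments, and the `replication_jobs` attribute follow the usual
# azure-mgmt-* conventions for this package; verify them against the installed SDK
# version before relying on this, and replace the placeholder strings with real values.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.recoveryservicessiterecovery.aio import SiteRecoveryManagementClient


async def list_jobs():
    credential = DefaultAzureCredential()
    client = SiteRecoveryManagementClient(
        credential,
        "<subscription-id>",
        "<resource-group-name>",
        "<recovery-vault-name>",
    )
    async with client, credential:
        # ReplicationJobsOperations is exposed on the client as `replication_jobs`
        async for job in client.replication_jobs.list():
            print(job.name)


if __name__ == "__main__":
    asyncio.run(list_jobs())
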
"""Tuya Air Quality sensor."""
from zigpy.profiles import zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import Basic, GreenPowerProxy, Groups, Ota, Scenes, Time
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.tuya.air import (
TuyaAirQualityCO2,
TuyaAirQualityFormaldehyde,
TuyaAirQualityHumidity,
TuyaAirQualityTemperature,
TuyaAirQualityVOC,
TuyaCO2ManufCluster,
)
class TuyaCO2Sensor(CustomDevice):
"""Tuya Air quality device."""
signature = {
# NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.0: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)]
# device_version=1
# SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=81, device_version=1,
# input_clusters=[0, 4, 5, 61184],
# output_clusters=[25, 10])
MODELS_INFO: [
("_TZE200_8ygsuhe1", "TS0601"),
("_TZE200_yvx5lh6k", "TS0601"),
],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaCO2ManufCluster.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
}
},
}
replacement = {
ENDPOINTS: {
1: {
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaCO2ManufCluster,
TuyaAirQualityCO2,
TuyaAirQualityFormaldehyde,
TuyaAirQualityHumidity,
TuyaAirQualityTemperature,
TuyaAirQualityVOC,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
}
}
}
class TuyaCO2SensorGPP(CustomDevice):
"""Tuya Air quality device with GPP."""
signature = {
# NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.0: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)]
# device_version=1
# SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=81, device_version=1,
# input_clusters=[0, 4, 5, 61184],
# output_clusters=[25, 10])
MODELS_INFO: [
("_TZE200_ryfmq5rl", "TS0601"),
("_TZE200_yvx5lh6k", "TS0601"),
("_TZE200_dwcarsat", "TS0601"),
],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaCO2ManufCluster.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
242: {
# <SimpleDescriptor endpoint=242 profile=41440 device_type=97
# input_clusters=[]
# output_clusters=[33]
PROFILE_ID: 41440,
DEVICE_TYPE: 97,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaCO2ManufCluster,
TuyaAirQualityCO2,
TuyaAirQualityFormaldehyde,
TuyaAirQualityHumidity,
TuyaAirQualityTemperature,
TuyaAirQualityVOC,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
242: {
PROFILE_ID: 41440,
DEVICE_TYPE: 97,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
}
}

# Source: InovelliUSA/zha-device-handlers, zhaquirks/tuya/air/ts0601_air_quality.py (Apache-2.0)

# -*- coding: utf-8 -*-
from .ExactDate import ExactDate
from .JieQi import JieQi
from .NineStar import NineStar
from .EightChar import EightChar
from .ShuJiu import ShuJiu
from .Fu import Fu
from .Solar import Solar
from .SolarWeek import SolarWeek
from .SolarMonth import SolarMonth
from .SolarSeason import SolarSeason
from .SolarHalfYear import SolarHalfYear
from .SolarYear import SolarYear
from .Lunar import Lunar
from .LunarYear import LunarYear
from .LunarMonth import LunarMonth
from .LunarTime import LunarTime
from .Holiday import Holiday

# Source: 6tail/lunar-python, lunar_python/__init__.py (MIT)

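# A small, hedged example of the package whose public API is re-exported above. The
# method names follow the lunar-java style this library mirrors (Solar.fromYmd,
# getLunar, toFullString); they are quoted from memory, so treat them as assumptions.
from lunar_python import Solar

solar = Solar.fromYmd(2024, 2, 10)      # a Gregorian calendar date
lunar = solar.getLunar()                # convert to the Chinese lunar calendar
print(lunar.toFullString())             # full lunar description for that date
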
"""
Training and validation method for arbitrary models.
"""
import io
import os
import sys
import time
from keras import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
plt.switch_backend('agg')
def train_and_evaluate(model, model_name, training_epochs, the_data):
print('model.metrics_names: {}'.format(model.metrics_names))
total_steps = training_epochs * the_data.get_training_mini_batches_per_epoch()
training_index = pd.RangeIndex(start=0, stop=total_steps, name='Training Step')
training_metrics_df = pd.DataFrame(
data=np.zeros((total_steps, len(model.metrics_names))),
columns=model.metrics_names,
index=training_index)
# evaluate the model on the dev set(s) after each epoch
dev_index = pd.RangeIndex(start=0, stop=training_epochs, name='Epoch')
dev_columns = pd.MultiIndex.from_product(
iterables=(the_data.get_dev_set_names(), model.metrics_names),
names=('dev set', 'metric'))
dev_metrics_df = pd.DataFrame(
data=np.zeros((training_epochs, len(the_data.get_dev_set_names()) * len(model.metrics_names))),
columns=dev_columns,
index=dev_index)
print(dev_metrics_df.head())
steps_per_epoch = the_data.get_training_mini_batches_per_epoch()
# n counts number of training iterations
n = 0
t0 = time.time()
##with h5py.File(the_data.fp, 'r', libver='latest', swmr=True) as train_test_file:
# train for all epochs
t00 = time.time()
##for train_X, train_y, step, epoch in the_data.get_training_mini_batches(data_file=train_test_file, yield_state=True):
for train_X, train_y, step, epoch in the_data.get_training_data_generator()(yield_state=True):
if epoch > training_epochs:
print('completed {} training epochs in {:5.2f}s'.format(training_epochs, time.time()-t0))
break
else:
# train on one mini batch
print('training on batch {} ({})'.format(step, steps_per_epoch))
training_metrics = model.train_on_batch(train_X, train_y)
training_metrics_df.loc[n, model.metrics_names] = training_metrics
n += 1
# look at performance on dev data after each epoch
# re-plot the training and dev metrics after each epoch
if step == steps_per_epoch:
print('completed training epoch {} in {:5.2f}s'.format(epoch, time.time()-t00))
print('{} steps per epoch'.format(steps_per_epoch))
print('{:5.2f}s per step'.format((time.time()-t00)/steps_per_epoch))
print(training_metrics_df.loc[n-2:n])
t00 = time.time()
print('evaluate the model on the dev set(s)')
#evaluate_dev_sets(epoch=epoch, model=model, the_data=the_data, train_test_file=train_test_file, dev_metrics_df=dev_metrics_df)
evaluate_dev_sets(epoch=epoch, model=model, the_data=the_data, dev_metrics_df=dev_metrics_df)
plot_training_and_dev_metrics(
training_metrics_df,
dev_metrics_df,
model_name=model_name,
steps_per_epoch=steps_per_epoch,
epoch_count=training_epochs,
output_fp=model_name + '.pdf')
return training_metrics_df, dev_metrics_df
def evaluate_dev_sets(epoch, model, the_data, dev_metrics_df):
for dev_steps, dev_set_name, dev_generator in the_data.get_dev_generators():
sys.stdout.write('.')
# print('dev set: "{}"'.format(dev_set_name))
# print(' dev steps: {}'.format(dev_steps))
dev_metrics = model.evaluate_generator(generator=dev_generator, steps=dev_steps)
dev_metrics_df.loc[epoch - 1, (dev_set_name, model.metrics_names)] = dev_metrics
sys.stdout.write('\n')
print('dev metrics:\n{}'.format(dev_metrics_df.loc[epoch - 1]))
def build_layer(model_name, layer_type, kwargs):
if layer_type == 'Dense':
model_name.write('_dns_{}'.format(kwargs['units']))
if 'kernel_regularizer' in kwargs:
            # the l2 field is an ndarray with shape (); indexing it with []
            # raises 'too many indices', so use .item() to extract the float value
model_name.write('_l2_{:6.4f}'.format(kwargs['kernel_regularizer'].l2.item()))
layer = Dense(**kwargs)
elif layer_type == 'Dropout':
model_name.write('_drp_{:3.2f}'.format(kwargs['rate']))
layer = Dropout(**kwargs)
elif layer_type == 'BatchNormalization':
model_name.write('_bn')
layer = BatchNormalization(**kwargs)
else:
        raise Exception('unsupported layer type: {}'.format(layer_type))
return layer
def build_model(layers, model=None, input_dim=None):
"""
Build and return a Sequential model with Dense layers given by the layers argument.
Arguments
model (keras.Sequential) model to which layers will be added
input_dim (int) dimension of input
        layers (tuple) sequence of 2-tuples of (layer_type, kwargs), one per layer, such as
            (('Dense', {'units': 64, 'activation': 'relu'}), ('Dense', {'units': 1, 'activation': 'sigmoid'}))
Return
model_name (str) a name for the model
model (Model) a compiled model
"""
if model is None:
model = Sequential()
model_name = io.StringIO()
    # the first layer's kwargs optionally receive the input dimension
    layer_type, kwargs = layers[0]
    if input_dim is not None:
        kwargs['input_dim'] = input_dim
for layer_type, kwargs in layers:
layer = build_layer(model_name, layer_type, kwargs)
model.add(layer)
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# trim the leading '_' from the model name - lazy!
return model_name.getvalue()[1:], model
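# A minimal sketch (hypothetical layer sizes and rates) of a layer specification
# accepted by build_model; the kernel_regularizer entry is optional and assumes
# keras.regularizers is available where the spec is constructed:
#
#     from keras import regularizers
#     layers = (
#         ('Dense', {'units': 64, 'activation': 'relu',
#                    'kernel_regularizer': regularizers.l2(0.01)}),
#         ('BatchNormalization', {}),
#         ('Dropout', {'rate': 0.5}),
#         ('Dense', {'units': 1, 'activation': 'sigmoid'}),
#     )
#     model_name, model = build_model(layers, input_dim=100)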
def plot_training_and_dev_metrics(training_metrics_df, dev_metrics_df, model_name, steps_per_epoch, epoch_count, output_fp):
# generate network-specific accuracy and loss keys
output_dp, output_filename = os.path.split(output_fp)
output_basename, output_ext = os.path.splitext(output_filename)
##separate_plots_fp = os.path.join(output_dp, output_basename + '_separate' + output_ext)
##sorted_training_history_list = sorted(training_history_list, key=lambda h: h[2]['val_acc'][-1], reverse=True)
with PdfPages(output_fp) as pdfpages:
#for model_name, layers, history, t in sorted_training_history_list:
#training_accuracy_loss = {}
#validation_accuracy_loss = {}
#training_accuracy_loss['acc ' + model_name] = history['acc']
#training_accuracy_loss['loss ' + model_name] = history['loss']
#validation_accuracy_loss['val_acc ' + model_name] = history['val_acc']
#validation_accuracy_loss['val_loss ' + model_name] = history['val_loss']
#training_df = pd.DataFrame(
# data=training_accuracy_loss,
# index=[b + 1 for b in range(epoch_count * batches_per_epoch)])
#training_df.index.name = 'batch'
#validation_df = pd.DataFrame(
# data=validation_accuracy_loss,
# index=[(e + 1) * batches_per_epoch for e in range(epoch_count)])
#validation_df.index.name = 'batch'
fig, ax1 = plt.subplots()
legend = []
#for loss_column in [column for column in training_df.columns if 'loss' in column and model_name in column]:
#for training_metric_column in training_metrics_df.columns:
#print('training metric column: {}'.format(training_metric_column))
ax1.plot(training_metrics_df.index, training_metrics_df.loc[:, 'loss'], color='tab:blue', alpha=0.8)
legend.append('training loss')
#for loss_column in [column for column in validation_df.columns if
# 'loss' in column and model_name in column]:
# print('validation loss column: {}'.format(loss_column))
# ax1.plot(validation_df.index, validation_df.loc[:, loss_column], color='tab:orange', alpha=0.8)
# legend.append('val_loss')
ax1.set_xlabel('epoch')
tick_spacing = steps_per_epoch
ax1.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
ax1.set_xticklabels([0] + list(range(epoch_count+1)))
ax1.set_ylabel('loss')
ax1.legend(legend, loc='lower left')
ax2 = ax1.twinx()
legend = []
#for acc_column in [column for column in training_metrics_df.columns if 'acc' in column]:
#print('training acc column: {}'.format(acc_column))
ax2.plot(training_metrics_df.index, training_metrics_df.loc[:, 'acc'], color='tab:purple', alpha=0.8)
legend.append('training acc')
for dev_acc_column in [column for column in dev_metrics_df.columns if 'acc' in column]:
print('validation acc column: {}'.format(dev_acc_column))
ax2.plot([steps_per_epoch * (n + 1) for n in dev_metrics_df.index], dev_metrics_df.loc[:, dev_acc_column], alpha=0.8)
legend.append(dev_acc_column)
ax2.set_title('Training and Development Metrics\n{}'.format(model_name))
ax2.set_ylim(0.0, 1.0)
ax2.set_ylabel('accuracy')
print(legend)
ax2.legend(legend, loc='lower right')
pdfpages.savefig()
| 42.524444 | 139 | 0.650397 | [
"MIT"
] | hurwitzlab/viral-learning | vl/model/training.py | 9,568 | Python |
import unittest
import os
import grp
from myDevices.sensors import sensors
from myDevices.devices import manager
from myDevices.utils.config import Config
from myDevices.utils import types
from myDevices.utils.logger import exception, setDebug, info, debug, error, logToFile, setInfo
from myDevices.devices.bus import checkAllBus, BUSLIST
from myDevices.devices.digital.gpio import NativeGPIO as GPIO
from myDevices.devices import instance
from time import sleep
from json import loads, dumps
class SensorsClientTest(unittest.TestCase):
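    """Tests for myDevices.sensors.SensorsClient."""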
@classmethod
def setUpClass(cls):
cls.client = sensors.SensorsClient()
@classmethod
def tearDownClass(cls):
cls.client.StopMonitoring()
del cls.client
def OnDataChanged(self, sensor_data):
# if len(sensor_data) < 5:
# info('OnDataChanged: {}'.format(sensor_data))
# else:
# info('OnDataChanged: {}'.format(len(sensor_data)))
self.previousSystemData = self.currentSystemData
self.currentSystemData = sensor_data
if self.previousSystemData:
self.done = True
def testMonitor(self):
debug('testMonitor')
self.previousSystemData = None
self.currentSystemData = None
self.done = False
SensorsClientTest.client.SetDataChanged(self.OnDataChanged)
for i in range(35):
sleep(1)
if self.done:
break
info('Changed items: {}'.format([x for x in self.currentSystemData if x not in self.previousSystemData]))
self.assertNotEqual(self.previousSystemData, self.currentSystemData)
def testBusInfo(self):
debug('testBusInfo')
bus = {item['channel']:item['value'] for item in SensorsClientTest.client.BusInfo()}
info('Bus info: {}'.format(bus))
for pin in GPIO().pins:
self.assertIn('sys:gpio:{};function'.format(pin), bus)
self.assertIn('sys:gpio:{};value'.format(pin), bus)
def testSensorsInfo(self):
debug('testSensorsInfo')
sensors = SensorsClientTest.client.SensorsInfo()
info('Sensors info: {}'.format(sensors))
for sensor in sensors:
self.assertEqual('dev:', sensor['channel'][:4])
self.assertIn('value', sensor)
def testSetFunction(self):
        debug('testSetFunction')
self.setChannelFunction(GPIO().pins[7], 'IN')
self.setChannelFunction(GPIO().pins[7], 'OUT')
def testSetValue(self):
debug('testSetValue')
self.setChannelFunction(GPIO().pins[7], 'OUT')
self.setChannelValue(GPIO().pins[7], 1)
self.setChannelValue(GPIO().pins[7], 0)
def testSensors(self):
debug('testSensors')
#Test adding a sensor
channel = GPIO().pins[8]
testSensor = {'description': 'Digital Input', 'device': 'DigitalSensor', 'args': {'gpio': 'GPIO', 'invert': False, 'channel': channel}, 'name': 'testdevice'}
SensorsClientTest.client.RemoveSensor(testSensor['name']) #Attempt to remove device if it already exists from a previous test
compareKeys = ('args', 'description', 'device')
retValue = SensorsClientTest.client.AddSensor(testSensor['name'], testSensor['description'], testSensor['device'], testSensor['args'])
self.assertTrue(retValue)
retrievedSensor = next(obj for obj in manager.getDeviceList() if obj['name'] == testSensor['name'])
for key in compareKeys:
self.assertEqual(testSensor[key], retrievedSensor[key])
#Test updating a sensor
editedSensor = testSensor
editedSensor['args']['channel'] = GPIO().pins[5]
retValue = SensorsClientTest.client.EditSensor(editedSensor['name'], editedSensor['description'], editedSensor['device'], editedSensor['args'])
self.assertTrue(retValue)
retrievedSensor = next(obj for obj in manager.getDeviceList() if obj['name'] == editedSensor['name'])
for key in compareKeys:
self.assertEqual(editedSensor[key], retrievedSensor[key])
#Test removing a sensor
retValue = SensorsClientTest.client.RemoveSensor(testSensor['name'])
self.assertTrue(retValue)
deviceNames = [device['name'] for device in manager.getDeviceList()]
self.assertNotIn(testSensor['name'], deviceNames)
def testSensorInfo(self):
debug('testSensorInfo')
actuator_channel = GPIO().pins[10]
light_switch_channel = GPIO().pins[11]
sensors = {'actuator' : {'description': 'Digital Output', 'device': 'DigitalActuator', 'args': {'gpio': 'GPIO', 'invert': False, 'channel': actuator_channel}, 'name': 'test_actuator'},
'light_switch' : {'description': 'Light Switch', 'device': 'LightSwitch', 'args': {'gpio': 'GPIO', 'invert': True, 'channel': light_switch_channel}, 'name': 'test_light_switch'},
'MCP3004' : {'description': 'MCP3004', 'device': 'MCP3004', 'args': {'chip': '0'}, 'name': 'test_MCP3004'},
'distance' : {'description': 'Analog Distance Sensor', 'device': 'DistanceSensor', 'args': {'adc': 'test_MCP3004', 'channel': 0}, 'name': 'test_distance'}
}
for sensor in sensors.values():
SensorsClientTest.client.AddSensor(sensor['name'], sensor['description'], sensor['device'], sensor['args'])
SensorsClientTest.client.SensorsInfo()
#Test setting sensor values
self.setSensorValue(sensors['actuator'], 1)
self.setSensorValue(sensors['actuator'], 0)
self.setSensorValue(sensors['light_switch'], 1)
self.setSensorValue(sensors['light_switch'], 0)
#Test getting analog value
channel = 'dev:{}'.format(sensors['distance']['name'])
retrievedSensorInfo = next(obj for obj in SensorsClientTest.client.SensorsInfo() if obj['channel'] == channel)
self.assertGreaterEqual(retrievedSensorInfo['value'], 0.0)
self.assertLessEqual(retrievedSensorInfo['value'], 1.0)
for sensor in sensors.values():
self.assertTrue(SensorsClientTest.client.RemoveSensor(sensor['name']))
def testSensorCallback(self):
debug('testSensorCallback')
self.previousSystemData = None
self.currentSystemData = None
self.done = False
SensorsClientTest.client.SetDataChanged(self.OnDataChanged)
actuator_channel = GPIO().pins[10]
sensor_channel = GPIO().pins[11]
sensors = {'actuator' : {'description': 'Digital Output', 'device': 'DigitalActuator', 'args': {'gpio': 'GPIO', 'invert': False, 'channel': actuator_channel}, 'name': 'test_actuator'},
'sensor': {'description': 'Digital Input', 'device': 'DigitalSensor', 'args': {'gpio': 'GPIO', 'invert': False, 'channel': sensor_channel}, 'name': 'testdevice'}}
for sensor in sensors.values():
SensorsClientTest.client.AddSensor(sensor['name'], sensor['description'], sensor['device'], sensor['args'])
for i in range(35):
sleep(1)
if self.done:
break
info('Changed items: {}'.format([x for x in self.currentSystemData if x not in self.previousSystemData]))
self.assertNotEqual(self.previousSystemData, self.currentSystemData)
for sensor in sensors.values():
self.assertTrue(SensorsClientTest.client.RemoveSensor(sensor['name']))
def setSensorValue(self, sensor, value):
SensorsClientTest.client.SensorCommand('integer', sensor['name'], None, value)
channel = 'dev:{}'.format(sensor['name'])
sensorInfo = next(obj for obj in SensorsClientTest.client.SensorsInfo() if obj['channel'] == channel)
self.assertEqual(value, sensorInfo['value'])
def setChannelFunction(self, channel, function):
SensorsClientTest.client.GpioCommand('function', channel, function)
bus = {item['channel']:item['value'] for item in SensorsClientTest.client.BusInfo()}
self.assertEqual(function, bus['sys:gpio:{};function'.format(channel)])
def setChannelValue(self, channel, value):
SensorsClientTest.client.GpioCommand('value', channel, value)
bus = {item['channel']:item['value'] for item in SensorsClientTest.client.BusInfo()}
self.assertEqual(value, bus['sys:gpio:{};value'.format(channel)])
if __name__ == '__main__':
setInfo()
unittest.main()
| 50.251497 | 197 | 0.654671 | [
"MIT"
] | 42electronics/Cayenne-Agent | myDevices/test/sensors_test.py | 8,392 | Python |
"""fix_parser.py - parse V1.0 fixprotocol sbe xml files described
by xsd https://github.com/FIXTradingCommunity/
fix-simple-binary-encoding/blob/master/v1-0-STANDARD/resources/sbe.xsd
"""
import xml.etree.ElementTree as etree
from pysbe.schema.constants import (
SBE_TYPES_TYPE,
STRING_ENUM_MAP,
VALID_TYPE_PRIMITIVE_TYPE,
TYPE_PRIMITIVE_TYPE_MAP,
PRESENCE_MAP,
QUALIFIED_NAME_RE,
SYMBOLIC_NAME_RE,
)
from pysbe.schema.builder import createMessageSchema
from pysbe.schema.types import (
createType,
createComposite,
createEnum,
createValidValue,
TypeCollection,
createRef,
createSet,
createChoice,
createMessage,
createField,
FieldCollection,
createGroup,
)
from pysbe.schema.exceptions import UnknownReference
SBE_NS = "http://fixprotocol.io/2016/sbe"
SEMANTIC_ATTRIBUTES = {
"semanticType": {"type": str, "use": "optional"},
"description": {"type": str, "use": "optional"},
}
VERSION_ATTRIBUTES = {
"sinceVersion": {"type": int, "default": 0, "minimumValue": 0, "use": "optional"},
# deprecated is itself deprecated in RC4
"deprecated": {"type": int, "minimumValue": 0, "use": "optional"},
}
ALIGNMENT_ATTRIBUTES = {"offset": {"type": int, "minimumValue": 0, "use": "optional"}}
PRESENCE_ATTRIBUTES = {
"presence": {"type": str, "default": "required", "map": PRESENCE_MAP},
"valueRef": {"type": str, "use": "optional", "pattern": QUALIFIED_NAME_RE},
}
TYPE_ATTRIBUTES = {
"name": {"type": str, "pattern": SYMBOLIC_NAME_RE},
"primitiveType": {"type": str, "map": TYPE_PRIMITIVE_TYPE_MAP},
"nullValue": {"type": str, "use": "optional"},
"minValue": {"type": str, "use": "optional"},
"maxValue": {"type": str, "use": "optional"},
"characterEncoding": {"type": str, "use": "optional"},
"length": {"type": int, "minimumValue": 0, "use": "optional", "default": 1},
}
ENUM_ATTRIBUTES = {"encodingType": {"type": str, "pattern": SYMBOLIC_NAME_RE}}
REF_ATTRIBUTES = {"type": {"type": str}}
MESSAGE_ATTRIBUTES = {
"blockLength": {"type": int, "use": "optional"},
"message_id": {"type": int, "attribute_name": "id"},
}
FIELD_ATTRIBUTES = {
"field_id": {"type": int, "attribute_name": "id"},
"field_type": {"type": str, "pattern": SYMBOLIC_NAME_RE, "attribute_name": "type"},
}
GROUP_ATTRIBUTES = {
"group_id": {"type": int, "attribute_name": "id"},
"dimensionType": {"type": str, "pattern": SYMBOLIC_NAME_RE, "use": "optional"},
}
ALL_ATTRIBUTES_MAP = {
**SEMANTIC_ATTRIBUTES,
**VERSION_ATTRIBUTES,
**ALIGNMENT_ATTRIBUTES,
**PRESENCE_ATTRIBUTES,
**TYPE_ATTRIBUTES,
**ENUM_ATTRIBUTES,
**REF_ATTRIBUTES,
**MESSAGE_ATTRIBUTES,
**FIELD_ATTRIBUTES,
**GROUP_ATTRIBUTES,
}
TYPE_ATTRIBUTES_LIST = list(SEMANTIC_ATTRIBUTES) + list(VERSION_ATTRIBUTES) + list(
ALIGNMENT_ATTRIBUTES
) + list(
PRESENCE_ATTRIBUTES
) + list(
TYPE_ATTRIBUTES
)
COMPOSITE_ATTRIBUTES_LIST = ["name"] + list(SEMANTIC_ATTRIBUTES) + list(
ALIGNMENT_ATTRIBUTES
) + list(
VERSION_ATTRIBUTES
)
ENUM_ATTRIBUTES_LIST = ["name"] + list(ENUM_ATTRIBUTES) + list(
ALIGNMENT_ATTRIBUTES
) + list(
SEMANTIC_ATTRIBUTES
) + list(
VERSION_ATTRIBUTES
)
ENUM_VALID_VALUES_ATTRIBUTES_LIST = (
"name", "description", "sinceVersion", "deprecated"
)
REF_ATTRIBUTES_LIST = ("name", "type", "offset", "sinceVersion", "deprecated")
SET_ATTRIBUTES_LIST = (
"name", "description", "encodingType", "sinceVersion", "deprecated", "offset"
)
SET_CHOICE_ATTRIBUTES_LIST = ("name", "description", "sinceVersion", "deprecated")
VALID_COMPOSITE_CHILD_ELEMENTS = ("type", "enum", "set", "composite", "ref")
MESSAGE_ATTRIBUTES_LIST = (
"name",
"message_id",
"description",
"blockLength",
"semanticType",
"sinceVersion",
"deprecated",
)
FIELD_ATTRIBUTES_LIST = (
"name",
"field_id",
"field_type",
"description",
"offset",
"presence",
"valueRef",
"sinceVersion",
"deprecated",
)
GROUP_ATTRIBUTES_LIST = (
"name",
"group_id",
"description",
"blockLength",
"semanticType",
"sinceVersion",
"deprecated",
"dimensionType",
)
MISSING = object()
class BaseParser:
"""contains shared functionality"""
NS = {"sbe": SBE_NS}
def parse_common_attributes(self, element, attributes):
"""parse and return dict of common attributes"""
result_attributes = {}
for attribute in attributes:
attrib_info = ALL_ATTRIBUTES_MAP[attribute]
if attrib_info.get("default", MISSING) is not MISSING:
default_value = attrib_info["default"]
else:
default_value = MISSING
attribute_name = attrib_info.get("attribute_name", attribute)
value = element.attrib.get(attribute_name, default_value)
if value is MISSING or value == "":
if attrib_info.get("use") == "optional":
continue
else:
raise ValueError(
f"element {element.tag} missing required "
f"attribute {attribute_name}"
)
if attrib_info.get("type"):
try:
value = attrib_info["type"](value)
except ValueError as exc:
raise ValueError(
f"element {element.tag} invalid value "
f"{repr(value)} for attribute {attribute_name}"
) from exc
if attrib_info.get("minimumValue"):
if value < attrib_info["minimumValue"]:
raise ValueError(
f"element {element.tag} invalid value {repr(value)}"
f" for attribute {attribute_name},"
"less than allowed minimum "
f"{repr(attrib_info['minimumValue'])}"
)
if attrib_info.get("pattern"):
if not attrib_info["pattern"].match(value):
raise ValueError(
f"element {element.tag} invalid value {repr(value)} "
f"for attribute {attribute_name},"
"does not match expected pattern "
f"{repr(attrib_info['pattern'])}"
)
if attrib_info.get("map"):
try:
value = attrib_info["map"][value]
except (KeyError, IndexError) as exc:
raise ValueError(
f"element {element.tag} invalid value {repr(value)} "
f"for attribute {attribute_name}"
f", must be one of {repr(attrib_info['map'].keys())}"
) from exc
if attrib_info.get("rename"):
attribute = attrib_info["rename"]
result_attributes[attribute] = value
return result_attributes
class SBESpecParser(BaseParser):
"""Parser for VFIX"""
def __init__(self):
pass
def parseFile(self, file_or_object):
"""parse a file"""
root = etree.parse(file_or_object)
element_name = "{%s}messageSchema" % SBE_NS
        # ElementTree.find() only searches the root element's children, and
        # sbe:messageSchema is itself the root element, so use getroot() instead
messageSchema_element = root.getroot()
if messageSchema_element.tag != element_name:
raise ValueError(
f"root element is not sbe:messageSchema,"
" found {repr(messageSchema_element)} instead"
)
return self.processSchema(messageSchema_element)
def processSchema(self, messageSchema_element):
"""process xml elements beginning with root messageSchema_element"""
attrib = messageSchema_element.attrib
version = parse_version(attrib.get("version"))
byteOrder = parse_byteOrder(attrib.get("byteOrder") or "littleEndian")
package = parse_optionalString(attrib.get("package"))
semanticVersion = parse_optionalString(attrib.get("semanticVersion"))
description = parse_optionalString(attrib.get("description"))
headerType = parse_optionalString(attrib.get("headerType") or "messageHeader")
messageSchema = createMessageSchema(
version=version,
byteOrder=byteOrder,
package=package,
semanticVersion=semanticVersion,
description=description,
headerType=headerType,
)
types_elements = messageSchema_element.findall("types")
types_parser = TypesParser()
for element in types_elements:
types_parser.parse_types(messageSchema, element)
message_elements = messageSchema_element.findall(
"sbe:message", namespaces=self.NS
)
message_parser = MessageParser()
for element in message_elements:
message_parser.parse_message(messageSchema, element)
return messageSchema
class TypesParser(BaseParser):
"""parse type definitions"""
# which child elements may appear in types
VALID_TYPES_ELEMENTS = ("type", "composite", "enum", "set")
def parse_types(self, messageSchema, element):
"""parse type, can be repeated"""
for child_element in element:
if child_element.tag not in self.VALID_TYPES_ELEMENTS:
raise ValueError(
f"invalid types child element {repr(child_element.tag)}"
)
parser = getattr(self, f"parse_types_{child_element.tag}", None)
if not parser:
raise RuntimeError(
f"unsupported types parser {repr(child_element.tag)}"
)
parser(messageSchema, child_element)
def parse_types_type(self, parent: TypeCollection, element):
"""parse types/type"""
attributes = self.parse_common_attributes(
element, attributes=TYPE_ATTRIBUTES_LIST
)
sbe_type = createType(**attributes)
parent.addType(sbe_type)
def parse_types_ref(self, parent: TypeCollection, element):
"""parse composite / ref"""
attributes = self.parse_common_attributes(
element, attributes=REF_ATTRIBUTES_LIST
)
sbe_ref = createRef(**attributes)
reference_type = parent.lookupName(sbe_ref.type)
if not reference_type:
raise UnknownReference(
f"composite {parent.name} ref {sbe_ref.name}"
f" references unknown encodingType {sbe_ref.type}"
)
parent.addType(sbe_ref)
def parse_types_composite(self, parent: TypeCollection, element):
"""parse types/composite"""
attributes = self.parse_common_attributes(
element, attributes=COMPOSITE_ATTRIBUTES_LIST
)
sbe_composite = createComposite(**attributes)
parent.addType(sbe_composite)
# now iterate over composite children
for child_element in element:
tag = child_element.tag
if tag not in VALID_COMPOSITE_CHILD_ELEMENTS:
raise ValueError(
f"invalid child element {repr(tag)} in "
f"composite element {repr(sbe_composite.name)}"
)
parser = getattr(self, f"parse_types_{tag}", None)
if not parser:
raise RuntimeError(
f"unsupported types parser {repr(child_element.tag)}"
)
parser(sbe_composite, child_element)
def parse_types_set(self, parent: TypeCollection, element):
"""parse types/set"""
attributes = self.parse_common_attributes(
element, attributes=SET_ATTRIBUTES_LIST
)
sbe_set = createSet(**attributes)
parent.addType(sbe_set)
for child_element in element.findall("choice"):
choice = self.parse_set_choice(sbe_set=sbe_set, element=child_element)
sbe_set.addChoice(choice)
def parse_set_choice(self, sbe_set, element):
"""parse and return an enum validvalue"""
attributes = self.parse_common_attributes(
element, attributes=SET_CHOICE_ATTRIBUTES_LIST
)
value = element.text
try:
value = int(element.text)
except ValueError as exc:
raise ValueError(
f"invalid value for set {sbe_set.name} choice "
f"{attributes.get('name')}"
) from exc
choice = createChoice(value=value, **attributes)
return choice
def parse_types_enum(self, parent: TypeCollection, element):
"""parse types/enum"""
attributes = self.parse_common_attributes(
element, attributes=ENUM_ATTRIBUTES_LIST
)
sbe_enum = createEnum(**attributes)
parent.addType(sbe_enum)
for child_element in element.findall("validValue"):
valid_value = self.parse_enum_valid_value(
sbe_enum=sbe_enum, element=child_element
)
sbe_enum.addValidValue(valid_value)
def parse_enum_valid_value(self, sbe_enum, element):
"""parse and return an enum validvalue"""
attributes = self.parse_common_attributes(
element, attributes=ENUM_VALID_VALUES_ATTRIBUTES_LIST
)
value = element.text
enum_valid_value = createValidValue(value=value, **attributes)
return enum_valid_value
class MessageParser(BaseParser):
"""parse message definitions"""
# which child elements may appear in message
VALID_MESSAGE_TYPES = ("field", "group", "data")
def parse_message(self, messageSchema, element):
"""parse message, can be repeated"""
attributes = self.parse_common_attributes(
element, attributes=MESSAGE_ATTRIBUTES_LIST
)
message = createMessage(**attributes)
messageSchema.addMessage(message)
self.parse_field_children(messageSchema, message, element)
def parse_field_children(self, messageSchema, parent: FieldCollection, element):
"""parse child elements that fit in a fieldCollection"""
for child_element in element:
if child_element.tag not in self.VALID_MESSAGE_TYPES:
raise ValueError(
f"invalid message/group child element {repr(child_element.tag)}"
)
parser = getattr(self, f"parse_message_{child_element.tag}", None)
if not parser:
raise RuntimeError(
f"unsupported message parser {repr(child_element.tag)}"
)
parser(messageSchema, parent, child_element)
def parse_message_field(
self, messageSchema, parent: FieldCollection, element
) -> None:
"""parse field Type"""
attributes = self.parse_common_attributes(
element, attributes=FIELD_ATTRIBUTES_LIST
)
field = createField(**attributes)
field.validate(messageSchema)
parent.addField(field)
def parse_message_group(
self, messageSchema, parent: FieldCollection, element
) -> None:
"""parse field Type"""
attributes = self.parse_common_attributes(
element, attributes=GROUP_ATTRIBUTES_LIST
)
group = createGroup(**attributes)
group.validate(messageSchema)
parent.addField(group)
self.parse_field_children(messageSchema, group, element)
def parse_byteOrder(byteOrder):
"""convert byteOrder to enum"""
if byteOrder is None or byteOrder == "":
return None
value = STRING_ENUM_MAP.get(byteOrder)
if value is None:
raise ValueError(
f"invalid byteOrder {repr(value)},"
"expected one of {SBE_STRING_ENUM_MAP.keys()}"
)
return value
def parse_version(version):
"""convert version to int"""
if version is None:
raise ValueError("sbe:messageSchema/@version is required")
return int(version)
def parse_optionalString(value):
"""parse an optional string"""
if not value:
return None
return value
| 32.321429 | 87 | 0.61326 | [
"Apache-2.0"
] | bkc/pysbe | pysbe/parser/fix_parser.py | 16,290 | Python |
"""
Created on Jan 24, 2014
@author: StarlitGhost
"""
from twisted.plugin import IPlugin
from desertbot.moduleinterface import IModule
from desertbot.modules.commandinterface import BotCommand
from zope.interface import implementer
from urllib.parse import quote
from desertbot.message import IRCMessage
from desertbot.response import IRCResponse, ResponseType
from twisted.words.protocols.irc import assembleFormattedText as colour, attributes as A
@implementer(IPlugin, IModule)
class Urban(BotCommand):
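    """Look up the definition of a search term on UrbanDictionary.com."""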
def triggers(self):
return ['urban', 'ud']
def help(self, query):
return ("urban <search term>"
" - returns the definition of the given search term from UrbanDictionary.com")
def execute(self, message: IRCMessage):
if len(message.parameterList) == 0:
return IRCResponse(ResponseType.Say,
"You didn't give a word! Usage: {0}".format(self.help),
message.replyTo)
search = quote(message.parameters)
url = 'http://api.urbandictionary.com/v0/define?term={0}'.format(search)
response = self.bot.moduleHandler.runActionUntilValue('fetch-url', url)
j = response.json()
if len(j['list']) == 0:
return IRCResponse(ResponseType.Say,
"No entry found for '{0}'".format(message.parameters),
message.replyTo)
graySplitter = colour(A.normal[' ', A.fg.gray['|'], ' '])
defn = j['list'][0]
word = defn['word']
definition = defn['definition']
definition = graySplitter.join([s.strip() for s in definition.strip().splitlines() if s])
example = defn['example']
example = graySplitter.join([s.strip() for s in example.strip().splitlines() if s])
author = defn['author']
up = defn['thumbs_up']
down = defn['thumbs_down']
more = 'http://{}.urbanup.com/'.format(word.replace(' ', '-'))
if word.lower() != message.parameters.lower():
word = "{0} (Contains '{1}')".format(word, message.parameters)
defFormatString = str(colour(A.normal[A.bold["{0}:"], " {1}"]))
exampleFormatString = str(colour(A.normal[A.bold["Example(s):"], " {0}"]))
byFormatString = str(colour(A.normal["{0}",
graySplitter,
A.fg.lightGreen["+{1}"],
A.fg.gray["/"],
A.fg.lightRed["-{2}"],
graySplitter,
"More defs: {3}"]))
responses = [IRCResponse(ResponseType.Say,
defFormatString.format(word, definition),
message.replyTo),
IRCResponse(ResponseType.Say,
exampleFormatString.format(example),
message.replyTo),
IRCResponse(ResponseType.Say,
byFormatString.format(author, up, down, more),
message.replyTo)]
return responses
urban = Urban()
| 36 | 97 | 0.532307 | [
"MIT"
] | MasterGunner/DesertBot | desertbot/modules/commands/Urban.py | 3,312 | Python |
import numpy as np
import openmdao.api as om
class IntegratedSurfaceForces(om.ExplicitComponent):
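    """Sum distributed nodal aerodynamic forces into total forces, moments,
    lift, drag, and their nondimensional coefficients."""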
def setup(self):
self.add_input('aoa',desc = 'angle of attack', units='rad',tags=['mphys_input'])
self.add_input('yaw',desc = 'yaw angle',units='rad',tags=['mphys_input'])
self.add_input('ref_area', val = 1.0,tags=['mphys_input'])
self.add_input('moment_center',shape=3,tags=['mphys_input'])
self.add_input('ref_length', val = 1.0,tags=['mphys_input'])
self.add_input('q_inf', val = 1.0,tags=['mphys_input'])
self.add_input('x_aero', shape_by_conn=True,
distributed=True,
desc = 'surface coordinates',
tags=['mphys_coupling'])
self.add_input('f_aero', shape_by_conn=True,
distributed=True,
desc = 'dimensional forces at nodes',
tags=['mphys_coupling'])
self.add_output('C_L', desc = 'Lift coefficient', tags=['mphys_result'])
self.add_output('C_D', desc = 'Drag coefficient', tags=['mphys_result'])
self.add_output('C_X', desc = 'X Force coefficient', tags=['mphys_result'])
self.add_output('C_Y', desc = 'Y Force coefficient', tags=['mphys_result'])
self.add_output('C_Z', desc = 'Z Force coefficient', tags=['mphys_result'])
self.add_output('CM_X', desc = 'X Moment coefficient', tags=['mphys_result'])
self.add_output('CM_Y', desc = 'Y Moment coefficient', tags=['mphys_result'])
self.add_output('CM_Z', desc = 'Z Moment coefficient', tags=['mphys_result'])
self.add_output('Lift', desc = 'Total Lift', tags=['mphys_result'])
self.add_output('Drag', desc = 'Total Drag', tags=['mphys_result'])
self.add_output('F_X', desc = 'Total X Force', tags=['mphys_result'])
self.add_output('F_Y', desc = 'Total Y Force', tags=['mphys_result'])
self.add_output('F_Z', desc = 'Total Z Force', tags=['mphys_result'])
self.add_output('M_X', desc = 'Total X Moment', tags=['mphys_result'])
self.add_output('M_Y', desc = 'Total Y Moment', tags=['mphys_result'])
self.add_output('M_Z', desc = 'Total Z Moment', tags=['mphys_result'])
def compute(self,inputs,outputs):
aoa = inputs['aoa']
yaw = inputs['yaw']
area = inputs['ref_area']
q_inf = inputs['q_inf']
xc = inputs['moment_center'][0]
yc = inputs['moment_center'][1]
zc = inputs['moment_center'][2]
c = inputs['ref_length']
x = inputs['x_aero'][0::3]
y = inputs['x_aero'][1::3]
z = inputs['x_aero'][2::3]
fx = inputs['f_aero'][0::3]
fy = inputs['f_aero'][1::3]
fz = inputs['f_aero'][2::3]
fx_total = self.comm.allreduce(np.sum(fx))
fy_total = self.comm.allreduce(np.sum(fy))
fz_total = self.comm.allreduce(np.sum(fz))
outputs['F_X'] = fx_total
outputs['F_Y'] = fy_total
outputs['F_Z'] = fz_total
outputs['C_X'] = fx_total / (q_inf * area)
outputs['C_Y'] = fy_total / (q_inf * area)
outputs['C_Z'] = fz_total / (q_inf * area)
outputs['Lift'] = -fx_total * np.sin(aoa) + fz_total * np.cos(aoa)
outputs['Drag'] = ( fx_total * np.cos(aoa) * np.cos(yaw)
- fy_total * np.sin(yaw)
+ fz_total * np.sin(aoa) * np.cos(yaw)
)
outputs['C_L'] = outputs['Lift'] / (q_inf * area)
outputs['C_D'] = outputs['Drag'] / (q_inf * area)
m_x = self.comm.allreduce( np.dot(fz,(y-yc)) - np.dot(fy,(z-zc)))
m_y = self.comm.allreduce(-np.dot(fz,(x-xc)) + np.dot(fx,(z-zc)))
m_z = self.comm.allreduce( np.dot(fy,(x-xc)) - np.dot(fx,(y-yc)))
outputs['M_X'] = m_x
outputs['M_Y'] = m_y
outputs['M_Z'] = m_z
outputs['CM_X'] = m_x / (q_inf * area * c)
outputs['CM_Y'] = m_y / (q_inf * area * c)
outputs['CM_Z'] = m_z / (q_inf * area * c)
def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
aoa = inputs['aoa']
yaw = inputs['yaw']
area = inputs['ref_area']
q_inf = inputs['q_inf']
xc = inputs['moment_center'][0]
yc = inputs['moment_center'][1]
zc = inputs['moment_center'][2]
c = inputs['ref_length']
x = inputs['x_aero'][0::3]
y = inputs['x_aero'][1::3]
z = inputs['x_aero'][2::3]
fx = inputs['f_aero'][0::3]
fy = inputs['f_aero'][1::3]
fz = inputs['f_aero'][2::3]
fx_total = self.comm.allreduce(np.sum(fx))
fy_total = self.comm.allreduce(np.sum(fy))
fz_total = self.comm.allreduce(np.sum(fz))
lift = -fx_total * np.sin(aoa) + fz_total * np.cos(aoa)
drag = ( fx_total * np.cos(aoa) * np.cos(yaw)
- fy_total * np.sin(yaw)
+ fz_total * np.sin(aoa) * np.cos(yaw)
)
m_x = self.comm.allreduce( np.dot(fz,(y-yc)) - np.dot(fy,(z-zc)))
m_y = self.comm.allreduce(-np.dot(fz,(x-xc)) + np.dot(fx,(z-zc)))
m_z = self.comm.allreduce( np.dot(fy,(x-xc)) - np.dot(fx,(y-yc)))
if mode == 'fwd':
if 'aoa' in d_inputs:
daoa_rad = d_inputs['aoa']
if 'Lift' in d_outputs or 'C_L' in d_outputs:
d_lift_d_aoa = ( - fx_total * np.cos(aoa) * daoa_rad
- fz_total * np.sin(aoa) * daoa_rad )
if 'Lift' in d_outputs:
d_outputs['Lift'] += d_lift_d_aoa
if 'C_L' in d_outputs:
d_outputs['C_L'] += d_lift_d_aoa / (q_inf * area)
if 'Drag' in d_outputs or 'C_D' in d_outputs:
d_drag_d_aoa = ( fx_total * (-np.sin(aoa) * daoa_rad) * np.cos(yaw)
+ fz_total * ( np.cos(aoa) * daoa_rad) * np.cos(yaw))
if 'Drag' in d_outputs:
d_outputs['Drag'] += d_drag_d_aoa
if 'C_D' in d_outputs:
d_outputs['C_D'] += d_drag_d_aoa / (q_inf * area)
if 'yaw' in d_inputs:
dyaw_rad = d_inputs['yaw']
if 'Drag' in d_outputs or 'C_D' in d_outputs:
d_drag_d_yaw = ( fx_total * np.cos(aoa) * (-np.sin(yaw) * dyaw_rad)
- fy_total * np.cos(yaw) * dyaw_rad
+ fz_total * np.sin(aoa) * (-np.sin(yaw) * dyaw_rad)
)
if 'Drag' in d_outputs:
d_outputs['Drag'] += d_drag_d_yaw
if 'C_D' in d_outputs:
d_outputs['C_D'] += d_drag_d_yaw / (q_inf * area)
if 'ref_area' in d_inputs:
d_nondim = - d_inputs['ref_area'] / (q_inf * area**2.0)
if 'C_X' in d_outputs:
d_outputs['C_X'] += fx_total * d_nondim
if 'C_Y' in d_outputs:
d_outputs['C_Y'] += fy_total * d_nondim
if 'C_Z' in d_outputs:
d_outputs['C_Z'] += fz_total * d_nondim
if 'C_L' in d_outputs:
d_outputs['C_L'] += lift * d_nondim
if 'C_D' in d_outputs:
d_outputs['C_D'] += drag * d_nondim
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += m_x * d_nondim / c
                if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += m_y * d_nondim / c
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += m_z * d_nondim / c
if 'moment_center' in d_inputs:
dxc = d_inputs['moment_center'][0]
dyc = d_inputs['moment_center'][1]
dzc = d_inputs['moment_center'][2]
if 'M_X' in d_outputs:
d_outputs['M_X'] += -fz_total * dyc + fy_total * dzc
if 'M_Y' in d_outputs:
d_outputs['M_Y'] += fz_total * dxc - fx_total * dzc
if 'M_Z' in d_outputs:
d_outputs['M_Z'] += -fy_total * dxc + fx_total * dyc
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += (-fz_total * dyc + fy_total * dzc) / (q_inf * area * c)
if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += ( fz_total * dxc - fx_total * dzc) / (q_inf * area * c)
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += (-fy_total * dxc + fx_total * dyc) / (q_inf * area * c)
if 'ref_length' in d_inputs:
d_nondim = - d_inputs['ref_length'] / (q_inf * area * c**2.0)
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += m_x * d_nondim
                if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += m_y * d_nondim
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += m_z * d_nondim
if 'q_inf' in d_inputs:
d_nondim = - d_inputs['q_inf'] / (q_inf**2.0 * area)
if 'C_X' in d_outputs:
d_outputs['C_X'] += fx_total * d_nondim
if 'C_Y' in d_outputs:
d_outputs['C_Y'] += fy_total * d_nondim
if 'C_Z' in d_outputs:
d_outputs['C_Z'] += fz_total * d_nondim
if 'C_L' in d_outputs:
d_outputs['C_L'] += lift * d_nondim
if 'C_D' in d_outputs:
d_outputs['C_D'] += drag * d_nondim
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += m_x * d_nondim / c
                if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += m_y * d_nondim / c
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += m_z * d_nondim / c
if 'x_aero' in d_inputs:
dx = d_inputs['x_aero'][0::3]
dy = d_inputs['x_aero'][1::3]
dz = d_inputs['x_aero'][2::3]
if 'M_X' in d_outputs:
d_outputs['M_X'] += np.dot(fz,dy) - np.dot(fy,dz)
if 'M_Y' in d_outputs:
d_outputs['M_Y'] += -np.dot(fz,dx) + np.dot(fx,dz)
if 'M_Z' in d_outputs:
d_outputs['M_Z'] += np.dot(fy,dx) - np.dot(fx,dy)
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += ( np.dot(fz,dy) - np.dot(fy,dz)) / (q_inf * area * c)
if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += (-np.dot(fz,dx) + np.dot(fx,dz)) / (q_inf * area * c)
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += ( np.dot(fy,dx) - np.dot(fx,dy)) / (q_inf * area * c)
if 'f_aero' in d_inputs:
dfx = d_inputs['f_aero'][0::3]
dfy = d_inputs['f_aero'][1::3]
dfz = d_inputs['f_aero'][2::3]
dfx_total = np.sum(dfx)
dfy_total = np.sum(dfy)
dfz_total = np.sum(dfz)
if 'F_X' in d_outputs:
d_outputs['F_X'] += dfx_total
if 'F_Y' in d_outputs:
d_outputs['F_Y'] += dfy_total
if 'F_Z' in d_outputs:
d_outputs['F_Z'] += dfz_total
if 'C_X' in d_outputs:
d_outputs['C_X'] += dfx_total / (q_inf * area)
if 'C_Y' in d_outputs:
d_outputs['C_Y'] += dfy_total / (q_inf * area)
if 'C_Z' in d_outputs:
d_outputs['C_Z'] += dfz_total / (q_inf * area)
if 'Lift' in d_outputs:
d_outputs['Lift'] += -dfx_total * np.sin(aoa) + dfz_total * np.cos(aoa)
if 'Drag' in d_outputs:
d_outputs['Drag'] += ( dfx_total * np.cos(aoa) * np.cos(yaw)
- dfy_total * np.sin(yaw)
+ dfz_total * np.sin(aoa) * np.cos(yaw)
)
if 'C_L' in d_outputs:
d_outputs['C_L'] += (-dfx_total * np.sin(aoa) + dfz_total * np.cos(aoa)) / (q_inf * area)
if 'C_D' in d_outputs:
d_outputs['C_D'] += ( dfx_total * np.cos(aoa) * np.cos(yaw)
- dfy_total * np.sin(yaw)
+ dfz_total * np.sin(aoa) * np.cos(yaw)
) / (q_inf * area)
if 'M_X' in d_outputs:
d_outputs['M_X'] += np.dot(dfz,(y-yc)) - np.dot(dfy,(z-zc))
if 'M_Y' in d_outputs:
d_outputs['M_Y'] += -np.dot(dfz,(x-xc)) + np.dot(dfx,(z-zc))
if 'M_Z' in d_outputs:
d_outputs['M_Z'] += np.dot(dfy,(x-xc)) - np.dot(dfx,(y-yc))
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += ( np.dot(dfz,(y-yc)) - np.dot(dfy,(z-zc))) / (q_inf * area * c)
if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += (-np.dot(dfz,(x-xc)) + np.dot(dfx,(z-zc))) / (q_inf * area * c)
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += ( np.dot(dfy,(x-xc)) - np.dot(dfx,(y-yc))) / (q_inf * area * c)
elif mode == 'rev':
if 'aoa' in d_inputs:
if 'Lift' in d_outputs or 'C_L' in d_outputs:
d_lift = d_outputs['Lift'] if 'Lift' in d_outputs else 0.0
d_cl = d_outputs['C_L'] if 'C_L' in d_outputs else 0.0
d_inputs['aoa'] += ( - fx_total * np.cos(aoa)
- fz_total * np.sin(aoa)
) * (d_lift + d_cl / (q_inf * area))
if 'Drag' in d_outputs or 'C_D' in d_outputs:
d_drag = d_outputs['Drag'] if 'Drag' in d_outputs else 0.0
d_cd = d_outputs['C_D'] if 'C_D' in d_outputs else 0.0
d_inputs['aoa'] += ( fx_total * (-np.sin(aoa)) * np.cos(yaw)
+ fz_total * ( np.cos(aoa)) * np.cos(yaw)
) * (d_drag + d_cd / (q_inf * area))
if 'yaw' in d_inputs:
if 'Drag' in d_outputs or 'C_D' in d_outputs:
d_drag = d_outputs['Drag'] if 'Drag' in d_outputs else 0.0
d_cd = d_outputs['C_D'] if 'C_D' in d_outputs else 0.0
d_inputs['yaw'] += ( fx_total * np.cos(aoa) * (-np.sin(yaw))
- fy_total * np.cos(yaw)
+ fz_total * np.sin(aoa) * (-np.sin(yaw))
) * (d_drag + d_cd / (q_inf * area))
if 'ref_area' in d_inputs:
d_nondim = - 1.0 / (q_inf * area**2.0)
if 'C_X' in d_outputs:
d_inputs['ref_area'] += d_outputs['C_X'] * fx_total * d_nondim
if 'C_Y' in d_outputs:
d_inputs['ref_area'] += d_outputs['C_Y'] * fy_total * d_nondim
if 'C_Z' in d_outputs:
d_inputs['ref_area'] += d_outputs['C_Z'] * fz_total * d_nondim
if 'C_L' in d_outputs:
d_inputs['ref_area'] += d_outputs['C_L'] * lift * d_nondim
if 'C_D' in d_outputs:
d_inputs['ref_area'] += d_outputs['C_D'] * drag * d_nondim
if 'CM_X' in d_outputs:
d_inputs['ref_area'] += d_outputs['CM_X'] * m_x * d_nondim / c
                if 'CM_Y' in d_outputs:
d_inputs['ref_area'] += d_outputs['CM_Y'] * m_y * d_nondim / c
if 'CM_Z' in d_outputs:
d_inputs['ref_area'] += d_outputs['CM_Z'] * m_z * d_nondim / c
if 'moment_center' in d_inputs:
if 'M_X' in d_outputs:
d_inputs['moment_center'][1] += -fz_total * d_outputs['M_X']
d_inputs['moment_center'][2] += fy_total * d_outputs['M_X']
if 'M_Y' in d_outputs:
d_inputs['moment_center'][0] += fz_total * d_outputs['M_Y']
d_inputs['moment_center'][2] += -fx_total * d_outputs['M_Y']
if 'M_Z' in d_outputs:
d_inputs['moment_center'][0] += -fy_total * d_outputs['M_Z']
d_inputs['moment_center'][1] += fx_total * d_outputs['M_Z']
if 'CM_X' in d_outputs:
d_inputs['moment_center'][1] += -fz_total * d_outputs['CM_X'] / (q_inf * area * c)
d_inputs['moment_center'][2] += fy_total * d_outputs['CM_X'] / (q_inf * area * c)
if 'CM_Y' in d_outputs:
d_inputs['moment_center'][0] += fz_total * d_outputs['CM_Y'] / (q_inf * area * c)
d_inputs['moment_center'][2] += -fx_total * d_outputs['CM_Y'] / (q_inf * area * c)
if 'CM_Z' in d_outputs:
d_inputs['moment_center'][0] += -fy_total * d_outputs['CM_Z'] / (q_inf * area * c)
d_inputs['moment_center'][1] += fx_total * d_outputs['CM_Z'] / (q_inf * area * c)
if 'ref_length' in d_inputs:
d_nondim = - 1.0 / (q_inf * area * c**2.0)
if 'CM_X' in d_outputs:
d_inputs['ref_length'] += m_x * d_nondim * d_outputs['CM_X']
                if 'CM_Y' in d_outputs:
d_inputs['ref_length'] += m_y * d_nondim * d_outputs['CM_Y']
if 'CM_Z' in d_outputs:
d_inputs['ref_length'] += m_z * d_nondim * d_outputs['CM_Z']
if 'q_inf' in d_inputs:
d_nondim = - 1.0 / (q_inf**2.0 * area)
if 'C_X' in d_outputs:
d_inputs['q_inf'] += d_outputs['C_X'] * fx_total * d_nondim
if 'C_Y' in d_outputs:
d_inputs['q_inf'] += d_outputs['C_Y'] * fy_total * d_nondim
if 'C_Z' in d_outputs:
d_inputs['q_inf'] += d_outputs['C_Z'] * fz_total * d_nondim
if 'C_L' in d_outputs:
d_inputs['q_inf'] += d_outputs['C_L'] * lift * d_nondim
if 'C_D' in d_outputs:
d_inputs['q_inf'] += d_outputs['C_D'] * drag * d_nondim
if 'CM_X' in d_outputs:
d_inputs['q_inf'] += d_outputs['CM_X'] * m_x * d_nondim / c
                if 'CM_Y' in d_outputs:
d_inputs['q_inf'] += d_outputs['CM_Y'] * m_y * d_nondim / c
if 'CM_Z' in d_outputs:
d_inputs['q_inf'] += d_outputs['CM_Z'] * m_z * d_nondim / c
if 'x_aero' in d_inputs:
nondim = 1.0 / (q_inf * area * c)
dm_x = d_outputs['M_X'] if 'M_X' in d_outputs else 0.0
dm_y = d_outputs['M_Y'] if 'M_Y' in d_outputs else 0.0
dm_z = d_outputs['M_Z'] if 'M_Z' in d_outputs else 0.0
dcm_x = d_outputs['CM_X']*nondim if 'CM_X' in d_outputs else 0.0
dcm_y = d_outputs['CM_Y']*nondim if 'CM_Y' in d_outputs else 0.0
dcm_z = d_outputs['CM_Z']*nondim if 'CM_Z' in d_outputs else 0.0
d_inputs['x_aero'][0::3] += -fz * (dm_y + dcm_y) + fy * (dm_z + dcm_z)
d_inputs['x_aero'][1::3] += fz * (dm_x + dcm_x) - fx * (dm_z + dcm_z)
d_inputs['x_aero'][2::3] += -fy * (dm_x + dcm_x) + fx * (dm_y + dcm_y)
if 'f_aero' in d_inputs:
if 'F_X' in d_outputs:
d_inputs['f_aero'][0::3] += d_outputs['F_X']
if 'F_Y' in d_outputs:
d_inputs['f_aero'][1::3] += d_outputs['F_Y']
if 'F_Z' in d_outputs:
d_inputs['f_aero'][2::3] += d_outputs['F_Z']
if 'C_X' in d_outputs:
d_inputs['f_aero'][0::3] += d_outputs['C_X'] / (q_inf * area)
if 'C_Y' in d_outputs:
d_inputs['f_aero'][1::3] += d_outputs['C_Y'] / (q_inf * area)
if 'C_Z' in d_outputs:
d_inputs['f_aero'][2::3] += d_outputs['C_Z'] / (q_inf * area)
if 'Lift' in d_outputs:
d_inputs['f_aero'][0::3] += -np.sin(aoa) * d_outputs['Lift']
d_inputs['f_aero'][2::3] += np.cos(aoa) * d_outputs['Lift']
if 'Drag' in d_outputs:
d_inputs['f_aero'][0::3] += np.cos(aoa) * np.cos(yaw) * d_outputs['Drag']
d_inputs['f_aero'][1::3] += -np.sin(yaw) * d_outputs['Drag']
d_inputs['f_aero'][2::3] += np.sin(aoa) * np.cos(yaw) * d_outputs['Drag']
if 'C_L' in d_outputs:
d_inputs['f_aero'][0::3] += -np.sin(aoa) * d_outputs['C_L'] / (q_inf * area)
d_inputs['f_aero'][2::3] += np.cos(aoa) * d_outputs['C_L'] / (q_inf * area)
if 'C_D' in d_outputs:
d_inputs['f_aero'][0::3] += np.cos(aoa) * np.cos(yaw) * d_outputs['C_D'] / (q_inf * area)
d_inputs['f_aero'][1::3] += -np.sin(yaw) * d_outputs['C_D'] / (q_inf * area)
d_inputs['f_aero'][2::3] += np.sin(aoa) * np.cos(yaw) * d_outputs['C_D'] / (q_inf * area)
if 'M_X' in d_outputs:
d_inputs['f_aero'][1::3] += -(z-zc) * d_outputs['M_X']
d_inputs['f_aero'][2::3] += (y-yc) * d_outputs['M_X']
if 'M_Y' in d_outputs:
d_inputs['f_aero'][0::3] += (z-zc) * d_outputs['M_Y']
d_inputs['f_aero'][2::3] += -(x-xc) * d_outputs['M_Y']
if 'M_Z' in d_outputs:
d_inputs['f_aero'][0::3] += -(y-yc) * d_outputs['M_Z']
d_inputs['f_aero'][1::3] += (x-xc) * d_outputs['M_Z']
if 'CM_X' in d_outputs:
d_inputs['f_aero'][1::3] += -(z-zc) * d_outputs['CM_X'] / (q_inf * area * c)
d_inputs['f_aero'][2::3] += (y-yc) * d_outputs['CM_X'] / (q_inf * area * c)
if 'CM_Y' in d_outputs:
d_inputs['f_aero'][0::3] += (z-zc) * d_outputs['CM_Y'] / (q_inf * area * c)
d_inputs['f_aero'][2::3] += -(x-xc) * d_outputs['CM_Y'] / (q_inf * area * c)
if 'CM_Z' in d_outputs:
d_inputs['f_aero'][0::3] += -(y-yc) * d_outputs['CM_Z'] / (q_inf * area * c)
d_inputs['f_aero'][1::3] += (x-xc) * d_outputs['CM_Z'] / (q_inf * area * c)
def check_integrated_surface_force_partials():
nnodes = 3
prob = om.Problem()
ivc = om.IndepVarComp()
ivc.add_output('aoa',val=45.0, units='deg')
ivc.add_output('yaw',val=135.0, units='deg')
ivc.add_output('ref_area',val=0.2)
ivc.add_output('moment_center',shape=3,val=np.zeros(3))
ivc.add_output('ref_length', val = 3.0)
ivc.add_output('q_inf',val=10.0)
ivc.add_output('x_aero',shape=3*nnodes,val=np.random.rand(3*nnodes),distributed=True)
ivc.add_output('f_aero',shape=3*nnodes,val=np.random.rand(3*nnodes),distributed=True)
prob.model.add_subsystem('ivc',ivc,promotes_outputs=['*'])
prob.model.add_subsystem('forces',IntegratedSurfaceForces(),
promotes_inputs=['*'])
prob.setup(force_alloc_complex=True)
prob.run_model()
prob.check_partials(compact_print=True, method='cs')
if __name__ == '__main__':
check_integrated_surface_force_partials()
| 53.004435 | 110 | 0.474001 | [
"Apache-2.0"
] | OpenMDAO/mphys | mphys/integrated_forces.py | 23,905 | Python |
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['127.0.0.1'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            # Mimicking memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3Boto3Storage'
STATIC_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/'
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = 'static'
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = 'media'
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3Boto3Storage'
MEDIA_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='The Bureau <[email protected]>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[The Bureau]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Your stuff...
# ------------------------------------------------------------------------------
| 39.316832 | 96 | 0.605515 | [
"Apache-2.0"
] | clairempr/bureau | config/settings/production.py | 7,942 | Python |
# Copyright 2018 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import subprocess
import charms_openstack.test_utils as test_utils
import charm.openstack.octavia as octavia # for constants
import charm.openstack.api_crud as api_crud
class FakeNeutronConflictException(Exception):
pass
class TestAPICrud(test_utils.PatchHelper):
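    """Unit tests for charm.openstack.api_crud."""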
def setUp(self):
super().setUp()
self.secgrp_uuid = 'fake-secgrp-uuid'
self.health_secgrp_uuid = 'fake-health_secgrp-uuid'
self.security_group_rule_calls = [
mock.call(
{'security_group_rule': {
'direction': 'ingress',
'protocol': 'icmpv6',
'ethertype': 'IPv6',
'security_group_id': self.secgrp_uuid}}),
mock.call(
{'security_group_rule': {
'direction': 'ingress',
'protocol': 'tcp',
'ethertype': 'IPv6',
'port_range_min': '22',
'port_range_max': '22',
'security_group_id': self.secgrp_uuid}}),
mock.call(
{'security_group_rule': {
'direction': 'ingress',
'protocol': 'tcp',
'ethertype': 'IPv6',
'port_range_min': '9443',
'port_range_max': '9443',
'security_group_id': self.secgrp_uuid}}),
mock.call(
{'security_group_rule': {
'direction': 'ingress',
'protocol': 'icmpv6',
'ethertype': 'IPv6',
'security_group_id': self.health_secgrp_uuid}}),
mock.call(
{'security_group_rule': {
'direction': 'ingress',
'protocol': 'udp',
'ethertype': 'IPv6',
'port_range_min': octavia.OCTAVIA_HEALTH_LISTEN_PORT,
'port_range_max': octavia.OCTAVIA_HEALTH_LISTEN_PORT,
'security_group_id': self.health_secgrp_uuid}}),
]
def test_endpoint_type(self):
self.patch_object(api_crud.ch_core.hookenv, 'config')
self.config.return_value = False
self.assertEquals(api_crud.endpoint_type(), 'publicURL')
self.config.return_value = True
self.assertEquals(api_crud.endpoint_type(), 'internalURL')
def test_session_from_identity_service(self):
self.patch_object(api_crud, 'keystone_identity')
self.patch_object(api_crud, 'keystone_session')
identity_service = mock.MagicMock()
result = api_crud.session_from_identity_service(identity_service)
self.keystone_identity.Password.assert_called_once_with(
auth_url='{}://{}:{}/'
.format(identity_service.auth_protocol(),
identity_service.auth_host(),
identity_service.auth_port()),
user_domain_name=identity_service.service_domain(),
username=identity_service.service_username(),
password=identity_service.service_password(),
project_domain_name=identity_service.service_domain(),
project_name=identity_service.service_tenant(),
)
self.keystone_session.Session.assert_called_once_with(
auth=self.keystone_identity.Password(),
verify='/etc/ssl/certs/ca-certificates.crt')
self.assertEqual(result, self.keystone_session.Session())
def test_init_neutron_client(self):
self.patch_object(api_crud, 'neutron_client')
self.patch_object(api_crud.ch_core.hookenv, 'config')
self.patch_object(api_crud, 'endpoint_type')
self.endpoint_type.return_value = 'someeptype'
api_crud.init_neutron_client('somesession')
self.config.assert_called_once_with('region')
self.neutron_client.Client.assert_called_once_with(
session='somesession', region_name=self.config(),
endpoint_type='someeptype')
def test_get_nova_client(self):
self.patch_object(api_crud, 'nova_client')
self.patch_object(api_crud.ch_core.hookenv, 'config')
self.config.return_value = 'someregion'
self.patch_object(api_crud, 'endpoint_type')
self.endpoint_type.return_value = 'someeptype'
api_crud.get_nova_client('somesession')
self.config.assert_called_once_with('region')
self.nova_client.Client.assert_called_once_with(
'2', session='somesession', region_name='someregion',
endpoint_type='someeptype')
def test_get_nova_flavor(self):
self.patch_object(api_crud, 'get_nova_client')
self.patch_object(api_crud, 'nova_client')
self.patch_object(api_crud, 'session_from_identity_service')
self.patch_object(api_crud, 'keystone_exceptions')
nova = mock.MagicMock()
self.get_nova_client.return_value = nova
flavor = mock.MagicMock()
flavor.id = 'fake-id'
flavor.name = 'charm-octavia'
nova.flavors.list.return_value = [flavor]
self.keystone_exceptions.catalog.EndpointNotFound = Exception
self.keystone_exceptions.connection.ConnectFailure = Exception
self.nova_client.exceptions.ClientException = Exception
nova.flavors.list.side_effect = Exception
identity_service = mock.MagicMock()
with self.assertRaises(api_crud.APIUnavailable):
api_crud.get_nova_flavor(identity_service)
nova.flavors.list.side_effect = None
api_crud.get_nova_flavor(identity_service)
nova.flavors.list.assert_called_with(is_public=False)
self.assertFalse(nova.flavors.create.called)
nova.flavors.list.return_value = []
nova.flavors.create.return_value = flavor
api_crud.get_nova_flavor(identity_service)
nova.flavors.create.assert_called_with('charm-octavia', 1024, 1, 8,
is_public=False)
def test_lookup_hm_port(self):
nc = mock.MagicMock()
nc.list_ports.return_value = {'ports': ['first', 'second']}
with self.assertRaises(api_crud.DuplicateResource):
api_crud.lookup_hm_port(nc, 'fake-unit-name')
nc.list_ports.return_value = {'ports': ['first']}
self.assertEquals(
api_crud.lookup_hm_port(nc, 'fake-unit-name'),
'first')
nc.list_ports.return_value = {}
self.assertEquals(
api_crud.lookup_hm_port(nc, 'fake-unit-name'),
None)
def test_get_hm_port(self):
self.patch_object(api_crud, 'session_from_identity_service')
self.patch_object(api_crud, 'init_neutron_client')
nc = mock.MagicMock()
self.init_neutron_client.return_value = nc
network_uuid = 'fake-network-uuid'
nc.list_networks.return_value = {'networks': [{'id': network_uuid}]}
health_secgrp_uuid = 'fake-secgrp-uuid'
nc.list_security_groups.return_value = {
'security_groups': [{'id': health_secgrp_uuid}]}
self.patch_object(api_crud.socket, 'gethostname')
self.gethostname.return_value = 'fakehostname'
port_uuid = 'fake-port-uuid'
port_mac_address = 'fake-mac-address'
nc.create_port.return_value = {
'port': {'id': port_uuid, 'mac_address': port_mac_address}}
self.patch('subprocess.check_output', 'check_output')
self.patch('charms.reactive.set_flag', 'set_flag')
identity_service = mock.MagicMock()
self.patch_object(api_crud, 'neutron_lib')
self.neutron_lib.constants.DEVICE_OWNER_LOADBALANCERV2 = 'fakeowner'
self.patch_object(api_crud, 'lookup_hm_port')
self.lookup_hm_port.return_value = None
result = api_crud.get_hm_port(identity_service,
'fake-unit-name',
'192.0.2.42')
self.init_neutron_client.assert_called_once_with(
self.session_from_identity_service())
nc.list_networks.assert_called_with(tags='charm-octavia')
nc.list_security_groups.assert_called_with(
tags='charm-octavia-health')
self.lookup_hm_port.assert_called_once_with(
nc, 'fake-unit-name')
nc.create_port.assert_called_once_with(
{
'port': {
'admin_state_up': False,
'binding:host_id': 'fakehostname',
'device_owner': 'fakeowner',
'security_groups': ['fake-secgrp-uuid'],
'name': 'octavia-health-manager-'
'fake-unit-name-listen-port',
'network_id': 'fake-network-uuid',
},
})
nc.add_tag.assert_called_with('ports', port_uuid, 'charm-octavia')
self.assertEqual(result, {'id': 'fake-port-uuid',
'mac_address': 'fake-mac-address'})
nc.create_port.reset_mock()
result = api_crud.get_hm_port(identity_service,
'fake-unit-name',
'192.0.2.42',
host_id='fake-unit-name.fqdn')
nc.create_port.assert_called_once_with(
{
'port': {
'admin_state_up': False,
'binding:host_id': 'fake-unit-name.fqdn',
'device_owner': 'fakeowner',
'security_groups': ['fake-secgrp-uuid'],
'name': 'octavia-health-manager-'
'fake-unit-name-listen-port',
'network_id': 'fake-network-uuid',
},
})
self.assertEqual(result, {'id': 'fake-port-uuid',
'mac_address': 'fake-mac-address'})
def test_toggle_hm_port(self):
self.patch_object(api_crud, 'session_from_identity_service')
self.patch_object(api_crud, 'init_neutron_client')
identity_service = mock.MagicMock()
nc = mock.MagicMock()
self.init_neutron_client.return_value = nc
nc.list_ports.return_value = {'ports': [{'id': 'fake-port-uuid'}]}
api_crud.toggle_hm_port(identity_service, 'fake-unit-name')
self.init_neutron_client.assert_called_once_with(
self.session_from_identity_service())
        nc.list_ports.assert_called_with(tags='charm-octavia-fake-unit-name')
nc.update_port.assert_called_with('fake-port-uuid',
{'port': {'admin_state_up': True}})
def test_is_hm_port_bound(self):
self.patch_object(api_crud, 'session_from_identity_service')
self.patch_object(api_crud, 'init_neutron_client')
self.patch_object(api_crud, 'lookup_hm_port')
self.lookup_hm_port.return_value = None
self.assertEquals(
api_crud.is_hm_port_bound('ids', 'fake-unit-name'), None)
self.lookup_hm_port.assert_called_once_with(
mock.ANY, 'fake-unit-name')
self.lookup_hm_port.return_value = {'binding:vif_type': 'nonfailure'}
self.assertTrue(api_crud.is_hm_port_bound('ids', 'fake-unit-name'))
self.lookup_hm_port.return_value = {
'binding:vif_type': 'binding_failed'}
self.assertFalse(api_crud.is_hm_port_bound('ids', 'fake-unit-name'))
def test_wait_for_hm_port_bound(self):
self.patch_object(api_crud.tenacity, 'Retrying')
@contextlib.contextmanager
def fake_context_manager():
            # TODO: Replace with `contextlib.nullcontext()` once we have
            # dropped support for Python 3.4, 3.5 and 3.6
yield None
self.Retrying.return_value = [fake_context_manager()]
self.patch_object(api_crud, 'is_hm_port_bound')
self.is_hm_port_bound.return_value = True
self.assertTrue(api_crud.wait_for_hm_port_bound(
'ids', 'fake-unit-name'))
self.Retrying.side_effect = api_crud.tenacity.RetryError(None)
self.assertFalse(api_crud.wait_for_hm_port_bound(
'ids', 'fake-unit-name'))
def test_setup_hm_port(self):
self.patch('subprocess.check_output', 'check_output')
self.patch('subprocess.check_call', 'check_call')
self.patch_object(api_crud, 'get_hm_port')
self.patch_object(api_crud, 'toggle_hm_port')
identity_service = mock.MagicMock()
octavia_charm = mock.MagicMock()
port_uuid = 'fake-port-uuid'
port_mac_address = 'fake-mac-address'
self.get_hm_port.return_value = {
'id': port_uuid,
'mac_address': port_mac_address,
'admin_state_up': False,
'binding:vif_type': 'binding_failed',
'status': 'DOWN',
}
e = subprocess.CalledProcessError(returncode=1, cmd=None)
e.output = ('Device "{}" does not exist.'
.format(api_crud.octavia.OCTAVIA_MGMT_INTF))
self.check_output.side_effect = e
api_crud.setup_hm_port(identity_service, octavia_charm)
self.get_hm_port.assert_called_with(
identity_service,
octavia_charm.local_unit_name,
octavia_charm.local_address,
host_id=None)
self.check_output.assert_called_with(
['ip', 'link', 'show', api_crud.octavia.OCTAVIA_MGMT_INTF],
stderr=-2, universal_newlines=True)
self.check_call.assert_has_calls([
mock.call(
['ovs-vsctl', '--', 'add-port',
api_crud.octavia.OCTAVIA_INT_BRIDGE,
api_crud.octavia.OCTAVIA_MGMT_INTF,
'--', 'set', 'Interface', api_crud.octavia.OCTAVIA_MGMT_INTF,
'type=internal',
'--', 'set', 'Interface', api_crud.octavia.OCTAVIA_MGMT_INTF,
'external-ids:iface-status=active',
'--', 'set', 'Interface', api_crud.octavia.OCTAVIA_MGMT_INTF,
'external-ids:attached-mac={}'.format(port_mac_address),
'--', 'set', 'Interface', api_crud.octavia.OCTAVIA_MGMT_INTF,
'external-ids:iface-id={}'.format(port_uuid),
'--', 'set', 'Interface', api_crud.octavia.OCTAVIA_MGMT_INTF,
'external-ids:skip_cleanup=true']),
mock.call(['ip', 'link', 'set', 'o-hm0', 'up', 'address',
'fake-mac-address']),
])
self.check_call.assert_called_with(
['ip', 'link', 'set', api_crud.octavia.OCTAVIA_MGMT_INTF,
'up', 'address', port_mac_address])
        self.toggle_hm_port.assert_called()
def test_get_port_ips(self):
self.patch_object(api_crud, 'session_from_identity_service')
self.patch_object(api_crud, 'init_neutron_client')
nc = mock.MagicMock()
self.init_neutron_client.return_value = nc
nc.list_ports.return_value = {
'ports': [
{'fixed_ips': [{'ip_address': '2001:db8:42::42'}]},
{'fixed_ips': [{'ip_address': '2001:db8:42::51'}]},
],
}
identity_service = mock.MagicMock()
self.assertEquals(api_crud.get_port_ips(identity_service),
['2001:db8:42::42',
'2001:db8:42::51'])
self.init_neutron_client.assert_called_once_with(
self.session_from_identity_service())
def test_get_mgmt_network_create(self):
resource_tag = 'charm-octavia'
self.patch_object(api_crud, 'neutron_client')
identity_service = mock.MagicMock()
nc = mock.MagicMock()
self.neutron_client.Client.return_value = nc
network_uuid = '83f1a860-9aed-4c0b-8b72-47195580a0c1'
nc.create_network.return_value = {'network': {'id': network_uuid}}
nc.create_subnet.return_value = {
'subnets': [{'id': 'fake-subnet-uuid', 'cidr': 'fake-cidr'}]}
nc.create_router.return_value = {
'router': {'id': 'fake-router-uuid'}}
nc.create_security_group.side_effect = [
{'security_group': {'id': self.secgrp_uuid}},
{'security_group': {'id': self.health_secgrp_uuid}},
]
self.patch_object(api_crud, 'is_extension_enabled')
self.is_extension_enabled.return_value = True
result = api_crud.get_mgmt_network(identity_service)
nc.list_networks.assert_called_once_with(tags=resource_tag)
nc.create_network.assert_called_once_with({
'network': {'name': octavia.OCTAVIA_MGMT_NET}})
nc.list_subnets.assert_called_once_with(network_id=network_uuid,
tags=resource_tag)
nc.list_routers.assert_called_once_with(tags=resource_tag)
nc.create_router.assert_called_once_with(
{'router': {'name': 'lb-mgmt', 'distributed': False}})
nc.list_security_groups.assert_any_call(tags=resource_tag)
nc.list_security_groups.assert_any_call(tags=resource_tag + '-health')
nc.create_security_group_rule.assert_has_calls(
self.security_group_rule_calls)
self.assertEqual(result, (
{'id': network_uuid},
{'id': self.secgrp_uuid},),
)
def test_get_mgmt_network_exists(self):
resource_tag = 'charm-octavia'
self.patch_object(api_crud, 'session_from_identity_service')
self.patch_object(api_crud, 'init_neutron_client')
identity_service = mock.MagicMock()
nc = mock.MagicMock()
self.init_neutron_client.return_value = nc
network_uuid = '83f1a860-9aed-4c0b-8b72-47195580a0c1'
nc.list_networks.return_value = {'networks': [{'id': network_uuid}]}
nc.list_subnets.return_value = {
'subnets': [{'id': 'fake-subnet-uuid'}]}
nc.list_routers.return_value = {
'routers': [{'id': 'fake-router-uuid'}]}
nc.list_security_groups.side_effect = [
{'security_groups': [{'id': self.secgrp_uuid}]},
{'security_groups': [{'id': self.health_secgrp_uuid}]},
]
self.patch_object(api_crud.neutronclient.common, 'exceptions',
name='neutron_exceptions')
self.neutron_exceptions.Conflict = FakeNeutronConflictException
nc.create_security_group_rule.side_effect = \
FakeNeutronConflictException
result = api_crud.get_mgmt_network(identity_service)
self.init_neutron_client.assert_called_once_with(
self.session_from_identity_service())
nc.list_networks.assert_called_once_with(tags=resource_tag)
nc.list_subnets.assert_called_once_with(network_id=network_uuid,
tags=resource_tag)
nc.list_routers.assert_called_once_with(tags=resource_tag)
nc.list_security_groups.assert_has_calls([
mock.call(tags=resource_tag),
mock.call(tags=resource_tag + '-health'),
])
nc.create_security_group_rule.assert_has_calls(
self.security_group_rule_calls)
self.assertEqual(result, (
{'id': network_uuid},
{'id': self.secgrp_uuid},),
)
def test_get_mgmt_network_exists_create_router(self):
resource_tag = 'charm-octavia'
self.patch_object(api_crud, 'session_from_identity_service')
self.patch_object(api_crud, 'init_neutron_client')
identity_service = mock.MagicMock()
nc = mock.MagicMock()
self.init_neutron_client.return_value = nc
network_uuid = '83f1a860-9aed-4c0b-8b72-47195580a0c1'
nc.list_networks.return_value = {'networks': [{'id': network_uuid}]}
nc.list_subnets.return_value = {
'subnets': [{'id': 'fake-subnet-uuid'}]}
        # network and subnet exist, but the router doesn't
nc.list_routers.return_value = {'routers': []}
nc.create_router.return_value = {
'router': {'id': 'fake-router-uuid'}}
nc.list_security_groups.side_effect = [
{'security_groups': [{'id': self.secgrp_uuid}]},
{'security_groups': [{'id': self.health_secgrp_uuid}]},
]
self.patch_object(api_crud.neutronclient.common, 'exceptions',
name='neutron_exceptions')
self.neutron_exceptions.Conflict = FakeNeutronConflictException
nc.create_security_group_rule.side_effect = \
FakeNeutronConflictException
result = api_crud.get_mgmt_network(identity_service)
self.init_neutron_client.assert_called_once_with(
self.session_from_identity_service())
nc.list_networks.assert_called_once_with(tags=resource_tag)
self.assertFalse(nc.create_networks.called)
nc.list_subnets.assert_called_once_with(network_id=network_uuid,
tags=resource_tag)
self.assertFalse(nc.create_subnet.called)
nc.list_routers.assert_called_once_with(tags=resource_tag)
self.assertTrue(nc.create_router.called)
nc.add_interface_router.assert_called_once_with('fake-router-uuid', {
'subnet_id': 'fake-subnet-uuid'})
nc.list_security_groups.assert_has_calls([
mock.call(tags=resource_tag),
mock.call(tags=resource_tag + '-health'),
])
nc.create_security_group_rule.assert_has_calls(
self.security_group_rule_calls)
self.assertEqual(result, (
{'id': network_uuid},
{'id': self.secgrp_uuid},),
)
| 46.590814 | 78 | 0.616301 | [
"Apache-2.0"
] | openstack/charm-octavia | unit_tests/test_lib_charm_openstack_api_crud.py | 22,317 | Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'searchform.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_searchForm(object):
def setupUi(self, searchForm):
searchForm.setObjectName("searchForm")
searchForm.setWindowModality(QtCore.Qt.ApplicationModal)
searchForm.resize(400, 300)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icon/search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
searchForm.setWindowIcon(icon)
self.layoutWidget = QtWidgets.QWidget(searchForm)
self.layoutWidget.setGeometry(QtCore.QRect(20, 110, 353, 69))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self.layoutWidget)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.lineEdit = QtWidgets.QLineEdit(self.layoutWidget)
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout.addWidget(self.lineEdit)
self.pushButton = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pushButton.setStyleSheet("background-color:white")
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.songbutton = QtWidgets.QRadioButton(self.layoutWidget)
self.songbutton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.songbutton.setTabletTracking(False)
self.songbutton.setAcceptDrops(False)
self.songbutton.setAutoFillBackground(False)
self.songbutton.setChecked(True)
self.songbutton.setAutoRepeat(False)
self.songbutton.setObjectName("songbutton")
self.horizontalLayout_2.addWidget(self.songbutton)
self.artbutton = QtWidgets.QRadioButton(self.layoutWidget)
self.artbutton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.artbutton.setObjectName("artbutton")
self.horizontalLayout_2.addWidget(self.artbutton)
self.playlistbutton = QtWidgets.QRadioButton(self.layoutWidget)
self.playlistbutton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.playlistbutton.setObjectName("playlistbutton")
self.horizontalLayout_2.addWidget(self.playlistbutton)
self.playerbutton = QtWidgets.QRadioButton(self.layoutWidget)
self.playerbutton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.playerbutton.setObjectName("playerbutton")
self.horizontalLayout_2.addWidget(self.playerbutton)
self.exitbutton = QtWidgets.QPushButton(self.layoutWidget)
self.exitbutton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.exitbutton.setStyleSheet("background-color:white")
self.exitbutton.setObjectName("exitbutton")
self.horizontalLayout_2.addWidget(self.exitbutton)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.retranslateUi(searchForm)
QtCore.QMetaObject.connectSlotsByName(searchForm)
def retranslateUi(self, searchForm):
_translate = QtCore.QCoreApplication.translate
searchForm.setWindowTitle(_translate("searchForm", "搜索"))
self.label.setText(_translate("searchForm", "搜索:"))
self.pushButton.setText(_translate("searchForm", "确认"))
self.songbutton.setText(_translate("searchForm", "歌曲"))
self.artbutton.setText(_translate("searchForm", "歌手"))
self.playlistbutton.setText(_translate("searchForm", "歌单"))
self.playerbutton.setText(_translate("searchForm", "用户"))
self.exitbutton.setText(_translate("searchForm", "退出"))
| 53 | 94 | 0.715617 | [
"MIT"
] | FNgrey/musicplayer | searchform.py | 4,433 | Python |
# Copyright (c) Contributors to the aswf-docker Project. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
CI Image and Package Builder
"""
import logging
import subprocess
import json
import os
import tempfile
import typing
from aswfdocker import constants, aswfinfo, utils, groupinfo, index
logger = logging.getLogger(__name__)
class Builder:
"""Builder generates a "docker buildx bake" json file to drive the parallel builds of Docker images."""
def __init__(
self,
build_info: aswfinfo.ASWFInfo,
group_info: groupinfo.GroupInfo,
push: bool = False,
use_conan: bool = False,
):
self.push = push
self.build_info = build_info
self.group_info = group_info
self.use_conan = use_conan
self.index = index.Index()
def make_bake_dict(self) -> typing.Dict[str, dict]:
root: typing.Dict[str, dict] = {}
root["target"] = {}
versions_to_bake = set()
for image, version in self.group_info.iter_images_versions():
use_conan = self.group_info.type == constants.ImageType.PACKAGE and (
self.use_conan
or self.index.is_conan_only_package(image.replace("ci-package-", ""))
)
major_version = utils.get_major_version(version)
version_info = self.index.version_info(major_version)
if self.group_info.type == constants.ImageType.PACKAGE:
image_base = image.replace("ci-package-", "")
group = self.index.get_group_from_image(
self.group_info.type, image_base
)
if use_conan:
if version in versions_to_bake:
# Only one version per image needed
continue
if version_info.ci_common_version != major_version:
# Only bake images for ci_common!
version = version_info.ci_common_version
major_version = utils.get_major_version(version)
versions_to_bake.add(version)
tags = list(
map(
lambda tag: f"{constants.DOCKER_REGISTRY}/{self.build_info.docker_org}"
+ f"/ci-centos7-gl-conan:{tag}",
[version, major_version],
)
)
docker_file = "packages/common/Dockerfile"
else:
tags = version_info.get_tags(
version,
self.build_info.docker_org,
image,
extra_suffix=version_info.package_versions.get(
"ASWF_"
+ image.replace("ci-package-", "").upper()
+ "_VERSION"
),
)
docker_file = f"packages/{group}/Dockerfile"
else:
tags = version_info.get_tags(version, self.build_info.docker_org, image)
docker_file = f"{image}/Dockerfile"
if version_info.ci_common_version == major_version:
channel = f"ci_common{major_version}"
else:
channel = f"vfx{version_info.major_version}"
args = {
"ASWF_ORG": self.build_info.docker_org,
"ASWF_PKG_ORG": self.build_info.package_org,
"ASWF_VERSION": version,
"CI_COMMON_VERSION": version_info.ci_common_version,
"ASWF_CONAN_CHANNEL": channel,
}
args.update(version_info.all_package_versions)
target_dict = {
"context": ".",
"dockerfile": docker_file,
"args": args,
"labels": {
"org.opencontainers.image.created": self.build_info.build_date,
"org.opencontainers.image.revision": self.build_info.vcs_ref,
},
"tags": tags,
"output": ["type=registry,push=true" if self.push else "type=docker"],
}
if self.group_info.type == constants.ImageType.PACKAGE:
if use_conan:
target_dict["target"] = "ci-centos7-gl-conan"
else:
target_dict["target"] = image
root["target"][f"{image}-{major_version}"] = target_dict
root["group"] = {"default": {"targets": list(root["target"].keys())}}
return root
def make_bake_jsonfile(self) -> typing.Optional[str]:
d = self.make_bake_dict()
if not d["group"]["default"]["targets"]:
return None
groups = "-".join(self.group_info.names)
versions = "-".join(self.group_info.versions)
path = os.path.join(
tempfile.gettempdir(),
f"docker-bake-{self.group_info.type.name}-{groups}-{versions}.json",
)
with open(path, "w", encoding="utf-8") as f:
json.dump(d, f, indent=4, sort_keys=True)
return path
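    # Illustrative sketch (not part of the original module): the bake file written
    # above roughly has the following (abridged) shape, which `docker buildx bake -f`
    # consumes; the target name, org, registry and tag below are made-up placeholders.
    #
    # {
    #     "group": {"default": {"targets": ["ci-common-1"]}},
    #     "target": {
    #         "ci-common-1": {
    #             "context": ".",
    #             "dockerfile": "ci-common/Dockerfile",
    #             "args": {"ASWF_ORG": "aswftesting", "ASWF_VERSION": "1.2"},
    #             "tags": ["docker.io/aswftesting/ci-common:1.2"],
    #             "output": ["type=docker"]
    #         }
    #     }
    # }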
def _run(self, cmd: str, dry_run: bool):
if dry_run:
logger.info("Would run: '%s'", cmd)
else:
logger.info("Building: '%s'", cmd)
subprocess.run(cmd, shell=True, check=True, cwd=self.build_info.repo_root)
def _run_in_docker(self, base_cmd, cmd, dry_run):
self._run(
" ".join(base_cmd + cmd),
dry_run=dry_run,
)
def _get_conan_env_vars(self, version_info):
envs = {
"CONAN_USER_HOME": constants.CONAN_USER_HOME,
"CCACHE_DIR": "/tmp/ccache",
"CONAN_NON_INTERACTIVE": "1",
}
if "CONAN_LOGIN_USERNAME" in os.environ:
envs["CONAN_LOGIN_USERNAME"] = os.environ["CONAN_PASSWORD"]
if "ARTIFACTORY_USER" in os.environ:
envs["CONAN_LOGIN_USERNAME"] = os.environ["ARTIFACTORY_USER"]
if "CONAN_PASSWORD" in os.environ:
envs["CONAN_PASSWORD"] = os.environ["CONAN_PASSWORD"]
if "ARTIFACTORY_TOKEN" in os.environ:
envs["CONAN_PASSWORD"] = os.environ["ARTIFACTORY_TOKEN"]
for name, value in version_info.all_package_versions.items():
envs[name] = value
return envs
def _get_conan_vols(self):
conan_base = os.path.join(utils.get_git_top_level(), "packages", "conan")
vols = {
os.path.join(conan_base, "settings"): os.path.join(
constants.CONAN_USER_HOME, ".conan"
),
os.path.join(conan_base, "data"): os.path.join(
constants.CONAN_USER_HOME, "d"
),
os.path.join(conan_base, "recipes"): os.path.join(
constants.CONAN_USER_HOME, "recipes"
),
os.path.join(conan_base, "ccache"): "/tmp/ccache",
}
return vols
def _get_conan_base_cmd(self, version_info):
base_cmd = ["docker", "run"]
for name, value in self._get_conan_env_vars(version_info).items():
base_cmd.append("-e")
base_cmd.append(f"{name}={value}")
for name, value in self._get_conan_vols().items():
base_cmd.append("-v")
base_cmd.append(f"{name}:{value}")
tag = (
f"{constants.DOCKER_REGISTRY}/{self.build_info.docker_org}"
+ f"/ci-centos7-gl-conan:{version_info.ci_common_version}"
)
base_cmd.append(tag)
return base_cmd
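    # Illustrative note (not part of the original module): the list built above
    # expands to roughly
    #   docker run -e CONAN_USER_HOME=... -e CCACHE_DIR=/tmp/ccache ... \
    #       -v <repo>/packages/conan/settings:<CONAN_USER_HOME>/.conan ... \
    #       <registry>/<docker_org>/ci-centos7-gl-conan:<ci_common_version>
    # and _run_in_docker() appends the conan sub-command (e.g. `conan create ...`)
    # before running the whole thing at the repository root.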
def _build_conan_package(
self,
image,
version,
dry_run,
keep_source,
keep_build,
conan_login,
build_missing,
):
major_version = utils.get_major_version(version)
version_info = self.index.version_info(major_version)
base_cmd = self._get_conan_base_cmd(version_info)
if conan_login:
self._run_in_docker(
base_cmd,
[
"conan",
"user",
"-p",
"-r",
self.build_info.docker_org,
],
dry_run,
)
self._run_in_docker(
base_cmd,
[
"conan",
"config",
"set",
f"general.default_profile={version_info.conan_profile}",
],
dry_run,
)
full_version = version_info.package_versions.get(
"ASWF_" + image.upper() + "_VERSION"
)
conan_version = (
f"{image}/{full_version}"
f"@{self.build_info.docker_org}/{version_info.conan_profile}"
)
build_cmd = [
"conan",
"create",
os.path.join(constants.CONAN_USER_HOME, "recipes", image),
conan_version,
]
if keep_source:
build_cmd.append("--keep-source")
if keep_build:
build_cmd.append("--keep-build")
if build_missing:
build_cmd.append("--build=missing")
self._run_in_docker(
base_cmd,
build_cmd,
dry_run,
)
alias_version = (
f"{image}/latest"
f"@{self.build_info.docker_org}/{version_info.conan_profile}"
)
self._run_in_docker(
base_cmd,
[
"conan",
"alias",
alias_version,
conan_version,
],
dry_run,
)
if self.push:
self._run_in_docker(
base_cmd,
[
"conan",
"upload",
"--all",
"-r",
self.build_info.docker_org,
conan_version,
],
dry_run,
)
self._run_in_docker(
base_cmd,
[
"conan",
"upload",
"--all",
"-r",
self.build_info.docker_org,
alias_version,
],
dry_run,
)
def build(
self,
dry_run: bool = False,
progress: str = "",
keep_source=False,
keep_build=False,
conan_login=False,
build_missing=False,
) -> None:
images_and_versions = []
for image, version in self.group_info.iter_images_versions(get_image=True):
if (
self.group_info.type == constants.ImageType.PACKAGE
and not self.use_conan
and self.index.is_conan_only_package(image)
):
logger.warning("Skipping %s as it is a conan-only package!", image)
continue
images_and_versions.append((image, version))
if not images_and_versions:
return
path = self.make_bake_jsonfile()
if path:
self._run(
f"docker buildx bake -f {path} --progress {progress}", dry_run=dry_run
)
if not self.use_conan or self.group_info.type == constants.ImageType.IMAGE:
return
conan_base = os.path.join(utils.get_git_top_level(), "packages", "conan")
for image, version in images_and_versions:
recipe_path = os.path.join(conan_base, "recipes", image)
if not os.path.exists(recipe_path):
logger.warning("Recipe for %s not found: skipping!", image)
continue
self._build_conan_package(
image,
version,
dry_run,
keep_source,
keep_build,
conan_login,
build_missing,
)
| 35.637982 | 107 | 0.511324 | [
"Apache-2.0"
] | aloysbaillet/aswf-docker | python/aswfdocker/builder.py | 12,010 | Python |
# Mu Young
# Balrog Entry
from net.swordie.ms.constants import BossConstants
from net.swordie.ms.constants import GameConstants
options = {
0 : BossConstants.BALROG_EASY_BATTLE_MAP,
1 : BossConstants.BALROG_HARD_BATTLE_MAP
}
if not sm.isPartyLeader():
sm.sendSayOkay("Please have your party leader speak to me..")
elif sm.sendAskYesNo("Greetings, o weary traveller, you have arrived at the Balrog temple.\r\nDo you wish to battle the Balrog?"):
choice = sm.sendNext("Your strength should surpass level 50 for the easy Balrog, and level 70 for the hard Balrog.\r\n#L0#Easy#l\r\n#L1#Hard#l")
sm.sendSayOkay("Good luck, traveller.")
sm.warpInstanceIn(options[choice], True)
sm.setInstanceTime(BossConstants.BALROG_TIME_LIMIT, BossConstants.BALROG_ENTRY_MAP)
else:
sm.sendSayOkay("I am disappointed, but I respect you for knowing your limitations.") | 45.157895 | 145 | 0.782051 | [
"MIT"
] | Descended/Spirit-V179 | scripts/npc/balog_accept.py | 858 | Python |
#
#
# Needs to be expanded to accommodate the common occurrence of sparse.multiSparse objects in the geounitNode class vs pure numpy arrays
#
#
import os
import sys
# If there is __init__.py in the directory where this file is, then Python adds das_decennial directory to sys.path
# automatically. Not sure why and how it works, therefore, keeping the following line as a double
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
import programs.engine.nodes as nodes
import numpy as np
def buildTestNode():
raw = np.array([0,1,2,3,4])
syn = np.array([6,7,8,9,5])
geocode = "0"
geolevel = "National"
node = nodes.geounitNode(geocode, geolevel=geolevel, raw=raw, syn=syn, geocodeDict={16: "Block", 12: "Block_Group", 11: "Tract", 5: "County", 2: "State", 1: "National"})
return node
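# Illustrative note (not part of the original test): geocodeDict maps geocode string
# lengths to geographic levels, so the 1-character geocode "0" used above corresponds
# to the "National" root, while a 16-character code would identify a "Block".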
# NODES NO LONGER HAVE toJSON, as this is not needed
# def test_slotsToJSON():
# node = buildTestNode()
#
# jsonStr = node.toJSON()
# assert jsonStr == '{"geocode": "0", "geocodeDict": {"16": "Block", "12": "Block_Group", "11": "Tract", "5": "County", "2": "State", "1": "National"}, "geolevel": "National", "parentGeocode": "0", "raw": [0, 1, 2, 3, 4], "dp": null, "syn": [6, 7, 8, 9, 5], "syn_unrounded": null, "cons": null, "invar": null, "dp_queries": null, "congDistGeocode": null, "sldlGeocode": null, "slduGeocode": null, "minimalSchemaArray": null, "grbVars": null, "grbPenaltyVarsPos": null, "grbPenaltyVarsNeg": null, "ancestorsDP": null, "ancestorsRaw": null}'
#
# jsonStr = node.toJSON(keepAttrs=["raw", "syn"])
# assert jsonStr == '{"raw": [0, 1, 2, 3, 4], "syn": [6, 7, 8, 9, 5]}'
#
# jsontuple = node.toJSON(addClassName=True)
# assert jsontuple == ('geounitNode', '{"geocode": "0", "geocodeDict": {"16": "Block", "12": "Block_Group", "11": "Tract", "5": "County", "2": "State", "1": "National"}, "geolevel": "National", "parentGeocode": "0", "raw": [0, 1, 2, 3, 4], "dp": null, "syn": [6, 7, 8, 9, 5], "syn_unrounded": null, "cons": null, "invar": null, "dp_queries": null, "congDistGeocode": null, "sldlGeocode": null, "slduGeocode": null, "minimalSchemaArray": null, "grbVars": null, "grbPenaltyVarsPos": null, "grbPenaltyVarsNeg": null, "ancestorsDP": null, "ancestorsRaw": null}')
#
# classname, jsonStr = jsontuple
# assert classname == 'geounitNode'
# assert jsonStr == '{"geocode": "0", "geocodeDict": {"16": "Block", "12": "Block_Group", "11": "Tract", "5": "County", "2": "State", "1": "National"}, "geolevel": "National", "parentGeocode": "0", "raw": [0, 1, 2, 3, 4], "dp": null, "syn": [6, 7, 8, 9, 5], "syn_unrounded": null, "cons": null, "invar": null, "dp_queries": null, "congDistGeocode": null, "sldlGeocode": null, "slduGeocode": null, "minimalSchemaArray": null, "grbVars": null, "grbPenaltyVarsPos": null, "grbPenaltyVarsNeg": null, "ancestorsDP": null, "ancestorsRaw": null}'
| 63.478261 | 562 | 0.642466 | [
"CC0-1.0"
] | dkifer/census2020-das-e2e | programs/engine/unit_tests/json_nodes_test.py | 2,920 | Python |
"""
Copyright June 25, 2020 Richard Koshak
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import traceback
from datetime import datetime, date, time, timedelta
from core.log import logging, LOG_PREFIX
from core.date import to_python_datetime, to_java_zoneddatetime
from core.jsr223 import scope
from java.time import ZonedDateTime
from java.time.temporal import ChronoUnit
# improve typing and linting as per
# https://github.com/CrazyIvan359/openhab-stubs/blob/master/Usage.md
import typing as t
if t.TYPE_CHECKING:
basestring = str
unicode = str
else:
basestring = basestring # type: ignore # pylint: disable=self-assigning-variable
unicode = unicode # type: ignore # pylint: disable=self-assigning-variable
duration_regex = re.compile(r'^((?P<days>[\.\d]+?)d)? *((?P<hours>[\.\d]+?)h)? *((?P<minutes>[\.\d]+?)m)? *((?P<seconds>[\.\d]+?)s)?$')
iso8601_regex = re.compile(r'^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$')
def parse_duration(time_str, log=logging.getLogger("{}.time_utils".format(LOG_PREFIX))):
"""Parse a time string e.g. (2h13m) into a timedelta object
https://stackoverflow.com/questions/4628122/how-to-construct-a-timedelta-object-from-a-simple-string
Arguments:
- time_str: A string identifying a duration. Use
- d: days
- h: hours
- m: minutes
- s: seconds
All options are optional but at least one needs to be supplied. Float
values are allowed (e.g. "1.5d" is the same as "1d12h"). Spaces
between each field is allowed. Examples:
- 1h 30m 45s
- 1h05s
- 55h 59m 12s
- log: optional, logger object for logging a warning if the passed in
string is not parsable. A "time_utils" logger will be used if not
supplied.
Returns:
A ``datetime.timedelta`` object representing the supplied time duration
or ``None`` if ``time_str`` cannot be parsed.
"""
parts = duration_regex.match(time_str)
if parts is None:
log.warn("Could not parse any time information from '{}'. Examples "
"of valid strings: '8h', '2d8h5m20s', '2m 4s'"
.format(time_str))
return None
else:
time_params = {name: float(param) for name, param in parts.groupdict().items() if param}
return timedelta(**time_params)
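# Hypothetical usage sketch (not part of the original library): a few inputs and the
# timedelta values parse_duration produces; the helper below is illustrative only and
# is never called by this module.
def _example_parse_duration():
    assert parse_duration("1h 30m") == timedelta(hours=1, minutes=30)
    assert parse_duration("2d8h5m20s") == timedelta(days=2, hours=8, minutes=5, seconds=20)
    assert parse_duration("not a duration") is None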
def delta_to_datetime(td):
"""Takes a Python timedelta Object and converts it to a ZonedDateTime from now.
Arguments:
- td: The Python datetime.timedelta Object
Returns:
A ZonedDateTime td from now.
"""
return (ZonedDateTime.now().plusDays(td.days)
.plusSeconds(td.seconds)
.plusNanos(td.microseconds//1000 * 1000000))
def parse_duration_to_datetime(time_str, log=logging.getLogger("{}.time_utils".format(LOG_PREFIX))):
"""Parses the passed in time string (see parse_duration) and returns a
ZonedDateTime that amount of time from now.
Arguments:
- time_str: A string identifying a duration. See parse_duration above
Returns:
A ZonedDateTime time_str from now
"""
return delta_to_datetime(parse_duration(time_str, log))
def is_iso8601(dt_str):
"""Returns True if dt_str conforms to ISO 8601
Arguments:
- dt_str: the String to check
Returns:
        True if dt_str conforms to ISO 8601 and False otherwise
"""
try:
if iso8601_regex.match(dt_str) is not None:
return True
except:
pass
return False
def to_datetime(when, log=logging.getLogger("{}.time_utils".format(LOG_PREFIX)), output = 'Java'):
"""Based on what type when is, converts when to a Python DateTime object.
Type:
- int: returns now.plusMillis(when)
- openHAB number type: returns now.plusMillis(when.intValue())
- ISO8601 string: DateTime(when)
- Duration definition: see parse_duration_to_datetime
- java ZonedDateTime
          For Python, pass the ZonedDateTime directly rather than first assigning it to a variable when
          calling this function; otherwise a java.time.sql object will be returned due to a bug in Jython
- Python datetime
        - Python time: returns a ZonedDateTime with today's date and the system timezone
Arguments:
- when: the Object to convert to a DateTime
- log: optional logger, when not supplied one is created for logging errors
- output: object returned as a string. If not specified returns a ZonedDateTime object
'Python': return datetime object
'Java': return a ZonedDateTime object
Returns:
- ZonedDateTime specified by when
- datetime specified by when if output = 'Python'
- ZonedDateTime specified by when if output = 'Java'
"""
log.debug('when is: ' + str(when) + ' output is ' + str(output))
dt_python = None
dt_java = None
try:
if isinstance(when, (str, unicode)):
if is_iso8601(when):
log.debug('when is iso8601: '+str(when))
dt_java = ZonedDateTime.parse(str(when))
else:
log.debug('when is duration: ' + str(when))
dt_python = datetime.now() + parse_duration(when, log)
elif isinstance(when, int):
log.debug('when is int: ' + str(when))
dt_java = ZonedDateTime.now().plus(when, ChronoUnit.MILLIS)
elif isinstance(when, (scope.DateTimeType)):
log.debug('when is DateTimeType: ' + str(when))
dt_java = when.getZonedDateTime()
elif isinstance(when, (scope.DecimalType, scope.PercentType, scope.QuantityType)):
log.debug('when is decimal, percent or quantity type: ' + str(when))
dt_python = datetime.now() + timedelta(milliseconds = when.intValue())
elif isinstance(when, datetime):
log.debug('when is datetime: ' + str(when))
dt_python = when
elif isinstance(when, ZonedDateTime):
log.debug('when is ZonedDateTime: ' + str(when))
dt_java = when
elif isinstance(when, time):
log.debug('when is python time object: ' + str(when))
dt_java = ZonedDateTime.now() \
.withHour(when.hour) \
.withMinute(when.minute) \
.withSecond(when.second) \
.withNano(when.microsecond * 1000) # nanos need to be set, otherwise they_ll be taken from the actual time
else:
log.warn('When is an unknown type {}'.format(type(when)))
return None
except:
log.error('Exception: {}'.format(traceback.format_exc()))
if output == 'Python':
log.debug('returning dt python')
return dt_python if dt_python is not None else to_python_datetime(dt_java)
elif output == 'Java':
log.debug("returning dt java")
return dt_java if dt_java is not None else to_java_zoneddatetime(dt_python)
elif output == 'Joda':
log.error("to_datetime trying to return dt joda - use output = 'Python' or output = 'Java' instead")
else:
log.error("to_datetime cannot output [{}]".format(output))
def to_today(when, log=logging.getLogger("{}.time_utils".format(LOG_PREFIX)), output='Java'):
"""Takes a when (see to_datetime) and updates the date to today.
Arguments:
- when : One of the types or formats supported by to_datetime
- log: optional logger, when not supplied one is created for logging errors
Returns:
- ZonedDateTime specified by when with today's date.
- datetime specified by when with today's date if output = 'Python'
- ZonedDateTime specified by when with today's date if output = 'Java'
"""
log.debug('output is: '+ str(output))
if output == 'Python':
dt = to_datetime(when, log=log, output = 'Python')
return datetime.combine(date.today(), dt.timetz())
elif output == 'Java':
dt = to_datetime(when, log=log, output = 'Java')
now = dt.now()
return (now.withHour(dt.getHour())
.withMinute(dt.getMinute())
.withSecond(dt.getSecond())
.withNano(dt.getNano()))
elif output == 'Joda':
log.error("to_today trying to return dt joda - use output = 'Python' or output = 'Java' instead")
else:
log.error("to_today cannot output [{}]".format(output))
| 39.90393 | 198 | 0.63132 | [
"Apache-2.0"
] | cherub-i/openhab-rules-tools | time_utils/automation/lib/python/community/time_utils.py | 9,138 | Python |
"""
Copyright (C) 2020 Vanessa Sochat.
This Source Code Form is subject to the terms of the
Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
import shutil
import os
here = os.path.abspath(os.path.dirname(__file__))
def get_template(name):
"""Given the name of a template (an entire folder in the directory here)
Return the full path to the folder, with the intention to copy it somewhere.
"""
template = os.path.join(here, name)
if os.path.exists(template):
return template
def copy_template(name, dest):
"""Given a template name and a destination directory, copy the template
to the desination directory.
"""
template = get_template(name)
dest_dir = os.path.dirname(dest)
if template and os.path.exists(dest_dir):
shutil.copytree(template, dest)
return dest
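# Hypothetical usage sketch (not part of the original module): copying a bundled
# template folder next to a package under test; the template name and destination
# path below are made-up placeholders.
#
#     dest = copy_template("python", "/tmp/mypackage/gridtest-template")
#     if dest is None:
#         print("template not found, or destination parent directory missing")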
| 26.771429 | 83 | 0.693703 | [
"MPL-2.0"
] | khinsen/gridtest | gridtest/templates/__init__.py | 937 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Display a calendar populated from google calendar data on an inky display."""
from PIL import Image, ImageDraw # type: ignore
# from typing import Tuple
# import time
def draw_what_sheet(image: Image.Image) -> None:
"""Draw a calendar page for a WHAT display.
Args:
image: The image to be drawn on to
"""
draw = ImageDraw.Draw(image)
# draw.rectangle([(7, 3), (392, 296)], outline=1)
draw.line([(7, 3), (392, 3)], fill=1)
for line in range(8):
draw.line([(line * 55 + 7, 3), (line * 55 + 7, 296)], fill=1)
for line in range(7):
draw.line([(7, line * 45 + 26), (392, line * 45 + 26)], fill=1)
if __name__ == "__main__":
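    # Build a 256-colour palette (768 RGB values): index 0 = white, 1 = black,
    # 2 = red, matching the display's three inks; the remainder is zero padding.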
palette = 3 * [255]
palette += 3 * [0]
palette += [255, 0, 0]
palette += 759 * [0]
img = Image.new("P", (400, 300), color=0)
draw_what_sheet(img)
img.putpalette(palette)
img.save("calendar.png")
try:
from inky import InkyWHAT # type: ignore
except RuntimeError:
pass
except ModuleNotFoundError:
pass
else:
inky_display = InkyWHAT("red")
inky_display.set_image(img)
inky_display.set_border(inky_display.BLACK)
inky_display.show()
| 24.941176 | 80 | 0.591195 | [
"MIT"
] | nukes327/inky_monitor | inky_calendar.py | 1,272 | Python |
""" Utility functions. """
import tensorflow as tf
def get_shape(tensor, dynamic=False):
""" Return shape of the input tensor without batch size.
Parameters
----------
tensor : tf.Tensor
dynamic : bool
If True, returns tensor which represents shape. If False, returns list of ints and/or Nones.
Returns
-------
shape : tf.Tensor or list
"""
if dynamic:
shape = tf.shape(tensor)
else:
shape = tensor.get_shape().as_list()
return shape[1:]
def get_num_dims(tensor):
""" Return a number of semantic dimensions (i.e. excluding batch and channels axis)"""
shape = get_shape(tensor)
dim = len(shape)
return max(1, dim - 2)
def get_channels_axis(data_format='channels_last'):
""" Return the integer channels axis based on string data format. """
return 1 if data_format == "channels_first" or data_format.startswith("NC") else -1
def get_num_channels(tensor, data_format='channels_last'):
""" Return number of channels in the input tensor.
Parameters
----------
tensor : tf.Tensor
Returns
-------
    n_channels : int
"""
shape = tensor.get_shape().as_list()
axis = get_channels_axis(data_format)
return shape[axis]
def get_batch_size(tensor, dynamic=False):
""" Return batch size (the length of the first dimension) of the input tensor.
Parameters
----------
tensor : tf.Tensor
Returns
-------
batch size : int or None
"""
if dynamic:
return tf.shape(tensor)[0]
return tensor.get_shape().as_list()[0]
def get_spatial_dim(tensor):
""" Return spatial dim of the input tensor (without channels and batch dimension).
Parameters
----------
tensor : tf.Tensor
Returns
-------
dim : int
"""
return len(tensor.get_shape().as_list()) - 2
def get_spatial_shape(tensor, data_format='channels_last', dynamic=False):
""" Return the tensor spatial shape (without batch and channels dimensions).
Parameters
----------
tensor : tf.Tensor
dynamic : bool
If True, returns tensor which represents shape. If False, returns list of ints and/or Nones.
Returns
-------
shape : tf.Tensor or list
"""
if dynamic:
shape = tf.shape(tensor)
else:
shape = tensor.get_shape().as_list()
axis = slice(1, -1) if data_format == "channels_last" else slice(2, None)
return shape[axis]
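# Hypothetical usage sketch (not part of the original module); never called here.
def _example_shape_helpers():
    """ Show how the helpers above relate for a batch of 8 RGB images
    of size 64x64 in channels-last format. """
    images = tf.ones((8, 64, 64, 3))
    assert get_shape(images) == [64, 64, 3]
    assert get_num_channels(images) == 3
    assert get_channels_axis('channels_last') == -1
    assert get_spatial_shape(images) == [64, 64]
    assert get_spatial_dim(images) == 2
    assert get_batch_size(images) == 8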
| 24.196078 | 100 | 0.627229 | [
"Apache-2.0"
] | bestetc/batchflow | batchflow/models/tf/utils.py | 2,468 | Python |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts, mocks and fixtures for the test suite
"""
import time
from nova import db
from nova import utils
from nova.compute import task_states
from nova.compute import vm_states
def stub_out_db_instance_api(stubs):
"""Stubs out the db API for creating Instances."""
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
'm1.medium':
dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
'm1.xlarge':
dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
if key in self.values:
return self.values[key]
else:
raise NotImplementedError()
def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""
type_data = INSTANCE_TYPES[values['instance_type']]
base_options = {
'name': values['name'],
'id': values['id'],
'uuid': utils.gen_uuid(),
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'vm_state': vm_states.BUILDING,
'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'instance_type': values['instance_type'],
'memory_mb': type_data['memory_mb'],
'vcpus': type_data['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
'root_gb': type_data['root_gb'],
}
return FakeModel(base_options)
def fake_network_get_by_instance(context, instance_id):
"""Stubs out the db.network_get_by_instance method."""
fields = {
'bridge': 'vmnet0',
'netmask': '255.255.255.0',
'gateway': '10.10.10.1',
'broadcast': '10.10.10.255',
'dns1': 'fake',
'vlan': 100}
return FakeModel(fields)
def fake_instance_type_get_all(context, inactive=0, filters=None):
return INSTANCE_TYPES.values()
def fake_instance_type_get_by_name(context, name):
return INSTANCE_TYPES[name]
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
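# Hypothetical usage sketch (not part of the original module): a test case owning a
# stubout.StubOutForTesting() instance would install these fakes before exercising
# code that hits the db layer, e.g.:
#
#     self.stubs = stubout.StubOutForTesting()
#     db_fakes.stub_out_db_instance_api(self.stubs)
#     instance = db.instance_create(context, values)  # returns a FakeModel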
| 35.352381 | 78 | 0.626347 | [
"Apache-2.0"
] | bopopescu/openstack-12 | nova/tests/vmwareapi/db_fakes.py | 3,712 | Python |
from bs4 import BeautifulSoup
import article
import requester
root_url = "http://www.nec-nijmegen.nl/"
source_url = "https://www.nec-nijmegen.nl/nieuws.htm"
def make_request():
html = requester.get_html(source_url)
return html
def get_articles():
print("Getting articles from: " + source_url)
html = make_request()
soup = BeautifulSoup(html, "html.parser")
lis = soup.find_all("div", class_="item")
arts = []
for eles in lis:
print()
art = article.Article()
art.title = eles.a.get("title")
art.url = eles.a.get("href")
art.full_url = root_url + art.url
art.source = source_url
arts.append(art)
return arts
| 22.0625 | 53 | 0.631728 | [
"MIT"
] | midasvo/nec-bot | nec_sources/necnijmegen.py | 706 | Python |
#!/usr/bin/env python3
import socket
from util import ip4_range
import unittest
from framework import tag_fixme_vpp_workers
from framework import VppTestCase, VppTestRunner
from template_bd import BridgeDomain
from scapy.layers.l2 import Ether
from scapy.packet import Raw
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from scapy.contrib.gtp import GTP_U_Header
from scapy.utils import atol
import util
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_ip import INVALID_INDEX
@tag_fixme_vpp_workers
class TestGtpuUDP(VppTestCase):
""" GTPU UDP ports Test Case """
def setUp(self):
super(TestGtpuUDP, self).setUp()
self.dport = 2152
self.ip4_err = 0
self.ip6_err = 0
self.create_pg_interfaces(range(1))
for pg in self.pg_interfaces:
pg.admin_up()
self.pg0.config_ip4()
self.pg0.config_ip6()
def _check_udp_port_ip4(self, enabled=True):
pkt = (Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0))
self.pg0.add_stream(pkt)
self.pg_start()
err = self.statistics.get_counter(
'/err/ip4-udp-lookup/No listener for dst port')[0]
if enabled:
self.assertEqual(err, self.ip4_err)
else:
self.assertEqual(err, self.ip4_err + 1)
self.ip4_err = err
def _check_udp_port_ip6(self, enabled=True):
pkt = (Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac) /
IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6) /
UDP(sport=self.dport, dport=self.dport, chksum=0))
self.pg0.add_stream(pkt)
self.pg_start()
err = self.statistics.get_counter(
'/err/ip6-udp-lookup/No listener for dst port')[0]
if enabled:
self.assertEqual(err, self.ip6_err)
else:
self.assertEqual(err, self.ip6_err + 1)
self.ip6_err = err
def test_udp_port(self):
""" test UDP ports
Check if there are no udp listeners before gtpu is enabled
"""
# UDP ports should be disabled unless a tunnel is configured
self._check_udp_port_ip4(False)
self._check_udp_port_ip6(False)
r = self.vapi.gtpu_add_del_tunnel(is_add=True,
mcast_sw_if_index=0xFFFFFFFF,
decap_next_index=0xFFFFFFFF,
src_address=self.pg0.local_ip4,
dst_address=self.pg0.remote_ip4)
# UDP port 2152 enabled for ip4
self._check_udp_port_ip4()
r = self.vapi.gtpu_add_del_tunnel(is_add=True,
mcast_sw_if_index=0xFFFFFFFF,
decap_next_index=0xFFFFFFFF,
src_address=self.pg0.local_ip6,
dst_address=self.pg0.remote_ip6)
# UDP port 2152 enabled for ip6
self._check_udp_port_ip6()
r = self.vapi.gtpu_add_del_tunnel(is_add=False,
mcast_sw_if_index=0xFFFFFFFF,
decap_next_index=0xFFFFFFFF,
src_address=self.pg0.local_ip4,
dst_address=self.pg0.remote_ip4)
r = self.vapi.gtpu_add_del_tunnel(is_add=False,
mcast_sw_if_index=0xFFFFFFFF,
decap_next_index=0xFFFFFFFF,
src_address=self.pg0.local_ip6,
dst_address=self.pg0.remote_ip6)
class TestGtpu(BridgeDomain, VppTestCase):
""" GTPU Test Case """
def __init__(self, *args):
BridgeDomain.__init__(self)
VppTestCase.__init__(self, *args)
def encapsulate(self, pkt, vni):
"""
Encapsulate the original payload frame by adding GTPU header with its
UDP, IP and Ethernet fields
"""
return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
GTP_U_Header(teid=vni, gtp_type=self.gtp_type, length=150) /
pkt)
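    # Illustrative note (not part of the original test): the frame returned above
    # stacks as Ether / IP / UDP(dport=2152) / GTP_U_Header(teid=vni) / <inner frame>,
    # which is what check_encapsulation() later verifies field by field.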
def ip_range(self, start, end):
""" range of remote ip's """
return ip4_range(self.pg0.remote_ip4, start, end)
def encap_mcast(self, pkt, src_ip, src_mac, vni):
"""
Encapsulate the original payload frame by adding GTPU header with its
UDP, IP and Ethernet fields
"""
return (Ether(src=src_mac, dst=self.mcast_mac) /
IP(src=src_ip, dst=self.mcast_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
GTP_U_Header(teid=vni, gtp_type=self.gtp_type, length=150) /
pkt)
def decapsulate(self, pkt):
"""
Decapsulate the original payload frame by removing GTPU header
"""
return pkt[GTP_U_Header].payload
# Method for checking GTPU encapsulation.
#
def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
# Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
# by VPP using ARP.
self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
else:
self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
# Verify GTPU tunnel source IP is VPP_IP and destination IP is MY_IP.
self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
else:
self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
# Verify UDP destination port is GTPU 2152, source UDP port could be
# arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
# Verify teid
self.assertEqual(pkt[GTP_U_Header].teid, vni)
def test_encap(self):
""" Encapsulation test
Send frames from pg1
Verify receipt of encapsulated frames on pg0
"""
self.pg1.add_stream([self.frame_reply])
self.pg0.enable_capture()
self.pg_start()
# Pick first received frame and check if it's correctly encapsulated.
out = self.pg0.get_capture(1)
pkt = out[0]
self.check_encapsulation(pkt, self.single_tunnel_vni)
# payload = self.decapsulate(pkt)
# self.assert_eq_pkts(payload, self.frame_reply)
def test_ucast_flood(self):
""" Unicast flood test
Send frames from pg3
Verify receipt of encapsulated frames on pg0
"""
self.pg3.add_stream([self.frame_reply])
self.pg0.enable_capture()
self.pg_start()
# Get packet from each tunnel and assert it's correctly encapsulated.
out = self.pg0.get_capture(self.n_ucast_tunnels)
for pkt in out:
self.check_encapsulation(pkt, self.ucast_flood_bd, True)
# payload = self.decapsulate(pkt)
# self.assert_eq_pkts(payload, self.frame_reply)
def test_mcast_flood(self):
""" Multicast flood test
Send frames from pg2
Verify receipt of encapsulated frames on pg0
"""
self.pg2.add_stream([self.frame_reply])
self.pg0.enable_capture()
self.pg_start()
# Pick first received frame and check if it's correctly encapsulated.
out = self.pg0.get_capture(1)
pkt = out[0]
self.check_encapsulation(pkt, self.mcast_flood_bd,
local_only=False, mcast_pkt=True)
# payload = self.decapsulate(pkt)
# self.assert_eq_pkts(payload, self.frame_reply)
@classmethod
def create_gtpu_flood_test_bd(cls, teid, n_ucast_tunnels):
# Create 10 ucast gtpu tunnels under bd
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
next_hop_address = cls.pg0.remote_ip4
for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
ip_range_end):
# add host route so dest_ip4 will not be resolved
rip = VppIpRoute(cls, dest_ip4, 32,
[VppRoutePath(next_hop_address,
INVALID_INDEX)],
register=False)
rip.add_vpp_config()
r = cls.vapi.gtpu_add_del_tunnel(
is_add=True,
mcast_sw_if_index=0xFFFFFFFF,
decap_next_index=0xFFFFFFFF,
src_address=cls.pg0.local_ip4,
dst_address=dest_ip4,
teid=teid)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=teid)
@classmethod
def add_del_shared_mcast_dst_load(cls, is_add):
"""
add or del tunnels sharing the same mcast dst
to test gtpu ref_count mechanism
"""
n_shared_dst_tunnels = 20
teid_start = 1000
teid_end = teid_start + n_shared_dst_tunnels
for teid in range(teid_start, teid_end):
r = cls.vapi.gtpu_add_del_tunnel(
decap_next_index=0xFFFFFFFF,
src_address=cls.pg0.local_ip4,
dst_address=cls.mcast_ip4,
mcast_sw_if_index=1,
teid=teid,
is_add=is_add)
if r.sw_if_index == 0xffffffff:
raise ValueError("bad sw_if_index: ~0")
@classmethod
def add_shared_mcast_dst_load(cls):
cls.add_del_shared_mcast_dst_load(is_add=1)
@classmethod
def del_shared_mcast_dst_load(cls):
cls.add_del_shared_mcast_dst_load(is_add=0)
@classmethod
def add_del_mcast_tunnels_load(cls, is_add):
"""
add or del tunnels to test gtpu stability
"""
n_distinct_dst_tunnels = 20
ip_range_start = 10
ip_range_end = ip_range_start + n_distinct_dst_tunnels
for dest_ip4 in ip4_range(cls.mcast_ip4, ip_range_start,
ip_range_end):
teid = int(dest_ip4.split('.')[3])
cls.vapi.gtpu_add_del_tunnel(
decap_next_index=0xFFFFFFFF,
src_address=cls.pg0.local_ip4,
dst_address=dest_ip4,
mcast_sw_if_index=1,
teid=teid,
is_add=is_add)
@classmethod
def add_mcast_tunnels_load(cls):
cls.add_del_mcast_tunnels_load(is_add=1)
@classmethod
def del_mcast_tunnels_load(cls):
cls.add_del_mcast_tunnels_load(is_add=0)
# Class method to start the GTPU test case.
# Overrides setUpClass method in VppTestCase class.
# Python try..except statement is used to ensure that the tear down of
# the class will be executed even if exception is raised.
# @param cls The class pointer.
@classmethod
def setUpClass(cls):
super(TestGtpu, cls).setUpClass()
try:
cls.dport = 2152
cls.gtp_type = 0xff
# Create 2 pg interfaces.
cls.create_pg_interfaces(range(4))
for pg in cls.pg_interfaces:
pg.admin_up()
# Configure IPv4 addresses on VPP pg0.
cls.pg0.config_ip4()
# Resolve MAC address for VPP's IP address on pg0.
cls.pg0.resolve_arp()
# Our Multicast address
cls.mcast_ip4 = '239.1.1.1'
cls.mcast_mac = util.mcast_ip_to_mac(cls.mcast_ip4)
# Create GTPU VTEP on VPP pg0, and put gtpu_tunnel0 and pg1
# into BD.
cls.single_tunnel_bd = 11
cls.single_tunnel_vni = 11
r = cls.vapi.gtpu_add_del_tunnel(
is_add=True,
mcast_sw_if_index=0xFFFFFFFF,
decap_next_index=0xFFFFFFFF,
src_address=cls.pg0.local_ip4,
dst_address=cls.pg0.remote_ip4,
teid=cls.single_tunnel_vni)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd)
# Setup teid 2 to test multicast flooding
cls.n_ucast_tunnels = 10
cls.mcast_flood_bd = 12
cls.create_gtpu_flood_test_bd(cls.mcast_flood_bd,
cls.n_ucast_tunnels)
r = cls.vapi.gtpu_add_del_tunnel(
is_add=True,
src_address=cls.pg0.local_ip4,
dst_address=cls.mcast_ip4,
mcast_sw_if_index=1,
decap_next_index=0xFFFFFFFF,
teid=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg2.sw_if_index, bd_id=cls.mcast_flood_bd)
# Add and delete mcast tunnels to check stability
cls.add_shared_mcast_dst_load()
cls.add_mcast_tunnels_load()
cls.del_shared_mcast_dst_load()
cls.del_mcast_tunnels_load()
# Setup teid 3 to test unicast flooding
cls.ucast_flood_bd = 13
cls.create_gtpu_flood_test_bd(cls.ucast_flood_bd,
cls.n_ucast_tunnels)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd)
except Exception:
super(TestGtpu, cls).tearDownClass()
raise
@classmethod
def tearDownClass(cls):
super(TestGtpu, cls).tearDownClass()
# Method to define VPP actions before tear down of the test case.
# Overrides tearDown method in VppTestCase class.
# @param self The object pointer.
def tearDown(self):
super(TestGtpu, self).tearDown()
def show_commands_at_teardown(self):
self.logger.info(self.vapi.cli("show bridge-domain 11 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 12 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 13 detail"))
self.logger.info(self.vapi.cli("show int"))
self.logger.info(self.vapi.cli("show gtpu tunnel"))
self.logger.info(self.vapi.cli("show trace"))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| 36.702899 | 79 | 0.590326 | [
"Apache-2.0"
] | B4dM4n/vpp | test/test_gtpu.py | 15,195 | Python |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
from tornado import web
from .. import _load_handler_from_location
from ...utils import clean_filename
from ...utils import quote
from ...utils import response_text
from ...utils import url_path_join
from ..base import BaseHandler
from ..base import cached
from ..base import RenderingHandler
from ..github.handlers import GithubClientMixin
class GistClientMixin(GithubClientMixin):
# PROVIDER_CTX is a dictionary whose entries are passed as keyword arguments
# to the render_template method of the GistHandler. The following describe
# the information contained in each of these keyword arguments:
# provider_label: str
    #     Text to apply to the navbar icon linking to the provider
# provider_icon: str
# CSS classname to apply to the navbar icon linking to the provider
# executor_label: str, optional
# Text to apply to the navbar icon linking to the execution service
# executor_icon: str, optional
# CSS classname to apply to the navbar icon linking to the execution service
PROVIDER_CTX = {
"provider_label": "Gist",
"provider_icon": "github-square",
"executor_label": "Binder",
"executor_icon": "icon-binder",
}
BINDER_TMPL = "{binder_base_url}/gist/{user}/{gist_id}/master"
BINDER_PATH_TMPL = BINDER_TMPL + "?filepath={path}"
def client_error_message(self, exc, url, body, msg=None):
if exc.code == 403 and "too big" in body.lower():
return 400, "GitHub will not serve raw gists larger than 10MB"
return super().client_error_message(exc, url, body, msg)
class UserGistsHandler(GistClientMixin, BaseHandler):
"""list a user's gists containing notebooks
.ipynb file extension is required for listing (not for rendering).
"""
def render_usergists_template(
self, entries, user, provider_url, prev_url, next_url, **namespace
):
"""
provider_url: str
URL to the notebook document upstream at the provider (e.g., GitHub)
executor_url: str, optional (kwarg passed into `namespace`)
URL to execute the notebook document (e.g., Binder)
"""
return self.render_template(
"usergists.html",
entries=entries,
user=user,
provider_url=provider_url,
prev_url=prev_url,
next_url=next_url,
**self.PROVIDER_CTX,
**namespace
)
@cached
async def get(self, user, **namespace):
page = self.get_argument("page", None)
params = {}
if page:
params["page"] = page
with self.catch_client_error():
response = await self.github_client.get_gists(user, params=params)
prev_url, next_url = self.get_page_links(response)
gists = json.loads(response_text(response))
entries = []
for gist in gists:
notebooks = [f for f in gist["files"] if f.endswith(".ipynb")]
if notebooks:
entries.append(
dict(
id=gist["id"],
notebooks=notebooks,
description=gist["description"] or "",
)
)
if self.github_url == "https://github.com/":
gist_base_url = "https://gist.github.com/"
else:
gist_base_url = url_path_join(self.github_url, "gist/")
provider_url = url_path_join(gist_base_url, "{user}".format(user=user))
html = self.render_usergists_template(
entries=entries,
user=user,
provider_url=provider_url,
prev_url=prev_url,
next_url=next_url,
**namespace
)
await self.cache_and_finish(html)
class GistHandler(GistClientMixin, RenderingHandler):
"""render a gist notebook, or list files if a multifile gist"""
async def parse_gist(self, user, gist_id, filename=""):
with self.catch_client_error():
response = await self.github_client.get_gist(gist_id)
gist = json.loads(response_text(response))
gist_id = gist["id"]
if user is None:
# redirect to /gist/user/gist_id if no user given
owner_dict = gist.get("owner", {})
if owner_dict:
user = owner_dict["login"]
else:
user = "anonymous"
new_url = "{format}/gist/{user}/{gist_id}".format(
format=self.format_prefix, user=user, gist_id=gist_id
)
if filename:
new_url = new_url + "/" + filename
self.redirect(self.from_base(new_url))
return
files = gist["files"]
many_files_gist = len(files) > 1
# user and gist_id get modified
return user, gist_id, gist, files, many_files_gist
# Analogous to GitHubTreeHandler
async def tree_get(self, user, gist_id, gist, files):
"""
user, gist_id, gist, and files are (most) of the values returned by parse_gist
"""
entries = []
ipynbs = []
others = []
for file in files.values():
e = {}
e["name"] = file["filename"]
if file["filename"].endswith(".ipynb"):
e["url"] = quote("/%s/%s" % (gist_id, file["filename"]))
e["class"] = "fa-book"
ipynbs.append(e)
else:
if self.github_url == "https://github.com/":
gist_base_url = "https://gist.github.com/"
else:
gist_base_url = url_path_join(self.github_url, "gist/")
provider_url = url_path_join(
gist_base_url,
"{user}/{gist_id}#file-{clean_name}".format(
user=user,
gist_id=gist_id,
clean_name=clean_filename(file["filename"]),
),
)
e["url"] = provider_url
e["class"] = "fa-share"
others.append(e)
entries.extend(ipynbs)
entries.extend(others)
# Enable a binder navbar icon if a binder base URL is configured
executor_url = (
self.BINDER_TMPL.format(
binder_base_url=self.binder_base_url,
user=user.rstrip("/"),
gist_id=gist_id,
)
if self.binder_base_url
else None
)
# provider_url:
# URL to the notebook document upstream at the provider (e.g., GitHub)
# executor_url: str, optional
# URL to execute the notebook document (e.g., Binder)
html = self.render_template(
"treelist.html",
entries=entries,
tree_type="gist",
tree_label="gists",
user=user.rstrip("/"),
provider_url=gist["html_url"],
executor_url=executor_url,
**self.PROVIDER_CTX
)
await self.cache_and_finish(html)
# Analogous to GitHubBlobHandler
async def file_get(self, user, gist_id, filename, gist, many_files_gist, file):
content = await self.get_notebook_data(gist_id, filename, many_files_gist, file)
if not content:
return
await self.deliver_notebook(user, gist_id, filename, gist, file, content)
# Only called by file_get
async def get_notebook_data(self, gist_id, filename, many_files_gist, file):
"""
gist_id, filename, many_files_gist, file are all passed to file_get
"""
if (file["type"] or "").startswith("image/"):
self.log.debug(
"Fetching raw image (%s) %s/%s: %s",
file["type"],
gist_id,
filename,
file["raw_url"],
)
response = await self.fetch(file["raw_url"])
# use raw bytes for images:
content = response.body
elif file["truncated"]:
self.log.debug(
"Gist %s/%s truncated, fetching %s", gist_id, filename, file["raw_url"]
)
response = await self.fetch(file["raw_url"])
content = response_text(response, encoding="utf-8")
else:
content = file["content"]
if many_files_gist and not filename.endswith(".ipynb"):
self.set_header("Content-Type", file.get("type") or "text/plain")
# cannot redirect because of X-Frame-Content
self.finish(content)
return
else:
return content
# Only called by file_get
async def deliver_notebook(self, user, gist_id, filename, gist, file, content):
"""
        user, gist_id, filename, gist, and file are the same values as those
passed into file_get, whereas content is returned from
get_notebook_data using user, gist_id, filename, gist, and file.
"""
# Enable a binder navbar icon if a binder base URL is configured
executor_url = (
self.BINDER_PATH_TMPL.format(
binder_base_url=self.binder_base_url,
user=user.rstrip("/"),
gist_id=gist_id,
path=quote(filename),
)
if self.binder_base_url
else None
)
# provider_url: str, optional
# URL to the notebook document upstream at the provider (e.g., GitHub)
await self.finish_notebook(
content,
file["raw_url"],
msg="gist: %s" % gist_id,
public=gist["public"],
provider_url=gist["html_url"],
executor_url=executor_url,
**self.PROVIDER_CTX
)
@cached
async def get(self, user, gist_id, filename=""):
"""
Encompasses both the case of a single file gist, handled by
`file_get`, as well as a many-file gist, handled by `tree_get`.
"""
parsed_gist = await self.parse_gist(user, gist_id, filename)
if parsed_gist is not None:
user, gist_id, gist, files, many_files_gist = parsed_gist
else:
return
if many_files_gist and not filename:
await self.tree_get(user, gist_id, gist, files)
else:
if not many_files_gist and not filename:
filename = list(files.keys())[0]
if filename not in files:
raise web.HTTPError(
404, "No such file in gist: %s (%s)", filename, list(files.keys())
)
file = files[filename]
await self.file_get(user, gist_id, filename, gist, many_files_gist, file)
class GistRedirectHandler(BaseHandler):
"""redirect old /<gist-id> to new /gist/<gist-id>"""
def get(self, gist_id, file=""):
new_url = "%s/gist/%s" % (self.format_prefix, gist_id)
if file:
new_url = "%s/%s" % (new_url, file)
self.log.info("Redirecting %s to %s", self.request.uri, new_url)
self.redirect(self.from_base(new_url))
def default_handlers(handlers=[], **handler_names):
"""Tornado handlers"""
gist_handler = _load_handler_from_location(handler_names["gist_handler"])
user_gists_handler = _load_handler_from_location(
handler_names["user_gists_handler"]
)
return handlers + [
(r"/gist/([^\/]+/)?([0-9]+|[0-9a-f]{20,})", gist_handler, {}),
(r"/gist/([^\/]+/)?([0-9]+|[0-9a-f]{20,})/(?:files/)?(.*)", gist_handler, {}),
(r"/([0-9]+|[0-9a-f]{20,})", GistRedirectHandler, {}),
(r"/([0-9]+|[0-9a-f]{20,})/(.*)", GistRedirectHandler, {}),
(r"/gist/([^\/]+)/?", user_gists_handler, {}),
]
def uri_rewrites(rewrites=[]):
gist_rewrites = [
(r"^([a-f0-9]+)/?$", "/{0}"),
(r"^https?://gist.github.com/([^\/]+/)?([a-f0-9]+)/?$", "/{1}"),
]
# github enterprise
if os.environ.get("GITHUB_API_URL", "") != "":
gist_base_url = url_path_join(
os.environ.get("GITHUB_API_URL").split("/api/v3")[0], "gist/"
)
gist_rewrites.extend(
[
# Fetching the Gist ID which is embedded in the URL, but with a different base URL
(r"^" + gist_base_url + r"([^\/]+/)?([a-f0-9]+)/?$", "/{1}")
]
)
return gist_rewrites + rewrites
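# Illustrative sketch (not part of the original module): each tuple returned by
# uri_rewrites() pairs a regex with a template that is formatted with the regex
# groups to yield the internal nbviewer path.  The sample URL below is assumed.
if __name__ == "__main__":
    import re
    sample_url = "https://gist.github.com/someuser/0123456789abcdef0123"
    for pattern, template in uri_rewrites():
        match = re.match(pattern, sample_url)
        if match:
            # prints '/0123456789abcdef0123' for the sample gist URL
            print(template.format(*match.groups()))
            break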
| 34.651099 | 98 | 0.562753 | ["BSD-3-Clause-Clear"] | cybergis/nbviewer | nbviewer/providers/gist/handlers.py | 12,613 | Python |
#!/usr/bin/env python3
"""Dump the elevation layer of a BAG (Bathymetric Attributed Grid) file to XYZ text."""
import h5py
f = h5py.File('H11302_OLS_OSS/H11302_2m_1.bag', 'r')
print(list(f.values()))  # top-level HDF5 objects in the BAG
print(list(f.items()))
bag_root = f['/BAG_root']
# The ISO metadata is stored as an array of strings; join it into one XML document.
metadata = ''.join(s.decode() if isinstance(s, bytes) else s
                   for s in bag_root['metadata'][:])
with open('metadata.xml', 'w') as o:
    o.write(metadata)
# print(metadata)  # [0:200]
elevation = bag_root['elevation']
print('shape:', elevation.shape)
data = elevation[...]
# print(type(data))
# print(data)
print('range:', data.min(), data.max())
# import matplotlib.mlab as mlab
# import matplotlib.pyplot as plt
with open('out.xyz', 'w') as o:
    for y in range(elevation.shape[1]):
        # for x, z in enumerate(elevation[y]):
        for x in range(elevation.shape[0]):
            z = elevation[x, y]
            if z >= 1000000 - 1:  # skip no-data fill cells
                continue
            # o.write('{x} {y} {z}\n'.format(x=x, y=y, z=z))
            o.write('%d %d %0.2f\n' % (x, y, z))
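# Illustrative sketch (not part of the original script): the nested loop above
# can be replaced by a vectorized numpy pass that masks the no-data fill value
# (1000000) and writes only the valid cells; the x/y orientation is assumed to
# match the loop above.
def write_xyz_vectorized(elevation, path='out_vectorized.xyz', nodata=1000000):
    import numpy as np
    grid = elevation[...]
    xs, ys = np.nonzero(grid < nodata - 1)  # indices of valid soundings
    with open(path, 'w') as out:
        for x, y, z in zip(xs, ys, grid[xs, ys]):
            out.write('%d %d %0.2f\n' % (x, y, z))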
| 19.804878 | 53 | 0.635468 | ["Apache-2.0"] | schwehr/bag-py | bag.py | 812 | Python |
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
from typing import Sequence
from pefile import PE
from qiling.const import QL_ARCH
from qiling.exception import QlErrorArch, QlMemoryMappedError
from qiling.loader.loader import QlLoader
from qiling.os.memory import QlMemoryHeap
from qiling.os.uefi import context, st, smst
from qiling.os.uefi.ProcessorBind import CPU_STACK_ALIGNMENT
from qiling.os.uefi.shutdown import hook_EndOfExecution
from qiling.os.uefi.protocols import EfiLoadedImageProtocol
from qiling.os.uefi.protocols import EfiSmmAccess2Protocol
from qiling.os.uefi.protocols import EfiSmmBase2Protocol
from qiling.os.uefi.protocols import EfiSmmCpuProtocol
from qiling.os.uefi.protocols import EfiSmmSwDispatch2Protocol
from qiling.os.uefi.protocols import PcdProtocol
class QlLoaderPE_UEFI(QlLoader):
def __init__(self, ql):
super(QlLoaderPE_UEFI, self).__init__(ql)
self.ql = ql
self.modules = []
self.events = {}
self.notify_list = []
self.next_image_base = 0
    # list of member names to save and restore
__save_members = (
'modules',
'events',
'notify_list',
'next_image_base',
'loaded_image_protocol_modules',
'tpl',
'efi_conf_table_array',
'efi_conf_table_array_ptr',
'efi_conf_table_data_ptr',
'efi_conf_table_data_next_ptr'
)
def save(self) -> dict:
saved_state = super(QlLoaderPE_UEFI, self).save()
for member in QlLoaderPE_UEFI.__save_members:
saved_state[member] = getattr(self, member)
# since this class initialize the heap (that is hosted by the OS object), we will store it here.
saved_state['heap'] = self.ql.os.heap.save()
return saved_state
def restore(self, saved_state: dict):
super(QlLoaderPE_UEFI, self).restore(saved_state)
for member in QlLoaderPE_UEFI.__save_members:
setattr(self, member, saved_state[member])
self.ql.os.heap.restore(saved_state['heap'])
def install_loaded_image_protocol(self, image_base, image_size):
fields = {
'gST' : self.gST,
'image_base' : image_base,
'image_size' : image_size
}
descriptor = EfiLoadedImageProtocol.make_descriptor(fields)
self.dxe_context.install_protocol(descriptor, image_base)
self.loaded_image_protocol_modules.append(image_base)
def map_and_load(self, path: str, exec_now: bool=False):
"""Map and load a module into memory.
The specified module would be mapped and loaded into the address set
in the `next_image_base` member. It is the caller's responsibility to
make sure that the memory is available.
On success, `next_image_base` will be updated accordingly.
Args:
path : path of the module binary to load
            exec_now : execute module right away; will be enqueued if not
Raises:
QlMemoryMappedError : when `next_image_base` is not available
"""
ql = self.ql
pe = PE(path, fast_load=True)
# use image base only if it does not point to NULL
image_base = pe.OPTIONAL_HEADER.ImageBase or self.next_image_base
image_size = ql.mem.align(pe.OPTIONAL_HEADER.SizeOfImage, 0x1000)
assert (image_base % 0x1000) == 0, 'image base is expected to be page-aligned'
if image_base != pe.OPTIONAL_HEADER.ImageBase:
pe.relocate_image(image_base)
pe.parse_data_directories()
data = bytes(pe.get_memory_mapped_image())
ql.mem.map(image_base, image_size, info="[module]")
ql.mem.write(image_base, data)
ql.log.info(f'Module {path} loaded to {image_base:#x}')
entry_point = image_base + pe.OPTIONAL_HEADER.AddressOfEntryPoint
ql.log.info(f'Module entry point at {entry_point:#x}')
# the 'entry_point' member is used by the debugger. if not set, set it
# to the first loaded module entry point so the debugger can break
if self.entry_point == 0:
self.entry_point = entry_point
self.install_loaded_image_protocol(image_base, image_size)
        # this would be used later by os.find_containing_image
self.images.append(self.coverage_image(image_base, image_base + image_size, path))
        # update next memory slot to allow sequential loading. its availability
# is unknown though
self.next_image_base = image_base + image_size
module_info = (path, image_base, entry_point)
# execute the module right away or enqueue it
if exec_now:
# call entry point while retaining the current return address
self.execute_module(*module_info, eoe_trap=None)
else:
self.modules.append(module_info)
def call_function(self, addr: int, args: Sequence[int], ret: int):
"""Call a function after properly setting up its arguments and return address.
Args:
addr : function address
args : a sequence of arguments to pass to the function; may be empty
ret : return address; may be None
"""
# arguments gpr (ms x64 cc)
regs = ('rcx', 'rdx', 'r8', 'r9')
assert len(args) <= len(regs), f'currently supporting up to {len(regs)} arguments'
# set up the arguments
for reg, arg in zip(regs, args):
self.ql.reg.write(reg, arg)
# if provided, set return address
if ret is not None:
self.ql.stack_push(ret)
self.ql.reg.rip = addr
def unload_modules(self):
for handle in self.loaded_image_protocol_modules:
struct_addr = self.dxe_context.protocols[handle][self.loaded_image_protocol_guid]
loaded_image_protocol = EfiLoadedImageProtocol.EFI_LOADED_IMAGE_PROTOCOL.loadFrom(self.ql, struct_addr)
unload_ptr = self.ql.unpack64(loaded_image_protocol.Unload)
if unload_ptr != 0:
self.ql.log.info(f'Unloading module {handle:#x}, calling {unload_ptr:#x}')
self.call_function(unload_ptr, [handle], self.end_of_execution_ptr)
self.loaded_image_protocol_modules.remove(handle)
return True
return False
def execute_module(self, path: str, image_base: int, entry_point: int, eoe_trap: int):
"""Start the execution of a UEFI module.
Args:
image_base : module base address
entry_point : module entry point address
eoe_trap : end-of-execution trap address; may be None
"""
# use familiar UEFI names
ImageHandle = image_base
SystemTable = self.gST
self.call_function(entry_point, [ImageHandle, SystemTable], eoe_trap)
self.ql.os.entry_point = entry_point
self.ql.log.info(f'Running from {entry_point:#010x} of {path}')
def execute_next_module(self):
if not self.modules or self.ql.os.notify_before_module_execution(self.ql, self.modules[0][0]):
return
path, image_base, entry_point = self.modules.pop(0)
self.execute_module(path, image_base, entry_point, self.end_of_execution_ptr)
def run(self):
# intel architecture uefi implementation only
if self.ql.archtype not in (QL_ARCH.X86, QL_ARCH.X8664):
raise QlErrorArch("Unsupported architecture")
# x86-64 arch only
if self.ql.archtype != QL_ARCH.X8664:
raise QlErrorArch("Only 64 bit arch is supported at the moment")
self.loaded_image_protocol_guid = self.ql.os.profile["LOADED_IMAGE_PROTOCOL"]["Guid"]
self.loaded_image_protocol_modules = []
self.tpl = 4 # TPL_APPLICATION
arch_key = {
QL_ARCH.X86 : "OS32",
QL_ARCH.X8664 : "OS64"
}[self.ql.archtype]
# -------- init BS / RT / DXE data structures and protocols --------
os_profile = self.ql.os.profile[arch_key]
self.dxe_context = context.DxeContext(self.ql)
# initialize and locate heap
heap_base = int(os_profile["heap_address"], 0)
heap_size = int(os_profile["heap_size"], 0)
self.dxe_context.init_heap(heap_base, heap_size)
self.heap_base_address = heap_base
self.ql.log.info(f"Located heap at {heap_base:#010x}")
# initialize and locate stack
stack_base = int(os_profile["stack_address"], 0)
stack_size = int(os_profile["stack_size"], 0)
self.dxe_context.init_stack(stack_base, stack_size)
sp = stack_base + stack_size - CPU_STACK_ALIGNMENT
self.ql.log.info(f"Located stack at {sp:#010x}")
# TODO: statically allocating 256 KiB for ST, RT, BS, DS and Configuration Tables.
# however, this amount of memory is rather arbitrary
gST = self.dxe_context.heap.alloc(256 * 1024)
st.initialize(self.ql, gST)
protocols = (
EfiSmmAccess2Protocol,
EfiSmmBase2Protocol,
)
for proto in protocols:
self.dxe_context.install_protocol(proto.descriptor, 1)
# workaround
self.ql.os.heap = self.dxe_context.heap
# -------- init SMM data structures and protocols --------
smm_profile = self.ql.os.profile['SMRAM']
self.smm_context = context.SmmContext(self.ql)
# initialize and locate SMM heap
heap_base = int(smm_profile["heap_address"], 0)
heap_size = int(smm_profile["heap_size"], 0)
self.smm_context.init_heap(heap_base, heap_size)
self.ql.log.info(f"Located SMM heap at {heap_base:#010x}")
# TODO: statically allocating 256 KiB for SMM ST.
# however, this amount of memory is rather arbitrary
gSmst = self.smm_context.heap.alloc(256 * 1024)
smst.initialize(self.ql, gSmst)
self.in_smm = False
protocols = (
EfiSmmCpuProtocol,
EfiSmmSwDispatch2Protocol
)
for proto in protocols:
self.smm_context.install_protocol(proto.descriptor, 1)
# map mmio ranges
        # TODO: move to somewhere more appropriate (+ hook accesses?)
mmio_map = self.ql.os.profile["MMIO"]
self.ql.mem.map(
int(mmio_map['sbreg_base'], 0),
int(mmio_map['sbreg_size'], 0)
)
# set stack and frame pointers
self.ql.reg.rsp = sp
self.ql.reg.rbp = sp
self.entry_point = 0
self.load_address = 0
self.next_image_base = int(os_profile["image_address"], 0)
try:
for dependency in self.ql.argv:
self.map_and_load(dependency)
except QlMemoryMappedError:
self.ql.log.critical("Couldn't map dependency")
self.ql.log.info(f"Done with loading {self.ql.path}")
# set up an end-of-execution hook to regain control when module is done
# executing (i.e. when the entry point function returns). that should be
# set on a non-executable address, so SystemTable's address was picked
self.end_of_execution_ptr = gST
self.ql.hook_address(hook_EndOfExecution, self.end_of_execution_ptr)
self.execute_next_module()
def restore_runtime_services(self):
        pass  # not sure why we need to restore RT
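# Illustrative sketch (not part of the original loader): call_function above
# marshals arguments according to the Microsoft x64 calling convention, where
# the first four integer arguments travel in RCX, RDX, R8 and R9 and the
# return address is pushed onto the stack.  The standalone helper below only
# computes that register assignment; its name is an assumption made for
# illustration.
def ms_x64_register_assignment(args):
    regs = ('rcx', 'rdx', 'r8', 'r9')
    if len(args) > len(regs):
        raise ValueError('only the first four integer arguments go in registers')
    # e.g. ms_x64_register_assignment([image_handle, system_table]) ->
    #      {'rcx': image_handle, 'rdx': system_table}
    return dict(zip(regs, args))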
| 35.912226 | 115 | 0.650052 | ["CC0-1.0"] | mrTavas/owasp-fstm-auto | qiling/qiling/loader/pe_uefi.py | 11,456 | Python |
from __future__ import annotations
from datetime import timedelta
import itertools
import numpy as np
import pytest
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas as pd
import pandas._testing as tm
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
@pytest.fixture(autouse=True, scope="class")
def check_comprehensiveness(request):
# Iterate over combination of dtype, method and klass
# and ensure that each are contained within a collected test
cls = request.cls
combos = itertools.product(cls.klasses, cls.dtypes, [cls.method])
def has_test(combo):
klass, dtype, method = combo
cls_funcs = request.node.session.items
return any(
klass in x.name and dtype in x.name and method in x.name for x in cls_funcs
)
opts = request.config.option
if opts.lf or opts.keyword:
# If we are running with "last-failed" or -k foo, we expect to only
# run a subset of tests.
yield
else:
for combo in combos:
if not has_test(combo):
raise AssertionError(
f"test method is not defined: {cls.__name__}, {combo}"
)
yield
class CoercionBase:
klasses = ["index", "series"]
dtypes = [
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64",
"datetime64tz",
"timedelta64",
"period",
]
@property
def method(self):
raise NotImplementedError(self)
class TestSetitemCoercion(CoercionBase):
method = "setitem"
def _assert_setitem_series_conversion(
self, original_series, loc_value, expected_series, expected_dtype
):
"""test series value's coercion triggered by assignment"""
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
assert temp.dtype == expected_dtype
        # FIXME: don't leave commented-out
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
@pytest.mark.parametrize(
"val,exp_dtype", [(1, object), (1.1, object), (1 + 1j, object), (True, object)]
)
def test_setitem_series_object(self, val, exp_dtype):
obj = pd.Series(list("abcd"))
assert obj.dtype == object
exp = pd.Series(["a", val, "c", "d"])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],
)
def test_setitem_series_int64(self, val, exp_dtype, request):
obj = pd.Series([1, 2, 3, 4])
assert obj.dtype == np.int64
if exp_dtype is np.float64:
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
mark = pytest.mark.xfail(reason="GH12747 The result must be float")
request.node.add_marker(mark)
exp = pd.Series([1, val, 3, 4])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(np.int32(1), np.int8), (np.int16(2 ** 9), np.int16)]
)
def test_setitem_series_int8(self, val, exp_dtype, request):
obj = pd.Series([1, 2, 3, 4], dtype=np.int8)
assert obj.dtype == np.int8
if exp_dtype is np.int16:
exp = pd.Series([1, 0, 3, 4], dtype=np.int8)
self._assert_setitem_series_conversion(obj, val, exp, np.int8)
mark = pytest.mark.xfail(
reason="BUG: it must be pd.Series([1, 1, 3, 4], dtype=np.int16"
)
request.node.add_marker(mark)
warn = None if exp_dtype is np.int8 else FutureWarning
msg = "Values are too large to be losslessly cast to int8"
with tm.assert_produces_warning(warn, match=msg):
exp = pd.Series([1, val, 3, 4], dtype=np.int8)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],
)
def test_setitem_series_float64(self, val, exp_dtype):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
assert obj.dtype == np.float64
exp = pd.Series([1.1, val, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, object),
],
)
def test_setitem_series_complex128(self, val, exp_dtype):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
exp = pd.Series([1 + 1j, val, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(1, object),
("3", object),
(3, object),
(1.1, object),
(1 + 1j, object),
(True, np.bool_),
],
)
def test_setitem_series_bool(self, val, exp_dtype):
obj = pd.Series([True, False, True, False])
assert obj.dtype == np.bool_
exp = pd.Series([True, val, True, False], dtype=exp_dtype)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[(pd.Timestamp("2012-01-01"), "datetime64[ns]"), (1, object), ("x", object)],
)
def test_setitem_series_datetime64(self, val, exp_dtype):
obj = pd.Series(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
exp = pd.Series(
[
pd.Timestamp("2011-01-01"),
val,
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"),
(pd.Timestamp("2012-01-01", tz="US/Pacific"), object),
(pd.Timestamp("2012-01-01"), object),
(1, object),
],
)
def test_setitem_series_datetime64tz(self, val, exp_dtype):
tz = "US/Eastern"
obj = pd.Series(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
assert obj.dtype == "datetime64[ns, US/Eastern]"
exp = pd.Series(
[
pd.Timestamp("2011-01-01", tz=tz),
val,
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[(pd.Timedelta("12 day"), "timedelta64[ns]"), (1, object), ("x", object)],
)
def test_setitem_series_timedelta64(self, val, exp_dtype):
obj = pd.Series(
[
pd.Timedelta("1 day"),
pd.Timedelta("2 day"),
pd.Timedelta("3 day"),
pd.Timedelta("4 day"),
]
)
assert obj.dtype == "timedelta64[ns]"
exp = pd.Series(
[pd.Timedelta("1 day"), val, pd.Timedelta("3 day"), pd.Timedelta("4 day")]
)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
def test_setitem_series_no_coercion_from_values_list(self):
# GH35865 - int casted to str when internally calling np.array(ser.values)
ser = pd.Series(["a", 1])
ser[:] = list(ser.values)
expected = pd.Series(["a", 1])
tm.assert_series_equal(ser, expected)
def _assert_setitem_index_conversion(
self, original_series, loc_key, expected_index, expected_dtype
):
"""test index's coercion triggered by assign key"""
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
assert temp.index.dtype == expected_dtype
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
assert temp.index.dtype == expected_dtype
@pytest.mark.parametrize(
"val,exp_dtype", [("x", object), (5, IndexError), (1.1, object)]
)
def test_setitem_index_object(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4], index=list("abcd"))
assert obj.index.dtype == object
if exp_dtype is IndexError:
temp = obj.copy()
msg = "index 5 is out of bounds for axis 0 with size 4"
with pytest.raises(exp_dtype, match=msg):
temp[5] = 5
else:
exp_index = pd.Index(list("abcd") + [val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(5, np.int64), (1.1, np.float64), ("x", object)]
)
def test_setitem_index_int64(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4])
assert obj.index.dtype == np.int64
exp_index = pd.Index([0, 1, 2, 3, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(5, IndexError), (5.1, np.float64), ("x", object)]
)
def test_setitem_index_float64(self, val, exp_dtype, request):
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
assert obj.index.dtype == np.float64
if exp_dtype is IndexError:
# float + int -> int
temp = obj.copy()
msg = "index 5 is out of bounds for axis 0 with size 4"
with pytest.raises(exp_dtype, match=msg):
temp[5] = 5
mark = pytest.mark.xfail(reason="TODO_GH12747 The result must be float")
request.node.add_marker(mark)
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_series_period(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_complex128(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_bool(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_datetime64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_datetime64tz(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_timedelta64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_period(self):
raise NotImplementedError
class TestInsertIndexCoercion(CoercionBase):
klasses = ["index"]
method = "insert"
def _assert_insert_conversion(self, original, value, expected, expected_dtype):
"""test coercion triggered by insert"""
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
assert res.dtype == expected_dtype
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(1, 1, object),
(1.1, 1.1, object),
(False, False, object),
("x", "x", object),
],
)
def test_insert_index_object(self, insert, coerced_val, coerced_dtype):
obj = pd.Index(list("abcd"))
assert obj.dtype == object
exp = pd.Index(["a", coerced_val, "b", "c", "d"])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(1, 1, np.int64),
(1.1, 1.1, np.float64),
(False, False, object), # GH#36319
("x", "x", object),
],
)
def test_insert_index_int64(self, insert, coerced_val, coerced_dtype):
obj = pd.Int64Index([1, 2, 3, 4])
assert obj.dtype == np.int64
exp = pd.Index([1, coerced_val, 2, 3, 4])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(1, 1.0, np.float64),
(1.1, 1.1, np.float64),
(False, False, object), # GH#36319
("x", "x", object),
],
)
def test_insert_index_float64(self, insert, coerced_val, coerced_dtype):
obj = pd.Float64Index([1.0, 2.0, 3.0, 4.0])
assert obj.dtype == np.float64
exp = pd.Index([1.0, coerced_val, 2.0, 3.0, 4.0])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[
(pd.Timestamp("2012-01-01"), "datetime64[ns]"),
(pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"),
],
ids=["datetime64", "datetime64tz"],
)
@pytest.mark.parametrize(
"insert_value",
[pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), 1],
)
def test_insert_index_datetimes(self, request, fill_val, exp_dtype, insert_value):
obj = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], tz=fill_val.tz
)
assert obj.dtype == exp_dtype
exp = pd.DatetimeIndex(
["2011-01-01", fill_val.date(), "2011-01-02", "2011-01-03", "2011-01-04"],
tz=fill_val.tz,
)
self._assert_insert_conversion(obj, fill_val, exp, exp_dtype)
if fill_val.tz:
# mismatched tzawareness
ts = pd.Timestamp("2012-01-01")
result = obj.insert(1, ts)
expected = obj.astype(object).insert(1, ts)
assert expected.dtype == object
tm.assert_index_equal(result, expected)
# mismatched tz --> cast to object (could reasonably cast to common tz)
ts = pd.Timestamp("2012-01-01", tz="Asia/Tokyo")
result = obj.insert(1, ts)
expected = obj.astype(object).insert(1, ts)
assert expected.dtype == object
tm.assert_index_equal(result, expected)
else:
# mismatched tzawareness
ts = pd.Timestamp("2012-01-01", tz="Asia/Tokyo")
result = obj.insert(1, ts)
expected = obj.astype(object).insert(1, ts)
assert expected.dtype == object
tm.assert_index_equal(result, expected)
item = 1
result = obj.insert(1, item)
expected = obj.astype(object).insert(1, item)
assert expected[1] == item
assert expected.dtype == object
tm.assert_index_equal(result, expected)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(["1 day", "2 day", "3 day", "4 day"])
assert obj.dtype == "timedelta64[ns]"
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(["1 day", "10 day", "2 day", "3 day", "4 day"])
self._assert_insert_conversion(
obj, pd.Timedelta("10 day"), exp, "timedelta64[ns]"
)
for item in [pd.Timestamp("2012-01-01"), 1]:
result = obj.insert(1, item)
expected = obj.astype(object).insert(1, item)
assert expected.dtype == object
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(pd.Period("2012-01", freq="M"), "2012-01", "period[M]"),
(pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01"), object),
(1, 1, object),
("x", "x", object),
],
)
def test_insert_index_period(self, insert, coerced_val, coerced_dtype):
obj = pd.PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq="M")
assert obj.dtype == "period[M]"
data = [
pd.Period("2011-01", freq="M"),
coerced_val,
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
pd.Period("2011-04", freq="M"),
]
if isinstance(insert, pd.Period):
exp = pd.PeriodIndex(data, freq="M")
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
# string that can be parsed to appropriate PeriodDtype
self._assert_insert_conversion(obj, str(insert), exp, coerced_dtype)
else:
result = obj.insert(0, insert)
expected = obj.astype(object).insert(0, insert)
tm.assert_index_equal(result, expected)
# TODO: ATM inserting '2012-01-01 00:00:00' when we have obj.freq=="M"
# casts that string to Period[M], not clear that is desirable
if not isinstance(insert, pd.Timestamp):
# non-castable string
result = obj.insert(0, str(insert))
expected = obj.astype(object).insert(0, str(insert))
tm.assert_index_equal(result, expected)
msg = r"Unexpected keyword arguments {'freq'}"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning):
# passing keywords to pd.Index
pd.Index(data, freq="M")
@pytest.mark.xfail(reason="Test not implemented")
def test_insert_index_complex128(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_insert_index_bool(self):
raise NotImplementedError
class TestWhereCoercion(CoercionBase):
method = "where"
def _assert_where_conversion(
self, original, cond, values, expected, expected_dtype
):
"""test coercion triggered by where"""
target = original.copy()
res = target.where(cond, values)
tm.assert_equal(res, expected)
assert res.dtype == expected_dtype
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[(1, object), (1.1, object), (1 + 1j, object), (True, object)],
)
def test_where_object(self, index_or_series, fill_val, exp_dtype):
klass = index_or_series
obj = klass(list("abcd"))
assert obj.dtype == object
cond = klass([True, False, True, False])
if fill_val is True and klass is pd.Series:
ret_val = 1
else:
ret_val = fill_val
exp = klass(["a", ret_val, "c", ret_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass(["a", values[1], "c", values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],
)
def test_where_int64(self, index_or_series, fill_val, exp_dtype):
klass = index_or_series
if klass is pd.Index and exp_dtype is np.complex128:
pytest.skip("Complex Index not supported")
obj = klass([1, 2, 3, 4])
assert obj.dtype == np.int64
cond = klass([True, False, True, False])
exp = klass([1, fill_val, 3, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass([1, values[1], 3, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val, exp_dtype",
[(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],
)
def test_where_float64(self, index_or_series, fill_val, exp_dtype):
klass = index_or_series
if klass is pd.Index and exp_dtype is np.complex128:
pytest.skip("Complex Index not supported")
obj = klass([1.1, 2.2, 3.3, 4.4])
assert obj.dtype == np.float64
cond = klass([True, False, True, False])
exp = klass([1.1, fill_val, 3.3, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass([1.1, values[1], 3.3, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, object),
],
)
def test_where_series_complex128(self, fill_val, exp_dtype):
klass = pd.Series
obj = klass([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
cond = klass([True, False, True, False])
exp = klass([1 + 1j, fill_val, 3 + 3j, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass([1 + 1j, values[1], 3 + 3j, values[3]], dtype=exp_dtype)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[(1, object), (1.1, object), (1 + 1j, object), (True, np.bool_)],
)
def test_where_series_bool(self, fill_val, exp_dtype):
klass = pd.Series
obj = klass([True, False, True, False])
assert obj.dtype == np.bool_
cond = klass([True, False, True, False])
exp = klass([True, fill_val, True, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass([True, values[1], True, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[
(pd.Timestamp("2012-01-01"), "datetime64[ns]"),
(pd.Timestamp("2012-01-01", tz="US/Eastern"), object),
],
ids=["datetime64", "datetime64tz"],
)
def test_where_series_datetime64(self, fill_val, exp_dtype):
obj = pd.Series(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
cond = pd.Series([True, False, True, False])
exp = pd.Series(
[pd.Timestamp("2011-01-01"), fill_val, pd.Timestamp("2011-01-03"), fill_val]
)
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
values = pd.Series(pd.date_range(fill_val, periods=4))
if fill_val.tz:
exp = pd.Series(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2012-01-02 00:00", tz="US/Eastern"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2012-01-04 00:00", tz="US/Eastern"),
]
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
exp = pd.Series(
[
pd.Timestamp("2011-01-01"),
values[1],
pd.Timestamp("2011-01-03"),
values[3],
]
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val",
[
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-01").to_datetime64(),
pd.Timestamp("2012-01-01").to_pydatetime(),
],
)
def test_where_index_datetime(self, fill_val):
exp_dtype = "datetime64[ns]"
obj = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
cond = pd.Index([True, False, True, False])
result = obj.where(cond, fill_val)
expected = pd.DatetimeIndex([obj[0], fill_val, obj[2], fill_val])
tm.assert_index_equal(result, expected)
values = pd.Index(pd.date_range(fill_val, periods=4))
exp = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2012-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2012-01-04"),
]
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.xfail(reason="GH 22839: do not ignore timezone, must be object")
def test_where_index_datetime64tz(self):
fill_val = pd.Timestamp("2012-01-01", tz="US/Eastern")
exp_dtype = object
obj = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
cond = pd.Index([True, False, True, False])
msg = "Index\\(\\.\\.\\.\\) must be called with a collection of some kind"
with pytest.raises(TypeError, match=msg):
obj.where(cond, fill_val)
values = pd.Index(pd.date_range(fill_val, periods=4))
exp = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2012-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2012-01-04", tz="US/Eastern"),
],
dtype=exp_dtype,
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.xfail(reason="Test not implemented")
def test_where_index_complex128(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_where_index_bool(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_where_series_timedelta64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_where_series_period(self):
raise NotImplementedError
@pytest.mark.parametrize(
"value", [pd.Timedelta(days=9), timedelta(days=9), np.timedelta64(9, "D")]
)
def test_where_index_timedelta64(self, value):
tdi = pd.timedelta_range("1 Day", periods=4)
cond = np.array([True, False, False, True])
expected = pd.TimedeltaIndex(["1 Day", value, value, "4 Days"])
result = tdi.where(cond, value)
tm.assert_index_equal(result, expected)
# wrong-dtyped NaT
dtnat = np.datetime64("NaT", "ns")
expected = pd.Index([tdi[0], dtnat, dtnat, tdi[3]], dtype=object)
assert expected[1] is dtnat
result = tdi.where(cond, dtnat)
tm.assert_index_equal(result, expected)
def test_where_index_period(self):
dti = pd.date_range("2016-01-01", periods=3, freq="QS")
pi = dti.to_period("Q")
cond = np.array([False, True, False])
        # Passing a valid scalar
value = pi[-1] + pi.freq * 10
expected = pd.PeriodIndex([value, pi[1], value])
result = pi.where(cond, value)
tm.assert_index_equal(result, expected)
# Case passing ndarray[object] of Periods
other = np.asarray(pi + pi.freq * 10, dtype=object)
result = pi.where(cond, other)
expected = pd.PeriodIndex([other[0], pi[1], other[2]])
tm.assert_index_equal(result, expected)
# Passing a mismatched scalar -> casts to object
td = pd.Timedelta(days=4)
expected = pd.Index([td, pi[1], td], dtype=object)
result = pi.where(cond, td)
tm.assert_index_equal(result, expected)
per = pd.Period("2020-04-21", "D")
expected = pd.Index([per, pi[1], per], dtype=object)
result = pi.where(cond, per)
tm.assert_index_equal(result, expected)
class TestFillnaSeriesCoercion(CoercionBase):
# not indexing, but place here for consistency
method = "fillna"
@pytest.mark.xfail(reason="Test not implemented")
def test_has_comprehensive_tests(self):
raise NotImplementedError
def _assert_fillna_conversion(self, original, value, expected, expected_dtype):
"""test coercion triggered by fillna"""
target = original.copy()
res = target.fillna(value)
tm.assert_equal(res, expected)
assert res.dtype == expected_dtype
@pytest.mark.parametrize(
"fill_val, fill_dtype",
[(1, object), (1.1, object), (1 + 1j, object), (True, object)],
)
def test_fillna_object(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
obj = klass(["a", np.nan, "c", "d"])
assert obj.dtype == object
exp = klass(["a", fill_val, "c", "d"])
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],
)
def test_fillna_float64(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
obj = klass([1.1, np.nan, 3.3, 4.4])
assert obj.dtype == np.float64
exp = klass([1.1, fill_val, 3.3, 4.4])
# float + complex -> we don't support a complex Index
# complex for Series,
# object for Index
if fill_dtype == np.complex128 and klass == pd.Index:
fill_dtype = object
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, object),
],
)
def test_fillna_series_complex128(self, fill_val, fill_dtype):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
exp = pd.Series([1 + 1j, fill_val, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[
(pd.Timestamp("2012-01-01"), "datetime64[ns]"),
(pd.Timestamp("2012-01-01", tz="US/Eastern"), object),
(1, object),
("x", object),
],
ids=["datetime64", "datetime64tz", "object", "object"],
)
def test_fillna_datetime(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
obj = klass(
[
pd.Timestamp("2011-01-01"),
pd.NaT,
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
exp = klass(
[
pd.Timestamp("2011-01-01"),
fill_val,
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[
(pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"),
(pd.Timestamp("2012-01-01"), object),
(pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), object),
(1, object),
("x", object),
],
)
def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
tz = "US/Eastern"
obj = klass(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.NaT,
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
assert obj.dtype == "datetime64[ns, US/Eastern]"
exp = klass(
[
pd.Timestamp("2011-01-01", tz=tz),
fill_val,
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_int64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_int64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_bool(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_bool(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_timedelta64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_period(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_timedelta64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_period(self):
raise NotImplementedError
class TestReplaceSeriesCoercion(CoercionBase):
klasses = ["series"]
method = "replace"
rep: dict[str, list] = {}
rep["object"] = ["a", "b"]
rep["int64"] = [4, 5]
rep["float64"] = [1.1, 2.2]
rep["complex128"] = [1 + 1j, 2 + 2j]
rep["bool"] = [True, False]
rep["datetime64[ns]"] = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-03")]
for tz in ["UTC", "US/Eastern"]:
# to test tz => different tz replacement
key = f"datetime64[ns, {tz}]"
rep[key] = [
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-03", tz=tz),
]
rep["timedelta64[ns]"] = [pd.Timedelta("1 day"), pd.Timedelta("2 day")]
@pytest.fixture(params=["dict", "series"])
def how(self, request):
return request.param
@pytest.fixture(
params=[
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64[ns]",
"datetime64[ns, UTC]",
"datetime64[ns, US/Eastern]",
"timedelta64[ns]",
]
)
def from_key(self, request):
return request.param
@pytest.fixture(
params=[
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64[ns]",
"datetime64[ns, UTC]",
"datetime64[ns, US/Eastern]",
"timedelta64[ns]",
],
ids=[
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64",
"datetime64tz",
"datetime64tz",
"timedelta64",
],
)
def to_key(self, request):
return request.param
@pytest.fixture
def replacer(self, how, from_key, to_key):
"""
Object we will pass to `Series.replace`
"""
if how == "dict":
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == "series":
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
return replacer
def test_replace_series(self, how, to_key, from_key, replacer):
index = pd.Index([3, 4], name="xxx")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
if from_key.startswith("datetime") and to_key.startswith("datetime"):
# tested below
return
elif from_key in ["datetime64[ns, US/Eastern]", "datetime64[ns, UTC]"]:
# tested below
return
result = obj.replace(replacer)
if (from_key == "float64" and to_key in ("int64")) or (
from_key == "complex128" and to_key in ("int64", "float64")
):
if not IS64 or is_platform_windows():
pytest.skip(f"32-bit platform buggy: {from_key} -> {to_key}")
# Expected: do not downcast by replacement
exp = pd.Series(self.rep[to_key], index=index, name="yyy", dtype=from_key)
else:
exp = pd.Series(self.rep[to_key], index=index, name="yyy")
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"to_key",
["timedelta64[ns]", "bool", "object", "complex128", "float64", "int64"],
indirect=True,
)
@pytest.mark.parametrize(
"from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], indirect=True
)
def test_replace_series_datetime_tz(self, how, to_key, from_key, replacer):
index = pd.Index([3, 4], name="xyz")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
result = obj.replace(replacer)
exp = pd.Series(self.rep[to_key], index=index, name="yyy")
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"to_key",
["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"],
indirect=True,
)
@pytest.mark.parametrize(
"from_key",
["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"],
indirect=True,
)
def test_replace_series_datetime_datetime(self, how, to_key, from_key, replacer):
index = pd.Index([3, 4], name="xyz")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
result = obj.replace(replacer)
exp = pd.Series(self.rep[to_key], index=index, name="yyy")
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
@pytest.mark.xfail(reason="Test not implemented")
def test_replace_series_period(self):
raise NotImplementedError
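# Illustrative sketch (not part of the test suite): the coercions these tests
# pin down can be observed directly.  Exact behaviour (value cast, upcast, or
# deprecation warning) differs between pandas versions, so the snippet only
# reports what the installed version does for one setitem case.
if __name__ == "__main__":
    ser = pd.Series([1, 2, 3, 4])
    print("dtype before setitem:", ser.dtype)  # int64
    ser[1] = 1.1                               # incompatible scalar assignment
    print("dtype after setitem: ", ser.dtype, list(ser))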
| 34.193878 | 88 | 0.565503 | ["BSD-3-Clause"] | 701KHK1915/8-PANDAS | pandas/tests/indexing/test_coercion.py | 40,212 | Python |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Task
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class Task(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A task to be performed.
"""
resource_type = Field("Task", const=True)
authoredOn: fhirtypes.DateTime = Field(
None,
alias="authoredOn",
title="Task Creation Date",
description="The date and time this task was created.",
# if property is element of this resource.
element_property=True,
)
authoredOn__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_authoredOn", title="Extension field for ``authoredOn``."
)
basedOn: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="basedOn",
title="Request fulfilled by this task",
description=(
"BasedOn refers to a higher-level authorization that triggered the "
'creation of the task. It references a "request" resource such as a '
"ServiceRequest, MedicationRequest, ServiceRequest, CarePlan, etc. "
'which is distinct from the "request" resource the task is seeking to '
"fulfill. This latter resource is referenced by FocusOn. For example,"
" based on a ServiceRequest (= BasedOn), a task is created to fulfill a"
" procedureRequest ( = FocusOn ) to collect a specimen from a patient."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
businessStatus: fhirtypes.CodeableConceptType = Field(
None,
alias="businessStatus",
title='E.g. "Specimen collected", "IV prepped"',
description="Contains business-specific nuances of the business state.",
# if property is element of this resource.
element_property=True,
)
code: fhirtypes.CodeableConceptType = Field(
None,
alias="code",
title="Task Type",
description="A name or code (or both) briefly describing what the task involves.",
# if property is element of this resource.
element_property=True,
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Human-readable explanation of task",
description="A free-text description of what is to be performed.",
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
encounter: fhirtypes.ReferenceType = Field(
None,
alias="encounter",
title="Healthcare event during which this task originated",
description=(
"The healthcare event (e.g. a patient and healthcare provider "
"interaction) during which this task was created."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Encounter"],
)
executionPeriod: fhirtypes.PeriodType = Field(
None,
alias="executionPeriod",
title="Start and end time of execution",
description=(
"Identifies the time action was first taken against the task (start) "
"and/or the time final action was taken against the task prior to "
"marking it as completed (end)."
),
# if property is element of this resource.
element_property=True,
)
focus: fhirtypes.ReferenceType = Field(
None,
alias="focus",
title="What task is acting on",
description=(
"The request being actioned or the resource being manipulated by this "
"task."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
for_fhir: fhirtypes.ReferenceType = Field(
None,
alias="for",
title="Beneficiary of the Task",
description=(
"The entity who benefits from the performance of the service specified "
"in the task (e.g., the patient)."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
groupIdentifier: fhirtypes.IdentifierType = Field(
None,
alias="groupIdentifier",
title="Requisition or grouper id",
description=(
"An identifier that links together multiple tasks and other requests "
"that were created in the same context."
),
# if property is element of this resource.
element_property=True,
)
identifier: typing.List[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Task Instance Identifier",
description="The business identifier for this task.",
# if property is element of this resource.
element_property=True,
)
input: typing.List[fhirtypes.TaskInputType] = Field(
None,
alias="input",
title="Information used to perform task",
description=(
"Additional information that may be needed in the execution of the " "task."
),
# if property is element of this resource.
element_property=True,
)
instantiatesCanonical: fhirtypes.Canonical = Field(
None,
alias="instantiatesCanonical",
title="Formal definition of task",
description=(
"The URL pointing to a *FHIR*-defined protocol, guideline, orderset or "
"other definition that is adhered to in whole or in part by this Task."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["ActivityDefinition"],
)
instantiatesCanonical__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_instantiatesCanonical",
title="Extension field for ``instantiatesCanonical``.",
)
instantiatesUri: fhirtypes.Uri = Field(
None,
alias="instantiatesUri",
title="Formal definition of task",
description=(
"The URL pointing to an *externally* maintained protocol, guideline, "
"orderset or other definition that is adhered to in whole or in part by"
" this Task."
),
# if property is element of this resource.
element_property=True,
)
instantiatesUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_instantiatesUri", title="Extension field for ``instantiatesUri``."
)
insurance: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="insurance",
title="Associated insurance coverage",
description=(
"Insurance plans, coverage extensions, pre-authorizations and/or pre-"
"determinations that may be relevant to the Task."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Coverage", "ClaimResponse"],
)
intent: fhirtypes.Code = Field(
None,
alias="intent",
title=(
"unknown | proposal | plan | order | original-order | reflex-order | "
"filler-order | instance-order | option"
),
description=(
'Indicates the "level" of actionability associated with the Task, i.e. '
"i+R[9]Cs this a proposed task, a planned task, an actionable task, "
"etc."
),
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=[
"unknown",
"proposal",
"plan",
"order",
"original-order",
"reflex-order",
"filler-order",
"instance-order",
"option",
],
)
intent__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_intent", title="Extension field for ``intent``."
)
lastModified: fhirtypes.DateTime = Field(
None,
alias="lastModified",
title="Task Last Modified Date",
description="The date and time of last modification to this task.",
# if property is element of this resource.
element_property=True,
)
lastModified__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_lastModified", title="Extension field for ``lastModified``."
)
location: fhirtypes.ReferenceType = Field(
None,
alias="location",
title="Where task occurs",
description="Principal physical location where the this task is performed.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Location"],
)
note: typing.List[fhirtypes.AnnotationType] = Field(
None,
alias="note",
title="Comments made about the task",
description="Free-text information captured about the task as it progresses.",
# if property is element of this resource.
element_property=True,
)
output: typing.List[fhirtypes.TaskOutputType] = Field(
None,
alias="output",
title="Information produced as part of task",
description="Outputs produced by the Task.",
# if property is element of this resource.
element_property=True,
)
owner: fhirtypes.ReferenceType = Field(
None,
alias="owner",
title="Responsible individual",
description=(
"Individual organization or Device currently responsible for task "
"execution."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"Practitioner",
"PractitionerRole",
"Organization",
"CareTeam",
"HealthcareService",
"Patient",
"Device",
"RelatedPerson",
],
)
partOf: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="partOf",
title="Composite task",
description="Task that this particular task is part of.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Task"],
)
performerType: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="performerType",
title="Requested performer",
description="The kind of participant that should perform the task.",
# if property is element of this resource.
element_property=True,
)
priority: fhirtypes.Code = Field(
None,
alias="priority",
title="routine | urgent | asap | stat",
description=(
"Indicates how quickly the Task should be addressed with respect to "
"other requests."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=["routine", "urgent", "asap", "stat"],
)
priority__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_priority", title="Extension field for ``priority``."
)
reasonCode: fhirtypes.CodeableConceptType = Field(
None,
alias="reasonCode",
title="Why task is needed",
description="A description or code indicating why this task needs to be performed.",
# if property is element of this resource.
element_property=True,
)
reasonReference: fhirtypes.ReferenceType = Field(
None,
alias="reasonReference",
title="Why task is needed",
description="A resource reference indicating why this task needs to be performed.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
relevantHistory: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="relevantHistory",
title="Key events in history of the Task",
description=(
"Links to Provenance records for past versions of this Task that "
"identify key state transitions or updates that are likely to be "
"relevant to a user looking at the current version of the task."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Provenance"],
)
requester: fhirtypes.ReferenceType = Field(
None,
alias="requester",
title="Who is asking for task to be done",
description="The creator of the task.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"Device",
"Organization",
"Patient",
"Practitioner",
"PractitionerRole",
"RelatedPerson",
],
)
restriction: fhirtypes.TaskRestrictionType = Field(
None,
alias="restriction",
title="Constraints on fulfillment tasks",
description=(
"If the Task.focus is a request resource and the task is seeking "
"fulfillment (i.e. is asking for the request to be actioned), this "
"element identifies any limitations on what parts of the referenced "
"request should be actioned."
),
# if property is element of this resource.
element_property=True,
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="draft | requested | received | accepted | +",
description="The current status of the task.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=["draft", "requested", "received", "accepted", "+"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
statusReason: fhirtypes.CodeableConceptType = Field(
None,
alias="statusReason",
title="Reason for current status",
description="An explanation as to why this task is held, failed, was refused, etc.",
# if property is element of this resource.
element_property=True,
)
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_594(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("intent", "intent__ext"), ("status", "status__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
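    # Illustrative sketch (not part of the generated code): the validator above
    # requires either a primitive value or an extension-only form for ``intent``
    # and ``status``. Assuming the usual pydantic-style construction, roughly:
    #
    #     Task(intent="order", status="requested")   # valid
    #     Task(intent="order")                        # raises ValidationError (status missing)
    #     # extension-only form via the JSON alias (hedged, placeholder content):
    #     Task.parse_obj({"intent": "order", "_status": {"extension": [{"url": "...", "valueCode": "..."}]}})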
class TaskInput(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Information used to perform task.
Additional information that may be needed in the execution of the task.
"""
resource_type = Field("TaskInput", const=True)
type: fhirtypes.CodeableConceptType = Field(
...,
alias="type",
title="Label for the input",
description=(
"A code or description indicating how the input is intended to be used "
"as part of the task execution."
),
# if property is element of this resource.
element_property=True,
)
valueAddress: fhirtypes.AddressType = Field(
None,
alias="valueAddress",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueAge: fhirtypes.AgeType = Field(
None,
alias="valueAge",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueAnnotation: fhirtypes.AnnotationType = Field(
None,
alias="valueAnnotation",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueAttachment: fhirtypes.AttachmentType = Field(
None,
alias="valueAttachment",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueBase64Binary: fhirtypes.Base64Binary = Field(
None,
alias="valueBase64Binary",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueBase64Binary__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_valueBase64Binary",
title="Extension field for ``valueBase64Binary``.",
)
valueBoolean: bool = Field(
None,
alias="valueBoolean",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueBoolean__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueBoolean", title="Extension field for ``valueBoolean``."
)
valueCanonical: fhirtypes.Canonical = Field(
None,
alias="valueCanonical",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueCanonical__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueCanonical", title="Extension field for ``valueCanonical``."
)
valueCode: fhirtypes.Code = Field(
None,
alias="valueCode",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueCode__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueCode", title="Extension field for ``valueCode``."
)
valueCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="valueCodeableConcept",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueCoding: fhirtypes.CodingType = Field(
None,
alias="valueCoding",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueContactDetail: fhirtypes.ContactDetailType = Field(
None,
alias="valueContactDetail",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueContactPoint: fhirtypes.ContactPointType = Field(
None,
alias="valueContactPoint",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueContributor: fhirtypes.ContributorType = Field(
None,
alias="valueContributor",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueCount: fhirtypes.CountType = Field(
None,
alias="valueCount",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDataRequirement: fhirtypes.DataRequirementType = Field(
None,
alias="valueDataRequirement",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDate: fhirtypes.Date = Field(
None,
alias="valueDate",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueDate", title="Extension field for ``valueDate``."
)
valueDateTime: fhirtypes.DateTime = Field(
None,
alias="valueDateTime",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDateTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueDateTime", title="Extension field for ``valueDateTime``."
)
valueDecimal: fhirtypes.Decimal = Field(
None,
alias="valueDecimal",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDecimal__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueDecimal", title="Extension field for ``valueDecimal``."
)
valueDistance: fhirtypes.DistanceType = Field(
None,
alias="valueDistance",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDosage: fhirtypes.DosageType = Field(
None,
alias="valueDosage",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDuration: fhirtypes.DurationType = Field(
None,
alias="valueDuration",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueExpression: fhirtypes.ExpressionType = Field(
None,
alias="valueExpression",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueHumanName: fhirtypes.HumanNameType = Field(
None,
alias="valueHumanName",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueId: fhirtypes.Id = Field(
None,
alias="valueId",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueId__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueId", title="Extension field for ``valueId``."
)
valueIdentifier: fhirtypes.IdentifierType = Field(
None,
alias="valueIdentifier",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueInstant: fhirtypes.Instant = Field(
None,
alias="valueInstant",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueInstant__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueInstant", title="Extension field for ``valueInstant``."
)
valueInteger: fhirtypes.Integer = Field(
None,
alias="valueInteger",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueInteger__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueInteger", title="Extension field for ``valueInteger``."
)
valueMarkdown: fhirtypes.Markdown = Field(
None,
alias="valueMarkdown",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueMarkdown__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueMarkdown", title="Extension field for ``valueMarkdown``."
)
valueMeta: fhirtypes.MetaType = Field(
None,
alias="valueMeta",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueMoney: fhirtypes.MoneyType = Field(
None,
alias="valueMoney",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueOid: fhirtypes.Oid = Field(
None,
alias="valueOid",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueOid__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueOid", title="Extension field for ``valueOid``."
)
valueParameterDefinition: fhirtypes.ParameterDefinitionType = Field(
None,
alias="valueParameterDefinition",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valuePeriod: fhirtypes.PeriodType = Field(
None,
alias="valuePeriod",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valuePositiveInt: fhirtypes.PositiveInt = Field(
None,
alias="valuePositiveInt",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valuePositiveInt__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_valuePositiveInt",
title="Extension field for ``valuePositiveInt``.",
)
valueQuantity: fhirtypes.QuantityType = Field(
None,
alias="valueQuantity",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueRange: fhirtypes.RangeType = Field(
None,
alias="valueRange",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueRatio: fhirtypes.RatioType = Field(
None,
alias="valueRatio",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueReference: fhirtypes.ReferenceType = Field(
None,
alias="valueReference",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueRelatedArtifact: fhirtypes.RelatedArtifactType = Field(
None,
alias="valueRelatedArtifact",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueSampledData: fhirtypes.SampledDataType = Field(
None,
alias="valueSampledData",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueSignature: fhirtypes.SignatureType = Field(
None,
alias="valueSignature",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueString: fhirtypes.String = Field(
None,
alias="valueString",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueString__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueString", title="Extension field for ``valueString``."
)
valueTime: fhirtypes.Time = Field(
None,
alias="valueTime",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueTime", title="Extension field for ``valueTime``."
)
valueTiming: fhirtypes.TimingType = Field(
None,
alias="valueTiming",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueTriggerDefinition: fhirtypes.TriggerDefinitionType = Field(
None,
alias="valueTriggerDefinition",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUnsignedInt: fhirtypes.UnsignedInt = Field(
None,
alias="valueUnsignedInt",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUnsignedInt__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_valueUnsignedInt",
title="Extension field for ``valueUnsignedInt``.",
)
valueUri: fhirtypes.Uri = Field(
None,
alias="valueUri",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueUri", title="Extension field for ``valueUri``."
)
valueUrl: fhirtypes.Url = Field(
None,
alias="valueUrl",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUrl__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueUrl", title="Extension field for ``valueUrl``."
)
valueUsageContext: fhirtypes.UsageContextType = Field(
None,
alias="valueUsageContext",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUuid: fhirtypes.Uuid = Field(
None,
alias="valueUuid",
title="Content to use in performing the task",
description="The value of the input parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUuid__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueUuid", title="Extension field for ``valueUuid``."
)
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_1131(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"value": [
"valueAddress",
"valueAge",
"valueAnnotation",
"valueAttachment",
"valueBase64Binary",
"valueBoolean",
"valueCanonical",
"valueCode",
"valueCodeableConcept",
"valueCoding",
"valueContactDetail",
"valueContactPoint",
"valueContributor",
"valueCount",
"valueDataRequirement",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueDistance",
"valueDosage",
"valueDuration",
"valueExpression",
"valueHumanName",
"valueId",
"valueIdentifier",
"valueInstant",
"valueInteger",
"valueMarkdown",
"valueMeta",
"valueMoney",
"valueOid",
"valueParameterDefinition",
"valuePeriod",
"valuePositiveInt",
"valueQuantity",
"valueRange",
"valueRatio",
"valueReference",
"valueRelatedArtifact",
"valueSampledData",
"valueSignature",
"valueString",
"valueTime",
"valueTiming",
"valueTriggerDefinition",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueUsageContext",
"valueUuid",
]
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
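    # Illustrative sketch (assumed behaviour of the validator above): exactly one
    # ``value[x]`` variant must be set on a TaskInput.
    #
    #     TaskInput(type=concept, valueString="sample")              # valid
    #     TaskInput(type=concept)                                    # ValueError: no value[x] given
    #     TaskInput(type=concept, valueString="a", valueInteger=1)   # ValueError: multiple value[x]
    #
    # where ``concept`` stands for any CodeableConcept instance.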
class TaskOutput(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Information produced as part of task.
Outputs produced by the Task.
"""
resource_type = Field("TaskOutput", const=True)
type: fhirtypes.CodeableConceptType = Field(
...,
alias="type",
title="Label for output",
description="The name of the Output parameter.",
# if property is element of this resource.
element_property=True,
)
valueAddress: fhirtypes.AddressType = Field(
None,
alias="valueAddress",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueAge: fhirtypes.AgeType = Field(
None,
alias="valueAge",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueAnnotation: fhirtypes.AnnotationType = Field(
None,
alias="valueAnnotation",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueAttachment: fhirtypes.AttachmentType = Field(
None,
alias="valueAttachment",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueBase64Binary: fhirtypes.Base64Binary = Field(
None,
alias="valueBase64Binary",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueBase64Binary__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_valueBase64Binary",
title="Extension field for ``valueBase64Binary``.",
)
valueBoolean: bool = Field(
None,
alias="valueBoolean",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueBoolean__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueBoolean", title="Extension field for ``valueBoolean``."
)
valueCanonical: fhirtypes.Canonical = Field(
None,
alias="valueCanonical",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueCanonical__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueCanonical", title="Extension field for ``valueCanonical``."
)
valueCode: fhirtypes.Code = Field(
None,
alias="valueCode",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueCode__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueCode", title="Extension field for ``valueCode``."
)
valueCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="valueCodeableConcept",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueCoding: fhirtypes.CodingType = Field(
None,
alias="valueCoding",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueContactDetail: fhirtypes.ContactDetailType = Field(
None,
alias="valueContactDetail",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueContactPoint: fhirtypes.ContactPointType = Field(
None,
alias="valueContactPoint",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueContributor: fhirtypes.ContributorType = Field(
None,
alias="valueContributor",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueCount: fhirtypes.CountType = Field(
None,
alias="valueCount",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDataRequirement: fhirtypes.DataRequirementType = Field(
None,
alias="valueDataRequirement",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDate: fhirtypes.Date = Field(
None,
alias="valueDate",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueDate", title="Extension field for ``valueDate``."
)
valueDateTime: fhirtypes.DateTime = Field(
None,
alias="valueDateTime",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDateTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueDateTime", title="Extension field for ``valueDateTime``."
)
valueDecimal: fhirtypes.Decimal = Field(
None,
alias="valueDecimal",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDecimal__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueDecimal", title="Extension field for ``valueDecimal``."
)
valueDistance: fhirtypes.DistanceType = Field(
None,
alias="valueDistance",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDosage: fhirtypes.DosageType = Field(
None,
alias="valueDosage",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueDuration: fhirtypes.DurationType = Field(
None,
alias="valueDuration",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueExpression: fhirtypes.ExpressionType = Field(
None,
alias="valueExpression",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueHumanName: fhirtypes.HumanNameType = Field(
None,
alias="valueHumanName",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueId: fhirtypes.Id = Field(
None,
alias="valueId",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueId__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueId", title="Extension field for ``valueId``."
)
valueIdentifier: fhirtypes.IdentifierType = Field(
None,
alias="valueIdentifier",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueInstant: fhirtypes.Instant = Field(
None,
alias="valueInstant",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueInstant__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueInstant", title="Extension field for ``valueInstant``."
)
valueInteger: fhirtypes.Integer = Field(
None,
alias="valueInteger",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueInteger__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueInteger", title="Extension field for ``valueInteger``."
)
valueMarkdown: fhirtypes.Markdown = Field(
None,
alias="valueMarkdown",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueMarkdown__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueMarkdown", title="Extension field for ``valueMarkdown``."
)
valueMeta: fhirtypes.MetaType = Field(
None,
alias="valueMeta",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueMoney: fhirtypes.MoneyType = Field(
None,
alias="valueMoney",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueOid: fhirtypes.Oid = Field(
None,
alias="valueOid",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueOid__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueOid", title="Extension field for ``valueOid``."
)
valueParameterDefinition: fhirtypes.ParameterDefinitionType = Field(
None,
alias="valueParameterDefinition",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valuePeriod: fhirtypes.PeriodType = Field(
None,
alias="valuePeriod",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valuePositiveInt: fhirtypes.PositiveInt = Field(
None,
alias="valuePositiveInt",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valuePositiveInt__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_valuePositiveInt",
title="Extension field for ``valuePositiveInt``.",
)
valueQuantity: fhirtypes.QuantityType = Field(
None,
alias="valueQuantity",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueRange: fhirtypes.RangeType = Field(
None,
alias="valueRange",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueRatio: fhirtypes.RatioType = Field(
None,
alias="valueRatio",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueReference: fhirtypes.ReferenceType = Field(
None,
alias="valueReference",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueRelatedArtifact: fhirtypes.RelatedArtifactType = Field(
None,
alias="valueRelatedArtifact",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueSampledData: fhirtypes.SampledDataType = Field(
None,
alias="valueSampledData",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueSignature: fhirtypes.SignatureType = Field(
None,
alias="valueSignature",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueString: fhirtypes.String = Field(
None,
alias="valueString",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueString__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueString", title="Extension field for ``valueString``."
)
valueTime: fhirtypes.Time = Field(
None,
alias="valueTime",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueTime", title="Extension field for ``valueTime``."
)
valueTiming: fhirtypes.TimingType = Field(
None,
alias="valueTiming",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueTriggerDefinition: fhirtypes.TriggerDefinitionType = Field(
None,
alias="valueTriggerDefinition",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUnsignedInt: fhirtypes.UnsignedInt = Field(
None,
alias="valueUnsignedInt",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUnsignedInt__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_valueUnsignedInt",
title="Extension field for ``valueUnsignedInt``.",
)
valueUri: fhirtypes.Uri = Field(
None,
alias="valueUri",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueUri", title="Extension field for ``valueUri``."
)
valueUrl: fhirtypes.Url = Field(
None,
alias="valueUrl",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUrl__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueUrl", title="Extension field for ``valueUrl``."
)
valueUsageContext: fhirtypes.UsageContextType = Field(
None,
alias="valueUsageContext",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUuid: fhirtypes.Uuid = Field(
None,
alias="valueUuid",
title="Result of output",
description="The value of the Output parameter as a basic type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueUuid__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueUuid", title="Extension field for ``valueUuid``."
)
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_1260(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"value": [
"valueAddress",
"valueAge",
"valueAnnotation",
"valueAttachment",
"valueBase64Binary",
"valueBoolean",
"valueCanonical",
"valueCode",
"valueCodeableConcept",
"valueCoding",
"valueContactDetail",
"valueContactPoint",
"valueContributor",
"valueCount",
"valueDataRequirement",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueDistance",
"valueDosage",
"valueDuration",
"valueExpression",
"valueHumanName",
"valueId",
"valueIdentifier",
"valueInstant",
"valueInteger",
"valueMarkdown",
"valueMeta",
"valueMoney",
"valueOid",
"valueParameterDefinition",
"valuePeriod",
"valuePositiveInt",
"valueQuantity",
"valueRange",
"valueRatio",
"valueReference",
"valueRelatedArtifact",
"valueSampledData",
"valueSignature",
"valueString",
"valueTime",
"valueTiming",
"valueTriggerDefinition",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueUsageContext",
"valueUuid",
]
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class TaskRestriction(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Constraints on fulfillment tasks.
If the Task.focus is a request resource and the task is seeking fulfillment
(i.e. is asking for the request to be actioned), this element identifies
any limitations on what parts of the referenced request should be actioned.
"""
resource_type = Field("TaskRestriction", const=True)
period: fhirtypes.PeriodType = Field(
None,
alias="period",
title="When fulfillment sought",
description="Over what time-period is fulfillment sought.",
# if property is element of this resource.
element_property=True,
)
recipient: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="recipient",
title="For whom is fulfillment sought?",
description=(
"For requests that are targeted to more than on potential "
"recipient/target, for whom is fulfillment sought?"
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"Patient",
"Practitioner",
"PractitionerRole",
"RelatedPerson",
"Group",
"Organization",
],
)
repetitions: fhirtypes.PositiveInt = Field(
None,
alias="repetitions",
title="How many times to repeat",
description="Indicates the number of times the requested action should occur.",
# if property is element of this resource.
element_property=True,
)
repetitions__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_repetitions", title="Extension field for ``repetitions``."
)
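    # Illustrative sketch (assumed usage, not part of the generated code): a
    # restriction limiting fulfilment to two repetitions within a period might be
    # built roughly as
    #
    #     TaskRestriction(repetitions=2, period={"start": "2021-01-01", "end": "2021-06-30"})
    #
    # with ``recipient`` entries supplied as Reference values to Patient,
    # Practitioner, Organization, etc.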
| 36.181689 | 93 | 0.615138 | [
"BSD-3-Clause"
] | chgl/fhir.resources | fhir/resources/task.py | 76,669 | Python |
# -*- coding: utf-8 -*-
"""
Parser elements.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import copy
import logging
import re
from lxml.builder import E
import six
import types
log = logging.getLogger(__name__)
class ParseException(Exception):
"""Exception thrown by a ParserElement when it doesn't match input."""
def __init__(self, tokens, i=0, msg=None, element=None):
self.i = i
self.msg = msg
self.tokens = tokens
self.element = element
@classmethod
def wrap(cls, parse_exception):
        return cls(parse_exception.tokens, parse_exception.i, parse_exception.msg, parse_exception.element)
def __str__(self):
return ('%s (at token %d)' % (self.msg, self.i))
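# Illustrative sketch (assumed usage, mirroring the parse() methods below): a
# failed match surfaces as a ParseException whose message carries the token index.
#
#     try:
#         element.parse(tokens, 0)
#     except ParseException as err:
#         log.debug(str(err))   # e.g. "Did not satisfy condition (at token 3)"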
XML_SAFE_TAGS = {
'-LRB-': 'LRB',
'-RRB-': 'RRB',
'.': 'STOP',
',': 'COMMA',
':': 'COLON',
'$': 'DOLLAR',
'``': 'LQUOTE',
'\'\'': 'RQUOTE',
'PRP$': 'PRPPOS',
'WP$': 'WPPOS',
None: 'NONE'
}
def safe_name(name):
"""Make name safe for use in XML output."""
return XML_SAFE_TAGS.get(name, name)
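# For example, safe_name('PRP$') returns 'PRPPOS' and safe_name('.') returns
# 'STOP', while tags with no mapping (e.g. 'NN') are passed through unchanged.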
class BaseParserElement(object):
"""Abstract base parser element class."""
def __init__(self):
self.name = None
"""str or None: name for BaseParserElement. This is used to set the name of the Element when a result is found"""
self.actions = []
"""list(chemdataextractor.parse.actions): list of actions that will be applied to the results after parsing. Actions are functions with arguments of (tokens, start, result)"""
self.streamlined = False
self.condition = None
def set_action(self, *fns):
self.actions = fns
return self
def add_action(self, *fns):
self.actions += fns
return self
def with_condition(self, condition):
"""
Add a condition to the parser element. The condition must be a function that takes
        a match and returns True or False, i.e. a function which takes tuple(list(Element), int)
and returns bool. If the function evaluates True, the match is kept, while if the function
evaluates False, the match is discarded. The condition is executed after any other actions.
"""
self.condition = condition
return self
def copy(self):
new = copy.copy(self)
new.actions = self.actions[:]
return new
def set_name(self, name):
new = self.copy()
new.name = name
return new
def scan(self, tokens, max_matches=six.MAXSIZE, overlap=False):
"""
Scans for matches in given tokens.
:param list(tuple(string, string)) tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.
:param int max_matches: The maximum number of matches to look for. Default is the maximum size possible for a list.
:param bool overlap: Whether the found results are allowed to overlap. Default False.
:returns: A generator of the results found. Each result is a tuple with the first element being a list of elements found, and the second and third elements are the start and end indices representing the span of the result.
:rtype: generator(tuple(list(lxml.etree.Element), int, int))
"""
if not self.streamlined:
self.streamline()
matches = 0
i = 0
length = len(tokens)
while i < length and matches < max_matches:
try:
results, next_i = self.parse(tokens, i)
except ParseException as err:
# print(err.msg)
i += 1
else:
if next_i > i:
matches += 1
if len(results) == 1:
results = results[0]
yield results, i, next_i
if overlap:
i += 1
else:
i = next_i
else:
i += 1
def parse(self, tokens, i, actions=True):
"""
Parse given tokens and return results
:param tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.
:type tokens: list(tuple(string, string))
:param int i: The index at which to start scanning from
:param bool actions: Whether the actions attached to this element will be executed. Default True.
:returns: A tuple where the first element is a list of elements found (can be None if no results were found), and the last index investigated.
:rtype: tuple(list(Element) or None, int)
"""
start = i
try:
result, i = self._parse_tokens(tokens, i, actions)
except IndexError:
raise ParseException(tokens, i, 'IndexError', self)
if actions:
for action in self.actions:
action_result = action(tokens, start, result)
if action_result is not None:
result = action_result
if self.condition is not None:
if not self.condition(result):
raise ParseException(tokens, i, 'Did not satisfy condition', self)
return result, i
def try_parse(self, tokens, i):
return self.parse(tokens, i, actions=False)[1]
def _parse_tokens(self, tokens, i, actions=True):
"""
Implemented by subclasses, parses given tokens and returns the results
:param list(tuple(string, string)) tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.
:param int i: The index at which to start scanning from
:param bool actions: Whether the actions attached to this element will be executed. Default True.
:returns: A tuple where the first element is a list of elements found (can be None if no results were found), and the last index investigated.
:rtype: tuple(list(Element) or None, int)
"""
# TODO: abstractmethod?
return None, i
def streamline(self):
"""
Streamlines internal representations. e.g., if we have something like And(And(And(And(a), b), c), d), streamline this to And(a, b, c, d)
"""
self.streamlined = True
return self
def __add__(self, other):
if isinstance(other, six.text_type):
other = Word(other)
if not isinstance(other, BaseParserElement):
# raise?
return None
return And([self, other])
def __radd__(self, other):
if isinstance(other, six.text_type):
other = Word(other)
if not isinstance(other, BaseParserElement):
# raise?
return None
return other + self
def __or__(self, other):
if isinstance(other, six.text_type):
other = Word(other)
if not isinstance(other, BaseParserElement):
return None
return First([self, other])
def __ror__(self, other):
if isinstance(other, six.text_type):
other = Word(other)
if not isinstance(other, BaseParserElement):
return None
return other | self
def __xor__(self, other):
if isinstance(other, six.text_type):
other = Word(other)
if not isinstance(other, BaseParserElement):
return None
return Or([self, other])
def __rxor__(self, other):
if isinstance(other, six.text_type):
other = Word(other)
if not isinstance(other, BaseParserElement):
return None
return other ^ self
# def __and__(self, other):
# if isinstance(other, six.text_type):
# other = Word(other)
# if not isinstance(other, BaseParserElement):
# return None
# return Each([self, other])
#
# def __rand__(self, other):
# if isinstance(other, six.text_type):
# other = Word(other)
# if not isinstance(other, BaseParserElement):
# return None
# return other & self
def __invert__(self):
return Not(self)
def __call__(self, name):
"""
If a BaseParserElement is called, returns the BaseParserElement with its name set to the argument. The name is used to identify the results parsed by this element.
:param str name: Name to give BaseParserElement
:returns: A BaseParserElement with the given name
:rtype: BaseParserElement
"""
return self.set_name(name)
def hide(self):
return Hide(self)
class Any(BaseParserElement):
"""Always match a single token."""
def _parse_tokens(self, tokens, i, actions=True):
return [E(self.name or safe_name(tokens[i][1]), tokens[i][0])], i + 1
class Word(BaseParserElement):
"""Match token text exactly. Case-sensitive."""
def __init__(self, match):
super(Word, self).__init__()
self.match = match
def _parse_tokens(self, tokens, i, actions=True):
token_text = tokens[i][0]
if token_text == self.match:
return [E(self.name or safe_name(tokens[i][1]), token_text)], i + 1
raise ParseException(tokens, i, 'Expected %s, got %s' % (self.match, token_text), self)
class Tag(BaseParserElement):
"""Match tag exactly."""
def __init__(self, match):
super(Tag, self).__init__()
self.match = match
def _parse_tokens(self, tokens, i, actions=True):
token = tokens[i]
if token[1] == self.match:
return [E(self.name or safe_name(token[1]), token[0])], i + 1
raise ParseException(tokens, i, 'Expected %s, got %s' % (self.match, token[1]), self)
class IWord(Word):
"""Case-insensitive match token text."""
def __init__(self, match):
super(IWord, self).__init__(match.lower())
def _parse_tokens(self, tokens, i, actions=True):
token_text = tokens[i][0]
if token_text.lower() == self.match:
return [E(self.name or safe_name(tokens[i][1]), tokens[i][0])], i + 1
raise ParseException(tokens, i, 'Expected %s, got %s' % (self.match, tokens[i][0]), self)
class Regex(BaseParserElement):
"""Match token text with regular expression."""
def __init__(self, pattern, flags=0, group=None):
super(Regex, self).__init__()
if isinstance(pattern, six.string_types):
self.regex = re.compile(pattern, flags)
self.pattern = pattern
else:
self.regex = pattern
self.pattern = pattern.pattern
self.group = group
def _parse_tokens(self, tokens, i, actions=True):
token_text = tokens[i][0]
result = self.regex.search(token_text)
if result:
text = tokens[i][0] if self.group is None else result.group(self.group)
return [E(self.name or safe_name(tokens[i][1]), text)], i + 1
raise ParseException(tokens, i, 'Expected %s, got %s' % (self.pattern, token_text), self)
class Start(BaseParserElement):
"""Match at start of tokens."""
def __init__(self):
super(Start, self).__init__()
def _parse_tokens(self, tokens, i, actions=True):
if i != 0:
raise ParseException(tokens, i, 'Expected start of tokens', self)
return [], i
class End(BaseParserElement):
"""Match at end of tokens."""
def __init__(self):
super(End, self).__init__()
def _parse_tokens(self, tokens, i, actions=True):
if i < len(tokens):
raise ParseException(tokens, i, 'Expected end of tokens', self)
return [], i
class ParseExpression(BaseParserElement):
"""Abstract class for combining and post-processing parsed tokens."""
def __init__(self, exprs):
super(ParseExpression, self).__init__()
if isinstance(exprs, types.GeneratorType):
exprs = list(exprs)
if isinstance(exprs, six.text_type):
self.exprs = [Word(exprs)]
elif isinstance(exprs, collections.Sequence):
if all(isinstance(expr, six.text_type) for expr in exprs):
exprs = map(Word, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list(exprs)
except TypeError:
self.exprs = [exprs]
def __getitem__(self, i):
return self.exprs[i]
def append(self, other):
self.exprs.append(other)
return self
def copy(self):
ret = super(ParseExpression, self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
def streamline(self):
super(ParseExpression, self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested exprs from e.g. And(And(And(a, b), c), d) to And(a,b,c,d)
if len(self.exprs) == 2:
other = self.exprs[0]
if isinstance(other, self.__class__) and not other.actions and other.name is None:
self.exprs = other.exprs[:] + [self.exprs[1]]
other = self.exprs[-1]
if isinstance(other, self.__class__) and not other.actions and other.name is None:
self.exprs = self.exprs[:-1] + other.exprs[:]
return self
class And(ParseExpression):
"""
Match all in the given order.
    Equivalent to the plus operator '+'.
"""
def __init__(self, exprs):
super(And, self).__init__(exprs)
def _parse_tokens(self, tokens, i, actions=True):
results = []
for e in self.exprs:
exprresults, i = e.parse(tokens, i)
if exprresults is not None:
results.extend(exprresults)
return ([E(self.name, *results)] if self.name else results), i
def __iadd__(self, other):
if isinstance(other, six.text_type):
other = Word(other)
return self.append(other)
# class All(BaseParserElement):
# """
# All elements are present in any order. Other elements can be in between.
# This is primarily used for table parsing, to see if all required elements are found in a row of the category table.
# """
#
# def __init__(self, *exprs):
# super(All, self).__init__(*exprs)
#
# # i am not sure if this has the correct parent, but essentially, for every expression provided we have to do
# # something like a simple Match() not And() and then go on to the next expression with resetting the tokens to zero
# # if all expressions are found individually return the result.
#
# def _parse_tokens(self, tokens, i, actions=True):
# results = []
# for expression in self.exprs:
# for e in expression:
# exprresults, i = e.parse(tokens, i)
# if exprresults is not None:
# results.extend(exprresults)
# return ([E(self.name, *results)] if self.name else results), i
class Or(ParseExpression):
"""
Match the longest.
    Equivalent to the caret operator '^' (the pipe operator '|' creates a First instead).
"""
def _parse_tokens(self, tokens, i, actions=True):
furthest_exception_i = -1
furthest_match_i = -1
furthest_exception = None
for e in self.exprs:
try:
end_i = e.try_parse(tokens, i)
except ParseException as err:
if err.i > furthest_exception_i:
furthest_exception = err
furthest_exception_i = err.i
except IndexError:
if len(tokens) > furthest_exception_i:
furthest_exception = ParseException(tokens, len(tokens), '', self)
furthest_exception_i = len(tokens)
else:
if end_i > furthest_match_i:
furthest_match_i = end_i
furthest_match = e
if furthest_match_i < 0:
if furthest_exception is not None:
raise furthest_exception
else:
raise ParseException(tokens, i, 'No alternatives match', self)
# If a name is assigned to an Or, it replaces the name of the contained result
if self.name:
furthest_match = furthest_match.set_name(self.name)
result, result_i = furthest_match.parse(tokens, i, actions=actions)
# if self.name:
# result.tag = self.name
return result, result_i
def __ixor__(self, other):
if isinstance(other, six.text_type):
other = Word(other)
return self.append(other)
class First(ParseExpression):
"""Match the first."""
def __init__(self, exprs):
super(First, self).__init__(exprs)
def _parse_tokens(self, tokens, i, actions=True):
furthest_i = -1
furthest_exception = None
for e in self.exprs:
try:
result, result_i = e.parse(tokens, i, actions=True)
# If a name is assigned to a First, it replaces the name of the contained result
if self.name:
for e in result:
e.tag = self.name
return result, result_i
except ParseException as err:
if err.i > furthest_i:
furthest_exception = err
furthest_i = err.i
else:
if furthest_exception is not None:
raise furthest_exception
else:
raise ParseException(tokens, i, 'No alternatives match', self)
def __ior__(self, other):
if isinstance(other, six.text_type):
other = Word(other)
return self.append(other)
class ParseElementEnhance(BaseParserElement):
"""Abstract class for combining and post-processing parsed tokens."""
def __init__(self, expr):
super(ParseElementEnhance, self).__init__()
if isinstance(expr, six.text_type):
expr = Word(expr)
self.expr = expr
def _parse_tokens(self, tokens, i, actions=True):
if self.expr is not None:
return self.expr.parse(tokens, i)
else:
raise ParseException('', i, 'Error', self)
def streamline(self):
super(ParseElementEnhance, self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
class FollowedBy(ParseElementEnhance):
"""
Check ahead if matches.
Example::
Tn + FollowedBy('Neel temperature')
Tn will match only if followed by 'Neel temperature', but 'Neel temperature' will not be part of the output/tree
"""
def _parse_tokens(self, tokens, i, actions=True):
self.expr.try_parse(tokens, i)
return [], i
class Not(ParseElementEnhance):
"""
Check ahead to disallow a match with the given parse expression.
Example::
Tn + Not('some_string')
Tn will match if not followed by 'some_string'
"""
def _parse_tokens(self, tokens, i, actions=True):
try:
self.expr.try_parse(tokens, i)
except (ParseException, IndexError):
pass
else:
raise ParseException(tokens, i, 'Encountered disallowed token', self)
return [], i
class ZeroOrMore(ParseElementEnhance):
"""Optional repetition of zero or more of the given expression."""
def _parse_tokens(self, tokens, i, actions=True):
results = []
try:
results, i = self.expr.parse(tokens, i, actions)
while 1:
start_i = i
tmpresults, i = self.expr.parse(tokens, start_i, actions)
if tmpresults:
results.extend(tmpresults)
except (ParseException, IndexError):
pass
return ([E(self.name, *results)] if self.name else results), i
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression."""
def _parse_tokens(self, tokens, i, actions=True):
#print(tokens)
# must be at least one
results, i = self.expr.parse(tokens, i, actions)
try:
while 1:
start_i = i
tmpresults, i = self.expr.parse(tokens, start_i, actions)
if tmpresults:
results.extend(tmpresults)
except (ParseException, IndexError):
pass
return ([E(self.name, *results)] if self.name else results), i
class Optional(ParseElementEnhance):
"""
Can be present but doesn't need to be.
If present, will be added to the result/tree.
"""
def __init__(self, expr):
super(Optional, self).__init__(expr)
def _parse_tokens(self, tokens, i, actions=True):
results = []
try:
results, i = self.expr.parse(tokens, i, actions)
except (ParseException, IndexError):
pass
return results, i
class Group(ParseElementEnhance):
"""
    For nested tags; will group the argument and give it a label, preserving the original sub-tags.
    Otherwise, the default behaviour would be to rename the outermost tag in the argument.
    Usage: Group(some_text)('new_tag') where 'some_text' is a previously tagged expression
"""
def _parse_tokens(self, tokens, i, actions=True):
results, i = self.expr.parse(tokens, i, actions)
return ([E(self.name, *results)] if self.name else results), i
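# A minimal sketch of what Group preserves (the token/POS-tag values are illustrative):
# naming the inner elements and wrapping them with Group keeps the nested tags, whereas
# naming the And expression directly would rename only its outermost tag.
def _example_group():
    tokens = [('Tc', 'NN'), ('300', 'CD'), ('K', 'NN')]
    value = Regex(r'^\d+$')('value') + Word('K')('units')
    rule = Group(value)('measurement')
    # Each result is <measurement><value>300</value><units>K</units></measurement>
    return [result for result, start, end in rule.scan(tokens)]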
class SkipTo(ParseElementEnhance):
"""
    Skips to the next occurrence of the expression. Does not add that occurrence of the expression to the parse tree.
For example::
entities + SkipTo(entities)
will output ``entities`` only once. Whereas::
entities + SkipTo(entities) + entities
will output ``entities`` as well as the second occurrence of ``entities`` after an arbitrary number of tokens in between.
"""
def __init__(self, expr, include=False):
super(SkipTo, self).__init__(expr)
self.include = include
def _parse_tokens(self, tokens, i, actions=True):
start_i = i
tokens_length = len(tokens)
while i <= tokens_length:
try:
self.expr.parse(tokens, i, actions=False)
results = [E(safe_name(t[1]), t[0]) for t in tokens[start_i:i]]
if self.include:
match_result, i = self.expr.parse(tokens, i, actions)
if match_result:
results.extend(match_result)
return results, i
except (ParseException, IndexError):
i += 1
raise ParseException(tokens, i, '', self)
class Hide(ParseElementEnhance):
"""
Converter for ignoring the results of a parsed expression.
    The hidden expression does not appear in the generated XML element tree, but it is still part of the rule.
"""
def _parse_tokens(self, tokens, i, actions=True):
results, i = super(Hide, self)._parse_tokens(tokens, i)
return [], i
def hide(self):
return self
# Abbreviations
W = Word
I = IWord
R = Regex
T = Tag
H = Hide
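# A minimal usage sketch of the abbreviations above (the token/POS-tag values are
# illustrative, not the output of a real tagger): build a rule and scan a sentence.
def _example_rule_scan():
    tokens = [('The', 'DT'), ('Neel', 'NNP'), ('temperature', 'NN'),
              ('is', 'VBZ'), ('high', 'JJ')]
    rule = (I('neel') + I('temperature'))('property')
    matches = []
    for result, start, end in rule.scan(tokens):
        # result is an lxml element tagged 'property' wrapping the matched tokens
        matches.append((start, end, [child.text for child in result]))
    return matches  # [(1, 3, ['Neel', 'temperature'])]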
| 33.512821 | 230 | 0.599379 | [
"MIT"
] | ShuHuang/batterydatabase | chemdataextractor_batteries/chemdataextractor/parse/elements.py | 23,526 | Python |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, NamedTuple, Tuple
import numpy as np
import plotly.graph_objs as go
import torch
from torch import Tensor
class SamplesSummary(NamedTuple):
num_chain: int
num_samples: int
single_sample_sz: Tensor
def _samples_info(query_samples: Tensor) -> SamplesSummary:
return SamplesSummary(
num_chain=query_samples.size(0),
num_samples=query_samples.size(1),
# pyre-fixme[6]: For 3rd param expected `Tensor` but got `Size`.
single_sample_sz=query_samples.size()[2:],
)
def trace_helper(
x: List[List[List[int]]], y: List[List[List[float]]], labels: List[str]
) -> Tuple[List[go.Scatter], List[str]]:
"""
this function gets results prepared by a plot-related function and
outputs a tuple including plotly object and its corresponding legend.
"""
all_traces = []
num_chains = len(x)
num_indices = len(x[0])
for index in range(num_indices):
trace = []
for chain in range(num_chains):
trace.append(
go.Scatter(
x=x[chain][index],
y=y[chain][index],
mode="lines",
name="chain" + str(chain),
)
)
all_traces.append(trace)
return (all_traces, labels)
def plot_helper(
query_samples: Tensor, func: Callable
) -> Tuple[List[go.Scatter], List[str]]:
"""
    this function executes a plot-related function, passed as the input parameter func, and
    returns a tuple of the plotly scatter traces and their corresponding legend labels.
"""
num_chain, num_samples, single_sample_sz = _samples_info(query_samples)
x_axis, y_axis, all_labels = [], [], []
for chain in range(num_chain):
flattened_data = query_samples[chain].reshape(num_samples, -1)
numel = flattened_data[0].numel()
x_axis_data, y_axis_data, labels = [], [], []
for i in range(numel):
index = np.unravel_index(i, single_sample_sz)
data = flattened_data[:, i]
partial_label = f" for {list(index)}"
x_data, y_data = func(data.detach())
x_axis_data.append(x_data)
y_axis_data.append(y_data)
labels.append(partial_label)
x_axis.append(x_axis_data)
y_axis.append(y_axis_data)
all_labels.append(labels)
return trace_helper(x_axis, y_axis, all_labels[0])
def autocorr(x: Tensor) -> Tuple[List[int], List[float]]:
def autocorr_calculation(x: Tensor, lag: int) -> Tensor:
y1 = x[: (len(x) - lag)]
y2 = x[lag:]
sum_product = (
(y1 - (x.mean(dim=0).expand(y1.size())))
* (y2 - (x.mean(dim=0).expand(y2.size())))
).sum(0)
return sum_product / ((len(x) - lag) * torch.var(x, dim=0))
max_lag = x.size(0)
y_axis_data = [autocorr_calculation(x, lag).item() for lag in range(max_lag)]
x_axis_data = list(range(max_lag))
return (x_axis_data, y_axis_data)
def trace_plot(x: Tensor) -> Tuple[List[int], Tensor]:
return (list(range(x.size(0))), x)
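def _example_trace_and_autocorr():
    # A minimal sketch (assumption: samples are shaped [num_chains, num_samples, ...]);
    # random draws stand in for real posterior samples of a 3-dimensional parameter.
    samples = torch.randn(2, 100, 3)
    traces, labels = plot_helper(samples, trace_plot)
    # traces[i][chain] is a go.Scatter tracing component i of the parameter in that chain
    lags, acf = autocorr(samples[0, :, 0])
    return traces, labels, lags, acf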
| 32.245098 | 87 | 0.620249 | [
"MIT"
] | facebookresearch/beanmachine | src/beanmachine/ppl/diagnostics/common_plots.py | 3,289 | Python |
import typing
from ParadoxTrading.Fetch.Crypto.FetchBase import FetchBase
class FetchDepth(FetchBase):
def __init__(
self, _psql_host='localhost', _psql_dbname='data',
_psql_user='', _psql_password='', _cache_path='cache'
):
super().__init__(
_psql_host=_psql_host, _psql_dbname=_psql_dbname,
_psql_user=_psql_user, _psql_password=_psql_password,
_cache_path=_cache_path
)
self.table_key: str = '{}_rs_{}_depth'
self.columns: typing.List[str] = []
for i in range(10):
self.columns.append('askprice{}'.format(i))
self.columns.append('askamount{}'.format(i))
for i in range(10):
self.columns.append('bidprice{}'.format(i))
self.columns.append('bidamount{}'.format(i))
self.columns.append('datetime')
| 32.407407 | 65 | 0.613714 | [
"MIT"
] | gsamarakoon/ParadoxTrading | ParadoxTrading/Fetch/Crypto/FetchDepth.py | 875 | Python |
# Copyright (C) 2014-2018 DLR
#
# All rights reserved. This program and the accompanying materials are made
# available under the terms of the Eclipse Public License v1.0 which
# accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Annika Wollschlaeger <[email protected]>
# Franz Steinmetz <[email protected]>
# Lukas Becker <[email protected]>
# Matthias Buettner <[email protected]>
# Rico Belder <[email protected]>
# Sebastian Brunner <[email protected]>
"""
.. module:: barrier_concurrency_state
:synopsis: A module to represent a barrier concurrency state for the state machine
"""
from builtins import str
import traceback
from gtkmvc3.observable import Observable
from rafcon.core.custom_exceptions import RecoveryModeException
from rafcon.core.state_elements.logical_port import Outcome
from rafcon.core.decorators import lock_state_machine
from rafcon.core.states.concurrency_state import ConcurrencyState
from rafcon.core.states.state import StateExecutionStatus
from rafcon.core.states.execution_state import ExecutionState
from rafcon.core.states.container_state import ContainerState
from rafcon.core.constants import UNIQUE_DECIDER_STATE_ID
from rafcon.utils import log
logger = log.get_logger(__name__)
class BarrierConcurrencyState(ConcurrencyState):
""" The barrier concurrency holds a list of states that are executed in parallel. It waits until all states
finished their execution before it returns.
Note: In the backward execution case the decider state does not have to be backward executed, as it only
decides the outcome of the barrier concurrency state. In a backward execution the logic flow obviously already
exists.
The order of history items for the concurrency state is:
Call - Concurrency - Return
and for the backward case:
Return - Concurrency - Call
For the children of the concurrency state the history items are:
In the forward case:
- Call: Before calling the child
- Return: After executing the child
In the backward case:
- Pop Return: Before backward executing the child
- Pop Call: After backward executing the child
The decider state is not considered in the backward execution case.
"""
yaml_tag = u'!BarrierConcurrencyState'
def __init__(self, name=None, state_id=None, input_data_ports=None, output_data_ports=None,
income=None, outcomes=None,
states=None, transitions=None, data_flows=None, start_state_id=None, scoped_variables=None,
decider_state=None, load_from_storage=False):
self.__init_running = True
states = {} if states is None else states
if decider_state is not None:
if isinstance(decider_state, DeciderState):
decider_state._state_id = UNIQUE_DECIDER_STATE_ID
states[UNIQUE_DECIDER_STATE_ID] = decider_state
else:
logger.warning("Argument decider_state has to be instance of DeciderState not {}".format(decider_state))
if not load_from_storage and UNIQUE_DECIDER_STATE_ID not in states:
states[UNIQUE_DECIDER_STATE_ID] = DeciderState(name='Decider', state_id=UNIQUE_DECIDER_STATE_ID)
# TODO figure out how to solve those two clinch better of copy/add state and already existing transitions #1 #2
ConcurrencyState.__init__(self, name, state_id, input_data_ports, output_data_ports, income, outcomes,
states, transitions, data_flows, start_state_id, scoped_variables)
for state_id, state in self.states.items():
if state_id != UNIQUE_DECIDER_STATE_ID:
for outcome in self.states[state_id].outcomes.values():
# TODO figure out how to solve this clinch better #3
match = [t.from_state == state_id and t.from_outcome == outcome.outcome_id for t in self.transitions.values()]
if not outcome.outcome_id < 0 and not any(match):
try:
self.add_transition(from_state_id=state_id, from_outcome=outcome.outcome_id,
to_state_id=UNIQUE_DECIDER_STATE_ID, to_outcome=None)
except (ValueError, RecoveryModeException) as e:
if "transition origin already connected to another transition" not in str(e):
logger.error("default decider state transition could not be added: {}".format(e))
raise
self.__init_running = False
def run(self):
""" This defines the sequence of actions that are taken when the barrier concurrency state is executed
:return:
"""
logger.debug("Starting execution of {0}{1}".format(self, " (backwards)" if self.backward_execution else ""))
self.setup_run()
# data to be accessed by the decider state
child_errors = {}
final_outcomes_dict = {}
decider_state = self.states[UNIQUE_DECIDER_STATE_ID]
try:
concurrency_history_item = self.setup_forward_or_backward_execution()
self.start_child_states(concurrency_history_item, decider_state)
# print("bcs1")
#######################################################
# wait for all child threads to finish
#######################################################
for history_index, state in enumerate(self.states.values()):
# skip the decider state
if state is not decider_state:
self.join_state(state, history_index, concurrency_history_item)
self.add_state_execution_output_to_scoped_data(state.output_data, state)
self.update_scoped_variables_with_output_dictionary(state.output_data, state)
# save the errors of the child state executions for the decider state
if 'error' in state.output_data:
child_errors[state.state_id] = (state.name, state.output_data['error'])
final_outcomes_dict[state.state_id] = (state.name, state.final_outcome)
# print("bcs2")
#######################################################
# handle backward execution case
#######################################################
if self.backward_execution:
# print("bcs2.1.")
return self.finalize_backward_execution()
else:
# print("bcs2.2.")
self.backward_execution = False
# print("bcs3")
#######################################################
# execute decider state
#######################################################
decider_state_error = self.run_decider_state(decider_state, child_errors, final_outcomes_dict)
# print("bcs4")
#######################################################
# handle no transition
#######################################################
transition = self.get_transition_for_outcome(decider_state, decider_state.final_outcome)
if transition is None:
# final outcome is set here
transition = self.handle_no_transition(decider_state)
# if the transition is still None, then the child_state was preempted or aborted, in this case return
decider_state.state_execution_status = StateExecutionStatus.INACTIVE
# print("bcs5")
if transition is None:
self.output_data["error"] = RuntimeError("state aborted")
else:
if decider_state_error:
self.output_data["error"] = decider_state_error
self.final_outcome = self.outcomes[transition.to_outcome]
# print("bcs6")
return self.finalize_concurrency_state(self.final_outcome)
except Exception as e:
logger.error("{0} had an internal error: {1}\n{2}".format(self, str(e), str(traceback.format_exc())))
self.output_data["error"] = e
self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
return self.finalize(Outcome(-1, "aborted"))
def run_decider_state(self, decider_state, child_errors, final_outcomes_dict):
""" Runs the decider state of the barrier concurrency state. The decider state decides on which outcome the
barrier concurrency is left.
:param decider_state: the decider state of the barrier concurrency state
:param child_errors: error of the concurrent branches
:param final_outcomes_dict: dictionary of all outcomes of the concurrent branches
:return:
"""
decider_state.state_execution_status = StateExecutionStatus.ACTIVE
# forward the decider specific data
decider_state.child_errors = child_errors
decider_state.final_outcomes_dict = final_outcomes_dict
# standard state execution
decider_state.input_data = self.get_inputs_for_state(decider_state)
decider_state.output_data = self.create_output_dictionary_for_state(decider_state)
decider_state.start(self.execution_history, backward_execution=False)
decider_state.join()
decider_state_error = None
if decider_state.final_outcome.outcome_id == -1:
if 'error' in decider_state.output_data:
decider_state_error = decider_state.output_data['error']
# standard output data processing
self.add_state_execution_output_to_scoped_data(decider_state.output_data, decider_state)
self.update_scoped_variables_with_output_dictionary(decider_state.output_data, decider_state)
return decider_state_error
def _check_transition_validity(self, check_transition):
""" Transition of BarrierConcurrencyStates must least fulfill the condition of a ContainerState.
Start transitions are forbidden in the ConcurrencyState.
:param check_transition: the transition to check for validity
:return:
"""
valid, message = super(BarrierConcurrencyState, self)._check_transition_validity(check_transition)
if not valid:
return False, message
# Only the following transitions are allowed in barrier concurrency states:
# - Transitions from the decider state to the parent state\n"
# - Transitions from not-decider states to the decider state\n"
# - Transitions from not_decider states from aborted/preempted outcomes to the
# aborted/preempted outcome of the parent
from_state_id = check_transition.from_state
to_state_id = check_transition.to_state
from_outcome_id = check_transition.from_outcome
to_outcome_id = check_transition.to_outcome
if from_state_id == UNIQUE_DECIDER_STATE_ID:
if to_state_id != self.state_id:
return False, "Transition from the decider state must go to the parent state"
else:
if to_state_id != UNIQUE_DECIDER_STATE_ID:
if from_outcome_id not in [-2, -1] or to_outcome_id not in [-2, -1]:
return False, "Transition from this state must go to the decider state. The only exception are " \
"transition from aborted/preempted to the parent aborted/preempted outcomes"
return True, message
@lock_state_machine
def add_state(self, state, storage_load=False):
"""Overwrite the parent class add_state method
Add automatic transition generation for the decider_state.
:param state: The state to be added
:return:
"""
state_id = super(BarrierConcurrencyState, self).add_state(state)
if not storage_load and not self.__init_running and not state.state_id == UNIQUE_DECIDER_STATE_ID:
# the transitions must only be created for the initial add_state call and not during each load procedure
for o_id, o in list(state.outcomes.items()):
if not o_id == -1 and not o_id == -2:
self.add_transition(state.state_id, o_id, self.states[UNIQUE_DECIDER_STATE_ID].state_id, None)
return state_id
@ContainerState.states.setter
@lock_state_machine
@Observable.observed
def states(self, states):
""" Overwrite the setter of the container state base class as special handling for the decider state is needed.
:param states: the dictionary of new states
:raises exceptions.TypeError: if the states parameter is not of type dict
"""
# First safely remove all existing states (recursively!), as they will be replaced
state_ids = list(self.states.keys())
for state_id in state_ids:
            # Do not remove the decider state if the new list of states doesn't contain an alternative one
if state_id == UNIQUE_DECIDER_STATE_ID and UNIQUE_DECIDER_STATE_ID not in states:
continue
self.remove_state(state_id)
if states is not None:
if not isinstance(states, dict):
raise TypeError("states must be of type dict")
            # Ensure that the decider state is added first, as transitions to this state will automatically be
# created when adding further states
decider_state = states.pop(UNIQUE_DECIDER_STATE_ID, None)
if decider_state is not None:
self.add_state(decider_state)
for state in states.values():
self.add_state(state)
def remove_state(self, state_id, recursive=True, force=False, destroy=True):
""" Overwrite the parent class remove state method by checking if the user tries to delete the decider state
:param state_id: the id of the state to remove
:param recursive: a flag to indicate a recursive disassembling of all substates
        :param force: a flag to indicate forceful deletion of all states (important for the decider state in the
barrier concurrency state)
:param destroy: a flag which indicates if the state should not only be disconnected from the state but also
destroyed, including all its state elements
:raises exceptions.AttributeError: if the state_id parameter is the decider state
"""
if state_id == UNIQUE_DECIDER_STATE_ID and force is False:
raise AttributeError("You are not allowed to delete the decider state.")
else:
return ContainerState.remove_state(self, state_id, recursive=recursive, force=force, destroy=destroy)
@classmethod
def from_dict(cls, dictionary):
states = None if 'states' not in dictionary else dictionary['states']
transitions = dictionary['transitions']
data_flows = dictionary['data_flows']
state = cls(name=dictionary['name'],
state_id=dictionary['state_id'],
input_data_ports=dictionary['input_data_ports'],
output_data_ports=dictionary['output_data_ports'],
outcomes=dictionary['outcomes'],
states=None,
transitions=transitions if states else None,
data_flows=data_flows if states else None,
scoped_variables=dictionary['scoped_variables'],
load_from_storage=True)
try:
state.description = dictionary['description']
except (TypeError, KeyError): # (Very) old state machines do not have a description field
import traceback
formatted_lines = traceback.format_exc().splitlines()
logger.warning("Erroneous description for state '{1}': {0}".format(formatted_lines[-1], dictionary['name']))
if states:
return state
else:
return state, dictionary['transitions'], dictionary['data_flows']
class DeciderState(ExecutionState):
"""A class to represent a state for deciding the exit of barrier concurrency state.
This type of ExecutionState has initial always the UNIQUE_DECIDER_STATE_ID.
"""
yaml_tag = u'!DeciderState'
def __init__(self, name=None, state_id=None, input_data_ports=None, output_data_ports=None,
income=None, outcomes=None,
path=None, filename=None, check_path=True):
if state_id is None:
state_id = UNIQUE_DECIDER_STATE_ID
ExecutionState.__init__(self, name, state_id, input_data_ports, output_data_ports, income, outcomes, path,
filename, check_path)
self.child_errors = {}
self.final_outcomes_dict = {}
def get_outcome_for_state_name(self, name):
""" Returns the final outcome of the child state specified by name.
        Note: This is a utility function that is used by the programmer to make a decision based on the final outcome
        of its child states. A state is not uniquely specified by its name, but as the programmer normally does not
        want to use state-ids in their code, this utility function was defined.
:param name: The name of the state to get the final outcome for.
:return:
"""
return_value = None
for state_id, name_outcome_tuple in self.final_outcomes_dict.items():
if name_outcome_tuple[0] == name:
return_value = name_outcome_tuple[1]
break
return return_value
def get_outcome_for_state_id(self, state_id):
""" Returns the final outcome of the child state specified by the state_id.
:param state_id: The id of the state to get the final outcome for.
:return:
"""
return_value = None
for s_id, name_outcome_tuple in self.final_outcomes_dict.items():
if s_id == state_id:
return_value = name_outcome_tuple[1]
break
return return_value
def get_errors_for_state_name(self, name):
""" Returns the error message of the child state specified by name.
        Note: This is a utility function that is used by the programmer to make a decision based on the final outcome
        of its child states. A state is not uniquely specified by its name, but as the programmer normally does not
        want to use state-ids in their code, this utility function was defined.
:param name: The name of the state to get the error message for
:return:
"""
return_value = None
for state_id, name_outcome_tuple in self.child_errors.items():
if name_outcome_tuple[0] == name:
return_value = name_outcome_tuple[1]
break
return return_value
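# A minimal sketch of the logic a decider script might implement (the state name
# 'Fetch data' and the returned outcome ids are purely illustrative; in practice the
# decider's behaviour lives in an external script, so this only sketches how the
# helpers above are typically queried to pick the barrier's outcome).
def _example_decider_logic(decider_state):
    error = decider_state.get_errors_for_state_name('Fetch data')
    if error is not None:
        return 1  # leave the barrier via a failure outcome
    outcome = decider_state.get_outcome_for_state_name('Fetch data')
    return 0 if outcome is not None and outcome.outcome_id >= 0 else 1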
| 48.077889 | 130 | 0.646668 | [
"EPL-1.0"
] | LJMP/RAFCON | source/rafcon/core/states/barrier_concurrency_state.py | 19,135 | Python |
from __future__ import absolute_import, print_function, unicode_literals
import pickle
from builtins import dict, str
import os
import re
import boto3
import logging
import botocore.session
from time import sleep
import matplotlib as mpl
from numpy import median, arange, array
from indra.tools.reading.util.reporter import Reporter
from indra.util.get_version import get_git_info
mpl.use('Agg')
from matplotlib import pyplot as plt
from datetime import datetime, timedelta
from indra.literature import elsevier_client as ec
from indra.literature.elsevier_client import _ensure_api_keys
from indra.util.aws import get_job_log, tag_instance, get_batch_command
from indra.util.nested_dict import NestedDict
bucket_name = 'bigmech'
logger = logging.getLogger('aws_reading')
class BatchReadingError(Exception):
pass
def wait_for_complete(queue_name, job_list=None, job_name_prefix=None,
poll_interval=10, idle_log_timeout=None,
kill_on_log_timeout=False, stash_log_method=None,
tag_instances=False, result_record=None):
"""Return when all jobs in the given list finished.
    If no job list is given, return when all jobs in the queue have finished.
Parameters
----------
queue_name : str
The name of the queue to wait for completion.
job_list : Optional[list(dict)]
A list of jobID-s in a dict, as returned by the submit function.
Example: [{'jobId': 'e6b00f24-a466-4a72-b735-d205e29117b4'}, ...]
If not given, this function will return if all jobs completed.
job_name_prefix : Optional[str]
A prefix for the name of the jobs to wait for. This is useful if the
explicit job list is not available but filtering is needed.
poll_interval : Optional[int]
The time delay between API calls to check the job statuses.
idle_log_timeout : Optional[int] or None
If not None, then track the logs of the active jobs, and if new output
is not produced after `idle_log_timeout` seconds, a warning is printed.
If `kill_on_log_timeout` is set to True, the job will also be
terminated.
kill_on_log_timeout : Optional[bool]
If True, and if `idle_log_timeout` is set, jobs will be terminated
after timeout. This has no effect if `idle_log_timeout` is None.
Default is False.
stash_log_method : Optional[str]
Select a method to store the job logs, either 's3' or 'local'. If no
method is specified, the logs will not be loaded off of AWS. If 's3' is
specified, then `job_name_prefix` must also be given, as this will
indicate where on s3 to store the logs.
tag_instances : bool
        Default is False. If True, apply tags to the instances. This is today
typically done by each job, so in most cases this should not be needed.
result_record : dict
A dict which will be modified in place to record the results of the job.
"""
if stash_log_method == 's3' and job_name_prefix is None:
raise Exception('A job_name_prefix is required to post logs on s3.')
start_time = datetime.now()
if job_list is None:
job_id_list = []
else:
job_id_list = [job['jobId'] for job in job_list]
def get_jobs_by_status(status, job_id_filter=None, job_name_prefix=None):
res = batch_client.list_jobs(jobQueue=queue_name,
jobStatus=status, maxResults=10000)
jobs = res['jobSummaryList']
if job_name_prefix:
jobs = [job for job in jobs if
job['jobName'].startswith(job_name_prefix)]
if job_id_filter:
jobs = [job_def for job_def in jobs
if job_def['jobId'] in job_id_filter]
return jobs
job_log_dict = {}
def check_logs(job_defs):
"""Updates teh job_log_dict."""
stalled_jobs = set()
# Check the status of all the jobs we're tracking.
for job_def in job_defs:
try:
# Get the logs for this job.
log_lines = get_job_log(job_def, write_file=False)
# Get the job id.
jid = job_def['jobId']
now = datetime.now()
if jid not in job_log_dict.keys():
# If the job is new...
logger.info("Adding job %s to the log tracker at %s."
% (jid, now))
job_log_dict[jid] = {'log': log_lines,
'last change time': now}
elif len(job_log_dict[jid]['log']) == len(log_lines):
# If the job log hasn't changed, announce as such, and check
# to see if it has been the same for longer than stall time.
check_dt = now - job_log_dict[jid]['last change time']
logger.warning(('Job \'%s\' has not produced output for '
'%d seconds.')
% (job_def['jobName'], check_dt.seconds))
if check_dt.seconds > idle_log_timeout:
logger.warning("Job \'%s\' has stalled."
% job_def['jobName'])
stalled_jobs.add(jid)
else:
# If the job is known, and the logs have changed, update the
# "last change time".
old_log = job_log_dict[jid]['log']
old_log += log_lines[len(old_log):]
job_log_dict[jid]['last change time'] = now
except Exception as e:
# Sometimes due to sync et al. issues, a part of this will fail.
# Such things are usually transitory issues so we keep trying.
logger.error("Failed to check log for: %s" % str(job_def))
logger.exception(e)
# Pass up the set of job id's for stalled jobs.
return stalled_jobs
# Don't start watching jobs added after this command was initialized.
observed_job_def_dict = {}
def get_dict_of_job_tuples(job_defs):
return {jdef['jobId']: [(k, jdef[k]) for k in ['jobName', 'jobId']]
for jdef in job_defs}
batch_client = boto3.client('batch')
if tag_instances:
ecs_cluster_name = get_ecs_cluster_for_queue(queue_name, batch_client)
terminate_msg = 'Job log has stalled for at least %f minutes.'
terminated_jobs = set()
stashed_id_set = set()
while True:
pre_run = []
for status in ('SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING'):
pre_run += get_jobs_by_status(status, job_id_list, job_name_prefix)
running = get_jobs_by_status('RUNNING', job_id_list, job_name_prefix)
failed = get_jobs_by_status('FAILED', job_id_list, job_name_prefix)
done = get_jobs_by_status('SUCCEEDED', job_id_list, job_name_prefix)
observed_job_def_dict.update(get_dict_of_job_tuples(pre_run + running))
logger.info('(%d s)=(pre: %d, running: %d, failed: %d, done: %d)' %
((datetime.now() - start_time).seconds, len(pre_run),
len(running), len(failed), len(done)))
# Check the logs for new output, and possibly terminate some jobs.
stalled_jobs = check_logs(running)
if idle_log_timeout is not None:
if kill_on_log_timeout:
# Keep track of terminated jobs so we don't send a terminate
# message twice.
for jid in stalled_jobs - terminated_jobs:
batch_client.terminate_job(
jobId=jid,
reason=terminate_msg % (idle_log_timeout/60.0)
)
logger.info('Terminating %s.' % jid)
terminated_jobs.add(jid)
if job_id_list:
if (len(failed) + len(done)) == len(job_id_list):
ret = 0
break
else:
if (len(failed) + len(done) > 0) and \
(len(pre_run) + len(running) == 0):
ret = 0
break
if tag_instances:
tag_instances_on_cluster(ecs_cluster_name)
# Stash the logs of things that have finished so far. Note that jobs
# terminated in this round will not be picked up until the next round.
if stash_log_method:
stash_logs(observed_job_def_dict, done, failed, queue_name,
stash_log_method, job_name_prefix,
start_time.strftime('%Y%m%d_%H%M%S'),
ids_stashed=stashed_id_set)
sleep(poll_interval)
# Pick up any stragglers
if stash_log_method:
stash_logs(observed_job_def_dict, done, failed, queue_name,
stash_log_method, job_name_prefix,
start_time.strftime('%Y%m%d_%H%M%S'),
ids_stashed=stashed_id_set)
result_record['terminated'] = terminated_jobs
result_record['failed'] = failed
result_record['succeeded'] = done
return ret
def _get_job_ids_to_stash(job_def_list, stashed_id_set):
return [job_def['jobId'] for job_def in job_def_list
if job_def['jobId'] not in stashed_id_set]
def stash_logs(job_defs, success_jobs, failure_jobs, queue_name, method='local',
job_name_prefix=None, tag='stash', ids_stashed=None):
if ids_stashed is None:
ids_stashed = set()
success_ids = _get_job_ids_to_stash(success_jobs, ids_stashed)
failure_ids = _get_job_ids_to_stash(failure_jobs, ids_stashed)
if method == 's3':
s3_client = boto3.client('s3')
def stash_log(log_str, name_base):
name = '%s_%s.log' % (name_base, tag)
s3_client.put_object(
Bucket=bucket_name,
Key='reading_results/%s/logs/%s/%s' % (
job_name_prefix,
queue_name,
name),
Body=log_str
)
elif method == 'local':
if job_name_prefix is None:
job_name_prefix = 'batch_%s' % tag
dirname = '%s_job_logs' % job_name_prefix
os.mkdir(dirname)
def stash_log(log_str, name_base):
with open(os.path.join(dirname, name_base + '.log'), 'w') as f:
f.write(log_str)
else:
raise ValueError('Invalid method: %s' % method)
for jobId, job_def_tpl in job_defs.items():
if jobId not in success_ids and jobId not in failure_ids:
continue # Logs aren't done and ready to be loaded.
try:
job_def = dict(job_def_tpl)
lines = get_job_log(job_def, write_file=False)
if lines is None:
logger.warning("No logs found for %s." % job_def['jobName'])
continue
log_str = ''.join(lines)
base_name = job_def['jobName']
if job_def['jobId'] in success_ids:
base_name += '/SUCCESS'
elif job_def['jobId'] in failure_ids:
base_name += '/FAILED'
else:
logger.error("Job cannot be logged unless completed.")
continue
logger.info('Stashing ' + base_name)
stash_log(log_str, base_name)
except Exception as e:
logger.error("Failed to save logs for: %s" % str(job_def_tpl))
logger.exception(e)
ids_stashed |= {jid for jids in [success_ids, failure_ids] for jid in jids}
return
def get_ecs_cluster_for_queue(queue_name, batch_client=None):
"""Get the name of the ecs cluster using the batch client."""
if batch_client is None:
batch_client = boto3.client('batch')
queue_resp = batch_client.describe_job_queues(jobQueues=[queue_name])
if len(queue_resp['jobQueues']) == 1:
queue = queue_resp['jobQueues'][0]
else:
raise BatchReadingError('Error finding queue with name %s.'
% queue_name)
compute_env_names = queue['computeEnvironmentOrder']
if len(compute_env_names) == 1:
compute_env_name = compute_env_names[0]['computeEnvironment']
else:
raise BatchReadingError('Error finding the compute environment name '
'for %s.' % queue_name)
compute_envs = batch_client.describe_compute_environments(
computeEnvironments=[compute_env_name]
)['computeEnvironments']
if len(compute_envs) == 1:
compute_env = compute_envs[0]
else:
raise BatchReadingError("Error getting compute environment %s for %s. "
"Got %d enviornments instead of 1."
% (compute_env_name, queue_name,
len(compute_envs)))
ecs_cluster_name = os.path.basename(compute_env['ecsClusterArn'])
return ecs_cluster_name
def tag_instances_on_cluster(cluster_name, project='cwc'):
"""Adds project tag to untagged instances in a given cluster.
Parameters
----------
cluster_name : str
The name of the AWS ECS cluster in which running instances
should be tagged.
project : str
The name of the project to tag instances with.
"""
# Get the relevent instance ids from the ecs cluster
ecs = boto3.client('ecs')
task_arns = ecs.list_tasks(cluster=cluster_name)['taskArns']
if not task_arns:
return
tasks = ecs.describe_tasks(cluster=cluster_name, tasks=task_arns)['tasks']
container_instances = ecs.describe_container_instances(
cluster=cluster_name,
containerInstances=[task['containerInstanceArn'] for task in tasks]
)['containerInstances']
ec2_instance_ids = [ci['ec2InstanceId'] for ci in container_instances]
# Instantiate each instance to tag as a resource and create project tag
for instance_id in ec2_instance_ids:
tag_instance(instance_id, project=project)
return
@_ensure_api_keys('remote batch reading', [])
def get_elsevier_api_keys():
return [
{'name': ec.API_KEY_ENV_NAME,
'value': ec.ELSEVIER_KEYS.get('X-ELS-APIKey', '')},
{'name': ec.INST_KEY_ENV_NAME,
'value': ec.ELSEVIER_KEYS.get('X-ELS-Insttoken', '')},
]
def get_environment():
# Get AWS credentials
# http://stackoverflow.com/questions/36287720/boto3-get-credentials-dynamically
session = botocore.session.get_session()
access_key = session.get_credentials().access_key
secret_key = session.get_credentials().secret_key
# Get the Elsevier keys from the Elsevier client
environment_vars = [
{'name': 'AWS_ACCESS_KEY_ID',
'value': access_key},
{'name': 'AWS_SECRET_ACCESS_KEY',
'value': secret_key}
]
environment_vars += get_elsevier_api_keys()
# Only include values that are not empty.
return [var_dict for var_dict in environment_vars
if var_dict['value'] and var_dict['name']]
class Submitter(object):
_s3_input_name = NotImplemented
_purpose = NotImplemented
_job_queue = NotImplemented
_job_def = NotImplemented
def __init__(self, basename, readers, project_name=None, **options):
self.basename = basename
if 'all' in readers:
self.readers = ['reach', 'sparser']
else:
self.readers = readers
self.project_name = project_name
self.job_list = None
self.options=options
self.ids_per_job = None
return
def set_options(self, **kwargs):
"""Set the options of reading job."""
# This should be more specifically implemented in a child class.
self.options = kwargs
return
def _make_command(self, start_ix, end_ix):
job_name = '%s_%d_%d' % (self.basename, start_ix, end_ix)
cmd = self._get_base(job_name, start_ix, end_ix) + ['-r'] + self.readers
cmd += self._get_extensions()
for arg in cmd:
if not isinstance(arg, str):
logger.warning("Argument of command is not a string: %s"
% repr(arg))
return job_name, cmd
def _get_base(self, job_name, start_ix, end_ix):
raise NotImplementedError
def _get_extensions(self):
return []
def submit_reading(self, input_fname, start_ix, end_ix, ids_per_job,
num_tries=2):
# stash this for later.
self.ids_per_job = ids_per_job
# Upload the pmid_list to Amazon S3
id_list_key = 'reading_results/%s/%s' % (self.basename,
self._s3_input_name)
s3_client = boto3.client('s3')
s3_client.upload_file(input_fname, bucket_name, id_list_key)
# If no end index is specified, read all the PMIDs
if end_ix is None:
with open(input_fname, 'rt') as f:
lines = f.readlines()
end_ix = len(lines)
if start_ix is None:
start_ix = 0
# Get environment variables
environment_vars = get_environment()
# Iterate over the list of PMIDs and submit the job in chunks
batch_client = boto3.client('batch', region_name='us-east-1')
job_list = []
for job_start_ix in range(start_ix, end_ix, ids_per_job):
job_end_ix = job_start_ix + ids_per_job
if job_end_ix > end_ix:
job_end_ix = end_ix
job_name, cmd = self._make_command(job_start_ix, job_end_ix)
command_list = get_batch_command(cmd, purpose=self._purpose,
project=self.project_name)
logger.info('Command list: %s' % str(command_list))
job_info = batch_client.submit_job(
jobName=job_name,
jobQueue=self._job_queue,
jobDefinition=self._job_def,
containerOverrides={
'environment': environment_vars,
'command': command_list},
retryStrategy={'attempts': num_tries}
)
logger.info("submitted...")
job_list.append({'jobId': job_info['jobId']})
self.job_list = job_list
return job_list
def watch_and_wait(self, poll_interval=10, idle_log_timeout=None,
kill_on_timeout=False, stash_log_method=None,
tag_instances=False, **kwargs):
"""This provides shortcut access to the wait_for_complete_function."""
return wait_for_complete(self._job_queue, job_list=self.job_list,
job_name_prefix=self.basename,
poll_interval=poll_interval,
idle_log_timeout=idle_log_timeout,
kill_on_log_timeout=kill_on_timeout,
stash_log_method=stash_log_method,
tag_instances=tag_instances, **kwargs)
class PmidSubmitter(Submitter):
_s3_input_name = 'pmids'
_purpose = 'pmid_reading'
_job_queue = 'run_reach_queue'
_job_def = 'run_reach_jobdef'
def _get_base(self, job_name, start_ix, end_ix):
base = ['python', '-m', 'indra.tools.reading.pmid_reading.read_pmids_aws',
self.basename, '/tmp', '16', str(start_ix), str(end_ix)]
return base
def _get_extensions(self):
extensions = []
for opt_key in ['force_read', 'force_fulltext']:
if self.options.get(opt_key, False):
extensions.append('--' + opt_key)
return extensions
def set_options(self, force_read=False, force_fulltext=False):
"""Set the options for this run."""
self.options['force_read'] = force_read
self.options['force_fulltext'] = force_fulltext
return
def submit_combine(self):
job_ids = self.job_list
if job_ids is not None and len(job_ids) > 20:
print("WARNING: boto3 cannot support waiting for more than 20 jobs.")
print("Please wait for the reading to finish, then run again with the")
print("`combine` option.")
return
# Get environment variables
environment_vars = get_environment()
job_name = '%s_combine_reading_results' % self.basename
command_list = get_batch_command(
['python', '-m', 'indra.tools.reading.assemble_reading_stmts_aws',
self.basename, '-r'] + self.readers,
purpose='pmid_reading',
project=self.project_name
)
logger.info('Command list: %s' % str(command_list))
kwargs = {'jobName': job_name, 'jobQueue': self._job_queue,
'jobDefinition': self._job_def,
'containerOverrides': {'environment': environment_vars,
'command': command_list,
'memory': 60000, 'vcpus': 1}}
if job_ids:
kwargs['dependsOn'] = job_ids
batch_client = boto3.client('batch')
batch_client.submit_job(**kwargs)
logger.info("submitted...")
return
def submit_reading(basename, pmid_list_filename, readers, start_ix=None,
end_ix=None, pmids_per_job=3000, num_tries=2,
force_read=False, force_fulltext=False, project_name=None):
"""Submit an old-style pmid-centered no-database s3 only reading job.
This function is provided for the sake of backward compatibility. It is
    preferred that you use the object-oriented PmidSubmitter and its
    submit_reading method going forward.
"""
sub = PmidSubmitter(basename, readers, project_name)
sub.set_options(force_read, force_fulltext)
sub.submit_reading(pmid_list_filename, start_ix, end_ix, pmids_per_job,
num_tries)
return sub.job_list
def submit_combine(basename, readers, job_ids=None, project_name=None):
"""Submit a batch job to combine the outputs of a reading job.
This function is provided for backwards compatibility. You should use the
    PmidSubmitter and its submit_combine method instead.
"""
sub = PmidSubmitter(basename, readers, project_name)
sub.job_list = job_ids
sub.submit_combine()
return sub
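def _example_pmid_reading_run():
    """A minimal usage sketch of the PmidSubmitter workflow defined above.
    Assumptions: AWS credentials and the Batch queue/job definitions are already
    configured; 'my_run' and 'pmids.txt' are illustrative placeholder names.
    """
    sub = PmidSubmitter('my_run', ['reach', 'sparser'], project_name='cwc')
    sub.set_options(force_read=False, force_fulltext=False)
    sub.submit_reading('pmids.txt', start_ix=0, end_ix=None, ids_per_job=3000)
    sub.watch_and_wait(poll_interval=30, idle_log_timeout=600,
                       kill_on_timeout=True, stash_log_method='s3')
    sub.submit_combine()
    return sub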
class DbReadingSubmitter(Submitter):
_s3_input_name = 'id_list'
_purpose = 'db_reading'
_job_queue = 'run_db_reading_queue'
_job_def = 'run_db_reading_jobdef'
def __init__(self, *args, **kwargs):
super(DbReadingSubmitter, self).__init__(*args, **kwargs)
self.time_tag = datetime.now().strftime('%Y%m%d_%H%M')
self.reporter = Reporter(self.basename + '_summary_%s' % self.time_tag)
self.reporter.sections = {'Plots': [], 'Totals': [], 'Git': []}
self.reporter.set_section_order(['Git', 'Totals', 'Plots'])
self.run_record = {}
return
def _get_base(self, job_name, start_ix, end_ix):
read_mode = 'all' if self.options.get('force_read', False) else 'unread'
stmt_mode = 'none' if self.options.get('no_stmts', False) else 'all'
job_name = '%s_%d_%d' % (self.basename, start_ix, end_ix)
base = ['python', '-m', 'indra.tools.reading.db_reading.read_db_aws',
self.basename]
base += [job_name]
base += ['/tmp', read_mode, stmt_mode, '32', str(start_ix), str(end_ix)]
return base
def _get_extensions(self):
extensions = []
if self.options.get('force_fulltext', False):
extensions.append('--force_fulltext')
if self.options.get('prioritize', False):
extensions.append('--read_best_fulltext')
max_reach_input_len = self.options.get('max_reach_input_len')
max_reach_space_ratio = self.options.get('max_reach_space_ratio')
if max_reach_input_len is not None:
extensions += ['--max_reach_input_len', max_reach_input_len]
if max_reach_space_ratio is not None:
extensions += ['--max_reach_space_ratio', max_reach_space_ratio]
return extensions
def set_options(self, force_read=False, no_stmts=False,
force_fulltext=False, prioritize=False,
max_reach_input_len=None, max_reach_space_ratio=None):
self.options['force_fulltext'] = force_fulltext
self.options['prioritize'] = prioritize
self.options['max_reach_input_len'] = max_reach_input_len
self.options['max_reach_space_ratio'] = max_reach_space_ratio
return
def watch_and_wait(self, *args, **kwargs):
kwargs['result_record'] = self.run_record
super(DbReadingSubmitter, self).watch_and_wait(*args, **kwargs)
self.produce_report()
@staticmethod
def _parse_time(time_str):
"""Create a timedelta or datetime object from default string reprs."""
try:
# This is kinda terrible, but it is the easiest way to distinguish
# them.
if '-' in time_str:
time_fmt = '%Y-%m-%d %H:%M:%S'
                if '.' in time_str:
                    pre_dec, post_dec = time_str.split('.')
                    dt = datetime.strptime(pre_dec, time_fmt)
                    # datetime.replace returns a new object; keep the result.
                    dt = dt.replace(microsecond=int(post_dec))
                else:
                    dt = datetime.strptime(time_str, time_fmt)
return dt
else:
if 'day' in time_str:
                    m = re.match((r'(?P<days>[-\d]+) day[s]*, '
                                  r'(?P<hours>\d+):(?P<minutes>\d+):'
                                  r'(?P<seconds>\d[\.\d+]*)'),
                                 time_str)
                else:
                    m = re.match((r'(?P<hours>\d+):(?P<minutes>\d+):'
                                  r'(?P<seconds>\d[\.\d+]*)'),
                                 time_str)
return timedelta(**{key: float(val)
for key, val in m.groupdict().items()})
except Exception as e:
logger.error('Failed to parse \"%s\".' % time_str)
raise e
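    # Illustration (example inputs, not from the original code): the two formats
    # handled above are, e.g.,
    #   "2018-01-02 03:04:05.123" -> datetime(2018, 1, 2, 3, 4, 5, 123)
    #   "1 day, 2:03:04.5"        -> timedelta(days=1, hours=2, minutes=3, seconds=4.5)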
def _get_results_file_tree(self, s3, s3_prefix):
relevant_files = s3.list_objects(Bucket=bucket_name, Prefix=s3_prefix)
file_tree = NestedDict()
file_keys = [entry['Key'] for entry in relevant_files['Contents']]
pref_path = s3_prefix.split('/')[:-1] # avoid the trailing empty str.
for key in file_keys:
full_path = key.split('/')
relevant_path = full_path[len(pref_path):]
curr = file_tree
for step in relevant_path:
curr = curr[step]
curr['key'] = key
return file_tree
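    # For illustration (hypothetical key): an object stored at
    # '<s3_prefix>/myrun_0_1000/timing.txt' ends up reachable as
    # file_tree['myrun_0_1000']['timing.txt']['key'].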
def _get_txt_file_dict(self, file_bytes):
line_list = file_bytes.decode('utf-8').splitlines()
sc = ': '
file_info = {}
for line in line_list:
segments = line.split(sc)
file_info[segments[0].strip()] = sc.join(segments[1:]).strip()
return file_info
def _handle_git_info(self, ref, git_info, file_bytes):
this_info = self._get_txt_file_dict(file_bytes)
if git_info and this_info != git_info:
logger.warning("Disagreement in git info in %s: "
"%s vs. %s."
% (ref, git_info, this_info))
elif not git_info:
git_info.update(this_info)
return
def _report_git_info(self, batch_git_info):
self.reporter.add_text('Batch Git Info', section='Git', style='h1')
for key, val in batch_git_info.items():
label = key.replace('_', ' ').capitalize()
self.reporter.add_text('%s: %s' % (label, val), section='Git')
self.reporter.add_text('Launching System\'s Git Info', section='Git',
style='h1')
git_info_dict = get_git_info()
for key, val in git_info_dict.items():
label = key.replace('_', ' ').capitalize()
self.reporter.add_text('%s: %s' % (label, val), section='Git')
return
def _handle_timing(self, ref, timing_info, file_bytes):
this_info = self._get_txt_file_dict(file_bytes)
for stage, data in this_info.items():
if stage not in timing_info.keys():
logger.info("Adding timing stage: %s" % stage)
timing_info[stage] = {}
stage_info = timing_info[stage]
timing_pairs = re.findall(r'(\w+):\s+([ 0-9:.\-]+)', data)
            if len(timing_pairs) != 3:
logger.warning("Not all timings present for %s "
"in %s." % (stage, ref))
for label, time_str in timing_pairs:
if label not in stage_info.keys():
stage_info[label] = {}
# e.g. timing_info['reading']['start']['job_name'] = <datetime>
stage_info[label][ref] = self._parse_time(time_str)
return
def _report_timing(self, timing_info):
# Pivot the timing info.
        idx_patt = re.compile(r'%s_(\d+)_(\d+)' % self.basename)
job_segs = NestedDict()
plot_set = set()
for stage, stage_d in timing_info.items():
# e.g. reading, statement production...
for metric, metric_d in stage_d.items():
# e.g. start, end, ...
for job_name, t in metric_d.items():
# e.g. job_basename_startIx_endIx
job_segs[job_name][stage][metric] = t
m = idx_patt.match(job_name)
if m is None:
logger.error("Unexpectedly formatted name: %s."
% job_name)
continue
key = tuple([int(n) for n in m.groups()] + [job_name])
plot_set.add(key)
plot_list = list(plot_set)
plot_list.sort()
# Use this for getting the minimum and maximum.
all_times = [dt for job in job_segs.values() for stage in job.values()
for metric, dt in stage.items() if metric != 'duration']
all_start = min(all_times)
all_end = max(all_times)
def get_time_tuple(stage_data):
start_seconds = (stage_data['start'] - all_start).total_seconds()
return start_seconds, stage_data['duration'].total_seconds()
# Make the broken barh plots.
w = 6.5
h = 9
fig = plt.figure(figsize=(w, h))
gs = plt.GridSpec(2, 1, height_ratios=[10, 1])
ax0 = plt.subplot(gs[0])
ytick_pairs = []
stages = ['reading', 'statement production', 'stats']
t = arange((all_end - all_start).total_seconds())
counts = dict.fromkeys(['jobs'] + stages)
for k in counts.keys():
counts[k] = array([0 for _ in t])
for i, job_tpl in enumerate(plot_list):
s_ix, e_ix, job_name = job_tpl
job_d = job_segs[job_name]
xs = [get_time_tuple(job_d[stg]) for stg in stages]
ys = (s_ix, (e_ix - s_ix)*0.9)
ytick_pairs.append(((s_ix + e_ix)/2, '%s_%s' % (s_ix, e_ix)))
logger.debug("Making plot for: %s" % str((job_name, xs, ys)))
ax0.broken_barh(xs, ys, facecolors=('red', 'green', 'blue'))
for n, stg in enumerate(stages):
cs = counts[stg]
start = xs[n][0]
dur = xs[n][1]
cs[(t>start) & (t<(start + dur))] += 1
cs = counts['jobs']
cs[(t>xs[0][0]) & (t<(xs[-1][0] + xs[-1][1]))] += 1
# Format the plot
ax0.tick_params(top='off', left='off', right='off', bottom='off',
labelleft='on', labelbottom='off')
for spine in ax0.spines.values():
spine.set_visible(False)
total_time = (all_end - all_start).total_seconds()
ax0.set_xlim(0, total_time)
ax0.set_ylabel(self.basename + '_ ...')
print(ytick_pairs)
yticks, ylabels = zip(*ytick_pairs)
print(yticks)
if not self.ids_per_job:
print([yticks[i+1] - yticks[i]
for i in range(len(yticks) - 1)])
# Infer if we don't have it.
spacing = median([yticks[i+1] - yticks[i]
for i in range(len(yticks) - 1)])
spacing = max(1, spacing)
else:
spacing = self.ids_per_job
print(spacing)
print(yticks[0], yticks[-1])
ytick_range = list(arange(yticks[0], yticks[-1] + spacing, spacing))
ylabel_filled = []
for ytick in ytick_range:
if ytick in yticks:
ylabel_filled.append(ylabels[yticks.index(ytick)])
else:
ylabel_filled.append('FAILED')
ax0.set_ylim(0, max(ytick_range) + spacing)
ax0.set_yticks(ytick_range)
ax0.set_yticklabels(ylabel_filled)
# Plot the lower axis.
legend_list = []
color_map = {'jobs': 'k', 'reading': 'r', 'statement production': 'g',
'stats': 'b'}
ax1 = plt.subplot(gs[1], sharex=ax0)
for k, cs in counts.items():
legend_list.append(k)
ax1.plot(t, cs, color=color_map[k])
for lbl, spine in ax1.spines.items():
spine.set_visible(False)
max_n = max(counts['jobs'])
ax1.set_ylim(0, max_n + 1)
ax1.set_xlim(0, total_time)
yticks = list(range(0, max_n-max_n//5, max(1, max_n//5)))
ax1.set_yticks(yticks + [max_n])
ax1.set_yticklabels([str(n) for n in yticks] + ['max=%d' % max_n])
ax1.set_ylabel('N_jobs')
ax1.set_xlabel('Time since beginning [seconds]')
        # Make the figure borders more sensible.
fig.tight_layout()
img_path = 'time_figure.png'
fig.savefig(img_path)
self.reporter.add_image(img_path, width=w, height=h, section='Plots')
return
def _handle_sum_data(self, job_ref, summary_info, file_bytes):
one_sum_data_dict = pickle.loads(file_bytes)
for k, v in one_sum_data_dict.items():
if k not in summary_info.keys():
summary_info[k] = {}
summary_info[k][job_ref] = v
return
def _report_sum_data(self, summary_info):
        # Two kinds of things to handle:
for k, job_dict in summary_info.items():
if isinstance(list(job_dict.values())[0], dict):
continue
# Overall totals
self.reporter.add_text('total %s: %d' % (k, sum(job_dict.values())),
section='Totals')
# Hists of totals.
if len(job_dict) <= 1:
continue
w = 6.5
h = 4
fig = plt.figure(figsize=(w, h))
plt.hist(list(job_dict.values()), align='left')
plt.xlabel(k)
plt.ylabel('Number of Jobs')
fig.tight_layout()
fname = k + '_hist.png'
fig.savefig(fname)
self.reporter.add_image(fname, width=w, height=h, section='Plots')
return
def _handle_hist_data(self, job_ref, hist_dict, file_bytes):
a_hist_data_dict = pickle.loads(file_bytes)
for k, v in a_hist_data_dict.items():
if k not in hist_dict.keys():
hist_dict[k] = {}
hist_dict[k][job_ref] = v
return
def _report_hist_data(self, hist_dict):
for k, data_dict in hist_dict.items():
w = 6.5
if k == ('stmts', 'readers'):
h = 6
fig = plt.figure(figsize=(w, h))
data = {}
for job_datum in data_dict.values():
for rdr, num in job_datum['data'].items():
if rdr not in data.keys():
data[rdr] = [num]
else:
data[rdr].append(num)
N = len(data)
key_list = list(data.keys())
xtick_locs = arange(N)
n = (N+1)*100 + 11
ax0 = plt.subplot(n)
ax0.bar(xtick_locs, [sum(data[k]) for k in key_list],
align='center')
            ax0.set_xticks(xtick_locs)
            ax0.set_xticklabels(key_list)
ax0.set_xlabel('readers')
ax0.set_ylabel('stmts')
ax0.set_title('Reader production')
rdr_ax_list = []
for rdr, stmt_counts in data.items():
n += 1
if not rdr_ax_list:
ax = plt.subplot(n)
else:
ax = plt.subplot(n, sharex=rdr_ax_list[0])
ax.set_title(rdr)
ax.hist(stmt_counts, align='left')
ax.set_ylabel('jobs')
rdr_ax_list.append(ax)
if rdr_ax_list:
ax.set_xlabel('stmts')
else: # TODO: Handle other summary plots.
continue
figname = '_'.join(k) + '.png'
fig.savefig(figname)
self.reporter.add_image(figname, width=w, height=h, section='Plots')
return
def produce_report(self):
"""Produce a report of the batch jobs."""
s3_prefix = 'reading_results/%s/logs/%s/' % (self.basename,
self._job_queue)
logger.info("Producing batch report for %s, from prefix %s."
% (self.basename, s3_prefix))
s3 = boto3.client('s3')
file_tree = self._get_results_file_tree(s3, s3_prefix)
logger.info("Found %d relevant files." % len(file_tree))
stat_files = {
'git_info.txt': (self._handle_git_info, self._report_git_info),
'timing.txt': (self._handle_timing, self._report_timing),
'raw_tuples.pkl': (None, None),
'hist_data.pkl': (self._handle_hist_data, self._report_hist_data),
'sum_data.pkl': (self._handle_sum_data, self._report_sum_data)
}
stat_aggs = {}
for stat_file, (handle_stats, report_stats) in stat_files.items():
logger.info("Aggregating %s..." % stat_file)
# Prep the data storage.
my_agg = {}
# Get a list of the relevant files (one per job).
file_paths = file_tree.get_paths(stat_file)
logger.info("Found %d files for %s." % (len(file_paths), stat_file))
# Aggregate the data from all the jobs for each file type.
for sub_path, file_entry in file_paths:
s3_key = file_entry['key']
ref = sub_path[0]
file = s3.get_object(Bucket=bucket_name, Key=s3_key)
file_bytes = file['Body'].read()
if handle_stats is not None:
handle_stats(ref, my_agg, file_bytes)
if report_stats is not None and len(my_agg):
report_stats(my_agg)
stat_aggs[stat_file] = my_agg
for end_type, jobs in self.run_record.items():
self.reporter.add_text('Jobs %s: %d' % (end_type, len(jobs)),
section='Totals')
s3_prefix = 'reading_results/%s/' % self.basename
fname = self.reporter.make_report()
with open(fname, 'rb') as f:
s3.put_object(Bucket=bucket_name,
Key= s3_prefix + fname,
Body=f.read())
s3.put_object(Bucket=bucket_name,
Key=s3_prefix + 'stat_aggregates_%s.pkl' % self.time_tag,
Body=pickle.dumps(stat_aggs))
return file_tree, stat_aggs
def submit_db_reading(basename, id_list_filename, readers, start_ix=None,
end_ix=None, pmids_per_job=3000, num_tries=2,
force_read=False, force_fulltext=False,
read_all_fulltext=False, project_name=None,
max_reach_input_len=None, max_reach_space_ratio=None,
                      no_stmts=False):
    """Submit batch reading jobs that use the database for content and results.
    This function is provided for backwards compatibility; use DbReadingSubmitter
    and its submit_reading method instead.
"""
sub = DbReadingSubmitter(basename, readers, project_name)
sub.set_options(force_read, no_stmts, force_fulltext, read_all_fulltext,
max_reach_input_len, max_reach_space_ratio)
sub.submit_reading(id_list_filename, start_ix, end_ix, pmids_per_job,
num_tries)
return sub
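# Example (hypothetical values): submit a with-db reading job over the first
# 10000 ids in 'ids.txt' with both readers, then wait and build the report:
#   sub = submit_db_reading('run1', 'ids.txt', ['reach', 'sparser'], end_ix=10000)
#   sub.watch_and_wait()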
if __name__ == '__main__':
import argparse
# Create the top-level parser
parser = argparse.ArgumentParser(
'submit_reading_pipeline_aws.py',
description=('Run reading with either the db or remote resources. For '
'more specific help, select one of the Methods with the '
'`-h` option.'),
epilog=('Note that `python wait_for_complete.py ...` should be run as '
'soon as this command completes successfully. For more '
'details use `python wait_for_complete.py -h`.')
)
subparsers = parser.add_subparsers(title='Method')
subparsers.required = True
subparsers.dest = 'method'
# Create parser class for first layer of options
grandparent_reading_parser = argparse.ArgumentParser(
description='Run machine reading using AWS Batch.',
add_help=False
)
# Create parent parser classes for second layer of options
parent_submit_parser = argparse.ArgumentParser(add_help=False)
parent_submit_parser.add_argument(
'basename',
help='Defines job names and S3 keys'
)
parent_submit_parser.add_argument(
'-r', '--readers',
dest='readers',
choices=['sparser', 'reach', 'all'],
default=['all'],
nargs='+',
help='Choose which reader(s) to use.'
)
parent_submit_parser.add_argument(
'--project',
help=('Set the project name. Default is DEFAULT_AWS_PROJECT in the '
'config.')
)
parent_read_parser = argparse.ArgumentParser(add_help=False)
parent_read_parser.add_argument(
'input_file',
help=('Path to file containing input ids of content to read. For the '
'no-db options, this is simply a file with each line being a '
'pmid. For the with-db options, this is a file where each line '
'is of the form \'<id type>:<id>\', for example \'pmid:12345\'')
)
parent_read_parser.add_argument(
'--start_ix',
type=int,
help='Start index of ids to read.'
)
parent_read_parser.add_argument(
'--end_ix',
type=int,
help='End index of ids to read. If `None`, read content from all ids.'
)
parent_read_parser.add_argument(
'--force_read',
action='store_true',
help='Read papers even if previously read by current REACH.'
)
parent_read_parser.add_argument(
'--force_fulltext',
action='store_true',
help='Get full text content even if content already on S3.'
)
parent_read_parser.add_argument(
'--ids_per_job',
default=3000,
type=int,
help='Number of PMIDs to read for each AWS Batch job.'
)
''' Not currently supported.
parent_read_parser.add_argument(
'--num_tries',
default=2,
type=int,
help='Maximum number of times to try running job.'
)
'''
parent_db_parser = argparse.ArgumentParser(add_help=False)
'''Not currently supported
parent_db_parser.add_argument(
'--no_upload',
action='store_true',
help='Don\'t upload results to the database.'
)
'''
parent_db_parser.add_argument(
'--read_best_fulltext',
action='store_true',
help='Read only the best fulltext for input ids.'
)
parent_db_parser.add_argument(
'--no_statements',
action='store_true',
help='Choose to not produce any Statements; only readings will be done.'
)
parent_db_parser.add_argument(
'--max_reach_space_ratio',
type=float,
help='Set the maximum ratio of spaces to non-spaces for REACH input.',
default=None
)
parent_db_parser.add_argument(
'--max_reach_input_len',
type=int,
help='Set the maximum length of content that REACH will read.',
default=None
)
# Make non_db_parser and get subparsers
non_db_parser = subparsers.add_parser(
'no-db',
parents=[grandparent_reading_parser],
description=('Run reading by collecting content, and save as pickles. '
'This option requires that ids are given as a list of '
'pmids, one line per pmid.'),
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
non_db_subparsers = non_db_parser.add_subparsers(
title='Job Type',
help='Type of jobs to submit.'
)
non_db_subparsers.required = True
non_db_subparsers.dest = 'job_type'
# Create subparsers for the no-db option.
read_parser = non_db_subparsers.add_parser(
'read',
parents=[parent_read_parser, parent_submit_parser],
help='Run REACH and cache INDRA Statements on S3.',
description='Run REACH and cache INDRA Statements on S3.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
combine_parser = non_db_subparsers.add_parser(
'combine',
parents=[parent_submit_parser],
help='Combine INDRA Statement subsets into a single file.',
description='Combine INDRA Statement subsets into a single file.'
)
full_parser = non_db_subparsers.add_parser(
'full',
parents=[parent_read_parser, parent_submit_parser],
help='Run REACH and combine INDRA Statements when done.',
description='Run REACH and combine INDRA Statements when done.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
# Make db parser and get subparsers.
db_parser = subparsers.add_parser(
'with-db',
parents=[grandparent_reading_parser, parent_submit_parser,
parent_read_parser, parent_db_parser],
description=('Run reading with content on the db and submit results. '
'In this option, ids in \'input_file\' are given in the '
'format \'<id type>:<id>\'. Unlike no-db, there is no '
'need to combine pickles, and therefore no need to '
'specify your task further.'),
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
args = parser.parse_args()
job_ids = None
if args.method == 'no-db':
sub = PmidSubmitter(args.basename, args.readers, args.project)
sub.set_options(args.force_read, args.force_fulltext)
if args.job_type in ['read', 'full']:
sub.submit_reading(args.input_file, args.start_ix, args.end_ix,
args.ids_per_job)
if args.job_type in ['combine', 'full']:
sub.submit_combine()
elif args.method == 'with-db':
sub = DbReadingSubmitter(args.basename, args.readers, args.project)
        sub.set_options(args.force_read, args.no_statements,
                        args.force_fulltext, args.read_best_fulltext,
                        args.max_reach_input_len, args.max_reach_space_ratio)
sub.submit_reading(args.input_file, args.start_ix, args.end_ix,
args.ids_per_job)
| 40.885496 | 83 | 0.582192 | [
"BSD-2-Clause"
] | budakn/INDRA | indra/tools/reading/submit_reading_pipeline.py | 48,204 | Python |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError, UserError
from odoo.addons import decimal_precision as dp
from odoo.tools import float_is_zero
class EventType(models.Model):
_inherit = 'event.type'
@api.model
def _get_default_event_ticket_ids(self):
product = self.env.ref('event_sale.product_product_event', raise_if_not_found=False)
if not product:
return False
return [(0, 0, {
'name': _('Registration'),
'product_id': product.id,
'price': 0,
})]
use_ticketing = fields.Boolean('Ticketing')
event_ticket_ids = fields.One2many(
'event.event.ticket', 'event_type_id',
string='Tickets', default=_get_default_event_ticket_ids)
@api.onchange('name')
def _onchange_name(self):
if self.name:
self.event_ticket_ids.filtered(lambda ticket: ticket.name == _('Registration')).update({
'name': _('Registration for %s') % self.name
})
class Event(models.Model):
_inherit = 'event.event'
event_ticket_ids = fields.One2many(
'event.event.ticket', 'event_id', string='Event Ticket',
copy=True)
@api.onchange('event_type_id')
def _onchange_type(self):
super(Event, self)._onchange_type()
if self.event_type_id.use_ticketing:
self.event_ticket_ids = [(5, 0, 0)] + [
(0, 0, {
'name': self.name and _('Registration for %s') % self.name or ticket.name,
'product_id': ticket.product_id.id,
'price': ticket.price,
})
for ticket in self.event_type_id.event_ticket_ids]
@api.multi
def _is_event_registrable(self):
self.ensure_one()
if not self.event_ticket_ids:
return True
return all(self.event_ticket_ids.with_context(active_test=False).mapped(lambda t: t.product_id.active))
class EventTicket(models.Model):
_name = 'event.event.ticket'
_description = 'Event Ticket'
def _default_product_id(self):
return self.env.ref('event_sale.product_product_event', raise_if_not_found=False)
name = fields.Char(string='Name', required=True, translate=True)
event_type_id = fields.Many2one('event.type', string='Event Category', ondelete='cascade')
event_id = fields.Many2one('event.event', string="Event", ondelete='cascade')
product_id = fields.Many2one('product.product', string='Product',
required=True, domain=[("event_ok", "=", True)],
default=_default_product_id)
registration_ids = fields.One2many('event.registration', 'event_ticket_id', string='Registrations')
price = fields.Float(string='Price', digits=dp.get_precision('Product Price'))
deadline = fields.Date(string="Sales End")
is_expired = fields.Boolean(string='Is Expired', compute='_compute_is_expired')
price_reduce = fields.Float(string="Price Reduce", compute="_compute_price_reduce", digits=dp.get_precision('Product Price'))
price_reduce_taxinc = fields.Float(compute='_get_price_reduce_tax', string='Price Reduce Tax inc')
# seats fields
seats_availability = fields.Selection([('limited', 'Limited'), ('unlimited', 'Unlimited')],
string='Available Seat', required=True, store=True, compute='_compute_seats', default="limited")
seats_max = fields.Integer(string='Maximum Available Seats',
help="Define the number of available tickets. If you have too much registrations you will "
"not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.")
seats_reserved = fields.Integer(string='Reserved Seats', compute='_compute_seats', store=True)
seats_available = fields.Integer(string='Available Seats', compute='_compute_seats', store=True)
seats_unconfirmed = fields.Integer(string='Unconfirmed Seat Reservations', compute='_compute_seats', store=True)
seats_used = fields.Integer(compute='_compute_seats', store=True)
@api.multi
def _compute_is_expired(self):
for record in self:
if record.deadline:
current_date = fields.Date.context_today(record.with_context(tz=record.event_id.date_tz))
record.is_expired = record.deadline < current_date
else:
record.is_expired = False
@api.multi
def _compute_price_reduce(self):
for record in self:
product = record.product_id
discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
record.price_reduce = (1.0 - discount) * record.price
def _get_price_reduce_tax(self):
for record in self:
# sudo necessary here since the field is most probably accessed through the website
tax_ids = record.sudo().product_id.taxes_id.filtered(lambda r: r.company_id == record.event_id.company_id)
taxes = tax_ids.compute_all(record.price_reduce, record.event_id.company_id.currency_id, 1.0, product=record.product_id)
record.price_reduce_taxinc = taxes['total_included']
@api.multi
@api.depends('seats_max', 'registration_ids.state')
def _compute_seats(self):
""" Determine reserved, available, reserved but unconfirmed and used seats. """
# initialize fields to 0 + compute seats availability
for ticket in self:
ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'
ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0
# aggregate registrations by ticket and by state
if self.ids:
state_field = {
'draft': 'seats_unconfirmed',
'open': 'seats_reserved',
'done': 'seats_used',
}
query = """ SELECT event_ticket_id, state, count(event_id)
FROM event_registration
WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')
GROUP BY event_ticket_id, state
"""
self.env.cr.execute(query, (tuple(self.ids),))
for event_ticket_id, state, num in self.env.cr.fetchall():
ticket = self.browse(event_ticket_id)
ticket[state_field[state]] += num
# compute seats_available
for ticket in self:
if ticket.seats_max > 0:
ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)
@api.multi
@api.constrains('registration_ids', 'seats_max')
def _check_seats_limit(self):
for record in self:
if record.seats_max and record.seats_available < 0:
raise ValidationError(_('No more available seats for this ticket type.'))
@api.constrains('event_type_id', 'event_id')
def _constrains_event(self):
if any(ticket.event_type_id and ticket.event_id for ticket in self):
raise UserError(_('Ticket cannot belong to both the event category and the event itself.'))
@api.onchange('product_id')
def _onchange_product_id(self):
self.price = self.product_id.list_price or 0
    def get_ticket_multiline_description_sale(self):
        """ Compute a multiline description of this ticket, in the context of sales.
        It will often be used as the default description of a sales order line referencing this ticket.
        1. the first line is the ticket name
        2. the second line is the event name (if it exists, which should be the case with a normal workflow) or the product name (if it exists)
        The product name and product description_sale are ignored entirely: they are considered to be replaced by the ticket name and event name.
        (The workflow of creating a new event also does not lead to filling them in correctly, as the product is created through the event interface.)
        """
name = self.display_name
if self.event_id:
name += '\n' + self.event_id.display_name
elif self.product_id:
name += '\n' + self.product_id.display_name
return name
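        # e.g. (illustrative names): a ticket "Standard" on event "My Conference"
        # yields "Standard\nMy Conference".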
class EventRegistration(models.Model):
_inherit = 'event.registration'
event_ticket_id = fields.Many2one('event.event.ticket', string='Event Ticket', readonly=True, states={'draft': [('readonly', False)]})
# in addition to origin generic fields, add real relational fields to correctly
# handle attendees linked to sales orders and their lines
# TDE FIXME: maybe add an onchange on sale_order_id + origin
sale_order_id = fields.Many2one('sale.order', string='Source Sales Order', ondelete='cascade')
sale_order_line_id = fields.Many2one('sale.order.line', string='Sales Order Line', ondelete='cascade')
@api.onchange('event_id')
def _onchange_event_id(self):
        # We reset the ticket when keeping it would lead to an inconsistent state.
if self.event_ticket_id and (not self.event_id or self.event_id != self.event_ticket_id.event_id):
self.event_ticket_id = None
@api.multi
@api.constrains('event_ticket_id', 'state')
def _check_ticket_seats_limit(self):
for record in self:
if record.event_ticket_id.seats_max and record.event_ticket_id.seats_available < 0:
raise ValidationError(_('No more available seats for this ticket'))
@api.multi
def _check_auto_confirmation(self):
res = super(EventRegistration, self)._check_auto_confirmation()
if res:
orders = self.env['sale.order'].search([('state', '=', 'draft'), ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
if orders:
res = False
return res
@api.model
def create(self, vals):
res = super(EventRegistration, self).create(vals)
if res.origin or res.sale_order_id:
res.message_post_with_view('mail.message_origin_link',
values={'self': res, 'origin': res.sale_order_id},
subtype_id=self.env.ref('mail.mt_note').id)
return res
@api.model
def _prepare_attendee_values(self, registration):
""" Override to add sale related stuff """
line_id = registration.get('sale_order_line_id')
if line_id:
registration.setdefault('partner_id', line_id.order_id.partner_id)
att_data = super(EventRegistration, self)._prepare_attendee_values(registration)
if line_id:
att_data.update({
                'event_id': line_id.event_id.id,
'event_ticket_id': line_id.event_ticket_id.id,
'origin': line_id.order_id.name,
'sale_order_id': line_id.order_id.id,
'sale_order_line_id': line_id.id,
})
return att_data
@api.multi
def summary(self):
res = super(EventRegistration, self).summary()
if self.event_ticket_id.product_id.image_medium:
res['image'] = '/web/image/product.product/%s/image_medium' % self.event_ticket_id.product_id.id
        # 'information' is a list of (label, value) pairs.
        information = res.setdefault('information', [])
information.append((_('Name'), self.name))
information.append((_('Ticket'), self.event_ticket_id.name or _('None')))
order = self.sale_order_id.sudo()
order_line = self.sale_order_line_id.sudo()
if not order or float_is_zero(order_line.price_total, precision_digits=order.currency_id.rounding):
payment_status = _('Free')
elif not order.invoice_ids or any(invoice.state != 'paid' for invoice in order.invoice_ids):
payment_status = _('To pay')
res['alert'] = _('The registration must be paid')
else:
payment_status = _('Paid')
information.append((_('Payment'), payment_status))
return res
| 46.270992 | 165 | 0.655118 | [
"MIT"
] | jjiege/odoo | addons/event_sale/models/event.py | 12,123 | Python |
import scrapy
from itemloaders.processors import Identity, MapCompose, TakeFirst
from price_parser.parser import parse_price
from scrapy.loader import ItemLoader
from burplist.utils.parsers import parse_abv, parse_name, parse_volume, quantize_price
class ProductItem(scrapy.Item):
platform = scrapy.Field()
name = scrapy.Field(input_processor=MapCompose(str.strip, parse_name))
url = scrapy.Field()
brand = scrapy.Field()
style = scrapy.Field()
origin = scrapy.Field()
abv = scrapy.Field(input_processor=MapCompose(str.strip, parse_abv))
volume = scrapy.Field(input_processor=MapCompose(str.strip, parse_volume))
quantity = scrapy.Field(input_processor=Identity())
image_url = scrapy.Field()
price = scrapy.Field(input_processor=MapCompose(str, parse_price, quantize_price))
class ProductLoader(ItemLoader):
default_item_class = ProductItem
default_input_processor = MapCompose(str.strip)
default_output_processor = TakeFirst()
| 30.151515 | 86 | 0.765829 | [
"MIT"
] | admariner/burplist | burplist/items.py | 995 | Python |
#Written by Shitao Tang
# --------------------------------------------------------
import connectDB
import time,hashlib,logging
def sign_up(username,password):
db=connectDB.database.getInstance()
if len(username)<=20:
return db.create_account(username,hashlib.sha224(password).hexdigest())
else:
return 'username must be less than 20 characters'
def account_authentication(username,password):
db=connectDB.database.getInstance()
result=db.authenticate_account(username,hashlib.sha224(password).hexdigest())
if result:
return hashlib.sha224(username+str(time.time())).hexdigest()
elif result ==False:
return None
else:
logging.error(result)
def check_keys(data,keys): #check whether a dictionary contains a list of keys
for key in keys:
if key not in data:
return key
return None
def check_float(value,min_value,max_value): #try to convert value to a float number and is between min_value and max_value
try:
value=float(value)
if value>=min_value and value<=max_value:
return value
else:
return None
except ValueError:
return None
def decode_xml(object_name,xml): #get the bounding box of the object in an image
logging.info("begin to decode")
bounding_box=[]
#print xml
import xml.etree.ElementTree as ET
try:
root=ET.fromstring(xml)
except:
return []
for obj in root.findall('object'):
if(obj.find('name').text==object_name):
score=float(obj.find("score").text)
bnd_box=obj.find('bndbox')
xmin=int((bnd_box).find('xmin').text)
ymin=int((bnd_box).find('ymin').text)
xmax=int((bnd_box).find('xmax').text)
ymax=int((bnd_box).find('ymax').text)
bounding_box.append((xmin,ymin,xmax,ymax,score))
return bounding_box
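# Illustration (made-up XML): for
#   <annotation><object><name>car</name><score>0.9</score>
#     <bndbox><xmin>1</xmin><ymin>2</ymin><xmax>3</xmax><ymax>4</ymax></bndbox>
#   </object></annotation>
# decode_xml('car', xml) returns [(1, 2, 3, 4, 0.9)].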
def coordinate_from_google_to_baidu(longitude,latitude):
return gcj02tobd09(longitude,latitude)
def coordinate_from_baidu_to_google(longitude,latitude):
return bd09togcj02(longitude,latitude)
def check_connection_of_image_analysis_server(address):
    response = requests.get(address + "/ok")
    print address, response.text
    if response.text == "OK":
return True
else:
return False
#the following code is copied from github
import json
import requests
import math
x_pi = 3.14159265358979324 * 3000.0 / 180.0
pi = 3.1415926535897932384626
a = 6378245.0
ee = 0.00669342162296594323
def geocode(address):
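    # NOTE: 'key' (an AMap/Gaode web-service API key) is not defined anywhere in
    # this module; it must be set at module level before geocode() is called.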
geocoding = {'s': 'rsv3',
'key': key,
'city': 'china',
'address': address}
res = requests.get(
"http://restapi.amap.com/v3/geocode/geo", params=geocoding)
if res.status_code == 200:
json = res.json()
status = json.get('status')
count = json.get('count')
if status == '1' and int(count) >= 1:
geocodes = json.get('geocodes')[0]
lng = float(geocodes.get('location').split(',')[0])
lat = float(geocodes.get('location').split(',')[1])
return [lng, lat]
else:
return None
else:
return None
def gcj02tobd09(lng, lat):
z = math.sqrt(lng * lng + lat * lat) + 0.00002 * math.sin(lat * x_pi)
theta = math.atan2(lat, lng) + 0.000003 * math.cos(lng * x_pi)
bd_lng = z * math.cos(theta) + 0.0065
bd_lat = z * math.sin(theta) + 0.006
return [bd_lng, bd_lat]
def bd09togcj02(bd_lon, bd_lat):
x = bd_lon - 0.0065
y = bd_lat - 0.006
z = math.sqrt(x * x + y * y) - 0.00002 * math.sin(y * x_pi)
theta = math.atan2(y, x) - 0.000003 * math.cos(x * x_pi)
gg_lng = z * math.cos(theta)
gg_lat = z * math.sin(theta)
return [gg_lng, gg_lat]
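# Example (illustrative point): the two conversions are approximate inverses:
#   lng, lat = gcj02tobd09(116.404, 39.915)
#   bd09togcj02(lng, lat)  # ~[116.404, 39.915]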
def wgs84togcj02(lng, lat):
    """Convert WGS-84 coordinates to GCJ-02 (the datum used by Chinese map services)."""
if out_of_china(lng, lat):
return lng, lat
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [mglng, mglat]
def gcj02towgs84(lng, lat):
    """Convert GCJ-02 coordinates back to WGS-84 (approximate inverse)."""
if out_of_china(lng, lat):
return lng, lat
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [lng * 2 - mglng, lat * 2 - mglat]
def transformlat(lng, lat):
ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + \
0.1 * lng * lat + 0.2 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * pi) + 20.0 *
math.sin(2.0 * lng * pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lat * pi) + 40.0 *
math.sin(lat / 3.0 * pi)) * 2.0 / 3.0
ret += (160.0 * math.sin(lat / 12.0 * pi) + 320 *
math.sin(lat * pi / 30.0)) * 2.0 / 3.0
return ret
def transformlng(lng, lat):
ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + \
0.1 * lng * lat + 0.1 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * pi) + 20.0 *
math.sin(2.0 * lng * pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lng * pi) + 40.0 *
math.sin(lng / 3.0 * pi)) * 2.0 / 3.0
ret += (150.0 * math.sin(lng / 12.0 * pi) + 300.0 *
math.sin(lng / 30.0 * pi)) * 2.0 / 3.0
return ret
def out_of_china(lng, lat):
    """Return True if the coordinate lies outside mainland China's bounding box."""
return not (lng > 73.66 and lng < 135.05 and lat > 3.86 and lat < 53.55)
| 30.989691 | 122 | 0.570692 | [
"MIT"
] | yehan-xiao/SUCS | main_server/common.py | 6,012 | Python |
import random
import string
from pathlib import Path
r"""
Run these tests from the root folder:
    $ pytest tests --template fastapi_plan\template
where `template` is the path to the folder containing the `cookiecutter.json` file.
See https://github.com/hackebrot/pytest-cookies
and the example tests at
https://github.com/audreyfeldroy/cookiecutter-pypackage/blob/master/tests/test_bake_project.py
"""
ROOT_FOLDER = Path(__file__).parent.parent
PROJECT_TEMPLATE = f"{ROOT_FOLDER}/fastapi_plan/template"
def random_lower_string(length=20) -> str:
return "".join(random.choices(string.ascii_lowercase, k=length))
def test_bake_project_poetry(cookies):
project_name = random_lower_string()
result = cookies.bake(
template=PROJECT_TEMPLATE,
extra_context={
"project_name": project_name,
"preffered_requirements_tool": "poetry",
},
)
assert result.exit_code == 0
assert result.exception is None
assert result.project.basename == project_name
assert result.project.isdir()
top_level = [f.basename for f in result.project.listdir()]
assert "requirements.txt" in top_level
assert "poetry.lock" in top_level
assert "pyproject.toml" in top_level
# the rest in top level
assert ".dockerignore" in top_level
assert ".env" in top_level
assert "aerich.ini" in top_level
assert "app" in top_level
assert "config" in top_level
assert "docker-compose.yml" in top_level
assert "docker-compose.prod.yml" in top_level
assert "docker-compose.debug.yml" in top_level
assert "Dockerfile" in top_level
assert ".gitignore" in top_level
def test_bake_project_requiremnts(cookies):
project_name = random_lower_string()
result = cookies.bake(
template=PROJECT_TEMPLATE,
extra_context={
"project_name": project_name,
"preffered_requirements_tool": "requirements.txt",
},
)
assert result.exit_code == 0
assert result.exception is None
assert result.project.basename == project_name
assert result.project.isdir()
top_level = [f.basename for f in result.project.listdir()]
assert "requirements.txt" in top_level
assert "poetry.lock" not in top_level
assert "pyproject.toml" not in top_level
# the rest in top level
assert ".dockerignore" in top_level
assert ".env" in top_level
assert "aerich.ini" in top_level
assert "app" in top_level
assert "config" in top_level
assert "docker-compose.yml" in top_level
assert "docker-compose.prod.yml" in top_level
assert "docker-compose.debug.yml" in top_level
assert "Dockerfile" in top_level
assert ".gitignore" in top_level
| 30.266667 | 94 | 0.712922 | [
"MIT"
] | rafsaf/fastapi-plan | tests/test_fastapi_plan.py | 2,724 | Python |
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'-p',
dest='path',
    help='Specify the path')
parser.add_argument(
'-l', action='store_true',
dest='long_format',
help='use a long listing format')
parser.add_argument(
'-a', action='store_true',
dest='show_hidden',
help='do not ignore entries starting with .')
parser.add_argument(
'-S', action='store_true',
dest='sort_by_size',
help='sort by file size')
parser.add_argument(
'-R', action='store_true',
dest='list_subdir',
help='list subdirectories recursively')
args = parser.parse_args()
if not args.path:
args.path = os.getcwd()
| 24.733333 | 53 | 0.614555 | [
"MIT"
] | Windsooon/pash | pash/arg_parse.py | 742 | Python |
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from selectelhackaton.utils import SimpleAuthMixinView
@method_decorator(login_required, name='get')
class MemberIndex(SimpleAuthMixinView):
template_name = 'member/member-index.html'
| 27 | 67 | 0.783951 | [
"MIT"
] | mar4elkin/SelectelHackaton | selectelhackaton/views.py | 324 | Python |
from collections import defaultdict
from typing import List
class Solution:
def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:
total_node_count = n
if total_node_count == 1:
# Quick response for one node tree
return [0]
# build adjacency matrix
adj_matrix = defaultdict( set )
for src_node, dst_node in edges:
adj_matrix[src_node].add( dst_node )
adj_matrix[dst_node].add( src_node )
        # collect leaf nodes, i.e. nodes whose degree is 1
leave_nodes = [ node for node in adj_matrix if len(adj_matrix[node]) == 1 ]
        # keep removing leaf nodes until at most two nodes remain
while total_node_count > 2:
total_node_count -= len(leave_nodes)
leave_nodes_next_round = []
            # remove the current layer of leaf nodes
for leaf in leave_nodes:
neighbor = adj_matrix[leaf].pop()
adj_matrix[neighbor].remove( leaf )
if len(adj_matrix[neighbor]) == 1:
leave_nodes_next_round.append( neighbor )
leave_nodes = leave_nodes_next_round
        # the remaining nodes are the roots of the minimum height trees
return leave_nodes
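# Usage sketch (outside the LeetCode harness, assuming the imports added above):
# a star graph centred on node 0 has exactly one centroid:
#   Solution().findMinHeightTrees(4, [[0, 1], [0, 2], [0, 3]])  # -> [0]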
| 33.190476 | 88 | 0.522956 | [
"MIT"
] | shoaibur/SWE | Leetcoding-Actions/Explore-Monthly-Challenges/2020-11/04-Minimum-Height-Tree.py | 1,394 | Python |
#http://blog.gravatar.com/2008/01/17/gravatars-in-python-25/
import urllib, hashlib
# Set your variables here
email = "[email protected]"
default = "http://www.somewhere.com/homsar.jpg"
size = 40
def get_gravatar(email):
    gravatar_url = "http://www.gravatar.com/avatar.php?"
    gravatar_url += urllib.urlencode({'gravatar_id': hashlib.md5(email.lower()).hexdigest(),
                                      'default': default, 'size': str(size)})
    return gravatar_url
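# Example usage (with the module-level variables above):
#   print get_gravatar(email)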
"MIT"
] | k1000/django-stratus | stratus/gravatar.py | 430 | Python |
#!/usr/bin/env python
# $Id: update_pot.py 40713 2011-09-30 09:25:53Z nazgul $
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# update the pot file according the POTFILES.in
import subprocess
import os
from codecs import open
GETTEXT_XGETTEXT_EXECUTABLE = "xgettext"
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
SOURCE_DIR = os.path.normpath(os.path.abspath(os.path.join(CURRENT_DIR, "..")))
DOMAIN = "blender"
COMMENT_PREFIX = "#~ " # from update_msg.py
FILE_NAME_POT = os.path.join(CURRENT_DIR, "blender.pot")
FILE_NAME_MESSAGES = os.path.join(CURRENT_DIR, "messages.txt")
def main():
cmd = (GETTEXT_XGETTEXT_EXECUTABLE,
"--files-from=%s" % os.path.join(SOURCE_DIR, "po", "POTFILES.in"),
"--keyword=_",
"--keyword=N_",
"--directory=%s" % SOURCE_DIR,
"--output=%s" % os.path.join(SOURCE_DIR, "po", "%s.pot" % DOMAIN),
"--from-code=utf-8",
)
print(" ".join(cmd))
process = subprocess.Popen(cmd)
process.wait()
def stripeol(s):
return s.rstrip("\n\r")
pot_messages = {}
reading_message = False
message = ""
with open(FILE_NAME_POT, 'r', "utf-8") as handle:
while True:
line = handle.readline()
if not line:
break
line = stripeol(line)
if line.startswith("msgid"):
reading_message = True
message = line[7:-1]
elif line.startswith("msgstr"):
reading_message = False
pot_messages[message] = True
elif reading_message:
message += line[1:-1]
# add messages collected automatically from RNA
with open(FILE_NAME_POT, "a", "utf-8") as pot_handle:
with open(FILE_NAME_MESSAGES, 'r', "utf-8") as handle:
msgsrc_ls = []
while True:
line = handle.readline()
if not line:
break
line = stripeol(line)
# COMMENT_PREFIX
if line.startswith(COMMENT_PREFIX):
msgsrc_ls.append(line[len(COMMENT_PREFIX):].strip())
else:
line = line.replace("\\", "\\\\")
line = line.replace("\"", "\\\"")
line = line.replace("\t", "\\t")
if not pot_messages.get(line):
for msgsrc in msgsrc_ls:
pot_handle.write("#: %s\n" % msgsrc)
pot_handle.write("msgid \"%s\"\n" % line)
pot_handle.write("msgstr \"\"\n\n")
msgsrc_ls[:] = []
if __name__ == "__main__":
print("\n\n *** Running %r *** \n" % __file__)
main()
| 32.759259 | 79 | 0.570944 | [
"Unlicense"
] | damiles/blendocv | po/update_pot.py | 3,538 | Python |
# coding: utf-8
"""
Factern API
"""
import pprint
import re # noqa: F401
import six
import importlib
parent_name = "BaseResponse"
def get_parent():
# Lazy importing of parent means that loading the classes happens
# in the correct order.
if get_parent.cache is None:
parent_fname = "factern_client.com.factern.model.%s" % re.sub("([a-z])([A-Z])", "\\1_\\2", "BaseResponse").lower()
parent = importlib.import_module(parent_fname).BaseResponse
get_parent.cache = parent
return get_parent.cache
get_parent.cache = None
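# Note: because of the memoization above, repeated calls are cheap and always
# resolve to the same class object, i.e. get_parent() is get_parent().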
class CreateEntityResponse(get_parent()):
@staticmethod
def get_parent():
return get_parent()
@staticmethod
def compute_parent_updates():
pass
get_parent().compute_parent_updates()
CreateEntityResponse.swagger_types.update(get_parent().swagger_types)
CreateEntityResponse.attribute_map.update(get_parent().attribute_map)
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'description': 'str',
'name': 'str'
}
attribute_map = {
'description': 'description',
'name': 'name'
}
def __init__(self, **kwargs): # noqa: E501
"""CreateEntityResponse - a model defined in Swagger""" # noqa: E501
self.compute_parent_updates()
for k in kwargs:
if k not in self.swagger_types:
raise ValueError("CreateEntityResponse got unexpected argument '%s'" % k)
get_parent().__init__(self, **kwargs)
self._description = None
self._name = None
if "description" in kwargs:
self.description = kwargs["description"]
if "name" in kwargs:
self.name = kwargs["name"]
@property
def description(self):
"""Gets the description of this CreateEntityResponse. # noqa: E501
:return: The description of this CreateEntityResponse. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CreateEntityResponse.
:param description: The description of this CreateEntityResponse. # noqa: E501
:type: str
"""
self._description = description
@property
def name(self):
"""Gets the name of this CreateEntityResponse. # noqa: E501
:return: The name of this CreateEntityResponse. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CreateEntityResponse.
:param name: The name of this CreateEntityResponse. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateEntityResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.883436 | 122 | 0.581698 | [
"MIT"
] | Factern/factern-client-python | factern_client/com/factern/model/create_entity_response.py | 4,382 | Python |
import warnings
from cloudcafe.auth.provider import MemoizedAuthServiceComposite
from cloudcafe.blockstorage.config import BlockStorageConfig
from cloudcafe.blockstorage.volumes_api.common.config import VolumesAPIConfig
from cloudcafe.blockstorage.volumes_api.v1.config import \
VolumesAPIConfig as v1Config
from cloudcafe.blockstorage.volumes_api.v1.client import \
VolumesClient as v1Client
from cloudcafe.blockstorage.volumes_api.v1.behaviors import \
VolumesAPI_Behaviors as v1Behaviors
from cloudcafe.blockstorage.volumes_api.v2.config import \
VolumesAPIConfig as v2Config
from cloudcafe.blockstorage.volumes_api.v2.client import \
VolumesClient as v2Client
from cloudcafe.blockstorage.volumes_api.v2.behaviors import \
VolumesAPI_Behaviors as v2Behaviors
class _BlockstorageAuthComposite(MemoizedAuthServiceComposite):
_blockstorage_config = BlockStorageConfig
def __init__(self, endpoint_config=None, user_config=None):
self._endpoint_config = endpoint_config
self._user_config = user_config
self.config = self._blockstorage_config()
self.availability_zone = self.config.availability_zone
super(_BlockstorageAuthComposite, self).__init__(
self.config.identity_service_name, self.config.region,
endpoint_config=endpoint_config, user_config=user_config)
class _BaseVolumesComposite(object):
_config = None
_client = None
_behaviors = None
_auth = _BlockstorageAuthComposite
def __init__(self, auth_composite=None):
self.auth = auth_composite or self._auth()
self.config = self._config()
self.service_endpoint = self.auth.public_url
if self.auth.config.service_endpoint_override is not None:
self.service_endpoint = "{url}/{tenant_id}".format(
url=self.auth.config.service_endpoint_override,
tenant_id=self.auth.tenant_id)
self.client = self._client(
url=self.service_endpoint,
auth_token=self.auth.token_id,
serialize_format=self.config.serialize_format,
deserialize_format=self.config.deserialize_format)
self.behaviors = self._behaviors(self.client)
# For backwards compatibility (deprecated - see property below)
self._blockstorage_auth = self.auth
@property
def blockstorage_auth(self):
warnings.warn(
"the 'blockstorage_auth' attribute of the VolumesComposite is "
"deprecated. Please use the 'auth' attribute instead",
DeprecationWarning)
return self._blockstorage_auth
class VolumesV1Composite(_BaseVolumesComposite):
_config = v1Config
_client = v1Client
_behaviors = v1Behaviors
class VolumesV2Composite(_BaseVolumesComposite):
_config = v2Config
_client = v2Client
_behaviors = v2Behaviors
class VolumesAutoComposite(object):
def __new__(cls, auth_composite=None):
config = VolumesAPIConfig()
if config.version_under_test == "1":
return VolumesV1Composite(auth_composite=auth_composite)
if config.version_under_test == "2":
return VolumesV2Composite(auth_composite=auth_composite)
else:
raise Exception(
"VolumesAutoComposite cannot be used unless the "
"'version_under_test' attribute of the VolumesAPIConfig"
" is set to either '1' or '2'")
| 37.543478 | 77 | 0.72264 | [
"Apache-2.0"
] | kivattik/lk-cloudcafe-dev | cloudcafe/blockstorage/composites.py | 3,454 | Python |
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import *
from PyQt5.QtWebChannel import QWebChannel
from PyQt5 import Qt
import json
import sys
import time
import random
import threading
import os
ConfigData = {}
label = None
class CallHandler(QObject):
def __init__(self):
super(CallHandler, self).__init__()
    @pyqtSlot(str, result=str)  # the first argument is the type of the parameter passed in from the JS call
def init_home(self, str_args):
print('call received')
print('resolving......init home..')
print(str_args)
return 'hello, Python'
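    # For illustration: from Python the slot behaves like a plain method, e.g.
    #   CallHandler().init_home("hi")  # -> 'hello, Python'
    # On the JS side (in evol.html, not shown here) it is reached through the
    # QWebChannel object that AdminMain.__init__ registers under the name 'bridge'.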
class AdminMain(QWidget):
def __init__(self, parent=None):
self.m_flag = False
super(AdminMain, self).__init__(parent)
self.setWindowTitle("VegeTableT")
self.setWindowIcon(QIcon("./evol/logo.ico"))
self.setAttribute(Qt.Qt.WA_TranslucentBackground)
self.setContextMenuPolicy(Qt.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.close)
self.setWindowFlags(Qt.Qt.FramelessWindowHint |
Qt.Qt.Tool | Qt.Qt.WindowStaysOnTopHint)
self.moveToRight()
flo = QFormLayout()
rnbtn = QPushButton('随机提问')
rnbtn.setObjectName('bluebutton')
self.wd = Random_name()
        # Keep references on self: if the channel/handler were only local
        # variables they would be garbage-collected and the JS bridge would
        # silently stop working.
        self.channel = QWebChannel()
        self.cnobj = CallHandler()
        self.channel.registerObject('bridge', self.cnobj)
        self.wd.browser.page().setWebChannel(self.channel)
rnbtn.clicked.connect(self.startRandomName)
flo.addRow(rnbtn)
self.setLayout(flo)
def kbtoggle(self):
print(24333)
def moveToRight(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().topLeft()
qr.moveTopRight(cp)
self.move(qr.topRight())
def startRandomName(self):
self.wd.showwidget()
def mousePressEvent(self, event):
if event.button() == Qt.Qt.LeftButton:
self.m_flag = True
self.m_Position = event.globalPos()-self.pos()
event.accept()
self.setCursor(QCursor(Qt.Qt.OpenHandCursor))
def mouseMoveEvent(self, QMouseEvent):
if Qt.Qt.LeftButton and self.m_flag:
self.move(QMouseEvent.globalPos()-self.m_Position)
QMouseEvent.accept()
def mouseReleaseEvent(self, QMouseEvent):
self.m_flag = False
self.setCursor(QCursor(Qt.Qt.ArrowCursor))
class AboutThis(QMainWindow):
def __init__(self):
super(AboutThis, self).__init__()
self.setWindowTitle('关于随机提问')
self.setWindowIcon(QIcon("./evol/logo.ico"))
self.setWindowFlags(Qt.Qt.WindowStaysOnTopHint)
self.resize(600, 571)
self.browser = QWebEngineView()
        # load the external web page
self.browser.load(
QUrl(QFileInfo("./evol/about.html").absoluteFilePath()))
self.setCentralWidget(self.browser)
def showwidget(self):
self.show()
class Random_name(QMainWindow):
def __init__(self):
super(Random_name, self).__init__()
self.setWindowTitle('随机提问')
self.setWindowIcon(QIcon("./evol/logo.ico"))
self.setWindowFlags(Qt.Qt.WindowStaysOnTopHint |
Qt.Qt.WindowCloseButtonHint)
self.resize(500, 471)
self.browser = QWebEngineView()
        # load the external web page
self.browser.load(
QUrl(QFileInfo("./evol/evol.html").absoluteFilePath()))
self.setCentralWidget(self.browser)
def showwidget(self):
global ConfigData
with open('./evol/data.json', 'r', encoding='utf8') as fp:
ConfigData = json.load(fp)
self.browser.page().runJavaScript('getData({})'.format(
json.dumps(ConfigData, sort_keys=True, indent=4, separators=(',', ':'))))
self.show()
if __name__ == "__main__":
app = QApplication(sys.argv)
splash = QSplashScreen(QPixmap("./evol/start.png"))
splash.showMessage("orz lin_diex!", Qt.Qt.AlignHCenter |
Qt.Qt.AlignBottom, Qt.Qt.black)
splash.show()
qApp.processEvents()
QApplication.setQuitOnLastWindowClosed(False)
win = AdminMain()
w = win
tp = QSystemTrayIcon(w)
tp.setIcon(QIcon('./evol/logo.ico'))
    # set up the system tray icon context menu
a1 = QAction('&显示', triggered=w.show)
def quitApp():
w.show()
re = QMessageBox.question(w, "提示", "是否退出?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if re == QMessageBox.Yes:
QCoreApplication.instance().quit()
tp.setVisible(False)
def reConfig():
global ConfigData
with open('./evol/data.json', 'r', encoding='utf8') as fp:
ConfigData = json.load(fp)
with open('./evol/main.qss', 'r') as f:
w.setStyleSheet(f.read())
abthis = AboutThis()
def showAbout():
abthis.showwidget()
reConfig()
win.show()
splash.finish(win)
a2 = QAction('&退出', triggered=quitApp)
a3 = QAction('&关于', triggered=showAbout)
tpMenu = QMenu()
tpMenu.addAction(a1)
tpMenu.addAction(a3)
tpMenu.addAction(a2)
tp.setContextMenu(tpMenu)
tp.show()
#tp.showMessage('VegeTable Admin', '成功运行', icon=0)
#def clickMessage():
# print("信息被点击了")
#tp.messageClicked.connect(clickMessage)
def act(reason):
if reason == 2 or reason == 3:
w.show()
tp.activated.connect(act)
sys.exit(app.exec_())
| 31.302198 | 86 | 0.599614 | [
"MIT"
] | vt-dev-team/vt-randomName | main.py | 5,853 | Python |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name,anomalous-backslash-in-string,missing-docstring
"""mpl circuit visualization backend."""
import collections
import fractions
import itertools
import json
import logging
import math
import numpy as np
try:
from matplotlib import patches
from matplotlib import pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.tools.visualization import exceptions
from qiskit.tools.visualization import _qcstyle
logger = logging.getLogger(__name__)
Register = collections.namedtuple('Register', 'reg index')
WID = 0.65
HIG = 0.65
DEFAULT_SCALE = 4.3
PORDER_GATE = 5
PORDER_LINE = 2
PORDER_GRAY = 3
PORDER_TEXT = 6
PORDER_SUBP = 4
class Anchor:
def __init__(self, reg_num, yind, fold):
self.__yind = yind
self.__fold = fold
self.__reg_num = reg_num
self.__gate_placed = []
def plot_coord(self, index, gate_width):
h_pos = index % self.__fold + 1
# check folding
if self.__fold > 0:
if h_pos + (gate_width - 1) > self.__fold:
index += self.__fold - (h_pos - 1)
x_pos = index % self.__fold + 1 + 0.5 * (gate_width - 1)
y_pos = self.__yind - (index // self.__fold) * (self.__reg_num + 1)
else:
x_pos = index + 1 + 0.5 * (gate_width - 1)
y_pos = self.__yind
return x_pos, y_pos
def is_locatable(self, index, gate_width):
hold = [index + i for i in range(gate_width)]
for p in hold:
if p in self.__gate_placed:
return False
return True
def set_index(self, index, gate_width):
h_pos = index % self.__fold + 1
if h_pos + (gate_width - 1) > self.__fold:
_index = index + self.__fold - (h_pos - 1)
else:
_index = index
for ii in range(gate_width):
if _index + ii not in self.__gate_placed:
self.__gate_placed.append(_index + ii)
self.__gate_placed.sort()
def get_index(self):
if self.__gate_placed:
return self.__gate_placed[-1] + 1
return 0
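# Illustration (assumed numbers): with fold=4 on a 3-register row, a
# single-width gate at index 5 wraps onto the next row block:
# plot_coord(5, 1) gives x = 5 % 4 + 1 = 2 and y = yind - (3 + 1).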
class MatplotlibDrawer:
def __init__(self, qregs, cregs, ops,
scale=1.0, style=None, plot_barriers=True,
reverse_bits=False):
if not HAS_MATPLOTLIB:
raise ImportError('The class MatplotlibDrawer needs matplotlib. '
'Run "pip install matplotlib" before.')
self._ast = None
self._scale = DEFAULT_SCALE * scale
self._creg = []
self._qreg = []
self._registers(cregs, qregs)
self._ops = ops
self._qreg_dict = collections.OrderedDict()
self._creg_dict = collections.OrderedDict()
self._cond = {
'n_lines': 0,
'xmax': 0,
'ymax': 0,
}
self._style = _qcstyle.QCStyle()
self.plot_barriers = plot_barriers
self.reverse_bits = reverse_bits
if style:
if isinstance(style, dict):
self._style.set_style(style)
elif isinstance(style, str):
with open(style, 'r') as infile:
dic = json.load(infile)
self._style.set_style(dic)
self.figure = plt.figure()
self.figure.patch.set_facecolor(color=self._style.bg)
self.ax = self.figure.add_subplot(111)
self.ax.axis('off')
self.ax.set_aspect('equal')
self.ax.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
def _registers(self, creg, qreg):
self._creg = []
for r in creg:
self._creg.append(Register(reg=r[0], index=r[1]))
self._qreg = []
for r in qreg:
self._qreg.append(Register(reg=r[0], index=r[1]))
@property
def ast(self):
return self._ast
def _gate(self, xy, fc=None, wide=False, text=None, subtext=None):
xpos, ypos = xy
if wide:
wid = WID * 2.8
else:
wid = WID
if fc:
_fc = fc
elif text:
_fc = self._style.dispcol[text]
else:
_fc = self._style.gc
box = patches.Rectangle(
xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid, height=HIG,
fc=_fc, ec=self._style.lc, linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
if text:
disp_text = "${}$".format(self._style.disptex[text])
if subtext:
self.ax.text(xpos, ypos + 0.15 * HIG, disp_text, ha='center',
va='center', fontsize=self._style.fs,
color=self._style.gt, clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(xpos, ypos - 0.3 * HIG, subtext, ha='center',
va='center', fontsize=self._style.sfs,
color=self._style.sc, clip_on=True,
zorder=PORDER_TEXT)
else:
self.ax.text(xpos, ypos, disp_text, ha='center', va='center',
fontsize=self._style.fs,
color=self._style.gt,
clip_on=True,
zorder=PORDER_TEXT)
def _subtext(self, xy, text):
xpos, ypos = xy
self.ax.text(xpos, ypos - 0.3 * HIG, text, ha='center', va='top',
fontsize=self._style.sfs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _line(self, xy0, xy1, lc=None, ls=None):
x0, y0 = xy0
x1, y1 = xy1
if lc is None:
linecolor = self._style.lc
else:
linecolor = lc
if ls is None:
linestyle = 'solid'
else:
linestyle = ls
if linestyle == 'doublet':
theta = np.arctan2(np.abs(x1 - x0), np.abs(y1 - y0))
dx = 0.05 * WID * np.cos(theta)
dy = 0.05 * WID * np.sin(theta)
self.ax.plot([x0 + dx, x1 + dx], [y0 + dy, y1 + dy],
color=linecolor,
linewidth=1.0,
linestyle='solid',
zorder=PORDER_LINE)
self.ax.plot([x0 - dx, x1 - dx], [y0 - dy, y1 - dy],
color=linecolor,
linewidth=1.0,
linestyle='solid',
zorder=PORDER_LINE)
else:
self.ax.plot([x0, x1], [y0, y1],
color=linecolor,
linewidth=1.0,
linestyle=linestyle,
zorder=PORDER_LINE)
def _measure(self, qxy, cxy, cid):
qx, qy = qxy
cx, cy = cxy
self._gate(qxy, fc=self._style.dispcol['meas'])
# add measure symbol
arc = patches.Arc(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7,
height=HIG * 0.7, theta1=0, theta2=180, fill=False,
ec=self._style.lc, linewidth=1.5,
zorder=PORDER_GATE)
self.ax.add_patch(arc)
self.ax.plot([qx, qx + 0.35 * WID],
[qy - 0.15 * HIG, qy + 0.20 * HIG],
color=self._style.lc, linewidth=1.5, zorder=PORDER_GATE)
# arrow
self._line(qxy, [cx, cy + 0.35 * WID], lc=self._style.cc,
ls=self._style.cline)
arrowhead = patches.Polygon(((cx - 0.20 * WID, cy + 0.35 * WID),
(cx + 0.20 * WID, cy + 0.35 * WID),
(cx, cy)),
fc=self._style.cc,
ec=None)
self.ax.add_artist(arrowhead)
# target
if self._style.bundle:
self.ax.text(cx + .25, cy + .1, str(cid), ha='left', va='bottom',
fontsize=0.8 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _conds(self, xy, istrue=False):
xpos, ypos = xy
if istrue:
_fc = self._style.lc
else:
_fc = self._style.gc
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=_fc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def _ctrl_qubit(self, xy):
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=self._style.lc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def _tgt_qubit(self, xy):
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=HIG * 0.35,
fc=self._style.dispcol['target'],
ec=self._style.lc, linewidth=1.5,
zorder=PORDER_GATE)
self.ax.add_patch(box)
# add '+' symbol
self.ax.plot([xpos, xpos], [ypos - 0.35 * HIG, ypos + 0.35 * HIG],
color=self._style.lc, linewidth=1.0, zorder=PORDER_GATE)
self.ax.plot([xpos - 0.35 * HIG, xpos + 0.35 * HIG], [ypos, ypos],
color=self._style.lc, linewidth=1.0, zorder=PORDER_GATE)
def _swap(self, xy):
xpos, ypos = xy
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos - 0.20 * WID, ypos + 0.20 * WID],
color=self._style.lc, linewidth=1.5, zorder=PORDER_LINE)
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos + 0.20 * WID, ypos - 0.20 * WID],
color=self._style.lc, linewidth=1.5, zorder=PORDER_LINE)
def _barrier(self, config, anc):
xys = config['coord']
group = config['group']
y_reg = []
for qreg in self._qreg_dict.values():
if qreg['group'] in group:
y_reg.append(qreg['y'])
x0 = xys[0][0]
box_y0 = min(y_reg) - int(anc / self._style.fold) * (self._cond['n_lines'] + 1) - 0.5
box_y1 = max(y_reg) - int(anc / self._style.fold) * (self._cond['n_lines'] + 1) + 0.5
box = patches.Rectangle(xy=(x0 - 0.3 * WID, box_y0),
width=0.6 * WID, height=box_y1 - box_y0,
fc=self._style.bc, ec=None, alpha=0.6,
linewidth=1.5, zorder=PORDER_GRAY)
self.ax.add_patch(box)
for xy in xys:
xpos, ypos = xy
self.ax.plot([xpos, xpos], [ypos + 0.5, ypos - 0.5],
linewidth=1, linestyle="dashed",
color=self._style.lc,
zorder=PORDER_TEXT)
def _linefeed_mark(self, xy):
xpos, ypos = xy
self.ax.plot([xpos - .1, xpos - .1],
[ypos, ypos - self._cond['n_lines'] + 1],
color=self._style.lc, zorder=PORDER_LINE)
self.ax.plot([xpos + .1, xpos + .1],
[ypos, ypos - self._cond['n_lines'] + 1],
color=self._style.lc, zorder=PORDER_LINE)
def draw(self, filename=None, verbose=False):
self._draw_regs()
self._draw_ops(verbose)
_xl = - self._style.margin[0]
_xr = self._cond['xmax'] + self._style.margin[1]
_yb = - self._cond['ymax'] - self._style.margin[2] + 1 - 0.5
_yt = self._style.margin[3] + 0.5
self.ax.set_xlim(_xl, _xr)
self.ax.set_ylim(_yb, _yt)
# update figure size
fig_w = _xr - _xl
fig_h = _yt - _yb
if self._style.figwidth < 0.0:
self._style.figwidth = fig_w * self._scale * self._style.fs / 72 / WID
self.figure.set_size_inches(self._style.figwidth, self._style.figwidth * fig_h / fig_w)
if filename:
self.figure.savefig(filename, dpi=self._style.dpi,
bbox_inches='tight')
plt.close(self.figure)
return self.figure
def _draw_regs(self):
# quantum register
for ii, reg in enumerate(self._qreg):
if len(self._qreg) > 1:
label = '${}_{{{}}}$'.format(reg.reg.name, reg.index)
else:
label = '${}$'.format(reg.reg.name)
pos = -ii
self._qreg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.reg
}
self._cond['n_lines'] += 1
# classical register
if self._creg:
n_creg = self._creg.copy()
n_creg.pop(0)
idx = 0
y_off = -len(self._qreg)
for ii, (reg, nreg) in enumerate(itertools.zip_longest(
self._creg, n_creg)):
pos = y_off - idx
if self._style.bundle:
label = '${}$'.format(reg.reg.name)
self._creg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.reg
}
if not (not nreg or reg.reg != nreg.reg):
continue
else:
label = '${}_{{{}}}$'.format(reg.reg.name, reg.index)
self._creg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.reg
}
self._cond['n_lines'] += 1
idx += 1
def _draw_regs_sub(self, n_fold, feedline_l=False, feedline_r=False):
# quantum register
for qreg in self._qreg_dict.values():
if n_fold == 0:
label = qreg['label'] + ' : $\\left|0\\right\\rangle$'
else:
label = qreg['label']
y = qreg['y'] - n_fold * (self._cond['n_lines'] + 1)
self.ax.text(-0.5, y, label, ha='right', va='center',
fontsize=self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self._line([0, y], [self._cond['xmax'], y])
# classical register
this_creg_dict = {}
for creg in self._creg_dict.values():
if n_fold == 0:
label = creg['label'] + ' : 0 '
else:
label = creg['label']
y = creg['y'] - n_fold * (self._cond['n_lines'] + 1)
if y not in this_creg_dict.keys():
this_creg_dict[y] = {'val': 1, 'label': label}
else:
this_creg_dict[y]['val'] += 1
for y, this_creg in this_creg_dict.items():
# bundle
if this_creg['val'] > 1:
self.ax.plot([.6, .7], [y - .1, y + .1],
color=self._style.cc,
zorder=PORDER_LINE)
self.ax.text(0.5, y + .1, str(this_creg['val']), ha='left',
va='bottom',
fontsize=0.8 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(-0.5, y, this_creg['label'], ha='right', va='center',
fontsize=self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self._line([0, y], [self._cond['xmax'], y], lc=self._style.cc,
ls=self._style.cline)
# lf line
if feedline_r:
self._linefeed_mark((self._style.fold + 1 - 0.1,
- n_fold * (self._cond['n_lines'] + 1)))
if feedline_l:
self._linefeed_mark((0.1,
- n_fold * (self._cond['n_lines'] + 1)))
def _draw_ops(self, verbose=False):
_wide_gate = 'u2 u3 cu2 cu3'.split()
_barriers = {'coord': [], 'group': []}
next_ops = self._ops.copy()
if next_ops:
next_ops.pop(0)
this_anc = 0
#
# generate coordinate manager
#
q_anchors = {}
for key, qreg in self._qreg_dict.items():
q_anchors[key] = Anchor(reg_num=self._cond['n_lines'],
yind=qreg['y'],
fold=self._style.fold)
c_anchors = {}
for key, creg in self._creg_dict.items():
c_anchors[key] = Anchor(reg_num=self._cond['n_lines'],
yind=creg['y'],
fold=self._style.fold)
#
# draw gates
#
prev_width = 0
for layer_no, layer in enumerate(self._ops):
layer_width = 1
for op in layer:
if op['name'] in _wide_gate:
layer_width = 2
for op in layer:
_iswide = op['name'] in _wide_gate
# get qreg index
if 'qargs' in op.keys():
q_idxs = []
for qarg in op['qargs']:
for index, reg in self._qreg_dict.items():
if (reg['group'] == qarg[0] and
reg['index'] == qarg[1]):
q_idxs.append(index)
break
else:
q_idxs = []
# get creg index
if 'cargs' in op.keys():
c_idxs = []
for carg in op['cargs']:
for index, reg in self._creg_dict.items():
if (reg['group'] == carg[0] and
reg['index'] == carg[1]):
c_idxs.append(index)
break
else:
c_idxs = []
this_anc = layer_no + prev_width
occupied = q_idxs
q_list = [ii for ii in range(min(occupied),
max(occupied) + 1)]
locs = [q_anchors[jj].is_locatable(
this_anc, layer_width) for jj in q_list]
if all(locs):
for ii in q_list:
if op['name'] in ['barrier', 'snapshot', 'load', 'save', 'noise'] \
and not self.plot_barriers:
q_anchors[ii].set_index(this_anc - 1, layer_width)
else:
q_anchors[ii].set_index(this_anc, layer_width)
# qreg coordinate
q_xy = [q_anchors[ii].plot_coord(this_anc, layer_width) for ii in q_idxs]
# creg coordinate
c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width) for ii in c_idxs]
# bottom and top point of qreg
qreg_b = min(q_xy, key=lambda xy: xy[1])
qreg_t = max(q_xy, key=lambda xy: xy[1])
if verbose:
print(op)
                if 'op' in op.keys() and hasattr(op['op'], 'params'):
param = self.param_parse(op['op'].params, self._style.pimode)
else:
param = None
# conditional gate
if 'condition' in op.keys() and op['condition']:
c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width) for
ii in self._creg_dict]
mask = 0
for index, cbit in enumerate(self._creg):
if cbit.reg == op['condition'][0]:
mask |= (1 << index)
val = op['condition'][1]
# cbit list to consider
fmt_c = '{{:0{}b}}'.format(len(c_xy))
cmask = list(fmt_c.format(mask))[::-1]
# value
fmt_v = '{{:0{}b}}'.format(cmask.count('1'))
vlist = list(fmt_v.format(val))[::-1]
# plot conditionals
v_ind = 0
xy_plot = []
for xy, m in zip(c_xy, cmask):
if m == '1':
if xy not in xy_plot:
if vlist[v_ind] == '1' or self._style.bundle:
self._conds(xy, istrue=True)
else:
self._conds(xy, istrue=False)
xy_plot.append(xy)
v_ind += 1
creg_b = sorted(xy_plot, key=lambda xy: xy[1])[0]
self._subtext(creg_b, hex(val))
self._line(qreg_t, creg_b, lc=self._style.cc,
ls=self._style.cline)
#
# draw special gates
#
if op['name'] == 'measure':
vv = self._creg_dict[c_idxs[0]]['index']
self._measure(q_xy[0], c_xy[0], vv)
elif op['name'] in ['barrier', 'snapshot', 'load', 'save',
'noise']:
_barriers = {'coord': [], 'group': []}
for index, qbit in enumerate(q_idxs):
q_group = self._qreg_dict[qbit]['group']
if q_group not in _barriers['group']:
_barriers['group'].append(q_group)
_barriers['coord'].append(q_xy[index])
if self.plot_barriers:
self._barrier(_barriers, this_anc)
#
# draw single qubit gates
#
elif len(q_xy) == 1:
disp = op['name']
if param:
self._gate(q_xy[0], wide=_iswide, text=disp,
subtext='{}'.format(param))
else:
self._gate(q_xy[0], wide=_iswide, text=disp)
#
# draw multi-qubit gates (n=2)
#
elif len(q_xy) == 2:
# cx
if op['name'] in ['cx']:
self._ctrl_qubit(q_xy[0])
self._tgt_qubit(q_xy[1])
# cz for latexmode
elif op['name'] == 'cz':
if self._style.latexmode:
self._ctrl_qubit(q_xy[0])
self._ctrl_qubit(q_xy[1])
else:
disp = op['name'].replace('c', '')
self._ctrl_qubit(q_xy[0])
self._gate(q_xy[1], wide=_iswide, text=disp)
# control gate
elif op['name'] in ['cy', 'ch', 'cu3', 'crz']:
disp = op['name'].replace('c', '')
self._ctrl_qubit(q_xy[0])
if param:
self._gate(q_xy[1], wide=_iswide, text=disp,
subtext='{}'.format(param))
else:
self._gate(q_xy[1], wide=_iswide, text=disp)
# cu1 for latexmode
elif op['name'] in ['cu1']:
disp = op['name'].replace('c', '')
self._ctrl_qubit(q_xy[0])
if self._style.latexmode:
self._ctrl_qubit(q_xy[1])
self._subtext(qreg_b, param)
else:
self._gate(q_xy[1], wide=_iswide, text=disp,
subtext='{}'.format(param))
# swap gate
elif op['name'] == 'swap':
self._swap(q_xy[0])
self._swap(q_xy[1])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t)
#
# draw multi-qubit gates (n=3)
#
elif len(q_xy) == 3:
# cswap gate
if op['name'] == 'cswap':
self._ctrl_qubit(q_xy[0])
self._swap(q_xy[1])
self._swap(q_xy[2])
# ccx gate
elif op['name'] == 'ccx':
self._ctrl_qubit(q_xy[0])
self._ctrl_qubit(q_xy[1])
self._tgt_qubit(q_xy[2])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t)
else:
logger.critical('Invalid gate %s', op)
raise exceptions.VisualizationError('invalid gate {}'.format(op))
prev_width = layer_width - 1
#
# adjust window size and draw horizontal lines
#
anchors = [q_anchors[ii].get_index() for ii in self._qreg_dict]
if anchors:
max_anc = max(anchors)
else:
max_anc = 0
n_fold = max(0, max_anc - 1) // self._style.fold
# window size
if max_anc > self._style.fold > 0:
self._cond['xmax'] = self._style.fold + 1
self._cond['ymax'] = (n_fold + 1) * (self._cond['n_lines'] + 1) - 1
else:
self._cond['xmax'] = max_anc + 1
self._cond['ymax'] = self._cond['n_lines']
# add horizontal lines
for ii in range(n_fold + 1):
feedline_r = (n_fold > 0 and n_fold > ii)
feedline_l = (ii > 0)
self._draw_regs_sub(ii, feedline_l, feedline_r)
# draw gate number
if self._style.index:
for ii in range(max_anc):
if self._style.fold > 0:
x_coord = ii % self._style.fold + 1
y_coord = - (ii // self._style.fold) * (self._cond['n_lines'] + 1) + 0.7
else:
x_coord = ii + 1
y_coord = 0.7
self.ax.text(x_coord, y_coord, str(ii + 1), ha='center',
va='center', fontsize=self._style.sfs,
color=self._style.tc, clip_on=True,
zorder=PORDER_TEXT)
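    # Illustrative walk-through (added note, not original code) of the classical
    # condition handling in _draw_ops above, assuming register bundling is off,
    # three classical bits in total, the condition register covering bits 0-1,
    # and op['condition'][1] == 2:
    #   mask  = 0b011 -> fmt_c = '{:03b}', cmask = ['1', '1', '0']
    #   vlist = list('{:02b}'.format(2))[::-1] = ['0', '1']
    # so bit 0 is drawn as an open circle, bit 1 as a filled circle, bit 2 is
    # skipped, and the hex value 0x2 is printed under the lowest conditioned wire.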
@staticmethod
def param_parse(v, pimode=False):
for i, e in enumerate(v):
if pimode:
v[i] = MatplotlibDrawer.format_pi(e)
else:
v[i] = MatplotlibDrawer.format_numeric(e)
if v[i].startswith('-'):
v[i] = '$-$' + v[i][1:]
param = ', '.join(v)
return param
@staticmethod
def format_pi(val):
fracvals = MatplotlibDrawer.fraction(val)
buf = ''
if fracvals:
nmr, dnm = fracvals.numerator, fracvals.denominator
if nmr == 1:
buf += '$\\pi$'
elif nmr == -1:
buf += '-$\\pi$'
else:
buf += '{}$\\pi$'.format(nmr)
if dnm > 1:
buf += '/{}'.format(dnm)
return buf
else:
coef = MatplotlibDrawer.format_numeric(val / np.pi)
if coef == '0':
return '0'
return '{}$\\pi$'.format(coef)
@staticmethod
def format_numeric(val, tol=1e-5):
abs_val = abs(val)
if math.isclose(abs_val, 0.0, abs_tol=1e-100):
return '0'
if math.isclose(math.fmod(abs_val, 1.0),
0.0, abs_tol=tol) and 0.5 < abs_val < 9999.5:
return str(int(val))
if 0.1 <= abs_val < 100.0:
return '{:.2f}'.format(val)
return '{:.1e}'.format(val)
@staticmethod
def fraction(val, base=np.pi, n=100, tol=1e-5):
abs_val = abs(val)
for i in range(1, n):
for j in range(1, n):
if math.isclose(abs_val, i / j * base, rel_tol=tol):
if val < 0:
i *= -1
return fractions.Fraction(i, j)
return None
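# --- Illustrative usage (added for illustration, not part of the original module) ---
# A minimal sketch of the static parameter-formatting helpers defined above; the
# expected strings assume format_pi/format_numeric behave exactly as written here.
if __name__ == '__main__':
    print(MatplotlibDrawer.format_pi(np.pi / 2))       # -> $\pi$/2
    print(MatplotlibDrawer.format_pi(2 * np.pi))       # -> 2$\pi$
    print(MatplotlibDrawer.format_numeric(0.5))        # -> 0.50
    print(MatplotlibDrawer.format_numeric(12345.678))  # -> 1.2e+04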
| 38.593377 | 95 | 0.445055 | [
"Apache-2.0"
] | rabaniten/qiskit-terra | qiskit/tools/visualization/_matplotlib.py | 29,138 | Python |
# Copyright 2016-2018 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, unicode_literals, absolute_import, division
from future import standard_library
standard_library.install_aliases()
from future.builtins import *
import os as _os
PACKAGE_PATH = _os.path.dirname(_os.path.abspath(__file__))
from pyccc.exceptions import *
from pyccc.job import *
from pyccc.python import *
from pyccc.engines import *
from pyccc.ui import *
from pyccc.files import *
# Package metadata
from pyccc import _version
__version__ = _version.get_versions()['version']
__copyright__ = "Copyright 2016-2018 Autodesk Inc."
__license__ = "Apache 2.0"
| 33.714286 | 82 | 0.783051 | [
"Apache-2.0"
] | Autodesk/py-cloud-compute-cannon | pyccc/__init__.py | 1,180 | Python |
from math import sqrt
# Example script demonstrating conversion of if statements
x = True
if x:
print("X was true")
a = 3
b = 4.5
if b > a:
print("B was greater than a")
elif a > b:
print("A was greater than a")
else:
print("They are equal")
# Nested ifs are supported
if True:
if b < a:
if b < 0:
print("b is negative")
# Conditional supports function calls
if sqrt(b) > a:
print("Square Root B was greater than a")
# Cannot handle certain Python operators such as 'is' and 'in'
if a is b:
print("a is b")
# Lists currently not supported during translation
l = [1, 2, 3]
if a in l:
print("A is in l")
| 17.675676 | 58 | 0.62844 | [
"MIT"
] | AndrewKahr/pyplus | examples/example_if.py | 654 | Python |
"""
Copyright 2019 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: [email protected]
"""
import datetime
import logging
from typing import Any, Dict, List, Optional, Tuple, cast
from inmanta import data, util
from inmanta.const import ParameterSource
from inmanta.data.model import ResourceIdStr
from inmanta.protocol import handle, methods
from inmanta.protocol.common import attach_warnings
from inmanta.server import SLICE_AGENT_MANAGER, SLICE_DATABASE, SLICE_PARAM, SLICE_SERVER, SLICE_TRANSPORT
from inmanta.server import config as opt
from inmanta.server import protocol
from inmanta.server.agentmanager import AgentManager
from inmanta.server.server import Server
from inmanta.types import Apireturn, JsonType
LOGGER = logging.getLogger(__name__)
class ParameterService(protocol.ServerSlice):
"""Slice for parameter management"""
server_slice: Server
agentmanager: AgentManager
def __init__(self) -> None:
super(ParameterService, self).__init__(SLICE_PARAM)
self._fact_expire = opt.server_fact_expire.get()
self._fact_renew = opt.server_fact_renew.get()
def get_dependencies(self) -> List[str]:
return [SLICE_SERVER, SLICE_DATABASE, SLICE_AGENT_MANAGER]
def get_depended_by(self) -> List[str]:
return [SLICE_TRANSPORT]
async def prestart(self, server: protocol.Server) -> None:
await super().prestart(server)
self.server_slice = cast(Server, server.get_slice(SLICE_SERVER))
self.agentmanager = cast(AgentManager, server.get_slice(SLICE_AGENT_MANAGER))
async def start(self) -> None:
self.schedule(self.renew_expired_facts, self._fact_renew)
await super().start()
async def renew_expired_facts(self) -> None:
"""
Send out requests to renew expired facts
"""
LOGGER.info("Renewing expired parameters")
updated_before = datetime.datetime.now().astimezone() - datetime.timedelta(0, (self._fact_expire - self._fact_renew))
expired_params = await data.Parameter.get_updated_before(updated_before)
LOGGER.debug("Renewing %d expired parameters" % len(expired_params))
for param in expired_params:
if param.environment is None:
LOGGER.warning(
"Found parameter without environment (%s for resource %s). Deleting it.", param.name, param.resource_id
)
await param.delete()
else:
LOGGER.debug(
"Requesting new parameter value for %s of resource %s in env %s",
param.name,
param.resource_id,
param.environment,
)
await self.agentmanager.request_parameter(param.environment, param.resource_id)
unknown_parameters = await data.UnknownParameter.get_list(resolved=False)
for u in unknown_parameters:
if u.environment is None:
LOGGER.warning(
"Found unknown parameter without environment (%s for resource %s). Deleting it.", u.name, u.resource_id
)
await u.delete()
else:
LOGGER.debug("Requesting value for unknown parameter %s of resource %s in env %s", u.name, u.resource_id, u.id)
await self.agentmanager.request_parameter(u.environment, u.resource_id)
LOGGER.info("Done renewing expired parameters")
@handle(methods.get_param, param_id="id", env="tid")
async def get_param(self, env: data.Environment, param_id: str, resource_id: Optional[str] = None) -> Apireturn:
if resource_id is None:
params = await data.Parameter.get_list(environment=env.id, name=param_id)
else:
params = await data.Parameter.get_list(environment=env.id, name=param_id, resource_id=resource_id)
if len(params) == 0:
if resource_id is not None:
out = await self.agentmanager.request_parameter(env.id, resource_id)
return out
return 404
param = params[0]
# check if it was expired
now = datetime.datetime.now().astimezone()
if resource_id is None or (param.updated + datetime.timedelta(0, self._fact_expire)) > now:
return 200, {"parameter": params[0]}
LOGGER.info("Parameter %s of resource %s expired.", param_id, resource_id)
out = await self.agentmanager.request_parameter(env.id, resource_id)
return out
async def _update_param(
self,
env: data.Environment,
name: str,
value: str,
source: str,
resource_id: str,
metadata: JsonType,
recompile: bool = False,
) -> bool:
"""
Update or set a parameter.
This method returns true if:
- this update resolves an unknown
- recompile is true and the parameter updates an existing parameter to a new value
"""
LOGGER.debug("Updating/setting parameter %s in env %s (for resource %s)", name, env.id, resource_id)
if not isinstance(value, str):
value = str(value)
if resource_id is None:
resource_id = ""
params = await data.Parameter.get_list(environment=env.id, name=name, resource_id=resource_id)
value_updated = True
if len(params) == 0:
param = data.Parameter(
environment=env.id,
name=name,
resource_id=resource_id,
value=value,
source=source,
updated=datetime.datetime.now().astimezone(),
metadata=metadata,
)
await param.insert()
else:
param = params[0]
value_updated = param.value != value
await param.update(source=source, value=value, updated=datetime.datetime.now().astimezone(), metadata=metadata)
# check if the parameter is an unknown
unknown_params = await data.UnknownParameter.get_list(
environment=env.id, name=name, resource_id=resource_id, resolved=False
)
if len(unknown_params) > 0:
LOGGER.info(
"Received values for unknown parameters %s, triggering a recompile", ", ".join([x.name for x in unknown_params])
)
for p in unknown_params:
await p.update_fields(resolved=True)
return True
return recompile and value_updated
@handle(methods.set_param, param_id="id", env="tid")
async def set_param(
self,
env: data.Environment,
param_id: str,
source: ParameterSource,
value: str,
resource_id: str,
metadata: JsonType,
recompile: bool,
) -> Apireturn:
result = await self._update_param(env, param_id, value, source, resource_id, metadata, recompile)
warnings = None
if result:
compile_metadata = {
"message": "Recompile model because one or more parameters were updated",
"type": "param",
"params": [(param_id, resource_id)],
}
warnings = await self.server_slice._async_recompile(env, False, metadata=compile_metadata)
if resource_id is None:
resource_id = ""
params = await data.Parameter.get_list(environment=env.id, name=param_id, resource_id=resource_id)
return attach_warnings(200, {"parameter": params[0]}, warnings)
@handle(methods.set_parameters, env="tid")
async def set_parameters(self, env: data.Environment, parameters: List[Dict[str, Any]]) -> Apireturn:
recompile = False
params: List[Tuple[str, ResourceIdStr]] = []
for param in parameters:
name: str = param["id"]
source = param["source"]
value = param["value"] if "value" in param else None
resource_id: ResourceIdStr = param["resource_id"] if "resource_id" in param else None
metadata = param["metadata"] if "metadata" in param else None
result = await self._update_param(env, name, value, source, resource_id, metadata)
if result:
recompile = True
params.append((name, resource_id))
compile_metadata = {
"message": "Recompile model because one or more parameters were updated",
"type": "param",
"params": params,
}
warnings = None
if recompile:
warnings = await self.server_slice._async_recompile(env, False, metadata=compile_metadata)
return attach_warnings(200, None, warnings)
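    # Added illustrative note (not original code): set_parameters expects entries
    # shaped like the hypothetical payload below; "value", "resource_id" and
    # "metadata" are optional per entry, and the names/values shown are made up.
    #   [{"id": "db_port", "source": "user", "value": "5432",
    #     "resource_id": "", "metadata": {"reason": "manual override"}}]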
@handle(methods.delete_param, env="tid", parameter_name="id")
async def delete_param(self, env: data.Environment, parameter_name: str, resource_id: str) -> Apireturn:
if resource_id is None:
params = await data.Parameter.get_list(environment=env.id, name=parameter_name)
else:
params = await data.Parameter.get_list(environment=env.id, name=parameter_name, resource_id=resource_id)
if len(params) == 0:
return 404
param = params[0]
await param.delete()
metadata = {
"message": "Recompile model because one or more parameters were deleted",
"type": "param",
"params": [(param.name, param.resource_id)],
}
warnings = await self.server_slice._async_recompile(env, False, metadata=metadata)
return attach_warnings(200, None, warnings)
@handle(methods.list_params, env="tid")
async def list_params(self, env: data.Environment, query: Dict[str, str]) -> Apireturn:
params = await data.Parameter.list_parameters(env.id, **query)
return (
200,
{
"parameters": params,
"expire": self._fact_expire,
# Return datetime in UTC without explicit timezone offset
"now": util.datetime_utc_isoformat(datetime.datetime.now()),
},
)
| 38.730909 | 128 | 0.629049 | [
"Apache-2.0"
] | inmanta/inmanta-core | src/inmanta/server/services/paramservice.py | 10,651 | Python |
# some utils taken from the DeepXplore Implementation
import random
import sys
from collections import defaultdict
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.preprocessing import image
from keras import models, layers, activations
from scipy.spatial.distance import mahalanobis
from numpy.linalg import inv
from itertools import combinations
#loads a mnist image
def preprocess_image(img_path):
img = image.load_img(img_path, target_size=(28, 28), grayscale=True)
input_img_data = image.img_to_array(img)
input_img_data = input_img_data.reshape(1, 28, 28, 1)
input_img_data = input_img_data.astype('float32')
input_img_data /= 255
# input_img_data = preprocess_input(input_img_data) # final input shape = (1,224,224,3)
return input_img_data
def init_neuron_cov_dict(model, model_layer_dict):
for layer in model.layers:
if 'flatten' in layer.name or 'input' in layer.name:
continue
for index in range(layer.output_shape[-1]):
model_layer_dict[(layer.name, index)] = False
def neuron_to_cover(model_layer_dict):
not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_dict.items() if not v]
if not_covered:
layer_name, index = random.choice(not_covered)
else:
        layer_name, index = random.choice(list(model_layer_dict.keys()))
return layer_name, index
def get_neuron_coverage(model_layer_dict):
covered_neurons = len([v for v in model_layer_dict.values() if v])
total_neurons = len(model_layer_dict)
return covered_neurons, total_neurons, covered_neurons / float(total_neurons)
def update_neuron_coverage(input_data, model, model_layer_dict, threshold=0):
layer_names = [layer.name for layer in model.layers if
'flatten' not in layer.name and 'input' not in layer.name]
intermediate_layer_model = Model(inputs=model.input,
outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
scaled = scale(intermediate_layer_output[0])
for num_neuron in range(scaled.shape[-1]):
if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
model_layer_dict[(layer_names[i], num_neuron)] = True
print("new coverage found")
#To test
#gets the distance of the points in standard deviations
#note that it assumes that the points are normally distributed
def distance(point, mean, covarianceMatrix):
return mahalanobis(point, mean, inv(covarianceMatrix))
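# Illustrative check (added note, not original code): with an identity covariance
# matrix the Mahalanobis distance reduces to the Euclidean distance in standard
# deviations.
#   import numpy as np
#   distance(np.array([1.0, 1.0]), np.array([0.0, 0.0]), np.eye(2))  # -> sqrt(2) ~= 1.414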
# an adaptation of some code from deepXplore
# initializes a dictionary that will store which qudrants have been covered
# model - the model we are looking to covered
# layer_index - the layer we are exploring
# group_size - size of the group of neurons we are analyzing
# model_layer_dict - the object we want to initialize
def init_orthant_cov_dict(model, layer_index, group_size, model_layer_dict):
layer = model.layers[layer_index]
# some error handling
if 'flatten' in layer.name or 'input' in layer.name:
print("error in init_dict: layer_index points to the wrong layer")
# we initialize each combination
for neuron_group in combinations(range(layer.output_shape[-1]), group_size): # layer.output_shape[-1] returns the number of total_neurons
        for orthant in range(2 ** group_size):  # 2**group_size orthants per group ('^' is XOR in Python, not power)
model_layer_dict[(neuron_group, orthant)] = False
def get_orthant_coverage(model_layer_dict):
covered_orthants = len([v for v in model_layer_dict.values() if v])
total_orthants = len(model_layer_dict)
return covered_orthants, total_orthants, covered_orthants / float(total_orthants)
#this is meant to pick a orthant that is not covered
# we actually don't need to use this just yet, maybe if I decide to implement for DeepXplore
def next_orthant_to_cover(model_layer_dict):
not_covered = [(neuron_group, orthant) for (neuron_group, orthant), v in model_layer_dict.items() if not v]
if not_covered:
neuron_group, orthant = random.choice(not_covered)
else:
        neuron_group, orthant = random.choice(list(model_layer_dict.keys()))
return neuron_group, orthant
# creates a shortened model that ends at the nth layer, and has no activation function
# same code as from collect_data
def create_shortened_model(model, layer_depth):
# we get the neuron output for the penultimate layer for each neuron
# implemented with help from the suggestion at: https://stackoverflow.com/questions/45492318/keras-retrieve-value-of-node-before-activation-function
# we recreate the model, delete layers up to and including the layer we want to analyze, add a blank layer with no activation, and then import the old weights to this layer.
#make a new model
# some simple input checks
    if layer_depth < 0:
        print('layer depth must be positive!')
        sys.exit()
    if layer_depth > len(model.layers):
        print('layer depth too large!')
        sys.exit()
# save the original weights
wgts = model.layers[layer_depth].get_weights()
nthLayerNeurons = model.layers[layer_depth].output_shape[1]
#remove layers up to the nth layer
for i in range(len(model.layers)-layer_depth):
model.pop()
    model.summary()  # print the truncated architecture for inspection
# add new layer with no activation
model.add(layers.Dense(nthLayerNeurons,activation = None))
# with the new layer, load the previous weights
model.layers[layer_depth].set_weights(wgts)
# get the output of this new model.
return Model(inputs=model.input, outputs=model.layers[layer_depth].output )
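# Illustrative usage (added note, not original code): the names below are
# hypothetical; any Sequential Keras classifier works here, since
# create_shortened_model relies on model.pop().
#   from keras.models import load_model
#   shortened = create_shortened_model(load_model('mnist_model.h5'), layer_depth=7)
#   pre_activation_outputs = shortened.predict(some_input_batch)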
#this code updates the coverage given a certain input
def update_orthant_coverage(input_data, shortened_model, model_layer_dict, mean_vector, covariance_matrix, group_size=1, sd_threshold=1):
layer_outputs = shortened_model.predict(input_data) #get the output
# the reason that we use layer_outputs[0] is change it into a single row, rather than an array with a row.
for neuron_group in combinations(range(layer_outputs.shape[-1]),group_size):
group_output = np.asarray([layer_outputs[0][i] for i in neuron_group]) #get a list of the outputs
# we do binary addition to get the correct orthant index.
        # for example, if we only have 2 variables, we have 4 quadrants, and we need to classify them into indices 0, 1, 2, 3
#init the tools to find which orthant is being explored
orthant = 0
add = int(1)
for neuron_index in neuron_group:
if layer_outputs[0][neuron_index] > mean_vector[neuron_index]:
orthant += add
add *= 2
if model_layer_dict[(neuron_group,orthant)] == True:
continue #don't do the expensive action of loading the group cov, group mean, and calculating the distance
group_mean = np.asarray([mean_vector[i] for i in neuron_group]) #list of mean
#initialize the group numpy array for later calculation
group_cov_matrix = np.asarray([[covariance_matrix[j][i] for i in neuron_group] for j in neuron_group]) #dont ask me why
if(distance(group_output, group_mean, group_cov_matrix)>sd_threshold):
model_layer_dict[(neuron_group,orthant)] = True
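# Worked example (added note, not original code) of the orthant index computed in
# update_orthant_coverage above, assuming group_size=2 and neuron_group=(3, 7):
#   output[3] > mean[3] and output[7] <= mean[7]  ->  orthant = 1 (binary 01)
#   both outputs above their means                ->  orthant = 3 (binary 11)
# so each neuron group can land in 2**group_size = 4 orthants, indexed 0..3.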
# just a simple check if we have full coverage works for any coverage
def full_coverage(model_layer_dict):
if False in model_layer_dict.values():
return False
return True
# from here on is code from deepxplore
# util function to convert a tensor into a valid image
def deprocess_image(x):
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x.reshape(x.shape[1], x.shape[2]) # original shape (1,img_rows, img_cols,1)
def normalize(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
def constraint_occl(gradients, start_point, rect_shape):
new_grads = np.zeros_like(gradients)
new_grads[:, start_point[0]:start_point[0] + rect_shape[0],
start_point[1]:start_point[1] + rect_shape[1]] = gradients[:, start_point[0]:start_point[0] + rect_shape[0],
start_point[1]:start_point[1] + rect_shape[1]]
return new_grads
def constraint_light(gradients):
new_grads = np.ones_like(gradients)
grad_mean = np.mean(gradients)
return grad_mean * new_grads
def constraint_black(gradients, rect_shape=(6, 6)):
start_point = (
random.randint(0, gradients.shape[1] - rect_shape[0]), random.randint(0, gradients.shape[2] - rect_shape[1]))
new_grads = np.zeros_like(gradients)
patch = gradients[:, start_point[0]:start_point[0] + rect_shape[0], start_point[1]:start_point[1] + rect_shape[1]]
if np.mean(patch) < 0:
new_grads[:, start_point[0]:start_point[0] + rect_shape[0],
start_point[1]:start_point[1] + rect_shape[1]] = -np.ones_like(patch)
return new_grads
def init_coverage_tables(model1, model1_layer_index, model2, model2_layer_index, model3, model3_layer_index, group_size = 1):
model_layer_dict1 = defaultdict(bool)
model_layer_dict2 = defaultdict(bool)
model_layer_dict3 = defaultdict(bool)
    init_orthant_cov_dict(model1, model1_layer_index, group_size, model_layer_dict1)
    init_orthant_cov_dict(model2, model2_layer_index, group_size, model_layer_dict2)
    init_orthant_cov_dict(model3, model3_layer_index, group_size, model_layer_dict3)
return model_layer_dict1, model_layer_dict2, model_layer_dict3
def init_neuron_coverage_table(model1):
model_layer_dict1 = defaultdict(bool)
init_neuron_cov_dict(model1, model_layer_dict1)
return model_layer_dict1
def init_orthant_coverage_table(model1, layer_index, group_size):
model_layer_dict1 = defaultdict(bool)
init_orthant_cov_dict(model1, layer_index, group_size, model_layer_dict1)
return model_layer_dict1
def scale(intermediate_layer_output, rmax=1, rmin=0):
X_std = (intermediate_layer_output - intermediate_layer_output.min()) / (
intermediate_layer_output.max() - intermediate_layer_output.min())
X_scaled = X_std * (rmax - rmin) + rmin
return X_scaled
def fired(model, layer_name, index, input_data, threshold=0):
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
intermediate_layer_output = intermediate_layer_model.predict(input_data)[0]
scaled = scale(intermediate_layer_output)
if np.mean(scaled[..., index]) > threshold:
return True
return False
def diverged(predictions1, predictions2, predictions3, target):
# if predictions2 == predictions3 == target and predictions1 != target:
if not predictions1 == predictions2 == predictions3:
return True
return False
| 42.521401 | 177 | 0.726116 | [
"MIT"
] | j-chan-hkust/deep_testing_of_advanced_learning_systems | 4_Coverage_Evaluation/CIFAR10/utils.py | 10,928 | Python |
"""Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
__version__ = "$Revision: 38432 $" # Code version
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# These are purely informational; no code uses these.
format_version = "2.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
] # Old format versions we can read
# Keep in synch with cPickle. This is the highest protocol number we
# know how to read.
HIGHEST_PROTOCOL = 2
# Why use struct.pack() for pickling but marshal.loads() for
# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
# marshal.loads() is twice as fast as struct.unpack()!
mloads = marshal.loads
class PickleError(Exception):
"""A common base class for the other pickling exceptions."""
pass
class PicklingError(PickleError):
"""This exception is raised when an unpicklable object is passed to the
dump() method.
"""
pass
class UnpicklingError(PickleError):
"""This exception is raised when there is a problem unpickling an object,
such as a security violation.
Note that other exceptions may also be raised during unpickling, including
(but not necessarily limited to) AttributeError, EOFError, ImportError,
and IndexError.
"""
pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a' # append stack top to list below it
BUILD = 'b' # call __setstate__ or __dict__.update()
GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
DICT = 'd' # build a dict from stack items
EMPTY_DICT = '}' # push empty dict
APPENDS = 'e' # extend list on stack by topmost stack slice
GET = 'g' # push item from memo on stack; index is string arg
BINGET = 'h' # " " " " " " ; " " 1-byte arg
INST = 'i' # build & push class instance
LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
LIST = 'l' # build list from topmost stack items
EMPTY_LIST = ']' # push empty list
OBJ = 'o' # build & push class instance
PUT = 'p' # store stack top in memo; index is string arg
BINPUT = 'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's' # add key+value pair to dict
TUPLE = 't' # build tuple from topmost stack items
EMPTY_TUPLE = ')' # push empty tuple
SETITEMS = 'u' # modify dict by adding topmost key+value pairs
BINFLOAT = 'G' # push float; arg is 8-byte float encoding
TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = '\x80' # identify pickle protocol
NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
EXT1 = '\x82' # push object from extension registry; 1-byte index
EXT2 = '\x83' # ditto, but 2-byte index
EXT4 = '\x84' # ditto, but 4-byte index
TUPLE1 = '\x85' # build 1-tuple from stack top
TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
NEWTRUE = '\x88' # push True
NEWFALSE = '\x89' # push False
LONG1 = '\x8a' # push long from < 256 bytes
LONG4 = '\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if x[0].isalpha() and x == x.upper()])
del x
# Pickling machinery
class Pickler:
def __init__(self, file, protocol=None):
"""This takes a file-like object for writing a pickle data stream.
The optional protocol argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2. The default
protocol is 0, to be backwards compatible. (Protocol 0 is the
only protocol that can be written to a file opened in text
mode and read back successfully. When using a protocol higher
than 0, make sure the file is opened in binary mode, both when
pickling and unpickling.)
Protocol 1 is more efficient than protocol 0; protocol 2 is
more efficient than protocol 1.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The file parameter must have a write() method that accepts a single
string argument. It can thus be an open file object, a StringIO
object, or any other custom object that meets this interface.
"""
if protocol is None:
protocol = 0
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
self.write = file.write
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
def _pickle_moduledict(self, obj):
try:
modict = self.module_dict_ids
except AttributeError:
modict = {}
from sys import modules
for mod in modules.values():
if isinstance(mod, ModuleType):
try:
modict[id(mod.__dict__)] = mod
except KeyboardInterrupt:
raise
except: # obscure: the above can fail for
# arbitrary reasons, because of the py lib
pass
self.module_dict_ids = modict
thisid = id(obj)
try:
themodule = modict[thisid]
except KeyError:
return None
from __builtin__ import getattr
return getattr, (themodule, '__dict__')
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
if self.proto >= 2:
self.write(PROTO + chr(self.proto))
self.save(obj)
self.write(STOP)
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
# The use of the Unpickler memo length as the memo key is just a
# convention. The only requirement is that the memo values be unique.
# But there appears no advantage to any other scheme, and this
# scheme allows the Unpickler memo to be implemented as a plain (but
# growable) array, indexed by memo key.
if self.fast:
return
assert id(obj) not in self.memo
memo_len = len(self.memo)
self.write(self.put(memo_len))
self.memo[id(obj)] = memo_len, obj
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINPUT + chr(i)
else:
return LONG_BINPUT + pack("<i", i)
return PUT + repr(i) + '\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINGET + chr(i)
else:
return LONG_BINGET + pack("<i", i)
return GET + repr(i) + '\n'
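    # Illustrative note (added, not original code): with protocol >= 1 the memo
    # helpers above emit compact binary opcodes, e.g.
    #   self.put(1)   -> BINPUT + chr(1)          == 'q\x01'
    #   self.put(300) -> LONG_BINPUT + pack("<i") == 'r' + '\x2c\x01\x00\x00'
    #   self.get(1)   -> BINGET + chr(1)          == 'h\x01'
    # while protocol 0 falls back to the text forms 'p1\n' and 'g1\n'.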
def save(self, obj):
# Check for persistent id (defined by a subclass)
pid = self.persistent_id(obj)
if pid:
self.save_pers(pid)
return
# Check the memo
x = self.memo.get(id(obj))
if x:
self.write(self.get(x[0]))
return
# Check the type dispatch table
t = type(obj)
f = self.dispatch.get(t)
if f:
f(self, obj) # Call unbound method with explicit self
return
# Check for a class with a custom metaclass; treat as regular class
try:
issc = issubclass(t, TypeType)
except TypeError: # t is not a class (old Boost; see SF #502085)
issc = 0
if issc:
self.save_global(obj)
return
# Check copy_reg.dispatch_table
reduce = dispatch_table.get(t)
if reduce:
rv = reduce(obj)
else:
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
(t.__name__, obj))
# Check for string returned by reduce(), meaning "save as global"
if type(rv) is StringType:
self.save_global(obj, rv)
return
# Assert that reduce() returned a tuple
if type(rv) is not TupleType:
raise PicklingError("%s must return string or tuple" % reduce)
# Assert that it returned an appropriately sized tuple
l = len(rv)
if not (2 <= l <= 5):
raise PicklingError("Tuple returned by %s must have "
"two to five elements" % reduce)
# Save the reduce() output and finally memoize the object
self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid)
self.write(BINPERSID)
else:
self.write(PERSID + str(pid) + '\n')
def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
# This API is called by some subclasses
# Assert that args is a tuple or None
if not isinstance(args, TupleType):
raise PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not callable(func):
raise PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
# A __reduce__ implementation can direct protocol 2 to
# use the more efficient NEWOBJ opcode, while still
# allowing protocol 0 and 1 to work normally. For this to
# work, the function returned by __reduce__ should be
# called __newobj__, and its first argument should be a
# new-style class. The implementation for __newobj__
# should be as follows, although pickle has no way to
# verify this:
#
# def __newobj__(cls, *args):
# return cls.__new__(cls, *args)
#
# Protocols 0 and 1 will pickle a reference to __newobj__,
# while protocol 2 (and above) will pickle a reference to
# cls, the remaining args tuple, and the NEWOBJ code,
# which calls cls.__new__(cls, *args) at unpickling time
# (see load_newobj below). If __reduce__ returns a
# three-tuple, the state from the third tuple item will be
# pickled regardless of the protocol, calling __setstate__
# at unpickling time (see load_build below).
#
# Note that no standard __newobj__ implementation exists;
# you have to provide your own. This is to enforce
# compatibility with Python 2.2 (pickles written using
# protocol 0 or 1 in Python 2.3 should be unpicklable by
# Python 2.2).
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[NoneType] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(obj and NEWTRUE or NEWFALSE)
else:
self.write(obj and TRUE or FALSE)
dispatch[bool] = save_bool
def save_int(self, obj, pack=struct.pack):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-comp
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + chr(obj))
return
if obj <= 0xffff:
self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
return
# Next check for 4-byte signed ints:
high_bits = obj >> 31 # note that Python shift sign-extends
if high_bits == 0 or high_bits == -1:
# All high bits are copies of bit 2**31, so the value
# fits in a 4-byte signed int.
self.write(BININT + pack("<i", obj))
return
# Text pickle, or int too big to fit in signed 4-byte format.
self.write(INT + repr(obj) + '\n')
dispatch[IntType] = save_int
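    # Added illustrative note (not original code): in binary mode small ints get
    # the compact encodings chosen above, e.g. save_int(5) writes 'K\x05' (BININT1)
    # and save_int(70000) writes 'J' + pack('<i', 70000) (BININT); text mode
    # writes 'I5\n' instead.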
def save_long(self, obj, pack=struct.pack):
if self.proto >= 2:
bytes = encode_long(obj)
n = len(bytes)
if n < 256:
self.write(LONG1 + chr(n) + bytes)
else:
self.write(LONG4 + pack("<i", n) + bytes)
return
self.write(LONG + repr(obj) + '\n')
dispatch[LongType] = save_long
def save_float(self, obj, pack=struct.pack):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj) + '\n')
dispatch[FloatType] = save_float
def save_string(self, obj, pack=struct.pack):
if self.bin:
n = len(obj)
if n < 256:
self.write(SHORT_BINSTRING + chr(n) + obj)
else:
self.write(BINSTRING + pack("<i", n) + obj)
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_unicode(self, obj, pack=struct.pack):
if self.bin:
encoding = obj.encode('utf-8')
n = len(encoding)
self.write(BINUNICODE + pack("<i", n) + encoding)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
self.memoize(obj)
dispatch[UnicodeType] = save_unicode
if StringType == UnicodeType:
# This is true for Jython
def save_string(self, obj, pack=struct.pack):
unicode = obj.isunicode()
if self.bin:
if unicode:
obj = obj.encode("utf-8")
l = len(obj)
if l < 256 and not unicode:
self.write(SHORT_BINSTRING + chr(l) + obj)
else:
s = pack("<i", l)
if unicode:
self.write(BINUNICODE + s + obj)
else:
self.write(BINSTRING + s + obj)
else:
if unicode:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
obj = obj.encode('raw-unicode-escape')
self.write(UNICODE + obj + '\n')
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_tuple(self, obj):
write = self.write
proto = self.proto
n = len(obj)
if n == 0:
if proto:
write(EMPTY_TUPLE)
else:
write(MARK + TUPLE)
return
save = self.save
memo = self.memo
if n <= 3 and proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
write(POP * n + get)
else:
write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
# Subtle. d was not in memo when we entered save_tuple(), so
# the process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if proto:
write(POP_MARK + get)
else: # proto 0 -- POP_MARK not available
write(POP * (n+1) + get)
return
# No recursion.
self.write(TUPLE)
self.memoize(obj)
dispatch[TupleType] = save_tuple
# save_empty_tuple() isn't used by anything in Python 2.3. However, I
# found a Pickler subclass in Zope3 that calls it, so it's not harmless
# to remove it.
def save_empty_tuple(self, obj):
self.write(EMPTY_TUPLE)
def save_list(self, obj):
write = self.write
if self.bin:
write(EMPTY_LIST)
else: # proto 0 -- can't use EMPTY_LIST
write(MARK + LIST)
self.memoize(obj)
self._batch_appends(iter(obj))
dispatch[ListType] = save_list
# Keep in synch with cPickle's BATCHSIZE. Nothing will break if it gets
# out of synch, though.
_BATCHSIZE = 1000
def _batch_appends(self, items):
# Helper to batch up APPENDS sequences
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
x = items.next()
tmp.append(x)
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
# else tmp is empty, and we're done
def save_dict(self, obj):
## Stackless addition BEGIN
modict_saver = self._pickle_moduledict(obj)
if modict_saver is not None:
return self.save_reduce(*modict_saver)
## Stackless addition END
write = self.write
if self.bin:
write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.iteritems())
dispatch[DictionaryType] = save_dict
if not PyStringMap is None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
# Helper to batch up SETITEMS sequences; proto >= 1 only
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
tmp.append(items.next())
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
# else tmp is empty, and we're done
def save_inst(self, obj):
cls = obj.__class__
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
_keep_alive(args, memo)
else:
args = ()
write(MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(OBJ)
else:
for arg in args:
save(arg)
write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
else:
stuff = getstate()
_keep_alive(stuff, memo)
save(stuff)
write(BUILD)
dispatch[InstanceType] = save_inst
def save_global(self, obj, name=None, pack=struct.pack):
write = self.write
memo = self.memo
if name is None:
name = obj.__name__
module = getattr(obj, "__module__", None)
if module is None:
module = whichmodule(obj, name)
try:
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module, name))
else:
if klass is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module, name))
if self.proto >= 2:
code = _extension_registry.get((module, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + chr(code))
elif code <= 0xffff:
write("%c%c%c" % (EXT2, code&0xff, code>>8))
else:
write(EXT4 + pack("<i", code))
return
write(GLOBAL + module + '\n' + name + '\n')
self.memoize(obj)
def save_function(self, obj):
try:
return self.save_global(obj)
except PicklingError, e:
pass
# Check copy_reg.dispatch_table
reduce = dispatch_table.get(type(obj))
if reduce:
rv = reduce(obj)
else:
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce:
rv = reduce()
else:
raise e
return self.save_reduce(obj=obj, *rv)
dispatch[ClassType] = save_global
dispatch[FunctionType] = save_function
dispatch[BuiltinFunctionType] = save_global
dispatch[TypeType] = save_global
# Pickling helpers
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
# A cache for whichmodule(), mapping a function object to the name of
# the module in which the function was found.
classmap = {} # called classmap for backwards compatibility
def whichmodule(func, funcname):
"""Figure out the module in which a function occurs.
Search sys.modules for the module.
Cache in classmap.
Return a module name.
If the function cannot be found, return "__main__".
"""
# Python functions should always get an __module__ from their globals.
mod = getattr(func, "__module__", None)
if mod is not None:
return mod
if func in classmap:
return classmap[func]
for name, module in sys.modules.items():
if module is None:
continue # skip dummy package entries
if name != '__main__' and getattr(module, funcname, None) is func:
break
else:
name = '__main__'
classmap[func] = name
return name
# Unpickling machinery
class Unpickler:
def __init__(self, file):
"""This takes a file-like object for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
proto argument is needed.
The file-like object must have two methods, a read() method that
takes an integer argument, and a readline() method that requires no
arguments. Both methods should return a string. Thus file-like
object can be a file object opened for reading, a StringIO object,
or any other custom object that meets this interface.
"""
self.readline = file.readline
self.read = file.read
self.memo = {}
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
self.mark = object() # any new unique object
self.stack = []
self.append = self.stack.append
read = self.read
dispatch = self.dispatch
try:
while 1:
key = read(1)
dispatch[key](self)
except _Stop, stopinst:
return stopinst.value
# Return largest index k such that self.stack[k] is self.mark.
# If the stack doesn't contain a mark, eventually raises IndexError.
# This could be sped by maintaining another stack, of indices at which
# the mark appears. For that matter, the latter stack would suffice,
# and we wouldn't need to push mark objects on self.stack at all.
# Doing so is probably a good thing, though, since if the pickle is
# corrupt (or hostile) we may get a clue from finding self.mark embedded
# in unpickled objects.
def marker(self):
stack = self.stack
mark = self.mark
k = len(stack)-1
while stack[k] is not mark: k = k-1
return k
dispatch = {}
def load_eof(self):
raise EOFError
dispatch[''] = load_eof
def load_proto(self):
proto = ord(self.read(1))
if not 0 <= proto <= 2:
raise ValueError, "unsupported pickle protocol: %d" % proto
dispatch[PROTO] = load_proto
def load_persid(self):
pid = self.readline()[:-1]
self.append(self.persistent_load(pid))
dispatch[PERSID] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
try:
val = int(data)
except ValueError:
val = long(data)
self.append(val)
dispatch[INT] = load_int
def load_binint(self):
self.append(mloads('i' + self.read(4)))
dispatch[BININT] = load_binint
def load_binint1(self):
self.append(ord(self.read(1)))
dispatch[BININT1] = load_binint1
def load_binint2(self):
self.append(mloads('i' + self.read(2) + '\000\000'))
dispatch[BININT2] = load_binint2
def load_long(self):
self.append(long(self.readline()[:-1], 0))
dispatch[LONG] = load_long
def load_long1(self):
n = ord(self.read(1))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG1] = load_long1
def load_long4(self):
n = mloads('i' + self.read(4))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG4] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT] = load_float
def load_binfloat(self, unpack=struct.unpack):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT] = load_binfloat
def load_string(self):
rep = self.readline()[:-1]
for q in "\"'": # double or single quote
if rep.startswith(q):
if not rep.endswith(q):
raise ValueError, "insecure string pickle"
rep = rep[len(q):-len(q)]
break
else:
raise ValueError, "insecure string pickle"
self.append(rep.decode("string-escape"))
dispatch[STRING] = load_string
def load_binstring(self):
len = mloads('i' + self.read(4))
self.append(self.read(len))
dispatch[BINSTRING] = load_binstring
def load_unicode(self):
self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
dispatch[UNICODE] = load_unicode
def load_binunicode(self):
len = mloads('i' + self.read(4))
self.append(unicode(self.read(len),'utf-8'))
dispatch[BINUNICODE] = load_binunicode
def load_short_binstring(self):
len = ord(self.read(1))
self.append(self.read(len))
dispatch[SHORT_BINSTRING] = load_short_binstring
def load_tuple(self):
k = self.marker()
self.stack[k:] = [tuple(self.stack[k+1:])]
dispatch[TUPLE] = load_tuple
def load_empty_tuple(self):
self.stack.append(())
dispatch[EMPTY_TUPLE] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3] = load_tuple3
def load_empty_list(self):
self.stack.append([])
dispatch[EMPTY_LIST] = load_empty_list
def load_empty_dictionary(self):
self.stack.append({})
dispatch[EMPTY_DICT] = load_empty_dictionary
def load_list(self):
k = self.marker()
self.stack[k:] = [self.stack[k+1:]]
dispatch[LIST] = load_list
def load_dict(self):
k = self.marker()
d = {}
items = self.stack[k+1:]
for i in range(0, len(items), 2):
key = items[i]
value = items[i+1]
d[key] = value
self.stack[k:] = [d]
dispatch[DICT] = load_dict
# INST and OBJ differ only in how they get a class object. It's not
# only sensible to do the rest in a common routine, the two routines
# previously diverged and grew different bugs.
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, k):
args = tuple(self.stack[k+1:])
del self.stack[k:]
instantiated = 0
if (not args and
type(klass) is ClassType and
not hasattr(klass, "__getinitargs__")):
try:
value = _EmptyClass()
value.__class__ = klass
instantiated = 1
except RuntimeError:
# In restricted execution, assignment to inst.__class__ is
# prohibited
pass
if not instantiated:
try:
value = klass(*args)
except TypeError, err:
raise TypeError, "in constructor for %s: %s" % (
klass.__name__, str(err)), sys.exc_info()[2]
self.append(value)
def load_inst(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self._instantiate(klass, self.marker())
dispatch[INST] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
k = self.marker()
klass = self.stack.pop(k+1)
self._instantiate(klass, k)
dispatch[OBJ] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
dispatch[NEWOBJ] = load_newobj
def load_global(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL] = load_global
def load_ext1(self):
code = ord(self.read(1))
self.get_extension(code)
dispatch[EXT1] = load_ext1
def load_ext2(self):
code = mloads('i' + self.read(2) + '\000\000')
self.get_extension(code)
dispatch[EXT2] = load_ext2
def load_ext4(self):
code = mloads('i' + self.read(4))
self.get_extension(code)
dispatch[EXT4] = load_ext4
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
# Subclasses may override this
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
return klass
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
value = func(*args)
stack[-1] = value
dispatch[REDUCE] = load_reduce
def load_pop(self):
del self.stack[-1]
dispatch[POP] = load_pop
def load_pop_mark(self):
k = self.marker()
del self.stack[k:]
dispatch[POP_MARK] = load_pop_mark
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP] = load_dup
def load_get(self):
self.append(self.memo[self.readline()[:-1]])
dispatch[GET] = load_get
def load_binget(self):
i = ord(self.read(1))
self.append(self.memo[repr(i)])
dispatch[BINGET] = load_binget
def load_long_binget(self):
i = mloads('i' + self.read(4))
self.append(self.memo[repr(i)])
dispatch[LONG_BINGET] = load_long_binget
def load_put(self):
self.memo[self.readline()[:-1]] = self.stack[-1]
dispatch[PUT] = load_put
def load_binput(self):
i = ord(self.read(1))
self.memo[repr(i)] = self.stack[-1]
dispatch[BINPUT] = load_binput
def load_long_binput(self):
i = mloads('i' + self.read(4))
self.memo[repr(i)] = self.stack[-1]
dispatch[LONG_BINPUT] = load_long_binput
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND] = load_append
def load_appends(self):
stack = self.stack
mark = self.marker()
list = stack[mark - 1]
list.extend(stack[mark + 1:])
del stack[mark:]
dispatch[APPENDS] = load_appends
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM] = load_setitem
def load_setitems(self):
stack = self.stack
mark = self.marker()
dict = stack[mark - 1]
for i in range(mark + 1, len(stack), 2):
dict[stack[i]] = stack[i + 1]
del stack[mark:]
dispatch[SETITEMS] = load_setitems
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate:
setstate(state)
return
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
try:
inst.__dict__.update(state)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__
# is not accessible. Use the old way of unpickling
# the instance variables. This is a semantic
# difference when unpickling in restricted
# vs. unrestricted modes.
# Note, however, that cPickle has never tried to do the
# .update() business, and always uses
# PyObject_SetItem(inst.__dict__, key, value) in a
# loop over state.items().
for k, v in state.items():
setattr(inst, k, v)
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD] = load_build
def load_mark(self):
self.append(self.mark)
dispatch[MARK] = load_mark
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP] = load_stop
# Helper class for load_inst/load_obj
class _EmptyClass:
pass
# Encode/decode longs in linear time.
import binascii as _binascii
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
Note that 0L is a special case, returning an empty string, to save a
byte in the LONG1 pickling context.
>>> encode_long(0L)
''
>>> encode_long(255L)
'\xff\x00'
>>> encode_long(32767L)
'\xff\x7f'
>>> encode_long(-256L)
'\x00\xff'
>>> encode_long(-32768L)
'\x00\x80'
>>> encode_long(-128L)
'\x80'
>>> encode_long(127L)
'\x7f'
>>>
"""
if x == 0:
return ''
if x > 0:
ashex = hex(x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# need an even # of nibbles for unhexlify
ashex = "0x0" + ashex[2:]
elif int(ashex[2], 16) >= 8:
# "looks negative", so need a byte of sign bits
ashex = "0x00" + ashex[2:]
else:
# Build the 256's-complement: (1L << nbytes) + x. The trick is
# to find the number of bytes in linear time (although that should
# really be a constant-time task).
ashex = hex(-x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# Extend to a full byte.
nibbles += 1
nbits = nibbles * 4
x += 1L << nbits
assert x > 0
ashex = hex(x)
njunkchars = 2 + ashex.endswith('L')
newnibbles = len(ashex) - njunkchars
if newnibbles < nibbles:
ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
if int(ashex[2], 16) < 8:
# "looks positive", so need a byte of sign bits
ashex = "0xff" + ashex[2:]
if ashex.endswith('L'):
ashex = ashex[2:-1]
else:
ashex = ashex[2:]
assert len(ashex) & 1 == 0, (x, ashex)
binary = _binascii.unhexlify(ashex)
return binary[::-1]
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long('')
0L
>>> decode_long("\xff\x00")
255L
>>> decode_long("\xff\x7f")
32767L
>>> decode_long("\x00\xff")
-256L
>>> decode_long("\x00\x80")
-32768L
>>> decode_long("\x80")
-128L
>>> decode_long("\x7f")
127L
"""
nbytes = len(data)
if nbytes == 0:
return 0L
ashex = _binascii.hexlify(data[::-1])
n = long(ashex, 16) # quadratic time before Python 2.3; linear now
if data[-1] >= '\x80':
n -= 1L << (nbytes * 8)
return n
# Shorthands
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def dump(obj, file, protocol=None):
Pickler(file, protocol).dump(obj)
def dumps(obj, protocol=None):
file = StringIO()
Pickler(file, protocol).dump(obj)
return file.getvalue()
def load(file):
return Unpickler(file).load()
def loads(str):
file = StringIO(str)
return Unpickler(file).load()
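# Illustrative round-trip with the shorthands above (a minimal sketch):
#
#     s = dumps({"spam": 1}, protocol=2)   # serialize to a string
#     assert loads(s) == {"spam": 1}       # reconstruct the object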
# Doctest
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
| 32.437326 | 80 | 0.557793 | ["MIT"] | woodrow/pyoac | lib-python/modified-2.5.2/pickle.py | 46,580 | Python |
#######################################
# TESTING PURPOSE ONLY MODELS!! #
# DO NOT ADD THE APP TO INSTALLED_APPS#
#######################################
import datetime as base_datetime
from decimal import Decimal
from tempfile import gettempdir
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.utils.timezone import now
from model_bakery.gis import BAKER_GIS
from model_bakery.timezone import smart_datetime as datetime
from .fields import (
CustomFieldViaSettings,
CustomFieldWithGenerator,
CustomFieldWithoutGenerator,
CustomForeignKey,
FakeListField,
)
# check whether or not PIL is installed
try:
from PIL import ImageFile as PilImageFile # NoQA
except ImportError:
has_pil = False
else:
has_pil = True
if BAKER_GIS:
from django.contrib.gis.db import models
else:
from django.db import models
GENDER_CHOICES = [
("M", "male"),
("F", "female"),
("N", "non-binary"),
]
OCCUPATION_CHOICES = (
("Service Industry", (("waitress", "Waitress"), ("bartender", "Bartender"))),
("Education", (("teacher", "Teacher"), ("principal", "Principal"))),
)
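# Note: the nested tuples in OCCUPATION_CHOICES are Django "grouped" choices;
# form widgets render each outer label ("Service Industry", "Education") as an
# option group containing its inner (value, label) pairs.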
TEST_TIME = base_datetime.datetime(2014, 7, 21, 15, 39, 58, 457698)
class ModelWithImpostorField(models.Model):
pass
class Profile(models.Model):
email = models.EmailField()
class User(models.Model):
profile = models.ForeignKey(
Profile, blank=True, null=True, on_delete=models.CASCADE
)
class PaymentBill(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
value = models.FloatField()
class Person(models.Model):
gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
# Jards Macalé is an amazing brazilian musician! =]
enjoy_jards_macale = models.BooleanField(default=True)
like_metal_music = models.BooleanField(default=False)
name = models.CharField(max_length=30)
nickname = models.SlugField(max_length=36)
age = models.IntegerField()
bio = models.TextField()
birthday = models.DateField()
birth_time = models.TimeField()
appointment = models.DateTimeField()
blog = models.URLField()
occupation = models.CharField(max_length=10, choices=OCCUPATION_CHOICES)
uuid = models.UUIDField(primary_key=False)
name_hash = models.BinaryField(max_length=16)
days_since_last_login = models.BigIntegerField()
duration_of_sleep = models.DurationField()
email = models.EmailField()
id_document = models.CharField(unique=True, max_length=10)
try:
from django.db.models import JSONField
data = JSONField()
except ImportError:
# Skip JSONField-related fields
pass
try:
from django.contrib.postgres.fields import ArrayField, HStoreField
from django.contrib.postgres.fields import JSONField as PostgresJSONField
from django.contrib.postgres.fields.citext import (
CICharField,
CIEmailField,
CITextField,
)
from django.contrib.postgres.fields.ranges import (
BigIntegerRangeField,
DateRangeField,
DateTimeRangeField,
IntegerRangeField,
)
if settings.USING_POSTGRES:
acquaintances = ArrayField(models.IntegerField())
postgres_data = PostgresJSONField()
hstore_data = HStoreField()
ci_char = CICharField(max_length=30)
ci_email = CIEmailField()
ci_text = CITextField()
int_range = IntegerRangeField()
bigint_range = BigIntegerRangeField()
date_range = DateRangeField()
datetime_range = DateTimeRangeField()
except ImportError:
# Skip PostgreSQL-related fields
pass
try:
from django.contrib.postgres.fields.ranges import FloatRangeField
if settings.USING_POSTGRES:
float_range = FloatRangeField()
except ImportError:
# Django version greater or equal than 3.1
pass
try:
from django.contrib.postgres.fields.ranges import DecimalRangeField
if settings.USING_POSTGRES:
decimal_range = DecimalRangeField()
except ImportError:
# Django version lower than 2.2
pass
if BAKER_GIS:
geom = models.GeometryField()
point = models.PointField()
line_string = models.LineStringField()
polygon = models.PolygonField()
multi_point = models.MultiPointField()
multi_line_string = models.MultiLineStringField()
multi_polygon = models.MultiPolygonField()
geom_collection = models.GeometryCollectionField()
class Dog(models.Model):
class Meta:
order_with_respect_to = "owner"
owner = models.ForeignKey("Person", on_delete=models.CASCADE)
breed = models.CharField(max_length=50)
created = models.DateTimeField(auto_now_add=True)
friends_with = models.ManyToManyField("Dog")
class GuardDog(Dog):
pass
class Home(models.Model):
address = models.CharField(max_length=200)
owner = models.ForeignKey("Person", on_delete=models.CASCADE)
dogs = models.ManyToManyField("Dog")
class LonelyPerson(models.Model):
only_friend = models.OneToOneField(Person, on_delete=models.CASCADE)
class RelatedNamesModel(models.Model):
name = models.CharField(max_length=256)
one_to_one = models.OneToOneField(
Person, related_name="one_related", on_delete=models.CASCADE
)
foreign_key = models.ForeignKey(
Person, related_name="fk_related", on_delete=models.CASCADE
)
class ModelWithOverridedSave(Dog):
def save(self, *args, **kwargs):
self.owner = kwargs.pop("owner")
return super(ModelWithOverridedSave, self).save(*args, **kwargs)
class Classroom(models.Model):
students = models.ManyToManyField(Person, null=True)
active = models.NullBooleanField()
class Store(models.Model):
customers = models.ManyToManyField(Person, related_name="favorite_stores")
employees = models.ManyToManyField(Person, related_name="employers")
suppliers = models.ManyToManyField(
Person, related_name="suppliers", blank=True, null=True
)
class DummyEmptyModel(models.Model):
pass
class DummyIntModel(models.Model):
int_field = models.IntegerField()
small_int_field = models.SmallIntegerField()
big_int_field = models.BigIntegerField()
class DummyPositiveIntModel(models.Model):
positive_small_int_field = models.PositiveSmallIntegerField()
positive_int_field = models.PositiveIntegerField()
class DummyNumbersModel(models.Model):
float_field = models.FloatField()
class DummyDecimalModel(models.Model):
decimal_field = models.DecimalField(max_digits=1, decimal_places=0)
class UnsupportedField(models.Field):
description = "I'm bad company, baker doesn't know me"
def __init__(self, *args, **kwargs):
super(UnsupportedField, self).__init__(*args, **kwargs)
class UnsupportedModel(models.Model):
unsupported_field = UnsupportedField()
class DummyGenericForeignKeyModel(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
class DummyGenericRelationModel(models.Model):
relation = GenericRelation(DummyGenericForeignKeyModel)
class DummyNullFieldsModel(models.Model):
null_foreign_key = models.ForeignKey(
"DummyBlankFieldsModel", null=True, on_delete=models.CASCADE
)
null_integer_field = models.IntegerField(null=True)
class DummyBlankFieldsModel(models.Model):
blank_char_field = models.CharField(max_length=50, blank=True)
blank_text_field = models.TextField(max_length=300, blank=True)
class ExtendedDefaultField(models.IntegerField):
pass
class DummyDefaultFieldsModel(models.Model):
default_id = models.AutoField(primary_key=True)
default_char_field = models.CharField(max_length=50, default="default")
default_text_field = models.TextField(default="default")
default_int_field = models.IntegerField(default=123)
default_float_field = models.FloatField(default=123.0)
default_date_field = models.DateField(default="2012-01-01")
default_date_time_field = models.DateTimeField(default=datetime(2012, 1, 1))
default_time_field = models.TimeField(default="00:00:00")
default_decimal_field = models.DecimalField(
max_digits=5, decimal_places=2, default=Decimal("0")
)
default_email_field = models.EmailField(default="[email protected]")
default_slug_field = models.SlugField(default="a-slug")
default_unknown_class_field = ExtendedDefaultField(default=42)
default_callable_int_field = models.IntegerField(default=lambda: 12)
default_callable_datetime_field = models.DateTimeField(default=now)
class DummyFileFieldModel(models.Model):
fs = FileSystemStorage(location=gettempdir())
file_field = models.FileField(upload_to="%Y/%m/%d", storage=fs)
if has_pil:
class DummyImageFieldModel(models.Model):
fs = FileSystemStorage(location=gettempdir())
image_field = models.ImageField(upload_to="%Y/%m/%d", storage=fs)
else:
# doesn't matter, won't be using
class DummyImageFieldModel(models.Model):
pass
class DummyMultipleInheritanceModel(DummyDefaultFieldsModel, Person):
my_id = models.AutoField(primary_key=True)
my_dummy_field = models.IntegerField()
class Ambiguous(models.Model):
name = models.CharField(max_length=20)
class School(models.Model):
name = models.CharField(max_length=50)
students = models.ManyToManyField(Person, through="SchoolEnrollment")
class SchoolEnrollment(models.Model):
start_date = models.DateField(auto_now_add=True)
school = models.ForeignKey(School, on_delete=models.CASCADE)
student = models.ForeignKey(Person, on_delete=models.CASCADE)
class NonAbstractPerson(Person):
dummy_count = models.IntegerField()
class CustomFieldWithGeneratorModel(models.Model):
custom_value = CustomFieldWithGenerator()
class CustomFieldWithoutGeneratorModel(models.Model):
custom_value = CustomFieldWithoutGenerator()
class CustomFieldViaSettingsModel(models.Model):
custom_value = CustomFieldViaSettings()
class CustomForeignKeyWithGeneratorModel(models.Model):
custom_fk = CustomForeignKey(
Profile, blank=True, null=True, on_delete=models.CASCADE
)
class DummyUniqueIntegerFieldModel(models.Model):
value = models.IntegerField(unique=True)
class ModelWithNext(models.Model):
attr = models.CharField(max_length=10)
def next(self):
return "foo"
class BaseModelForNext(models.Model):
fk = models.ForeignKey(ModelWithNext, on_delete=models.CASCADE)
class BaseModelForList(models.Model):
fk = FakeListField()
class Movie(models.Model):
title = models.CharField(max_length=30)
class MovieManager(models.Manager):
def get_queryset(self):
"""
Annotate queryset with an alias field 'name'.
We want to test whether this annotation has been run after
calling `baker.make()`.
"""
return super(MovieManager, self).get_queryset().annotate(name=models.F("title"))
class MovieWithAnnotation(Movie):
objects = MovieManager()
class CastMember(models.Model):
movie = models.ForeignKey(
Movie, related_name="cast_members", on_delete=models.CASCADE
)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
class DummyGenericIPAddressFieldModel(models.Model):
ipv4_field = models.GenericIPAddressField(protocol="IPv4")
ipv6_field = models.GenericIPAddressField(protocol="IPv6")
ipv46_field = models.GenericIPAddressField(protocol="both")
class AbstractModel(models.Model):
class Meta(object):
abstract = True
name = models.CharField(max_length=30)
class SubclassOfAbstract(AbstractModel):
height = models.IntegerField()
class NonStandardManager(models.Model):
name = models.CharField(max_length=30)
manager = models.Manager()
| 29.401914 | 88 | 0.71904 | ["Apache-2.0"] | atimilson/model_bakery | tests/generic/models.py | 12,291 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetServerCertificateNameRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Slb', '2014-05-15', 'SetServerCertificateName','slb')
def get_access_key_id(self):
return self.get_query_params().get('access_key_id')
def set_access_key_id(self,access_key_id):
self.add_query_param('access_key_id',access_key_id)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ServerCertificateId(self):
return self.get_query_params().get('ServerCertificateId')
def set_ServerCertificateId(self,ServerCertificateId):
self.add_query_param('ServerCertificateId',ServerCertificateId)
def get_ServerCertificateName(self):
return self.get_query_params().get('ServerCertificateName')
def set_ServerCertificateName(self,ServerCertificateName):
self.add_query_param('ServerCertificateName',ServerCertificateName)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self,Tags):
		self.add_query_param('Tags',Tags)
| 35.472222 | 83 | 0.780736 | ["Apache-2.0"] | DataDog/aliyun-openapi-python-sdk | aliyun-python-sdk-slb/aliyunsdkslb/request/v20140515/SetServerCertificateNameRequest.py | 2,554 | Python |
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, Iterable, Optional
from prometheus_client import Gauge
from synapse.api.errors import Codes, SynapseError
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.push import Pusher, PusherConfig, PusherConfigException
from synapse.push.pusher import PusherFactory
from synapse.replication.http.push import ReplicationRemovePusherRestServlet
from synapse.types import JsonDict, RoomStreamToken
from synapse.util.async_helpers import concurrently_execute
from synapse.util.threepids import canonicalise_email
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
synapse_pushers = Gauge(
"synapse_pushers", "Number of active synapse pushers", ["kind", "app_id"]
)
class PusherPool:
"""
The pusher pool. This is responsible for dispatching notifications of new events to
the http and email pushers.
It provides three methods which are designed to be called by the rest of the
application: `start`, `on_new_notifications`, and `on_new_receipts`: each of these
delegates to each of the relevant pushers.
Note that it is expected that each pusher will have its own 'processing' loop which
will send out the notifications in the background, rather than blocking until the
notifications are sent; accordingly Pusher.on_started, Pusher.on_new_notifications and
Pusher.on_new_receipts are not expected to return awaitables.
"""
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.pusher_factory = PusherFactory(hs)
self.store = self.hs.get_datastore()
self.clock = self.hs.get_clock()
# We shard the handling of push notifications by user ID.
self._pusher_shard_config = hs.config.worker.pusher_shard_config
self._instance_name = hs.get_instance_name()
self._should_start_pushers = (
self._instance_name in self._pusher_shard_config.instances
)
# We can only delete pushers on master.
self._remove_pusher_client = None
if hs.config.worker.worker_app:
self._remove_pusher_client = ReplicationRemovePusherRestServlet.make_client(
hs
)
# Record the last stream ID that we were poked about so we can get
# changes since then. We set this to the current max stream ID on
# startup as every individual pusher will have checked for changes on
# startup.
self._last_room_stream_id_seen = self.store.get_room_max_stream_ordering()
# map from user id to app_id:pushkey to pusher
self.pushers: Dict[str, Dict[str, Pusher]] = {}
self._account_validity_handler = hs.get_account_validity_handler()
def start(self) -> None:
"""Starts the pushers off in a background process."""
if not self._should_start_pushers:
logger.info("Not starting pushers because they are disabled in the config")
return
run_as_background_process("start_pushers", self._start_pushers)
async def add_pusher(
self,
user_id: str,
access_token: Optional[int],
kind: str,
app_id: str,
app_display_name: str,
device_display_name: str,
pushkey: str,
lang: Optional[str],
data: JsonDict,
profile_tag: str = "",
) -> Optional[Pusher]:
"""Creates a new pusher and adds it to the pool
Returns:
The newly created pusher.
"""
if kind == "email":
email_owner = await self.store.get_user_id_by_threepid(
"email", canonicalise_email(pushkey)
)
if email_owner != user_id:
raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
time_now_msec = self.clock.time_msec()
# create the pusher setting last_stream_ordering to the current maximum
# stream ordering, so it will process pushes from this point onwards.
last_stream_ordering = self.store.get_room_max_stream_ordering()
# we try to create the pusher just to validate the config: it
# will then get pulled out of the database,
# recreated, added and started: this means we have only one
# code path adding pushers.
self.pusher_factory.create_pusher(
PusherConfig(
id=None,
user_name=user_id,
access_token=access_token,
profile_tag=profile_tag,
kind=kind,
app_id=app_id,
app_display_name=app_display_name,
device_display_name=device_display_name,
pushkey=pushkey,
ts=time_now_msec,
lang=lang,
data=data,
last_stream_ordering=last_stream_ordering,
last_success=None,
failing_since=None,
)
)
await self.store.add_pusher(
user_id=user_id,
access_token=access_token,
kind=kind,
app_id=app_id,
app_display_name=app_display_name,
device_display_name=device_display_name,
pushkey=pushkey,
pushkey_ts=time_now_msec,
lang=lang,
data=data,
last_stream_ordering=last_stream_ordering,
profile_tag=profile_tag,
)
pusher = await self.start_pusher_by_id(app_id, pushkey, user_id)
return pusher
async def remove_pushers_by_app_id_and_pushkey_not_user(
self, app_id: str, pushkey: str, not_user_id: str
) -> None:
to_remove = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
for p in to_remove:
if p.user_name != not_user_id:
logger.info(
"Removing pusher for app id %s, pushkey %s, user %s",
app_id,
pushkey,
p.user_name,
)
await self.remove_pusher(p.app_id, p.pushkey, p.user_name)
async def remove_pushers_by_access_token(
self, user_id: str, access_tokens: Iterable[int]
) -> None:
"""Remove the pushers for a given user corresponding to a set of
access_tokens.
Args:
user_id: user to remove pushers for
access_tokens: access token *ids* to remove pushers for
"""
tokens = set(access_tokens)
for p in await self.store.get_pushers_by_user_id(user_id):
if p.access_token in tokens:
logger.info(
"Removing pusher for app id %s, pushkey %s, user %s",
p.app_id,
p.pushkey,
p.user_name,
)
await self.remove_pusher(p.app_id, p.pushkey, p.user_name)
def on_new_notifications(self, max_token: RoomStreamToken) -> None:
if not self.pushers:
# nothing to do here.
return
# We just use the minimum stream ordering and ignore the vector clock
# component. This is safe to do as long as we *always* ignore the vector
# clock components.
max_stream_id = max_token.stream
if max_stream_id < self._last_room_stream_id_seen:
# Nothing to do
return
# We only start a new background process if necessary rather than
# optimistically (to cut down on overhead).
self._on_new_notifications(max_token)
@wrap_as_background_process("on_new_notifications")
async def _on_new_notifications(self, max_token: RoomStreamToken) -> None:
# We just use the minimum stream ordering and ignore the vector clock
# component. This is safe to do as long as we *always* ignore the vector
# clock components.
max_stream_id = max_token.stream
prev_stream_id = self._last_room_stream_id_seen
self._last_room_stream_id_seen = max_stream_id
try:
users_affected = await self.store.get_push_action_users_in_range(
prev_stream_id, max_stream_id
)
for u in users_affected:
# Don't push if the user account has expired
expired = await self._account_validity_handler.is_user_expired(u)
if expired:
continue
if u in self.pushers:
for p in self.pushers[u].values():
p.on_new_notifications(max_token)
except Exception:
logger.exception("Exception in pusher on_new_notifications")
async def on_new_receipts(
self, min_stream_id: int, max_stream_id: int, affected_room_ids: Iterable[str]
) -> None:
if not self.pushers:
# nothing to do here.
return
try:
# Need to subtract 1 from the minimum because the lower bound here
# is not inclusive
users_affected = await self.store.get_users_sent_receipts_between(
min_stream_id - 1, max_stream_id
)
for u in users_affected:
# Don't push if the user account has expired
expired = await self._account_validity_handler.is_user_expired(u)
if expired:
continue
if u in self.pushers:
for p in self.pushers[u].values():
p.on_new_receipts(min_stream_id, max_stream_id)
except Exception:
logger.exception("Exception in pusher on_new_receipts")
async def start_pusher_by_id(
self, app_id: str, pushkey: str, user_id: str
) -> Optional[Pusher]:
"""Look up the details for the given pusher, and start it
Returns:
The pusher started, if any
"""
if not self._should_start_pushers:
return None
if not self._pusher_shard_config.should_handle(self._instance_name, user_id):
return None
resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
pusher_config = None
for r in resultlist:
if r.user_name == user_id:
pusher_config = r
pusher = None
if pusher_config:
pusher = await self._start_pusher(pusher_config)
return pusher
async def _start_pushers(self) -> None:
"""Start all the pushers"""
pushers = await self.store.get_all_pushers()
# Stagger starting up the pushers so we don't completely drown the
# process on start up.
await concurrently_execute(self._start_pusher, pushers, 10)
logger.info("Started pushers")
async def _start_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]:
"""Start the given pusher
Args:
pusher_config: The pusher configuration with the values pulled from the db table
Returns:
The newly created pusher or None.
"""
if not self._pusher_shard_config.should_handle(
self._instance_name, pusher_config.user_name
):
return None
try:
p = self.pusher_factory.create_pusher(pusher_config)
except PusherConfigException as e:
logger.warning(
"Pusher incorrectly configured id=%i, user=%s, appid=%s, pushkey=%s: %s",
pusher_config.id,
pusher_config.user_name,
pusher_config.app_id,
pusher_config.pushkey,
e,
)
return None
except Exception:
logger.exception(
"Couldn't start pusher id %i: caught Exception",
pusher_config.id,
)
return None
if not p:
return None
appid_pushkey = "%s:%s" % (pusher_config.app_id, pusher_config.pushkey)
byuser = self.pushers.setdefault(pusher_config.user_name, {})
if appid_pushkey in byuser:
byuser[appid_pushkey].on_stop()
byuser[appid_pushkey] = p
synapse_pushers.labels(type(p).__name__, p.app_id).inc()
# Check if there *may* be push to process. We do this as this check is a
# lot cheaper to do than actually fetching the exact rows we need to
# push.
user_id = pusher_config.user_name
last_stream_ordering = pusher_config.last_stream_ordering
if last_stream_ordering:
have_notifs = await self.store.get_if_maybe_push_in_range_for_user(
user_id, last_stream_ordering
)
else:
# We always want to default to starting up the pusher rather than
# risk missing push.
have_notifs = True
p.on_started(have_notifs)
return p
async def remove_pusher(self, app_id: str, pushkey: str, user_id: str) -> None:
appid_pushkey = "%s:%s" % (app_id, pushkey)
byuser = self.pushers.get(user_id, {})
if appid_pushkey in byuser:
logger.info("Stopping pusher %s / %s", user_id, appid_pushkey)
pusher = byuser.pop(appid_pushkey)
pusher.on_stop()
synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec()
# We can only delete pushers on master.
if self._remove_pusher_client:
await self._remove_pusher_client(
app_id=app_id, pushkey=pushkey, user_id=user_id
)
else:
await self.store.delete_pusher_by_app_id_pushkey_user_id(
app_id, pushkey, user_id
)
| 36.045 | 92 | 0.625121 | ["Apache-2.0"] | 3ayazaya/synapse | synapse/push/pusherpool.py | 14,418 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class SqlPoolBlobAuditingPoliciesOperations(object):
"""SqlPoolBlobAuditingPoliciesOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for this operation. Constant value: "2019-06-01-preview".
:ivar blob_auditing_policy_name: The name of the blob auditing policy. Constant value: "default".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-06-01-preview"
self.blob_auditing_policy_name = "default"
self.config = config
def get(
self, resource_group_name, workspace_name, sql_pool_name, custom_headers=None, raw=False, **operation_config):
"""Get a SQL pool's blob auditing policy.
Get a SQL pool's blob auditing policy.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace
:type workspace_name: str
:param sql_pool_name: SQL pool name
:type sql_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SqlPoolBlobAuditingPolicy or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
'blobAuditingPolicyName': self._serialize.url("self.blob_auditing_policy_name", self.blob_auditing_policy_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SqlPoolBlobAuditingPolicy', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/auditingSettings/{blobAuditingPolicyName}'}
def create_or_update(
self, resource_group_name, workspace_name, sql_pool_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a SQL pool's blob auditing policy.
Creates or updates a SQL pool's blob auditing policy.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace
:type workspace_name: str
:param sql_pool_name: SQL pool name
:type sql_pool_name: str
:param parameters: The database blob auditing policy.
:type parameters: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SqlPoolBlobAuditingPolicy or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
'blobAuditingPolicyName': self._serialize.url("self.blob_auditing_policy_name", self.blob_auditing_policy_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'SqlPoolBlobAuditingPolicy')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SqlPoolBlobAuditingPolicy', response)
if response.status_code == 201:
deserialized = self._deserialize('SqlPoolBlobAuditingPolicy', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/auditingSettings/{blobAuditingPolicyName}'}
def list_by_sql_pool(
self, resource_group_name, workspace_name, sql_pool_name, custom_headers=None, raw=False, **operation_config):
"""Lists auditing settings of a Sql pool.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace
:type workspace_name: str
:param sql_pool_name: SQL pool name
:type sql_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of SqlPoolBlobAuditingPolicy
:rtype:
~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicyPaged[~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_sql_pool.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.SqlPoolBlobAuditingPolicyPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_by_sql_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/auditingSettings'}
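    # Typical call path (sketch; the client attribute name below is an
    # assumption, not taken from this file): a generated management client
    # exposes these operations as an attribute, e.g.
    #
    #     policy = client.sql_pool_blob_auditing_policies.get(
    #         resource_group_name, workspace_name, sql_pool_name)
    #     for p in client.sql_pool_blob_auditing_policies.list_by_sql_pool(
    #             resource_group_name, workspace_name, sql_pool_name):
    #         print(p.name)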
| 49.197719 | 229 | 0.672695 | ["Unlicense", "MIT"] | amcclead7336/Enterprise_Data_Science_Final | venv/lib/python3.8/site-packages/azure/mgmt/synapse/operations/_sql_pool_blob_auditing_policies_operations.py | 12,939 | Python |
from functools import partial
from keras_metrics import metrics as m
from keras_metrics import casts
__version__ = "1.2.1"
def metric_fn(cls, cast_strategy):
def fn(label=0, **kwargs):
metric = cls(label=label, cast_strategy=cast_strategy, **kwargs)
metric.__name__ = "%s_%s" % (cast_strategy.__name__, cls.__name__)
return metric
return fn
binary_metric = partial(
metric_fn, cast_strategy=casts.binary)
categorical_metric = partial(
metric_fn, cast_strategy=casts.categorical)
sparse_categorical_metric = partial(
metric_fn, cast_strategy=casts.sparse_categorical)
binary_true_positive = binary_metric(m.true_positive)
binary_true_negative = binary_metric(m.true_negative)
binary_false_positive = binary_metric(m.false_positive)
binary_false_negative = binary_metric(m.false_negative)
binary_precision = binary_metric(m.precision)
binary_recall = binary_metric(m.recall)
binary_f1_score = binary_metric(m.f1_score)
binary_average_recall = binary_metric(m.average_recall)
categorical_true_positive = categorical_metric(m.true_positive)
categorical_true_negative = categorical_metric(m.true_negative)
categorical_false_positive = categorical_metric(m.false_positive)
categorical_false_negative = categorical_metric(m.false_negative)
categorical_precision = categorical_metric(m.precision)
categorical_recall = categorical_metric(m.recall)
categorical_f1_score = categorical_metric(m.f1_score)
categorical_average_recall = categorical_metric(m.average_recall)
sparse_categorical_true_positive = sparse_categorical_metric(m.true_positive)
sparse_categorical_true_negative = sparse_categorical_metric(m.true_negative)
sparse_categorical_false_positive = sparse_categorical_metric(m.false_positive)
sparse_categorical_false_negative = sparse_categorical_metric(m.false_negative)
sparse_categorical_precision = sparse_categorical_metric(m.precision)
sparse_categorical_recall = sparse_categorical_metric(m.recall)
sparse_categorical_f1_score = sparse_categorical_metric(m.f1_score)
sparse_categorical_average_recall = sparse_categorical_metric(m.average_recall)
# For backward compatibility.
true_positive = binary_true_positive
true_negative = binary_true_negative
false_positive = binary_false_positive
false_negative = binary_false_negative
precision = binary_precision
recall = binary_recall
f1_score = binary_f1_score
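# Usage sketch (assumes a Keras model being compiled; illustrative only):
#
#     import keras_metrics as km
#     model.compile(optimizer="adam",
#                   loss="binary_crossentropy",
#                   metrics=[km.binary_precision(), km.binary_recall()])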
| 35.522388 | 79 | 0.843697 | ["BSD-3-Clause"] | maikherbig/AIDeveloper | AIDeveloper/keras_metrics/__init__.py | 2,380 | Python |
# This file contains a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
import numpy as np
import operator
from sklearn.utils import check_random_state
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribtion over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
            raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
            found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
#Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
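# Illustrative usage of this backport (not part of the original module; shown
# only to highlight the extra random_state argument that the NumPy version of
# choice() does not take):
#   from sklearn.utils.random import choice
#   choice(5, size=3, replace=False, random_state=0)  # reproducible 3-element sample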
| 33.353535 | 77 | 0.590703 | [
"Apache-2.0"
] | bopopescu/fbserver | venv/lib/python2.7/site-packages/sklearn/utils/random.py | 6,604 | Python |
#!/usr/bin/env python3
# This is run by the "run-tests" script.
import unittest
from test import TestHelper, Conn, parse
class TestNoListing(TestHelper):
def test_no_listing(self):
resp = self.get("/")
status, hdrs, body = parse(resp)
self.assertContains(status, "404 Not Found")
if __name__ == '__main__':
unittest.main()
# vim:set ts=4 sw=4 et:
| 23.875 | 52 | 0.664921 | [
"ISC"
] | adams549659584/darkhttpd | devel/test_no_listing.py | 382 | Python |
'''Test code.
'''
# pylint: disable=import-error
import unittest
from Chapter3_CodeTesting.UnitTesting.vector import Vector2D
class VectorTests(unittest.TestCase):
def setUp(self):
self.v1 = Vector2D(0, 0)
self.v2 = Vector2D(-1, 1)
self.v3 = Vector2D(2.5, -2.5)
def test_equality(self):
''' Tests the equality operator.
'''
self.assertNotEqual(self.v1, self.v2)
expected_result = Vector2D(-1, 1)
self.assertEqual(self.v2, expected_result)
def test_add(self):
''' Tests the addition operator.
'''
result = self.v1 + self.v2
expected_result = Vector2D(-1, 1)
self.assertEqual(result, expected_result)
def test_sub(self):
''' Tests the subtraction operator.
'''
result = self.v2 - self.v3
expected_result = Vector2D(-3.5, 3.5)
self.assertEqual(result, expected_result)
def test_mul(self):
''' Tests the multiplication operator.
'''
result1 = self.v1 * 5
expected_result1 = Vector2D(0.0, 0.0)
self.assertEqual(result1, expected_result1)
result2 = self.v1 * self.v2
expected_result2 = 0.0
self.assertEqual(result2, expected_result2)
def test_div(self):
        ''' Tests the division operator.
'''
result = self.v3 / 5
expected_result = Vector2D(0.5, -0.5)
self.assertEqual(result, expected_result)
if __name__ == '__main__':
unittest.main()
| 27.160714 | 60 | 0.60618 | [
"MIT"
] | franneck94/UdemyPythonProEng | Chapter3_CodeTesting/UnitTesting/test_vector.py | 1,521 | Python |
# -*- coding: utf-8 -*-
from . import test_related
from . import test_new_fields
from . import test_onchange
from . import test_field_conversions
from . import test_attributes
| 22.125 | 36 | 0.774011 | [
"MIT"
] | tuanquanghpvn/odoo8-tutorial | odoo/openerp/addons/test_new_api/tests/__init__.py | 177 | Python |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 26 20:21:07 2019
Tecnológico Nacional de México (TECNM)
Tecnológico de Estudios Superiores de Ixtapaluca (TESI)
Electronics Engineering Division
Introduction to the NumPy library, part 2
M. Sc. Rogelio Manuel Higuera Gonzalez
"""
import numpy as np
##################################################################################
ages = np.array([34,14,37,5,13]) # Create an array of ages
sorted_ages = np.sort(ages) # Sorts the elements of the ages array from smallest to largest
#ages.sort() # Sorts the elements of the original ages array in place, from smallest to largest
argages = ages.argsort() # Indices that would sort each element of the ages array (smallest to largest)
ages1 = ages[ages.argsort()] # Builds a sorted ages array by indexing with the argsort indices
##################################################################################
persons = np.array(['Johnny','Mary','Peter','Will','Joe'])
heights = np.array([1.76,1.2,1.68,0.5,1.25])
sort_indices = np.argsort(ages) # Performs a sort keyed on ages
#print(persons[sort_indices]) # Prints the list of persons sorted by their age
#print(heights[sort_indices]) # Prints the list of heights sorted by age
#print(ages[sort_indices]) # Prints the list of ages sorted by age
sort_indices1 = np.argsort(persons)
#print(persons[sort_indices1])
#print(ages[sort_indices1])
#print(heights[sort_indices1])
# To sort the heights in descending order, use the Python notation [::-1]
sort_indices2 = np.argsort(heights)[::-1]
#print(persons[sort_indices2])
#print(ages[sort_indices2])
#print(heights[sort_indices2])
##################################################################################
list1 = [[1,2,3,4],[5,6,7,8]]
a1 = np.array(list1)
a2 = a1
a2[0][0] = 11 # Making a change in a2 also affects a1
a1.shape = 1,-1 # a2 also changes its shape
##################################################################################
list2 = [[10,11,12,13],[14,15,16,17]]
a3 = np.array(list2)
a4 = a3.view() # Shallow copy; when you change the shape of a3, a4 is not affected
a3.shape = 1,-1
##################################################################################
list3 = [[20,21,22,23],[24,25,26,27]]
a5 = np.array(list3)
a6 = a5.copy() # The copy() function creates a deep copy of the array
a5[0][0] = 10 # The change is not reflected in a6
a5.shape = 1,-1 # a6 does not change its shape
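##################################################################################
# Quick check of the aliasing/view/copy behaviour described above (illustrative
# addition, not part of the original exercise):
#print(a1[0, 0], a2[0, 0]) # both print 11: a2 is just another name for a1
#print(a3.shape, a4.shape) # (1, 8) vs (2, 4): the view keeps its own shape
#print(a5[0, 0], a6[0, 0]) # 10 vs 20: the deep copy is unaffected by the change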
| 47.862745 | 122 | 0.610815 | [
"Unlicense"
] | RogelioHiguera/Python-2.0 | IibreriaNumpy2.py | 2,452 | Python |
#!venv/bin/python
""" This module imports Flask-Manager script, adds our create_db command
and run it. You can pass following arguments:
* create_db => creates sqlite database and all the tables
* shell => runs python shell inside application context
* runserver => runs Flask development server
* db => performs database migrations
* db init => generate new migration
* db migrate => generate automatic revision
* db current => display current revision
* db upgrade => upgrade to later version
* db downgrade => revert to previous version
* db history => list changes
* db revision => create new revision file
* db stamp => 'stamp' the revision table with given revision
optional arguments:
-h, --help shows help message
"""
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.script import Manager, Command
from flask.ext.migrate import MigrateCommand
from app import app, db, migrate, models
class CreateDb(Command):
"""This class inherit from Flask-manager to add create_db command"""
def run(self):
""" Create database with all tables and print log to std.out"""
print 'Creating the database.'
db.create_all()
manager = Manager(app)
manager.add_command('db', MigrateCommand)
manager.add_command('create_db', CreateDb())
if __name__ == '__main__':
manager.run()
| 34.119048 | 72 | 0.692952 | [
"MIT"
] | Adynatos/egida | manager.py | 1,433 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1APIGroup(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'name': 'str',
'preferred_version': 'V1GroupVersionForDiscovery',
'server_address_by_client_cid_rs': 'list[V1ServerAddressByClientCIDR]',
'versions': 'list[V1GroupVersionForDiscovery]'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'name': 'name',
'preferred_version': 'preferredVersion',
'server_address_by_client_cid_rs': 'serverAddressByClientCIDRs',
'versions': 'versions'
}
def __init__(self, api_version=None, kind=None, name=None, preferred_version=None, server_address_by_client_cid_rs=None, versions=None):
"""
V1APIGroup - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._name = None
self._preferred_version = None
self._server_address_by_client_cid_rs = None
self._versions = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
self.name = name
if preferred_version is not None:
self.preferred_version = preferred_version
if server_address_by_client_cid_rs is not None:
self.server_address_by_client_cid_rs = server_address_by_client_cid_rs
self.versions = versions
@property
def api_version(self):
"""
Gets the api_version of this V1APIGroup.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1APIGroup.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1APIGroup.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1APIGroup.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1APIGroup.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1APIGroup.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1APIGroup.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1APIGroup.
:type: str
"""
self._kind = kind
@property
def name(self):
"""
Gets the name of this V1APIGroup.
name is the name of the group.
:return: The name of this V1APIGroup.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1APIGroup.
name is the name of the group.
:param name: The name of this V1APIGroup.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def preferred_version(self):
"""
Gets the preferred_version of this V1APIGroup.
preferredVersion is the version preferred by the API server, which probably is the storage version.
:return: The preferred_version of this V1APIGroup.
:rtype: V1GroupVersionForDiscovery
"""
return self._preferred_version
@preferred_version.setter
def preferred_version(self, preferred_version):
"""
Sets the preferred_version of this V1APIGroup.
preferredVersion is the version preferred by the API server, which probably is the storage version.
:param preferred_version: The preferred_version of this V1APIGroup.
:type: V1GroupVersionForDiscovery
"""
self._preferred_version = preferred_version
@property
def server_address_by_client_cid_rs(self):
"""
Gets the server_address_by_client_cid_rs of this V1APIGroup.
a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
:return: The server_address_by_client_cid_rs of this V1APIGroup.
:rtype: list[V1ServerAddressByClientCIDR]
"""
return self._server_address_by_client_cid_rs
@server_address_by_client_cid_rs.setter
def server_address_by_client_cid_rs(self, server_address_by_client_cid_rs):
"""
Sets the server_address_by_client_cid_rs of this V1APIGroup.
a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
:param server_address_by_client_cid_rs: The server_address_by_client_cid_rs of this V1APIGroup.
:type: list[V1ServerAddressByClientCIDR]
"""
self._server_address_by_client_cid_rs = server_address_by_client_cid_rs
@property
def versions(self):
"""
Gets the versions of this V1APIGroup.
versions are the versions supported in this group.
:return: The versions of this V1APIGroup.
:rtype: list[V1GroupVersionForDiscovery]
"""
return self._versions
@versions.setter
def versions(self, versions):
"""
Sets the versions of this V1APIGroup.
versions are the versions supported in this group.
:param versions: The versions of this V1APIGroup.
:type: list[V1GroupVersionForDiscovery]
"""
if versions is None:
raise ValueError("Invalid value for `versions`, must not be `None`")
self._versions = versions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1APIGroup):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
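# Illustrative usage sketch (not part of the generated client; it assumes the
# companion V1GroupVersionForDiscovery model with its usual group_version and
# version keyword arguments):
#   from kubernetes.client.models.v1_group_version_for_discovery import V1GroupVersionForDiscovery
#   preferred = V1GroupVersionForDiscovery(group_version="apps/v1", version="v1")
#   group = V1APIGroup(name="apps", versions=[preferred], preferred_version=preferred)
#   print(group.to_dict())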
| 36.542751 | 625 | 0.649339 | [
"Apache-2.0"
] | Jamim/kubernetes-client-python | kubernetes/client/models/v1_api_group.py | 9,830 | Python |
import logging
import numpy as np
from typing import Any, Dict, Optional
from mlagents.tf_utils import tf
from mlagents.envs.timers import timed
from mlagents.envs.brain import BrainInfo, BrainParameters
from mlagents.trainers.models import EncoderType, LearningRateSchedule
from mlagents.trainers.ppo.models import PPOModel
from mlagents.trainers.tf_policy import TFPolicy
from mlagents.trainers.components.reward_signals.reward_signal_factory import (
create_reward_signal,
)
from mlagents.trainers.components.bc.module import BCModule
logger = logging.getLogger("mlagents.trainers")
class PPOPolicy(TFPolicy):
def __init__(
self,
seed: int,
brain: BrainParameters,
trainer_params: Dict[str, Any],
is_training: bool,
load: bool,
):
"""
Policy for Proximal Policy Optimization Networks.
:param seed: Random seed.
:param brain: Assigned Brain object.
:param trainer_params: Defined training parameters.
:param is_training: Whether the model should be trained.
:param load: Whether a pre-trained model will be loaded or a new one created.
"""
super().__init__(seed, brain, trainer_params)
reward_signal_configs = trainer_params["reward_signals"]
self.inference_dict: Dict[str, tf.Tensor] = {}
self.update_dict: Dict[str, tf.Tensor] = {}
self.stats_name_to_update_name = {
"Losses/Value Loss": "value_loss",
"Losses/Policy Loss": "policy_loss",
}
self.create_model(
brain, trainer_params, reward_signal_configs, is_training, load, seed
)
self.create_reward_signals(reward_signal_configs)
with self.graph.as_default():
self.bc_module: Optional[BCModule] = None
# Create pretrainer if needed
if "pretraining" in trainer_params:
BCModule.check_config(trainer_params["pretraining"])
self.bc_module = BCModule(
self,
policy_learning_rate=trainer_params["learning_rate"],
default_batch_size=trainer_params["batch_size"],
default_num_epoch=trainer_params["num_epoch"],
**trainer_params["pretraining"],
)
if load:
self._load_graph()
else:
self._initialize_graph()
def create_model(
self, brain, trainer_params, reward_signal_configs, is_training, load, seed
):
"""
Create PPO model
:param brain: Assigned Brain object.
:param trainer_params: Defined training parameters.
:param reward_signal_configs: Reward signal config
:param seed: Random seed.
"""
with self.graph.as_default():
self.model = PPOModel(
brain=brain,
lr=float(trainer_params["learning_rate"]),
lr_schedule=LearningRateSchedule(
trainer_params.get("learning_rate_schedule", "linear")
),
h_size=int(trainer_params["hidden_units"]),
epsilon=float(trainer_params["epsilon"]),
beta=float(trainer_params["beta"]),
max_step=float(trainer_params["max_steps"]),
normalize=trainer_params["normalize"],
use_recurrent=trainer_params["use_recurrent"],
num_layers=int(trainer_params["num_layers"]),
m_size=self.m_size,
seed=seed,
stream_names=list(reward_signal_configs.keys()),
vis_encode_type=EncoderType(
trainer_params.get("vis_encode_type", "simple")
),
)
self.model.create_ppo_optimizer()
self.inference_dict.update(
{
"action": self.model.output,
"log_probs": self.model.all_log_probs,
"value_heads": self.model.value_heads,
"value": self.model.value,
"entropy": self.model.entropy,
"learning_rate": self.model.learning_rate,
}
)
if self.use_continuous_act:
self.inference_dict["pre_action"] = self.model.output_pre
if self.use_recurrent:
self.inference_dict["memory_out"] = self.model.memory_out
self.total_policy_loss = self.model.abs_policy_loss
self.update_dict.update(
{
"value_loss": self.model.value_loss,
"policy_loss": self.total_policy_loss,
"update_batch": self.model.update_batch,
}
)
def create_reward_signals(self, reward_signal_configs):
"""
Create reward signals
:param reward_signal_configs: Reward signal config.
"""
self.reward_signals = {}
with self.graph.as_default():
# Create reward signals
for reward_signal, config in reward_signal_configs.items():
self.reward_signals[reward_signal] = create_reward_signal(
self, self.model, reward_signal, config
)
self.update_dict.update(self.reward_signals[reward_signal].update_dict)
@timed
def evaluate(self, brain_info):
"""
Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo object containing inputs.
:return: Outputs from network as defined by self.inference_dict.
"""
feed_dict = {
self.model.batch_size: len(brain_info.vector_observations),
self.model.sequence_length: 1,
}
epsilon = None
if self.use_recurrent:
if not self.use_continuous_act:
feed_dict[self.model.prev_action] = self.retrieve_previous_action(
brain_info.agents
)
feed_dict[self.model.memory_in] = self.retrieve_memories(brain_info.agents)
if self.use_continuous_act:
epsilon = np.random.normal(
size=(len(brain_info.vector_observations), self.model.act_size[0])
)
feed_dict[self.model.epsilon] = epsilon
feed_dict = self.fill_eval_dict(feed_dict, brain_info)
run_out = self._execute_model(feed_dict, self.inference_dict)
if self.use_continuous_act:
run_out["random_normal_epsilon"] = epsilon
return run_out
@timed
def update(self, mini_batch, num_sequences):
"""
Performs update on model.
:param mini_batch: Batch of experiences.
:param num_sequences: Number of sequences to process.
:return: Results of update.
"""
feed_dict = self.construct_feed_dict(self.model, mini_batch, num_sequences)
stats_needed = self.stats_name_to_update_name
update_stats = {}
# Collect feed dicts for all reward signals.
for _, reward_signal in self.reward_signals.items():
feed_dict.update(
reward_signal.prepare_update(self.model, mini_batch, num_sequences)
)
stats_needed.update(reward_signal.stats_name_to_update_name)
update_vals = self._execute_model(feed_dict, self.update_dict)
for stat_name, update_name in stats_needed.items():
update_stats[stat_name] = update_vals[update_name]
return update_stats
def construct_feed_dict(self, model, mini_batch, num_sequences):
feed_dict = {
model.batch_size: num_sequences,
model.sequence_length: self.sequence_length,
model.mask_input: mini_batch["masks"],
model.advantage: mini_batch["advantages"],
model.all_old_log_probs: mini_batch["action_probs"],
}
for name in self.reward_signals:
feed_dict[model.returns_holders[name]] = mini_batch[
"{}_returns".format(name)
]
feed_dict[model.old_values[name]] = mini_batch[
"{}_value_estimates".format(name)
]
if self.use_continuous_act:
feed_dict[model.output_pre] = mini_batch["actions_pre"]
feed_dict[model.epsilon] = mini_batch["random_normal_epsilon"]
else:
feed_dict[model.action_holder] = mini_batch["actions"]
if self.use_recurrent:
feed_dict[model.prev_action] = mini_batch["prev_action"]
feed_dict[model.action_masks] = mini_batch["action_mask"]
if self.use_vec_obs:
feed_dict[model.vector_in] = mini_batch["vector_obs"]
if self.model.vis_obs_size > 0:
for i, _ in enumerate(self.model.visual_in):
feed_dict[model.visual_in[i]] = mini_batch["visual_obs%d" % i]
if self.use_recurrent:
mem_in = [
mini_batch["memory"][i]
for i in range(0, len(mini_batch["memory"]), self.sequence_length)
]
feed_dict[model.memory_in] = mem_in
return feed_dict
def get_value_estimates(
self, brain_info: BrainInfo, idx: int, done: bool
) -> Dict[str, float]:
"""
Generates value estimates for bootstrapping.
:param brain_info: BrainInfo to be used for bootstrapping.
:param idx: Index in BrainInfo of agent.
:param done: Whether or not this is the last element of the episode, in which case the value estimate will be 0.
:return: The value estimate dictionary with key being the name of the reward signal and the value the
corresponding value estimate.
"""
feed_dict: Dict[tf.Tensor, Any] = {
self.model.batch_size: 1,
self.model.sequence_length: 1,
}
for i in range(len(brain_info.visual_observations)):
feed_dict[self.model.visual_in[i]] = [
brain_info.visual_observations[i][idx]
]
if self.use_vec_obs:
feed_dict[self.model.vector_in] = [brain_info.vector_observations[idx]]
agent_id = brain_info.agents[idx]
if self.use_recurrent:
feed_dict[self.model.memory_in] = self.retrieve_memories([agent_id])
if not self.use_continuous_act and self.use_recurrent:
feed_dict[self.model.prev_action] = self.retrieve_previous_action(
[agent_id]
)
value_estimates = self.sess.run(self.model.value_heads, feed_dict)
value_estimates = {k: float(v) for k, v in value_estimates.items()}
# If we're done, reassign all of the value estimates that need terminal states.
if done:
for k in value_estimates:
if self.reward_signals[k].use_terminal_states:
value_estimates[k] = 0.0
return value_estimates
| 40.362963 | 120 | 0.614883 | [
"Apache-2.0"
] | DdATM/ML-FlappyBird | ml-agents/mlagents/trainers/ppo/policy.py | 10,898 | Python |
from __future__ import absolute_import, division, print_function
from six.moves import range
from scitbx.lbfgs import core_parameters, termination_parameters
from scitbx.lbfgs import exception_handling_parameters, ext
from scitbx.array_family import flex
import scitbx
"""mpi_split_evaluator_run(), supports an LBFGS parameter optimization scenario where
the target (functional and gradients) are significantly rate limiting, and moreover where
the requisite terms of f and g can be load balanced by distributing the data over parallel
evaluator instances, each of which can be handled by an MPI worker rank. Rank 0 then
performs a simple MPI.reduce sum to obtain the full f and g. There has been
no low-level redesign to support MPI. In particular, the ext.minimizer is
run (wastefully) by every worker rank, using the same x parameters, f, and g. A simple
working example is given."""
# based on scitbx/lbfgs/__init__.py, run_c_plus_plus
def mpi_split_evaluator_run(target_evaluator,
termination_params=None,
core_params=None,
exception_handling_params=None,
log=None,
#---> Insertion starts
gradient_only=False,
line_search=True):
#<--- Insertion ends
"""The supported scenario is that each MPI worker rank has a target evaluator
that has part of the data. Each rank calculates a bit of the functional and
gradients, but then mpi reduce is used to sum them all up. There has been
no low-level redesign to support MPI. In particular, the ext.minimizer is
run (wastefully) by every worker rank, using the same data. It is assumed that
the calculation of compute_functional_and_gradients() is overwhelmingly the rate
limiting step, and that is what MPI parallelism is intended to distribute here."""
from libtbx.mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
if (termination_params is None):
termination_params = termination_parameters()
if (core_params is None):
core_params = core_parameters()
if (exception_handling_params is None):
exception_handling_params = exception_handling_parameters()
x = target_evaluator.x
if (log is not None):
print("lbfgs minimizer():", file=log)
print(" x.size():", x.size(), file=log)
print(" m:", core_params.m, file=log)
print(" maxfev:", core_params.maxfev, file=log)
print(" gtol:", core_params.gtol, file=log)
print(" xtol:", core_params.xtol, file=log)
print(" stpmin:", core_params.stpmin, file=log)
print(" stpmax:", core_params.stpmax, file=log)
print("lbfgs traditional_convergence_test:", \
termination_params.traditional_convergence_test, file=log)
minimizer = ext.minimizer(
x.size(),
core_params.m,
core_params.maxfev,
core_params.gtol,
core_params.xtol,
core_params.stpmin,
core_params.stpmax)
if (termination_params.traditional_convergence_test):
is_converged = ext.traditional_convergence_test(
x.size(),
termination_params.traditional_convergence_test_eps)
else:
is_converged = ext.drop_convergence_test(
n_test_points=termination_params.drop_convergence_test_n_test_points,
max_drop_eps=termination_params.drop_convergence_test_max_drop_eps,
iteration_coefficient
=termination_params.drop_convergence_test_iteration_coefficient)
callback_after_step = getattr(target_evaluator, "callback_after_step", None)
diag_mode = getattr(target_evaluator, "diag_mode", None)
if (diag_mode is not None): assert diag_mode in ["once", "always"]
f_min, x_min = None, None
f, g = None, None
try:
while 1:
if (diag_mode is None):
#XXX Only the diag_mode==None case is currently implemented, just as example
f_term, g_term = target_evaluator.compute_functional_and_gradients()
f_total = comm.reduce(f_term, MPI.SUM, 0)
g_total = comm.reduce(g_term, MPI.SUM, 0)
if rank==0: transmit = (f_total,g_total)
else: transmit = None
f, g = comm.bcast(transmit, root=0)
if False and rank==0: # for debug
print ("%s %10.4f"%("MPI stp",f),"["," ".join(["%10.4f"%a for a in x]),"]")
d = None
else:
f, g, d = target_evaluator.compute_functional_gradients_diag()
if (diag_mode == "once"):
diag_mode = None
if (f_min is None):
if (not termination_params.traditional_convergence_test):
is_converged(f)
f_min, x_min = f, x.deep_copy()
elif (f_min > f):
f_min, x_min = f, x.deep_copy()
if (log is not None):
print("lbfgs minimizer.run():" \
" f=%.6g, |g|=%.6g, x_min=%.6g, x_mean=%.6g, x_max=%.6g" % (
f, g.norm(), flex.min(x), flex.mean(x), flex.max(x)), file=log)
if (d is None):
#---> Insertion starts
if (minimizer.run(x, f, g, gradient_only,line_search)): continue
#<--- Insertion ends
else:
#---> Insertion starts
if (minimizer.run(x, f, g, d, gradient_only,line_search)): continue
#<--- Insertion ends
if (log is not None):
print("lbfgs minimizer step", file=log)
if (callback_after_step is not None):
if (callback_after_step(minimizer) is True):
if (log is not None):
print("lbfgs minimizer stop: callback_after_step is True", file=log)
break
if (termination_params.traditional_convergence_test):
if ( minimizer.iter() >= termination_params.min_iterations
and is_converged(x, g)):
if (log is not None):
print("lbfgs minimizer stop: traditional_convergence_test", file=log)
break
else:
if (is_converged(f)):
if (log is not None):
print("lbfgs minimizer stop: drop_convergence_test", file=log)
break
if ( termination_params.max_iterations is not None
and minimizer.iter() >= termination_params.max_iterations):
if (log is not None):
print("lbfgs minimizer stop: max_iterations", file=log)
break
if ( termination_params.max_calls is not None
and minimizer.nfun() > termination_params.max_calls):
if (log is not None):
print("lbfgs minimizer stop: max_calls", file=log)
break
if (d is None):
#---> Insertion starts
if (not minimizer.run(x, f, g, gradient_only,line_search)): break
#<--- Insertion ends
else:
#---> Insertion starts
if (not minimizer.run(x, f, g, d, gradient_only,line_search)): break
#<--- Insertion ends
except RuntimeError as e:
minimizer.error = str(e)
if (log is not None):
print("lbfgs minimizer exception:", str(e), file=log)
if (x_min is not None):
x.clear()
x.extend(x_min)
error_classification = exception_handling_params.filter(
minimizer.error, x.size(), x, g)
if (error_classification > 0):
raise
elif (error_classification < 0):
minimizer.is_unusual_error = True
else:
minimizer.is_unusual_error = False
else:
minimizer.error = None
minimizer.is_unusual_error = None
if (log is not None):
print("lbfgs minimizer done.", file=log)
return minimizer
class simple_quadratic(object):
def __init__(self):
self.datax = flex.double(range(-15,17))
self.datay = flex.double([20,15,18,12,10, 10,5,5,1,2, -3,-1,-4,-5,-4,
-6,-4,-6,-4,-4, -4,-5,-1,0,-1, 1,5,4,9,10, 13,15])
abc = 0.1,-0.3,-5.0 # The expected parameters, y = a*x*x + b*x + c
self.n = 3
self.x = flex.double([1,1,1])#lay out the parameter estimates.
def run(self):
self.minimizer = scitbx.lbfgs.run(target_evaluator=self,
termination_params=scitbx.lbfgs.termination_parameters(
traditional_convergence_test=True,
traditional_convergence_test_eps=1.e-3,
max_calls=1000)
)
self.a = self.x
def print_step(self,message,target):
print ("%s %10.4f"%(message,target),"["," ".join(["%10.4f"%a for a in self.x]),"]")
def compute_functional_and_gradients(self):
self.a = self.x
residuals = self.datay - self.a[0]*self.datax*self.datax - self.a[1]*self.datax - self.a[2]
f = flex.sum( 0.5 * residuals * residuals )
g = flex.double(self.n)
dR_da = -self.datax*self.datax
dR_db = -self.datax
dR_dc = flex.double(len(self.datax),-1)
g[0] = flex.sum( residuals * dR_da )
g[1] = flex.sum( residuals * dR_db )
g[2] = flex.sum( residuals * dR_dc )
# self.print_step("LBFGS stp",f)
return f,g
class mpi_quadratic(simple_quadratic):
def reinitialize(self,idx,logical_size):
if idx >= logical_size:
self.skip_flag = True
else:
self.skip_flag = False
self.datax = self.datax[idx]
self.datay = self.datay[idx]
def compute_functional_and_gradients(self):
if self.skip_flag: return 0,flex.double(self.n)
a = self.x
residual = (self.datay - a[0]*self.datax*self.datax - a[1]*self.datax - a[2])
f = 0.5 * residual * residual
g = flex.double(self.n)
dR_da = -self.datax*self.datax
dR_db = -self.datax
dR_dc = -1.
g[0] = residual * dR_da
g[1] = residual * dR_db
g[2] = residual * dR_dc
return f,g
def run_mpi():
from libtbx.mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
#print ("hello from rank %d of %d"%(rank,size))
W = simple_quadratic()
if rank==0:
W.run()
print(list(W.a), "Single process final answer")
else:
pass
comm.barrier()
M = mpi_quadratic()
M.reinitialize(idx=rank, logical_size=len(W.datax))
minimizer = mpi_split_evaluator_run(target_evaluator=M,
termination_params=scitbx.lbfgs.termination_parameters(
traditional_convergence_test=True,
traditional_convergence_test_eps=1.e-3,
max_calls=1000)
)
if rank==0:
print(list(M.x), "MPI final answer")
try:
from libtbx.test_utils import approx_equal
assert approx_equal(M.x,W.a)
assert approx_equal(M.x,[0.09601410216133123, -0.28424727078557327, -4.848332140888606])
print ("OK")
except Exception:
print ("FAIL")
if __name__=="__main__":
Usage = """
srun -n 32 -c 2 libtbx.python scitbx/lbfgs/tst_mpi_split_evaluator.py #small test case, 1 node
...only works when MPI is present, e.g., salloc -C haswell -N1 -q interactive -t 00:15:00
"""
run_mpi()
| 38.805147 | 95 | 0.655708 | [
"BSD-3-Clause"
] | jorgediazjr/dials-dev20191018 | modules/cctbx_project/scitbx/lbfgs/tst_mpi_split_evaluator.py | 10,555 | Python |
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # Setup to allow importing files from the parent directory
import numpy as np
from common.layers import *
from common.gradient import numerical_gradient
from collections import OrderedDict
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Initialize weights
self.params = {}
self.params["W1"] = weight_init_std * np.random.randn(input_size, hidden_size)
self.params["b1"] = np.zeros(hidden_size)
self.params["W2"] = weight_init_std * np.random.randn(hidden_size, output_size)
self.params["b2"] = np.zeros(output_size)
        # Create layers
self.layers = OrderedDict()
self.layers["Affine1"] = Affine(self.params["W1"], self.params["b1"])
self.layers["Relu1"] = Relu()
self.layers["Affine2"] = Affine(self.params["W2"], self.params["b2"])
self.lastLayer = SoftmaxWithLoss()
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
    # x: input data, t: teacher data (labels)
def loss(self, x, t):
y = self.predict(x)
return self.lastLayer.forward(y, t)
def accuracy(self, x, t):
y = self.predict(x)
y = np.argmax(y, axis=1)
if t.ndim != 1:
t = np.argmax(t, axis=1)
accuracy = np.sum(y == t) / float(x.shape[0])
return accuracy
    # x: input data, t: teacher data (labels)
def numerical_gradient(self, x, t):
loss_W = lambda W: self.loss(x, t)
grads = {}
grads["W1"] = numerical_gradient(loss_W, self.params["W1"])
grads["b1"] = numerical_gradient(loss_W, self.params["b1"])
grads["W2"] = numerical_gradient(loss_W, self.params["W2"])
grads["b2"] = numerical_gradient(loss_W, self.params["b2"])
return grads
def gradient(self, x, t):
# forward
self.loss(x, t)
# backward
dout = 1
dout = self.lastLayer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
        # Store the gradients
grads = {}
grads["W1"], grads["b1"] = self.layers["Affine1"].dW, self.layers["Affine1"].db
grads["W2"], grads["b2"] = self.layers["Affine2"].dW, self.layers["Affine2"].db
return grads
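# Minimal usage sketch (illustrative; the 784/50/10 sizes below assume MNIST-style
# data and are not part of the original file):
#   network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
#   x_batch = np.random.rand(3, 784)               # dummy mini-batch
#   t_batch = np.eye(10)[np.random.choice(10, 3)]  # dummy one-hot labels
#   grads = network.gradient(x_batch, t_batch)     # backprop gradients for W1, b1, W2, b2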
| 29.78481 | 87 | 0.58946 | [
"MIT"
] | tmsick/deep-learning-from-scratch | ch05/two_layer_net.py | 2,471 | Python |
from __future__ import unicode_literals
import atexit
import os
import unittest
from django import VERSION
from selenium import webdriver
from django.urls import reverse
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import ui
from selenium.webdriver.support.ui import Select
try:
from taggit.models import Tag
except ImportError:
Tag = None
if VERSION[0] == 1 and VERSION[1] < 7:
# Patch for travis
from django.test.testcases import StoppableWSGIServer
def patient_shutdown(self):
"""
Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.
"""
self._StoppableWSGIServer__serving = False
if not self._StoppableWSGIServer__is_shut_down.wait(30):
raise RuntimeError(
"Failed to shutdown the live test server in 2 seconds. The "
"server might be stuck or generating a slow response.")
StoppableWSGIServer.shutdown = patient_shutdown
from django.test import LiveServerTestCase
else:
# LiveServerTestCase doesn't serve static files in 1.7 anymore
from django.contrib.staticfiles.testing import StaticLiveServerTestCase as LiveServerTestCase
if os.environ.get('TRAVIS', False):
WAIT_TIME = 30
elif os.environ.get('BUILD_ID', False): # Jenkins build server
WAIT_TIME = 30
else:
WAIT_TIME = 5
# Global Selenium instance.
class Selenium(object):
selenium = None
def __new__(cls):
if not cls.selenium:
selenium = getattr(webdriver, os.environ.get('TESTS_WEBDRIVER', 'Firefox'))()
selenium.implicitly_wait(WAIT_TIME)
atexit.register(selenium.quit)
cls.selenium = selenium
return cls.selenium
class WidgetTestCase(LiveServerTestCase):
input_name_suffix = '-autocomplete'
autocomplete_name = 'relation'
fixtures = ['basic_fk_model_test_case.json', 'test_user.json']
test_case_setup_done = False
@classmethod
def setUpClass(cls):
if os.environ.get('TESTS_SKIP_LIVESERVER', False):
raise unittest.SkipTest('TESTS_SKIP_LIVESERVER enabled')
cls.selenium = Selenium()
super(WidgetTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(WidgetTestCase, cls).tearDownClass()
cls.test_case_setup_done = False
def setUp(self):
if self.__class__.test_case_setup_done is False:
self.set_implicit_wait()
self.setup_test_case()
self.__class__.test_case_setup_done = True
def open_url(self, url):
self.selenium.get('%s%s' % (self.live_server_url, url))
def send_keys(self, keys, autocomplete_name=None):
autocomplete_name = autocomplete_name or self.autocomplete_name
for key in keys:
self.selenium.find_element_by_css_selector(
'input[name=%s%s]' % (autocomplete_name,
self.input_name_suffix)
).send_keys(key)
def submit(self, name=None):
selector = 'input[type=submit]'
if name:
selector += '[name=%s]' % name
self.selenium.find_element_by_css_selector(selector).click()
def login(self):
self.client.login(username='test', password='test')
cookie = self.client.cookies['sessionid']
self.open_url('/admin/')
self.selenium.add_cookie({'name': 'sessionid', 'value': cookie.value, 'secure': False, 'path': '/'})
def deck_choice_elements(self, autocomplete_name=None):
autocomplete_name = autocomplete_name or self.autocomplete_name
return self.selenium.find_elements_by_css_selector(
'#id_%s-deck [data-value]' % autocomplete_name)
def autocomplete(self, autocomplete_name=None):
autocomplete_name = autocomplete_name or self.autocomplete_name
return self.selenium.find_element_by_css_selector(
'.yourlabs-autocomplete[data-input-id="id_'+autocomplete_name+'-autocomplete"]')
def deck_choices(self, autocomplete_name=None):
autocomplete_name = autocomplete_name or self.autocomplete_name
xpath = ''.join([
'//*[@id="id_%s%s"]/' % (autocomplete_name,
self.input_name_suffix),
'preceding-sibling::',
'span[contains(',
'concat(" ", normalize-space(@class), " "), ',
'" deck ")',
']/*[@data-value]'])
return self.selenium.find_elements_by_xpath(xpath)
def hilighted_choice(self, autocomplete_name=None):
autocomplete_name = autocomplete_name or self.autocomplete_name
return self.selenium.find_element_by_css_selector(
'.yourlabs-autocomplete[data-input-id="id_'+autocomplete_name+'-autocomplete"] .hilight')
def autocomplete_choices(self, autocomplete_name=None):
autocomplete_name = autocomplete_name or self.autocomplete_name
return self.selenium.find_elements_by_css_selector(
'.yourlabs-autocomplete[data-input-id="id_'+autocomplete_name+'-autocomplete"] [data-value]')
def input(self, autocomplete_name=None):
autocomplete_name = autocomplete_name or self.autocomplete_name
return self.selenium.find_element_by_css_selector(
'input[name=%s%s]' % (autocomplete_name,
self.input_name_suffix))
def select(self, autocomplete_name=None):
autocomplete_name = autocomplete_name or self.autocomplete_name
xpath = ''.join([
'//*[@id="id_%s%s"]/' % (autocomplete_name,
self.input_name_suffix),
'following-sibling::',
'select'])
return self.selenium.find_element_by_xpath(xpath)
def set_implicit_wait(self):
self.selenium.implicitly_wait(WAIT_TIME)
self.selenium.set_page_load_timeout(WAIT_TIME)
def unset_implicit_wait(self):
self.selenium.implicitly_wait(0)
self.selenium.set_page_load_timeout(0)
def select_values(self):
self.select # wait for select
# don't wait for options as there might be none
self.unset_implicit_wait()
ret = [o.get_attribute('value') for o in Select(self.select()).options if
o.is_selected()]
# restore implicit wait
self.set_implicit_wait()
return ret
def assertSameChoice(self, autocomplete_choice, deck_choice):
if autocomplete_choice.get_attribute('data-value') != deck_choice.get_attribute('data-value'):
self.fail('Choices have different data-value')
if autocomplete_choice.text not in deck_choice.text:
# deck_choice has an additional span.remove
self.fail('Choices have different text')
def assertAutocompleteEmpty(self):
self.unset_implicit_wait()
self.assertTrue(len(self.autocomplete_choices()) == 0)
self.set_implicit_wait()
class ActivateAutocompleteInBlankFormTestCase(WidgetTestCase):
def setup_test_case(self):
self.login()
self.open_url('/admin/basic/fkmodel/add/')
self.send_keys('ja')
def test_autocomplete_shows_up(self):
self.assertTrue(self.autocomplete().is_displayed())
def test_autocomplete_has_four_choices(self):
self.assertEqual(4, len(self.autocomplete_choices()))
class XhrPendingTestCase(WidgetTestCase):
def setup_test_case(self):
self.login()
self.open_url('/admin/basic/fkmodel/add/')
def test_xhr_pending(self):
self.send_keys('ja')
self.selenium.find_element_by_css_selector(
'input[name=%s-autocomplete]' % self.autocomplete_name)
self.selenium.find_element_by_css_selector(
'input:not(.xhr-pending)[name=%s-autocomplete]' % self.autocomplete_name)
class SelectChoiceInEmptyFormTestCase(WidgetTestCase):
def setup_test_case(self):
self.login()
self.open_url('/admin/basic/fkmodel/add/')
self.send_keys('ja')
self.autocomplete_choices()[1].click()
def test_autocomplete_disappears(self):
self.assertFalse(self.autocomplete().is_displayed())
def test_input_disappears(self):
self.assertFalse(self.input().is_displayed())
def test_deck_choice_shows_up(self):
self.assertEqual(len(self.deck_choices()), 1)
def test_deck_choice_same_as_selected(self):
self.assertSameChoice(self.autocomplete_choices()[1], self.deck_choices()[0])
def test_hidden_select_value(self):
self.assertEqual(self.select_values(), ['4'])
def test_admin_change_link_update(self):
change_link = self.selenium.find_element_by_id('change_id_%s' % self.autocomplete_name)
href = change_link.get_attribute('href')
assert href.endswith('%s?_to_field=id&_popup=1' %
reverse('admin:basic_fkmodel_change', args=(4,)))
@unittest.skipIf(Tag is None, 'django-taggit not installed')
class TextWidgetWithTaggitForm(WidgetTestCase):
input_name_suffix = ''
def setup_test_case(self):
Tag.objects.create(name='foo & bar')
self.login()
self.open_url('/admin/basic/taggitmodel/add/')
def test_ampersand(self):
self.send_keys('foo & bar')
ui.WebDriverWait(self.selenium, WAIT_TIME).until(
lambda x: self.hilighted_choice())
self.send_keys([Keys.TAB])
assert 'foo & bar' == self.input().get_attribute('value')
class WidgetInitialStatusInEditForm(WidgetTestCase):
def setup_test_case(self):
self.login()
self.open_url('/admin/basic/fkmodel/1/')
def test_hidden_select_values(self):
self.assertEqual(self.select_values(), ['4'])
def test_input_is_hidden(self):
self.assertFalse(self.input().is_displayed())
class RemoveChoiceInEditFormTestCase(WidgetTestCase):
def setup_test_case(self):
self.login()
self.open_url('/admin/basic/fkmodel/1/')
self.deck_choices()[0].find_element_by_css_selector('.remove').click()
def test_input_shows_up(self):
self.assertTrue(self.input().is_displayed())
def test_hidden_select_option_was_unselected(self):
self.unset_implicit_wait()
self.assertEqual(self.select_values(), [])
self.set_implicit_wait()
def test_element_was_remove_from_deck(self):
self.unset_implicit_wait()
self.assertEqual(0, len(self.deck_choices()))
self.set_implicit_wait()
def test_admin_change_link_has_no_href(self):
change_link = self.selenium.find_element_by_id('change_id_%s' % self.autocomplete_name)
href = change_link.get_attribute('href')
assert href is None
class KeyboardTestCase(WidgetTestCase):
def setup_test_case(self):
self.login()
self.open_url('/admin/basic/fkmodel/add/')
self.send_keys('jac')
def assertHilightedChoiceNmber(self, n):
self.assertSameChoice(self.hilighted_choice(), self.autocomplete_choices()[n])
def send_keys_wait_assert_choice_number(self, key, choice):
old_hilight = self.hilighted_choice()
self.send_keys([key])
ui.WebDriverWait(self.selenium, WAIT_TIME).until(
lambda x: old_hilight != self.hilighted_choice())
self.assertSameChoice(self.hilighted_choice(), self.autocomplete_choices()[choice])
def test_00_first_to_second_with_down(self):
self.send_keys_wait_assert_choice_number(Keys.ARROW_DOWN, 1)
def test_01_last_to_first_with_down(self):
self.send_keys_wait_assert_choice_number(Keys.ARROW_DOWN, 0)
def test_02_first_to_last_with_up(self):
self.send_keys_wait_assert_choice_number(Keys.ARROW_UP, -1)
def test_03_last_to_first_with_up(self):
self.send_keys_wait_assert_choice_number(Keys.ARROW_UP, 0)
def test_04_tab_to_select_choice(self):
self.send_keys([Keys.TAB])
self.assertSameChoice(self.autocomplete_choices()[0], self.deck_choices()[0])
self.assertEqual(self.select_values(), ['4'])
class InlineBlankTestCase(ActivateAutocompleteInBlankFormTestCase):
autocomplete_name = 'reverse_for_inline-3-relation'
def setup_test_case(self):
self.login()
self.open_url('/admin/basic/fkmodel/add/')
self.selenium.find_element_by_css_selector('.add-row a').click()
self.send_keys('ja')
class InlineSelectChoiceTestCase(SelectChoiceInEmptyFormTestCase):
autocomplete_name = 'reverse_for_inline-3-relation'
def setup_test_case(self):
self.login()
self.open_url('/admin/basic/fkmodel/add/')
self.selenium.find_element_by_css_selector('.add-row a').click()
self.send_keys('ja')
self.autocomplete_choices()[1].click()
| 34.580645 | 108 | 0.68268 | [
"MIT"
] | kimetrica/django-autocomplete-light | autocomplete_light/tests/test_widget.py | 12,864 | Python |
from test import support
from test.support import bigmemtest, _4G
import array
import unittest
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import tempfile
import pathlib
import random
import shutil
import subprocess
import threading
from test.support import import_helper
from test.support import threading_helper
from test.support.os_helper import unlink
import _compression
import sys
# Skip tests if the bz2 module doesn't exist.
bz2 = import_helper.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = None
def ext_decompress(data):
global has_cmdline_bunzip2
if has_cmdline_bunzip2 is None:
has_cmdline_bunzip2 = bool(shutil.which('bunzip2'))
if has_cmdline_bunzip2:
return subprocess.check_output(['bunzip2'], input=data)
else:
return bz2.decompress(data)
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT_LINES = [
b'root:x:0:0:root:/root:/bin/bash\n',
b'bin:x:1:1:bin:/bin:\n',
b'daemon:x:2:2:daemon:/sbin:\n',
b'adm:x:3:4:adm:/var/adm:\n',
b'lp:x:4:7:lp:/var/spool/lpd:\n',
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
b'mail:x:8:12:mail:/var/spool/mail:\n',
b'news:x:9:13:news:/var/spool/news:\n',
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
b'operator:x:11:0:operator:/root:\n',
b'games:x:12:100:games:/usr/games:\n',
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
b'nobody:x:65534:65534:Nobody:/home:\n',
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
b'www:x:103:104::/var/www:/bin/false\n',
]
TEXT = b''.join(TEXT_LINES)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
BAD_DATA = b'this is not a valid bzip2 file'
# Some tests need more than one block of uncompressed data. Since one block
# is at least 100,000 bytes, we gather some data dynamically and compress it.
# Note that this assumes that compression works correctly, so we cannot
# simply use the bigger test data for all tests.
test_size = 0
BIG_TEXT = bytearray(128*1024)
for fname in glob.glob(os.path.join(glob.escape(os.path.dirname(__file__)), '*.py')):
with open(fname, 'rb') as fh:
test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
if test_size > 128*1024:
break
BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
def setUp(self):
fd, self.filename = tempfile.mkstemp()
os.close(fd)
def tearDown(self):
unlink(self.filename)
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, os.devnull, "z")
self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
# compresslevel is keyword-only
self.assertRaises(TypeError, BZ2File, os.devnull, "r", 3)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = _compression.BUFFER_SIZE
_compression.BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
_compression.BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
with threading_helper.start_threads(threads):
pass
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testOpenPathLikeFilename(self):
filename = pathlib.Path(self.filename)
with BZ2File(filename, "wb") as f:
f.write(self.DATA)
with BZ2File(filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testDecompressLimited(self):
"""Decompressed data buffering should be limited"""
bomb = bz2.compress(b'\0' * int(2e6), compresslevel=9)
self.assertLess(len(bomb), _compression.BUFFER_SIZE)
decomp = BZ2File(BytesIO(bomb))
self.assertEqual(decomp.read(1), b'\0')
max_decomp = 1 + DEFAULT_BUFFER_SIZE
self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
"Excessive amount of data was decompressed")
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
self.assertEqual(ext_decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
# Drop the eos_magic field (6 bytes) and CRC (4 bytes).
truncated = self.DATA[:-10]
with BZ2File(BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with BZ2File(BytesIO(truncated)) as f:
self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 4-byte file header, and block header of at least 146 bits.
for i in range(22):
with BZ2File(BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
def test_issue44439(self):
q = array.array('Q', [1, 2, 3, 4, 5])
LENGTH = len(q) * q.itemsize
with BZ2File(BytesIO(), 'w') as f:
self.assertEqual(f.write(q), LENGTH)
self.assertEqual(f.tell(), LENGTH)
class BZ2CompressorTest(BaseTest):
def testCompress(self):
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
bz2c = BZ2Compressor()
data = bz2c.compress(b'')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
bz2c = BZ2Compressor()
n = 0
data = b''
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
data += bz2c.compress(str)
n += 1
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
@support.skip_if_pgo_task
@bigmemtest(size=_4G + 100, memuse=2)
def testCompress4G(self, size):
# "Test BZ2Compressor.compress()/flush() with >4GiB input"
bz2c = BZ2Compressor()
data = b"x" * size
try:
compressed = bz2c.compress(data)
compressed += bz2c.flush()
finally:
data = None # Release memory
data = bz2.decompress(compressed)
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b"x")), 0)
finally:
data = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
bz2d = BZ2Decompressor()
text = b''
n = 0
while True:
str = self.DATA[n*10:(n+1)*10]
if not str:
break
text += bz2d.decompress(str)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
bz2d = BZ2Decompressor()
unused_data = b"this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, b"anything")
self.assertRaises(EOFError, bz2d.decompress, b"")
@support.skip_if_pgo_task
@bigmemtest(size=_4G + 100, memuse=3.3)
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
block = random.randbytes(blocksize)
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
bz2d = BZ2Decompressor()
decompressed = bz2d.decompress(compressed)
self.assertTrue(decompressed == data)
finally:
data = None
compressed = None
decompressed = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Decompressor(), proto)
def testDecompressorChunksMaxsize(self):
bzd = BZ2Decompressor()
max_length = 100
out = []
# Feed some input
len_ = len(self.BIG_DATA) - 64
out.append(bzd.decompress(self.BIG_DATA[:len_],
max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data without providing more input
out.append(bzd.decompress(b'', max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data while providing more input
out.append(bzd.decompress(self.BIG_DATA[len_:],
max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
# Retrieve remaining uncompressed data
while not bzd.eof:
out.append(bzd.decompress(b'', max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
out = b"".join(out)
self.assertEqual(out, self.BIG_TEXT)
self.assertEqual(bzd.unused_data, b"")
def test_decompressor_inputbuf_1(self):
# Test reusing input buffer after moving existing
# contents to beginning
bzd = BZ2Decompressor()
out = []
# Create input buffer and fill it
self.assertEqual(bzd.decompress(self.DATA[:100],
max_length=0), b'')
# Retrieve some results, freeing capacity at beginning
# of input buffer
out.append(bzd.decompress(b'', 2))
# Add more data that fits into input buffer after
# moving existing data to beginning
out.append(bzd.decompress(self.DATA[100:105], 15))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[105:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_2(self):
# Test reusing input buffer by appending data at the
# end right away
bzd = BZ2Decompressor()
out = []
# Create input buffer and empty it
self.assertEqual(bzd.decompress(self.DATA[:200],
max_length=0), b'')
out.append(bzd.decompress(b''))
# Fill buffer with new data
out.append(bzd.decompress(self.DATA[200:280], 2))
# Append some more data, not enough to require resize
out.append(bzd.decompress(self.DATA[280:300], 2))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_3(self):
# Test reusing input buffer after extending it
bzd = BZ2Decompressor()
out = []
# Create almost full input buffer
out.append(bzd.decompress(self.DATA[:200], 5))
# Add even more data to it, requiring resize
out.append(bzd.decompress(self.DATA[200:300], 5))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_failure(self):
bzd = BZ2Decompressor()
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
# Previously, a second call could crash due to internal inconsistency
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
@support.refcount_test
def test_refleaks_in___init__(self):
gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
bzd = BZ2Decompressor()
refs_before = gettotalrefcount()
for i in range(100):
bzd.__init__()
self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)
class CompressDecompressTest(BaseTest):
def testCompress(self):
data = bz2.compress(self.TEXT)
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
text = bz2.compress(b'')
self.assertEqual(text, self.EMPTY_DATA)
def testDecompress(self):
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
text = bz2.decompress(b"")
self.assertEqual(text, b"")
def testDecompressToEmptyString(self):
text = bz2.decompress(self.EMPTY_DATA)
self.assertEqual(text, b'')
def testDecompressIncomplete(self):
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
def testDecompressBadData(self):
self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)
def testDecompressMultiStream(self):
text = bz2.decompress(self.DATA * 5)
self.assertEqual(text, self.TEXT * 5)
def testDecompressTrailingJunk(self):
text = bz2.decompress(self.DATA + self.BAD_DATA)
self.assertEqual(text, self.TEXT)
def testDecompressMultiStreamTrailingJunk(self):
text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
"Test the open function."
def open(self, *args, **kwargs):
return bz2.open(*args, **kwargs)
def test_binary_modes(self):
for mode in ("wb", "xb"):
if mode == "xb":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "rb") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "ab") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
for mode in ("w", "x"):
if mode == "x":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "a") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_text_modes(self):
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
for mode in ("wt", "xt"):
if mode == "xt":
unlink(self.filename)
with self.open(self.filename, mode, encoding="ascii") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt", encoding="ascii") as f:
self.assertEqual(f.read(), text)
with self.open(self.filename, "at", encoding="ascii") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol * 2)
def test_x_mode(self):
for mode in ("x", "xb", "xt"):
unlink(self.filename)
encoding = "utf-8" if "t" in mode else None
with self.open(self.filename, mode, encoding=encoding) as f:
pass
with self.assertRaises(FileExistsError):
with self.open(self.filename, mode) as f:
pass
def test_fileobj(self):
with self.open(BytesIO(self.DATA), "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(BytesIO(self.DATA), "rb") as f:
self.assertEqual(f.read(), self.TEXT)
text = self.TEXT.decode("ascii")
with self.open(BytesIO(self.DATA), "rt", encoding="utf-8") as f:
self.assertEqual(f.read(), text)
def test_bad_params(self):
# Test invalid parameter combinations.
self.assertRaises(ValueError,
self.open, self.filename, "wbt")
self.assertRaises(ValueError,
self.open, self.filename, "xbt")
self.assertRaises(ValueError,
self.open, self.filename, "rb", encoding="utf-8")
self.assertRaises(ValueError,
self.open, self.filename, "rb", errors="ignore")
self.assertRaises(ValueError,
self.open, self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
with self.open(self.filename, "wt", encoding="utf-16-le") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("utf-16-le")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt", encoding="utf-16-le") as f:
self.assertEqual(f.read(), text)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with self.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
text = self.TEXT.decode("ascii")
with self.open(self.filename, "wt", encoding="utf-8", newline="\n") as f:
f.write(text)
with self.open(self.filename, "rt", encoding="utf-8", newline="\r") as f:
self.assertEqual(f.readlines(), [text])
def test_main():
support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
CompressDecompressTest,
OpenTest,
)
support.reap_children()
if __name__ == '__main__':
test_main()
| 37.521569 | 1,181 | 0.599603 | [
"BSD-3-Clause"
] | Froggo8311/brython | www/src/Lib/test/test_bz2.py | 38,272 | Python |
# coding=utf-8
# Copyright 2018 HuggingFace Inc..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import sys
import unittest
from unittest.mock import patch
import run_generation
import run_glue
import run_squad
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
parser = argparse.ArgumentParser()
parser.add_argument("-f")
args = parser.parse_args()
return args.f
class ExamplesTests(unittest.TestCase):
def test_run_glue(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
testargs = [
"run_glue.py",
"--data_dir=./examples/tests_samples/MRPC/",
"--task_name=mrpc",
"--do_train",
"--do_eval",
"--output_dir=./examples/tests_samples/temp_dir",
"--per_gpu_train_batch_size=2",
"--per_gpu_eval_batch_size=1",
"--learning_rate=1e-4",
"--max_steps=10",
"--warmup_steps=2",
"--overwrite_output_dir",
"--seed=42",
]
model_type, model_name = ("--model_type=bert", "--model_name_or_path=bert-base-uncased")
with patch.object(sys, "argv", testargs + [model_type, model_name]):
result = run_glue.score_challenges()
for value in result.values():
self.assertGreaterEqual(value, 0.75)
def test_run_squad(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
testargs = [
"run_squad.py",
"--data_dir=./examples/tests_samples/SQUAD",
"--model_name=bert-base-uncased",
"--output_dir=./examples/tests_samples/temp_dir",
"--max_steps=10",
"--warmup_steps=2",
"--do_train",
"--do_eval",
"--version_2_with_negative",
"--learning_rate=2e-4",
"--per_gpu_train_batch_size=2",
"--per_gpu_eval_batch_size=1",
"--overwrite_output_dir",
"--seed=42",
]
model_type, model_name = ("--model_type=bert", "--model_name_or_path=bert-base-uncased")
with patch.object(sys, "argv", testargs + [model_type, model_name]):
result = run_squad.score_challenges()
self.assertGreaterEqual(result["f1"], 30)
self.assertGreaterEqual(result["exact"], 30)
def test_generation(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
testargs = ["run_generation.py", "--prompt=Hello", "--length=10", "--seed=42"]
model_type, model_name = ("--model_type=openai-gpt", "--model_name_or_path=openai-gpt")
with patch.object(sys, "argv", testargs + [model_type, model_name]):
result = run_generation.score_challenges()
self.assertGreaterEqual(len(result[0]), 10)
| 34.60396 | 96 | 0.629185 | [
"MIT"
] | borgr/ordert | transformers/examples/test_examples.py | 3,495 | Python |
#!/usr/bin/env python
# Problem: Many forked repos on GitHub fall behind their origins.
# Solution:
# 1) Verify that `apt install myrepos` is available on the system.
# 2) Query GitHub API to find all of my repositories
# 3) Clone each *fork* into *~/repos/mynameofit*, such that the place I forked
#    it from is the git origin (or update it, if the clone already exists)
# 4) For each such clone, add *my fork* as a remote named after my GitHub
# username
# 5) Also for each clone, `mr register` the repo
# Now custom `mr` commands can pull all origins and push to my remotes.
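#
# The body below is an illustrative sketch of the steps above, not the
# author's implementation (the original file contains only this note).
# The username constant, the GITHUB_TOKEN environment variable, and the
# ~/repos layout are assumptions made for the example.
import os
import shutil
import subprocess
import requests  # assumed third-party dependency for the GitHub API calls
GITHUB_USER = "example-user"  # assumption: replace with your GitHub username
def github(path):
    """GET a GitHub REST API path and return the decoded JSON response."""
    resp = requests.get("https://api.github.com" + path,
                        headers={"Authorization": "token %s" % os.environ["GITHUB_TOKEN"]})
    resp.raise_for_status()
    return resp.json()
def sync_fork(repo):
    """Clone/update one fork so upstream is 'origin' and my fork is a named remote."""
    dest = os.path.expanduser(os.path.join("~", "repos", repo["name"]))
    # The full repo object carries 'parent' (the repo this fork came from).
    upstream = github("/repos/%s/%s" % (GITHUB_USER, repo["name"]))["parent"]["clone_url"]
    if not os.path.isdir(dest):
        subprocess.check_call(["git", "clone", upstream, dest])        # step 3
    else:
        subprocess.check_call(["git", "-C", dest, "fetch", "origin"])  # step 3 (update)
    # Ignore failure if the remote already exists.
    subprocess.call(["git", "-C", dest, "remote", "add", GITHUB_USER,  # step 4
                     repo["clone_url"]])
    subprocess.check_call(["mr", "register", dest])                    # step 5
if __name__ == "__main__":
    assert shutil.which("mr"), "myrepos is not installed (step 1)"     # step 1
    for repo in github("/users/%s/repos?per_page=100" % GITHUB_USER):  # step 2
        if repo.get("fork"):
            sync_fork(repo)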
| 40.285714 | 77 | 0.719858 | [
"MIT"
] | edunham/toys | utilities/updatify.py | 564 | Python |
#!/usr/bin/env python
#
# Copyright (c) 2015, 2016, 2017, 2018, 2019, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
import cffi
_ffi = cffi.FFI()
_ffi.cdef("""
enum geopm_error_e {
GEOPM_ERROR_RUNTIME = -1,
GEOPM_ERROR_LOGIC = -2,
GEOPM_ERROR_INVALID = -3,
GEOPM_ERROR_FILE_PARSE = -4,
GEOPM_ERROR_LEVEL_RANGE = -5,
GEOPM_ERROR_NOT_IMPLEMENTED = -6,
GEOPM_ERROR_PLATFORM_UNSUPPORTED = -7,
GEOPM_ERROR_MSR_OPEN = -8,
GEOPM_ERROR_MSR_READ = -9,
GEOPM_ERROR_MSR_WRITE = -10,
GEOPM_ERROR_AGENT_UNSUPPORTED = -11,
GEOPM_ERROR_AFFINITY = -12,
GEOPM_ERROR_NO_AGENT = -13,
};
void geopm_error_message(int err, char *msg, size_t size);
""")
_dl = _ffi.dlopen('libgeopmpolicy.so')
ERROR_RUNTIME = _dl.GEOPM_ERROR_RUNTIME
ERROR_LOGIC = _dl.GEOPM_ERROR_LOGIC
ERROR_INVALID = _dl.GEOPM_ERROR_INVALID
ERROR_FILE_PARSE = _dl.GEOPM_ERROR_FILE_PARSE
ERROR_LEVEL_RANGE = _dl.GEOPM_ERROR_LEVEL_RANGE
ERROR_NOT_IMPLEMENTED = _dl.GEOPM_ERROR_NOT_IMPLEMENTED
ERROR_PLATFORM_UNSUPPORTED = _dl.GEOPM_ERROR_PLATFORM_UNSUPPORTED
ERROR_MSR_OPEN = _dl.GEOPM_ERROR_MSR_OPEN
ERROR_MSR_READ = _dl.GEOPM_ERROR_MSR_READ
ERROR_MSR_WRITE = _dl.GEOPM_ERROR_MSR_WRITE
ERROR_AGENT_UNSUPPORTED = _dl.GEOPM_ERROR_AGENT_UNSUPPORTED
ERROR_AFFINITY = _dl.GEOPM_ERROR_AFFINITY
ERROR_NO_AGENT = _dl.GEOPM_ERROR_NO_AGENT
def message(err_number):
"""Return the error message associated with the error code. Positive
error codes are interpreted as system error numbers, and
negative error codes are interpreted as GEOPM error numbers.
Args:
err_number (int): Error code to be interpreted.
Returns:
str: Error message associated with error code.
"""
global _ffi
global _dl
name_max = 1024
result_cstr = _ffi.new("char[]", name_max)
_dl.geopm_error_message(err_number, result_cstr, name_max)
return _ffi.string(result_cstr).decode()
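# Example usage (illustrative; the exact message text comes from the installed
# libgeopmpolicy and the system C library):
#
#     from geopmpy import error
#     error.message(error.ERROR_RUNTIME)   # GEOPM description of the runtime error
#     error.message(2)                     # system message for errno 2 (ENOENT)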
| 36.904255 | 74 | 0.758144 | [
"BSD-3-Clause"
] | RyoTTa/geopm | scripts/geopmpy/error.py | 3,469 | Python |
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Predictions
Your instructions: How to use your app to get new predictions.
"""
),
],
md=4,
)
column2 = dbc.Col(
[
]
)
layout = dbc.Row([column1, column2]) | 20.705882 | 78 | 0.620739 | [
"MIT"
] | Build-Week-Med-Cabinet-October-2020/med-cabinet | pages/predictions.py | 704 | Python |
# https://github.com/RainerKuemmerle/g2o/blob/master/g2o/examples/ba_anchored_inverse_depth/ba_anchored_inverse_depth_demo.cpp
import numpy as np
import g2o
from collections import defaultdict
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--noise', dest='pixel_noise', type=float, default=1.,
help='noise in image pixel space (default: 1.0)')
parser.add_argument('--outlier', dest='outlier_ratio', type=float, default=0.,
                    help='probability of spurious observation (default: 0.0)')
parser.add_argument('--robust', dest='robust_kernel', action='store_true', help='use robust kernel')
parser.add_argument('--no-schur', dest='schur_trick', action='store_false', help='not use Schur-complement trick')
parser.add_argument('--seed', type=int, default=0, help='random seed')
args = parser.parse_args()
def invert_depth(x):
assert len(x) == 3 and x[2] != 0
return np.array([x[0], x[1], 1]) / x[2]
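# Example (illustrative): a camera-frame point (2., 1., 4.) is stored in
# inverse-depth form as (x/z, y/z, 1/z) = (0.5, 0.25, 0.25). Applying
# invert_depth() to that result recovers the original point, so the same
# function converts in both directions.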
def main():
optimizer = g2o.SparseOptimizer()
if args.schur_trick:
solver = g2o.BlockSolverSE3(g2o.LinearSolverEigenSE3())
else:
solver = g2o.BlockSolverX(g2o.LinearSolverEigenX()) # slower
solver = g2o.OptimizationAlgorithmLevenberg(solver)
optimizer.set_algorithm(solver)
true_points = np.hstack([
np.random.random((500, 1)) * 3 - 1.5,
np.random.random((500, 1)) - 0.5,
np.random.random((500, 1)) + 3])
focal_length = 1000.
principal_point = (320, 240)
cam = g2o.CameraParameters(focal_length, principal_point, 0)
cam.set_id(0)
optimizer.add_parameter(cam)
true_poses = []
num_pose = 15
for i in range(num_pose):
# pose here means transform points from world coordinates to camera coordinates
pose = g2o.SE3Quat(np.identity(3), [i*0.04-1, 0, 0])
true_poses.append(pose)
v_se3 = g2o.VertexSE3Expmap()
v_se3.set_id(i)
v_se3.set_estimate(pose)
if i < 2:
v_se3.set_fixed(True)
optimizer.add_vertex(v_se3)
point_id = num_pose
inliers = dict()
sse = defaultdict(float)
for i, point in enumerate(true_points):
visible = []
for j, pose in enumerate(true_poses):
z = cam.cam_map(pose * point)
if 0 <= z[0] < 640 and 0 <= z[1] < 480:
visible.append((j, z))
if len(visible) < 2:
continue
v_p = g2o.VertexSBAPointXYZ()
v_p.set_id(point_id)
v_p.set_marginalized(args.schur_trick)
anchor = visible[0][0]
point2 = true_poses[anchor] * (point + np.random.randn(3))
if point2[2] == 0:
continue
v_p.set_estimate(invert_depth(point2))
optimizer.add_vertex(v_p)
inlier = True
for j, z in visible:
if np.random.random() < args.outlier_ratio:
inlier = False
z = np.random.random(2) * [640, 480]
z += np.random.randn(2) * args.pixel_noise
edge = g2o.EdgeProjectPSI2UV()
edge.resize(3)
edge.set_vertex(0, v_p)
edge.set_vertex(1, optimizer.vertex(j))
edge.set_vertex(2, optimizer.vertex(anchor))
edge.set_measurement(z)
edge.set_information(np.identity(2))
if args.robust_kernel:
edge.set_robust_kernel(g2o.RobustKernelHuber())
edge.set_parameter_id(0, 0)
optimizer.add_edge(edge)
if inlier:
inliers[point_id] = (i, anchor)
error = (true_poses[anchor].inverse() * invert_depth(v_p.estimate()) -
true_points[i])
sse[0] += np.sum(error**2)
point_id += 1
print('Performing full BA:')
optimizer.initialize_optimization()
optimizer.set_verbose(True)
optimizer.optimize(10)
for i in inliers:
v_p = optimizer.vertex(i)
v_anchor = optimizer.vertex(inliers[i][1])
error = (v_anchor.estimate().inverse() * invert_depth(v_p.estimate()) -
true_points[inliers[i][0]])
sse[1] += np.sum(error**2)
print('\nRMSE (inliers only):')
print('before optimization:', np.sqrt(sse[0] / len(inliers)))
print('after optimization:', np.sqrt(sse[1] / len(inliers)))
if __name__ == '__main__':
if args.seed > 0:
np.random.seed(args.seed)
main() | 31.702899 | 126 | 0.611886 | [
"MIT"
] | alecone/ROS_project | g2opy/python/examples/ba_anchored_inverse_depth_demo.py | 4,375 | Python |
"""
Conditional generative adversarial networks:
https://arxiv.org/abs/1611.07004
U-Net:
https://arxiv.org/abs/1505.04597
Conditional generative adversarial network architecture modules
used for simulation of detector response and unfolding in the JetGAN framework.
Generator() returns the generator model, and Discriminator() returns the
discriminator model.
"""
| 27.923077 | 75 | 0.807163 | [
"MIT"
] | nickelsey/jetgan | jetgan/model/cgan.py | 363 | Python |
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2019- d3p Developers and their Assignees
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
from numpyro.handlers import seed, trace, substitute, condition
from d3p.util import unvectorize_shape_2d
def get_samples_from_trace(trace, with_intermediates=False):
""" Extracts all sample values from a numpyro trace.
:param trace: trace object obtained from `numpyro.handlers.trace().get_trace()`
:param with_intermediates: If True, intermediate(/latent) samples from
sample site distributions are included in the result.
:return: Dictionary of sampled values associated with the names given
via `sample()` in the model. If with_intermediates is True,
dictionary values are tuples where the first element is the final
sample values and the second element is a list of intermediate values.
"""
samples = {
k: (v['value'], v['intermediates']) if with_intermediates else v['value']
for k, v in trace.items() if v['type'] == 'sample'
}
return samples
def sample_prior_predictive(
rng_key,
model,
model_args,
substitutes=None,
with_intermediates=False,
**kwargs
): # noqa: E121,E125
""" Samples once from the prior predictive distribution.
Individual sample sites, as designated by `sample`, can be frozen to
pre-determined values given in `substitutes`. In that case, values for these
sites are not actually sampled but the value provided in `substitutes` is
returned as the sample. This facilitates conditional sampling.
Note that if the model function is written in such a way that it returns, e.g.,
multiple observations from a single prior draw, the same is true for the
values returned by this function.
:param rng_key: Jax PRNG key
:param model: Function representing the model using numpyro distributions
and the `sample` primitive
:param model_args: Arguments to the model function
:param substitutes: An optional dictionary of frozen substitutes for
sample sites.
:param with_intermediates: If True, intermediate(/latent) samples from
sample site distributions are included in the result.
:param **kwargs: Keyword arguments passed to the model function.
:return: Dictionary of sampled values associated with the names given
via `sample()` in the model. If with_intermediates is True,
dictionary values are tuples where the first element is the final
sample values and the second element is a list of intermediate values.
"""
if substitutes is None:
substitutes = dict()
model = seed(substitute(model, data=substitutes), rng_key)
t = trace(model).get_trace(*model_args, **kwargs)
return get_samples_from_trace(t, with_intermediates)
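# Example usage (illustrative; the toy model below is not part of d3p):
#
#     import jax
#     import numpyro
#     import numpyro.distributions as dist
#
#     def toy_model(n):
#         mu = numpyro.sample('mu', dist.Normal(0., 1.))
#         numpyro.sample('obs', dist.Normal(mu, 1.), sample_shape=(n,))
#
#     draw = sample_prior_predictive(jax.random.PRNGKey(0), toy_model, (10,))
#     # draw['mu'] is a single prior draw and draw['obs'] holds 10 observations;
#     # passing substitutes={'mu': 0.} would freeze 'mu' instead of sampling it.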
def sample_posterior_predictive(
rng_key,
model,
model_args,
guide,
guide_args,
params,
with_intermediates=False,
**kwargs
): # noqa: E121, E125
""" Samples once from the posterior predictive distribution.
Note that if the model function is written in such a way that it returns, e.g.,
multiple observations from a single posterior draw, the same is true for the
values returned by this function.
:param rng_key: Jax PRNG key
:param model: Function representing the model using numpyro distributions
and the `sample` primitive
:param model_args: Arguments to the model function
:param guide: Function representing the variational distribution (the guide)
using numpyro distributions as well as the `sample` and `param` primitives
:param guide_args: Arguments to the guide function
:param params: A dictionary providing values for the parameters
designated by call to `param` in the guide
:param with_intermediates: If True, intermediate(/latent) samples from
sample site distributions are included in the result.
:param **kwargs: Keyword arguments passed to the model and guide functions.
:return: Dictionary of sampled values associated with the names given
via `sample()` in the model. If with_intermediates is True,
dictionary values are tuples where the first element is the final
sample values and the second element is a list of intermediate values.
"""
model_rng_key, guide_rng_key = jax.random.split(rng_key)
guide = seed(substitute(guide, data=params), guide_rng_key)
guide_samples = get_samples_from_trace(
trace(guide).get_trace(*guide_args, **kwargs), with_intermediates
)
model_params = dict(**params)
if with_intermediates:
model_params.update({k: v[0] for k, v in guide_samples.items()})
else:
model_params.update({k: v for k, v in guide_samples.items()})
model = seed(substitute(model, data=model_params), model_rng_key)
model_samples = get_samples_from_trace(
trace(model).get_trace(*model_args, **kwargs), with_intermediates
)
guide_samples.update(model_samples)
return guide_samples
def _sample_a_lot(rng_key, n, single_sample_fn):
rng_keys = jax.random.split(rng_key, n)
return jax.vmap(single_sample_fn)(rng_keys)
def sample_multi_prior_predictive(
rng_key,
n,
model,
model_args,
substitutes=None,
with_intermediates=False,
**kwargs
): # noqa: E121, E125
""" Samples n times from the prior predictive distribution.
Individual sample sites, as designated by `sample`, can be frozen to
pre-determined values given in `substitutes`. In that case, values for these
sites are not actually sampled but the value provided in `substitutes` is
returned as the sample. This facilitates conditional sampling.
Note that if the model function is written in such a way that it returns, e.g.,
multiple observations, say n_model many, from a single prior draw, the same is
true for the values returned by this function, i.e., this function will
output n x n_model observations.
:param rng_key: Jax PRNG key
:param n: Number of draws from the prior predictive.
:param model: Function representing the model using numpyro distributions
and the `sample` primitive
:param model_args: Arguments to the model function
:param substitutes: An optional dictionary of frozen substitutes for
sample sites.
:param with_intermediates: If True, intermediate(/latent) samples from
sample site distributions are included in the result.
:param **kwargs: Keyword arguments passed to the model function.
:return: Dictionary of sampled values associated with the names given
via `sample()` in the model. If with_intermediates is True,
dictionary values are tuples where the first element is the final
sample values and the second element is a list of intermediate values.
"""
def single_sample_fn(rng):
return sample_prior_predictive(
rng, model, model_args, substitutes=substitutes,
with_intermediates=with_intermediates, **kwargs
)
return _sample_a_lot(rng_key, n, single_sample_fn)
def sample_multi_posterior_predictive(
rng_key,
n,
model,
model_args,
guide,
guide_args,
params,
with_intermediates=False,
**kwargs
): # noqa: E121, E125
""" Samples n times from the posterior predictive distribution.
Note that if the model function is written in such a way that it returns, e.g.,
multiple observations, say n_model many, from a single posterior draw, the same is
true for the values returned by this function, i.e., this function will
output n x n_model observations.
:param rng_key: Jax PRNG key
:param model: Function representing the model using numpyro distributions
and the `sample` primitive
:param model_args: Arguments to the model function
:param guide: Function representing the variational distribution (the guide)
using numpyro distributions as well as the `sample` and `param` primitives
:param guide_args: Arguments to the guide function
:param params: A dictionary providing values for the parameters
designated by call to `param` in the guide
:param with_intermediates: If True, intermediate(/latent) samples from
sample site distributions are included in the result.
:param **kwargs: Keyword arguments passed to the model and guide functions.
:return: Dictionary of sampled values associated with the names given
via `sample()` in the model. If with_intermediates is True,
dictionary values are tuples where the first element is the final
sample values and the second element is a list of intermediate values.
"""
def single_sample_fn(rng):
return sample_posterior_predictive(
rng, model, model_args, guide, guide_args, params,
with_intermediates=with_intermediates, **kwargs
)
return _sample_a_lot(rng_key, n, single_sample_fn)
def map_args_obs_to_shape(obs, *args, **kwargs):
return unvectorize_shape_2d(obs), kwargs, {'obs': obs}
def make_observed_model(model, obs_to_model_args_fn):
""" Transforms a generative model function into one with fixed observations
for likelihood evaluation in the SVI algorithm.
:param model: Any generative model function using the numpyro `sample`
primitive.
:param obs_to_model_args_fn: A function mapping from an argument list compatible
with SVI (i.e., accepting a batch of observations) to that of `model`. The
mapping function can take arbitrary arguments and must return a tuple
(args, kwargs, observations), where args and kwargs are passed to `model`
as argument and keyword arguments and observations is a dictionary of
observations for sample sites in `model` that will be fixed using the
`observe` handler.
"""
def transformed_model_fn(*args, **kwargs):
mapped_args, mapped_kwargs, fixed_obs = obs_to_model_args_fn(*args, **kwargs)
return condition(model, data=fixed_obs)(*mapped_args, **mapped_kwargs)
return transformed_model_fn
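# Example usage (illustrative; assumes the model defines a sample site named
# 'obs', as expected by map_args_obs_to_shape above):
#
#     observed_model = make_observed_model(model, map_args_obs_to_shape)
#     # observed_model(obs_batch) now evaluates `model` with its shape argument
#     # derived from the batch and the 'obs' site fixed to the given values via
#     # the `condition` handler, which is the form SVI needs for the likelihood.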
| 43.657258 | 86 | 0.714325 | [
"Apache-2.0"
] | DPBayes/d3p | d3p/modelling.py | 10,828 | Python |
# encoding: utf-8
from libs.configs import cfgs
from libs.box_utils import bbox_transform
from libs.box_utils import nms_rotate
import tensorflow as tf
from libs.box_utils.coordinate_convert import coordinate_present_convert
def filter_detections(boxes, scores, is_training, gpu_id):
"""
:param boxes: [-1, 4]
:param scores: [-1, ]
:param labels: [-1, ]
:return:
"""
if is_training:
indices = tf.reshape(tf.where(tf.greater(scores, cfgs.VIS_SCORE)), [-1, ])
else:
indices = tf.reshape(tf.where(tf.greater(scores, cfgs.FILTERED_SCORE)), [-1, ])
if cfgs.NMS:
filtered_boxes = tf.gather(boxes, indices)
filtered_scores = tf.gather(scores, indices)
if cfgs.ANGLE_RANGE == 180:
# _, _, _, _, theta = tf.unstack(boxes_pred, axis=1)
# indx = tf.reshape(tf.where(tf.logical_and(tf.less(theta, 0), tf.greater_equal(theta, -180))), [-1, ])
# boxes_pred = tf.gather(boxes_pred, indx)
# scores = tf.gather(scores, indx)
filtered_boxes = tf.py_func(coordinate_present_convert,
inp=[filtered_boxes, 1],
Tout=[tf.float32])
filtered_boxes = tf.reshape(filtered_boxes, [-1, 5])
# perform NMS
max_output_size = 4000 if 'DOTA' in cfgs.NET_NAME else 200
nms_indices = nms_rotate.nms_rotate(decode_boxes=filtered_boxes,
scores=filtered_scores,
iou_threshold=cfgs.NMS_IOU_THRESHOLD,
max_output_size=100 if is_training else max_output_size,
use_angle_condition=False,
angle_threshold=15,
use_gpu=True,
gpu_id=gpu_id)
# filter indices based on NMS
indices = tf.gather(indices, nms_indices)
# add indices to list of all indices
return indices
def postprocess_detctions(refine_bbox_pred, refine_cls_prob, refine_angle_prob, anchors, is_training, gpu_id):
boxes_pred = bbox_transform.rbbox_transform_inv(boxes=anchors, deltas=refine_bbox_pred,
scale_factors=cfgs.ANCHOR_SCALE_FACTORS)
angle_cls = tf.cast(tf.argmax(refine_angle_prob, axis=1), tf.float32)
angle_cls = (tf.reshape(angle_cls, [-1, ]) * -1 - 0.5) * cfgs.OMEGA
x, y, w, h, theta = tf.unstack(boxes_pred, axis=1)
boxes_pred_angle = tf.transpose(tf.stack([x, y, w, h, angle_cls]))
return_boxes_pred = []
return_boxes_pred_angle = []
return_scores = []
return_labels = []
for j in range(0, cfgs.CLASS_NUM):
indices = filter_detections(boxes_pred_angle, refine_cls_prob[:, j], is_training, gpu_id)
tmp_boxes_pred_angle = tf.reshape(tf.gather(boxes_pred_angle, indices), [-1, 5])
tmp_boxes_pred = tf.reshape(tf.gather(boxes_pred, indices), [-1, 5])
tmp_scores = tf.reshape(tf.gather(refine_cls_prob[:, j], indices), [-1, ])
if cfgs.ANGLE_RANGE == 180:
# _, _, _, _, theta = tf.unstack(boxes_pred, axis=1)
# indx = tf.reshape(tf.where(tf.logical_and(tf.less(theta, 0), tf.greater_equal(theta, -180))), [-1, ])
# boxes_pred = tf.gather(boxes_pred, indx)
# scores = tf.gather(scores, indx)
tmp_boxes_pred_angle = tf.py_func(coordinate_present_convert,
inp=[tmp_boxes_pred_angle, 1],
Tout=[tf.float32])
tmp_boxes_pred_angle = tf.reshape(tmp_boxes_pred_angle, [-1, 5])
tmp_boxes_pred = tf.py_func(coordinate_present_convert,
inp=[tmp_boxes_pred, 1],
Tout=[tf.float32])
tmp_boxes_pred = tf.reshape(tmp_boxes_pred, [-1, 5])
return_boxes_pred.append(tmp_boxes_pred)
return_boxes_pred_angle.append(tmp_boxes_pred_angle)
return_scores.append(tmp_scores)
return_labels.append(tf.ones_like(tmp_scores)*(j+1))
return_boxes_pred = tf.concat(return_boxes_pred, axis=0)
return_boxes_pred_angle = tf.concat(return_boxes_pred_angle, axis=0)
return_scores = tf.concat(return_scores, axis=0)
return_labels = tf.concat(return_labels, axis=0)
return return_boxes_pred, return_scores, return_labels, return_boxes_pred_angle
| 45.405941 | 115 | 0.595072 | [
"Apache-2.0"
] | DLPerf/R3Det_Tensorflow | libs/detection_oprations/refine_proposal_opr_csl.py | 4,586 | Python |
# coding:utf-8
import time
import datetime
import os
import tensorflow as tf
import pickle
import utils
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import evaluate
from utils import Utils
class SMN():
def __init__(self,
device_name='/cpu:0',
lr=0.001,
max_num_utterance=5,
negative_samples=1,
max_sentence_len=20,
word_embedding_size=100,
rnn_units=100,
total_words=66958,
batch_size=32,
max_epoch=100,
num_checkpoints=10,
evaluate_every=100,
checkpoint_every=100):
self.utils = Utils()
self.device_name = device_name
self.lr = lr
self.max_num_utterance = max_num_utterance
self.negative_samples = negative_samples
self.max_sentence_len = max_sentence_len
self.word_embedding_size = word_embedding_size
self.rnn_units = rnn_units
self.total_words = total_words
self.batch_size = batch_size
self.max_epoch = max_epoch
self.num_checkpoints = num_checkpoints
self.evaluate_every = evaluate_every
self.checkpoint_every = checkpoint_every
def LoadModel(self):
#init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess = tf.Session()
#with tf.Session() as sess:
#sess.run(init)
saver.restore(sess,"neg5model\\model.5")
return sess
# Later, launch the model, use the saver to restore variables from disk, and
# do some work with the model.
# with tf.Session() as sess:
# # Restore variables from disk.
# saver.restore(sess, "/model/model.5")
# print("Model restored.")
def build_model(self):
# placeholders
self.utterance_ph = tf.placeholder(tf.int32, shape=(None, self.max_num_utterance, self.max_sentence_len), name='utterances')
self.response_ph = tf.placeholder(tf.int32, shape=(None, self.max_sentence_len), name='responses')
self.y_true = tf.placeholder(tf.int32, shape=(None,), name='y_true')
# self.embedding_ph = tf.placeholder(tf.float32, shape=(self.total_words, self.word_embedding_size))
self.response_len = tf.placeholder(tf.int32, shape=(None,), name='responses_len')
self.all_utterance_len_ph = tf.placeholder(tf.int32, shape=(None, self.max_num_utterance), name='utterances_len')
with tf.device(self.device_name):
# word_embedding vector
word_embeddings = tf.get_variable('word_embeddings_v', initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1), shape=(self.total_words, self.word_embedding_size), dtype=tf.float32, trainable=True)
# word_embeddings = tf.get_variable('word_embeddings_v', shape=(self.total_words, self.word_embedding_size), dtype=tf.float32, trainable=False)
# self.embedding_init = word_embeddings.assign(self.embedding_ph)
# utterance embedding
all_utterance_embeddings = tf.nn.embedding_lookup(word_embeddings, self.utterance_ph)
all_utterance_embeddings = tf.unstack(all_utterance_embeddings, num=self.max_num_utterance, axis=1)
all_utterance_len = tf.unstack(self.all_utterance_len_ph, num=self.max_num_utterance, axis=1)
# response embedding
response_embeddings = tf.nn.embedding_lookup(word_embeddings, self.response_ph)
# GRU initialize
sentence_GRU = tf.nn.rnn_cell.GRUCell(self.rnn_units, kernel_initializer=tf.orthogonal_initializer())
final_GRU = tf.nn.rnn_cell.GRUCell(self.rnn_units, kernel_initializer=tf.orthogonal_initializer())
# matrix 1
A_matrix = tf.get_variable('A_matrix_v', shape=(self.rnn_units, self.rnn_units), initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
reuse = None
response_GRU_embeddings, _ = tf.nn.dynamic_rnn(sentence_GRU, response_embeddings, sequence_length=self.response_len, dtype=tf.float32,
scope='sentence_GRU')
self.response_embedding_save = response_GRU_embeddings
response_embeddings = tf.transpose(response_embeddings, perm=[0, 2, 1])
response_GRU_embeddings = tf.transpose(response_GRU_embeddings, perm=[0, 2, 1])
# generate matching vectors
matching_vectors = []
for utterance_embeddings, utterance_len in zip(all_utterance_embeddings, all_utterance_len):
matrix1 = tf.matmul(utterance_embeddings, response_embeddings)
utterance_GRU_embeddings, _ = tf.nn.dynamic_rnn(sentence_GRU, utterance_embeddings, sequence_length=utterance_len, dtype=tf.float32,
scope='sentence_GRU')
matrix2 = tf.einsum('aij,jk->aik', utterance_GRU_embeddings, A_matrix) # TODO:check this
matrix2 = tf.matmul(matrix2, response_GRU_embeddings)
matrix = tf.stack([matrix1, matrix2], axis=3, name='matrix_stack')
conv_layer = tf.layers.conv2d(matrix, filters=8, kernel_size=(3, 3), padding='VALID',
kernel_initializer=tf.contrib.keras.initializers.he_normal(),
activation=tf.nn.relu, reuse=reuse, name='conv') # TODO: check other params
pooling_layer = tf.layers.max_pooling2d(conv_layer, (3, 3), strides=(3, 3),
padding='VALID', name='max_pooling') # TODO: check other params
matching_vector = tf.layers.dense(tf.contrib.layers.flatten(pooling_layer), 50,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                                  activation=tf.tanh, reuse=reuse, name='matching_v')  # TODO: check whether this is correct
if not reuse:
reuse = True
matching_vectors.append(matching_vector)
# last hidden layer
_, last_hidden = tf.nn.dynamic_rnn(final_GRU, tf.stack(matching_vectors, axis=0, name='matching_stack'), dtype=tf.float32,
time_major=True, scope='final_GRU') # TODO: check time_major
# output layer
output = tf.layers.dense(last_hidden, 2, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='final_v')
self.logits = tf.nn.softmax(output, name='y_logits')
self.y_pred = tf.cast(tf.argmax(input=output, axis=1), 'int32', name='y_pred')
# loss
self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y_true, logits=output), name='loss')
# accuracy
correct_predictions = tf.equal(self.y_pred, self.y_true)
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'), name='accuracy')
# optimize
self.global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
grads_and_vars = optimizer.compute_gradients(self.loss)
self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step, name='train_op')
def Evaluate(self, sess):
pass
'''
with open(evaluate_file, 'rb') as f:
history, true_utt, labels = pickle.load(f)
self.all_candidate_scores = []
history, history_len = utils.multi_sequences_padding(history, self.max_sentence_len)
history, history_len = np.array(history), np.array(history_len)
true_utt_len = np.array(utils.get_sequences_length(true_utt, maxlen=self.max_sentence_len))
true_utt = np.array(pad_sequences(true_utt, padding='post', maxlen=self.max_sentence_len))
low = 0
while True:
feed_dict = {
self.utterance_ph: np.concatenate([history[low:low + 200]], axis=0),
self.all_utterance_len_ph: np.concatenate([history_len[low:low + 200]], axis=0),
self.response_ph: np.concatenate([true_utt[low:low + 200]], axis=0),
self.response_len: np.concatenate([true_utt_len[low:low + 200]], axis=0),
}
candidate_scores = sess.run(self.y_pred, feed_dict=feed_dict)
self.all_candidate_scores.append(candidate_scores[:, 1])
low = low + 200
if low >= history.shape[0]:
break
all_candidate_scores = np.concatenate(self.all_candidate_scores, axis=0)
evaluate.ComputeR10_1(all_candidate_scores,labels)
evaluate.ComputeR2_1(all_candidate_scores,labels)
'''
def train_model(self, all_sequences, all_responses_true, use_pre_trained=False, pre_trained_modelpath='./model/pre-trained-model'):
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
with tf.Session(config=config) as sess:
# output directory for models and summaries
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.curdir, 'log', timestamp))
print('Writing log to {}\n'.format(out_dir))
# summary all the trainable variables
for var in tf.trainable_variables():
tf.summary.histogram(name=var.name, values=var)
# summaries for loss and accuracy
loss_summary = tf.summary.scalar('summary_loss', self.loss)
acc_summary = tf.summary.scalar('summary_accuracy', self.accuracy)
# train summaries
train_summary_op = tf.summary.merge_all()
train_summary_dir = os.path.join(out_dir, 'summaries', 'train')
train_summary_writer = tf.summary.FileWriter(train_summary_dir, tf.get_default_graph())
# dev summaries
dev_summary_op = tf.summary.merge_all()
dev_summary_dir = os.path.join(out_dir, 'summaries', 'dev')
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, tf.get_default_graph())
# checkpointing, tensorflow assumes this directory already existed, so we need to create it
checkpoint_dir = os.path.join(out_dir, 'checkpoints')
checkpoint_prefix = os.path.join(checkpoint_dir, 'model')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.num_checkpoints)
# initialize all variables
sess.run(tf.global_variables_initializer())
# use pre-trained model to continue
if use_pre_trained:
print('reloading model parameters...')
saver.restore(sess, pre_trained_modelpath)
# get input data
actions = all_responses_true[:]
history, history_len = self.utils.multi_sequences_padding(all_sequences, self.max_sentence_len)
true_utt_len = np.array(self.utils.get_sequences_length(all_responses_true, maxlen=self.max_sentence_len))
true_utt = np.array(pad_sequences(all_responses_true, padding='post', maxlen=self.max_sentence_len))
actions_len = np.array(self.utils.get_sequences_length(actions, maxlen=self.max_sentence_len))
actions = np.array(pad_sequences(actions, padding='post', maxlen=self.max_sentence_len))
history, history_len = np.array(history), np.array(history_len)
low = 0
epoch = 1
while epoch <= self.max_epoch:
n_sample = min(low + self.batch_size, history.shape[0]) - low
negative_indices = [np.random.randint(0, actions.shape[0], n_sample) for _ in range(self.negative_samples)]
negs = [actions[negative_indices[i], :] for i in range(self.negative_samples)]
negs_len = [actions_len[negative_indices[i]] for i in range(self.negative_samples)]
feed_dict = {
self.utterance_ph: np.concatenate([history[low:low + n_sample]] * (self.negative_samples + 1), axis=0),
self.all_utterance_len_ph: np.concatenate([history_len[low:low + n_sample]] * (self.negative_samples + 1), axis=0),
self.response_ph: np.concatenate([true_utt[low:low + n_sample]] + negs, axis=0),
self.response_len: np.concatenate([true_utt_len[low:low + n_sample]] + negs_len, axis=0),
self.y_true: np.concatenate([np.ones(n_sample)] + [np.zeros(n_sample)] * self.negative_samples, axis=0)
}
_, step, summaries, loss, accuracy, y_logits, y_pred, y_true = sess.run(
[self.train_op, self.global_step, train_summary_op, self.loss, self.accuracy, self.logits, self.y_pred, self.y_true],
feed_dict)
y_pred_proba = y_logits[:,1]
timestr = datetime.datetime.now().isoformat()
print('{}: => epoch {} | step {} | loss {:.6f} | acc {:.6f}'.format(timestr, epoch, step, loss, accuracy))
train_summary_writer.add_summary(summaries, step)
current_step = tf.train.global_step(sess, self.global_step)
low += n_sample
if current_step % self.evaluate_every == 0:
pass
# print("loss", sess.run(self.loss, feed_dict=feed_dict))
# self.Evaluate(sess)
if current_step % self.checkpoint_every == 0:
path = saver.save(sess=sess, save_path=checkpoint_prefix, global_step=self.global_step)
print('\nSaved model checkpoint to {}\n'.format(path))
if low >= history.shape[0]:
low = 0
epoch += 1
def predict(self, model_file, dev_utterances, dev_responses, dev_utterances_len, dev_responses_len):
# self.build_model()
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph('{}.meta'.format(model_file))
saver.restore(sess, model_file)
            # Access the saved placeholder variables and build a feed-dict for the new data
graph = tf.get_default_graph()
ph_utterances = graph.get_tensor_by_name('utterances:0')
ph_responses = graph.get_tensor_by_name('responses:0')
ph_utterances_len = graph.get_tensor_by_name('utterances_len:0')
ph_responses_len = graph.get_tensor_by_name('responses_len:0')
ph_y_true = graph.get_tensor_by_name('y_true:0')
feed_dict = {
ph_utterances: dev_utterances,
ph_responses: dev_responses,
ph_utterances_len: dev_utterances_len,
ph_responses_len: dev_responses_len
}
op_y_logits = graph.get_tensor_by_name('y_logits:0')
op_y_pred = graph.get_tensor_by_name('y_pred:0')
y_logits, y_pred = sess.run([op_y_logits, op_y_pred], feed_dict)
y_pred_proba = y_logits[:,1]
# print(y_logits)
# print(y_pred)
return y_pred_proba, y_pred
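# Usage sketch for predict() (hypothetical checkpoint path; the dev_* arrays are
# padded id sequences and their lengths, shaped like the training inputs above):
#   proba, pred = smn.predict('log/<timestamp>/checkpoints/model-1000',
#                             dev_utterances, dev_responses,
#                             dev_utterances_len, dev_responses_len)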
if __name__ == "__main__":
smn = SMN()
smn.build_model()
# smn.train_model()
#sess = scn.LoadModel()
#scn.Evaluate(sess)
#results = scn.BuildIndex(sess)
#print(len(results))
#scn.TrainModel()
| 53.362416 | 216 | 0.621997 | [
"Apache-2.0"
] | zlxy9892/chatbot-retrieval-based-smn | retrieval_model.py | 15,902 | Python |
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Disable the lint error for too-long lines for the URL below.
# pylint: disable=C0301
"""Fix Chrome App manifest.json files for use with multi-platform zip files.
See info about multi-platform zip files here:
https://developer.chrome.com/native-client/devguide/distributing#packaged-application
The manifest.json file needs to point to the correct platform-specific paths,
but we build all toolchains and configurations in the same tree. As a result,
we can't have one manifest.json for all combinations.
Instead, we update the top-level manifest.json file during the build:
"platforms": [
{
"nacl_arch": "x86-64",
"sub_package_path": "_platform_specific/x86-64/"
},
...
Becomes
"platforms": [
{
"nacl_arch": "x86-64",
"sub_package_path": "<toolchain>/<config>/_platform_specific/x86-64/"
},
...
"""
import argparse
import collections
import json
import os
import sys
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required run this script\n")
sys.exit(1)
class Error(Exception):
"""Local Error class for this file."""
pass
def Trace(msg):
if Trace.verbose:
sys.stderr.write(str(msg) + '\n')
Trace.verbose = False
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-p', '--prefix',
help='Prefix to set for all sub_package_paths in the '
'manifest. If none is specified, the prefix will be '
'removed; i.e. the start of the path will be '
'"_platform_specific/..."')
parser.add_argument('-v', '--verbose',
help='Verbose output', action='store_true')
parser.add_argument('manifest_json')
options = parser.parse_args(args)
if options.verbose:
Trace.verbose = True
Trace('Reading %s' % options.manifest_json)
with open(options.manifest_json) as f:
# Keep the dictionary order. This is only supported on Python 2.7+
if sys.version_info >= (2, 7, 0):
data = json.load(f, object_pairs_hook=collections.OrderedDict)
else:
data = json.load(f)
if 'platforms' not in data:
raise Error('%s does not have "platforms" key.' % options.manifest_json)
platforms = data['platforms']
if not isinstance(platforms, list):
raise Error('Expected "platforms" key to be array.')
if options.prefix:
prefix = options.prefix + '/'
else:
prefix = ''
for platform in platforms:
nacl_arch = platform.get('nacl_arch')
if 'sub_package_path' not in platform:
raise Error('Expected each platform to have "sub_package_path" key.')
sub_package_path = platform['sub_package_path']
index = sub_package_path.find('_platform_specific')
if index == -1:
raise Error('Could not find "_platform_specific" in the '
'"sub_package_path" key.')
new_path = prefix + sub_package_path[index:]
platform['sub_package_path'] = new_path
Trace(' %s: "%s" -> "%s"' % (nacl_arch, sub_package_path, new_path))
with open(options.manifest_json, 'w') as f:
json.dump(data, f, indent=2)
return 0
if __name__ == '__main__':
try:
rtn = main(sys.argv[1:])
  except Error as e:
sys.stderr.write('%s: %s\n' % (os.path.basename(__file__), e))
rtn = 1
except KeyboardInterrupt:
sys.stderr.write('%s: interrupted\n' % os.path.basename(__file__))
rtn = 1
sys.exit(rtn)
| 28.328125 | 85 | 0.665196 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | Alan-love/chromium | native_client_sdk/src/tools/fix_manifest.py | 3,626 | Python |
import numpy as np
import cv2
import math
def norm_image(img):
""" normalize image input """
img = img.astype(np.float32)
var = np.var(img, axis=(0, 1), keepdims=True)
mean = np.mean(img, axis=(0, 1), keepdims=True)
return (img - mean) / (np.sqrt(var) + 1e-7)
def mask_depth_image(depth_image, min_depth, max_depth):
""" mask out-of-range pixel to zero """
# print ('mask min max', min_depth, max_depth)
ret, depth_image = cv2.threshold(depth_image, min_depth, 100000, cv2.THRESH_TOZERO)
ret, depth_image = cv2.threshold(depth_image, max_depth, 100000, cv2.THRESH_TOZERO_INV)
depth_image = np.expand_dims(depth_image, 2)
return depth_image
def scale_camera(cam, scale=1):
""" resize input in order to produce sampled depth map """
new_cam = np.copy(cam)
# focal:
new_cam[1][0][0] = cam[1][0][0] * scale
new_cam[1][1][1] = cam[1][1][1] * scale
# principle point:
new_cam[1][0][2] = cam[1][0][2] * scale
new_cam[1][1][2] = cam[1][1][2] * scale
return new_cam
def scale_image(image, scale=1, interpolation='linear'):
""" resize image using cv2 """
if interpolation == 'linear':
return cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
if interpolation == 'nearest':
return cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
def scale_dtu_input(images, cams, depth_image=None, scale=1):
""" resize input to fit into the memory """
for view in range(len(images)):
images[view] = scale_image(images[view], scale=scale)
cams[view] = scale_camera(cams[view], scale=scale)
if depth_image is None:
return images, cams
else:
depth_image = scale_image(depth_image, scale=scale, interpolation='nearest')
return images, cams, depth_image
def crop_dtu_input(images, cams, height, width, base_image_size, depth_image=None):
""" resize images and cameras to fit the network (can be divided by base image size) """
# crop images and cameras
for view in range(len(images)):
h, w = images[view].shape[0:2]
new_h = h
new_w = w
if new_h > height:
new_h = height
else:
new_h = int(math.floor(h / base_image_size) * base_image_size)
if new_w > width:
new_w = width
else:
new_w = int(math.floor(w / base_image_size) * base_image_size)
start_h = int(math.floor((h - new_h) / 2))
start_w = int(math.floor((w - new_w) / 2))
finish_h = start_h + new_h
finish_w = start_w + new_w
images[view] = images[view][start_h:finish_h, start_w:finish_w]
cams[view][1][0][2] = cams[view][1][0][2] - start_w
cams[view][1][1][2] = cams[view][1][1][2] - start_h
# crop depth image
if not depth_image is None:
depth_image = depth_image[start_h:finish_h, start_w:finish_w]
return images, cams, depth_image
else:
return images, cams
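# Minimal usage sketch with synthetic inputs (real callers load images and camera
# parameters from disk; the (2, 4, 4) camera layout below only fills the intrinsic
# entries touched above and is an assumption for illustration).
if __name__ == "__main__":
    views = 3
    images = [np.zeros((1200, 1600, 3), dtype=np.uint8) for _ in range(views)]
    cams = [np.zeros((2, 4, 4), dtype=np.float32) for _ in range(views)]
    for cam in cams:
        cam[1][0][0] = cam[1][1][1] = 2892.0  # made-up focal lengths
        cam[1][0][2], cam[1][1][2] = 800.0, 600.0  # principal point at the image centre
    images, cams = scale_dtu_input(images, cams, scale=0.5)
    images, cams = crop_dtu_input(images, cams, height=512, width=640, base_image_size=8)
    print(images[0].shape)  # -> (512, 640, 3)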
| 35.174419 | 92 | 0.631736 | [
"MIT"
] | HelenYang1999/PointMVSNet | pointmvsnet/utils/preprocess.py | 3,025 | Python |
import asyncio, logging
from aiohttp import web
logging.basicConfig(level=logging.INFO)
def index(request):
return web.Response(body=b'<h1>Hello World</h1>', content_type='text/html')
async def init(loop):
app = web.Application(loop=loop)
app.router.add_route('GET', '/', index)
srv = await loop.create_server(app.make_handler(), '127.0.0.1', 5000)
logging.info('server is listening at http://127.0.0.1:5000')
return srv
cur_loop = asyncio.get_event_loop()
cur_loop.run_until_complete(init(cur_loop))
cur_loop.run_forever()
| 22.32 | 79 | 0.718638 | [
"MIT"
] | ResolveWang/minifw | app.py | 558 | Python |
import sys
import numpy as np
import torch
import torch.hub
from PIL import Image
from torchvision.transforms import Compose
from _model_base import ModelBase, handle_alpha
from _util import apply_colormap, to_rgb
# Simplified transforms from
# https://github.com/intel-isl/MiDaS/blob/master/models/transforms.py
class Resize:
def __init__(self, width, height, image_interpolation_method=Image.BICUBIC):
self.__width = width
self.__height = height
self.__multiple_of = 32
self.__image_interpolation_method = image_interpolation_method
def constrain_to_multiple_of(self, x):
return (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
def get_size(self, width, height):
scale_height = self.__height / height
scale_width = self.__width / width
# scale such that output size is upper bound
if scale_width < scale_height:
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
new_height = self.constrain_to_multiple_of(scale_height * height)
new_width = self.constrain_to_multiple_of(scale_width * width)
return new_width, new_height
def __call__(self, image):
width, height = self.get_size(image.shape[1], image.shape[0])
resized = Image.fromarray(image).resize((width, height), self.__image_interpolation_method)
return np.array(resized)
class NormalizeImage:
def __init__(self, mean, std):
self.__mean = mean
self.__std = std
def __call__(self, image):
return (image - self.__mean) / self.__std
class PrepareForNet:
def __call__(self, image):
image = np.transpose(image, (2, 0, 1))
image = np.ascontiguousarray(image, dtype=np.float32)
tensor = torch.from_numpy(image)
return tensor.unsqueeze(0)
class MiDaS(ModelBase):
def __init__(self):
super().__init__()
self.hub_repo = "intel-isl/MiDaS"
def load_model(self):
model = torch.hub.load(self.hub_repo, "MiDaS", pretrained=True)
model.to(self.device)
model.eval()
return model
@staticmethod
def get_transform():
return Compose([
Resize(384, 384),
lambda x: x / 255.,
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
PrepareForNet()
])
@handle_alpha
@torch.no_grad()
def predict(self, input_image, colormap=None):
h, w, d = input_image.shape
assert d == 3, "Input image must be RGB"
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
transform = self.get_transform()
image_tensor = transform(input_image).to(self.device)
prediction = self.model.forward(image_tensor)
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=(h, w),
mode="bicubic",
align_corners=False,
)
disp = prediction.squeeze().cpu().numpy()
disp /= disp.max()
if colormap:
out = apply_colormap(disp, colormap)
else:
out = to_rgb(disp)
return (out * 255).astype(np.uint8)
model = MiDaS()
if __name__ == '__main__':
rpc_url = sys.argv[1]
model.process_rpc(rpc_url)
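# Direct-use sketch (bypassing the RPC wrapper; assumes a local RGB image file):
#   img = np.array(Image.open("photo.jpg").convert("RGB"))
#   depth_vis = model.predict(img)  # HxWx3 uint8 visualisation of inverse depth
#   Image.fromarray(depth_vis).save("depth.png")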
| 29.145299 | 99 | 0.633724 | [
"MIT"
] | valgur/GIMP-ML | models/MiDaS.py | 3,410 | Python |
import tensorflow as tf
import argparse
import os, re
import numpy as np
from tensorflow.contrib.layers import variance_scaling_initializer
from tensorpack import *
from tensorpack.utils import logger
from tensorpack.utils.stat import RatioCounter
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.dataflow.dataset import ILSVRCMeta
MODEL_DEPTH = None
class Model(ModelDesc):
def _get_input_vars(self):
return [InputVar(tf.float32, [None, 224, 224, 3], 'input'),
InputVar(tf.int32, [None], 'label')]
def _build_graph(self, input_vars):
image, label = input_vars
def shortcut(l, n_in, n_out, stride):
if n_in != n_out:
l = Conv2D('convshortcut', l, n_out, 1, stride=stride)
return BatchNorm('bnshortcut', l)
else:
return l
def bottleneck(l, ch_out, stride, preact):
ch_in = l.get_shape().as_list()[-1]
input = l
if preact == 'both_preact':
l = tf.nn.relu(l, name='preact-relu')
input = l
l = Conv2D('conv1', l, ch_out, 1, stride=stride)
l = BatchNorm('bn1', l)
l = tf.nn.relu(l)
l = Conv2D('conv2', l, ch_out, 3)
l = BatchNorm('bn2', l)
l = tf.nn.relu(l)
l = Conv2D('conv3', l, ch_out * 4, 1)
l = BatchNorm('bn3', l) # put bn at the bottom
return l + shortcut(input, ch_in, ch_out * 4, stride)
def layer(l, layername, features, count, stride, first=False):
with tf.variable_scope(layername):
with tf.variable_scope('block0'):
l = bottleneck(l, features, stride,
'no_preact' if first else 'both_preact')
for i in range(1, count):
with tf.variable_scope('block{}'.format(i)):
l = bottleneck(l, features, 1, 'both_preact')
return l
cfg = {
50: ([3,4,6,3]),
101: ([3,4,23,3]),
152: ([3,8,36,3])
}
defs = cfg[MODEL_DEPTH]
with argscope(Conv2D, nl=tf.identity, use_bias=False,
W_init=variance_scaling_initializer(mode='FAN_OUT')):
# tensorflow with padding=SAME will by default pad [2,3] here.
# but caffe conv with stride will pad [3,3]
image = tf.pad(image, [[0,0],[3,3],[3,3],[0,0]])
fc1000 = (LinearWrap(image)
.Conv2D('conv0', 64, 7, stride=2, nl=BNReLU, padding='VALID')
.MaxPooling('pool0', shape=3, stride=2, padding='SAME')
.apply(layer, 'group0', 64, defs[0], 1, first=True)
.apply(layer, 'group1', 128, defs[1], 2)
.apply(layer, 'group2', 256, defs[2], 2)
.apply(layer, 'group3', 512, defs[3], 2)
.tf.nn.relu()
.GlobalAvgPooling('gap')
.FullyConnected('fc1000', 1000, nl=tf.identity)())
prob = tf.nn.softmax(fc1000, name='prob')
nr_wrong = prediction_incorrect(fc1000, label, name='wrong-top1')
nr_wrong = prediction_incorrect(fc1000, label, 5, name='wrong-top5')
def get_inference_augmentor():
# load ResNet mean from Kaiming:
#from tensorpack.utils.loadcaffe import get_caffe_pb
#obj = get_caffe_pb().BlobProto()
#obj.ParseFromString(open('ResNet_mean.binaryproto').read())
#pp_mean_224 = np.array(obj.data).reshape(3, 224, 224).transpose(1,2,0)
meta = ILSVRCMeta()
pp_mean = meta.get_per_pixel_mean()
pp_mean_224 = pp_mean[16:-16,16:-16,:]
transformers = imgaug.AugmentorList([
imgaug.ResizeShortestEdge(256),
imgaug.CenterCrop((224, 224)),
imgaug.MapImage(lambda x: x - pp_mean_224),
])
return transformers
def eval_on_ILSVRC12(params, data_dir):
ds = dataset.ILSVRC12(data_dir, 'val', shuffle=False, dir_structure='train')
ds = AugmentImageComponent(ds, get_inference_augmentor())
ds = BatchData(ds, 128, remainder=True)
pred_config = PredictConfig(
model=Model(),
session_init=ParamRestore(params),
input_names=['input', 'label'],
output_names=['wrong-top1', 'wrong-top5']
)
pred = SimpleDatasetPredictor(pred_config, ds)
acc1, acc5 = RatioCounter(), RatioCounter()
for o in pred.get_result():
batch_size = o[0].shape[0]
acc1.feed(o[0].sum(), batch_size)
acc5.feed(o[1].sum(), batch_size)
print("Top1 Error: {}".format(acc1.ratio))
print("Top5 Error: {}".format(acc5.ratio))
def name_conversion(caffe_layer_name):
""" Convert a caffe parameter name to a tensorflow parameter name as
defined in the above model """
# beginning & end mapping
NAME_MAP = {'bn_conv1/beta': 'conv0/bn/beta',
'bn_conv1/gamma': 'conv0/bn/gamma',
'bn_conv1/mean/EMA': 'conv0/bn/mean/EMA',
'bn_conv1/variance/EMA': 'conv0/bn/variance/EMA',
'conv1/W': 'conv0/W', 'conv1/b': 'conv0/b',
'fc1000/W': 'fc1000/W', 'fc1000/b': 'fc1000/b'}
if caffe_layer_name in NAME_MAP:
return NAME_MAP[caffe_layer_name]
s = re.search('([a-z]+)([0-9]+)([a-z]+)_', caffe_layer_name)
if s is None:
s = re.search('([a-z]+)([0-9]+)([a-z]+)([0-9]+)_', caffe_layer_name)
layer_block_part1 = s.group(3)
layer_block_part2 = s.group(4)
assert layer_block_part1 in ['a', 'b']
layer_block = 0 if layer_block_part1 == 'a' else int(layer_block_part2)
else:
layer_block = ord(s.group(3)) - ord('a')
layer_type = s.group(1)
layer_group = s.group(2)
layer_branch = int(re.search('_branch([0-9])', caffe_layer_name).group(1))
assert layer_branch in [1, 2]
if layer_branch == 2:
layer_id = re.search('_branch[0-9]([a-z])/', caffe_layer_name).group(1)
layer_id = ord(layer_id) - ord('a') + 1
TYPE_DICT = {'res':'conv', 'bn':'bn'}
tf_name = caffe_layer_name[caffe_layer_name.index('/'):]
layer_type = TYPE_DICT[layer_type] + \
(str(layer_id) if layer_branch == 2 else 'shortcut')
tf_name = 'group{}/block{}/{}'.format(
int(layer_group) - 2, layer_block, layer_type) + tf_name
return tf_name
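# Worked examples of the mapping implemented above (traced through the code):
#   'res2a_branch2a/W'  -> 'group0/block0/conv1/W'
#   'bn2a_branch1/beta' -> 'group0/block0/bnshortcut/beta'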
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') # nargs='*' in multi mode
parser.add_argument('--load', required=True,
help='.npy model file generated by tensorpack.utils.loadcaffe')
parser.add_argument('-d', '--depth', help='resnet depth', required=True, type=int, choices=[50, 101, 152])
parser.add_argument('--input', help='an input image')
parser.add_argument('--eval', help='ILSVRC dir to run validation on')
args = parser.parse_args()
assert args.input or args.eval, "Choose either input or eval!"
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
MODEL_DEPTH = args.depth
param = np.load(args.load, encoding='latin1').item()
resnet_param = {}
for k, v in six.iteritems(param):
try:
newname = name_conversion(k)
except:
logger.error("Exception when processing caffe layer {}".format(k))
raise
logger.info("Name Transform: " + k + ' --> ' + newname)
resnet_param[newname] = v
if args.eval:
eval_on_ILSVRC12(resnet_param, args.eval)
else:
run_test(resnet_param, args.input)
| 39.895288 | 110 | 0.591339 | [
"MIT"
] | ivankreso/semseg | OLD/models/resnet/old/resnet_orig.py | 7,620 | Python |
from healthcheck import HealthCheck, EnvironmentDump
from src.frameworks_and_drivers.healthchecks.postgres import postgres_healthcheck
from src.frameworks_and_drivers.healthchecks.redis import redis_healthcheck
from src.frameworks_and_drivers.healthchecks.info import application_data
def init_app(app):
health = HealthCheck(app, '/healthcheck')
health.add_check(redis_healthcheck)
health.add_check(postgres_healthcheck)
envdump = EnvironmentDump(app, '/environment')
envdump.add_section("application", application_data)
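# Wiring sketch (hypothetical Flask factory):
#   from flask import Flask
#   app = Flask(__name__)
#   init_app(app)
#   # GET /healthcheck now runs the redis/postgres checks; GET /environment dumps app info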
| 41.769231 | 81 | 0.830571 | [
"MIT"
] | diododias/flask-boilerplate | src/frameworks_and_drivers/healthchecks/healthchecks.py | 543 | Python |
# test syntax and type errors specific to viper code generation
def test(code):
try:
exec(code)
except (SyntaxError, ViperTypeError, NotImplementedError) as e:
print(repr(e))
# viper: annotations must be identifiers
test("@micropython.viper\ndef f(a:1): pass")
test("@micropython.viper\ndef f() -> 1: pass")
# unknown type
test("@micropython.viper\ndef f(x:unknown_type): pass")
# local used before type known
test(
"""
@micropython.viper
def f():
print(x)
x = 1
"""
)
# type mismatch storing to local
test(
"""
@micropython.viper
def f():
x = 1
y = []
x = y
"""
)
# can't implicitly convert type to bool
test(
"""
@micropython.viper
def f():
x = ptr(0)
if x:
pass
"""
)
# incorrect return type
test("@micropython.viper\ndef f() -> int: return []")
# can't do binary op between incompatible types
test("@micropython.viper\ndef f(): 1 + []")
# can't load
test("@micropython.viper\ndef f(): 1[0]")
test("@micropython.viper\ndef f(): 1[x]")
# can't store
test("@micropython.viper\ndef f(): 1[0] = 1")
test("@micropython.viper\ndef f(): 1[x] = 1")
test("@micropython.viper\ndef f(x:int): x[0] = x")
test("@micropython.viper\ndef f(x:ptr32): x[0] = None")
test("@micropython.viper\ndef f(x:ptr32): x[x] = None")
# must raise an object
test("@micropython.viper\ndef f(): raise 1")
# unary ops not implemented
test("@micropython.viper\ndef f(x:int): +x")
test("@micropython.viper\ndef f(x:int): -x")
test("@micropython.viper\ndef f(x:int): ~x")
# binary op not implemented
test("@micropython.viper\ndef f(x:int): res = x in x")
# yield (from) not implemented
test("@micropython.viper\ndef f(): yield")
test("@micropython.viper\ndef f(): yield from f")
# passing a ptr to a Python function not implemented
test("@micropython.viper\ndef f(): print(ptr(1))")
# cast of a casting identifier not implemented
test("@micropython.viper\ndef f(): int(int)")
| 22.206897 | 67 | 0.653727 | [
"MIT"
] | 0xa71a5/micropython | tests/micropython/viper_error.py | 1,932 | Python |
import sys
from common import unittest2, platform_skip
import pyuv
TEST_PORT = 1234
if sys.platform == 'win32':
TEST_PIPE = '\\\\.\\pipe\\test-pipe'
else:
TEST_PIPE = 'test-pipe'
@platform_skip(["win32"])
class IPCTest(unittest2.TestCase):
def setUp(self):
self.loop = pyuv.Loop.default_loop()
def proc_exit_cb(self, proc, exit_status, term_signal):
proc.close()
def on_client_connection(self, client, error):
client.close()
self.connections.remove(client)
def make_many_connections(self):
for i in range(100):
conn = pyuv.TCP(self.loop)
self.connections.append(conn)
conn.connect(("127.0.0.1", TEST_PORT), self.on_client_connection)
def on_ipc_connection(self, handle, error):
if self.local_conn_accepted:
return
conn = pyuv.TCP(self.loop)
self.tcp_server.accept(conn)
conn.close()
self.tcp_server.close()
self.local_conn_accepted = True
def on_channel_read(self, handle, data, pending, error):
if self.tcp_server is None:
self.assertEqual(pending, pyuv.UV_TCP)
self.tcp_server = pyuv.TCP(self.loop)
self.channel.accept(self.tcp_server)
self.tcp_server.listen(self.on_ipc_connection, 12)
self.assertEqual(data.strip(), b"hello")
self.channel.write(b"world")
self.make_many_connections()
else:
if data.strip() == b"accepted_connection":
self.assertEqual(pending, pyuv.UV_UNKNOWN_HANDLE)
self.channel.close()
def test_ipc1(self):
self.connections = []
self.local_conn_accepted = False
self.tcp_server = None
self.channel = pyuv.Pipe(self.loop, True)
stdio = [pyuv.StdIO(stream=self.channel, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_READABLE_PIPE|pyuv.UV_WRITABLE_PIPE)]
proc = pyuv.Process(self.loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", " proc_ipc.py", "listen_before_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
else:
proc.spawn(file=sys.executable , args=["proc_ipc.py", "listen_before_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
self.channel.start_read2(self.on_channel_read)
self.loop.run()
def test_ipc2(self):
self.connections = []
self.local_conn_accepted = False
self.tcp_server = None
self.channel = pyuv.Pipe(self.loop, True)
stdio = [pyuv.StdIO(stream=self.channel, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_READABLE_PIPE|pyuv.UV_WRITABLE_PIPE)]
proc = pyuv.Process(self.loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", " proc_ipc.py", "listen_after_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
else:
proc.spawn(file=sys.executable, args=["proc_ipc.py", "listen_after_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
self.channel.start_read2(self.on_channel_read)
self.loop.run()
@platform_skip(["win32"])
class IPCSendRecvTest(unittest2.TestCase):
def setUp(self):
self.loop = pyuv.Loop.default_loop()
def proc_exit_cb(self, proc, exit_status, term_signal):
proc.close()
def on_channel_read(self, handle, data, pending, error):
self.assertEqual(pending, pyuv.UV_NAMED_PIPE)
self.recv_pipe = pyuv.Pipe(self.loop)
self.channel.accept(self.recv_pipe)
self.channel.close()
self.send_pipe.close()
self.recv_pipe.close()
def test_ipc_send_recv(self):
# Handle that will be sent to the process and back
self.send_pipe = pyuv.Pipe(self.loop, True)
self.send_pipe.bind(TEST_PIPE)
self.channel = pyuv.Pipe(self.loop, True)
stdio = [pyuv.StdIO(stream=self.channel, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_READABLE_PIPE|pyuv.UV_WRITABLE_PIPE)]
proc = pyuv.Process(self.loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", " proc_ipc_echo.py"], exit_callback=self.proc_exit_cb, stdio=stdio)
else:
proc.spawn(file=sys.executable, args=["proc_ipc_echo.py"], exit_callback=self.proc_exit_cb, stdio=stdio)
self.channel.write2(b".", self.send_pipe)
self.channel.start_read2(self.on_channel_read)
self.loop.run()
if __name__ == '__main__':
unittest2.main(verbosity=2)
| 37.165289 | 136 | 0.648655 | [
"MIT"
] | benoitc/pyuv | tests/test_ipc.py | 4,497 | Python |
import os
import urllib.parse
from datetime import timedelta
import flask
import requests
from cachetools import TTLCache
from flask import current_app, session, request, redirect, abort, jsonify
from flask_oauthlib.client import OAuth
from werkzeug import security
from urllib.parse import urlparse
from common.rpc.auth import get_endpoint
from common.rpc.secrets import get_secret
from common.url_for import get_host, url_for
AUTHORIZED_ROLES = ("staff", "instructor", "grader")
REDIRECT_KEY = "REDIRECT_KEY"
USER_CACHE = TTLCache(1000, timedelta(minutes=30).total_seconds())
def get_user():
"""Get some information on the currently logged in user.
:return: a dictionary representing user data (see
`here <https://okpy.github.io/documentation/ok-api.html#users-view-a-specific-user>`_
for an example)
"""
key = session.get("access_token")
if key in USER_CACHE:
data = USER_CACHE[key]
else:
data = current_app.remote.get("user")
# only cache if the access token is found
if key:
USER_CACHE[key] = data
return data.data["data"]
def is_logged_in():
"""Get whether the current user is logged into the current session.
:return: ``True`` if the user is logged in, ``False`` otherwise
"""
return "access_token" in session
def is_staff(course):
"""Get whether the current user is enrolled as staff, instructor, or grader
for ``course``.
:param course: the course code to check
:type course: str
:return: ``True`` if the user is on staff, ``False`` otherwise
"""
return is_enrolled(course, roles=AUTHORIZED_ROLES)
def is_enrolled(course, *, roles=None):
"""Check whether the current user is enrolled as any of the ``roles`` for
``course``.
:param course: the course code to check
:type course: str
:param roles: the roles to check for the user
:type roles: list-like
:return: ``True`` if the user is any of ``roles``, ``False`` otherwise
"""
try:
endpoint = get_endpoint(course=course)
for participation in get_user()["participations"]:
if roles and participation["role"] not in roles:
continue
if participation["course"]["offering"] != endpoint:
continue
return True
return False
except Exception as e:
# fail safe!
print(e)
return False
def login():
"""Store the current URL as the redirect target on success, then redirect
to the login endpoint for the current app.
:return: a :func:`~flask.redirect` to the login endpoint for the current
:class:`~flask.Flask` app.
"""
session[REDIRECT_KEY] = urlparse(request.url)._replace(netloc=get_host()).geturl()
return redirect(url_for("login"))
def create_oauth_client(
app: flask.Flask,
consumer_key,
secret_key=None,
success_callback=None,
return_response=None,
):
"""Add Okpy OAuth for ``consumer_key`` to the current ``app``.
Specifically, adds an endpoint ``/oauth/login`` that redirects to the Okpy
login process, ``/oauth/authorized`` that receives the successful result
of authentication, ``/api/user`` that acts as a test endpoint, and a
:meth:`~flask_oauthlib.client.OAuthRemoteApp.tokengetter`.
:param app: the app to add OAuth endpoints to
:type app: ~flask.Flask
:param consumer_key: the OAuth client consumer key
:type consumer_key: str
:param secret_key: the OAuth client secret, inferred using
:func:`~common.rpc.secrets.get_secret` if omitted
:type secret_key: str
:param success_callback: an optional function to call upon login
:type success_callback: func
:param return_response: an optional function to send the OAuth response to
:type return_response: func
"""
oauth = OAuth(app)
if os.getenv("ENV") == "prod":
if secret_key is None:
app.secret_key = get_secret(secret_name="OKPY_OAUTH_SECRET")
else:
app.secret_key = secret_key
else:
consumer_key = "local-dev-all"
app.secret_key = "kmSPJYPzKJglOOOmr7q0irMfBVMRFXN"
if not app.debug:
app.config.update(
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_HTTPONLY=True,
SESSION_COOKIE_SAMESITE="Lax",
)
remote = oauth.remote_app(
"ok-server", # Server Name
consumer_key=consumer_key,
consumer_secret=app.secret_key,
request_token_params={"scope": "all", "state": lambda: security.gen_salt(10)},
base_url="https://okpy.org/api/v3/",
request_token_url=None,
access_token_method="POST",
access_token_url="https://okpy.org/oauth/token",
authorize_url="https://okpy.org/oauth/authorize",
)
def check_req(uri, headers, body):
"""Add access_token to the URL Request."""
if "access_token" not in uri and session.get("access_token"):
params = {"access_token": session.get("access_token")[0]}
url_parts = list(urllib.parse.urlparse(uri))
query = dict(urllib.parse.parse_qsl(url_parts[4]))
query.update(params)
url_parts[4] = urllib.parse.urlencode(query)
uri = urllib.parse.urlunparse(url_parts)
return uri, headers, body
remote.pre_request = check_req
@app.route("/oauth/login")
def login():
if app.debug:
response = remote.authorize(callback=url_for("authorized", _external=True))
else:
response = remote.authorize(
url_for("authorized", _external=True, _scheme="https")
)
return response
@app.route("/oauth/authorized")
def authorized():
resp = remote.authorized_response()
if resp is None:
return "Access denied: error=%s" % (request.args["error"])
if isinstance(resp, dict) and "access_token" in resp:
session["access_token"] = (resp["access_token"], "")
if return_response:
return_response(resp)
if success_callback:
success_callback()
target = session.get(REDIRECT_KEY)
if target:
session.pop(REDIRECT_KEY)
return redirect(target)
return redirect(url_for("index"))
@app.route("/api/user", methods=["POST"])
def client_method():
if "access_token" not in session:
abort(401)
token = session["access_token"][0]
r = requests.get("https://okpy.org/api/v3/user/?access_token={}".format(token))
if not r.ok:
abort(401)
return jsonify(r.json())
@remote.tokengetter
def get_oauth_token():
return session.get("access_token")
app.remote = remote
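# Usage sketch (hypothetical app and consumer key; in prod the client secret is
# resolved via get_secret, so only the key is passed in):
#   app = flask.Flask(__name__)
#   create_oauth_client(app, "my-consumer-key")
#   # Views can then call is_logged_in(), redirect via login(), and read get_user().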
| 31.031818 | 93 | 0.642889 | [
"MIT"
] | Cal-CS-61A-Staff/cs61a-apps | common/oauth_client.py | 6,827 | Python |
"""
Module for testing goftest module.
"""
__author__ = "wittawat"
import unittest
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as testing
import scipy.stats as stats
import sbibm.third_party.kgof.data as data
import sbibm.third_party.kgof.density as density
import sbibm.third_party.kgof.glo as glo
import sbibm.third_party.kgof.goftest as gof
import sbibm.third_party.kgof.kernel as kernel
import sbibm.third_party.kgof.util as util
class TestFSSD(unittest.TestCase):
def setUp(self):
pass
def test_basic(self):
"""
Nothing special. Just test basic things.
"""
seed = 12
# sample
n = 100
alpha = 0.01
for d in [1, 4]:
mean = np.zeros(d)
variance = 1
isonorm = density.IsotropicNormal(mean, variance)
# only one dimension of the mean is shifted
# draw_mean = mean + np.hstack((1, np.zeros(d-1)))
draw_mean = mean + 0
draw_variance = variance + 1
X = util.randn(n, d, seed=seed) * np.sqrt(draw_variance) + draw_mean
dat = data.Data(X)
# Test
for J in [1, 3]:
sig2 = util.meddistance(X, subsample=1000) ** 2
k = kernel.KGauss(sig2)
# random test locations
V = util.fit_gaussian_draw(X, J, seed=seed + 1)
null_sim = gof.FSSDH0SimCovObs(n_simulate=200, seed=3)
fssd = gof.FSSD(isonorm, k, V, null_sim=null_sim, alpha=alpha)
tresult = fssd.perform_test(dat, return_simulated_stats=True)
# assertions
self.assertGreaterEqual(tresult["pvalue"], 0)
self.assertLessEqual(tresult["pvalue"], 1)
def test_optimized_fssd(self):
"""
Test FSSD test with parameter optimization.
"""
seed = 4
# sample size
n = 179
alpha = 0.01
for d in [1, 3]:
mean = np.zeros(d)
variance = 1.0
p = density.IsotropicNormal(mean, variance)
# Mean difference. obvious reject
ds = data.DSIsotropicNormal(mean + 4, variance + 0)
dat = ds.sample(n, seed=seed)
# test
for J in [1, 4]:
opts = {"reg": 1e-2, "max_iter": 10, "tol_fun": 1e-3, "disp": False}
tr, te = dat.split_tr_te(tr_proportion=0.3, seed=seed + 1)
Xtr = tr.X
gwidth0 = util.meddistance(Xtr, subsample=1000) ** 2
# random test locations
V0 = util.fit_gaussian_draw(Xtr, J, seed=seed + 1)
V_opt, gw_opt, opt_result = gof.GaussFSSD.optimize_locs_widths(
p, tr, gwidth0, V0, **opts
)
# construct a test
k_opt = kernel.KGauss(gw_opt)
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=10)
fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
fssd_opt_result = fssd_opt.perform_test(te, return_simulated_stats=True)
assert fssd_opt_result["h0_rejected"]
def test_auto_init_opt_fssd(self):
"""
Test FSSD-opt test with automatic parameter initialization.
"""
seed = 5
# sample size
n = 191
alpha = 0.01
for d in [1, 4]:
mean = np.zeros(d)
variance = 1.0
p = density.IsotropicNormal(mean, variance)
# Mean difference. obvious reject
ds = data.DSIsotropicNormal(mean + 4, variance + 0)
dat = ds.sample(n, seed=seed)
# test
for J in [1, 3]:
opts = {"reg": 1e-2, "max_iter": 10, "tol_fun": 1e-3, "disp": False}
tr, te = dat.split_tr_te(tr_proportion=0.3, seed=seed + 1)
V_opt, gw_opt, opt_result = gof.GaussFSSD.optimize_auto_init(
p, tr, J, **opts
)
# construct a test
k_opt = kernel.KGauss(gw_opt)
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=10)
fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
fssd_opt_result = fssd_opt.perform_test(te, return_simulated_stats=True)
assert fssd_opt_result["h0_rejected"]
def test_ustat_h1_mean_variance(self):
seed = 20
# sample
n = 200
alpha = 0.01
for d in [1, 4]:
mean = np.zeros(d)
variance = 1
isonorm = density.IsotropicNormal(mean, variance)
draw_mean = mean + 2
draw_variance = variance + 1
X = util.randn(n, d, seed=seed) * np.sqrt(draw_variance) + draw_mean
dat = data.Data(X)
# Test
for J in [1, 3]:
sig2 = util.meddistance(X, subsample=1000) ** 2
k = kernel.KGauss(sig2)
# random test locations
V = util.fit_gaussian_draw(X, J, seed=seed + 1)
null_sim = gof.FSSDH0SimCovObs(n_simulate=200, seed=3)
fssd = gof.FSSD(isonorm, k, V, null_sim=null_sim, alpha=alpha)
fea_tensor = fssd.feature_tensor(X)
u_mean, u_variance = gof.FSSD.ustat_h1_mean_variance(fea_tensor)
# assertions
self.assertGreaterEqual(u_variance, 0)
# should reject H0
self.assertGreaterEqual(u_mean, 0)
def tearDown(self):
pass
# end class TestFSSD
class TestSteinWitness(unittest.TestCase):
def test_basic(self):
d = 3
p = density.IsotropicNormal(mean=np.zeros(d), variance=3.0)
q = density.IsotropicNormal(mean=np.zeros(d) + 2, variance=3.0)
k = kernel.KGauss(2.0)
ds = q.get_datasource()
n = 97
dat = ds.sample(n, seed=3)
witness = gof.SteinWitness(p, k, dat)
# points to evaluate the witness
J = 4
V = np.random.randn(J, d) * 2
evals = witness(V)
testing.assert_equal(evals.shape, (J, d))
# end class TestSteinWitness
if __name__ == "__main__":
unittest.main()
| 32.458763 | 88 | 0.545974 | [
"MIT"
] | mackelab/sbibm | sbibm/third_party/kgof/test/test_goftest.py | 6,297 | Python |
# coding=utf-8
# unpack.py
# Author: Meghan Clark
import binascii
import struct
from .message import HEADER_SIZE_BYTES, Message
from .msgtypes import *
# Creates a LIFX Message out of packed binary data
# If the message type is not one of the officially released ones above, it will create just a Message out of it
# If it's not in the LIFX protocol format, uhhhhh...we'll put that on a to-do list.
def unpack_lifx_message(packed_message):
header_str = packed_message[0:HEADER_SIZE_BYTES]
payload_str = packed_message[HEADER_SIZE_BYTES:]
size = struct.unpack("<H", header_str[0:2])[0]
flags = struct.unpack("<H", header_str[2:4])[0]
origin = (flags >> 14) & 3
tagged = (flags >> 13) & 1
addressable = (flags >> 12) & 1
protocol = flags & 4095
source_id = struct.unpack("<I", header_str[4:8])[0]
target_addr = ":".join([('%02x' % b) for b in struct.unpack("<" + ("B"*6), header_str[8:14])])
response_flags = struct.unpack("<B", header_str[22:23])[0]
ack_requested = response_flags & 2
response_requested = response_flags & 1
seq_num = struct.unpack("<B", header_str[23:24])[0]
message_type = struct.unpack("<H", header_str[32:34])[0]
message = None
if message_type == MSG_IDS[GetService]:
message = GetService(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[StateService]:
service = struct.unpack("<B", payload_str[0:1])[0]
port = struct.unpack("<I", payload_str[1:5])[0]
payload = {"service": service, "port": port}
message = StateService(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetHostInfo]:
message = GetHostInfo(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[StateHostInfo]:
signal = struct.unpack("<f", payload_str[0:4])[0]
tx = struct.unpack("<I", payload_str[4:8])[0]
rx = struct.unpack("<I", payload_str[8:12])[0]
reserved1 = struct.unpack("<h", payload_str[12:14])[0]
payload = {"signal": signal, "tx": tx, "rx": rx, "reserved1": reserved1}
message = StateHostInfo(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetHostFirmware]:
message = GetHostFirmware(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[StateHostFirmware]:
build = struct.unpack("<Q", payload_str[0:8])[0]
reserved1 = struct.unpack("<Q", payload_str[8:16])[0]
version = struct.unpack("<I", payload_str[16:20])[0]
payload = {"build": build, "reserved1": reserved1, "version": version}
message = StateHostFirmware(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetWifiInfo]:
message = GetWifiInfo(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[StateWifiInfo]:
signal = struct.unpack("<f", payload_str[0:4])[0]
tx = struct.unpack("<I", payload_str[4:8])[0]
rx = struct.unpack("<I", payload_str[8:12])[0]
reserved1 = struct.unpack("<h", payload_str[12:14])[0]
payload = {"signal": signal, "tx": tx, "rx": rx, "reserved1": reserved1}
message = StateWifiInfo(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetWifiFirmware]:
message = GetWifiFirmware(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[StateWifiFirmware]:
build = struct.unpack("<Q", payload_str[0:8])[0]
reserved1 = struct.unpack("<Q", payload_str[8:16])[0]
version = struct.unpack("<I", payload_str[16:20])[0]
payload = {"build": build, "reserved1": reserved1, "version": version}
message = StateWifiFirmware(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetPower]:
message = GetPower(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[SetPower]:
power_level = struct.unpack("<H", payload_str[0:2])[0]
payload = {"power_level": power_level}
message = SetPower(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[StatePower]:
power_level = struct.unpack("<H", payload_str[0:2])[0]
payload = {"power_level": power_level}
message = StatePower(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetLabel]:
message = GetLabel(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[SetLabel]:
label = binascii.unhexlify("".join(["%2.2x" % (b & 0x000000ff) for b in struct.unpack("<" + ("b"*32), payload_str[0:32])])).replace(b'\x00', b'')
label = label.decode('utf-8')
payload = {"label": label}
message = SetLabel(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[StateLabel]:
label = binascii.unhexlify("".join(["%2.2x" % (b & 0x000000ff) for b in struct.unpack("<" + ("b"*32), payload_str[0:32])])).replace(b'\x00', b'')
label = label.decode('utf-8')
payload = {"label": label}
message = StateLabel(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetLocation]:
message = GetLocation(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[StateLocation]:
location = [b for b in struct.unpack("<" + ("B"*16), payload_str[0:16])]
label = binascii.unhexlify("".join(["%2.2x" % (b & 0x000000ff) for b in struct.unpack("<" + ("b"*32), payload_str[16:48])])).replace(b'\x00', b'')
label = label.decode('utf-8')
updated_at = struct.unpack("<Q", payload_str[48:56])[0]
payload = {"location": location, "label": label, "updated_at": updated_at}
message = StateLocation(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetGroup]:
message = GetGroup(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[StateGroup]:
group = [b for b in struct.unpack("<" + ("B"*16), payload_str[0:16])]
label = binascii.unhexlify("".join(["%2.2x" % (b & 0x000000ff) for b in struct.unpack("<" + ("b"*32), payload_str[16:48])])).replace(b'\x00', b'')
label = label.decode('utf-8')
updated_at = struct.unpack("<Q", payload_str[48:56])[0]
payload = {"group": group, "label": label, "updated_at": updated_at}
message = StateGroup(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetVersion]:
message = GetVersion(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[StateVersion]:
vendor = struct.unpack("<I", payload_str[0:4])[0]
product = struct.unpack("<I", payload_str[4:8])[0]
version = struct.unpack("<I", payload_str[8:12])[0]
payload = {"vendor": vendor, "product": product, "version": version}
message = StateVersion(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetInfo]:
message = GetInfo(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[StateInfo]:
time = struct.unpack("<Q", payload_str[0:8])[0]
uptime = struct.unpack("<Q", payload_str[8:16])[0]
downtime = struct.unpack("<Q", payload_str[16:24])[0]
payload = {"time": time, "uptime": uptime, "downtime": downtime}
message = StateInfo(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[Acknowledgement]:
message = Acknowledgement(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[EchoRequest]:
byte_array_len = len(payload_str)
byte_array = [b for b in struct.unpack("<" + ("B"*byte_array_len), payload_str[0:byte_array_len])]
payload = {"byte_array": byte_array}
message = EchoRequest(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[EchoResponse]:
byte_array_len = len(payload_str)
byte_array = [b for b in struct.unpack("<" + ("B"*byte_array_len), payload_str[0:byte_array_len])]
payload = {"byte_array": byte_array}
message = EchoResponse(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[LightGet]:
message = LightGet(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[LightSetColor]:
reserved = struct.unpack("<B", payload_str[0:1])[0]
color = struct.unpack("<" + ("H"*4), payload_str[1:9])
duration = struct.unpack("<I", payload_str[9:13])[0]
payload = {"color": color, "duration": duration}
message = LightSetColor(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[LightState]:
color = struct.unpack("<" + ("H"*4), payload_str[0:8])
reserved1 = struct.unpack("<H", payload_str[8:10])[0]
power_level = struct.unpack("<H", payload_str[10:12])[0]
label = binascii.unhexlify("".join(["%2.2x" % (b & 0x000000ff) for b in struct.unpack("<" + ("b"*32), payload_str[12:44])])).replace(b'\x00', b'')
label = label.decode('utf-8')
reserved2 = struct.unpack("<Q", payload_str[44:52])[0]
payload = {"color": color, "reserved1": reserved1, "power_level": power_level, "label": label, "reserved2": reserved2}
message = LightState(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[LightGetPower]:
message = LightGetPower(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[LightSetPower]:
power_level = struct.unpack("<H", payload_str[0:2])[0]
duration = struct.unpack("<I", payload_str[2:6])[0]
payload = {"power_level": power_level, "duration": duration}
message = LightSetPower(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[LightStatePower]:
power_level = struct.unpack("<H", payload_str[0:2])[0]
payload = {"power_level": power_level}
message = LightStatePower(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[LightGetInfrared]: # 120
message = LightGetInfrared(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[LightStateInfrared]: # 121
infrared_brightness = struct.unpack("<H", payload_str[0:2])[0]
payload = {"infrared_brightness": infrared_brightness}
message = LightStateInfrared(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[LightSetInfrared]: # 122
infrared_brightness = struct.unpack("<H", payload_str[0:2])[0]
payload = {"infrared_brightness": infrared_brightness}
message = LightSetInfrared(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[MultiZoneSetColorZones]: #501
start_index = struct.unpack("<c", payload_str[0:1])[0]
start_index = ord(start_index) # 8 bit
end_index = struct.unpack("<c", payload_str[1:2])[0]
end_index = ord(end_index) #8 bit
color = struct.unpack("<" + ("H" * 4), payload_str[2:10])
duration = struct.unpack("<I", payload_str[10:14])[0]
apply = struct.unpack("<c", payload_str[14:15])[0]
apply = ord(apply) #8 bit
payload = {"start_index": start_index, "end_index": end_index, "color": color, "duration": duration, "apply": apply}
message = MultiZoneSetColorZones(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[MultiZoneGetColorZones]: #502
start_index = struct.unpack("<c", payload_str[0:1])[0]
start_index = ord(start_index) # 8 bit
end_index = struct.unpack("<c", payload_str[1:2])[0]
end_index = ord(end_index) #8 bit
payload = {"start_index": start_index, "end_index": end_index}
message = MultiZoneGetColorZones(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[MultiZoneStateZone]: #503
count = struct.unpack("<c", payload_str[0:1])[0]
count = ord(count) # 8 bit
index = struct.unpack("<c", payload_str[1:2])[0]
index = ord(index) #8 bit
color = struct.unpack("<" + ("H" * 4), payload_str[2:10])
payload = {"count": count, "index": index, "color": color}
message = MultiZoneStateZone(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[MultiZoneStateMultiZone]: #506
count = struct.unpack("<c", payload_str[0:1])[0]
count = ord(count) # 8 bit
index = struct.unpack("<c", payload_str[1:2])[0]
index = ord(index) #8 bit
colors = []
for i in range(8):
color = struct.unpack("<" + ("H" * 4), payload_str[2+(i*8):10+(i*8)])
colors.append(color)
payload = {"count": count, "index": index, "color": colors}
message = MultiZoneStateMultiZone(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetDeviceChain]: #701
message = GetDeviceChain(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[StateDeviceChain]: #702
start_index = struct.unpack("<B", payload_str[0:1])[0]
tile_devices = []
tilesize_bytes = 55
for i in range(16):
offset = (i * tilesize_bytes)
tile = {"reserved1": struct.unpack("<h", payload_str[1+offset:3+offset])[0],
"reserved2": struct.unpack("<h", payload_str[3+offset:5+offset])[0],
"reserved3": struct.unpack("<h", payload_str[5+offset:7+offset])[0],
"reserved4": struct.unpack("<h", payload_str[7+offset:9+offset])[0],
"user_x": struct.unpack("<f", payload_str[9+offset:13+offset])[0],
"user_y": struct.unpack("<f", payload_str[13+offset:17+offset])[0],
"width": struct.unpack("<B", payload_str[17+offset:18+offset])[0],
"height": struct.unpack("<B", payload_str[18+offset:19+offset])[0],
"reserved5": struct.unpack("<B", payload_str[19+offset:20+offset])[0],
"device_version_vendor": struct.unpack("<I", payload_str[20+offset:24+offset])[0],
"device_version_product": struct.unpack("<I", payload_str[24+offset:28+offset])[0],
"device_version_version": struct.unpack("<I", payload_str[28+offset:32+offset])[0],
"firmware_build": struct.unpack("<Q", payload_str[32+offset:40+offset])[0],
"reserved6": struct.unpack("<Q", payload_str[40+offset:48+offset])[0],
"firmware_version": struct.unpack("<I", payload_str[48+offset:52+offset])[0],
"reserved7": struct.unpack("<I", payload_str[52+offset:56+offset])[0]}
tile_devices.append(tile)
total_count = struct.unpack("<B", payload_str[881:882])[0]
payload = {"start_index": start_index, "total_count": total_count, "tile_devices": tile_devices}
message = StateDeviceChain(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[SetUserPosition]: #703
tile_index = struct.unpack("<B", payload_str[0:1])[0]
reserved = struct.unpack("<H", payload_str[1:3])[0]
user_x = struct.unpack("<f", payload_str[3:7])[0]
user_y = struct.unpack("<f", payload_str[7:11])[0]
payload = {"tile_index": tile_index, "reserved": reserved, "user_x": user_x, "user_y": user_y}
message = SetUserPosition(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[GetTileState64]: #707
tile_index = struct.unpack("<B", payload_str[0:1])[0]
length = struct.unpack("<B", payload_str[1:2])[0]
reserved = struct.unpack("<B", payload_str[2:3])[0]
x = struct.unpack("<B", payload_str[3:4])[0]
y = struct.unpack("<B", payload_str[4:5])[0]
width = struct.unpack("<B", payload_str[5:6])[0]
payload = {"tile_index": tile_index, "length": length, "reserved": reserved, "x": x, "y": y, "width": width}
message = GetTileState64(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[StateTileState64]: #711
tile_index = struct.unpack("<B", payload_str[0:1])[0]
reserved = struct.unpack("<B", payload_str[1:2])[0]
x = struct.unpack("<B", payload_str[2:3])[0]
y = struct.unpack("<B", payload_str[3:4])[0]
width = struct.unpack("<B", payload_str[4:5])[0]
colors = []
for i in range(64):
color = struct.unpack("<" + ("H" * 4), payload_str[5+(i*8):13+(i*8)])
colors.append(color)
payload = {"tile_index": tile_index, "reserved": reserved, "x": x, "y": y, "width": width, "colors": colors}
message = StateTileState64(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
elif message_type == MSG_IDS[SetTileState64]: #715
tile_index = struct.unpack("<B", payload_str[0:1])[0]
length = struct.unpack("<B", payload_str[1:2])[0]
reserved = struct.unpack("<B", payload_str[2:3])[0]
x = struct.unpack("<B", payload_str[3:4])[0]
y = struct.unpack("<B", payload_str[4:5])[0]
width = struct.unpack("<B", payload_str[5:6])[0]
duration = struct.unpack("<I", payload_str[6:10])[0]
colors = []
for i in range(64):
color = struct.unpack("<" + ("H" * 4), payload_str[10+(i*8):18+(i*8)])
colors.append(color)
payload = {"tile_index": tile_index, "length": length, "reserved": reserved, "x": x, "y": y, "width": width, "duration": duration, "colors": colors}
message = SetTileState64(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
else:
message = Message(message_type, target_addr, source_id, seq_num, ack_requested, response_requested)
message.size = size
message.origin = origin
message.tagged = tagged
message.addressable = addressable
message.protocol = protocol
message.source_id = source_id
message.header = header_str
message.payload = payload_str
message.packed_message = packed_message
return message
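# Usage sketch (hypothetical listener; 56700 is the usual LIFX UDP port):
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.bind(("", 56700))
#   packed, addr = sock.recvfrom(1024)
#   msg = unpack_lifx_message(packed)
#   print(type(msg).__name__, msg.size)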
| 56.146132 | 156 | 0.651187 | [
"MIT"
] | DrTexx/lifxlan | lifxlan/unpack.py | 19,595 | Python |
import json
import logging.config
import os
default_config = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
"info_file_handler": {
"class": "logging.handlers.RotatingFileHandler",
"level": "INFO",
"formatter": "simple",
"filename": "gee_assets_info.log",
"maxBytes": 10485760,
"backupCount": 20,
"encoding": "utf8"
},
"error_file_handler": {
"class": "logging.handlers.RotatingFileHandler",
"level": "ERROR",
"formatter": "simple",
"filename": "gee_assets_errors.log",
"maxBytes": 10485760,
"backupCount": 20,
"encoding": "utf8"
}
},
"root": {
"level": "INFO",
"handlers": ["console", "info_file_handler", "error_file_handler"]
}
}
def setup_logging():
path = os.path.join(os.path.dirname(__file__), 'logconfig.json')
try:
with open(path, 'rt') as f:
config = json.load(f)
except Exception as e:
logging.exception('Could not load logconfig.json. Loading default logging configuration.')
config = default_config
logging.config.dictConfig(config)
def get_credential(file_path):
"""
Read credential json file and return
username and password
"""
with open(file_path) as json_file:
config = json.load(json_file)
assert "username" in config.keys()
assert "password" in config.keys()
return config["username"], config["password"]
| 26.929577 | 98 | 0.54341 | [
"Apache-2.0"
] | thipokKub/geeup | geeup/config.py | 1,912 | Python |
from direct.directnotify import DirectNotifyGlobal
import RingTrack
class RingAction:
notify = DirectNotifyGlobal.directNotify.newCategory('RingAction')
def __init__(self):
pass
def eval(self, t):
return (0, 0)
class RingActionStaticPos(RingAction):
def __init__(self, pos):
RingAction.__init__(self)
self.__pos = pos
def eval(self, t):
return self.__pos
class RingActionFunction(RingAction):
def __init__(self, func, args):
RingAction.__init__(self)
self.__func = func
self.__args = args
def eval(self, t):
return self.__func(t, *self.__args)
class RingActionRingTrack(RingAction):
def __init__(self, ringTrack):
RingAction.__init__(self)
self.__track = ringTrack
def eval(self, t):
return self.__track.eval(t)
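# Usage sketch (hypothetical circular motion; eval(t) returns a 2D offset tuple):
#   import math
#   orbit = RingActionFunction(lambda t, r: (r * math.cos(t), r * math.sin(t)), (2.0,))
#   orbit.eval(0.0)  # -> (2.0, 0.0)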
| 19.976744 | 70 | 0.656577 | [
"BSD-3-Clause"
] | AnonymousDeveloper65535/open-toontown | toontown/minigame/RingAction.py | 859 | Python |
###Title: Multiplication through repeated additions
###Purpose: This program multiplies two numbers through successive additions
###Author: Valmor Mantelli Jr.
###Date: 14/12/2018
###Version: 0.0.5
# Variable declarations
x = 0
y = 0
w = 0
z = 1
# Assign values to the variables
x = int(input("Enter the first number: "))
y = int(input("Enter the number to multiply by: "))
# Processing: add y to w, x times
while z <= x:
    w += y
    z += 1
# Output
print("%d x %d = %d" % (x, y, w))
| 15.212121 | 94 | 0.651394 | [
"MIT"
] | profnssorg/valmorMantelli1 | exer508.py | 522 | Python |
#!/usr/bin/python
# Copyright: (c) 2018, Johannes Brunswicker <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: utm_proxy_location
author:
- Johannes Brunswicker (@MatrixCrawler)
short_description: create, update or destroy reverse_proxy location entry in Sophos UTM
description:
- Create, update or destroy a reverse_proxy location entry in SOPHOS UTM.
- This module needs to have the REST Ability of the UTM to be activated.
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: true
access_control:
description:
- whether to activate the access control for the location
type: str
default: '0'
choices:
- '0'
- '1'
allowed_networks:
description:
- A list of allowed networks
type: list
default: REF_NetworkAny
auth_profile:
description:
- The reference name of the auth profile
backend:
description:
- A list of backends that are connected with this location declaration
default: []
be_path:
description:
- The path of the backend
comment:
description:
- The optional comment string
denied_networks:
description:
- A list of denied network references
default: []
hot_standby:
description:
- Activate hot standby mode
type: bool
default: False
path:
description:
- The path of the location
default: "/"
status:
description:
- Whether the location is active or not
type: bool
default: True
stickysession_id:
description:
- The stickysession id
default: ROUTEID
stickysession_status:
description:
- Enable the stickysession
type: bool
default: False
websocket_passthrough:
description:
- Enable the websocket passthrough
type: bool
default: False
extends_documentation_fragment:
- community.general.utm
'''
EXAMPLES = """
- name: Create UTM proxy_location
  utm_proxy_location:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestLocationEntry
backend: REF_OBJECT_STRING
state: present
- name: Remove UTM proxy_location
  utm_proxy_location:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestLocationEntry
state: absent
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: str
_locked:
description: Whether or not the object is currently locked
type: bool
_type:
description: The type of the object
type: str
name:
description: The name of the object
type: str
access_control:
description: Whether to use access control state
type: str
allowed_networks:
description: List of allowed network reference names
type: list
auth_profile:
description: The auth profile reference name
type: str
backend:
description: The backend reference name
type: str
be_path:
description: The backend path
type: str
comment:
description: The comment string
type: str
denied_networks:
description: The list of the denied network names
type: list
hot_standby:
            description: Use hot standby
type: bool
path:
description: Path name
type: str
status:
description: Whether the object is active or not
type: bool
stickysession_id:
description: The identifier of the stickysession
type: str
stickysession_status:
description: Whether to use stickysession or not
type: bool
websocket_passthrough:
description: Whether websocket passthrough will be used or not
type: bool
"""
from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
endpoint = "reverse_proxy/location"
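    # Object attributes the UTM helper compares to decide whether an update is needed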
key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment",
"denied_networks", "hot_standby", "path", "status", "stickysession_id",
"stickysession_status", "websocket_passthrough"]
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
auth_profile=dict(type='str', required=False, default=""),
backend=dict(type='list', elements='str', required=False, default=[]),
be_path=dict(type='str', required=False, default=""),
comment=dict(type='str', required=False, default=""),
denied_networks=dict(type='list', elements='str', required=False, default=[]),
hot_standby=dict(type='bool', required=False, default=False),
path=dict(type='str', required=False, default="/"),
status=dict(type='bool', required=False, default=True),
stickysession_id=dict(type='str', required=False, default='ROUTEID'),
stickysession_status=dict(type='bool', required=False, default=False),
websocket_passthrough=dict(type='bool', required=False, default=False),
)
)
try:
UTM(module, endpoint, key_to_check_for_changes).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| 31.20098 | 118 | 0.614925 | ["Apache-2.0"] | jkroepke/homelab | kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py | 6,365 | Python |
import rlkit.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import init_sawyer_camera_v1
from multiworld.envs.mujoco.cameras import sawyer_pick_and_place_camera
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.grill.launcher import grill_her_td3_full_experiment
import rlkit.torch.vae.vae_schedules as vae_schedules
from multiworld.envs.mujoco.sawyer_xyz.sawyer_pick_and_place \
import SawyerPickAndPlaceEnv, SawyerPickAndPlaceEnvYZ
from rlkit.envs.goal_generation.pickup_goal_dataset import \
generate_vae_dataset, get_image_presampled_goals_from_vae_env
from multiworld.envs.mujoco.cameras import \
sawyer_pick_and_place_camera, sawyer_pick_and_place_camera_slanted_angle
if __name__ == "__main__":
num_images = 1
variant = dict(
imsize=84,
double_algo=False,
env_id="SawyerPickupEnv-v0",
grill_variant=dict(
save_video=True,
save_video_period=50,
presample_goals=True,
generate_goal_dataset_fctn=get_image_presampled_goals_from_vae_env,
goal_generation_kwargs=dict(
num_presampled_goals=1000,
),
do_state_exp=True,
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=505,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
min_num_steps_before_training=4000,
batch_size=128,
max_path_length=50,
discount=0.99,
num_updates_per_env_step=4,
collection_mode='online-parallel',
reward_scale=100,
),
td3_kwargs=dict(
tau=1e-2,
),
her_kwargs=dict(),
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
replay_buffer_kwargs=dict(
max_size=int(1e6),
fraction_goals_are_rollout_goals=0.0,
fraction_resampled_goals_are_env_goals=0.5,
),
algorithm='GRILL-HER-TD3',
normalize=False,
render=False,
exploration_noise=0.3,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
),
train_vae_variant=dict(
generate_vae_data_fctn=generate_vae_dataset,
dump_skew_debug_plots=False,
representation_size=16,
beta=0.5,
num_epochs=0,
generate_vae_dataset_kwargs=dict(
N=50,
oracle_dataset=True,
use_cached=True,
num_channels=3*num_images,
),
vae_kwargs=dict(
input_channels=3*num_images,
),
algo_kwargs=dict(
train_data_workers=4,
do_scatterplot=False,
lr=1e-3,
),
#beta_schedule_kwargs=dict(
# x_values=[0, 100, 200, 500],
# y_values=[0, 0, 5, 5],
#),
save_period=5,
),
)
search_space = {
'grill_variant.training_mode': ['train'],
'grill_variant.replay_kwargs.fraction_goals_are_rollout_goals': [0.0],
'grill_variant.algo_kwargs.base_kwargs.num_updates_per_env_step': [4],
'grill_variant.exploration_noise': [.3, .5],
'env_kwargs.random_init': [False],
'env_kwargs.action_scale': [.02],
'init_camera': [
sawyer_pick_and_place_camera,
],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 4
mode = 'ec2'
exp_prefix = 'pickup-true-state-exp-rig-paper'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
grill_her_td3_full_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
# trial_dir_suffix='n1000-{}--zoomed-{}'.format(n1000, zoomed),
snapshot_gap=200,
snapshot_mode='gap_and_last',
num_exps_per_instance=2,
)
| 35.5 | 80 | 0.568289 | ["MIT"] | Asap7772/railrl_evalsawyer | experiments/steven/online-vae/pick_and_place/state_exp.py | 4,686 | Python |
from commons.neural_network import TwoLayerNet
from datasets.mnist import load_mnist
import numpy as np
(x_train, t_train), (x_test, t_test) = load_mnist(
normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)
for i in range(iters_num):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
# calculate gradients with backpropagation
grad = network.gradient(x_batch, t_batch)
    for key in ('W1', 'b1', 'W2', 'b2'):  # update parameters by gradient descent
network.params[key] -= learning_rate * grad[key]
loss = network.loss(x_batch, t_batch)
train_loss_list.append(loss)
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
print(train_acc, test_acc)
| 28.121951 | 69 | 0.712923 | ["MIT"] | Myeonghan-Jeong/Deep-learning-from-scratch | chapter05/5.7.4_backpropagation_learning.py | 1,153 | Python |
import os
import re
from therandy.utils import get_closest, replace_command
from therandy.specific.brew import get_brew_path_prefix, brew_available
BREW_CMD_PATH = '/Library/Homebrew/cmd'
TAP_PATH = '/Library/Taps'
TAP_CMD_PATH = '/%s/%s/cmd'
enabled_by_default = brew_available
def _get_brew_commands(brew_path_prefix):
"""To get brew default commands on local environment"""
brew_cmd_path = brew_path_prefix + BREW_CMD_PATH
return [name[:-3] for name in os.listdir(brew_cmd_path)
if name.endswith(('.rb', '.sh'))]
def _get_brew_tap_specific_commands(brew_path_prefix):
"""To get tap's specific commands
https://github.com/Homebrew/homebrew/blob/master/Library/brew.rb#L115"""
commands = []
brew_taps_path = brew_path_prefix + TAP_PATH
for user in _get_directory_names_only(brew_taps_path):
taps = _get_directory_names_only(brew_taps_path + '/%s' % user)
# Brew Taps's naming rule
# https://github.com/Homebrew/homebrew/blob/master/share/doc/homebrew/brew-tap.md#naming-conventions-and-limitations
taps = (tap for tap in taps if tap.startswith('homebrew-'))
for tap in taps:
tap_cmd_path = brew_taps_path + TAP_CMD_PATH % (user, tap)
if os.path.isdir(tap_cmd_path):
commands += (name.replace('brew-', '').replace('.rb', '')
for name in os.listdir(tap_cmd_path)
if _is_brew_tap_cmd_naming(name))
return commands
def _is_brew_tap_cmd_naming(name):
return name.startswith('brew-') and name.endswith('.rb')
def _get_directory_names_only(path):
return [d for d in os.listdir(path)
if os.path.isdir(os.path.join(path, d))]
def _brew_commands():
brew_path_prefix = get_brew_path_prefix()
if brew_path_prefix:
try:
return (_get_brew_commands(brew_path_prefix)
+ _get_brew_tap_specific_commands(brew_path_prefix))
except OSError:
pass
    # Fallback commands for testing (based on Homebrew 0.9.5)
return ['info', 'home', 'options', 'install', 'uninstall',
'search', 'list', 'update', 'upgrade', 'pin', 'unpin',
'doctor', 'create', 'edit']
def match(command):
is_proper_command = ('brew' in command.script and
'Unknown command' in command.output)
if is_proper_command:
broken_cmd = re.findall(r'Error: Unknown command: ([a-z]+)',
command.output)[0]
return bool(get_closest(broken_cmd, _brew_commands()))
return False
def get_new_command(command):
broken_cmd = re.findall(r'Error: Unknown command: ([a-z]+)',
command.output)[0]
return replace_command(command, broken_cmd, _brew_commands())
| 34.048193 | 124 | 0.649328 | ["MIT"] | benmonro/thefuck | therandy/rules/brew_unknown_command.py | 2,826 | Python |
#==============================================================================
# DEREDDEN.py Sean Andrews's deredden.pro ported to python3
#
# A simple function to provide the de-reddening factor in either magnitudes
# (with keyword /mags set) or flux density at a range of input wavelengths,
# given a visual extinction (Av).
#
# made composite extinction curves for different Av
# regimes: at higher Av, use McClure 2009 model, but at
# lower Av can use the Rv = 3.1 (DISM) Mathis 1990 model.
# the McClure 2009 model switches at Ak = 1
#==============================================================================
from astropy.io import ascii
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
data = ascii.read("ext_curves.dat")
awl = data['wl'] #wavelength grid for extinction [microns]
A1 = data['A1'] #Mathis Law
A2 = data['A2'] # Valid 0.3 < Ak < 1
A3 = data['A3'] # Valid 1 < Ak < 7
#what is Av_me? An arbitrary cutoff assumed by Sean?
# Alambda = Av * (1/7.75 * interpol(A2,awl,wl))
def deredden(wl, Av, thres=None, mags=True):
    '''Takes in a wavelength array in microns. Valid between 0.1200 and 1e4 microns.'''
#- thresholds for different extinction curve regimes
if thres is not None:
        Av_lo = thres
else:
Av_lo = 0.0
Av_me = 2.325 #McClure 2009 threshold: AK = 0.3
if (Av_lo >= Av_me):
Av_lo = 0.0
Av_hi = 7.75 #McClure 2009 threshold: AK = 1.0
if (Av >= Av_hi):
AA = A3
AvAk = 7.75
if (Av >= Av_me) and (Av < Av_hi):
AA = A2
AvAk = 7.75
if (Av >= Av_lo) and (Av < Av_me):
AA = A2
AvAk = 7.75
if (Av < Av_lo):
AA = A1
AvAk = 9.03
AK_AV = 1. / AvAk
#interpolate extinction curve onto input wavelength grid
Alambda_func = interp1d(awl, Av * AK_AV * AA)
Alambda = Alambda_func(wl)
# - return the extinction at input wavelengths
#at this point, Alambda is in magnitudes
if mags:
return Alambda
else:
# to convert to flux, raise 10^(0.4 * Alambda)
return 10. ** (0.4 * Alambda)
def av_points(wl):
    '''Call this to get a grid; multiply the grid by Av to get the reddening at each wavelength.'''
# Using A2
AK_AV = 1 / 7.75
Alambda_func = interp1d(awl, AK_AV * A2, kind='linear')
return Alambda_func(wl)
def create_red_grid(wl):
avs = av_points(wl)
np.save('red_grid.npy',avs)
def plot_curve():
'''To test implementation'''
fig = plt.figure()
ax = fig.add_subplot(111)
wl = np.linspace(.13, 10, num=300)
ax.plot(wl, deredden(wl, .2, mags=False), label="0.2 mags")
ax.plot(wl, deredden(wl, 1.0, mags=False), label="1.0 mags")
ax.plot(wl, deredden(wl, 2.0, mags=False), label="2.0 mags")
avs = av_points(wl)
ax.plot(wl, 10**(0.4 * avs), "k:", label="fiducial")
ax.legend(loc="upper right")
    ax.set_xlabel(r"$\lambda\quad[\mu m]$")
ax.set_ylabel(r"$A_\lambda$")
plt.savefig("redenning_curves.png")
def main():
plot_curve()
if __name__ == "__main__":
main()
| 28.962617 | 87 | 0.592126 | ["MIT"] | Circumstellar/MichaelJordan | deredden.py | 3,099 | Python |
from option import *
import tkinter as tk
class Block:
""" Block class for each block of the map """
def __init__(self, x, y, char):
self.x = x
self.y = y
self.char = char
self.blockType = MAP_CHARS[char]
self.texture = BLOCK_TEXTURE[self.blockType]
self.collision = self.blockType in COLLISION_BLOCK
        self.firstDraw = True  # True until the block has been drawn for the first time
def drawBlock(self, canvas):
        if self.firstDraw:  # If this is the first draw of the block
self.display = []
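            # Texture entries are expected to be ("rect"/"oval", x1, y1, x2, y2, color) or
            # ("polygone", [(x, y), ...], color), with coordinates relative to the block.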
for texture in self.texture:
if texture[0] == "rect":
self.display.append(
canvas.create_rectangle(
(self.x + texture[1]) * BLOCK_SIZE,
(self.y + texture[2]) * BLOCK_SIZE,
(self.x + texture[3]) * BLOCK_SIZE,
(self.y + texture[4]) * BLOCK_SIZE,
fill=texture[5],
width=0,
)
)
elif texture[0] == "oval":
self.display.append(
canvas.create_oval(
(self.x + texture[1]) * BLOCK_SIZE,
(self.y + texture[2]) * BLOCK_SIZE,
(self.x + texture[3]) * BLOCK_SIZE,
(self.y + texture[4]) * BLOCK_SIZE,
fill=texture[5],
width=0,
)
)
elif texture[0] == "polygone":
self.display.append(
canvas.create_polygon(
[
((self.x + x) * BLOCK_SIZE, (self.y + y) * BLOCK_SIZE)
for x, y in texture[1]
],
fill=texture[2],
width=0,
)
)
self.firstDraw = False
self.texture2 = self.texture
elif self.texture == self.texture2:
pass
else:
for texture in self.texture:
canvas.delete(texture)
self.firstDraw = True
self.drawBlock(canvas)
self.texture2 = self.texture
class MapParser:
def __init__(self, source):
        self.coin = 0  # Number of coins in the map
self.lines = source.split("\n") # List of all the lines
self._grid = [] # 2D array who contain the block
        # Filter out empty lines (rebuilding the list avoids index errors from deleting while iterating)
        self.lines = [line for line in self.lines if line not in ["", "\n", " ", "\t"]]
# Making the map in self._grid
for y, line in enumerate(self.lines):
self._grid.append([])
for x, char in enumerate(line):
self._grid[y].append(Block(x, y, char))
if self._grid[y][x].blockType == "playerspawn":
self.spawn = (x, y)
elif self._grid[y][x].blockType == "coin":
self.coin += 1
del self.lines
def draw(self, canvas):
for y in range(len(self._grid)):
for x in range(len(self._grid[y])):
self._grid[y][x].drawBlock(canvas)
def __getitem__(self, index):
return self._grid[index]
def __len__(self):
return len(self._grid)
class Player:
def __init__(self, gMap):
self.score = 0
self.x = gMap.spawn[0]
self.y = gMap.spawn[1]
        self.velocity = 0  # Used to simulate gravity
self.color = PLAYER_COLOR
self.firstDraw = True
def drawPlayer(self, canvas):
if self.firstDraw:
self.display = canvas.create_oval(
self.x * BLOCK_SIZE,
self.y * BLOCK_SIZE,
(self.x + 1) * BLOCK_SIZE - 1,
(self.y + 1) * BLOCK_SIZE - 1,
fill=self.color,
)
self.firstDraw = False
else:
canvas.itemconfigure(self.display, fill=self.color)
canvas.coords(
self.display,
self.x * BLOCK_SIZE,
self.y * BLOCK_SIZE,
(self.x + 1) * BLOCK_SIZE - 1,
(self.y + 1) * BLOCK_SIZE - 1,
)
| 34.875969 | 86 | 0.4481 | ["Apache-2.0"] | rokonio/Platformer | script/map_and_player.py | 4,499 | Python |
#!/bin/python3
"""
https://www.hackerrank.com/challenges/crossword-puzzle/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=recursion-backtracking&h_r=next-challenge&h_v=zen
"""
# Complete the crossword_puzzle function below.
def crossword_puzzle(crossword, words):
"""resuelve el puzzle"""
palabras = words.split(";")
puzzle_y = len(crossword)
puzzle_x = len(crossword[0])
pos = []
for i in palabras:
pos.append([0, 0, "x", 1])
cruces = []
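    # pos[k] holds one word slot: [start_x, start_y, orientation ('h'/'v'), length],
    # later extended with the list of candidate words of matching length.
    # cruces records crossings between slots as [slot_index, x, y].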
sig = 0
j = 0
while j < puzzle_y:
i = 0
while i < puzzle_x:
if (
crossword[j][i] == "-"
or (
i + 1 < puzzle_x
and crossword[j][i] == "v"
and crossword[j][i + 1] == "-"
)
or (
j + 1 < puzzle_y
and crossword[j][i] == "h"
and crossword[j + 1][i] == "-"
)
):
if crossword[j][i] != "-":
cruces.append([sig, i, j])
crossword[j] = crossword[j][:i] + "i" + crossword[j][i + 1 :]
pos[sig][0] = i
pos[sig][1] = j
sig += 1
iter_i = i + 1
iter_j = j + 1
while iter_i < puzzle_x and (
crossword[j][iter_i] == "-"
or crossword[j][iter_i] == "v"
):
pos[sig - 1][2] = "h"
pos[sig - 1][3] += 1
if crossword[j][iter_i] == "v":
crossword[j] = (
crossword[j][:iter_i]
+ "x"
+ crossword[j][iter_i + 1 :]
)
cruces.append([sig - 1, iter_i, j])
else:
crossword[j] = (
crossword[j][:iter_i]
+ "h"
+ crossword[j][iter_i + 1 :]
)
iter_i += 1
while iter_j < puzzle_y and (
crossword[iter_j][i] == "-"
or crossword[iter_j][i] == "h"
):
pos[sig - 1][2] = "v"
pos[sig - 1][3] += 1
if crossword[iter_j][i] == "h":
crossword[iter_j] = (
crossword[iter_j][:i]
+ "x"
+ crossword[iter_j][i + 1 :]
)
cruces.append([sig - 1, i, iter_j])
else:
crossword[iter_j] = (
crossword[iter_j][:i]
+ "v"
+ crossword[iter_j][i + 1 :]
)
iter_j += 1
i += 1
j += 1
for palabra_aux1 in pos:
posibles = []
for pal in palabras:
if len(pal) == palabra_aux1[3]:
posibles.append(pal)
palabra_aux1.append(posibles)
for cruce in cruces:
i = 0
while i < len(pos):
if pos[i][2] == "h":
if (
pos[i][0] <= cruce[1]
and pos[i][0] + pos[i][3] >= cruce[1]
and pos[i][1] == cruce[2]
):
break
if pos[i][2] == "v":
if (
pos[i][1] <= cruce[2]
and pos[i][1] + pos[i][3] >= cruce[2]
and pos[i][0] == cruce[1]
):
break
i += 1
letra1 = abs(cruce[1] - pos[i][0] + cruce[2] - pos[i][1])
letra2 = abs(pos[cruce[0]][0] - cruce[1] + pos[cruce[0]][1] - cruce[2])
palabra_aux1 = ""
palabra_aux2 = ""
for palabra1 in pos[i][4]:
for palabra2 in pos[cruce[0]][4]:
if palabra1[letra1] == palabra2[letra2]:
palabra_aux1 = palabra1
palabra_aux2 = palabra2
break
pos[i][4] = [palabra_aux1]
pos[cruce[0]][4] = [palabra_aux2]
for pal in pos:
if pal[2] == "h":
crossword[pal[1]] = (
crossword[pal[1]][: pal[0]]
+ pal[4][0]
+ crossword[pal[1]][pal[0] + pal[3] :]
)
else:
i = 0
while i < pal[3]:
crossword[pal[1] + i] = (
crossword[pal[1] + i][: pal[0]]
+ pal[4][0][i]
+ crossword[pal[1] + i][pal[0] + 1 :]
)
i += 1
return crossword
# ++H+F+++++++++
# +RINOCERONTE++
# ++E+C++++++L++
# ++N+AGUILA+E++
# ++A++++++++F++
# +++++++++++A++
# +++++++++++N++
# +++++++++++T++
# +++++++++++E++
CROSSWORD = [
"++-+-+++++++++",
"+-----------++",
"++-+-++++++-++",
"++-+------+-++",
"++-++++++++-++",
"+++++++++++-++",
"+++++++++++-++",
"+++++++++++-++",
"+++++++++++-++",
]
WORDS = "AGUILA;RINOCERONTE;ELEFANTE;HIENA;FOCA"
for x in crossword_puzzle(CROSSWORD, WORDS):
print(x)
| 32.464286 | 194 | 0.336084 | ["MIT"] | pablosambuco/hackerrank | Interview Preparation Kit/Crossword puzzle/test.py | 5,454 | Python |
# coding: pyxl
from pyxl import html
def test():
if True:
b = <frag>{0#lol
}</frag>
| 13.714286 | 24 | 0.5625 | ["Apache-2.0"] | gvanrossum/pyxl3 | tests/test_python_comments_5.py | 96 | Python |