Dataset columns (one record per source file):
hexsha: string (40) | size: int64 (1 - 1.03M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (3 - 239) | max_stars_repo_name: string (5 - 130) | max_stars_repo_head_hexsha: string (40 - 78) | max_stars_repo_licenses: sequence (1 - 10) | max_stars_count: int64 (1 - 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (24, nullable) | max_stars_repo_stars_event_max_datetime: string (24, nullable)
max_issues_repo_path: string (3 - 239) | max_issues_repo_name: string (5 - 130) | max_issues_repo_head_hexsha: string (40 - 78) | max_issues_repo_licenses: sequence (1 - 10) | max_issues_count: int64 (1 - 67k, nullable) | max_issues_repo_issues_event_min_datetime: string (24, nullable) | max_issues_repo_issues_event_max_datetime: string (24, nullable)
max_forks_repo_path: string (3 - 239) | max_forks_repo_name: string (5 - 130) | max_forks_repo_head_hexsha: string (40 - 78) | max_forks_repo_licenses: sequence (1 - 10) | max_forks_count: int64 (1 - 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (24, nullable) | max_forks_repo_forks_event_max_datetime: string (24, nullable)
content: string (1 - 1.03M) | avg_line_length: float64 (1 - 958k) | max_line_length: int64 (1 - 1.03M) | alphanum_fraction: float64 (0 - 1)
4a263c5a019460be71ed70498727084ba6412b83 | 14,809 bytes | py | Python | pytorch3d/loss/point_mesh_distance.py | martinruenz/pytorch3d @ 7f1e63aed1252ba8145d4a66ce2272331d60cdae | licenses: ["BSD-3-Clause"] | stars: 3 (2022-03-09T08:12:54.000Z - 2022-03-10T01:57:03.000Z) | issues: null | forks: 1 (2020-05-25T07:19:08.000Z - 2020-05-25T07:19:08.000Z)
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from pytorch3d import _C
from pytorch3d.structures import Meshes, Pointclouds
from torch.autograd import Function
from torch.autograd.function import once_differentiable
"""
This file defines distances between meshes and pointclouds.
The functions make use of the definition of a distance between a point and
an edge segment or the distance of a point and a triangle (face).
The exact mathematical formulations and implementations of these
distances can be found in `csrc/utils/geometry_utils.cuh`.
"""
# PointFaceDistance
class _PointFaceDistance(Function):
"""
    Torch autograd Function wrapper for the PointFaceDistance CUDA implementation
"""
@staticmethod
def forward(ctx, points, points_first_idx, tris, tris_first_idx, max_points):
"""
Args:
ctx: Context object used to calculate gradients.
points: FloatTensor of shape `(P, 3)`
points_first_idx: LongTensor of shape `(N,)` indicating the first point
index in each example in the batch
tris: FloatTensor of shape `(T, 3, 3)` of triangular faces. The `t`-th
triangular face is spanned by `(tris[t, 0], tris[t, 1], tris[t, 2])`
tris_first_idx: LongTensor of shape `(N,)` indicating the first face
index in each example in the batch
max_points: Scalar equal to maximum number of points in the batch
Returns:
dists: FloatTensor of shape `(P,)`, where `dists[p]` is the squared
euclidean distance of `p`-th point to the closest triangular face
in the corresponding example in the batch
idxs: LongTensor of shape `(P,)` indicating the closest triangular face
in the corresponding example in the batch.
`dists[p]` is
`d(points[p], tris[idxs[p], 0], tris[idxs[p], 1], tris[idxs[p], 2])`
where `d(u, v0, v1, v2)` is the distance of point `u` from the triangular
face `(v0, v1, v2)`
"""
dists, idxs = _C.point_face_dist_forward(
points, points_first_idx, tris, tris_first_idx, max_points
)
ctx.save_for_backward(points, tris, idxs)
return dists
@staticmethod
@once_differentiable
def backward(ctx, grad_dists):
grad_dists = grad_dists.contiguous()
points, tris, idxs = ctx.saved_tensors
grad_points, grad_tris = _C.point_face_dist_backward(
points, tris, idxs, grad_dists
)
return grad_points, None, grad_tris, None, None
# pyre-fixme[16]: `_PointFaceDistance` has no attribute `apply`.
point_face_distance = _PointFaceDistance.apply
# FacePointDistance
class _FacePointDistance(Function):
"""
    Torch autograd Function wrapper for the FacePointDistance CUDA implementation
"""
@staticmethod
def forward(ctx, points, points_first_idx, tris, tris_first_idx, max_tris):
"""
Args:
ctx: Context object used to calculate gradients.
points: FloatTensor of shape `(P, 3)`
points_first_idx: LongTensor of shape `(N,)` indicating the first point
index in each example in the batch
tris: FloatTensor of shape `(T, 3, 3)` of triangular faces. The `t`-th
triangular face is spanned by `(tris[t, 0], tris[t, 1], tris[t, 2])`
tris_first_idx: LongTensor of shape `(N,)` indicating the first face
index in each example in the batch
max_tris: Scalar equal to maximum number of faces in the batch
Returns:
dists: FloatTensor of shape `(T,)`, where `dists[t]` is the squared
                euclidean distance of `t`-th triangular face to the closest point in the
corresponding example in the batch
idxs: LongTensor of shape `(T,)` indicating the closest point in the
corresponding example in the batch.
`dists[t] = d(points[idxs[t]], tris[t, 0], tris[t, 1], tris[t, 2])`,
where `d(u, v0, v1, v2)` is the distance of point `u` from the triangular
face `(v0, v1, v2)`.
"""
dists, idxs = _C.face_point_dist_forward(
points, points_first_idx, tris, tris_first_idx, max_tris
)
ctx.save_for_backward(points, tris, idxs)
return dists
@staticmethod
@once_differentiable
def backward(ctx, grad_dists):
grad_dists = grad_dists.contiguous()
points, tris, idxs = ctx.saved_tensors
grad_points, grad_tris = _C.face_point_dist_backward(
points, tris, idxs, grad_dists
)
return grad_points, None, grad_tris, None, None
# pyre-fixme[16]: `_FacePointDistance` has no attribute `apply`.
face_point_distance = _FacePointDistance.apply
# PointEdgeDistance
class _PointEdgeDistance(Function):
"""
    Torch autograd Function wrapper for the PointEdgeDistance CUDA implementation
"""
@staticmethod
def forward(ctx, points, points_first_idx, segms, segms_first_idx, max_points):
"""
Args:
ctx: Context object used to calculate gradients.
points: FloatTensor of shape `(P, 3)`
points_first_idx: LongTensor of shape `(N,)` indicating the first point
index for each example in the mesh
segms: FloatTensor of shape `(S, 2, 3)` of edge segments. The `s`-th
edge segment is spanned by `(segms[s, 0], segms[s, 1])`
segms_first_idx: LongTensor of shape `(N,)` indicating the first edge
index for each example in the mesh
max_points: Scalar equal to maximum number of points in the batch
Returns:
dists: FloatTensor of shape `(P,)`, where `dists[p]` is the squared
euclidean distance of `p`-th point to the closest edge in the
corresponding example in the batch
idxs: LongTensor of shape `(P,)` indicating the closest edge in the
corresponding example in the batch.
`dists[p] = d(points[p], segms[idxs[p], 0], segms[idxs[p], 1])`,
where `d(u, v0, v1)` is the distance of point `u` from the edge segment
spanned by `(v0, v1)`.
"""
dists, idxs = _C.point_edge_dist_forward(
points, points_first_idx, segms, segms_first_idx, max_points
)
ctx.save_for_backward(points, segms, idxs)
return dists
@staticmethod
@once_differentiable
def backward(ctx, grad_dists):
grad_dists = grad_dists.contiguous()
points, segms, idxs = ctx.saved_tensors
grad_points, grad_segms = _C.point_edge_dist_backward(
points, segms, idxs, grad_dists
)
return grad_points, None, grad_segms, None, None
# pyre-fixme[16]: `_PointEdgeDistance` has no attribute `apply`.
point_edge_distance = _PointEdgeDistance.apply
# EdgePointDistance
class _EdgePointDistance(Function):
"""
    Torch autograd Function wrapper for the EdgePointDistance CUDA implementation
"""
@staticmethod
def forward(ctx, points, points_first_idx, segms, segms_first_idx, max_segms):
"""
Args:
ctx: Context object used to calculate gradients.
points: FloatTensor of shape `(P, 3)`
points_first_idx: LongTensor of shape `(N,)` indicating the first point
index for each example in the mesh
segms: FloatTensor of shape `(S, 2, 3)` of edge segments. The `s`-th
edge segment is spanned by `(segms[s, 0], segms[s, 1])`
segms_first_idx: LongTensor of shape `(N,)` indicating the first edge
index for each example in the mesh
max_segms: Scalar equal to maximum number of edges in the batch
Returns:
dists: FloatTensor of shape `(S,)`, where `dists[s]` is the squared
euclidean distance of `s`-th edge to the closest point in the
corresponding example in the batch
idxs: LongTensor of shape `(S,)` indicating the closest point in the
corresponding example in the batch.
`dists[s] = d(points[idxs[s]], edges[s, 0], edges[s, 1])`,
where `d(u, v0, v1)` is the distance of point `u` from the segment
spanned by `(v0, v1)`.
"""
dists, idxs = _C.edge_point_dist_forward(
points, points_first_idx, segms, segms_first_idx, max_segms
)
ctx.save_for_backward(points, segms, idxs)
return dists
@staticmethod
@once_differentiable
def backward(ctx, grad_dists):
grad_dists = grad_dists.contiguous()
points, segms, idxs = ctx.saved_tensors
grad_points, grad_segms = _C.edge_point_dist_backward(
points, segms, idxs, grad_dists
)
return grad_points, None, grad_segms, None, None
# pyre-fixme[16]: `_EdgePointDistance` has no attribute `apply`.
edge_point_distance = _EdgePointDistance.apply
def point_mesh_edge_distance(meshes: Meshes, pcls: Pointclouds):
"""
Computes the distance between a pointcloud and a mesh within a batch.
Given a pair `(mesh, pcl)` in the batch, we define the distance to be the
sum of two distances, namely `point_edge(mesh, pcl) + edge_point(mesh, pcl)`
`point_edge(mesh, pcl)`: Computes the squared distance of each point p in pcl
to the closest edge segment in mesh and averages across all points in pcl
`edge_point(mesh, pcl)`: Computes the squared distance of each edge segment in mesh
to the closest point in pcl and averages across all edges in mesh.
The above distance functions are applied for all `(mesh, pcl)` pairs in the batch
and then averaged across the batch.
Args:
meshes: A Meshes data structure containing N meshes
pcls: A Pointclouds data structure containing N pointclouds
Returns:
loss: The `point_edge(mesh, pcl) + edge_point(mesh, pcl)` distance
between all `(mesh, pcl)` in a batch averaged across the batch.
"""
if len(meshes) != len(pcls):
raise ValueError("meshes and pointclouds must be equal sized batches")
N = len(meshes)
# packed representation for pointclouds
points = pcls.points_packed() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
max_points = pcls.num_points_per_cloud().max().item()
# packed representation for edges
verts_packed = meshes.verts_packed()
edges_packed = meshes.edges_packed()
segms = verts_packed[edges_packed] # (S, 2, 3)
segms_first_idx = meshes.mesh_to_edges_packed_first_idx()
max_segms = meshes.num_edges_per_mesh().max().item()
# point to edge distance: shape (P,)
point_to_edge = point_edge_distance(
points, points_first_idx, segms, segms_first_idx, max_points
)
# weight each example by the inverse of number of points in the example
point_to_cloud_idx = pcls.packed_to_cloud_idx() # (sum(P_i), )
num_points_per_cloud = pcls.num_points_per_cloud() # (N,)
weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx)
weights_p = 1.0 / weights_p.float()
point_to_edge = point_to_edge * weights_p
point_dist = point_to_edge.sum() / N
    # edge to point distance: shape (S,)
edge_to_point = edge_point_distance(
points, points_first_idx, segms, segms_first_idx, max_segms
)
# weight each example by the inverse of number of edges in the example
segm_to_mesh_idx = meshes.edges_packed_to_mesh_idx() # (sum(S_n),)
num_segms_per_mesh = meshes.num_edges_per_mesh() # (N,)
weights_s = num_segms_per_mesh.gather(0, segm_to_mesh_idx)
weights_s = 1.0 / weights_s.float()
edge_to_point = edge_to_point * weights_s
edge_dist = edge_to_point.sum() / N
return point_dist + edge_dist
def point_mesh_face_distance(meshes: Meshes, pcls: Pointclouds):
"""
Computes the distance between a pointcloud and a mesh within a batch.
Given a pair `(mesh, pcl)` in the batch, we define the distance to be the
sum of two distances, namely `point_face(mesh, pcl) + face_point(mesh, pcl)`
`point_face(mesh, pcl)`: Computes the squared distance of each point p in pcl
to the closest triangular face in mesh and averages across all points in pcl
`face_point(mesh, pcl)`: Computes the squared distance of each triangular face in
mesh to the closest point in pcl and averages across all faces in mesh.
The above distance functions are applied for all `(mesh, pcl)` pairs in the batch
and then averaged across the batch.
Args:
meshes: A Meshes data structure containing N meshes
pcls: A Pointclouds data structure containing N pointclouds
Returns:
loss: The `point_face(mesh, pcl) + face_point(mesh, pcl)` distance
between all `(mesh, pcl)` in a batch averaged across the batch.
"""
if len(meshes) != len(pcls):
raise ValueError("meshes and pointclouds must be equal sized batches")
N = len(meshes)
# packed representation for pointclouds
points = pcls.points_packed() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
max_points = pcls.num_points_per_cloud().max().item()
# packed representation for faces
verts_packed = meshes.verts_packed()
faces_packed = meshes.faces_packed()
tris = verts_packed[faces_packed] # (T, 3, 3)
tris_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_tris = meshes.num_faces_per_mesh().max().item()
# point to face distance: shape (P,)
point_to_face = point_face_distance(
points, points_first_idx, tris, tris_first_idx, max_points
)
# weight each example by the inverse of number of points in the example
point_to_cloud_idx = pcls.packed_to_cloud_idx() # (sum(P_i),)
num_points_per_cloud = pcls.num_points_per_cloud() # (N,)
weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx)
weights_p = 1.0 / weights_p.float()
point_to_face = point_to_face * weights_p
point_dist = point_to_face.sum() / N
# face to point distance: shape (T,)
face_to_point = face_point_distance(
points, points_first_idx, tris, tris_first_idx, max_tris
)
# weight each example by the inverse of number of faces in the example
tri_to_mesh_idx = meshes.faces_packed_to_mesh_idx() # (sum(T_n),)
num_tris_per_mesh = meshes.num_faces_per_mesh() # (N, )
weights_t = num_tris_per_mesh.gather(0, tri_to_mesh_idx)
weights_t = 1.0 / weights_t.float()
face_to_point = face_to_point * weights_t
face_dist = face_to_point.sum() / N
return point_dist + face_dist
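# ---------------------------------------------------------------------------- #
# Illustrative usage sketch, not part of the original module: builds a single
# triangle mesh and a small random point cloud, then evaluates both losses
# defined above. The tensor values are arbitrary example data, and it assumes a
# PyTorch3D build with the compiled `_C` extension; older builds may require
# moving the inputs to a CUDA device first.
# ---------------------------------------------------------------------------- #
if __name__ == "__main__":
    import torch
    # One triangle and one point cloud, each batched as a single example.
    verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    faces = torch.tensor([[0, 1, 2]], dtype=torch.int64)
    example_meshes = Meshes(verts=[verts], faces=[faces])
    example_pcls = Pointclouds(points=[torch.rand(128, 3)])
    print(point_mesh_edge_distance(example_meshes, example_pcls))
    print(point_mesh_face_distance(example_meshes, example_pcls))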
4a263d1def8a5daa7e1fcce18ee873e7e8903f9a | 992 bytes | py | Python | cloudscraper/exceptions/reCaptcha_exceptions.py | oczkers/cloudscraper @ a8c0bdd999d89a6192d0e31fb6b1d42a7bc397d1 | licenses: ["MIT"] | stars: null | issues: null | forks: 1 (2021-01-21T21:15:03.000Z - 2021-01-21T21:15:03.000Z)
# ------------------------------------------------------------------------------- #
class reCaptcha_Service_Unavailable(Exception):
"""
Raise error for external services that cannot be reached
"""
class reCaptcha_Error_From_API(Exception):
"""
Raise error for error from API response.
"""
class reCaptcha_Account_Error(Exception):
"""
Raise error for reCaptcha provider account problem.
"""
class reCaptcha_Timeout(Exception):
"""
Raise error for reCaptcha provider taking too long.
"""
class reCaptcha_Bad_Parameter(NotImplementedError):
"""
Raise error for bad or missing Parameter.
"""
class reCaptcha_Bad_Job_ID(Exception):
"""
Raise error for invalid job id.
"""
class reCaptcha_Report_Error(Exception):
"""
Raise error for reCaptcha provider unable to report bad solve.
"""
class reCaptcha_Import_Error(Exception):
"""
Raise error for reCaptcha, cannot import a module.
"""
4a263f00940393e9cde7ab76f36920e289c2bf05 | 9,672 bytes | py | Python | pa_elasticsearch/coupler.py | ncc-tools/pa-elasticsearch-coupler @ 0e6fcb8e851eb72c1eb9c91ff55551fa817d02a2 | licenses: ["Apache-2.0"] | stars: 1 (2018-10-24T11:13:06.000Z - 2018-10-24T11:13:06.000Z) | issues: 5 (2017-01-31T11:22:11.000Z - 2021-06-01T22:15:58.000Z) | forks: 2 (2017-02-15T11:38:41.000Z - 2017-03-17T12:33:06.000Z)
"""
Reads NCC Performance Analyser data and indexes it in ElasticSearch
"""
# Copyright 2017 NCC Group plc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from configparser import ConfigParser
import datetime
import logging
import os.path
import signal
import sys
import time
import elasticsearch
from elasticsearch import Elasticsearch
from paapi import PaAuth, PaApi, ApiQueryError
from .tagdb import NCCTagDB
class Coupler:
"""
Imports data from the PA API into an ElasticSearch database
"""
conf_path = None
elasticsearch = None
paapi = None
logfile = None
loglevel = None
poll_period = 600 # Default polling period is 10min
jobtemplates_whitelist = []
jobtemplates_since = None
running = True
polling = False
def __init__(self, conf_path):
self.conf_path = conf_path
self._read_config()
self.tagdb = NCCTagDB()
def _read_config(self):
config = ConfigParser()
if not os.path.isfile(self.conf_path):
raise Exception("Couldn't open configuration file '%s'" % (self.conf_path,))
config.read(self.conf_path)
es_hosts = config['elasticsearch']['hosts'].split(',')
if 'logfile' in config['coupler'] and config['coupler']['logfile'].strip() != '':
self.logfile = config['coupler']['logfile']
if config['coupler']['loglevel'] == 'ERROR':
self.loglevel = logging.ERROR
elif config['coupler']['loglevel'] == 'INFO':
self.loglevel = logging.INFO
elif config['coupler']['loglevel'] == 'DEBUG':
self.loglevel = logging.DEBUG
else:
self.loglevel = logging.WARNING
try:
self.poll_period = int(config['coupler']['poll_period']) * 60
except ValueError:
raise Exception("Couldn't read poll_period from configuration")
# Optional config options.
try:
es_username = config['elasticsearch']['username']
es_password = config['elasticsearch']['password']
verify_certs = True
if 'verify_certs' in config['elasticsearch']:
verify_certs = not config['elasticsearch']['verify_certs'] == '0'
from pprint import pprint; pprint(verify_certs)
self.elasticsearch = Elasticsearch(
es_hosts, http_auth=(es_username, es_password), verify_certs=verify_certs)
except KeyError:
self.elasticsearch = Elasticsearch(es_hosts)
auth = PaAuth(username=config['pa']['username'],
password=config['pa']['password'].strip('"'),
client_username=config['pa']['client_username'].strip('"'),
client_password=config['pa']['client_password'].strip('"'))
self.paapi = PaApi(auth, config['pa']['realm'])
if 'since' in config['pa'] and config['pa']['since'].strip() != '':
try:
self.jobtemplates_since = datetime.datetime.strptime(
config['pa']['since'],
'%Y-%m-%dT%H:%M%z')
except ValueError:
raise Exception("Error: couldn't parse the pa.since configuration option")
if 'jobtemplates' in config['pa'] and config['pa']['jobtemplates'].strip() != '':
self.jobtemplates_whitelist = config['pa']['jobtemplates'].split(',')
def _process_jobtemplate_testruns(self, jobtemplate, last_update):
try:
testruns = self.paapi.get_testruns_for_jobtemplate(jobtemplate['sref'], last_update)
except ApiQueryError:
return
logging.info('Importing %d testruns from Jobtemplate %s',
len(testruns),
jobtemplate['sref'])
for testrun in testruns:
self._process_testrun_pageobjects(testrun, jobtemplate)
def _process_testrun_pageobjects(self, testrun, jobtemplate):
testrun['jobTemplateUri'] = jobtemplate['sref']
testrun['jobTemplateName'] = jobtemplate['name']
self.elasticsearch.index(index='pa-testruns',
doc_type='testrun',
id=testrun['sref'],
body=testrun)
logging.info('Indexed testrun %s', testrun['sref'])
pageobjects = self.paapi.get_pageobjects_for_testrun(testrun['sref'])
for pageobject in pageobjects:
self._process_pageobject(pageobject, testrun, jobtemplate)
def _process_pageobject(self, pageobject, testrun, jobtemplate):
pageobject['company'] = 'Unknown'
pageobject['category'] = 'None'
pageobject['ranAt'] = testrun['ranAt']
pageobject['jobTemplateUri'] = jobtemplate['sref']
pageobject['jobTemplateName'] = jobtemplate['name']
pageobject['parentUrl'] = testrun['url']
pageobject['parentPageTitle'] = testrun['pageTitle']
try:
domain_info = self.tagdb.get_url_info(pageobject['url'])
if len(domain_info) > 0:
company_info = self.tagdb.get_company_info(
domain_info[0]['company'],
domain_info[0]['product'])
pageobject['company'] = company_info[0]['name']
pageobject['category'] = company_info[0]['category']
logging.info("Retrieved Tag info for %s", pageobject['sref'])
except Exception as error:
logging.warning("Failed to retrieve 3rd party info for '%s'", pageobject['url'])
self.elasticsearch.index(index='pa-objects',
doc_type='pageobject',
id=pageobject['sref'],
body=pageobject)
logging.info('Indexed pageobject %s', pageobject['sref'])
def _is_jobtemplate_allowed(self, jobtemplate):
"""
Checks if jobtemplate is allowed by the configured whitelist.
"""
if len(self.jobtemplates_whitelist) == 0:
return True
return jobtemplate['type'] in self.jobtemplates_whitelist
def _poll(self, force_reindex):
logging.info("Polling")
self.polling = True
min_date = None
if force_reindex is False:
try:
results = self.elasticsearch.search(index='pa',
doc_type='testrun',
sort='ranAt:desc',
size=1)
if len(results['hits']['hits']) > 0:
last_index = results['hits']['hits'][0]
min_date = last_index['_source']['ranAt'].replace('+00:00', 'Z')
logging.info("Importing new data from PA")
except elasticsearch.exceptions.NotFoundError:
logging.info("No existing data found. Fully indexing from PA")
min_date = None
logging.info("Fully indexing data from PA")
# If no last update could be found, then try to use the one from the config
if min_date is None and self.jobtemplates_since is not None:
min_date = self.jobtemplates_since.isoformat()
jobtemplates = self.paapi.get_all_jobtemplates()
for jobtemplate in jobtemplates:
if not self._is_jobtemplate_allowed(jobtemplate):
continue
self._process_jobtemplate_testruns(jobtemplate, min_date)
def os_signal_handler(self, signum, frame):
"Handles process signals SIGHUP and SIGTERM"
if signum == signal.SIGHUP:
logging.info("(SIGHUP) Reloading config")
self._read_config()
elif signum == signal.SIGTERM:
if self.polling:
logging.info("(SIGTERM) Exiting at next poll")
self.running = False
else:
logging.info("(SIGTERM) Exiting")
sys.exit(0)
def run(self, force_reindex=False):
"Starts polling PA data into Elasticsearch"
log_format = '[%(asctime)s] %(levelname)s: %(message)s'
if self.logfile:
logging.basicConfig(filename=self.logfile, level=self.loglevel, format=log_format)
else:
logging.basicConfig(level=self.loglevel, format=log_format)
signal.signal(signal.SIGHUP, self.os_signal_handler)
signal.signal(signal.SIGTERM, self.os_signal_handler)
self.running = True
self.polling = False
while self.running:
started = time.time()
try:
self._poll(force_reindex)
except KeyboardInterrupt:
logging.info("Aborting from user input")
return 0
except Exception as error:
logging.error(str(error))
self.polling = False
# If running was set to false, the sleep is skipped so the program can exit immediately
if self.running:
time.sleep(max(0, self.poll_period - (time.time() - started)))
logging.info("Done, exiting")
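# ---------------------------------------------------------------------------- #
# Illustrative configuration sketch, not part of the original module:
# `_read_config` above expects an INI file with the sections and keys shown
# here. All values are made-up placeholders; `poll_period` is in minutes and
# `since` must match the '%Y-%m-%dT%H:%M%z' format it is parsed with.
#
#   [elasticsearch]
#   hosts = es1.example.com,es2.example.com
#   username = elastic
#   password = secret
#   verify_certs = 1
#
#   [coupler]
#   logfile = /var/log/pa_coupler.log
#   loglevel = INFO
#   poll_period = 10
#
#   [pa]
#   username = pa-user
#   password = "pa-pass"
#   client_username = "client-user"
#   client_password = "client-pass"
#   realm = example-realm
#   since = 2017-01-01T00:00+0000
#   jobtemplates = template-a,template-b
# ---------------------------------------------------------------------------- #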
4a263f1ebb2a68fa4a07da6e8729d9321de25b34 | 331 bytes | py | Python | server/com/pi_error.py | JoeEmp/pi_web_file_system @ e252660c7b8f2d26853a43860da0a7fdd062e505 | licenses: ["MIT"] | stars: null | issues: null | forks: null
PARAMETERS_ERROR = {'code':-1,'msg':'参数错误'}  # msg: "Parameter error"
UNKNOW_ERROR = {'code':-1,'msg':'未知错误,请联系管理员'}  # msg: "Unknown error, please contact the administrator"
LOCAL_FAIL_ERROR = {'code':-1,'msg':'接口不存在'}  # msg: "API endpoint does not exist"
LOGIN_ERROR = {'code':2,'msg':'请重新登录'}  # msg: "Please log in again"
class pi_exception(BaseException):
def __init__(self, reason, *args, **kwargs):
self.reason = reason
        super().__init__(*args, **kwargs)
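# Illustrative usage sketch, not part of the original module: the dicts above
# are API error payloads, and pi_exception carries a human-readable reason.
if __name__ == "__main__":
    try:
        raise pi_exception(LOGIN_ERROR['msg'])
    except pi_exception as error:
        print(error.reason)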
4a263f368ee4b948d4e4142ca90756042c6207bc | 3,140 bytes | py | Python | libs/cherrypy/test/test_etags.py | scambra/HTPC-Manager @ 1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d | licenses: ["MIT"] | stars: 422 (2015-01-08T14:08:08.000Z - 2022-02-07T11:47:37.000Z) | issues: 581 (2015-01-01T08:07:16.000Z - 2022-02-23T11:44:37.000Z) | forks: 115 (2015-01-08T14:41:00.000Z - 2022-02-13T12:31:17.000Z)
import cherrypy
from cherrypy._cpcompat import ntou
from cherrypy.test import helper
class ETagTest(helper.CPWebCase):
def setup_server():
class Root:
def resource(self):
return "Oh wah ta goo Siam."
resource.exposed = True
def fail(self, code):
code = int(code)
if 300 <= code <= 399:
raise cherrypy.HTTPRedirect([], code)
else:
raise cherrypy.HTTPError(code)
fail.exposed = True
def unicoded(self):
return ntou('I am a \u1ee4nicode string.', 'escape')
unicoded.exposed = True
# In Python 3, tools.encode is on by default
unicoded._cp_config = {'tools.encode.on': True}
conf = {'/': {'tools.etags.on': True,
'tools.etags.autotags': True,
}}
cherrypy.tree.mount(Root(), config=conf)
setup_server = staticmethod(setup_server)
def test_etags(self):
self.getPage("/resource")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('Oh wah ta goo Siam.')
etag = self.assertHeader('ETag')
# Test If-Match (both valid and invalid)
self.getPage("/resource", headers=[('If-Match', etag)])
self.assertStatus("200 OK")
self.getPage("/resource", headers=[('If-Match', "*")])
self.assertStatus("200 OK")
self.getPage("/resource", headers=[('If-Match', "*")], method="POST")
self.assertStatus("200 OK")
self.getPage("/resource", headers=[('If-Match', "a bogus tag")])
self.assertStatus("412 Precondition Failed")
# Test If-None-Match (both valid and invalid)
self.getPage("/resource", headers=[('If-None-Match', etag)])
self.assertStatus(304)
self.getPage("/resource", method='POST',
headers=[('If-None-Match', etag)])
self.assertStatus("412 Precondition Failed")
self.getPage("/resource", headers=[('If-None-Match', "*")])
self.assertStatus(304)
self.getPage("/resource", headers=[('If-None-Match', "a bogus tag")])
self.assertStatus("200 OK")
def test_errors(self):
self.getPage("/resource")
self.assertStatus(200)
etag = self.assertHeader('ETag')
# Test raising errors in page handler
self.getPage("/fail/412", headers=[('If-Match', etag)])
self.assertStatus(412)
self.getPage("/fail/304", headers=[('If-Match', etag)])
self.assertStatus(304)
self.getPage("/fail/412", headers=[('If-None-Match', "*")])
self.assertStatus(412)
self.getPage("/fail/304", headers=[('If-None-Match', "*")])
self.assertStatus(304)
def test_unicode_body(self):
self.getPage("/unicoded")
self.assertStatus(200)
etag1 = self.assertHeader('ETag')
self.getPage("/unicoded", headers=[('If-Match', etag1)])
self.assertStatus(200)
self.assertHeader('ETag', etag1)
4a263f48177cad001b782106805379e0a7660863 | 794 bytes | py | Python | sktime/transformers/panel/tests/test_ColumnConcatenator.py | bmurdata/sktime @ fb079e76e3f3dfbb849fb805e53f09adec6cdf79 | licenses: ["BSD-3-Clause"] | stars: 1 (2021-01-10T06:32:00.000Z - 2021-01-10T06:32:00.000Z) | issues: null | forks: 1 (2021-07-08T07:07:42.000Z - 2021-07-08T07:07:42.000Z)
# -*- coding: utf-8 -*-
import numpy as np
from sktime.datasets import load_basic_motions
from sktime.transformers.panel.compose import ColumnConcatenator
def test_TimeSeriesConcatenator():
X, y = load_basic_motions(split="train", return_X_y=True)
# check that loaded dataframe is multivariate
assert X.shape[1] > 1
trans = ColumnConcatenator()
Xt = trans.fit_transform(X)
# check if transformed dataframe is univariate
assert Xt.shape[1] == 1
# check if number of time series observations are correct
n_obs = np.sum([X.loc[0, col].shape[0] for col in X])
assert Xt.iloc[0, 0].shape[0] == n_obs
# check specific observations
assert X.iloc[0, -1].iloc[-3] == Xt.iloc[0, 0].iloc[-3]
assert X.iloc[0, 0].iloc[3] == Xt.iloc[0, 0].iloc[3]
4a2640ffb6816cce3e52ecf3d2fab28e4c548ed7 | 2,126 bytes | py | Python | pymoo/algorithms/unsga3.py | temaurer/pymoo @ 1a2f3e0944ef7094df7518623e3ce23f94a39a39 | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
import numpy as np
from pymoo.algorithms.nsga3 import NSGA3
from pymoo.docs import parse_doc_string
from pymoo.operators.selection.tournament_selection import TournamentSelection, compare
from pymoo.rand import random
# =========================================================================================================
# Implementation
# =========================================================================================================
def comp_by_rank_and_ref_line_dist(pop, P, **kwargs):
S = np.full(P.shape[0], np.nan)
for i in range(P.shape[0]):
a, b = P[i, 0], P[i, 1]
# if at least one solution is infeasible
if pop[a].CV > 0.0 or pop[b].CV > 0.0:
S[i] = compare(a, pop[a].CV, b, pop[b].CV, method='smaller_is_better', return_random_if_equal=True)
# both solutions are feasible
else:
# if in the same niche select by rank
if pop[a].get("niche") == pop[b].get("niche"):
if pop[a].get("rank") != pop[b].get("rank"):
S[i] = compare(a, pop[a].get("rank"), b, pop[b].get("rank"), method='smaller_is_better')
else:
S[i] = compare(a, pop[a].get("dist_to_niche"), b, pop[b].get("dist_to_niche"),
method='smaller_is_better')
if np.isnan(S[i]):
S[i] = random.choice([a, b])
return S[:, None].astype(np.int)
# =========================================================================================================
# Interface
# =========================================================================================================
def unsga3(**kwargs):
"""
This is an implementation of the Unified NSGA3 algorithm :cite:`unsga3`. The same options as for
:class:`pymoo.algorithms.nsga3.nsga3` are available.
Returns
-------
unsga3 : :class:`~pymoo.model.algorithm.Algorithm`
Returns an UNSGA3 algorithm object.
"""
return NSGA3(selection=TournamentSelection(func_comp=comp_by_rank_and_ref_line_dist), **kwargs)
parse_doc_string(unsga3)
4a26414645635a8ce0cc76352220ca72317bce5d | 452 bytes | py | Python | Day X/main.py | edgorman/advent-of-code @ 374fee72dd853c60851231a672498ab2732889cf | licenses: ["MIT"] | stars: null | issues: null | forks: null
import os
def part_one(entries):
pass
def part_two(entries):
pass
if __name__ == "__main__":
# Get input from txt file
with open(os.getcwd() + '\\YEAR\\Day X\\input.txt', 'r') as file_obj:
file_input = file_obj.readlines()
# Clean input
entries = []
for entry in file_input:
entries.append(entry.rstrip())
# Part one
print(part_one(entries))
# Part two
print(part_two(entries))
4a2641593a3279a2c829ccbecf1a373b4904e32e | 9,266 bytes | py | Python | manage/discovery_manager.py | korenlev/calipso-cvim @ 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
import argparse
from typing import Optional
from base.manager import Manager
from base.utils.data_access_base import DataAccessBase
from base.utils.logging.file_logger import FileLogger
from base.utils.logging.logger import Logger
from manage import discovery_api
from manage.async_mongo_connector import AsyncMongoConnector
from manage.async_replication_client import AsyncReplicationClient
from manage.pod_data import PodData
from manage.pod_manager import PodManager
from manage.schedule_manager import ScheduleManager
class DiscoveryManager(Manager, DataAccessBase):
REQUIRED_ENV_VARIABLES = {
'central_mongo_host': 'CALIPSO_MONGO_SERVICE_HOST',
'central_mongo_password': 'CALIPSO_MONGO_SERVICE_PWD',
}
OPTIONAL_ENV_VARIABLES = {
'discovery_api_user': 'CALIPSO_MANAGE_SERVICE_USER',
'discovery_api_password': 'CALIPSO_MANAGE_SERVICE_PWD',
'central_mongo_port': 'CALIPSO_MONGO_SERVICE_PORT',
'central_mongo_user': 'CALIPSO_MONGO_SERVICE_USER',
}
DEFAULTS = {
"bind": "0.0.0.0",
"port": "8757",
"log_file": "discovery_manager.log",
"log_level": Logger.INFO,
"cert_file": "",
"key_file": "",
"project_prefix": "cvim"
}
def __init__(self):
self.args: argparse.Namespace = self.get_args()
super().__init__(log_directory=self.args.log_directory,
log_level=self.args.log_level, log_file=self.args.log_file)
self.setup_data: Optional[dict] = None
self.project_prefix: str = self.args.project_prefix
self.verify_remotes_tls: bool = not self.args.skip_remotes_tls_verify
calipso_connection_params = self.get_connection_parameters()
self.central_mongo_host: str = calipso_connection_params['central_mongo_host']
self.central_mongo_port: int = int(calipso_connection_params.get('central_mongo_port',
AsyncMongoConnector.DEFAULT_PORT))
self.central_mongo_user: str = calipso_connection_params.get('central_mongo_user',
AsyncMongoConnector.DEFAULT_USER)
self.central_mongo_password: str = calipso_connection_params['central_mongo_password']
self.discovery_api_user: str = calipso_connection_params.get('discovery_api_user')
self.discovery_api_password: str = calipso_connection_params.get('discovery_api_password')
self.schedule_manager: Optional[ScheduleManager] = None
self.discovery_api: Optional[discovery_api.DiscoveryAPI] = None
self.tls: bool = False
if self.args.key_file and self.args.cert_file:
self.tls = True
elif self.args.key_file or self.args.cert_file:
raise ValueError("Either both key_file and cert_file should be specified or neither")
@staticmethod
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--cert_file", nargs="?", type=str,
default=DiscoveryManager.DEFAULTS["cert_file"],
help="Path to SSL certificate")
parser.add_argument("--key_file", nargs="?", type=str,
default=DiscoveryManager.DEFAULTS["key_file"],
help="Path to SSL key")
parser.add_argument("-b", "--bind", nargs="?", type=str,
default=DiscoveryManager.DEFAULTS["bind"],
help="Address or addresses to bind Discovery API to. "
"Must be a hostname/IP or a list of comma-separated hostnames/IPs")
parser.add_argument("-p", "--port", nargs="?", type=int,
default=DiscoveryManager.DEFAULTS["port"],
help="A port for Discovery API to bind to")
parser.add_argument("-d", "--log_directory", nargs="?", type=str,
default=FileLogger.LOG_DIRECTORY,
help="Log file path \n(default: '{}')"
.format(FileLogger.LOG_DIRECTORY))
parser.add_argument("-f", "--log_file", nargs="?", type=str,
default=DiscoveryManager.DEFAULTS["log_file"],
help="Scan manager log file name \n(default: '{}')"
.format(DiscoveryManager.DEFAULTS["log_file"])),
parser.add_argument("-l", "--log_level", nargs="?", type=str,
default=DiscoveryManager.DEFAULTS["log_level"],
help="Logging level \n(default: '{}')"
.format(DiscoveryManager.DEFAULTS["log_level"]))
parser.add_argument("--project_prefix", nargs="?", type=str,
default=DiscoveryManager.DEFAULTS["project_prefix"],
help="Project prefix to use in environment configurations")
parser.add_argument("--skip_remotes_tls_verify", action="store_true",
help="Skip TLS verification on remotes")
parser.add_argument("--skip_discovery", action="store_true", default=False,
help="Skip remotes discovery (simulate schedules only)"
.format(DiscoveryManager.DEFAULTS["log_level"]))
parser.add_argument("--skip_replication", action="store_true", default=False,
help="Skip remotes replication (simulate schedules only)"
.format(DiscoveryManager.DEFAULTS["log_level"]))
args = parser.parse_args()
return args
def configure_central_pod(self) -> None:
PodData.set_project_prefix(self.project_prefix)
PodData.VERIFY_TLS = self.verify_remotes_tls
self.schedule_manager = ScheduleManager(mongo_host=self.central_mongo_host,
mongo_port=self.central_mongo_port,
mongo_pwd=self.central_mongo_password,
mongo_user=self.central_mongo_user,
log_directory=self.args.log_directory,
log_file=self.args.log_file,
log_level=self.args.log_level,
skip_discovery=self.args.skip_discovery,
skip_replication=self.args.skip_replication)
self.schedule_manager.run(detach=True)
def stop_schedule_manager(self):
if self.schedule_manager:
self.schedule_manager.stop()
@staticmethod
def setup_loggers(level: str = Logger.INFO, log_file: str = ""):
if log_file:
for cls in (AsyncReplicationClient, discovery_api.DiscoveryAPI, PodManager):
cls.LOG_FILE = log_file
for cls in (AsyncReplicationClient, discovery_api.DiscoveryAPI, PodManager):
cls.LOG_LEVEL = level
def configure(self) -> None:
self.setup_loggers(level=self.args.log_level, log_file=self.args.log_file)
self.configure_central_pod()
self.discovery_api = discovery_api.DiscoveryAPI(discovery_mgr=self,
user=self.discovery_api_user,
password=self.discovery_api_password,
host=self.args.bind,
port=self.args.port,
tls=self.tls,
key_file=self.args.key_file,
cert_file=self.args.cert_file)
discovery_api.discovery_manager = self
self.log.info("Started DiscoveryManager with following configuration: "
"Bind address(es): {0.bind}, port: {0.port}. "
"Log path: {0.log_directory}{0.log_file}, log level: {0.log_level}. "
.format(self.args))
def do_action(self) -> None:
self.discovery_api.run()
if __name__ == "__main__":
discovery_manager = DiscoveryManager()
try:
discovery_manager.run()
finally:
discovery_manager.stop_schedule_manager()
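# ---------------------------------------------------------------------------- #
# Illustrative invocation sketch, not part of the original module: the values
# and the script path are made-up placeholders. The environment variables come
# from REQUIRED_ENV_VARIABLES above, and the flags map to get_args(); how the
# package is laid out on PYTHONPATH is an assumption.
#
#   export CALIPSO_MONGO_SERVICE_HOST=10.0.0.5
#   export CALIPSO_MONGO_SERVICE_PWD=secret
#   python3 manage/discovery_manager.py --bind 0.0.0.0 --port 8757 \
#       --log_level INFO --skip_discovery
# ---------------------------------------------------------------------------- #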
4a26422f650cf13ee7d4e8d2228b50ec49876fb8 | 7,162 bytes | py | Python | examples/simultaneous_translation/models/convtransformer_simul_trans.py | Shiguang-Guo/fairseq @ c9d3df5679d0829cda8fc3c818b6cab52b78dc37 | licenses: ["MIT"] | stars: 16,259 (2018-05-02T02:31:30.000Z - 2022-03-31T21:50:23.000Z) | issues: 3,863 (2018-05-02T13:42:39.000Z - 2022-03-31T19:03:32.000Z) | forks: 4,796 (2018-05-02T07:55:51.000Z - 2022-03-31T14:46:45.000Z)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from fairseq import checkpoint_utils
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_text import (
ConvTransformerModel,
convtransformer_espnet,
ConvTransformerEncoder,
)
from fairseq.models.speech_to_text.modules.augmented_memory_attention import (
augmented_memory,
SequenceEncoder,
AugmentedMemoryConvTransformerEncoder,
)
from torch import nn, Tensor
from typing import Dict, List
from fairseq.models.speech_to_text.modules.emformer import NoSegAugmentedMemoryTransformerEncoderLayer
@register_model("convtransformer_simul_trans")
class SimulConvTransformerModel(ConvTransformerModel):
"""
Implementation of the paper:
SimulMT to SimulST: Adapting Simultaneous Text Translation to
End-to-End Simultaneous Speech Translation
https://www.aclweb.org/anthology/2020.aacl-main.58.pdf
"""
@staticmethod
def add_args(parser):
super(SimulConvTransformerModel, SimulConvTransformerModel).add_args(parser)
parser.add_argument(
"--train-monotonic-only",
action="store_true",
default=False,
help="Only train monotonic attention",
)
@classmethod
def build_decoder(cls, args, task, embed_tokens):
tgt_dict = task.tgt_dict
from examples.simultaneous_translation.models.transformer_monotonic_attention import (
TransformerMonotonicDecoder,
)
decoder = TransformerMonotonicDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "load_pretrained_decoder_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_decoder_from
)
return decoder
@register_model_architecture(
"convtransformer_simul_trans", "convtransformer_simul_trans_espnet"
)
def convtransformer_simul_trans_espnet(args):
convtransformer_espnet(args)
@register_model("convtransformer_augmented_memory")
@augmented_memory
class AugmentedMemoryConvTransformerModel(SimulConvTransformerModel):
@classmethod
def build_encoder(cls, args):
encoder = SequenceEncoder(args, AugmentedMemoryConvTransformerEncoder(args))
if getattr(args, "load_pretrained_encoder_from", None) is not None:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@register_model_architecture(
"convtransformer_augmented_memory", "convtransformer_augmented_memory"
)
def augmented_memory_convtransformer_espnet(args):
convtransformer_espnet(args)
# ============================================================================ #
# Convtransformer
# with monotonic attention decoder
# with emformer encoder
# ============================================================================ #
class ConvTransformerEmformerEncoder(ConvTransformerEncoder):
def __init__(self, args):
super().__init__(args)
stride = self.conv_layer_stride(args)
trf_left_context = args.segment_left_context // stride
trf_right_context = args.segment_right_context // stride
context_config = [trf_left_context, trf_right_context]
self.transformer_layers = nn.ModuleList(
[
NoSegAugmentedMemoryTransformerEncoderLayer(
input_dim=args.encoder_embed_dim,
num_heads=args.encoder_attention_heads,
ffn_dim=args.encoder_ffn_embed_dim,
num_layers=args.encoder_layers,
dropout_in_attn=args.dropout,
dropout_on_attn=args.dropout,
dropout_on_fc1=args.dropout,
dropout_on_fc2=args.dropout,
activation_fn=args.activation_fn,
context_config=context_config,
segment_size=args.segment_length,
max_memory_size=args.max_memory_size,
scaled_init=True, # TODO: use constant for now.
tanh_on_mem=args.amtrf_tanh_on_mem,
)
]
)
self.conv_transformer_encoder = ConvTransformerEncoder(args)
def forward(self, src_tokens, src_lengths):
encoder_out: Dict[str, List[Tensor]] = self.conv_transformer_encoder(src_tokens, src_lengths.to(src_tokens.device))
output = encoder_out["encoder_out"][0]
encoder_padding_masks = encoder_out["encoder_padding_mask"]
return {
"encoder_out": [output],
            # This is because, in the original implementation,
# the output didn't consider the last segment as right context.
"encoder_padding_mask": [encoder_padding_masks[0][:, : output.size(0)]] if len(encoder_padding_masks) > 0
else [],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
@staticmethod
def conv_layer_stride(args):
# TODO: make it configurable from the args
return 4
@register_model("convtransformer_emformer")
class ConvtransformerEmformer(SimulConvTransformerModel):
@staticmethod
def add_args(parser):
super(ConvtransformerEmformer, ConvtransformerEmformer).add_args(parser)
parser.add_argument(
"--segment-length",
type=int,
metavar="N",
help="length of each segment (not including left context / right context)",
)
parser.add_argument(
"--segment-left-context",
type=int,
help="length of left context in a segment",
)
parser.add_argument(
"--segment-right-context",
type=int,
help="length of right context in a segment",
)
parser.add_argument(
"--max-memory-size",
type=int,
default=-1,
help="Right context for the segment.",
)
parser.add_argument(
"--amtrf-tanh-on-mem",
default=False,
action="store_true",
help="whether to use tanh on memory vector",
)
@classmethod
def build_encoder(cls, args):
encoder = ConvTransformerEmformerEncoder(args)
if getattr(args, "load_pretrained_encoder_from", None):
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@register_model_architecture(
"convtransformer_emformer",
"convtransformer_emformer",
)
def convtransformer_emformer_base(args):
convtransformer_espnet(args)
4a2642ac791e570cccaeb0e5c00164ad6a11f7a6 | 1,268 bytes | py | Python | src/data/remove_spam.py | tamarakatic/sentiment-analysis-in-amazon-reviews @ 4b12b9136e2809fd581d3cc730cd872b7a0b2e27 | licenses: ["MIT"] | stars: 9 (2018-09-18T07:19:03.000Z - 2021-01-15T06:22:30.000Z) | issues: 1 (2020-04-19T15:20:14.000Z - 2020-04-25T02:53:43.000Z) | forks: 3 (2019-01-28T05:59:47.000Z - 2020-05-27T14:03:51.000Z)
#!/usr/bin/env python3
import sys
import multiprocessing
import pandas as pd
import dask.dataframe as dd
from langdetect import detect
from dask.multiprocessing import get
from timeit import default_timer as timer
CPU_CORES = multiprocessing.cpu_count()
def detect_language(review, lang="en"):
try:
return detect(review) == lang
except Exception:
return False
def remove_spam(filename):
df = pd.read_csv(filename, header=None)
reviews = df[2]
dask_df = dd.from_pandas(reviews, npartitions=CPU_CORES)
non_en_indices = dask_df.map_partitions(
lambda partition: partition.apply(detect_language)
).compute(get=get)
# Non-english reviews are spam
non_spam = df[non_en_indices]
non_spam[0] -= 1 # Make sentiment binary. 1 - positive and 0 - negative
print("-- Saving processed file...\n")
non_spam.to_csv(filename, header=False, index=False)
def main(argv):
if len(argv) == 1:
print("\nNo files specified.")
return
for filename in argv[1:]:
print("\n-- Processing '{}'--\n".format(filename))
start = timer()
remove_spam(filename)
print("-- Done in {:.2f}s".format(timer() - start))
if __name__ == "__main__":
main(sys.argv)
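# Illustrative invocation sketch, not part of the original script: each
# argument is a headerless CSV whose first column holds the sentiment label and
# whose third column holds the review text, as read above; the file names are
# placeholders.
#
#   python3 remove_spam.py train.csv test.csv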
4a2643af68e625a02271ab509d06de8a5a8ce165 | 385 bytes | py | Python | horaizon/data_generator.py | mertensu/horaizon @ ac0d39fca6a1e6953e8e4bb8aafc71af05d945ba | licenses: ["MIT"] | stars: null | issues: null | forks: null
import numpy as np
import torch
from torch.utils.data import Dataset
class DataGenerator(Dataset):
def __init__(self, data_frame, q):
self.data = data_frame.values
self.q = q
def __len__(self):
return self.data.shape[0] - self.q
def __getitem__(self, index):
return torch.tensor(self.data[index : (index + self.q)], dtype=torch.float32)
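# Illustrative usage sketch, not part of the original module: wraps a small
# DataFrame of random values and batches windows of length q with a standard
# PyTorch DataLoader. The numbers are arbitrary example data.
if __name__ == "__main__":
    import pandas as pd
    from torch.utils.data import DataLoader
    frame = pd.DataFrame(np.random.randn(100, 3))
    dataset = DataGenerator(frame, q=5)
    loader = DataLoader(dataset, batch_size=8, shuffle=False)
    first_batch = next(iter(loader))
    print(first_batch.shape)  # expected: (8, 5, 3), a batch of q-length windows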
4a2643db60228d3c87f1eeb59234bec353c70804 | 1,422 bytes | py | Python | honda_dashboard/components.py | todstoychev/dashboard @ 7f582c4f48511866db96475e4cfdb36f5a10eb31 | licenses: ["BSD-2-Clause"] | stars: null | issues: null | forks: null
import os
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QMainWindow, QWidget, QGridLayout, QLabel, QVBoxLayout
from honda_dashboard.config import Config
class DashboardWindow(QMainWindow):
def __init__(self, cfg: Config):
super().__init__()
self.__config = cfg
self.setWindowTitle('Dashboard')
self.handle_mode()
def handle_mode(self):
if self.__config.night_mode_on:
color = self.__config.night_colors['bg']
else:
color = self.__config.day_colors['bg']
self.setStyleSheet('background-color: #' + color + ';')
class DashboardWidget(QWidget):
def __init__(self):
super().__init__()
self.setLayout(QGridLayout())
class InitialScreen(QWidget):
def __init__(self):
super().__init__()
self.setLayout(QVBoxLayout())
loading = QLabel('LOADING...')
font = QFont('Arial', 32)
font.setBold(True)
loading.setFont(font)
loading.setAlignment(Qt.AlignCenter)
self.layout().addWidget(loading)
self.setMinimumSize(480, 320)
path = os.path.dirname(os.path.abspath(__file__))
self.setStyleSheet('background-image: url(' + path + '/honda.png); background-position: center; '
'background-repeat: no-repeat;')
loading.setStyleSheet('color: #ed323e;')
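# Illustrative usage sketch, not part of the original module: shows the splash
# widget on its own and needs a running display. DashboardWindow would
# additionally need a Config object exposing night_mode_on plus the day/night
# color maps read in handle_mode() above.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    splash = InitialScreen()
    splash.show()
    sys.exit(app.exec_())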
4a2643e55cfb8a87a76ed196b01b5994115d62b4 | 114 bytes | py | Python | app/back/mongo/data/collect/metas/model.py | licenses: ["MIT"] | max_stars repo: jgphilpott/polyplot @ c46861174ee5881dadffbfb2278d555462523547, 5 stars (2021-05-17T14:17:14.000Z - 2021-12-14T12:54:32.000Z) | max_issues repo: jgphilpott/iGraph @ 2a91ba57e4950856a83d3a109753f8f2badee829, 8 issues (2020-02-09T02:48:41.000Z - 2021-05-16T04:57:02.000Z) | max_forks repo: jgphilpott/iGraph @ 2a91ba57e4950856a83d3a109753f8f2badee829, 2 forks (2016-09-12T03:48:16.000Z - 2019-05-04T14:15:19.000Z)
class Meta():
def __init__(self, meta):
self.code = meta["code"]
self.value = meta["value"]
4a2643fbf94b7b96fdc691562a698cd996a25bd0 | 4,206 bytes | py | Python | threat_connect/komand_threat_connect/actions/bulk_indicator_download/schema.py | xhennessy-r7/insightconnect-plugins @ 59268051313d67735b5dd3a30222eccb92aca8e9 | licenses: ["MIT"] | stars: null | issues: null | forks: null
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
ATTRIBUTE = "attribute"
CONFIDENCE = "confidence"
DATE_ADDED = "date_added"
LAST_MODIFIED = "last_modified"
OWNER = "owner"
RATING = "rating"
TAG = "tag"
THREAT_ASSESS_CONFIDENCE = "threat_assess_confidence"
THREAT_ASSESS_RATING = "threat_assess_rating"
TYPE = "type"
class Output:
BULK_INDICATORS = "bulk_indicators"
class BulkIndicatorDownloadInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"attribute": {
"type": "string",
"title": "Attribute",
"description": "Attribute type",
"order": 2
},
"confidence": {
"type": "integer",
"title": "Confidence",
"description": "Confidence value",
"order": 3
},
"date_added": {
"type": "string",
"title": "Date Added",
"displayType": "date",
"description": "Date indicator added",
"format": "date-time",
"order": 4
},
"last_modified": {
"type": "string",
"title": "Last Modified",
"displayType": "date",
"description": "Last modified date",
"format": "date-time",
"order": 5
},
"owner": {
"type": "string",
"title": "Owner",
"description": "Owner/Organization",
"order": 1
},
"rating": {
"type": "string",
"title": "Rating",
"description": "Indicator rating",
"order": 6
},
"tag": {
"type": "string",
"title": "Tag",
"description": "Single tag filter",
"order": 7
},
"threat_assess_confidence": {
"type": "integer",
"title": "Threat Assess Confidence",
"description": "Threat Assess Confidence filter",
"order": 8
},
"threat_assess_rating": {
"type": "string",
"title": "Threat Assess Rating",
"description": "Threat Assess Rating filter",
"order": 9
},
"type": {
"type": "string",
"title": "Type",
"description": "Indicator type",
"order": 10
}
},
"required": [
"owner"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class BulkIndicatorDownloadOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"bulk_indicators": {
"type": "array",
"title": "Bulk Indicators",
"description": "Bulk indicators",
"items": {
"$ref": "#/definitions/bulk_indicator_output"
},
"order": 1
}
},
"definitions": {
"bulk_indicator_output": {
"type": "object",
"title": "bulk_indicator_output",
"properties": {
"confidence": {
"type": "string",
"title": "Confidence",
"order": 7
},
"date_added": {
"type": "string",
"title": "Date Added",
"displayType": "date",
"format": "date-time",
"order": 3
},
"id": {
"type": "integer",
"title": "ID",
"order": 1
},
"last_modified": {
"type": "string",
"title": "Last Modified",
"displayType": "date",
"format": "date-time",
"order": 4
},
"owner_name": {
"type": "string",
"title": "Owner Name",
"order": 2
},
"rating": {
"type": "string",
"title": "Rating",
"order": 5
},
"threat_assess_confidence": {
"type": "string",
"title": "Threat Assess Confidence",
"order": 8
},
"threat_assess_rating": {
"type": "string",
"title": "Threat Assess Rating",
"order": 6
},
"type": {
"type": "string",
"title": "Type",
"order": 9
},
"weblink": {
"type": "string",
"title": "Weblink",
"order": 10
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
4a264492b62d81d1e87b97d2fe50d971235adcad | 11,114 bytes | py | Python | src/foremast/s3/s3apps.py | dnava13/foremast @ 53acb17631b43b6e0adc87366faa0e7e2066e6c3 | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
# Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""S3 web application infrastructure."""
import json
import logging
import boto3
from botocore.client import ClientError
from ..exceptions import S3SharedBucketNotFound
from ..utils import generate_s3_tags, get_details, get_dns_zone_ids, get_properties, update_dns_zone_record
LOG = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class S3Apps:
"""Configure infrastructure and policies for S3 web applications."""
def __init__(self, app, env, region, prop_path, primary_region='us-east-1'):
"""S3 application object. Setups Bucket and policies for S3 applications.
Args:
app (str): Application name
env (str): Environment/Account
region (str): AWS Region
prop_path (str): Path of environment property file
primary_region (str): The primary region for the application.
"""
self.app_name = app
self.env = env
self.region = region
boto_sess = boto3.session.Session(profile_name=env)
self.s3client = boto_sess.client('s3')
self.generated = get_details(app=app, env=env, region=self.region)
self.properties = get_properties(prop_path, env=self.env, region=self.region)
self.s3props = self.properties['s3']
self.group = self.generated.project
include_region = True
if self.region == primary_region:
include_region = False
if self.s3props.get('shared_bucket_master'):
self.bucket = self.generated.shared_s3_app_bucket(include_region=include_region)
elif self.s3props.get('shared_bucket_target'):
shared_app = self.s3props['shared_bucket_target']
newgenerated = get_details(app=shared_app, env=env, region=self.region)
self.bucket = newgenerated.shared_s3_app_bucket(include_region=include_region)
else:
self.bucket = self.generated.s3_app_bucket(include_region=include_region)
def create_bucket(self):
"""Create or update bucket based on app name."""
bucket_exists = self._bucket_exists()
if self.s3props.get('shared_bucket_target'):
if bucket_exists:
LOG.info('App uses shared bucket - %s ', self.bucket)
else:
LOG.error("Shared bucket %s does not exist", self.bucket)
raise S3SharedBucketNotFound
else:
if self.region == 'us-east-1':
_response = self.s3client.create_bucket(ACL=self.s3props['bucket_acl'], Bucket=self.bucket)
else:
if not bucket_exists:
_response = self.s3client.create_bucket(ACL=self.s3props['bucket_acl'], Bucket=self.bucket,
CreateBucketConfiguration={
'LocationConstraint': self.region})
else:
_response = "bucket already exists, skipping create for non-standard region buckets."
LOG.debug('Response creating bucket: %s', _response)
LOG.info('%s - S3 Bucket Upserted', self.bucket)
self._put_bucket_policy()
self._put_bucket_website()
self._put_bucket_logging()
self._put_bucket_lifecycle()
self._put_bucket_versioning()
self._put_bucket_encryption()
self._put_bucket_tagging()
def _bucket_exists(self):
"""Check if the bucket exists."""
try:
self.s3client.get_bucket_location(Bucket=self.bucket)
return True
except ClientError as error:
LOG.error(error)
return False
def _put_bucket_policy(self):
"""Attach a bucket policy to app bucket."""
if self.s3props['bucket_policy']:
policy_str = json.dumps(self.s3props['bucket_policy'])
_response = self.s3client.put_bucket_policy(Bucket=self.bucket, Policy=policy_str)
else:
_response = self.s3client.delete_bucket_policy(Bucket=self.bucket)
LOG.debug('Response adding bucket policy: %s', _response)
LOG.info('S3 Bucket Policy Attached')
def _put_bucket_website(self):
"""Configure static website on S3 bucket."""
if self.s3props['website']['enabled']:
website_config = {
'ErrorDocument': {
'Key': self.s3props['website']['error_document']
},
'IndexDocument': {
'Suffix': self.s3props['website']['index_suffix']
}
}
_response = self.s3client.put_bucket_website(Bucket=self.bucket, WebsiteConfiguration=website_config)
self._put_bucket_cors()
self._set_bucket_dns()
else:
_response = self.s3client.delete_bucket_website(Bucket=self.bucket)
self._put_bucket_cors()
LOG.debug('Response setting up S3 website: %s', _response)
LOG.info('S3 website settings updated')
def _set_bucket_dns(self):
"""Create CNAME for S3 endpoint."""
# Different regions have different s3 endpoint formats
dotformat_regions = ["eu-west-2", "eu-central-1", "ap-northeast-2", "ap-south-1", "ca-central-1", "us-east-2"]
if self.region in dotformat_regions:
s3_endpoint = "{0}.s3-website.{1}.amazonaws.com".format(self.bucket, self.region)
else:
s3_endpoint = "{0}.s3-website-{1}.amazonaws.com".format(self.bucket, self.region)
zone_ids = get_dns_zone_ids(env=self.env, facing="public")
dns_kwargs = {
'dns_name': self.bucket,
'dns_name_aws': s3_endpoint,
'dns_ttl': self.properties['dns']['ttl']
}
for zone_id in zone_ids:
LOG.debug('zone_id: %s', zone_id)
update_dns_zone_record(self.env, zone_id, **dns_kwargs)
LOG.info("Created DNS %s for Bucket", self.bucket)
def _put_bucket_cors(self):
"""Adds bucket cors configuration."""
if self.s3props['cors']['enabled'] and self.s3props['website']['enabled']:
cors_config = {}
cors_rules = []
for each_rule in self.s3props['cors']['cors_rules']:
cors_rules.append({
'AllowedHeaders': each_rule['cors_headers'],
'AllowedMethods': each_rule['cors_methods'],
'AllowedOrigins': each_rule['cors_origins'],
'ExposeHeaders': each_rule['cors_expose_headers'],
'MaxAgeSeconds': each_rule['cors_max_age']
})
cors_config = {
'CORSRules': cors_rules
}
LOG.debug(cors_config)
_response = self.s3client.put_bucket_cors(Bucket=self.bucket, CORSConfiguration=cors_config)
else:
_response = self.s3client.delete_bucket_cors(Bucket=self.bucket)
LOG.debug('Response setting up S3 CORS: %s', _response)
LOG.info('S3 CORS configuration updated')
def _put_bucket_encryption(self):
"""Adds bucket encryption configuration."""
if self.s3props['encryption']['enabled']:
encryption_config = {
'Rules': self.s3props['encryption']['encryption_rules']
}
LOG.debug(encryption_config)
_response = self.s3client.put_bucket_encryption(Bucket=self.bucket,
ServerSideEncryptionConfiguration=encryption_config)
else:
_response = self.s3client.delete_bucket_encryption(Bucket=self.bucket)
LOG.debug('Response setting up S3 encryption: %s', _response)
LOG.info('S3 encryption configuration updated')
def _put_bucket_lifecycle(self):
"""Adds bucket lifecycle configuration."""
status = 'deleted'
if self.s3props['lifecycle']['enabled']:
lifecycle_config = {
'Rules': self.s3props['lifecycle']['lifecycle_rules']
}
LOG.debug('Lifecycle Config: %s', lifecycle_config)
_response = self.s3client.put_bucket_lifecycle_configuration(Bucket=self.bucket,
LifecycleConfiguration=lifecycle_config)
status = 'applied'
else:
_response = self.s3client.delete_bucket_lifecycle(Bucket=self.bucket)
LOG.debug('Response setting up S3 lifecycle: %s', _response)
LOG.info('S3 lifecycle configuration %s', status)
def _put_bucket_logging(self):
"""Adds bucket logging policy to bucket for s3 access requests"""
logging_config = {}
if self.s3props['logging']['enabled']:
logging_config = {
'LoggingEnabled': {
'TargetBucket': self.s3props['logging']['logging_bucket'],
'TargetGrants': self.s3props['logging']['logging_grants'],
'TargetPrefix': self.s3props['logging']['logging_bucket_prefix']
}
}
_response = self.s3client.put_bucket_logging(Bucket=self.bucket, BucketLoggingStatus=logging_config)
LOG.debug('Response setting up S3 logging: %s', _response)
LOG.info('S3 logging configuration updated')
def _put_bucket_tagging(self):
"""Add bucket tags to bucket."""
all_tags = self.s3props['tagging']['tags']
all_tags.update({'app_group': self.group, 'app_name': self.app_name})
tag_set = generate_s3_tags.generated_tag_data(all_tags)
tagging_config = {'TagSet': tag_set}
self.s3client.put_bucket_tagging(Bucket=self.bucket, Tagging=tagging_config)
LOG.info("Adding tagging %s for Bucket", tag_set)
def _put_bucket_versioning(self):
"""Adds bucket versioning policy to bucket"""
status = 'Suspended'
if self.s3props['versioning']['enabled']:
status = 'Enabled'
versioning_config = {
'MFADelete': self.s3props['versioning']['mfa_delete'],
'Status': status
}
_response = self.s3client.put_bucket_versioning(Bucket=self.bucket, VersioningConfiguration=versioning_config)
LOG.debug('Response setting up S3 versioning: %s', _response)
LOG.info('S3 versioning configuration updated')
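# Illustrative usage sketch (not part of the original module); the argument values
# below are hypothetical:
#
#     s3app = S3Apps(app='myapp', env='dev', region='us-east-1', prop_path='path/to/properties')
#     s3app.create_bucket()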
| 44.814516 | 118 | 0.61517 |
4a2645096ab98e8af315d9ed1e98a43542f1e15f | 6,377 | py | Python | plot_1D.py | cclauss/loss-landscape | 35a4c9bcabdefdcaf1e3d7266da878205bfca2a0 | [
"MIT"
] | 3 | 2020-12-17T19:17:20.000Z | 2021-08-17T12:00:18.000Z | plot_1D.py | cclauss/loss-landscape | 35a4c9bcabdefdcaf1e3d7266da878205bfca2a0 | [
"MIT"
] | null | null | null | plot_1D.py | cclauss/loss-landscape | 35a4c9bcabdefdcaf1e3d7266da878205bfca2a0 | [
"MIT"
] | null | null | null | """
1D plotting routines
"""
from matplotlib import pyplot as pp
import h5py
import argparse
import numpy as np
import os
def plot_1d_loss_err(surf_file, xmin=-1.0, xmax=1.0, loss_max=5, log=False, show=False):
print('------------------------------------------------------------------')
print('plot_1d_loss_err')
print('------------------------------------------------------------------')
f = h5py.File(surf_file,'r')
print(f.keys())
x = f['xcoordinates'][:]
assert 'train_loss' in f.keys(), "'train_loss' does not exist"
train_loss = f['train_loss'][:]
train_acc = f['train_acc'][:]
print("train_loss")
print(train_loss)
print("train_acc")
print(train_acc)
xmin = xmin if xmin != -1.0 else min(x)
xmax = xmax if xmax != 1.0 else max(x)
# loss and accuracy map
fig, ax1 = pp.subplots()
ax2 = ax1.twinx()
if log:
tr_loss, = ax1.semilogy(x, train_loss, 'b-', label='Training loss', linewidth=1)
else:
tr_loss, = ax1.plot(x, train_loss, 'b-', label='Training loss', linewidth=1)
tr_acc, = ax2.plot(x, train_acc, 'r-', label='Training accuracy', linewidth=1)
if 'test_loss' in f.keys():
test_loss = f['test_loss'][:]
test_acc = f['test_acc'][:]
if log:
te_loss, = ax1.semilogy(x, test_loss, 'b--', label='Test loss', linewidth=1)
else:
te_loss, = ax1.plot(x, test_loss, 'b--', label='Test loss', linewidth=1)
te_acc, = ax2.plot(x, test_acc, 'r--', label='Test accuracy', linewidth=1)
pp.xlim(xmin, xmax)
ax1.set_ylabel('Loss', color='b', fontsize='xx-large')
ax1.tick_params('y', colors='b', labelsize='x-large')
ax1.tick_params('x', labelsize='x-large')
ax1.set_ylim(0, loss_max)
ax2.set_ylabel('Accuracy', color='r', fontsize='xx-large')
ax2.tick_params('y', colors='r', labelsize='x-large')
ax2.set_ylim(0, 100)
pp.savefig(surf_file + '_1d_loss_acc' + ('_log' if log else '') + '.pdf',
dpi=300, bbox_inches='tight', format='pdf')
# train_loss curve
pp.figure()
if log:
pp.semilogy(x, train_loss)
else:
pp.plot(x, train_loss)
pp.ylabel('Training Loss', fontsize='xx-large')
pp.xlim(xmin, xmax)
pp.ylim(0, loss_max)
pp.savefig(surf_file + '_1d_train_loss' + ('_log' if log else '') + '.pdf',
dpi=300, bbox_inches='tight', format='pdf')
# train_err curve
pp.figure()
pp.plot(x, 100 - train_acc)
pp.xlim(xmin, xmax)
pp.ylim(0, 100)
pp.ylabel('Training Error', fontsize='xx-large')
pp.savefig(surf_file + '_1d_train_err.pdf', dpi=300, bbox_inches='tight', format='pdf')
if show: pp.show()
f.close()
def plot_1d_loss_err_repeat(prefix, idx_min=1, idx_max=10, xmin=-1.0, xmax=1.0,
loss_max=5, show=False):
"""
        Plot multiple 1D loss surfaces with different directions in one figure.
"""
fig, ax1 = pp.subplots()
ax2 = ax1.twinx()
for idx in range(idx_min, idx_max + 1):
# The file format should be prefix_{idx}.h5
f = h5py.File(prefix + '_' + str(idx) + '.h5','r')
x = f['xcoordinates'][:]
train_loss = f['train_loss'][:]
train_acc = f['train_acc'][:]
test_loss = f['test_loss'][:]
test_acc = f['test_acc'][:]
xmin = xmin if xmin != -1.0 else min(x)
xmax = xmax if xmax != 1.0 else max(x)
tr_loss, = ax1.plot(x, train_loss, 'b-', label='Training loss', linewidth=1)
te_loss, = ax1.plot(x, test_loss, 'b--', label='Testing loss', linewidth=1)
tr_acc, = ax2.plot(x, train_acc, 'r-', label='Training accuracy', linewidth=1)
te_acc, = ax2.plot(x, test_acc, 'r--', label='Testing accuracy', linewidth=1)
pp.xlim(xmin, xmax)
ax1.set_ylabel('Loss', color='b', fontsize='xx-large')
ax1.tick_params('y', colors='b', labelsize='x-large')
ax1.tick_params('x', labelsize='x-large')
ax1.set_ylim(0, loss_max)
ax2.set_ylabel('Accuracy', color='r', fontsize='xx-large')
ax2.tick_params('y', colors='r', labelsize='x-large')
ax2.set_ylim(0, 100)
pp.savefig(prefix + '_1d_loss_err_repeat.pdf', dpi=300, bbox_inches='tight', format='pdf')
if show: pp.show()
def plot_1d_eig_ratio(surf_file, xmin=-1.0, xmax=1.0, val_1='min_eig', val_2='max_eig', ymax=1, show=False):
print('------------------------------------------------------------------')
print('plot_1d_eig_ratio')
print('------------------------------------------------------------------')
f = h5py.File(surf_file,'r')
x = f['xcoordinates'][:]
Z1 = np.array(f[val_1][:])
Z2 = np.array(f[val_2][:])
abs_ratio = np.absolute(np.divide(Z1, Z2))
pp.plot(x, abs_ratio)
pp.xlim(xmin, xmax)
pp.ylim(0, ymax)
pp.savefig(surf_file + '_1d_eig_abs_ratio.pdf', dpi=300, bbox_inches='tight', format='pdf')
ratio = np.divide(Z1, Z2)
pp.plot(x, ratio)
pp.xlim(xmin, xmax)
pp.ylim(0, ymax)
pp.savefig(surf_file + '_1d_eig_ratio.pdf', dpi=300, bbox_inches='tight', format='pdf')
f.close()
if show: pp.show()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Plot 1D loss and error curves')
    parser.add_argument('--surf_file', '-f', default='', help='The h5 file that contains loss values')
parser.add_argument('--log', action='store_true', default=False, help='logarithm plot')
parser.add_argument('--xmin', default=-1, type=float, help='xmin value')
parser.add_argument('--xmax', default=1, type=float, help='xmax value')
parser.add_argument('--loss_max', default=5, type=float, help='ymax value')
parser.add_argument('--show', action='store_true', default=False, help='show plots')
parser.add_argument('--prefix', default='', help='The common prefix for surface files')
parser.add_argument('--idx_min', default=1, type=int, help='min index for the surface file')
parser.add_argument('--idx_max', default=10, type=int, help='max index for the surface file')
args = parser.parse_args()
if args.prefix:
plot_1d_loss_err_repeat(args.prefix, args.idx_min, args.idx_max,
args.xmin, args.xmax, args.loss_max, args.show)
else:
plot_1d_loss_err(args.surf_file, args.xmin, args.xmax, args.loss_max, args.log, args.show)
| 37.075581 | 108 | 0.591971 |
4a26460f1709eee1e790412938d9a269e95414b1 | 15,553 | py | Python | CryostatGUI/Keithley/Keithley6221_ControlClient.py | Cryostat-GUI/Cryostat-GUI | 9b538ecaef4f1c0758907b9ee32d79ffd6793867 | [
"MIT"
] | 2 | 2018-11-23T15:59:19.000Z | 2019-01-28T20:18:58.000Z | CryostatGUI/Keithley/Keithley6221_ControlClient.py | Cryostat-GUI/Cryostat-GUI | 9b538ecaef4f1c0758907b9ee32d79ffd6793867 | [
"MIT"
] | 54 | 2018-10-16T20:03:32.000Z | 2021-11-09T09:07:03.000Z | CryostatGUI/Keithley/Keithley6221_ControlClient.py | Cryostat-GUI/Cryostat-GUI | 9b538ecaef4f1c0758907b9ee32d79ffd6793867 | [
"MIT"
] | 9 | 2018-11-04T17:37:30.000Z | 2021-05-03T21:15:33.000Z | """Module containing a class to run a Keithley 6221 Current Source in a pyqt5 application
Classes:
Keithley6221_ControlClient: a class for interfacing with a Keithley 6221 Current Source
inherits from AbstractLoopThreadClient
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# to be removed once this is packaged!
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import QTimer
# from PyQt5 import QtWidgets
from copy import deepcopy
import time
from drivers import ApplicationExit
from util import ExceptionHandling
from util import AbstractLoopThreadClient
from util import Window_trayService_ui
from util import AbstractMainApp
from datetime import datetime
from pyvisa.errors import VisaIOError
import logging
# from Keithley.Keithley6221 import Keithley6221_ethernet
from pymeasure.instruments.keithley import Keithley6221
class Keithley6221_ControlClient(AbstractLoopThreadClient):
"""Updater class for a hardware device
For each device function there is a wrapping method,
which we can call by a signal/by zmq comms. This wrapper sends
the corresponding value to the device.
    For every wrapper there is a second method which accepts
    the corresponding value and stores it, so it can be sent upon acknowledgment.
    The information from the device is collected at regular intervals (method "running"),
and subsequently published on the data upstream. It is packed in a dict,
the keys of which are displayed in the "data" dict in this class.
"""
# exposable data dictionary
data = {}
def __init__(
self, mainthread=None, comLock=None, identity="", InstrumentAddress="", **kwargs
):
super().__init__(identity=identity, **kwargs)
self.interval = 0.15
self.__name__ = "Keithley6221_control " + InstrumentAddress
self._logger = logging.getLogger(
"CryoGUI." + __name__ + "." + self.__class__.__name__
)
# -------------------------------------------------------------------------------------------------------------------------
# Interface with hardware device
self.Keithley6221 = Keithley6221(
InstrumentAddress,
read_termination="\n",
)
# -------------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# initial configurations for the hardware device
self.Current_A_storage = self.Keithley6221.source_current
self.OutputOn = self.getstatus() # 0 == OFF, 1 == ON
if self.OutputOn:
self.Current_A_value = self.Current_A_storage
else:
self.Current_A_value = 0
# if self.OutputOn:
# self.disable()
# -------------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# GUI: passing GUI interactions to the corresponding slots
# Examples:
if mainthread is not None:
self.mainthread = mainthread
mainthread.hardware_IP.setText(InstrumentAddress)
mainthread.hardware_id.setText(identity)
mainthread.spinSetCurrent_mA.valueChanged.connect(
lambda value: self.gettoset_Current_A(value * 1e-3)
)
mainthread.spinSetCurrent_mA.editingFinished.connect(self.setCurrent_A)
mainthread.pushToggleOut.clicked.connect(self.toggleCurrent)
# -------------------------------------------------------------------------------------------------------------------------
# mainthread.spin_threadinterval.valueChanged.connect(
# lambda value: self.setInterval(value)
# )
# @control_checks
@ExceptionHandling
def running(self):
"""
Try to extract all current data from LakeShore350,
and emit signal, sending the data
"""
# print('run')
self.run_finished = False
# -------------------------------------------------------------------------------------------------------------------------
# data collection for to be exposed on the data upstream
# to be stored in self.data
# example:
self.OutputOn = self.getstatus()
self.data["OutputOn"] = self.OutputOn
self.Current_A_value = self.Keithley6221.source_current
self.data["Current_A"] = self.Current_A_value
# for error in self.Keithley6221.error_gen():
# if error[0] != "0":
# self._logger.error("code:%s, message:%s", error[0], error[1].strip('"'))
self.Keithley6221.check_errors() # pymeasure writing errors to pymeasure log
self.data["realtime"] = datetime.now()
# -------------------------------------------------------------------------------------------------------------------------
self.sig_Infodata.emit(deepcopy(self.data))
self.run_finished = True
# data is being sent by the zmqClient class automatically
def sending_upstream(self):
self.data = {}
self.data["OutputOn"] = self.OutputOn
self.data["Current_A"] = self.Current_A_storage
self.set_gauges()
self.sig_Infodata.emit(deepcopy(self.data))
super().send_data_upstream()
@ExceptionHandling
def act_on_command(self, command):
"""execute commands sent on downstream"""
# -------------------------------------------------------------------------------------------------------------------------
# commands, like for adjusting a set temperature on the device
# commands are received via zmq downstream, and executed here
# examples:
if "set_Current_A" in command:
self._logger.debug(
"setting the current to %.5f A", command["set_Current_A"]
)
self.setCurrent(command["set_Current_A"])
if "set_Output" in command:
if int(command["set_Output"]) == 1:
self._logger.debug("enabling current")
self.enable()
elif int(command["set_Output"]) == 0:
self._logger.debug("disabling current")
self.disable()
else:
self._logger.warning(
"output must be 0 or 1, I received '%s'", str(command["set_Output"])
)
self.sending_upstream()
# if 'setTemp_K' in command:
# self.setTemp_K(command['setTemp_K'])
# if 'configTempLimit' in command:
# self.configTempLimit(command['configTempLimit'])
# -------------------------------------------------------------------------------------------------------------------------
@ExceptionHandling
def query_on_command(self, command):
"""execute commands sent via tcp"""
answer_dict = {}
# -------------------------------------------------------------------------------------------------------------------------
# commands, like for adjusting a set temperature on the device
# commands are received via zmq tcp, and executed here
# examples:
# if 'configTempLimit' in command:
# self.configTempLimit(command['configTempLimit'])
try:
self.act_on_command(command)
answer_dict.update(
dict(
Current_A=self.Keithley6221.source_current,
OutputOn=self.getstatus(),
)
)
answer_dict["OK"] = True
finally:
return answer_dict
# -------------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# hardware communication functions
# Examples:
def getCurrent_A(self):
"""return currently operated current value"""
return self.Current_A_value
@pyqtSlot()
@ExceptionHandling
def disable(self):
"""disable the output current"""
self.Keithley6221.source_enabled = False
self.Current_A_storage = self.Current_A_value
# for logging/application running:
self.Current_A_value = 0
# self.OutputOn = self.Keithley6221.source_enabled
self.OutputOn = False
@pyqtSlot()
@ExceptionHandling
def enable(self):
"""enable the output current"""
self.Keithley6221.source_enabled = True
# self.Current_A_value = self.Keithley6221.source_current
self.Current_A_value = self.Current_A_storage
# self.setCurrent_A()
# self.OutputOn = self.Keithley6221.source_enabled
self.OutputOn = True
@pyqtSlot()
@ExceptionHandling
def getstatus(self):
"""retrieve output current status"""
return int(self.Keithley6221.source_enabled)
@ExceptionHandling
def toggle_frontpanel(self, bools, text=None):
"""toggle frontpanel display text"""
self.Keithley6221.display_enabled = bools
@pyqtSlot()
@ExceptionHandling
def setCurrent_A(self):
"""set a previously stored value for the current"""
self.Keithley6221.source_current = self.Current_A_value
self.sending_upstream()
@pyqtSlot(float)
@ExceptionHandling
def setCurrent(self, current: float):
"""set a pass value for the current"""
if self.getstatus():
self.Current_A_value = current
self.Current_A_storage = current
self.Keithley6221.source_current = current
self.sending_upstream()
# @pyqtSlot()
# @ExceptionHandling
# def setSweep(self):
# """set a current sweep"""
# self.Keithley6221.SetupSweet(
# self.Start_Current_value, self.Step_Current_value, self.Stop_Current_value
# )
# @pyqtSlot()
# @ExceptionHandling
# def startSweep(self):
# """start a current sweep"""
# self.Keithley6221.StartSweep()
@pyqtSlot()
@ExceptionHandling
def toggleCurrent(self):
self.OutputOn = self.getstatus()
if self.OutputOn:
self.disable()
self.OutputOn = False
self.mainthread.pushToggleOut.setText("output is OFF")
else:
self.enable()
self.OutputOn = True
self.mainthread.pushToggleOut.setText("output is ON")
self.sending_upstream()
# -------------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# GUI value acceptance functions
# Examples:
@pyqtSlot(float)
def gettoset_Current_A(self, value):
"""store a current value for later usage"""
self.Current_A_value = value
self.Current_A_storage = value
# @pyqtSlot(float)
# def gettoset_Start_Current(self, value):
# """store a start current for a sweep"""
# self.Start_Current_value = value
# @pyqtSlot(float)
# def gettoset_Step_Current(self, value):
# """store a step current for a sweep"""
# self.Step_Current_value = value
# @pyqtSlot(float)
# def gettoset_Stop_Current(self, value):
# """store a stop current for a sweep"""
# self.Stop_Current_value = value
class Keithley6221GUI(AbstractMainApp, Window_trayService_ui):
"""This is the LakeShore GUI Window"""
sig_arbitrary = pyqtSignal()
sig_assertion = pyqtSignal(str)
def __init__(
self, identity=None, InstrumentAddress=None, prometheus_port=None, **kwargs
):
self._identity = identity
self._InstrumentAddress = InstrumentAddress
self._prometheus_port = prometheus_port
super().__init__(**kwargs)
self._logger = logging.getLogger(
"CryoGUI." + __name__ + "." + self.__class__.__name__
)
self.__name__ = "Keithley6221_Window"
self.controls = [self.groupSettings]
QTimer.singleShot(0, self.run_Hardware)
@pyqtSlot()
def run_Hardware(self):
"""start/stop the LakeShore350 thread"""
try:
self.getInfodata = self.running_thread_control(
Keithley6221_ControlClient(
InstrumentAddress=self._InstrumentAddress,
mainthread=self,
identity=self._identity,
prometheus_port=self._prometheus_port,
prometheus_name=self._identity,
),
"Hardware",
)
self.getInfodata.sig_Infodata.connect(self.updateGUI)
except (VisaIOError, NameError) as e:
# self.show_error_general('running: {}'.format(e))
self._logger.exception(e)
raise ApplicationExit("Could not connect to Hardware!")
def closeEvent(self, event):
while not self.getInfodata.run_finished:
time.sleep(0.1)
with self.getInfodata.lock:
del self.getInfodata.Keithley6221
super().closeEvent(event)
@pyqtSlot(dict)
def updateGUI(self, data):
"""
Store Device data in self.data, update values in GUI
"""
self.data.update(data)
# data['date'] = convert_time(time.time())
# self.store_data(data=data, device='LakeShore350')
# with self.dataLock:
# this needs to draw from the self.data so that in case one of the keys did not show up,
# since the command failed in the communication with the device,
# the last value is retained
# -----------------------------------------------------------------------------------------------------------
# update the GUI
# Examples:
# self.progressHeaterOutput_percentage.setValue(
# self.data['Heater_Output_percentage'])
# self.lcdHeaterOutput_mW.display(
# self.data['Heater_Output_mW'])
# self.lcdSetTemp_K.display(
# self.data['Temp_K'])
# # self.lcdRampeRate_Status.display(self.data['RampRate_Status'])
# self.lcdSetRampRate_Kpmin.display(
# self.data['Ramp_Rate'])
# self.comboSetInput_Sensor.setCurrentIndex(
# int(self.data['Input_Sensor']) - 1)
# self.lcdSensor1_K.display(
# self.data['Sensor_1_K'])
# self.lcdSensor2_K.display(
# self.data['Sensor_2_K'])
# self.lcdSensor3_K.display(
# self.data['Sensor_3_K'])
# self.lcdSensor4_K.display(
# self.data['Sensor_4_K'])
# -----------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
print(
"please use the program 'start_Keithley6221.py' to start communicating with this device!"
)
| 38.120098 | 135 | 0.535009 |
4a26471cdff861e5213b0af1231d84b299d25ecc | 4,482 | py | Python | doc/sphinx/source/conf.py | JarryShaw/jspcap | 9d4c980598e3c5e0af864044976f91b9b96e2e3e | [
"BSD-3-Clause"
] | 3 | 2018-01-21T15:22:21.000Z | 2018-06-22T01:27:59.000Z | doc/sphinx/source/conf.py | JarryShaw/jspcap | 9d4c980598e3c5e0af864044976f91b9b96e2e3e | [
"BSD-3-Clause"
] | null | null | null | doc/sphinx/source/conf.py | JarryShaw/jspcap | 9d4c980598e3c5e0af864044976f91b9b96e2e3e | [
"BSD-3-Clause"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import datetime
# -- Project information -----------------------------------------------------
project = 'PyPCAPKit'
copyright = f'2018-{datetime.date.today().year}, Jarry Shaw' # pylint: disable=redefined-builtin
author = 'Jarry Shaw'
# The full version, including alpha/beta/rc tags
release = '0.15.5'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc', 'sphinx.ext.autodoc.typehints',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'dictdumper': ('https://dictdumper.jarryshaw.me/en/latest/', None),
'chardet': ('https://chardet.readthedocs.io/en/latest/', None),
'dpkt': ('https://dpkt.readthedocs.io/en/latest/', None),
'scapy': ('https://scapy.readthedocs.io/en/latest/', None),
'requests': ('https://requests.readthedocs.io/en/latest/', None),
'bs4': ('https://www.crummy.com/software/BeautifulSoup/bs4/doc/', None),
}
autodoc_default_options = {
'members': True,
'member-order': 'groupwise',
'special-members': '__init__',
'undoc-members': True,
'exclude-members': '__weakref__, _abc_impl',
'ignore-module-all': True,
'private-members': True,
}
autodoc_typehints = 'description'
#autodoc_member_order = 'bysource'
#autodoc_member_order = 'alphabetic'
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = True
napoleon_use_admonition_for_notes = True
napoleon_use_admonition_for_references = True
napoleon_use_ivar = True
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
napoleon_custom_sections = None
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'show_powered_by': False,
'github_user': 'JarryShaw',
'github_repo': 'pypcapkit',
'github_banner': True,
'github_type': 'star',
#'show_related': False,
#'note_bg': '#FFF59C',
#'travis_button': True,
#'codecov_button': True,
}
def maybe_skip_member(app, what: str, name: str, obj: object, skip: bool, options: dict): # pylint: disable=unused-argument
if '_abc_impl' in name:
return True
return skip
def remove_module_docstring(app, what: str, name: str, obj: object, options: dict, lines: list): # pylint: disable=unused-argument
if what == "module" and "pcapkit" in name:
lines.clear()
def setup(app):
app.connect("autodoc-process-docstring", remove_module_docstring)
app.connect('autodoc-skip-member', maybe_skip_member)
| 33.447761 | 131 | 0.692994 |
4a2647a927ddb3c65afb470e140ef45219d4f29d | 568 | py | Python | contrib/coinkit/coinkit/__init__.py | gwangjin2/gwangcoin-core | 588e357e13c385906729d9078b796dd740745445 | [
"MIT"
] | 2 | 2017-12-12T23:20:33.000Z | 2021-04-15T23:10:38.000Z | contrib/coinkit/coinkit/__init__.py | gwangjin2/gwangcoin-core | 588e357e13c385906729d9078b796dd740745445 | [
"MIT"
] | null | null | null | contrib/coinkit/coinkit/__init__.py | gwangjin2/gwangcoin-core | 588e357e13c385906729d9078b796dd740745445 | [
"MIT"
] | 3 | 2016-04-03T18:45:17.000Z | 2021-04-15T23:30:04.000Z | # -*- coding: utf-8 -*-
"""
Coinkit
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
__version__ = '0.4.2'
from .entropy import random_secret_exponent
from .passphrase import random_256bit_passphrase, random_160bit_passphrase
from .b58check import b58check_encode, b58check_decode, b58check_unpack, \
b58check_version_byte, is_b58check
from .utils import is_secret_exponent, is_256bit_hex_string, \
is_wif_pk, is_b58check_address, extract_pk_as_int
from .wallet import SDWallet
from .keypair import * | 29.894737 | 74 | 0.764085 |
4a264819af5d3d20cbe8e6b03c43852a5b07cafd | 599 | py | Python | googleapiclient/version.py | abhi-hash256/google-api-python-client | 9c24f44980a43b4d467532f0ac05fa6a04c53758 | [
"Apache-2.0"
] | 1 | 2022-03-21T16:06:04.000Z | 2022-03-21T16:06:04.000Z | googleapiclient/version.py | hermann-wenninger/google-api-python-client | 9c24f44980a43b4d467532f0ac05fa6a04c53758 | [
"Apache-2.0"
] | null | null | null | googleapiclient/version.py | hermann-wenninger/google-api-python-client | 9c24f44980a43b4d467532f0ac05fa6a04c53758 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.41.0"
| 37.4375 | 74 | 0.75626 |
4a2649322c9803f3d4a93af2f1c3a69f141e58de | 2,535 | py | Python | examples/ping.py | awesome-security/impacket | 240e760ab543f11c913d11b1bfca26be28ed1c91 | [
"Apache-1.1"
] | 106 | 2019-03-04T21:31:47.000Z | 2021-08-13T01:51:31.000Z | examples/ping.py | awesome-security/impacket | 240e760ab543f11c913d11b1bfca26be28ed1c91 | [
"Apache-1.1"
] | null | null | null | examples/ping.py | awesome-security/impacket | 240e760ab543f11c913d11b1bfca26be28ed1c91 | [
"Apache-1.1"
] | 21 | 2019-06-13T22:57:52.000Z | 2021-12-18T11:39:01.000Z | #!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Simple ICMP ping.
#
# This implementation of ping uses the ICMP echo and echo-reply packets
# to check the status of a host. If the remote host is up, it should reply
# to the echo probe with an echo-reply packet.
# Note that this isn't a definite test, as in the case the remote host is up
# but refuses to reply the probes.
# Also note that the user must have special access to be able to open a raw
# socket, which this program requires.
#
# Authors:
# Gerardo Richarte <[email protected]>
# Javier Kohen <[email protected]>
#
# Reference for:
# ImpactPacket: IP, ICMP, DATA.
# ImpactDecoder.
import select
import socket
import time
import sys
from impacket import ImpactDecoder, ImpactPacket
if len(sys.argv) < 3:
print("Use: %s <src ip> <dst ip>" % sys.argv[0])
sys.exit(1)
src = sys.argv[1]
dst = sys.argv[2]
# Create a new IP packet and set its source and destination addresses.
ip = ImpactPacket.IP()
ip.set_ip_src(src)
ip.set_ip_dst(dst)
# Create a new ICMP packet of type ECHO.
icmp = ImpactPacket.ICMP()
icmp.set_icmp_type(icmp.ICMP_ECHO)
# Include a 156-character long payload inside the ICMP packet.
icmp.contains(ImpactPacket.Data("A"*156))
# Have the IP packet contain the ICMP packet (along with its payload).
ip.contains(icmp)
# Open a raw socket. Special permissions are usually required.
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
seq_id = 0
while 1:
# Give the ICMP packet the next ID in the sequence.
seq_id += 1
icmp.set_icmp_id(seq_id)
# Calculate its checksum.
icmp.set_icmp_cksum(0)
icmp.auto_checksum = 1
# Send it to the target host.
s.sendto(ip.get_packet(), (dst, 0))
# Wait for incoming replies.
if s in select.select([s],[],[],1)[0]:
reply = s.recvfrom(2000)[0]
# Use ImpactDecoder to reconstruct the packet hierarchy.
rip = ImpactDecoder.IPDecoder().decode(reply)
# Extract the ICMP packet from its container (the IP packet).
ricmp = rip.child()
# If the packet matches, report it to the user.
if rip.get_ip_dst() == src and rip.get_ip_src() == dst and icmp.ICMP_ECHOREPLY == ricmp.get_icmp_type():
print("Ping reply for sequence #%d" % ricmp.get_icmp_id())
time.sleep(1)
| 28.806818 | 108 | 0.729783 |
4a26493b80cd43970fd57f6d34cb57328a1c4da6 | 888 | py | Python | test/test_patch_apply.py | vishwas1234567/tensorflow-determinism | ed01f5f34951aa113fb5907570ba6df98ebf9d8a | [
"Apache-2.0"
] | null | null | null | test/test_patch_apply.py | vishwas1234567/tensorflow-determinism | ed01f5f34951aa113fb5907570ba6df98ebf9d8a | [
"Apache-2.0"
] | null | null | null | test/test_patch_apply.py | vishwas1234567/tensorflow-determinism | ed01f5f34951aa113fb5907570ba6df98ebf9d8a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
sys.path.append('..')
from tfdeterminism import patch
patch()
| 32.888889 | 74 | 0.71509 |
4a26495153891cc7fe22bfc89542a395b4db312b | 1,728 | py | Python | pidal/lib/snowflake.py | pi-plan/pidal | bfd1b9c4de87bc92565acbcff108270265757e39 | [
"BSD-3-Clause"
] | 6 | 2021-02-05T04:21:00.000Z | 2021-11-29T06:46:21.000Z | pidal/lib/snowflake.py | pi-plan/pidal | bfd1b9c4de87bc92565acbcff108270265757e39 | [
"BSD-3-Clause"
] | 1 | 2021-11-30T06:08:53.000Z | 2021-11-30T06:08:53.000Z | pidal/lib/snowflake.py | pi-plan/pidal | bfd1b9c4de87bc92565acbcff108270265757e39 | [
"BSD-3-Clause"
] | null | null | null | import time
import logging
from typing import Generator
log = logging.getLogger(__name__)
# custom epoch (in milliseconds) that generated timestamps are measured against
twepoch = 1609430400000
# bit widths of the worker-id and data-center-id fields
worker_id_bits = 5
data_center_id_bits = 5
max_worker_id = -1 ^ (-1 << worker_id_bits)
max_data_center_id = -1 ^ (-1 << data_center_id_bits)
# width of the per-millisecond sequence counter
sequence_bits = 12
# bit offsets of each field inside the generated id:
# timestamp | data-center id | worker id | sequence
worker_id_shift = sequence_bits
data_center_id_shift = sequence_bits + worker_id_bits
timestamp_left_shift = sequence_bits + worker_id_bits + data_center_id_bits
sequence_mask = -1 ^ (-1 << sequence_bits)
def snowflake_to_timestamp(_id):
_id = _id >> 22
_id += twepoch
_id = _id / 1000
return _id
def snowflake(worker_id, data_center_id, sleep=lambda x: time.sleep(x/1000.0))\
-> Generator[int, None, None]:
assert worker_id >= 0 and worker_id <= max_worker_id
assert data_center_id >= 0 and data_center_id <= max_data_center_id
last_timestamp = -1
sequence = 0
while True:
timestamp = int(time.time()*1000)
if last_timestamp > timestamp:
log.warning(
"clock is moving backwards. waiting until %i" % last_timestamp)
sleep(last_timestamp-timestamp)
continue
if last_timestamp == timestamp:
sequence = (sequence + 1) & sequence_mask
if sequence == 0:
log.warning("sequence overrun")
sequence = -1 & sequence_mask
sleep(1)
continue
else:
sequence = 0
last_timestamp = timestamp
yield (
((timestamp-twepoch) << timestamp_left_shift) |
(data_center_id << data_center_id_shift) |
(worker_id << worker_id_shift) |
sequence)
generator = snowflake(1, 1)
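# Illustrative usage sketch (not part of the original module): ``generator`` above is an
# infinite generator, so IDs are drawn one at a time with ``next``; the creation time in
# unix seconds can be recovered with ``snowflake_to_timestamp``.
#
#     new_id = next(generator)
#     created_at = snowflake_to_timestamp(new_id)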
| 26.181818 | 79 | 0.632523 |
4a264a2a68426f09fc7e7cac5dc0cf2dc55bdfa6 | 437 | py | Python | AP1/calculadora.py | GiovannaPazello/Projetos-em-Python | 3cf7edbdf2a2350605a775389f7fe2cc7fe8032e | [
"MIT"
] | null | null | null | AP1/calculadora.py | GiovannaPazello/Projetos-em-Python | 3cf7edbdf2a2350605a775389f7fe2cc7fe8032e | [
"MIT"
] | null | null | null | AP1/calculadora.py | GiovannaPazello/Projetos-em-Python | 3cf7edbdf2a2350605a775389f7fe2cc7fe8032e | [
"MIT"
] | null | null | null | print('Calculadora')
nro1 = int(input("Digite o primeiro número: "))
nro2 = int(input("Digite o segundo número: "))
print("Menu")
print("1 - somar")
print("2 - subtrair")
print("3 - multiplicar")
print("4 - divisao")
operacao = int(input("Digite a opcao que deseja: "))
if operacao == 1:
soma = nro1 + nro2
print("resultado: " + str(soma))
if operacao == 2:
    subtracao = nro1 - nro2
    print("resultado: " + str(subtracao))
if operacao == 3:
    multiplicacao = nro1 * nro2
    print("resultado: " + str(multiplicacao))
if operacao == 4:
    divisao = nro1 / nro2
    print("resultado: " + str(divisao)) | 25.705882 | 52 | 0.640732 |
4a264af03807760c35bfb1e63787ac28bcc9f51f | 4,556 | py | Python | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_02_01_preview/models/_paged_models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_02_01_preview/models/_paged_models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_02_01_preview/models/_paged_models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class RegistryPaged(Paged):
"""
A paging container for iterating over a list of :class:`Registry <azure.mgmt.containerregistry.v2018_02_01_preview.models.Registry>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Registry]'}
}
def __init__(self, *args, **kwargs):
super(RegistryPaged, self).__init__(*args, **kwargs)
class OperationDefinitionPaged(Paged):
"""
A paging container for iterating over a list of :class:`OperationDefinition <azure.mgmt.containerregistry.v2018_02_01_preview.models.OperationDefinition>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[OperationDefinition]'}
}
def __init__(self, *args, **kwargs):
super(OperationDefinitionPaged, self).__init__(*args, **kwargs)
class ReplicationPaged(Paged):
"""
A paging container for iterating over a list of :class:`Replication <azure.mgmt.containerregistry.v2018_02_01_preview.models.Replication>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Replication]'}
}
def __init__(self, *args, **kwargs):
super(ReplicationPaged, self).__init__(*args, **kwargs)
class WebhookPaged(Paged):
"""
A paging container for iterating over a list of :class:`Webhook <azure.mgmt.containerregistry.v2018_02_01_preview.models.Webhook>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Webhook]'}
}
def __init__(self, *args, **kwargs):
super(WebhookPaged, self).__init__(*args, **kwargs)
class EventPaged(Paged):
"""
A paging container for iterating over a list of :class:`Event <azure.mgmt.containerregistry.v2018_02_01_preview.models.Event>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Event]'}
}
def __init__(self, *args, **kwargs):
super(EventPaged, self).__init__(*args, **kwargs)
class BuildPaged(Paged):
"""
A paging container for iterating over a list of :class:`Build <azure.mgmt.containerregistry.v2018_02_01_preview.models.Build>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Build]'}
}
def __init__(self, *args, **kwargs):
super(BuildPaged, self).__init__(*args, **kwargs)
class BuildStepPaged(Paged):
"""
A paging container for iterating over a list of :class:`BuildStep <azure.mgmt.containerregistry.v2018_02_01_preview.models.BuildStep>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[BuildStep]'}
}
def __init__(self, *args, **kwargs):
super(BuildStepPaged, self).__init__(*args, **kwargs)
class BuildArgumentPaged(Paged):
"""
A paging container for iterating over a list of :class:`BuildArgument <azure.mgmt.containerregistry.v2018_02_01_preview.models.BuildArgument>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[BuildArgument]'}
}
def __init__(self, *args, **kwargs):
super(BuildArgumentPaged, self).__init__(*args, **kwargs)
class BuildTaskPaged(Paged):
"""
A paging container for iterating over a list of :class:`BuildTask <azure.mgmt.containerregistry.v2018_02_01_preview.models.BuildTask>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[BuildTask]'}
}
def __init__(self, *args, **kwargs):
super(BuildTaskPaged, self).__init__(*args, **kwargs)
| 34.515152 | 165 | 0.624451 |
4a264b41d715f0e83f4446389d82e552b97bc536 | 2,084 | py | Python | src/games/mcts/MctsNode.py | flegac/power4 | f178dd225f4b859d4292b134b6651bf119a45c30 | [
"Apache-2.0"
] | null | null | null | src/games/mcts/MctsNode.py | flegac/power4 | f178dd225f4b859d4292b134b6651bf119a45c30 | [
"Apache-2.0"
] | null | null | null | src/games/mcts/MctsNode.py | flegac/power4 | f178dd225f4b859d4292b134b6651bf119a45c30 | [
"Apache-2.0"
] | null | null | null | from math import sqrt, log2
from src.games.state.State import State
class MctsNode:
def __init__(self, state: State, parent=None) -> None:
self.parent = parent
self.depth = parent.depth + 1 if parent else 0
self.state = state
self.score = 0
self.games = 0
self._children = None
def best_action(self):
side_factor = 1 if self.state.board.current_turn % 2 == 0 else -1
children = self.children()
def action_value(action):
return side_factor * children[action].exploitation_score()
return max(children.keys(), key=action_value)
def children(self):
if self._children is None:
next_states = {a: self.state.next(a) for a in self.state.actions()}
self._children = {a: MctsNode(state=next_states[a], parent=self) for a in next_states}
return self._children
def side_exploitation_score(self):
side = -1 if self.state.board.current_turn % 2 == 0 else 1
return side * self.exploitation_score()
def exploitation_score(self):
# terminal states scores are certain (so score is better)
if self.state.is_terminal:
return self.state.terminal_result
# unknown states are like a draw
if self.games == 0:
return 0
# exploitation score computing
return self.score / self.games
def exploration_score(self):
return sqrt(log2(self.parent.games) / max(self.games, 1)) if self.parent else 0
def __repr__(self) -> str:
parent_games = self.parent.games if self.parent else 0
return '[depth={}, score={}, exploration={} ({}/{})]'.format(self.depth,
int(100 * self.exploitation_score()),
int(100 * self.exploration_score()),
self.games,
parent_games)
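# Illustrative sketch (not part of the original file): a UCT-style selection rule could
# combine the two scores defined above, e.g. with a hypothetical exploration constant C:
#
#     def uct_value(node, C=1.4):
#         return node.side_exploitation_score() + C * node.exploration_score()
#
# ``best_action`` above uses only the exploitation term.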
| 36.561404 | 106 | 0.551344 |
4a264ca58a0b075a92fef7b1bb2b1863de5f730a | 1,526 | py | Python | predict.py | ArasAzimi/ai2r | 060a95f2091b3c93c17cce4c65da46e0aa792849 | [
"MIT"
] | 5 | 2018-12-18T02:14:38.000Z | 2021-09-09T01:19:22.000Z | predict.py | ArasAzimi/ai2r | 060a95f2091b3c93c17cce4c65da46e0aa792849 | [
"MIT"
] | 1 | 2019-01-14T01:57:21.000Z | 2019-01-14T01:57:21.000Z | predict.py | ArasAzimi/ai2r | 060a95f2091b3c93c17cce4c65da46e0aa792849 | [
"MIT"
] | null | null | null | from src.predict import Predict
def main():
import cv2
import argparse
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
args = vars(ap.parse_args())
image = args["image"]
out_dir = 'out/aircrafts'
# check if any model exists?
    # Check if model and label files exist
print("-- Available trained models --")
    # List the sub-directories in the out directory:
files = os.listdir(out_dir)
for name, index in enumerate(files):
print(str(name)+": "+index)
user_model_choice = input("Choose a model for prediction (i.e., 0) > ")
type(user_model_choice)
model = files[int(user_model_choice)].split('_e')[0]
model_file = out_dir+'/'+files[int(user_model_choice)]+'/aircrafts.model'
label_file = out_dir+'/'+files[int(user_model_choice)]+'/aircrafts_lbls.pickle'
config_file = 'config.json'
obj = Predict(image, model, model_file, label_file, config_file=config_file)
res = obj.prediction()
label = res['label']
percent = res['percent']
(B, G, R) = (0,0,0)
if percent<85: # print in Red
(B, G, R) = (0,0,255)
elif percent>95: # print in Green
(B, G, R) = (0,255,0)
else: # print in Yellow
(B, G, R) =(0,255,255)
legend = "Detected: {}".format(label)
image_original = cv2.imread(image)
cv2.putText(image_original, legend, (20, 40), cv2.FONT_HERSHEY_DUPLEX, 0.7, (B, G, R), 2)
cv2.imshow("Image", image_original)
cv2.waitKey(0)
if __name__ == "__main__":
main()
| 28.259259 | 90 | 0.686763 |
4a264cc6ff1c1126add8710c99179801f212c6cf | 15,576 | py | Python | src/sparseml/pytorch/optim/modifier_quantization.py | natuan/sparseml | 299fbf87ab31a642f363dde4d403acdc38d96bc4 | [
"Apache-2.0"
] | 1 | 2021-06-03T09:16:09.000Z | 2021-06-03T09:16:09.000Z | src/sparseml/pytorch/optim/modifier_quantization.py | natuan/sparseml | 299fbf87ab31a642f363dde4d403acdc38d96bc4 | [
"Apache-2.0"
] | null | null | null | src/sparseml/pytorch/optim/modifier_quantization.py | natuan/sparseml | 299fbf87ab31a642f363dde4d403acdc38d96bc4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Modifier for models through quantization aware training.
PyTorch version must support quantization (>=1.2, ONNX export support introduced in 1.7)
"""
from typing import Any, Dict, List, Optional, Union
from torch.nn import Module
from torch.optim.optimizer import Optimizer
try:
from torch import quantization as torch_quantization
from torch.nn import intrinsic as torch_intrinsic
except Exception:
torch_quantization = None
torch_intrinsic = None
from sparseml.optim import ModifierProp
from sparseml.pytorch.optim.modifier import PyTorchModifierYAML, ScheduledModifier
from sparseml.pytorch.utils import BaseLogger
from sparseml.pytorch.utils.quantization import (
add_quant_dequant,
fuse_module_conv_bn_relus,
get_qat_qconfig,
)
__all__ = [
"QuantizationModifier",
]
@PyTorchModifierYAML()
class QuantizationModifier(ScheduledModifier):
"""
Enables quantization aware training (QAT) for a given module or its submodules
After the start epoch, the specified module(s)' forward pass will emulate
quantized execution and the modifier will be enabled until training is completed.
| Sample yaml:
| !QuantizationModifier
| start_epoch: 0.0
| submodules: ['blocks.0', 'blocks.2']
| model_fuse_fn_name: 'fuse_module'
| disable_quantization_observer_epoch: 2.0
| freeze_bn_stats_epoch: 3.0
:param start_epoch: The epoch to start the modifier at
:param submodules: List of submodule names to perform QAT on. Leave None to quantize
entire model. Default is None
:param model_fuse_fn_name: Name of model function to fuse the model in place prior
to performing QAT. Set as 'no_fuse' to skip module fusing. Leave None to use
the default function `sparseml.pytorch.utils.fuse_module_conv_bn_relus`.
Default is None
:param disable_quantization_observer_epoch: Epoch to disable updates to the module's
quantization observers. After this point, quantized weights and zero points will
not be updated. Leave None to not disable observers during QAT. Default is None
:param freeze_bn_stats_epoch: Epoch to stop the tracking of batch norm stats. Leave
None to not stop tracking batch norm stats during QAT. Default is None
:param end_epoch: Disabled, setting to anything other than -1 will raise an
exception. For compatibility with YAML serialization only.
:param model_fuse_fn_kwargs: dictionary of keyword argument values to be passed
to the model fusing function
"""
def __init__(
self,
start_epoch: float = -1.0,
submodules: Union[List[str], None] = None,
model_fuse_fn_name: Union[str, None] = None,
disable_quantization_observer_epoch: Union[float, None] = None,
freeze_bn_stats_epoch: Union[float, None] = None,
end_epoch: float = -1,
model_fuse_fn_kwargs: Dict[str, Any] = None,
):
if torch_quantization is None or torch_intrinsic is None:
raise RuntimeError(
"Unable to import package torch.quantization and/or "
"torch.nn.intrinsic. "
"Try upgrading your PyTorch version to use the QuantizationModifier."
)
if end_epoch != -1:
raise ValueError(
"end_epoch is disabled for QuantizationModifier and can only be set to"
" -1. Given {}".format(end_epoch)
)
super().__init__(start_epoch=start_epoch, end_epoch=-1.0, end_comparator=-1)
self._start_epoch = start_epoch
self._submodules = submodules
self._model_fuse_fn_name = model_fuse_fn_name
self._model_fuse_fn_kwargs = model_fuse_fn_kwargs or {}
self._disable_quantization_observer_epoch = disable_quantization_observer_epoch
self._freeze_bn_stats_epoch = freeze_bn_stats_epoch
self._modules_to_quantize = None
self._qat_enabled = False
self._quantization_observer_disabled = False
self._bn_stats_frozen = False
if (
isinstance(self._model_fuse_fn_name, str)
and self._model_fuse_fn_name.lower() == "none"
):
self._model_fuse_fn_name = None
if isinstance(self._submodules, list):
self._submodules = set(self._submodules)
self._validate_params()
@ModifierProp()
def submodules(self) -> Union[List[str], None]:
"""
:return: List of submodule names to perform QAT on. None quantizes the entire
model
"""
return list(self._submodules) if self._submodules is not None else None
@submodules.setter
def submodules(self, value: Union[List[str], None]):
"""
:params value: List of submodule names to perform QAT on. Set None to quantize
entire model
"""
self._submodules = value
if isinstance(self._submodules, list):
self._submodules = set(self._submodules)
self._validate_params()
@ModifierProp()
def model_fuse_fn_name(self) -> Union[str, None]:
"""
:return: Name of model function to fuse the model in place prior
        to performing QAT. None to use the default function
`sparseml.pytorch.utils.fuse_module_conv_bn_relus`.
"""
return self._model_fuse_fn_name
@model_fuse_fn_name.setter
def model_fuse_fn_name(self, value: Union[str, None]):
"""
:params value: Name of model function to fuse the model in place prior
to performing QAT. Set None to use the default function
`sparseml.pytorch.utils.fuse_module_conv_bn_relus`. Set as 'no_fuse'
to skip module fusing.
"""
self._model_fuse_fn_name = value
if (
isinstance(self._model_fuse_fn_name, str)
and self._model_fuse_fn_name.lower() == "none"
):
self._model_fuse_fn_name = None
self._validate_params()
@ModifierProp()
def disable_quantization_observer_epoch(self) -> Union[float, None]:
"""
:return: Epoch to disable updates to the module's
quantization observers. After this point, quantized weights and zero points will
        not be updated. When None, observers are never disabled during QAT
"""
return self._disable_quantization_observer_epoch
@disable_quantization_observer_epoch.setter
def disable_quantization_observer_epoch(self, value: Union[float, None]):
"""
        :param value: Epoch to disable updates to the module's
quantization observers. After this point, quantized weights and zero points will
not be updated. Set None to not disable observers during QAT
"""
self._disable_quantization_observer_epoch = value
self._validate_params()
@ModifierProp()
def freeze_bn_stats_epoch(self) -> Union[float, None]:
"""
:return: Epoch to stop the tracking of batch norm stats. When
        None, batch norm stats are tracked for all of training
"""
return self._freeze_bn_stats_epoch
@freeze_bn_stats_epoch.setter
def freeze_bn_stats_epoch(self, value: Union[float, None]):
"""
        :param value: Epoch to stop the tracking of batch norm stats. Set
None to not stop tracking batch norm stats during QAT
"""
self._freeze_bn_stats_epoch = value
self._validate_params()
def initialize(
self,
module: Module,
epoch: float = 0,
loggers: Optional[List[BaseLogger]] = None,
**kwargs,
):
"""
Grab the module / submodule to perform QAT on
:param module: the PyTorch model/module to modify
:param epoch: The epoch to initialize the modifier and module at.
Defaults to 0 (start of the training process)
:param loggers: Optional list of loggers to log the modification process to
:param kwargs: Optional kwargs to support specific arguments
for individual modifiers.
"""
super().initialize(module, epoch, loggers, **kwargs)
self._modules_to_quantize = []
if self._submodules is not None:
found_submodules = []
for name, submodule in module.named_modules():
if name in self._submodules:
self._modules_to_quantize.append(submodule)
found_submodules.append(name)
if not len(found_submodules) == len(self._submodules):
raise RuntimeError(
"Could not find all provided submodules to quantize"
"given: {}, found: {}".format(
list(self._submodules), found_submodules
)
)
else:
self._modules_to_quantize.append(module)
self._check_quantization_update(module, epoch, steps_per_epoch=0)
def finalize(
self, module: Optional[Module] = None, reset_loggers: bool = True, **kwargs
):
"""
Cleans up any state
:param module: The model/module to finalize the modifier for.
Marked optional so state can still be cleaned up on delete,
but generally should always be passed in.
:param reset_loggers: True to remove any currently attached loggers (default),
False to keep the loggers attached.
:param kwargs: Optional kwargs to support specific arguments
for individual modifiers.
"""
super().finalize(module, reset_loggers, **kwargs)
self._modules_to_quantize = None
def update(
self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int
):
"""
If start_pending(), fuses the model, sets the model quantization config,
calls torch.quantization.prepare_qat on the model to begin QAT
        If end_pending(), updates the module's layer params to their original
trainable state.
:param module: module to modify
:param optimizer: optimizer to modify
:param epoch: current epoch and progress within the current epoch
:param steps_per_epoch: number of steps taken within each epoch
(calculate batch number using this and epoch)
"""
super().update(module, optimizer, epoch, steps_per_epoch)
self._check_quantization_update(module, epoch, steps_per_epoch)
def update_ready(self, epoch: float, steps_per_epoch: int) -> bool:
"""
:param epoch: current epoch and progress within the current epoch
:param steps_per_epoch: number of steps taken within each epoch
(calculate batch number using this and epoch)
:return: True if the modifier is pending an update and update() should be called
"""
if not self._initialized:
raise RuntimeError("modifier must be initialized first")
if not self._enabled:
return False
pending = (
self.start_pending(epoch, steps_per_epoch)
or self._disable_quantization_observer_update_ready(epoch)
or self._freeze_bn_stats_update_ready(epoch)
)
return pending
def _check_quantization_update(
self, module: Module, epoch: float, steps_per_epoch: int
):
if self.start_pending(epoch, steps_per_epoch) and not self._qat_enabled:
self._enable_module_qat(module)
if self._disable_quantization_observer_update_ready(epoch):
for quant_module in self._modules_to_quantize:
quant_module.apply(torch_quantization.disable_observer)
self._quantization_observer_disabled = True
if self._freeze_bn_stats_update_ready(epoch):
for quant_module in self._modules_to_quantize:
quant_module.apply(torch_intrinsic.qat.freeze_bn_stats)
self._bn_stats_frozen = True
def _enable_module_qat(self, module: Module):
# fuse module Conv-BNs
if (
self._model_fuse_fn_name is not None
and self._model_fuse_fn_name != "no_fuse"
): # module class fn
module_fuse_fn = getattr(module, self._model_fuse_fn_name, None)
if module_fuse_fn is None or not callable(module_fuse_fn):
raise ValueError(
"Invalid model_fuse_fn_name. "
"Module has no callable function {}".format(
self._model_fuse_fn_name
)
)
module_fuse_fn(**self._model_fuse_fn_kwargs)
elif self._model_fuse_fn_name is None: # default auto fn
self._model_fuse_fn_kwargs["inplace"] = True
fuse_module_conv_bn_relus(module, **self._model_fuse_fn_kwargs)
# prepare each module / submodule for quantization
qconfig = get_qat_qconfig()
for quant_module in self._modules_to_quantize:
# set quantization config (asymmetric activations, symmetric weights)
quant_module.qconfig = qconfig
            # wrap all conv / linear blocks with quantization observers
torch_quantization.propagate_qconfig_(quant_module)
add_quant_dequant(quant_module)
# set model to QAT mode
torch_quantization.prepare_qat(quant_module, inplace=True)
self._qat_enabled = True
def _disable_quantization_observer_update_ready(self, epoch: float) -> bool:
return (
self._disable_quantization_observer_epoch is not None
and epoch >= self._disable_quantization_observer_epoch
and not self._quantization_observer_disabled
)
def _freeze_bn_stats_update_ready(self, epoch: float) -> bool:
return (
self._freeze_bn_stats_epoch is not None
and epoch >= self._freeze_bn_stats_epoch
and not self._bn_stats_frozen
)
def _validate_params(self):
if (
self._disable_quantization_observer_epoch is not None
and self._disable_quantization_observer_epoch < self._start_epoch
):
raise ValueError(
f"disable_quantization_observer_epoch may not be greater than "
f"start_epoch for QuantizationModifier, received: "
f"{self._disable_quantization_observer_epoch} with start_epoch "
f"{self._start_epoch}"
)
if (
self._freeze_bn_stats_epoch is not None
and self._freeze_bn_stats_epoch < self._start_epoch
):
raise ValueError(
"freeze_bn_stats_epoch may not be greater than start_epoch"
" for QuantizationModifier, received: {} with start_epoch {}".format(
self._freeze_bn_stats_epoch, self._start_epoch
)
)
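if __name__ == "__main__":
    # A minimal construction sketch (hypothetical usage): it assumes the class above
    # is named QuantizationModifier, as its error messages indicate, and uses
    # illustrative epoch values and hypothetical submodule names.
    example_modifier = QuantizationModifier(
        start_epoch=0.0,
        submodules=["sections.0", "sections.1"],
        disable_quantization_observer_epoch=2.0,
        freeze_bn_stats_epoch=1.0,
    )
    # The properties defined above expose the validated settings.
    print(example_modifier.submodules)
    print(example_modifier.disable_quantization_observer_epoch)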
| 40.352332 | 88 | 0.657935 |
4a264d7376edb50f9a137afdcee090824e6397da | 2,496 | py | Python | moocng/decorators.py | OpenMOOC/moocng | 1e3dafb84aa1838c881df0c9bcca069e47c7f52d | [
"Apache-2.0"
] | 36 | 2015-01-10T06:00:36.000Z | 2020-03-19T10:06:59.000Z | moocng/decorators.py | OpenMOOC/moocng | 1e3dafb84aa1838c881df0c9bcca069e47c7f52d | [
"Apache-2.0"
] | 3 | 2015-10-01T17:59:32.000Z | 2018-09-04T03:32:17.000Z | moocng/decorators.py | OpenMOOC/moocng | 1e3dafb84aa1838c881df0c9bcca069e47c7f52d | [
"Apache-2.0"
] | 17 | 2015-01-13T03:46:58.000Z | 2020-07-05T06:29:51.000Z |
# -*- coding: utf-8 -*-
# Copyright 2012-2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urlparse
from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.utils.decorators import available_attrs
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""Saner version of django.contrib.auth.decorators.user_passes_test.
    The problem with Django's version of this function is that if the user
    does not pass the test they get redirected to the LOGIN_URL, even if
    they are already logged in. In that case, they should get a
    Forbidden (403) error instead.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
# This is different from the Django version
# if the user is already logged in, we return a 403 error response
if request.user.is_authenticated():
raise PermissionDenied
path = request.build_absolute_uri()
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse.urlparse(login_url or
settings.LOGIN_URL)[:2]
current_scheme, current_netloc = urlparse.urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
return redirect_to_login(path, login_url, redirect_field_name)
return _wrapped_view
return decorator
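# A minimal usage sketch (the view below is hypothetical): wrap a Django view so
# that only staff users may access it; an authenticated non-staff user gets a
# 403 response instead of being redirected to the login page.
@user_passes_test(lambda u: u.is_staff)
def example_staff_only_view(request):
    from django.http import HttpResponse
    return HttpResponse('staff only')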
| 43.034483 | 89 | 0.692708 |
4a264fb00faf3418a333ea50aa3e6ba3019b7a1b | 2,602 | py | Python | src/dynamic-route-agent/dra/introspect_service.py | khappucino/global-traffic-management | 81a80ae86c9119066c92df0727554cb5b61f9899 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/dynamic-route-agent/dra/introspect_service.py | khappucino/global-traffic-management | 81a80ae86c9119066c92df0727554cb5b61f9899 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/dynamic-route-agent/dra/introspect_service.py | khappucino/global-traffic-management | 81a80ae86c9119066c92df0727554cb5b61f9899 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2018 Nordstrom, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import requests
from .constants import MESSAGE
AWS_METADATA_URI = 'http://169.254.169.254/latest/dynamic/instance-identity/document'
ACCOUNT_ID_KEY = 'accountId'
class Introspect_Service:
"""
    This class is used to fetch the account_id of the AWS account this instance is running in
"""
def __init__(self, requests_service, logger, call_timeout):
self.requests_service = requests_service
self.logger = logger
self.call_timeout = call_timeout
def get_account_id(self):
result = None
try:
req = self.requests_service.get(AWS_METADATA_URI, timeout=self.call_timeout)
if req.status_code == 200:
json_payload = req.json()
if ACCOUNT_ID_KEY in json_payload:
result = json_payload[ACCOUNT_ID_KEY]
else:
if self.logger is not None:
log_val = 'there was a Request Non-200 issue with the introspecting accountId'
self.logger.info({MESSAGE:log_val})
except self.requests_service.exceptions.RequestException as e:
if self.logger is not None:
log_val = 'there was a RequestException issue with the introspecting accountId'
self.logger.info({MESSAGE:log_val})
except ValueError:
if self.logger is not None:
log_val = 'there was a ValueError issue with the introspecting accountId'
self.logger.info({MESSAGE:log_val})
except:
if self.logger is not None:
e = sys.exc_info()[0]
                log_val = 'there was a Generic Exception %s issue with the introspecting accountId' % (e)
self.logger.info({MESSAGE:log_val})
finally:
return result
#------------------------------
# Built in helpful test drive
#------------------------------
if __name__ == "__main__":
subject = Introspect_Service(requests, None, 5)
print(subject.get_account_id())
| 39.424242 | 106 | 0.642198 |
4a26507f0088ee524235e93e4e971112fb3d2911 | 600 | py | Python | api/filters.py | amirh-moshfeghi/kudo_cards | 65167e1d09051bc6172e07b01c40ea2765b5a277 | [
"Linux-OpenIB"
] | null | null | null | api/filters.py | amirh-moshfeghi/kudo_cards | 65167e1d09051bc6172e07b01c40ea2765b5a277 | [
"Linux-OpenIB"
] | null | null | null | api/filters.py | amirh-moshfeghi/kudo_cards | 65167e1d09051bc6172e07b01c40ea2765b5a277 | [
"Linux-OpenIB"
] | null | null | null |
from django_filters import rest_framework as filters
from .models import Kudo
# We create filters for each field we want to be able to filter on
class KudoFilter(filters.FilterSet):
title = filters.CharFilter(lookup_expr='icontains')
year = filters.NumberFilter()
year__gt = filters.NumberFilter(field_name='year', lookup_expr='gt')
year__lt = filters.NumberFilter(field_name='year', lookup_expr='lt')
creator__email = filters.CharFilter(lookup_expr='icontains')
class Meta:
model = Kudo
fields = ['title', 'year', 'year__gt', 'year__lt', 'creator__email']
| 37.5 | 76 | 0.726667 |
4a26515272d21dfbb96b944f42a77d9f57f5b8cf | 1,790 | py | Python | var/spack/repos/builtin/packages/libspatialite/package.py | klevzoff/spack | 396936d24173254ecf4148bc460702185e4c99e5 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-10T13:47:48.000Z | 2019-04-17T13:05:17.000Z | var/spack/repos/builtin/packages/libspatialite/package.py | klevzoff/spack | 396936d24173254ecf4148bc460702185e4c99e5 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 17 | 2019-03-21T15:54:00.000Z | 2022-03-29T19:34:28.000Z | var/spack/repos/builtin/packages/libspatialite/package.py | Kerilk/spack | e027942b55407a4a5fe323b93d8e57200c873a43 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2018-04-06T09:04:11.000Z | 2020-01-24T12:52:12.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Libspatialite(AutotoolsPackage):
"""SpatiaLite is an open source library intended to extend the
SQLite core to support fully fledged Spatial SQL capabilities."""
homepage = "http://www.gaia-gis.it"
url = "http://www.gaia-gis.it/gaia-sins/libspatialite-sources/libspatialite-4.3.0a.tar.gz"
manual_download = True
version('5.0.0', preferred=True, sha256='7b7fd70243f5a0b175696d87c46dde0ace030eacc27f39241c24bac5dfac6dac')
# Must download manually from:
# https://www.gaia-gis.it/fossil/libspatialite/info/c7f67038bf06d98d
# For instructions on the file:// below..
# https://github.com/spack/spack/issues/2489
version('5.0.0.2.c7f67038bf',
sha256='f8100f71b769c7db066c6f938af6b00e920e4b90ac14c00a4f3ed7171565caab',
url="file://%s/SpatiaLite-c7f67038bf.tar.gz" % os.getcwd())
version('5.0.0-beta0', sha256='caacf5378a5cfab9b8e98bb361e2b592e714e21f5c152b795df80d0ab1da1c42')
version('4.3.0a',
sha256='88900030a4762904a7880273f292e5e8ca6b15b7c6c3fb88ffa9e67ee8a5a499')
version('3.0.1', sha256='4983d6584069fd5ff0cfcccccee1015088dab2db177c0dc7050ce8306b68f8e6')
depends_on('pkgconfig', type='build')
depends_on('sqlite+rtree')
depends_on('proj@:5', when='@:4.999.999')
# PROJ.6 is OK w/ newer versions
# https://www.gaia-gis.it/fossil/libspatialite/wiki?name=PROJ.6
depends_on('proj')
depends_on('geos')
depends_on('freexl')
depends_on('iconv')
depends_on('libxml2')
depends_on('minizip', when='@5.0.0:')
| 41.627907 | 111 | 0.718994 |
4a2651957a34fe8f8f4c453104b8270818a266ee | 13,026 | py | Python | official/cv/yolov3_resnet18/src/dataset.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | official/cv/yolov3_resnet18/src/dataset.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | official/cv/yolov3_resnet18/src/dataset.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""YOLOv3 dataset"""
from __future__ import division
import os
import platform
import numpy as np
from PIL import Image
import mindspore.dataset as de
from mindspore.mindrecord import FileWriter
import mindspore.dataset.vision.c_transforms as C
from src.config import ConfigYOLOV3ResNet18
iter_cnt = 0
_NUM_BOXES = 50
def preprocess_fn(image, box, is_training):
"""Preprocess function for dataset."""
config_anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 163, 326]
anchors = np.array([float(x) for x in config_anchors]).reshape(-1, 2)
max_boxes = 20
num_classes = ConfigYOLOV3ResNet18.num_classes
def _preprocess_true_boxes(true_boxes, anchors, in_shape=None):
"""Get true boxes."""
num_layers = anchors.shape[0] // 3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(in_shape, dtype='int32')
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2.
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]
grid_shapes = [input_shape // 32, input_shape // 16, input_shape // 8]
y_true = [np.zeros((grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]),
5 + num_classes), dtype='float32') for l in range(num_layers)]
anchors = np.expand_dims(anchors, 0)
anchors_max = anchors / 2.
anchors_min = -anchors_max
valid_mask = boxes_wh[..., 0] >= 1
wh = boxes_wh[valid_mask]
if len(wh) >= 1:
wh = np.expand_dims(wh, -2)
boxes_max = wh / 2.
boxes_min = -boxes_max
intersect_min = np.maximum(boxes_min, anchors_min)
intersect_max = np.minimum(boxes_max, anchors_max)
intersect_wh = np.maximum(intersect_max - intersect_min, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
best_anchor = np.argmax(iou, axis=-1)
for t, n in enumerate(best_anchor):
for l in range(num_layers):
if n in anchor_mask[l]:
i = np.floor(true_boxes[t, 0] * grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[t, 1] * grid_shapes[l][0]).astype('int32')
k = anchor_mask[l].index(n)
c = true_boxes[t, 4].astype('int32')
y_true[l][j, i, k, 0:4] = true_boxes[t, 0:4]
y_true[l][j, i, k, 4] = 1.
y_true[l][j, i, k, 5 + c] = 1.
pad_gt_box0 = np.zeros(shape=[50, 4], dtype=np.float32)
pad_gt_box1 = np.zeros(shape=[50, 4], dtype=np.float32)
pad_gt_box2 = np.zeros(shape=[50, 4], dtype=np.float32)
mask0 = np.reshape(y_true[0][..., 4:5], [-1])
gt_box0 = np.reshape(y_true[0][..., 0:4], [-1, 4])
gt_box0 = gt_box0[mask0 == 1]
pad_gt_box0[:gt_box0.shape[0]] = gt_box0
mask1 = np.reshape(y_true[1][..., 4:5], [-1])
gt_box1 = np.reshape(y_true[1][..., 0:4], [-1, 4])
gt_box1 = gt_box1[mask1 == 1]
pad_gt_box1[:gt_box1.shape[0]] = gt_box1
mask2 = np.reshape(y_true[2][..., 4:5], [-1])
gt_box2 = np.reshape(y_true[2][..., 0:4], [-1, 4])
gt_box2 = gt_box2[mask2 == 1]
pad_gt_box2[:gt_box2.shape[0]] = gt_box2
return y_true[0], y_true[1], y_true[2], pad_gt_box0, pad_gt_box1, pad_gt_box2
def _infer_data(img_data, input_shape, box):
w, h = img_data.size
input_h, input_w = input_shape
scale = min(float(input_w) / float(w), float(input_h) / float(h))
nw = int(w * scale)
nh = int(h * scale)
img_data = img_data.resize((nw, nh), Image.BICUBIC)
new_image = np.zeros((input_h, input_w, 3), np.float32)
new_image.fill(128)
img_data = np.array(img_data)
if len(img_data.shape) == 2:
img_data = np.expand_dims(img_data, axis=-1)
img_data = np.concatenate([img_data, img_data, img_data], axis=-1)
dh = int((input_h - nh) / 2)
dw = int((input_w - nw) / 2)
new_image[dh:(nh + dh), dw:(nw + dw), :] = img_data
new_image /= 255.
new_image = np.transpose(new_image, (2, 0, 1))
new_image = np.expand_dims(new_image, 0)
return new_image, np.array([h, w], np.float32), box
def _data_aug(image, box, is_training, jitter=0.3, hue=0.1, sat=1.5, val=1.5, image_size=(352, 640)):
"""Data augmentation function."""
if not isinstance(image, Image.Image):
image = Image.fromarray(image)
iw, ih = image.size
ori_image_shape = np.array([ih, iw], np.int32)
h, w = image_size
if not is_training:
return _infer_data(image, image_size, box)
flip = np.random.rand() < 0.5
# correct boxes
box_data = np.zeros((max_boxes, 5))
while True:
# Prevent the situation that all boxes are eliminated
new_ar = float(w) / float(h) * np.random.uniform(1 - jitter, 1 + jitter) / \
np.random.uniform(1 - jitter, 1 + jitter)
scale = np.random.uniform(0.25, 2)
if new_ar < 1:
nh = int(scale * h)
nw = int(nh * new_ar)
else:
nw = int(scale * w)
nh = int(nw / new_ar)
dx = int(np.random.uniform(0, w - nw))
dy = int(np.random.uniform(0, h - nh))
if len(box) >= 1:
t_box = box.copy()
np.random.shuffle(t_box)
t_box[:, [0, 2]] = t_box[:, [0, 2]] * float(nw) / float(iw) + dx
t_box[:, [1, 3]] = t_box[:, [1, 3]] * float(nh) / float(ih) + dy
if flip:
t_box[:, [0, 2]] = w - t_box[:, [2, 0]]
t_box[:, 0:2][t_box[:, 0:2] < 0] = 0
t_box[:, 2][t_box[:, 2] > w] = w
t_box[:, 3][t_box[:, 3] > h] = h
box_w = t_box[:, 2] - t_box[:, 0]
box_h = t_box[:, 3] - t_box[:, 1]
t_box = t_box[np.logical_and(box_w > 1, box_h > 1)] # discard invalid box
if len(t_box) >= 1:
box = t_box
break
box_data[:len(box)] = box
# resize image
image = image.resize((nw, nh), Image.BICUBIC)
# place image
new_image = Image.new('RGB', (w, h), (128, 128, 128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
if flip:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
# convert image to gray or not
if np.random.rand() < 0.25:
image = image.convert('L').convert('RGB')
# when the channels of image is 1
image = np.array(image)
if len(image.shape) == 2:
image = np.expand_dims(image, axis=-1)
image = np.concatenate([image, image, image], axis=-1)
# distort image
image_data = image.astype(np.float32) / 255.
# preprocess bounding boxes
bbox_true_1, bbox_true_2, bbox_true_3, gt_box1, gt_box2, gt_box3 = \
_preprocess_true_boxes(box_data, anchors, image_size)
return image_data, bbox_true_1, bbox_true_2, bbox_true_3, \
ori_image_shape, gt_box1, gt_box2, gt_box3
if is_training:
images, bbox_1, bbox_2, bbox_3, _, gt_box1, gt_box2, gt_box3 = _data_aug(image, box, is_training)
return images, bbox_1, bbox_2, bbox_3, gt_box1, gt_box2, gt_box3
images, shape, anno = _data_aug(image, box, is_training)
return images, shape, anno
def anno_parser(annos_str):
"""Parse annotation from string to list."""
annos = []
for anno_str in annos_str:
anno = list(map(int, anno_str.strip().split(',')))
annos.append(anno)
return annos
def filter_valid_data(image_dir, anno_path):
"""Filter valid image file, which both in image_dir and anno_path."""
image_files = []
image_anno_dict = {}
if not os.path.isdir(image_dir):
raise RuntimeError("Path given is not valid.")
if not os.path.isfile(anno_path):
raise RuntimeError("Annotation file is not valid.")
with open(anno_path, "rb") as f:
lines = f.readlines()
for line in lines:
line_str = line.decode("utf-8").strip()
line_split = str(line_str).split(' ')
file_name = line_split[0]
if os.path.isfile(os.path.join(image_dir, file_name)):
image_anno_dict[file_name] = anno_parser(line_split[1:])
image_files.append(file_name)
else:
raise ValueError("there is no file {}, it is construct by parameter "
"image_dir:{} and first field in file {} every "
"line, please check them.".format(os.path.join(image_dir, file_name),
image_dir, anno_path))
return image_files, image_anno_dict
def data_to_mindrecord_byte_image(image_dir, anno_path, mindrecord_dir, prefix, file_num):
"""Create MindRecord file by image_dir and anno_path."""
mindrecord_path = os.path.join(mindrecord_dir, prefix)
writer = FileWriter(mindrecord_path, file_num)
image_files, image_anno_dict = filter_valid_data(image_dir, anno_path)
yolo_json = {
"image": {"type": "bytes"},
"annotation": {"type": "int64", "shape": [-1, 5]},
}
writer.add_schema(yolo_json, "yolo_json")
for image_name in image_files:
image_path = os.path.join(image_dir, image_name)
with open(image_path, 'rb') as f:
img = f.read()
annos = np.array(image_anno_dict[image_name])
row = {"image": img, "annotation": annos}
writer.write_raw_data([row])
writer.commit()
def create_yolo_dataset(mindrecord_dir, batch_size=32, device_num=1, rank=0,
is_training=True, num_parallel_workers=8):
"""Create YOLOv3 dataset with MindDataset."""
de.config.set_prefetch_size(64)
if "x86" in platform.machine():
ds = de.MindDataset(mindrecord_dir, columns_list=["image", "annotation"], num_shards=device_num, shard_id=rank,
num_parallel_workers=num_parallel_workers, shuffle=is_training)
else:
ds = de.MindDataset(mindrecord_dir, columns_list=["image", "annotation"], num_shards=device_num, shard_id=rank,
num_parallel_workers=2, shuffle=is_training)
decode = C.Decode()
if "x86" in platform.machine():
ds = ds.map(operations=decode, input_columns=["image"], num_parallel_workers=num_parallel_workers)
else:
ds = ds.map(operations=decode, input_columns=["image"], num_parallel_workers=1)
compose_map_func = (lambda image, annotation: preprocess_fn(image, annotation, is_training))
if is_training:
hwc_to_chw = C.HWC2CHW()
ds = ds.map(operations=compose_map_func, input_columns=["image", "annotation"],
output_columns=["image", "bbox_1", "bbox_2", "bbox_3", "gt_box1", "gt_box2", "gt_box3"],
column_order=["image", "bbox_1", "bbox_2", "bbox_3", "gt_box1", "gt_box2", "gt_box3"],
num_parallel_workers=num_parallel_workers)
if "x86" in platform.machine():
ds = ds.map(operations=hwc_to_chw, input_columns=["image"], num_parallel_workers=num_parallel_workers)
else:
ds = ds.map(operations=hwc_to_chw, input_columns=["image"], num_parallel_workers=1)
ds = ds.batch(batch_size, drop_remainder=True)
else:
ds = ds.map(operations=compose_map_func, input_columns=["image", "annotation"],
output_columns=["image", "image_shape", "annotation"],
column_order=["image", "image_shape", "annotation"],
num_parallel_workers=num_parallel_workers)
return ds
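if __name__ == "__main__":
    # A minimal pipeline sketch with hypothetical paths: convert the raw images and
    # annotation file to MindRecord once, then build the training dataset from the
    # first generated shard (prefix + "0").
    data_to_mindrecord_byte_image("./images", "./anno.txt", "./mindrecord", "yolo.mindrecord", 8)
    ds = create_yolo_dataset("./mindrecord/yolo.mindrecord0", batch_size=32, is_training=True)
    print("batches per epoch:", ds.get_dataset_size())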
| 41.75 | 119 | 0.578689 |
4a2651e26a1ed7e60e02b74322d46aeb196b03c2 | 7,084 | py | Python | examples/heat-source-mpi.py | jlevine18/mirgecom | e044197cf1535a2e9742b7d97b91be4eba98db2c | [
"MIT"
] | null | null | null | examples/heat-source-mpi.py | jlevine18/mirgecom | e044197cf1535a2e9742b7d97b91be4eba98db2c | [
"MIT"
] | 1 | 2021-08-11T16:21:21.000Z | 2021-08-11T16:21:21.000Z | examples/heat-source-mpi.py | dshtey2/mirgecom | 5089accba9f7954ca426ee5b3bd97c511e4f6861 | [
"MIT"
] | null | null | null | """Demonstrate heat source example."""
__copyright__ = "Copyright (C) 2020 University of Illinois Board of Trustees"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import numpy as np
import numpy.linalg as la # noqa
import pyopencl as cl
from meshmode.array_context import (
PyOpenCLArrayContext,
PytatoPyOpenCLArrayContext
)
from mirgecom.profiling import PyOpenCLProfilingArrayContext
from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa
from grudge.eager import EagerDGDiscretization
from grudge.shortcuts import make_visualizer
from grudge.dof_desc import DISCR_TAG_BASE, DTAG_BOUNDARY
from mirgecom.integrators import rk4_step
from mirgecom.diffusion import (
diffusion_operator,
DirichletDiffusionBoundary,
NeumannDiffusionBoundary)
from mirgecom.mpi import mpi_entry_point
import pyopencl.tools as cl_tools
from mirgecom.logging_quantities import (initialize_logmgr,
logmgr_add_device_name,
logmgr_add_device_memory_usage)
from logpyle import IntervalTimer, set_dt
@mpi_entry_point
def main(ctx_factory=cl.create_some_context, use_logmgr=True,
use_leap=False, use_profiling=False, casename=None,
rst_filename=None, actx_class=PyOpenCLArrayContext):
"""Run the example."""
cl_ctx = cl.create_some_context()
queue = cl.CommandQueue(cl_ctx)
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_parts = comm.Get_size()
logmgr = initialize_logmgr(use_logmgr,
filename="heat-source.sqlite", mode="wu", mpi_comm=comm)
if use_profiling:
queue = cl.CommandQueue(
cl_ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
else:
queue = cl.CommandQueue(cl_ctx)
actx = actx_class(
queue,
allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))
from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
mesh_dist = MPIMeshDistributor(comm)
dim = 2
nel_1d = 16
t = 0
t_final = 0.01
istep = 0
if mesh_dist.is_mananger_rank():
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(-0.5,)*dim,
b=(0.5,)*dim,
nelements_per_axis=(nel_1d,)*dim,
boundary_tag_to_face={
"dirichlet": ["+x", "-x"],
"neumann": ["+y", "-y"]
}
)
print("%d elements" % mesh.nelements)
part_per_element = get_partition_by_pymetis(mesh, num_parts)
local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts)
del mesh
else:
local_mesh = mesh_dist.receive_mesh_part()
order = 3
discr = EagerDGDiscretization(actx, local_mesh, order=order,
mpi_communicator=comm)
if dim == 2:
# no deep meaning here, just a fudge factor
dt = 0.0025/(nel_1d*order**2)
else:
raise ValueError("don't have a stable time step guesstimate")
source_width = 0.2
from meshmode.array_context import thaw
nodes = thaw(actx, discr.nodes())
boundaries = {
DTAG_BOUNDARY("dirichlet"): DirichletDiffusionBoundary(0.),
DTAG_BOUNDARY("neumann"): NeumannDiffusionBoundary(0.)
}
u = discr.zeros(actx)
if logmgr:
logmgr_add_device_name(logmgr, queue)
logmgr_add_device_memory_usage(logmgr, queue)
logmgr.add_watches(["step.max", "t_step.max", "t_log.max"])
try:
logmgr.add_watches(["memory_usage_python.max", "memory_usage_gpu.max"])
except KeyError:
pass
if use_profiling:
logmgr.add_watches(["multiply_time.max"])
vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
logmgr.add_quantity(vis_timer)
vis = make_visualizer(discr)
def rhs(t, u):
return (
diffusion_operator(
discr, quad_tag=DISCR_TAG_BASE,
alpha=1, boundaries=boundaries, u=u)
+ actx.np.exp(-np.dot(nodes, nodes)/source_width**2))
rank = comm.Get_rank()
while t < t_final:
if logmgr:
logmgr.tick_before()
if istep % 10 == 0:
print(istep, t, discr.norm(u))
vis.write_vtk_file("fld-heat-source-mpi-%03d-%04d.vtu" % (rank, istep),
[
("u", u)
])
u = rk4_step(u, t, dt, rhs)
t += dt
istep += 1
if logmgr:
set_dt(logmgr, dt)
logmgr.tick_after()
if __name__ == "__main__":
import argparse
casename = "heat-source"
parser = argparse.ArgumentParser(description=f"MIRGE-Com Example: {casename}")
parser.add_argument("--lazy", action="store_true",
help="switch to a lazy computation mode")
parser.add_argument("--profiling", action="store_true",
help="turn on detailed performance profiling")
parser.add_argument("--log", action="store_true", default=True,
help="turn on logging")
parser.add_argument("--leap", action="store_true",
help="use leap timestepper")
parser.add_argument("--restart_file", help="root name of restart file")
parser.add_argument("--casename", help="casename to use for i/o")
args = parser.parse_args()
if args.profiling:
if args.lazy:
raise ValueError("Can't use lazy and profiling together.")
actx_class = PyOpenCLProfilingArrayContext
else:
actx_class = PytatoPyOpenCLArrayContext if args.lazy \
else PyOpenCLArrayContext
logging.basicConfig(format="%(message)s", level=logging.INFO)
if args.casename:
casename = args.casename
rst_filename = None
if args.restart_file:
rst_filename = args.restart_file
main(use_logmgr=args.log, use_leap=args.leap, use_profiling=args.profiling,
casename=casename, rst_filename=rst_filename, actx_class=actx_class)
# vim: foldmethod=marker
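# A typical invocation sketch (the flags are those defined by the argparse setup
# above; the MPI launcher and rank count are assumptions about the local setup):
#
#     mpiexec -n 2 python heat-source-mpi.py --casename heat-demo --log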
| 32.347032 | 83 | 0.669819 |
4a26525ac2a0772d60cb0154c6a730eb29336e96 | 10,931 | py | Python | im2txt/tf_cider.py | wangheda/ImageCaption-UnderFitting | ca98807d3a35a40a35446678d7e7b43242767a63 | [
"Apache-2.0"
] | 8 | 2018-05-08T12:29:56.000Z | 2021-04-07T03:11:32.000Z | im2txt/tf_cider.py | wangheda/ImageCaption-UnderFitting | ca98807d3a35a40a35446678d7e7b43242767a63 | [
"Apache-2.0"
] | null | null | null | im2txt/tf_cider.py | wangheda/ImageCaption-UnderFitting | ca98807d3a35a40a35446678d7e7b43242767a63 | [
"Apache-2.0"
] | 4 | 2017-12-22T02:10:38.000Z | 2021-04-07T03:12:24.000Z |
import json
import tensorflow as tf
import numpy as np
import sys
tf.flags.DEFINE_integer("max_vocab_size", 10000,
"Don't change this.")
tf.flags.DEFINE_string("document_frequency_file",
"data/document_frequency.json",
"File containing the document frequency infos.")
FLAGS = tf.app.flags.FLAGS
LOG_TENSOR = True
def log_tensor(name, g=None, l=None):
if LOG_TENSOR:
if g is None and l is None:
print >> sys.stderr, name, eval(name, {"self":self})
else:
print >> sys.stderr, name, eval(name, g, l)
def get_rank(tensor):
return len(get_shape_as_list(tensor))
def get_shape_as_list(tensor):
return tensor.get_shape().as_list()
def get_shape(tensor):
"""Returns static shape if available and dynamic shape otherwise."""
static_shape = tensor.shape.as_list()
dynamic_shape = tf.unstack(tf.shape(tensor))
dims = [s[1] if s[0] is None else s[0]
for s in zip(static_shape, dynamic_shape)]
return dims
def get_real_lengths(words, lengths):
# t should be rank 3
batch, num_refs, max_length = words.get_shape().as_list()
mask = tf.reshape(tf.sequence_mask(tf.reshape(lengths, [-1]),
maxlen=max_length),
shape=[batch, num_refs, max_length])
num_end_tokens = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(words, FLAGS.end_token),
mask),
dtype=tf.int64),
axis=-1)
lengths = tf.maximum(lengths - num_end_tokens, 0)
return lengths
class CiderScorer(object):
def __init__(self):
with open(FLAGS.document_frequency_file, 'r') as f:
df_data = json.load(f)
df_keys = df_data['df_keys']
df_values = df_data['df_values']
ref_len = df_data['ref_len']
self.df_table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(
df_keys, df_values,
key_dtype=tf.int64, value_dtype=tf.float32), default_value = 0.0)
self.ref_len = tf.constant(ref_len)
def score(self, hyp_words, hyp_lengths, ref_words, ref_lengths, sigma=6.0):
"""
parameters
hyp_words: [batch, hyp_length]
hyp_lengths: [batch]
ref_words: [batch, num_refs, hyp_length]
ref_lengths: [batch, num_refs]
return
score: [batch]
"""
hyp_words = tf.cast(hyp_words, dtype=tf.int64)
ref_words = tf.cast(ref_words, dtype=tf.int64)
hyp_lengths = tf.cast(hyp_lengths, dtype=tf.int64)
ref_lengths = tf.cast(ref_lengths, dtype=tf.int64)
if get_rank(hyp_words) == 2:
hyp_words = tf.expand_dims(hyp_words, axis=1)
hyp_lengths = tf.expand_dims(hyp_lengths, axis=1)
if get_rank(ref_words) == 2:
ref_words = tf.expand_dims(ref_words, axis=1)
ref_lengths = tf.expand_dims(ref_lengths, axis=1)
hyp_lengths = get_real_lengths(hyp_words, hyp_lengths)
ref_lengths = get_real_lengths(ref_words, ref_lengths)
log_tensor("hyp_words", l=locals())
log_tensor("hyp_lengths", l=locals())
log_tensor("ref_words", l=locals())
log_tensor("ref_lengths", l=locals())
def ngram_count(words, lengths, n=4):
shape = words.get_shape().as_list()
if len(shape) == 3:
batch, num_sents, max_length = shape
else:
raise NotImplementedError("tensor must be of rank 3")
tmp_ngrams = []
tmp_lengths = []
tmp_shifted = []
words_idx = words + 1
log_tensor("words_idx", l=locals())
for i in range(n):
weights = [FLAGS.max_vocab_size**k for k in range(i,-1,-1)]
if i == 0:
tmp_shifted.append(words_idx)
else:
tmp_shifted.append(tf.concat([words_idx[:,:,i:], tf.constant(0, dtype=tf.int64, shape=[batch,num_sents,i])], axis=-1))
tmp_ngram = tf.add_n([x*y for x,y in zip(tmp_shifted, weights)])
log_tensor("tmp_ngram", l=locals())
tmp_ngrams.append(tmp_ngram) # n-gram ids
tmp_lengths.append(tf.maximum(lengths-i, 0)) # bi-gram ids are shorther by 1, etc
tmp_ngrams = tf.stack(tmp_ngrams, axis=2)
tmp_lengths = tf.stack(tmp_lengths, axis=2)
log_tensor("tmp_ngrams", l=locals())
log_tensor("tmp_lengths", l=locals())
return tmp_ngrams, tmp_lengths
def compute_vec_norm_and_freq(ngrams, ngram_lengths):
"""
parameters
ngrams : [batch, num_sents, n, max_length]
ngram_lengths : [batch, num_sents, n]
return
vec : [batch, num_sents, n, max_length] tfidf values of every ngram
norm : [batch, num_sents, n]
text_freq : [batch, num_sents, n, max_length]
"""
shape = ngrams.get_shape().as_list()
batch, num_sents, n, max_length = shape
mask = tf.reshape(
tf.sequence_mask(
tf.reshape(ngram_lengths, shape=[-1]),
maxlen=max_length),
shape=[batch, num_sents, n, max_length])
float_mask = tf.cast(mask, dtype=tf.float32)
square_masks = tf.reshape(float_mask, shape=[batch, num_sents, n, max_length, 1]) \
* tf.reshape(float_mask, shape=[batch, num_sents, n, 1, max_length])
tmp1_ngrams = tf.reshape(ngrams, shape=[batch, num_sents, n, 1, max_length])
tmp2_ngrams = tf.reshape(ngrams, shape=[batch, num_sents, n, max_length, 1])
tmp12_equal = tf.cast(tf.equal(tmp1_ngrams, tmp2_ngrams), dtype=tf.float32)
text_freq = tf.reduce_sum(tmp12_equal * square_masks, axis=-1)
doc_freq = self.df_table.lookup(ngrams)
df_values = tf.log(tf.maximum(doc_freq, 1.0))
tf.summary.histogram("cider/document_freq", doc_freq)
tf.summary.histogram("cider/text_freq", text_freq)
tf.summary.histogram("cider/df_values", df_values)
vec = text_freq * tf.maximum(self.ref_len - df_values, 0.0)
norm = tf.reduce_sum(vec * vec * float_mask / (text_freq + 1e-12), axis=-1)
norm = tf.sqrt(norm)
tf.summary.histogram("cider/vec", vec)
tf.summary.histogram("cider/norm", norm)
return vec, norm, text_freq
def sim(hyp_vec, hyp_norm, hyp_tf, hyp_lengths, hyp_ngrams, hyp_ngram_lengths,
ref_vec, ref_norm, ref_tf, ref_lengths, ref_ngrams, ref_ngram_lengths,
sigma=6.0):
"""
parameters
vec : [batch, num_sents, n, max_length] tfidf values of every ngram
norm : [batch, num_sents, n]
tf : [batch, num_sents, n, max_length]
lengths : [batch, num_sents, n]
ngrams : [batch, num_sents, n, max_length]
ngram_lengths : [batch, num_sents, n]
return
score : [batch]
"""
batch, num_sents, n, max_hyp_length = hyp_vec.get_shape().as_list()
_, _, _, max_ref_length = ref_vec.get_shape().as_list()
log_tensor("hyp_vec", l=locals())
log_tensor("hyp_norm", l=locals())
log_tensor("hyp_ngrams", l=locals())
log_tensor("hyp_ngram_lengths", l=locals())
log_tensor("ref_vec", l=locals())
log_tensor("ref_norm", l=locals())
log_tensor("ref_ngrams", l=locals())
log_tensor("ref_ngram_lengths", l=locals())
delta = tf.cast(hyp_lengths - ref_lengths, tf.float32)
log_tensor("delta", l=locals())
ref_masks = tf.cast(tf.reshape(
tf.sequence_mask(
tf.reshape(ref_ngram_lengths, shape=[-1]),
maxlen=max_ref_length),
shape=[batch, num_sents, n, max_ref_length]), dtype=tf.float32)
hyp_masks = tf.cast(tf.reshape(
tf.sequence_mask(
tf.reshape(hyp_ngram_lengths, shape=[-1]),
maxlen=max_hyp_length),
shape=[batch, num_sents, n, max_hyp_length]), dtype=tf.float32)
square_masks = tf.reshape(hyp_masks, shape=[batch, num_sents, n, max_hyp_length, 1]) \
* tf.reshape(ref_masks, shape=[batch, num_sents, n, 1, max_ref_length])
freq_masks = tf.reshape(hyp_tf, shape=[batch, num_sents, n, max_hyp_length, 1]) \
* tf.reshape(ref_tf, shape=[batch, num_sents, n, 1, max_ref_length])
equal_masks = tf.cast(tf.equal(
tf.reshape(hyp_ngrams, shape=[batch, num_sents, n, max_hyp_length, 1]),
tf.reshape(ref_ngrams, shape=[batch, num_sents, n, 1, max_ref_length])
), dtype=tf.float32)
min_vec = tf.reduce_sum(tf.minimum(
tf.reshape(hyp_vec, [batch, num_sents, n, max_hyp_length, 1]),
tf.reshape(ref_vec, [batch, num_sents, n, 1, max_ref_length]))
* equal_masks * square_masks,
axis=-1) / (hyp_tf + 1e-12)
prod = tf.reduce_sum(tf.reshape(min_vec, [batch, num_sents, n, max_hyp_length, 1])
* tf.reshape(ref_vec, [batch, num_sents, n, 1, max_ref_length])
* equal_masks * square_masks / (freq_masks + 1e-12),
axis=[-2,-1])
val = prod / (hyp_norm * ref_norm + 1e-12)
log_tensor("val", l=locals())
mult = np.e ** (-(delta ** 2) / ((sigma ** 2) * 2))
mask = tf.cast(ref_lengths > 0, dtype=tf.float32)
scores = val * tf.expand_dims(mult, axis=2) * tf.expand_dims(mask, axis=2)
tf.summary.histogram("cider/scores", scores)
log_tensor("scores", l=locals())
score_avg = tf.reduce_sum(scores, axis=[1,2]) \
/ (tf.reduce_sum(mask, axis=1) * float(n) + 1e-12)
score_avg = score_avg * 10.0
return score_avg
def tile_on_axis(tensor, axis=1, copies=5):
shape = tensor.get_shape().as_list()
multiples = [1] * len(shape)
multiples[axis] = copies
return tf.tile(tensor, multiples=multiples)
ref_ngrams, ref_ngram_lengths = ngram_count(ref_words, ref_lengths)
ref_vec, ref_norm, ref_text_freq = compute_vec_norm_and_freq(ref_ngrams, ref_ngram_lengths)
hyp_ngrams, hyp_ngram_lengths = ngram_count(hyp_words, hyp_lengths)
hyp_vec, hyp_norm, hyp_text_freq = compute_vec_norm_and_freq(hyp_ngrams, hyp_ngram_lengths)
ref_vec_shape = ref_vec.get_shape().as_list()
num_refs = ref_vec_shape[1]
hyp_ngrams, hyp_ngram_lengths, hyp_vec, hyp_norm, hyp_text_freq = map(
lambda x: tile_on_axis(x, axis=1, copies=num_refs),
[hyp_ngrams, hyp_ngram_lengths, hyp_vec, hyp_norm, hyp_text_freq])
sim_score = sim(hyp_vec, hyp_norm, hyp_text_freq, hyp_lengths, hyp_ngrams, hyp_ngram_lengths,
ref_vec, ref_norm, ref_text_freq, ref_lengths, ref_ngrams, ref_ngram_lengths,
sigma=sigma)
return sim_score
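if __name__ == "__main__":
    # A minimal graph-construction sketch with illustrative shapes; it assumes the
    # document-frequency file referenced above exists and that FLAGS.end_token is
    # defined elsewhere in the project. Evaluating `scores` additionally requires a
    # tf.Session in which tf.tables_initializer() has been run.
    scorer = CiderScorer()
    hyp = tf.placeholder(tf.int64, shape=[8, 20])        # [batch, hyp_length]
    hyp_len = tf.placeholder(tf.int64, shape=[8])
    refs = tf.placeholder(tf.int64, shape=[8, 5, 20])    # [batch, num_refs, ref_length]
    ref_len = tf.placeholder(tf.int64, shape=[8, 5])
    scores = scorer.score(hyp, hyp_len, refs, ref_len)   # [batch]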
| 41.249057 | 128 | 0.609642 |
4a26531c58e3c1e3bfa11d76a73cdb07d28faaba | 3,485 | py | Python | Avengers_Of_GitHub/settings.py | AnubhavMadhav/Avengers-Of-GitHub | eb26a9aa65a60a8742ec2ee20ff4bbe3c9863fbb | [
"MIT"
] | 36 | 2020-05-16T18:35:16.000Z | 2022-02-22T03:53:09.000Z | Avengers_Of_GitHub/settings.py | AnubhavMadhav/Avengers-Of-GitHub | eb26a9aa65a60a8742ec2ee20ff4bbe3c9863fbb | [
"MIT"
] | 8 | 2020-05-17T15:44:56.000Z | 2021-09-22T19:01:28.000Z | Avengers_Of_GitHub/settings.py | AnubhavMadhav/Avengers-Of-GitHub | eb26a9aa65a60a8742ec2ee20ff4bbe3c9863fbb | [
"MIT"
] | 3 | 2020-05-30T07:41:18.000Z | 2020-06-11T19:54:32.000Z |
"""
Django settings for Avengers_Of_GitHub project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')ceom0_5y%ogm*imldv9vb_g@lp%=*kcm%uuq4h^&q_^8*38&m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['avengers-of-github.herokuapp.com','127.0.0.1']
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
'D:\My_Projects\Avengers-Of-GitHub\static',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'avengers',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Avengers_Of_GitHub.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Avengers_Of_GitHub.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| 27.65873 | 91 | 0.706169 |
4a265386a07d05cd00051869e3144095195fe9e7 | 7,053 | py | Python | libcxx/utils/generate_header_tests.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | libcxx/utils/generate_header_tests.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | libcxx/utils/generate_header_tests.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import glob
import os
import posixpath
import re
def get_libcxx_paths():
utils_path = os.path.dirname(os.path.abspath(__file__))
script_name = os.path.basename(__file__)
assert os.path.exists(utils_path)
src_root = os.path.dirname(utils_path)
include_path = os.path.join(src_root, 'include')
assert os.path.exists(include_path)
libcxx_test_path = os.path.join(src_root, 'test', 'libcxx')
assert os.path.exists(libcxx_test_path)
return script_name, src_root, include_path, libcxx_test_path
script_name, source_root, include_path, libcxx_test_path = get_libcxx_paths()
header_markup = {
"barrier": ["ifndef _LIBCPP_HAS_NO_THREADS"],
"future": ["ifndef _LIBCPP_HAS_NO_THREADS"],
"latch": ["ifndef _LIBCPP_HAS_NO_THREADS"],
"mutex": ["ifndef _LIBCPP_HAS_NO_THREADS"],
"semaphore": ["ifndef _LIBCPP_HAS_NO_THREADS"],
"shared_mutex": ["ifndef _LIBCPP_HAS_NO_THREADS"],
"thread": ["ifndef _LIBCPP_HAS_NO_THREADS"],
"filesystem": ["ifndef _LIBCPP_HAS_NO_FILESYSTEM_LIBRARY"],
"format": ["ifndef _LIBCPP_HAS_NO_INCOMPLETE_FORMAT"],
"clocale": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"codecvt": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"fstream": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"iomanip": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"ios": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"iostream": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"istream": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"locale.h": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"locale": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"ostream": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"ranges": ["ifndef _LIBCPP_HAS_NO_INCOMPLETE_RANGES"],
"regex": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"sstream": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"streambuf": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"strstream": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
"wctype.h": ["ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS"],
"cwctype": ["ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS"],
"cwchar": ["ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS"],
"wchar.h": ["ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS"],
"experimental/coroutine": ["ifndef _LIBCPP_HAS_NO_EXPERIMENTAL_COROUTINES"],
"coroutine": ["ifndef _LIBCPP_HAS_NO_CXX20_COROUTINES"],
"experimental/regex": ["ifndef _LIBCPP_HAS_NO_LOCALIZATION"],
}
allowed_extensions = ['', '.h']
indent_width = 4
begin_pattern = """\
////////////////////////////////////////////////////////////////////////////////
// BEGIN-GENERATED-HEADERS
////////////////////////////////////////////////////////////////////////////////
"""
warning_note = """\
// WARNING: This test was generated by {script_name}
// and should not be edited manually.
""".format(script_name=script_name)
end_pattern = """\
////////////////////////////////////////////////////////////////////////////////
// END-GENERATED-HEADERS
////////////////////////////////////////////////////////////////////////////////
"""
generated_part_pattern = re.compile(re.escape(begin_pattern) + ".*" + re.escape(end_pattern),
re.MULTILINE | re.DOTALL)
headers_template = """\
// Top level headers
{top_level_headers}
// experimental headers
#if __cplusplus >= 201103L
{experimental_headers}
#endif // __cplusplus >= 201103L
// extended headers
{extended_headers}
"""
def should_keep_header(p, exclusions=None):
if os.path.isdir(p):
return False
if exclusions:
relpath = os.path.relpath(p, include_path)
relpath = posixpath.join(*os.path.split(relpath))
if relpath in exclusions:
return False
return os.path.splitext(p)[1] in allowed_extensions
def produce_include(relpath, indent_level, post_include=None):
relpath = posixpath.join(*os.path.split(relpath))
template = "{preamble}#{indentation}include <{include}>{post_include}{postamble}"
base_indentation = ' '*(indent_width * indent_level)
next_indentation = base_indentation + ' '*(indent_width)
post_include = "\n{}".format(post_include) if post_include else ''
markup = header_markup.get(relpath, None)
if markup:
preamble = '#{indentation}{directive}\n'.format(
directive=markup[0],
indentation=base_indentation,
)
postamble = '\n#{indentation}endif'.format(
indentation=base_indentation,
)
indentation = next_indentation
else:
preamble = ''
postamble = ''
indentation = base_indentation
return template.format(
include=relpath,
post_include=post_include,
preamble=preamble,
postamble=postamble,
indentation=indentation,
)
def produce_headers(path_parts, indent_level, post_include=None, exclusions=None):
pattern = os.path.join(*path_parts, '[a-z]*')
files = sorted(glob.glob(pattern, recursive=False))
include_headers = [
produce_include(os.path.relpath(p, include_path),
indent_level, post_include=post_include)
for p in files
if should_keep_header(p, exclusions)
]
return '\n'.join(include_headers)
def produce_top_level_headers(post_include=None, exclusions=None):
return produce_headers([include_path], 0, post_include=post_include, exclusions=exclusions)
def produce_experimental_headers(post_include=None, exclusions=None):
return produce_headers([include_path, 'experimental'], 1, post_include=post_include, exclusions=exclusions)
def produce_extended_headers(post_include=None, exclusions=None):
return produce_headers([include_path, 'ext'], 0, post_include=post_include, exclusions=exclusions)
def replace_generated_headers(test_path, test_str):
with open(test_path, 'r') as f:
content = f.read()
preamble = begin_pattern + '\n// clang-format off\n\n' + warning_note
postamble = '\n// clang-format on\n\n' + end_pattern
content = generated_part_pattern.sub(
preamble + test_str + postamble, content)
with open(test_path, 'w', newline='\n') as f:
f.write(content)
def produce_test(test_filename, exclusions=None, post_include=None):
test_str = headers_template.format(
top_level_headers=produce_top_level_headers(
post_include=post_include,
exclusions=exclusions,
),
experimental_headers=produce_experimental_headers(
post_include=post_include,
),
extended_headers=produce_extended_headers(
post_include=post_include,
),
)
replace_generated_headers(os.path.join(
libcxx_test_path, test_filename), test_str)
def main():
produce_test('clang_tidy.sh.cpp')
produce_test('double_include.sh.cpp')
produce_test('min_max_macros.compile.pass.cpp', post_include='TEST_MACROS();')
produce_test('nasty_macros.compile.pass.cpp')
produce_test('no_assert_include.compile.pass.cpp', exclusions=['cassert'])
if __name__ == '__main__':
main()
| 33.112676 | 111 | 0.667801 |
4a2653b2cf9c33c3287bc94bf19ad5a02463974c | 5,677 | py | Python | tests/eval_tests.py | andybalaam/cell | 03d0670f9ebd513a983b9327108a84f2eff8ee75 | [
"MIT"
] | 118 | 2016-10-17T09:04:42.000Z | 2021-12-31T03:00:55.000Z | tests/eval_tests.py | JoeyCluett/cell | a3203731e0c63a55955509e843fb99e38cf7cc7c | [
"MIT"
] | 4 | 2019-01-23T09:59:43.000Z | 2020-11-02T11:00:38.000Z | tests/eval_tests.py | JoeyCluett/cell | a3203731e0c63a55955509e843fb99e38cf7cc7c | [
"MIT"
] | 21 | 2016-06-05T08:05:53.000Z | 2022-01-29T10:08:47.000Z |
from tests.util.asserts import assert_that, assert_fails, equals
from tests.util.test import test
from pycell.lexer import lex
from pycell.parser import parse
from pycell.eval_ import eval_expr, eval_list
from pycell.env import Env
# --- Utils ---
def evald(inp, env=None):
if env is None:
env = Env()
return eval_list(parse(lex(inp)), env)
def assert_prog_fails(program, error, env=None):
assert_fails(error, evald, program, env)
# --- Evaluating ---
@test
def Evaluating_an_empty_program_gives_none():
assert_that(evald(""), equals(("none",)))
@test
def Evaluating_a_primitive_returns_itself():
assert_that(evald("3;"), equals(("number", 3)))
assert_that(evald("3.1;"), equals(("number", 3.1)))
assert_that(evald("'foo';"), equals(("string", "foo")))
@test
def Arithmetic_expressions_come_out_correct():
assert_that(evald("3 + 4;"), equals(("number", 7)))
assert_that(evald("3 - 4;"), equals(("number", -1)))
assert_that(evald("3 * 4;"), equals(("number", 12)))
assert_that(evald("3 / 4;"), equals(("number", 0.75)))
@test
def Referring_to_an_unknown_symbol_is_an_error():
assert_prog_fails("x;", "Unknown symbol 'x'.")
@test
def Can_define_a_value_and_retrieve_it():
assert_that(evald("x = 30;x;"), equals(("number", 30)))
assert_that(evald("y = 'foo';y;"), equals(("string", "foo")))
@test
def Modifying_a_value_is_an_error():
assert_prog_fails("x = 30;x = 10;", "Not allowed to re-assign symbol 'x'.")
@test
def Value_of_an_assignment_is_the_value_assigned():
assert_that(evald("x = 31;"), equals(("number", 31)))
@test
def None_evaluates_to_None():
assert_that(eval_expr(("none",), Env()), equals(("none", )))
@test
def Calling_a_function_returns_its_last_value():
assert_that(
evald("{10;11;}();"),
equals(("number", 11))
)
@test
def Body_of_a_function_can_use_arg_values():
assert_that(
evald("{:(x, y) x + y;}(100, 1);"),
equals(("number", 101))
)
@test
def Can_hold_a_reference_to_a_function_and_call_it():
assert_that(
evald("""
add = {:(x, y) x + y;};
add(20, 2.2);
"""),
equals(("number", 22.2))
)
@test
def A_symbol_has_different_life_inside_and_outside_a_function():
"""Define a symbol outside a function, redefine inside,
then evaluate outside. What happened inside the
function should not affect the value outside."""
assert_that(
evald("""
foo = "bar";
{foo = 3;}();
foo;
"""),
equals(("string", "bar"))
)
@test
def A_symbol_within_a_function_has_the_local_value():
assert_that(
evald("""
foo = 3;
bar = {foo = 77;foo;}();
bar;
"""),
equals(("number", 77))
)
@test
def Native_function_gets_called():
def native_fn(env, x, y):
return ("number", x[1] + y[1])
env = Env()
env.set("native_fn", ("native", native_fn))
assert_that(evald("native_fn( 2, 8 );", env), equals(("number", 10)))
@test
def Wrong_number_of_arguments_to_a_function_is_an_error():
assert_prog_fails(
"{}(3);",
"1 arguments passed to function ('function', [], []), but it requires 0 arguments."
)
assert_prog_fails(
"x={:(a, b, c)}; x(3, 2);",
"2 arguments passed to function ('symbol', 'x'), but it requires 3 arguments."
)
@test
def Wrong_number_of_arguments_to_a_native_function_is_an_error():
def native_fn0(env):
return ("number", 12)
def native_fn3(env, x, y, z):
return ("number", 12)
env = Env()
env.set("native_fn0", ("native", native_fn0))
env.set("native_fn3", ("native", native_fn3))
assert_prog_fails(
"native_fn0(3);",
"1 arguments passed to function ('symbol', 'native_fn0'), but it requires 0 arguments.",
env
)
assert_prog_fails(
"native_fn3(3, 2);",
"2 arguments passed to function ('symbol', 'native_fn3'), but it requires 3 arguments.",
env
)
@test
def Function_arguments_are_independent():
assert_that(evald(
"""
fn = {:(x) {x;};};
a = fn("a");
b = fn("b");
a();
"""
),
equals(evald("'a';"))
)
assert_that(evald(
"""
fn = {:(x) {x;};};
a = fn("a");
b = fn("b");
b();
"""
),
equals(evald("'b';"))
)
@test
def A_native_function_can_edit_the_environment():
def mx3(env):
env.set("x", ("number", 3))
env = Env()
env.set("make_x_three", ("native", mx3))
assert_that(
evald("x=1;make_x_three();x;", env),
equals(("number", 3))
)
@test
def A_closure_holds_updateable_values():
def dumb_set(env, sym, val):
env.parent.parent.parent.set(sym[1], val)
def dumb_if_equal(env, val1, val2, then_fn, else_fn):
if val1 == val2:
ret = then_fn
else:
ret = else_fn
return eval_expr(("call", ret, []), env)
env = Env()
env.set("dumb_set", ("native", dumb_set))
env.set("dumb_if_equal", ("native", dumb_if_equal))
assert_that(
evald(
"""
counter = {
x = 0;
{:(meth)
dumb_if_equal(meth, "get",
{x;},
{dumb_set("x", x + 1);}
);
}
}();
counter("inc");
counter("inc");
counter("get");
""",
env
),
equals(("number", 2))
)
| 23.753138 | 96 | 0.55399 |
4a2653d0b031f53616bdf84842787f011d448ecd | 1,273 | py | Python | MF.py | ahsanabbas123/Recommender-System | 68804e68fbcadaaa909cbde4a27ee7176390a0f3 | [
"MIT"
] | null | null | null | MF.py | ahsanabbas123/Recommender-System | 68804e68fbcadaaa909cbde4a27ee7176390a0f3 | [
"MIT"
] | null | null | null | MF.py | ahsanabbas123/Recommender-System | 68804e68fbcadaaa909cbde4a27ee7176390a0f3 | [
"MIT"
] | 1 | 2020-12-08T09:45:06.000Z | 2020-12-08T09:45:06.000Z | # Import libraries
import numpy as np
import pandas as pd
import argparse
import os
# Getting the required packages from the Surprise library
# Surprise is a Python scikit for building and analyzing recommender systems that deal with explicit rating data
from surprise import SVD
from surprise import Dataset
from surprise import accuracy
from surprise import Reader
from surprise.model_selection import train_test_split
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--ratings", required=True,
help="path to input dataset")
ap.add_argument("-p", "--movies", type=str,
help="path to movies dataset")
args = vars(ap.parse_args())
# Reading ratings file
ratings = pd.read_csv(args["ratings"])
# Calculating the sparsity of the data
# (user and movie counts are derived from the ratings file itself)
n_users = ratings['userId'].nunique()
n_movies = ratings['movieId'].nunique()
sparsity = round(1.0 - len(ratings) / float(n_users * n_movies), 3)
print('Sparsity Level of MovieLens dataset= ' + str(sparsity * 100) + '%')
# Load Reader library
reader = Reader()
# Load ratings dataset with the Dataset library
data = Dataset.load_from_df(ratings[['userId', 'movieId', 'rating']], reader)
# Split the dataset into train (75%) and test (25%) sets
trainset, testset = train_test_split(data, test_size=.25)
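# Fit an SVD (matrix factorisation) model on the training split,
# then report mean absolute error on the held-out test split.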
algo = SVD()
algo.fit(trainset)
predictions = algo.test(testset)
accuracy.mae(predictions)
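# Predict the rating that user 340 would give to movie 544 (illustrative IDs).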
algo.predict(340, 544)
| 28.288889 | 112 | 0.760408 |
4a2653d2722a04c49242fb9ccb0891803970fd9c | 595 | py | Python | leetcode/palindromic_substrings/palindromic_substrings.py | sagasu/python-algorithms | d630777a3f17823165e4d72ab780ede7b10df752 | [
"MIT"
] | null | null | null | leetcode/palindromic_substrings/palindromic_substrings.py | sagasu/python-algorithms | d630777a3f17823165e4d72ab780ede7b10df752 | [
"MIT"
] | null | null | null | leetcode/palindromic_substrings/palindromic_substrings.py | sagasu/python-algorithms | d630777a3f17823165e4d72ab780ede7b10df752 | [
"MIT"
] | null | null | null | class Solution:
def countSubstrings(self, s: str) -> int:
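        # Count palindromic substrings by expanding around every possible center:
        # each index (odd-length palindromes) and each adjacent pair (even-length).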
N = len(s)
count = 0
for mid in range(N):
left = mid
right = mid
while left >= 0 and right < N and s[left] == s[right]:
left -= 1
right += 1
                count += 1
            if mid + 1 < N:
                left = mid
                right = mid + 1
                while left >= 0 and right < N and s[left] == s[right]:
                    left -= 1
                    right += 1
                    count += 1
        return count
| 24.791667 | 70 | 0.363025 |
4a26540de67574abfbab30598d70643d06e036d9 | 1,169 | py | Python | public/models.py | Andrew-Chen-Wang/django-stripe-subscription | bb6031cda032784858bb484771f9a1da35b54169 | [
"MIT"
] | 1 | 2020-02-13T15:52:52.000Z | 2020-02-13T15:52:52.000Z | public/models.py | Andrew-Chen-Wang/django-stripe-subscription | bb6031cda032784858bb484771f9a1da35b54169 | [
"MIT"
] | 6 | 2020-06-05T20:52:33.000Z | 2021-09-22T18:30:41.000Z | public/models.py | Andrew-Chen-Wang/django-stripe-subscription | bb6031cda032784858bb484771f9a1da35b54169 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
class Profile(models.Model):
"""
Extra information that isn't accessed as much as the User model.
This example code uses the Client + Server integration method because
we want to remember a customer_id from Stripe with our own server.
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
class MagazineTitles(models.Model):
"""
These are simply magazine titles that a user can select
"""
id = models.BigAutoField(primary_key=True)
name = models.CharField(max_length=250, unique=True)
class DistributedMagazine(models.Model):
"""
These are the magazines that we distribute.
Assume that a completely different service distributes
these magazines. Our only concern is the financial component.
"""
id = models.BigAutoField(primary_key=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
magazine = models.ForeignKey(MagazineTitles, on_delete=models.CASCADE)
active = models.BooleanField(default=True)
expires = models.DateTimeField()
| 33.4 | 80 | 0.74337 |
4a2654c6cb76d1ae1393640d5762e3c97193e700 | 84 | py | Python | djangobench/benchmarks/query_in_bulk/settings.py | smithdc1/djangobench | 912dc536db706fb73d24a53d10d1739a0824d2c1 | [
"BSD-3-Clause"
] | 92 | 2015-02-07T11:03:45.000Z | 2022-03-23T10:51:06.000Z | djangobench/benchmarks/query_in_bulk/settings.py | smithdc1/djangobench | 912dc536db706fb73d24a53d10d1739a0824d2c1 | [
"BSD-3-Clause"
] | 19 | 2015-03-22T14:30:03.000Z | 2022-03-24T22:43:24.000Z | djangobench/benchmarks/query_in_bulk/settings.py | smithdc1/djangobench | 912dc536db706fb73d24a53d10d1739a0824d2c1 | [
"BSD-3-Clause"
] | 25 | 2015-01-22T19:53:25.000Z | 2022-03-20T11:15:54.000Z | from djangobench.base_settings import * # NOQA
INSTALLED_APPS = ['query_in_bulk']
| 21 | 47 | 0.77381 |
4a2654daab98f63dbfff863fb8e989f78f42543b | 1,418 | py | Python | app/recipe/serializers.py | EliasOPrado/recipe-app-api | 4ae20e7cb3195fa742fc6d2934f3fd5100e3c062 | [
"MIT"
] | null | null | null | app/recipe/serializers.py | EliasOPrado/recipe-app-api | 4ae20e7cb3195fa742fc6d2934f3fd5100e3c062 | [
"MIT"
] | null | null | null | app/recipe/serializers.py | EliasOPrado/recipe-app-api | 4ae20e7cb3195fa742fc6d2934f3fd5100e3c062 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
""" Serializer for tag objects """
class Meta:
model = Tag
fields = ['id', 'name']
read_only_fields = ['id']
class IngredientSerializer(serializers.ModelSerializer):
""" Serializer for ingredient objects """
class Meta:
model = Ingredient
fields = ['id', 'user', 'name']
read_only_fields = ['id']
class RecipeSerializer(serializers.ModelSerializer):
""" Serializer for recipe objects """
ingredients = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Ingredient.objects.all()
)
tags = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Tag.objects.all()
)
class Meta:
model = Recipe
fields = ['id', 'title', 'ingredients', 'tags', 'time_minutes', 'price', 'link']
read_only_fields = ['id']
class RecipeDetailSerializer(RecipeSerializer):
""" Serializer a recipe detail"""
ingredients = IngredientSerializer(many=True, read_only=True)
tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
""" serializer for uploading images to recipes """
class Meta:
model = Recipe
        fields = ['id', 'image']
        read_only_fields = ['id']
| 27.269231 | 88 | 0.655148 |
4a26556d94198daa7c73bc05e74dd06a0f620615 | 2,866 | py | Python | var/spack/repos/builtin/packages/r-brms/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-11-04T17:08:15.000Z | 2021-11-04T17:08:15.000Z | var/spack/repos/builtin/packages/r-brms/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 23 | 2021-01-25T15:13:45.000Z | 2022-03-28T20:19:04.000Z | var/spack/repos/builtin/packages/r-brms/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 7 | 2018-09-13T18:04:56.000Z | 2020-03-18T20:52:06.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBrms(RPackage):
"""Bayesian Regression Models using 'Stan':
Fit Bayesian generalized (non-)linear multivariate multilevel models using
'Stan' for full Bayesian inference. A wide range of distributions and link
functions are supported, allowing users to fit - among others - linear,
robust linear, count data, survival, response times, ordinal,
zero-inflated, hurdle, and even self-defined mixture models all in a
multilevel context. Further modeling options include non-linear and smooth
terms, auto-correlation structures, censored data, meta-analytic standard
errors, and quite a few more. In addition, all parameters of the response
distribution can be predicted in order to perform distributional
regression. Prior specifications are flexible and explicitly encourage
users to apply prior distributions that actually reflect their beliefs.
Model fit can easily be assessed and compared with posterior predictive
checks and leave-one-out cross-validation. References: Burkner (2017)
<doi:10.18637/jss.v080.i01>; Burkner (2018) <doi:10.32614/RJ-2018-017>;
Carpenter et al. (2017) <doi:10.18637/jss.v076.i01>."""
homepage = "https://github.com/paul-buerkner/brms"
cran = "brms"
version('2.16.1', sha256='749efbd9fb061fe207cf2e729c1387d9a8538b922f12ceec4e82a9f8dd9c1bc4')
version('2.15.0', sha256='c11701d1d8758590b74bb845b568b736e4455a81b114c7dfde0b27b7bd1bcc2f')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@2.16:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
depends_on('r-nleqslv', type=('build', 'run'))
depends_on('r-nlme', type=('build', 'run'))
depends_on('r-coda', type=('build', 'run'))
depends_on('r-abind', type=('build', 'run'))
depends_on('r-backports', type=('build', 'run'))
| 52.109091 | 96 | 0.682484 |
4a2655cf14da7dc7a481f8ede62a301d3d9ccbbd | 55,807 | py | Python | addons/osfstorage/tests/test_views.py | kounoAkihiro/SV-COS-osf.io | 0a9a68bbf9cf254d2e900d49b20d8a8e6e359c21 | [
"Apache-2.0"
] | null | null | null | addons/osfstorage/tests/test_views.py | kounoAkihiro/SV-COS-osf.io | 0a9a68bbf9cf254d2e900d49b20d8a8e6e359c21 | [
"Apache-2.0"
] | 16 | 2020-03-24T16:30:32.000Z | 2022-03-03T22:39:45.000Z | addons/osfstorage/tests/test_views.py | kounoAkihiro/SV-COS-osf.io | 0a9a68bbf9cf254d2e900d49b20d8a8e6e359c21 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
from __future__ import unicode_literals
import json
import mock
import datetime
import pytest
import responses
from nose.tools import * # noqa
from dateutil.parser import parse as parse_datetime
from website import settings
from addons.osfstorage.models import OsfStorageFileNode, OsfStorageFolder
from framework.auth.core import Auth
from addons.osfstorage.tests.utils import (
StorageTestCase, Delta, AssertDeltas,
recursively_create_file,
)
from addons.osfstorage.tests import factories
from addons.osfstorage.tests.utils import make_payload
from framework.auth import signing
from website.util import rubeus, api_url_for
from framework.auth import cas
from osf.models import Tag, QuickFilesNode
from osf.models import files as models
from addons.osfstorage.apps import osf_storage_root
from addons.osfstorage import utils
from addons.base.views import make_auth
from addons.osfstorage import settings as storage_settings
from api_tests.utils import create_test_file
from osf_tests.factories import ProjectFactory, ApiOAuth2PersonalTokenFactory, PreprintFactory
def create_record_with_version(path, node_settings, **kwargs):
version = factories.FileVersionFactory(**kwargs)
    record = node_settings.get_root().append_file(path)
record.versions.append(version)
record.save()
return record
@pytest.mark.django_db
class HookTestCase(StorageTestCase):
def send_hook(self, view_name, view_kwargs, payload, target, method='get', **kwargs):
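        # Build the hook URL for the target and send the payload signed with the default signer.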
method = getattr(self.app, method)
guid = view_kwargs.pop('guid', None) or target._id
return method(
api_url_for(view_name, guid=guid, **view_kwargs),
signing.sign_data(signing.default_signer, payload),
**kwargs
)
@pytest.mark.django_db
class TestGetMetadataHook(HookTestCase):
def test_empty(self):
res = self.send_hook(
'osfstorage_get_children',
{'fid': self.node_settings.get_root()._id, 'user_id': self.user._id},
{},
self.node
)
assert_true(isinstance(res.json, list))
assert_equal(res.json, [])
def test_file_metdata(self):
path = u'kind/of/magíc.mp3'
record = recursively_create_file(self.node_settings, path)
version = factories.FileVersionFactory()
record.versions.add(version)
record.save()
res = self.send_hook(
'osfstorage_get_metadata',
{'fid': record.parent._id},
{},
self.node
)
assert_true(isinstance(res.json, dict))
assert_equal(res.json, record.parent.serialize(True))
def test_preprint_primary_file_metadata(self):
preprint = PreprintFactory()
record = preprint.primary_file
version = factories.FileVersionFactory()
record.versions.add(version)
record.save()
res = self.send_hook(
'osfstorage_get_metadata',
{'fid': record.parent._id},
{},
preprint
)
assert_true(isinstance(res.json, dict))
assert_equal(res.json, record.parent.serialize(True))
def test_children_metadata(self):
path = u'kind/of/magíc.mp3'
record = recursively_create_file(self.node_settings, path)
version = factories.FileVersionFactory()
record.versions.add(version)
record.save()
res = self.send_hook(
'osfstorage_get_children',
{'fid': record.parent._id, 'user_id': self.user._id},
{},
self.node
)
assert_equal(len(res.json), 1)
res_data = res.json[0]
expected_data = record.serialize()
# Datetimes in response might not be exactly the same as in record.serialize
# because of the way Postgres serializes dates. For example,
# '2017-06-05T17:32:20.964950+00:00' will be
# serialized as '2017-06-05T17:32:20.96495+00:00' by postgres
# Therefore, we parse the dates then compare them
expected_date_modified = parse_datetime(expected_data.pop('modified'))
expected_date_created = parse_datetime(expected_data.pop('created'))
res_date_modified = parse_datetime(res_data.pop('modified'))
res_date_created = parse_datetime(res_data.pop('created'))
# latestVersionSeen should not be present in record.serialize, because it has to do
# with the user making the request itself, which isn't important when serializing the record
expected_data['latestVersionSeen'] = None
assert_equal(res_date_modified, expected_date_modified)
assert_equal(res_date_created, expected_date_created)
assert_equal(res_data, expected_data)
def test_children_metadata_preprint(self):
preprint = PreprintFactory()
record = preprint.primary_file
version = factories.FileVersionFactory()
record.versions.add(version)
record.save()
res = self.send_hook(
'osfstorage_get_children',
{'fid': record.parent._id, 'user_id': self.user._id},
{},
preprint
)
assert_equal(len(res.json), 1)
res_data = res.json[0]
expected_data = record.serialize()
# Datetimes in response might not be exactly the same as in record.serialize
# because of the way Postgres serializes dates. For example,
# '2017-06-05T17:32:20.964950+00:00' will be
# serialized as '2017-06-05T17:32:20.96495+00:00' by postgres
# Therefore, we parse the dates then compare them
expected_date_modified = parse_datetime(expected_data.pop('modified'))
expected_date_created = parse_datetime(expected_data.pop('created'))
res_date_modified = parse_datetime(res_data.pop('modified'))
res_date_created = parse_datetime(res_data.pop('created'))
# latestVersionSeen should not be present in record.serialize, because it has to do
# with the user making the request itself, which isn't important when serializing the record
expected_data['latestVersionSeen'] = None
assert_equal(res_date_modified, expected_date_modified)
assert_equal(res_date_created, expected_date_created)
def test_osf_storage_root(self):
auth = Auth(self.project.creator)
result = osf_storage_root(self.node_settings.config, self.node_settings, auth)
node = self.project
expected = rubeus.build_addon_root(
node_settings=self.node_settings,
name='',
permissions=auth,
user=auth.user,
nodeUrl=node.url,
nodeApiUrl=node.api_url,
)
root = result[0]
assert_equal(root, expected)
def test_root_default(self):
res = self.send_hook('osfstorage_get_metadata', {}, {}, self.node)
assert_equal(res.json['fullPath'], '/')
assert_equal(res.json['id'], self.node_settings.get_root()._id)
def test_root_preprint_default(self):
preprint = PreprintFactory()
res = self.send_hook('osfstorage_get_metadata', {}, {}, preprint)
assert_equal(res.json['fullPath'], '/')
assert_equal(res.json['id'], preprint.root_folder._id)
def test_metadata_not_found(self):
res = self.send_hook(
'osfstorage_get_metadata',
{'fid': 'somebogusid'}, {},
self.node,
expect_errors=True,
)
assert_equal(res.status_code, 404)
def test_metadata_not_found_lots_of_slashes(self):
res = self.send_hook(
'osfstorage_get_metadata',
{'fid': '/not/fo/u/nd/'}, {},
self.node,
expect_errors=True,
)
assert_equal(res.status_code, 404)
@pytest.mark.django_db
class TestUploadFileHook(HookTestCase):
def setUp(self):
super(TestUploadFileHook, self).setUp()
self.name = 'pízza.png'
self.record = recursively_create_file(self.node_settings, self.name)
self.auth = make_auth(self.user)
def send_upload_hook(self, parent, target=None, payload=None, **kwargs):
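        # POST a signed upload payload to the create-child hook of the given parent folder.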
return self.send_hook(
'osfstorage_create_child',
{'fid': parent._id},
payload=payload or {},
target=target or self.project,
method='post_json',
**kwargs
)
def make_payload(self, **kwargs):
user = kwargs.pop('user', self.user)
name = kwargs.pop('name', self.name)
return make_payload(user=user, name=name, **kwargs)
def test_upload_create(self):
name = 'slightly-mad'
res = self.send_upload_hook(self.node_settings.get_root(), self.project, self.make_payload(name=name))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
record = self.node_settings.get_root().find_child_by_name(name)
version = models.FileVersion.load(res.json['version'])
assert_equal(version.size, 123)
assert_equal(version.location_hash, 'file')
assert_equal(version.location, {
'object': 'file',
'uname': 'testmachine',
'service': 'filesystem',
'provider': 'filesystem',
storage_settings.WATERBUTLER_RESOURCE: 'blah',
})
assert_equal(version.metadata, {
'size': 123,
'name': 'file',
'base64': '==',
'provider': 'filesystem',
'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'
})
assert_is_not(version, None)
assert_equal([version], list(record.versions.all()))
assert_not_in(version, self.record.versions.all())
assert_equal(record.serialize(), res.json['data'])
assert_equal(res.json['data']['downloads'], self.record.get_download_count())
def test_upload_update(self):
delta = Delta(lambda: self.record.versions.count(), lambda value: value + 1)
with AssertDeltas(delta):
res = self.send_upload_hook(self.node_settings.get_root(), self.project, self.make_payload())
self.record.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_in(version, self.record.versions.all())
def test_upload_duplicate(self):
location = {
'service': 'cloud',
storage_settings.WATERBUTLER_RESOURCE: 'osf',
'object': 'file',
}
version = self.record.create_version(self.user, location)
with AssertDeltas(Delta(lambda: self.record.versions.count())):
res = self.send_upload_hook(self.node_settings.get_root(), payload=self.make_payload())
self.record.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_in(version, self.record.versions.all())
def test_upload_create_child(self):
name = 'ლ(ಠ益ಠლ).unicode'
parent = self.node_settings.get_root().append_folder('cheesey')
res = self.send_upload_hook(parent, payload=self.make_payload(name=name))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
assert_equal(res.json['data']['downloads'], self.record.get_download_count())
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_not_in(version, self.record.versions.all())
record = parent.find_child_by_name(name)
assert_in(version, record.versions.all())
assert_equals(record.name, name)
assert_equals(record.parent, parent)
def test_upload_create_child_with_same_name(self):
name = 'ლ(ಠ益ಠლ).unicode'
self.node_settings.get_root().append_file(name)
parent = self.node_settings.get_root().append_folder('cheesey')
res = self.send_upload_hook(parent, payload=self.make_payload(name=name))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
assert_equal(res.json['data']['downloads'], self.record.get_download_count())
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_not_in(version, self.record.versions.all())
record = parent.find_child_by_name(name)
assert_in(version, record.versions.all())
assert_equals(record.name, name)
assert_equals(record.parent, parent)
def test_upload_fail_to_create_version_due_to_checkout(self):
user = factories.AuthUserFactory()
name = 'Gunter\'s noise.mp3'
self.node_settings.get_root().append_file(name)
root = self.node_settings.get_root()
file = root.find_child_by_name(name)
file.checkout = user
file.save()
res = self.send_upload_hook(root, payload=self.make_payload(name=name), expect_errors=True)
assert_equal(res.status_code, 403)
def test_update_nested_child(self):
name = 'ლ(ಠ益ಠლ).unicode'
parent = self.node_settings.get_root().append_folder('cheesey')
old_node = parent.append_file(name)
res = self.send_upload_hook(parent, payload=self.make_payload(name=name))
old_node.reload()
new_node = parent.find_child_by_name(name)
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
assert_equal(res.json['data']['downloads'], new_node.get_download_count())
assert_equal(old_node, new_node)
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_in(version, new_node.versions.all())
assert_in(version, new_node.versions.all())
assert_equals(new_node.name, name)
assert_equals(new_node.parent, parent)
def test_upload_weird_name(self):
name = 'another/dir/carpe.png'
parent = self.node_settings.get_root().append_folder('cheesey')
res = self.send_upload_hook(parent, payload=self.make_payload(name=name), expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(parent.children), 0)
def test_upload_to_file(self):
name = 'carpe.png'
parent = self.node_settings.get_root().append_file('cheesey')
res = self.send_upload_hook(parent, payload=self.make_payload(name=name), expect_errors=True)
assert_true(parent.is_file)
assert_equal(res.status_code, 400)
def test_upload_no_data(self):
res = self.send_upload_hook(self.node_settings.get_root(), expect_errors=True)
assert_equal(res.status_code, 400)
def test_archive(self):
name = 'ლ(ಠ益ಠლ).unicode'
parent = self.node_settings.get_root().append_folder('cheesey')
res = self.send_upload_hook(parent, payload=self.make_payload(name=name, hashes={'sha256': 'foo'}))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
assert_is(res.json['archive'], True)
res = self.send_hook(
'osfstorage_update_metadata',
{},
target=self.project,
payload={'metadata': {
'vault': 'Vault 101',
'archive': '101 tluaV',
}, 'version': res.json['version']},
method='put_json',
)
res = self.send_upload_hook(parent, payload=self.make_payload(
name=name,
hashes={'sha256': 'foo'},
metadata={
'name': 'lakdjf',
'provider': 'testing',
}))
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
assert_is(res.json['archive'], False)
# def test_upload_update_deleted(self):
# pass
@pytest.mark.django_db
class TestUploadFileHookPreprint(TestUploadFileHook):
def setUp(self):
super(TestUploadFileHookPreprint, self).setUp()
self.preprint = PreprintFactory(creator=self.user)
self.name = self.preprint.primary_file.name
self.record = self.preprint.primary_file
self.auth = make_auth(self.user)
def test_upload_create(self):
name = 'slightly-mad'
res = self.send_upload_hook(self.preprint.root_folder, self.preprint, self.make_payload(name=name))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
record = self.preprint.root_folder.find_child_by_name(name)
version = models.FileVersion.load(res.json['version'])
assert_equal(version.size, 123)
assert_equal(version.location_hash, 'file')
assert_equal(version.location, {
'object': 'file',
'uname': 'testmachine',
'service': 'filesystem',
'provider': 'filesystem',
storage_settings.WATERBUTLER_RESOURCE: 'blah',
})
assert_equal(version.metadata, {
'size': 123,
'name': 'file',
'base64': '==',
'provider': 'filesystem',
'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'
})
assert_is_not(version, None)
assert_equal([version], list(record.versions.all()))
assert_not_in(version, self.record.versions.all())
assert_equal(record.serialize(), res.json['data'])
assert_equal(res.json['data']['downloads'], self.record.get_download_count())
def test_upload_update(self):
delta = Delta(lambda: self.record.versions.count(), lambda value: value + 1)
with AssertDeltas(delta):
res = self.send_upload_hook(self.preprint.root_folder, self.preprint, self.make_payload())
self.record.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_in(version, self.record.versions.all())
def test_upload_duplicate(self):
location = {
'service': 'cloud',
storage_settings.WATERBUTLER_RESOURCE: 'osf',
'object': 'file',
}
version = self.record.create_version(self.user, location)
with AssertDeltas(Delta(lambda: self.record.versions.count())):
res = self.send_upload_hook(self.preprint.root_folder, self.preprint, self.make_payload())
self.record.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_in(version, self.record.versions.all())
def test_upload_create_child(self):
name = 'ლ(ಠ益ಠლ).unicode'
parent = self.preprint.root_folder.append_folder('cheesey')
res = self.send_upload_hook(parent, self.preprint, self.make_payload(name=name))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
assert_equal(res.json['data']['downloads'], self.record.get_download_count())
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_not_in(version, self.record.versions.all())
record = parent.find_child_by_name(name)
assert_in(version, record.versions.all())
assert_equals(record.name, name)
assert_equals(record.parent, parent)
def test_upload_create_child_with_same_name(self):
name = 'ლ(ಠ益ಠლ).unicode'
self.preprint.root_folder.append_file(name)
parent = self.preprint.root_folder.append_folder('cheesey')
res = self.send_upload_hook(parent, self.preprint, self.make_payload(name=name))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
assert_equal(res.json['data']['downloads'], self.record.get_download_count())
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_not_in(version, self.record.versions.all())
record = parent.find_child_by_name(name)
assert_in(version, record.versions.all())
assert_equals(record.name, name)
assert_equals(record.parent, parent)
def test_upload_fail_to_create_version_due_to_checkout(self):
user = factories.AuthUserFactory()
name = 'Gunter\'s noise.mp3'
self.preprint.root_folder.append_file(name)
root = self.preprint.root_folder
file = root.find_child_by_name(name)
file.checkout = user
file.save()
res = self.send_upload_hook(root, self.preprint, self.make_payload(name=name), expect_errors=True)
assert_equal(res.status_code, 403)
def test_update_nested_child(self):
name = 'ლ(ಠ益ಠლ).unicode'
parent = self.preprint.root_folder.append_folder('cheesey')
old_node = parent.append_file(name)
res = self.send_upload_hook(parent, self.preprint, self.make_payload(name=name))
old_node.reload()
new_node = parent.find_child_by_name(name)
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
assert_equal(res.json['data']['downloads'], new_node.get_download_count())
assert_equal(old_node, new_node)
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_in(version, new_node.versions.all())
assert_in(version, new_node.versions.all())
assert_equals(new_node.name, name)
assert_equals(new_node.parent, parent)
def test_upload_weird_name(self):
name = 'another/dir/carpe.png'
parent = self.preprint.root_folder.append_folder('cheesey')
res = self.send_upload_hook(parent, self.preprint, self.make_payload(name=name), expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(parent.children), 0)
def test_upload_to_file(self):
name = 'carpe.png'
parent = self.preprint.root_folder.append_file('cheesey')
res = self.send_upload_hook(parent, self.preprint, self.make_payload(name=name), expect_errors=True)
assert_true(parent.is_file)
assert_equal(res.status_code, 400)
def test_upload_no_data(self):
res = self.send_upload_hook(self.preprint.root_folder, self.preprint, expect_errors=True)
assert_equal(res.status_code, 400)
@pytest.mark.django_db
class TestUpdateMetadataHook(HookTestCase):
def setUp(self):
super(TestUpdateMetadataHook, self).setUp()
self.path = 'greasy/pízza.png'
self.record = recursively_create_file(self.node_settings, self.path)
self.version = factories.FileVersionFactory()
self.record.versions = [self.version]
self.record.save()
self.payload = {
'metadata': {
'size': 123,
'modified': 'Mon, 16 Feb 2015 18:45:34 GMT',
'md5': 'askjasdlk;jsadlkjsadf',
'sha256': 'sahduashduahdushaushda',
},
'version': self.version._id,
'size': 321, # Just to make sure the field is ignored
}
def send_metadata_hook(self, payload=None, target=None, **kwargs):
return self.send_hook(
'osfstorage_update_metadata',
{},
payload=payload or self.payload,
target=target or self.node,
method='put_json',
**kwargs
)
def test_callback(self):
self.version.external_modified = None
self.version.save()
self.send_metadata_hook()
self.version.reload()
#Test fields are added
assert_equal(self.version.metadata['size'], 123)
assert_equal(self.version.metadata['md5'], 'askjasdlk;jsadlkjsadf')
assert_equal(self.version.metadata['modified'], 'Mon, 16 Feb 2015 18:45:34 GMT')
#Test attributes are populated
assert_equal(self.version.size, 123)
assert_true(isinstance(self.version.external_modified, datetime.datetime))
def test_archived(self):
self.send_metadata_hook({
'version': self.version._id,
'metadata': {
'vault': 'osf_storage_prod',
'archive': 'Some really long glacier object id here'
}
})
self.version.reload()
assert_equal(self.version.metadata['vault'], 'osf_storage_prod')
assert_equal(self.version.metadata['archive'], 'Some really long glacier object id here')
def test_archived_record_not_found(self):
res = self.send_metadata_hook(
payload={
'metadata': {'archive': 'glacier'},
'version': self.version._id[::-1],
'size': 123,
'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'
},
expect_errors=True,
)
assert_equal(res.status_code, 404)
self.version.reload()
assert_not_in('archive', self.version.metadata)
@pytest.mark.django_db
class TestUpdateMetadataHookPreprints(HookTestCase):
def setUp(self):
super(TestUpdateMetadataHookPreprints, self).setUp()
self.preprint = PreprintFactory()
self.record = self.preprint.primary_file
self.path = 'greasy/pízza.png'
self.version = factories.FileVersionFactory()
self.record.versions = [self.version]
self.record.save()
self.payload = {
'metadata': {
'size': 123,
'modified': 'Mon, 16 Feb 2015 18:45:34 GMT',
'md5': 'askjasdlk;jsadlkjsadf',
'sha256': 'sahduashduahdushaushda',
},
'version': self.version._id,
'size': 321, # Just to make sure the field is ignored
}
def send_metadata_hook(self, payload=None, target=None, **kwargs):
return self.send_hook(
'osfstorage_update_metadata',
{},
payload=payload or self.payload,
target=target or self.preprint,
method='put_json',
**kwargs
)
def test_callback(self):
self.version.external_modified = None
self.version.save()
self.send_metadata_hook()
self.version.reload()
#Test fields are added
assert_equal(self.version.metadata['size'], 123)
assert_equal(self.version.metadata['md5'], 'askjasdlk;jsadlkjsadf')
assert_equal(self.version.metadata['modified'], 'Mon, 16 Feb 2015 18:45:34 GMT')
#Test attributes are populated
assert_equal(self.version.size, 123)
assert_true(isinstance(self.version.external_modified, datetime.datetime))
def test_archived(self):
self.send_metadata_hook({
'version': self.version._id,
'metadata': {
'vault': 'osf_storage_prod',
'archive': 'Some really long glacier object id here'
}
})
self.version.reload()
assert_equal(self.version.metadata['vault'], 'osf_storage_prod')
assert_equal(self.version.metadata['archive'], 'Some really long glacier object id here')
def test_archived_record_not_found(self):
res = self.send_metadata_hook(
payload={
'metadata': {'archive': 'glacier'},
'version': self.version._id[::-1],
'size': 123,
'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'
},
expect_errors=True,
)
assert_equal(res.status_code, 404)
self.version.reload()
assert_not_in('archive', self.version.metadata)
@pytest.mark.django_db
class TestGetRevisions(StorageTestCase):
def setUp(self):
super(TestGetRevisions, self).setUp()
self.path = 'tie/your/mother/down.mp3'
self.record = recursively_create_file(self.node_settings, self.path)
self.record.versions = [factories.FileVersionFactory() for __ in range(15)]
self.record.save()
def get_revisions(self, fid=None, guid=None, **kwargs):
return self.app.get(
api_url_for(
'osfstorage_get_revisions',
fid=fid or self.record._id,
guid=guid or self.project._id,
**signing.sign_data(signing.default_signer, {})
),
auth=self.user.auth,
**kwargs
)
def test_get_revisions(self):
res = self.get_revisions()
expected = [
utils.serialize_revision(
self.project,
self.record,
version,
index=self.record.versions.count() - 1 - idx
)
for idx, version in enumerate(self.record.versions.all())
]
assert_equal(len(res.json['revisions']), 15)
assert_equal(res.json['revisions'], [x for x in expected])
assert_equal(res.json['revisions'][0]['index'], 15)
assert_equal(res.json['revisions'][-1]['index'], 1)
def test_get_revisions_path_not_found(self):
res = self.get_revisions(fid='missing', expect_errors=True)
assert_equal(res.status_code, 404)
@pytest.mark.django_db
class TestCreateFolder(HookTestCase):
def setUp(self):
super(TestCreateFolder, self).setUp()
self.root_node = self.node_settings.get_root()
def create_folder(self, name, parent=None, target=None, **kwargs):
parent = parent or self.node_settings.get_root()
target = target or self.project
return self.send_hook(
'osfstorage_create_child',
{'fid': parent._id, 'guid': target._id},
payload={
'name': name,
'user': self.user._id,
'kind': 'folder'
},
target=self.project,
method='post_json',
**kwargs
)
def test_create_folder(self):
resp = self.create_folder('name')
self.root_node.reload()
assert_equal(resp.status_code, 201)
assert_equal(len(self.root_node.children), 1)
assert_equal(self.root_node.children[0].serialize(), resp.json['data'])
def test_no_data(self):
resp = self.send_hook(
'osfstorage_create_child',
{'fid': self.root_node._id, 'guid': self.project._id},
payload={},
target=self.project,
method='post_json',
expect_errors=True
)
assert_equal(resp.status_code, 400)
def test_create_with_parent(self):
resp = self.create_folder('name')
assert_equal(resp.status_code, 201)
assert_equal(self.root_node.children.count(), 1)
assert_equal(self.root_node.children.all()[0].serialize(), resp.json['data'])
resp = self.create_folder('name', parent=OsfStorageFileNode.load(resp.json['data']['id']))
assert_equal(resp.status_code, 201)
assert_equal(self.root_node.children.count(), 1)
assert_false(self.root_node.children.all()[0].is_file)
assert_equal(self.root_node.children.all()[0].children.count(), 1)
assert_false(self.root_node.children.all()[0].children.all()[0].is_file)
assert_equal(self.root_node.children.all()[0].children.all()[0].serialize(), resp.json['data'])
@pytest.mark.django_db
class TestDeleteHook(HookTestCase):
def setUp(self):
super(TestDeleteHook, self).setUp()
self.root_node = self.node_settings.get_root()
def send_hook(self, view_name, view_kwargs, payload, target, method='get', **kwargs):
method = getattr(self.app, method)
return method(
'{url}?payload={payload}&signature={signature}'.format(
url=api_url_for(view_name, guid=target._id, **view_kwargs),
**signing.sign_data(signing.default_signer, payload)
),
**kwargs
)
def delete(self, file_node, **kwargs):
return self.send_hook(
'osfstorage_delete',
{'fid': file_node._id},
payload={
'user': self.user._id
},
target=self.node,
method='delete',
**kwargs
)
def test_delete(self):
file = self.root_node.append_file('Newfile')
resp = self.delete(file)
assert_equal(resp.status_code, 200)
assert_equal(resp.json, {'status': 'success'})
fid = file._id
del file
# models.StoredFileNode._clear_object_cache()
assert_is(OsfStorageFileNode.load(fid), None)
assert_true(models.TrashedFileNode.load(fid))
def test_delete_deleted(self):
file = self.root_node.append_file('Newfile')
file.delete()
resp = self.delete(file, expect_errors=True)
assert_equal(resp.status_code, 404)
def test_cannot_delete_root(self):
resp = self.delete(self.root_node, expect_errors=True)
assert_equal(resp.status_code, 400)
def test_attempt_delete_rented_file(self):
user = factories.AuthUserFactory()
file_checked = self.root_node.append_file('Newfile')
file_checked.checkout = user
file_checked.save()
res = self.delete(file_checked, expect_errors=True)
assert_equal(res.status_code, 403)
def test_attempt_delete_folder_with_rented_file(self):
folder = self.root_node.append_folder('Hotel Events')
user = factories.AuthUserFactory()
file_checked = folder.append_file('Checkout time')
file_checked.checkout = user
file_checked.save()
res = self.delete(folder, expect_errors=True)
assert_equal(res.status_code, 403)
def test_attempt_delete_double_nested_folder_rented_file(self):
folder = self.root_node.append_folder('One is not enough')
folder_two = folder.append_folder('Two might be doe')
user = factories.AuthUserFactory()
file_checked = folder_two.append_file('We shall see')
file_checked.checkout = user
file_checked.save()
res = self.delete(folder, expect_errors=True)
assert_equal(res.status_code, 403)
@pytest.mark.django_db
class TestDeleteHookPreprint(TestDeleteHook):
def setUp(self):
super(TestDeleteHookPreprint, self).setUp()
self.preprint = PreprintFactory(creator=self.user)
self.node = self.preprint
self.root_node = self.preprint.root_folder
def test_attempt_delete_while_preprint(self):
res = self.delete(self.preprint.primary_file, expect_errors=True)
assert_equal(res.status_code, 403)
def test_attempt_delete_folder_with_preprint(self):
folder = self.root_node.append_folder('Fishes')
file = folder.append_file('Fish')
self.preprint.primary_file = file
self.preprint.save()
res = self.delete(folder, expect_errors=True)
assert_equal(res.status_code, 403)
def test_delete_folder_while_preprint(self):
folder = self.root_node.append_folder('Mr. Yuck')
preprint_file = self.root_node.append_file('Thyme Out')
self.preprint.primary_file = preprint_file
self.preprint.save()
res = self.delete(folder)
assert_equal(res.status_code, 200)
def test_delete_folder_on_preprint_with_non_preprint_file_inside(self):
folder = self.root_node.append_folder('Herbal Crooners')
file = folder.append_file('Frank Cilantro')
# project having a preprint should not block other moves
primary_file = self.root_node.append_file('Thyme Out')
self.preprint.primary_file = primary_file
self.preprint.save()
res = self.delete(folder)
assert_equal(res.status_code, 200)
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestMoveHook(HookTestCase):
def setUp(self):
super(TestMoveHook, self).setUp()
self.root_node = self.node_settings.get_root()
def test_move_hook(self):
file = self.root_node.append_file('Ain\'t_got_no,_I_got_life')
folder = self.root_node.append_folder('Nina Simone')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': file._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder._id,
'target': folder.target._id,
'name': folder.name,
}
},
target=self.node,
method='post_json',)
assert_equal(res.status_code, 200)
def test_move_checkedout_file(self):
file = self.root_node.append_file('Ain\'t_got_no,_I_got_life')
file.checkout = self.user
file.save()
folder = self.root_node.append_folder('Nina Simone')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': file._id, # id of the actual file
'target': self.root_node._id, # the source FOLDER
'user': self.user._id,
'destination': {
'parent': folder._id, # the destination FOLDER
'target': folder.target._id, # The TARGET for the folder where it is going
'name': folder.name,
}
},
target=self.node,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 405)
def test_move_checkedout_file_in_folder(self):
folder = self.root_node.append_folder('From Here')
file = folder.append_file('No I don\'t wanna go')
file.checkout = self.user
file.save()
folder_two = self.root_node.append_folder('To There')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': folder._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder_two._id,
'target': folder_two.target._id,
'name': folder_two.name,
}
},
target=self.node,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 405)
def test_move_checkedout_file_two_deep_in_folder(self):
folder = self.root_node.append_folder('From Here')
folder_nested = folder.append_folder('Inbetween')
file = folder_nested.append_file('No I don\'t wanna go')
file.checkout = self.user
file.save()
folder_two = self.root_node.append_folder('To There')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': folder._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder_two._id,
'target': folder_two.target._id,
'name': folder_two.name,
}
},
target=self.node,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 405)
def test_move_file_out_of_node(self):
folder = self.root_node.append_folder('A long time ago')
file = folder.append_file('in a galaxy')
project = ProjectFactory(creator=self.user)
project_settings = project.get_addon('osfstorage')
project_root_node = project_settings.get_root()
folder_two = project_root_node.append_folder('far away')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': folder._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder_two._id,
'target': folder_two.target._id,
'name': folder_two.name,
}
},
target=project,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 200)
def test_can_move_file_out_of_quickfiles_node(self):
quickfiles_node = QuickFilesNode.objects.get_for_user(self.user)
quickfiles_file = create_test_file(quickfiles_node, self.user, filename='slippery.mp3')
quickfiles_folder = OsfStorageFolder.objects.get_root(target=quickfiles_node)
dest_folder = OsfStorageFolder.objects.get_root(target=self.project)
res = self.send_hook(
'osfstorage_move_hook',
{'guid': quickfiles_node._id},
payload={
'source': quickfiles_file._id,
'target': quickfiles_node._id,
'user': self.user._id,
'destination': {
'parent': dest_folder._id,
'target': self.project._id,
'name': dest_folder.name,
}
},
target=quickfiles_node,
method='post_json',
)
assert_equal(res.status_code, 200)
def test_can_rename_file_in_quickfiles_node(self):
quickfiles_node = QuickFilesNode.objects.get_for_user(self.user)
quickfiles_file = create_test_file(quickfiles_node, self.user, filename='road_dogg.mp3')
quickfiles_folder = OsfStorageFolder.objects.get_root(target=quickfiles_node)
dest_folder = OsfStorageFolder.objects.get_root(target=self.project)
new_name = 'JesseJames.mp3'
res = self.send_hook(
'osfstorage_move_hook',
{'guid': quickfiles_node._id},
payload={
'action': 'rename',
'source': quickfiles_file._id,
'target': quickfiles_node._id,
'user': self.user._id,
'name': quickfiles_file.name,
'destination': {
'parent': quickfiles_folder._id,
'target': quickfiles_node._id,
'name': new_name,
}
},
target=quickfiles_node,
method='post_json',
expect_errors=True,
)
quickfiles_file.reload()
assert_equal(res.status_code, 200)
assert_equal(quickfiles_file.name, new_name)
@pytest.mark.django_db
class TestMoveHookPreprint(TestMoveHook):
def setUp(self):
super(TestMoveHook, self).setUp()
self.node = PreprintFactory(creator=self.user)
self.root_node = self.node.root_folder
def test_move_primary_file_out_of_preprint(self):
project = ProjectFactory(creator=self.user)
project_settings = project.get_addon('osfstorage')
project_root_node = project_settings.get_root()
folder_two = project_root_node.append_folder('To There')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': self.node.primary_file._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder_two._id,
'target': folder_two.target._id,
'name': folder_two.name,
}
},
target=project,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 403)
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestCopyHook(HookTestCase):
@pytest.mark.enable_implicit_clean
def test_can_copy_file_out_of_quickfiles_node(self):
quickfiles_node = QuickFilesNode.objects.get_for_user(self.user)
quickfiles_folder = OsfStorageFolder.objects.get_root(target=quickfiles_node)
dest_folder = OsfStorageFolder.objects.get_root(target=self.project)
res = self.send_hook(
'osfstorage_copy_hook',
{'guid': quickfiles_node._id},
payload={
'source': quickfiles_folder._id,
'target': quickfiles_node._id,
'user': self.user._id,
'destination': {
'parent': dest_folder._id,
'target': self.project._id,
'name': dest_folder.name,
}
},
target=self.project,
method='post_json',
)
assert_equal(res.status_code, 201)
@pytest.mark.django_db
class TestFileTags(StorageTestCase):
def test_file_add_tag(self):
file = self.node_settings.get_root().append_file('Good Morning.mp3')
assert_not_in('Kanye_West', file.tags.values_list('name', flat=True))
url = api_url_for('osfstorage_add_tag', guid=self.node._id, fid=file._id)
self.app.post_json(url, {'tag': 'Kanye_West'}, auth=self.user.auth)
file.reload()
assert_in('Kanye_West', file.tags.values_list('name', flat=True))
def test_file_add_non_ascii_tag(self):
file = self.node_settings.get_root().append_file('JapaneseCharacters.txt')
assert_not_in('コンサート', file.tags.values_list('name', flat=True))
url = api_url_for('osfstorage_add_tag', guid=self.node._id, fid=file._id)
self.app.post_json(url, {'tag': 'コンサート'}, auth=self.user.auth)
file.reload()
assert_in('コンサート', file.tags.values_list('name', flat=True))
def test_file_remove_tag(self):
file = self.node_settings.get_root().append_file('Champion.mp3')
tag = Tag(name='Graduation')
tag.save()
file.tags.add(tag)
file.save()
assert_in('Graduation', file.tags.values_list('name', flat=True))
url = api_url_for('osfstorage_remove_tag', guid=self.node._id, fid=file._id)
self.app.delete_json(url, {'tag': 'Graduation'}, auth=self.user.auth)
file.reload()
assert_not_in('Graduation', file.tags.values_list('name', flat=True))
def test_tag_the_same_tag(self):
file = self.node_settings.get_root().append_file('Lie,Cheat,Steal.mp3')
tag = Tag(name='Run_the_Jewels')
tag.save()
file.tags.add(tag)
file.save()
assert_in('Run_the_Jewels', file.tags.values_list('name', flat=True))
url = api_url_for('osfstorage_add_tag', guid=self.node._id, fid=file._id)
res = self.app.post_json(url, {'tag': 'Run_the_Jewels'}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['status'], 'failure')
def test_remove_nonexistent_tag(self):
file = self.node_settings.get_root().append_file('WonderfulEveryday.mp3')
assert_not_in('Chance', file.tags.values_list('name', flat=True))
url = api_url_for('osfstorage_remove_tag', guid=self.node._id, fid=file._id)
res = self.app.delete_json(url, {'tag': 'Chance'}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['status'], 'failure')
def test_file_add_tag_creates_log(self):
file = self.node_settings.get_root().append_file('Yeezy Season 3.mp4')
url = api_url_for('osfstorage_add_tag', guid=self.node._id, fid=file._id)
res = self.app.post_json(url, {'tag': 'Kanye_West'}, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.node.reload()
assert_equal(self.node.logs.latest().action, 'file_tag_added')
@mock.patch('addons.osfstorage.models.OsfStorageFile.add_tag_log')
def test_file_add_tag_fail_doesnt_create_log(self, mock_log):
file = self.node_settings.get_root().append_file('UltraLightBeam.mp3')
tag = Tag(name='The Life of Pablo')
tag.save()
file.tags.add(tag)
file.save()
url = api_url_for('osfstorage_add_tag', guid=self.node._id, fid=file._id)
res = self.app.post_json(url, {'tag': 'The Life of Pablo'}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
mock_log.assert_not_called()
def test_file_remove_tag_creates_log(self):
file = self.node_settings.get_root().append_file('Formation.flac')
tag = Tag(name='You that when you cause all this conversation')
tag.save()
file.tags.add(tag)
file.save()
url = api_url_for('osfstorage_remove_tag', guid=self.node._id, fid=file._id)
res = self.app.delete_json(url, {'tag': 'You that when you cause all this conversation'}, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.node.reload()
assert_equal(self.node.logs.latest().action, 'file_tag_removed')
@mock.patch('addons.osfstorage.models.OsfStorageFile.add_tag_log')
def test_file_remove_tag_fail_doesnt_create_log(self, mock_log):
file = self.node_settings.get_root().append_file('For-once-in-my-life.mp3')
url = api_url_for('osfstorage_remove_tag', guid=self.node._id, fid=file._id)
res = self.app.delete_json(url, {'tag': 'wonder'}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
mock_log.assert_not_called()
@pytest.mark.django_db
@pytest.mark.enable_bookmark_creation
class TestFileViews(StorageTestCase):
def test_file_views(self):
file = create_test_file(target=self.node, user=self.user)
url = self.node.web_url_for('addon_view_or_download_file', path=file._id, provider=file.provider)
# Test valid url file 200 on redirect
redirect = self.app.get(url, auth=self.user.auth)
assert redirect.status_code == 302
res = redirect.follow(auth=self.user.auth)
assert res.status_code == 200
# Test invalid node but valid deep_url redirects (moved log urls)
project_two = ProjectFactory(creator=self.user)
url = project_two.web_url_for('addon_view_or_download_file', path=file._id, provider=file.provider)
redirect = self.app.get(url, auth=self.user.auth)
assert redirect.status_code == 302
redirect_two = redirect.follow(auth=self.user.auth)
assert redirect_two.status_code == 302
res = redirect_two.follow(auth=self.user.auth)
assert res.status_code == 200
def test_download_file(self):
file = create_test_file(target=self.node, user=self.user)
folder = self.node_settings.get_root().append_folder('Folder')
base_url = '/download/{}/'
# Test download works with path
url = base_url.format(file._id)
redirect = self.app.get(url, auth=self.user.auth)
assert redirect.status_code == 302
# Test download works with guid
url = base_url.format(file.get_guid()._id)
redirect = self.app.get(url, auth=self.user.auth)
assert redirect.status_code == 302
# Test nonexistant file 404's
url = base_url.format('FakeGuid')
redirect = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert redirect.status_code == 404
# Test folder 400's
url = base_url.format(folder._id)
redirect = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert redirect.status_code == 400
@responses.activate
@mock.patch('framework.auth.cas.get_client')
def test_download_file_with_token(self, mock_get_client):
cas_base_url = 'http://accounts.test.test'
client = cas.CasClient(cas_base_url)
mock_get_client.return_value = client
base_url = '/download/{}/'
file = create_test_file(target=self.node, user=self.user)
responses.add(
responses.Response(
responses.GET,
'{}/oauth2/profile'.format(cas_base_url),
body=json.dumps({'id': '{}'.format(self.user._id)}),
status=200,
)
)
download_url = base_url.format(file.get_guid()._id)
token = ApiOAuth2PersonalTokenFactory(owner=self.user)
headers = {
'Authorization': str('Bearer {}'.format(token.token_id))
}
redirect = self.app.get(download_url, headers=headers)
assert mock_get_client.called
assert self.node.osfstorage_region.waterbutler_url in redirect.location
assert redirect.status_code == 302
@pytest.mark.django_db
class TestPreprintFileViews(StorageTestCase):
def test_file_views(self):
self.preprint = PreprintFactory(creator=self.user)
file = self.preprint.primary_file
guid = file.get_guid(create=True)
url = self.preprint.web_url_for('resolve_guid', guid=guid._id)
# File view for preprint file redirects to the preprint
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert res.status_code == 302
assert self.preprint._id in res.location
def test_download_file(self):
self.preprint = PreprintFactory(creator=self.user)
file = self.preprint.primary_file
folder = self.preprint.root_folder.append_folder('Folder')
base_url = '/download/{}/'
# Test download works with path
url = base_url.format(file._id)
redirect = self.app.get(url, auth=self.user.auth)
assert redirect.status_code == 302
# Test download works with guid
url = base_url.format(file.get_guid(create=True)._id)
redirect = self.app.get(url, auth=self.user.auth)
assert redirect.status_code == 302
# Test nonexistant file 404's
url = base_url.format('FakeGuid')
redirect = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert redirect.status_code == 404
# Test folder 400's
url = base_url.format(folder._id)
redirect = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert redirect.status_code == 400
@responses.activate
@mock.patch('framework.auth.cas.get_client')
def test_download_file_with_token(self, mock_get_client):
self.preprint = PreprintFactory(creator=self.user)
file = self.preprint.primary_file
cas_base_url = 'http://accounts.test.test'
client = cas.CasClient(cas_base_url)
mock_get_client.return_value = client
base_url = '/download/{}/'
responses.add(
responses.Response(
responses.GET,
'{}/oauth2/profile'.format(cas_base_url),
body=json.dumps({'id': '{}'.format(self.user._id)}),
status=200,
)
)
download_url = base_url.format(file.get_guid(create=True)._id)
token = ApiOAuth2PersonalTokenFactory(owner=self.user)
headers = {
'Authorization': str('Bearer {}'.format(token.token_id))
}
redirect = self.app.get(download_url, headers=headers)
assert mock_get_client.called
assert settings.WATERBUTLER_URL in redirect.location
assert redirect.status_code == 302
| 37.479516 | 118 | 0.627807 |
4a2656b804a5ca954a770e7a6addf69d2637b32b | 1,070 | py | Python | examples/rules/TaskHasTag.py | tonipamies/ansible-lint | 7c82e774dbecf1cdd0757fc3ca774849534b29bc | [
"MIT"
] | null | null | null | examples/rules/TaskHasTag.py | tonipamies/ansible-lint | 7c82e774dbecf1cdd0757fc3ca774849534b29bc | [
"MIT"
] | null | null | null | examples/rules/TaskHasTag.py | tonipamies/ansible-lint | 7c82e774dbecf1cdd0757fc3ca774849534b29bc | [
"MIT"
] | null | null | null | """Example implementation of a rule requiring tasks to have tags set."""
from ansiblelint.rules import AnsibleLintRule
class TaskHasTag(AnsibleLintRule):
"""Tasks must have tag."""
id = 'EXAMPLE001'
shortdesc = 'Tasks must have tag'
description = 'Tasks must have tag'
tags = ['productivity', 'tags']
def matchtask(self, file, task):
"""Task matching method."""
# The meta files don't have tags
if file.kind in ["meta", "playbooks"]:
return False
if isinstance(task, str):
return False
# If the task include another task or make the playbook fail
# Don't force to have a tag
if not set(task.keys()).isdisjoint(['include', 'fail']):
return False
if not set(task.keys()).isdisjoint(['include_tasks', 'fail']):
return False
if not set(task.keys()).isdisjoint(['import_tasks', 'fail']):
return False
# Task should have tags
if 'tags' not in task:
return True
return False
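# Illustrative note (not part of the original rule): a hypothetical task like the
# following would be flagged by this rule because it has no `tags` key:
#
#   - name: Install nginx
#     ansible.builtin.package:
#       name: nginx
#       state: present
#
# Adding any tag, e.g. `tags: [webserver]`, satisfies the rule.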
| 28.157895 | 72 | 0.592523 |
4a2656bd124c3f265419f37400b3f4026fddddd6 | 1,763 | py | Python | qiskit_metal/tests/test_gui_basic.py | rupeshknn/qiskit-metal | 94d2cd1ecac4b42828bca855adedcea5a6afe3b2 | [
"Apache-2.0"
] | null | null | null | qiskit_metal/tests/test_gui_basic.py | rupeshknn/qiskit-metal | 94d2cd1ecac4b42828bca855adedcea5a6afe3b2 | [
"Apache-2.0"
] | null | null | null | qiskit_metal/tests/test_gui_basic.py | rupeshknn/qiskit-metal | 94d2cd1ecac4b42828bca855adedcea5a6afe3b2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
#pylint: disable-msg=unnecessary-pass
#pylint: disable-msg=broad-except
#pylint: disable-msg=unused-variable
"""
Qiskit Metal unit tests analyses functionality.
Test a planar design and launching the GUI.
"""
import unittest
from qiskit_metal._gui.widgets.bases.dict_tree_base import BranchNode
from qiskit_metal._gui.widgets.bases.dict_tree_base import LeafNode
class TestGUIBasic(unittest.TestCase):
"""
Unit test class.
"""
def setUp(self):
"""
Setup unit test.
"""
pass
def tearDown(self):
"""
        Tie up any loose ends.
"""
pass
def test_instantiate_branch_node(self):
"""
Test instantiation of BranchNode in dict_tree_base.py.
"""
try:
BranchNode('my_name')
except Exception:
message = "BranchNode instantiation failed"
self.fail(message)
def test_instantiate_leaf_node(self):
"""
Test instantiation of LeafNode in dict_tree_base.py.
"""
try:
LeafNode('my_label')
except Exception:
message = "LeafNode instantiation failed"
self.fail(message)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 25.550725 | 77 | 0.654566 |
4a2656d17fbf208077789e571a4e3ebe62ff8bd5 | 2,238 | py | Python | languages/python3/dash/app.py | jcnaud/snippet | 10db24e2a648af29c51f6bc3a083ffe86e11ae5c | [
"Apache-2.0"
] | 5 | 2018-01-18T10:08:50.000Z | 2020-05-01T04:18:02.000Z | languages/python3/dash/app.py | jcnaud/snippet | 10db24e2a648af29c51f6bc3a083ffe86e11ae5c | [
"Apache-2.0"
] | null | null | null | languages/python3/dash/app.py | jcnaud/snippet | 10db24e2a648af29c51f6bc3a083ffe86e11ae5c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import dash
import dash_core_components as dcc
import dash_html_components as html
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(children='Hello Dash'),
html.Div(children='''
Dash: A web application framework for Python.
'''),
dcc.Graph(
id='example-graph',
figure={
'data': [
{'x': [1, 2, 3], 'y': [4, 1, 2],
'type': 'bar', 'name': 'SF'},
{'x': [1, 2, 3], 'y': [2, 4, 5],
'type': 'bar', 'name': u'Montréal'},
],
'layout': {
'title': 'Dash Data Visualization'
}
}
),
html.Label('Dropdown'),
dcc.Dropdown(
options=[
{'label': 'New York City', 'value': 'NYC'},
{'label': u'Montréal', 'value': 'MTL'},
{'label': 'San Francisco', 'value': 'SF'}
],
value='MTL'
),
html.Label('Multi-Select Dropdown'),
dcc.Dropdown(
options=[
{'label': 'New York City', 'value': 'NYC'},
{'label': u'Montréal', 'value': 'MTL'},
{'label': 'San Francisco', 'value': 'SF'}
],
value=['MTL', 'SF'],
multi=True
),
html.Label('Radio Items'),
dcc.RadioItems(
options=[
{'label': 'New York City', 'value': 'NYC'},
{'label': u'Montréal', 'value': 'MTL'},
{'label': 'San Francisco', 'value': 'SF'}
],
value='MTL'
),
html.Label('Checkboxes'),
dcc.Checklist(
options=[
{'label': 'New York City', 'value': 'NYC'},
{'label': u'Montréal', 'value': 'MTL'},
{'label': 'San Francisco', 'value': 'SF'}
],
values=['MTL', 'SF']
),
html.Label('Text Input'),
dcc.Input(value='MTL', type='text'),
html.Label('Slider'),
dcc.Slider(
min=0,
max=9,
marks={i: 'Label {}'.format(i) if i == 1 else str(i) for i in range(1, 6)},
value=5,
),
])
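# Note: run directly, Dash starts a local development server (by default at
# http://127.0.0.1:8050) serving the layout defined above.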
if __name__ == '__main__':
app.run_server(debug=True)
| 25.431818 | 83 | 0.471403 |
4a2657a3ec4be89c1f254a3781efc76635d6c2af | 42,430 | py | Python | extra/unused/pxfuncs.py | whyjz/CARST | 875c915e835b0e09a7eccb58833719bbfc85b635 | [
"MIT"
] | 10 | 2018-01-02T18:03:07.000Z | 2022-01-25T05:36:21.000Z | extra/unused/pxfuncs.py | whyjz/CARST | 875c915e835b0e09a7eccb58833719bbfc85b635 | [
"MIT"
] | 1 | 2020-04-14T16:57:15.000Z | 2020-05-15T16:10:17.000Z | extra/unused/pxfuncs.py | whyjz/CARST | 875c915e835b0e09a7eccb58833719bbfc85b635 | [
"MIT"
] | 4 | 2016-08-12T15:06:48.000Z | 2019-11-27T05:33:50.000Z | #!/usr/bin/python
# Author: Andrew Kenneth Melkonian
# All rights reserved
#import calendar;
#import fileinput;
from makeAzo import *;
import math;
import numpy;
import os;
#from pxfuncs import *;
import pylab;
import re;
import scipy;
import shutil;
import subprocess;
#import sys;
#import time;
def adjustPhase(radar_path, wavelength, width):
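    # Scales an unwrapped-phase file (radians) to displacement via wavelength / (4 * pi)
    # and writes the result next to the input as "new_<name>".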
radar_dir = ".";
index = radar_path.rfind("/");
if index > -1:
radar_dir = radar_path[ : index];
radar_name = radar_path[index + 1 : ];
new_radar_path = radar_dir + "/new_" + radar_name;
infile = open(radar_path, "rb");
radar_unw_data = scipy.matrix(numpy.fromfile(infile,numpy.float32, -1)).reshape(int(width), -1);
radar_unw_data = radar_unw_data * float(wavelength) / 4 / numpy.pi;
infile.close();
radar_unw_data = scipy.matrix(radar_unw_data,scipy.float32);
radar_unw_data.tofile(new_radar_path);
radar_unw_data = None;
return(new_radar_path);
def ampcor(path, rwin, awin, search_x, search_y, wsamp, numproc):
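    # For every *_cull.off pair found under `path`, generates the .amp files from the SLCs
    # if needed (cpx2mag_phs) and assembles the azo_real.pl / ampcor offset-tracking command;
    # as written the command is only printed (the subprocess call below is commented out).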
cwd = os.getcwd();
import glob;
cull_paths = glob.glob(path + "/int*/*_cull.off");
for i in range(0,len(cull_paths)):
cull_name = cull_paths[i].strip()[cull_paths[i].rfind("/")+1:];
cull_dir = cull_paths[i][:cull_paths[i].rfind("/")];
if not re.search("\d{6}",cull_name):
continue;
already_processed=False;
contents=os.listdir(cull_dir);
for item in contents:
if re.search("azo_" + wsamp + "_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y,item) > -1:
already_processed=True;
break;
if already_processed:
print("\n***** WARNING, " + cull_dir + " contains \"" + item +"\", \"ampcor\" step already run, exiting...\n");
continue;
index1 = re.search("\d{6}",cull_name).start(0);
index2 = re.search("\d{6}",cull_name).end(0);
index3 = re.search("\d{6}",cull_name[index2:]).start(0)+index2;
index4 = re.search("\d{6}",cull_name[index2:]).end(0)+index2;
date2 = cull_name[index1:index2];
date1 = cull_name[index3:index4];
slc1 = path + "/" + date1 + "/" + date1 + ".slc";
if not os.path.exists(slc1):
print("\n***** ERROR, could not find \"" + date1 + ".slc\" in \"" + path + "/" + date1 + "/\"\n");
break;
slc2 = path + "/" + date2 + "/" + date2 + ".slc";
if not os.path.exists(slc2):
print("\n***** ERROR, could not find \"" + date2 + ".slc\" in \"" + path + "/" + date2 + "/\"\n");
break;
slc1_rsc_file = open(slc1 + ".rsc","r");
while 1:
line = slc1_rsc_file.readline();
if not line:
break;
elif line.find("WIDTH") > -1:
width = line.split()[1].strip();
slc1_rsc_file.close();
amp1 = cull_dir + "/" + date1 + ".amp";
amp2 = cull_dir + "/" + date2 + ".amp";
if not os.path.exists(amp1):
cmd = "\ncpx2mag_phs " + slc1 + " " + cull_dir + "/" + date1 + ".amp " + cull_dir + "/" + date1 + ".phs " + width + "\n";
cmd += "\ncp -pr " + slc1 + ".rsc " + cull_dir + "/" + date1 + ".amp.rsc\n";
cmd += "\nrm " + cull_dir + "/" + date1 + ".phs\n";
subprocess.call(cmd,shell=True);
slc2_rsc_file = open(slc2 + ".rsc","r");
while 1:
line = slc2_rsc_file.readline();
if not line:
break;
elif line.find("WIDTH") > -1:
width = line.split()[1].strip();
slc2_rsc_file.close();
if not os.path.exists(amp2):
cmd = "\ncpx2mag_phs " + slc2 + " " + cull_dir + "/" + date2 + ".amp " + cull_dir + "/" + date2 + ".phs " + width + "\n";
cmd += "\ncp -pr " + slc2 + ".rsc " + cull_dir + "/" + date2 + ".amp.rsc\n";
cmd += "\nrm " + cull_dir + "/" + date2 + ".phs\n";
subprocess.call(cmd,shell=True);
cmd = "\ncp -pr azo_real.pl " + cull_dir + "\n";
subprocess.call(cmd,shell=True);
cmd = "\ncd " + cull_dir + "\n";
cmd += "\nperl azo_real.pl " + amp2 + " " + amp1 + " " + cull_name[0:cull_name.rfind(".")] + " " + cull_name[index1:index4] + "_azo_" + wsamp + " " + rwin + " " + awin + " " + search_x + " " + search_y + " " + wsamp + " " + numproc + " &\n";
cmd += "\ncd " + cwd + "\n";
print(cmd);
#subprocess.call(cmd,shell=True);
return;
def makeUNW(path, rwin, awin, search_x, search_y, wsamp, angle, data_type):
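    # Gathers ampcor offset results matching the window/search parameters, concatenates any
    # split .off files, reads the geometry parameters from the .off.rsc file and calls makeAzo
    # to write azimuth/range offset .unw maps (per-date incidence angles are used for TSX).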
cmd = "\nfind " + path + " -name \"*azo_" + wsamp + "_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "*.off\" -print\n";
pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
ampoff_paths = pipe.read().split();
pipe.close();
ampoff_dirs={};
cat_cmds={};
angles = {};
max_inc_angle = "";
min_inc_angle = "";
if data_type.lower().find("tsx") > -1:
cmd = "\nfind " + path + " -name \"T*X*.xml\"\n";
pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
leader_file_paths = pipe.read().split();
pipe.close();
for path in leader_file_paths:
date = "";
infile = open(path,"r");
for line in infile:
if line.find("timeUTC") > -1:
index = re.search("timeUTC>",line).end(0);
year = line[index + 2 : index + 4];
month = line[index + 5 : index + 7];
day = line[index + 8 : index + 10];
date = year + month + day;
elif line.find("coverageRegionMin incidenceAngle") > -1:
min_inc_angle = line[re.search("\">",line).end(0) : re.search("</",line).start(0)];
elif line.find("coverageRegionMax incidenceAngle") > -1:
max_inc_angle = line[re.search("\">",line).end(0) : re.search("</",line).start(0)];
infile.close();
angles[date] = str((float(max_inc_angle) + float(min_inc_angle)) / 2.);
for i in range(0,len(ampoff_paths)):
ampoff_dir = ampoff_paths[i].strip()[0:ampoff_paths[i].strip().rfind("/")];
if ampoff_dir not in ampoff_dirs:
ampoff_dirs[ampoff_dir] = ampoff_paths[i];
cat_cmds[ampoff_dir] = "\ncat " + ampoff_paths[i];
else:
cat_cmds[ampoff_dir] += " " + ampoff_paths[i];
for ampoff_dir in cat_cmds:
cmd = cat_cmds[ampoff_dir];
elements = cmd.split();
if len(elements) < 3:
continue;
else:
if not re.search("_\d\.off",elements[1]):
ampoff_dirs[ampoff_dir] = elements[1];
continue;
else:
composite_ampoff_path = elements[1][:re.search("_\d\.off",elements[1]).start(0)] + ".off";
ampoff_dirs[ampoff_dir]=composite_ampoff_path;
if os.path.exists(composite_ampoff_path):
continue;
cat_cmds[ampoff_dir] += " > " + composite_ampoff_path + "\n";
print("\n***** pixelTrack - step \"make_unw\" - running cat to compose ampcor results into single file...\n");
subprocess.call(cat_cmds[ampoff_dir],shell=True);
for ampoff_dir in ampoff_dirs:
ampoff_dir_contents = os.listdir(ampoff_dir);
already_done = False;
item="";
for i in range(0,len(ampoff_dir_contents)):
item = ampoff_dir_contents[i];
if re.search(".*azimuth_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw",item) or \
re.search(".*range_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw",item):
already_done=True;
break;
if already_done:
print("\n****** \"" + item +"\" already exists in \"" + ampoff_dir + "\", make_unw step likely already done for this directory, skipping...\n");
continue;
ampoff_path = ampoff_dirs[ampoff_dir];
date = ampoff_path[re.search("/\d{6}[\-_]\d{6}",ampoff_path).start(0) + 1 : re.search("/\d{6}[\-_]\d{6}", ampoff_path).start(0) + 7];
cmd = "\nls " + ampoff_path[0:ampoff_path.rfind("azo")+3]+"*.off.rsc\n";
pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
ampoff_rsc_paths = pipe.read().split();
pipe.close();
if len(ampoff_rsc_paths) < 1:
print("\n***** WARNING, could not find any azo rsc file in \"" + amcporDir + "\", skipping these results\n");
break;
ampoff_rsc_path = ampoff_rsc_paths[0];
da_p = "";
r_e = "";
p_h = "";
dr = "";
endRefSample = "";
endRefLine = "";
ampoff_rsc_file = open(ampoff_rsc_path,"r");
while 1:
line = ampoff_rsc_file.readline();
if not line:
break;
elif line.find("RANGE_PIXEL_SIZE") > -1:
dr = line.split()[1].strip();
elif line.find("FILE_LENGTH") > -1:
endRefLine = line.split()[1].strip();
elif line.find("WIDTH") > -1:
endRefSample = line.split()[1].strip();
elif line.find("EARTH_RADIUS") > -1:
r_e = line.split()[1].strip();
elif re.search("^HEIGHT\s+",line):
p_h = line.split()[1].strip();
elif line.find("AZIMUTH_PIXEL_SIZE") > -1:
da_p = line.split()[1].strip();
ampoff_rsc_file.close();
if da_p == "":
print("\n***** WARNING, could not find parameter \"FILE_LENGTH\" in \"" + ampoff_rsc_path[0].strip() + "\", skipping these results\n");
break;
if da_p == "":
print("\n***** WARNING, could not find parameter \"WIDTH\" in \"" + ampoff_rsc_path[0].strip() + "\", skipping these results\n");
break;
if da_p == "":
print("\n***** WARNING, could not find parameter \"AZIMUTH_PIXEL_SIZE\" in \"" + ampoff_rsc_path[0].strip() + "\", skipping these results\n");
break;
if r_e == "":
print("\n***** WARNING, could not find parameter \"EARTH_RADIUS\" in \"" + ampoff_rsc_path[0].strip() + "\", skipping these results\n");
break;
if p_h == "":
print("\n***** WARNING, could not find parameter \"HEIGHT\" in \"" + ampoff_rsc_path[0].strip() + "\", skipping these results\n");
break;
if dr == "":
print("\n***** WARNING, could not find parameter \"RANGE_PIXEL_SIZE\" in \"" + ampoff_rsc_path[0].strip() + "\", skipping these results\n");
break;
input_angle = angle;
if data_type.lower().find("tsx") > -1:
input_angle = angles[date];
print("\n***** pixelTrack - step \"make_unw\" - running makeAzo in " + ampoff_dir + " to generate azimuth and range unw files ...\n");
makeAzo(ampoff_path, float(da_p), float(r_e), float(p_h), float(dr), float(input_angle), int(wsamp), int(rwin), int(awin), search_x, search_y, int(endRefSample), int(endRefLine));
cwd = os.getcwd();
if not os.path.exists(ampoff_dir+"/azimuth_" + rwin + "x" + awin + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw.rsc"):
date = ampoff_path[re.search("/\d{6}[\-_]\d{6}",ampoff_path).start(0)+1:re.search("/\d{6}[\-_]\d{6}",ampoff_path).start(0)+7];
cmd = "";
if not os.path.exists(ampoff_dir + "/" + date + "_" + str(int(rwin)/int(wsamp)) + "rlks.slc.rsc"):
cmd += "\nlook.pl " + ampoff_dir + "/" + date + ".slc " + str(int(rwin)/int(wsamp)) + " " + str(int(awin)/int(wsamp)) + "\n";
cmd += "\ncp -p " + ampoff_dir + "/" + date + "_" + str(int(rwin)/int(wsamp)) + "rlks.slc.rsc " + ampoff_dir + "/azimuth_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw.rsc\n";
cmd += "\ncp -p " + ampoff_dir + "/" + date + "_" + str(int(rwin)/int(wsamp)) + "rlks.slc.rsc " + ampoff_dir + "/range_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw.rsc\n";
cmd += "\ncp -p " + ampoff_dir + "/" + date + "_" + str(int(rwin)/int(wsamp)) + "rlks.slc.rsc " + ampoff_dir + "/snr_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw.rsc\n";
subprocess.call(cmd,shell=True);
return;
def beamTable():
    beam_angle = {};
    beam_angle["ST1"] = "23.7";
beam_angle["ST2"] = "27.7";
beam_angle["ST3"] = "33.7";
beam_angle["ST4"] = "36.6";
beam_angle["ST5"] = "39.4";
beam_angle["ST6"] = "44.0";
beam_angle["ST7"] = "47.2";
beam_angle["F1"] = "38.5";
beam_angle["F2"] = "40.8";
beam_angle["F3"] = "42.9";
beam_angle["F4"] = "44.8";
beam_angle["F5"] = "46.6";
    return beam_angle;
#def densifyAmpmag(path, date):
#
# if
#
# return;
def findAzimuthPixelSize(path, date, orbit):
cwd = os.getcwd();
cmd = "find " + path + " -name \"" + date + ".slc.rsc\" -print";
pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
slc_rsc_paths = pipe.read().split();
pipe.close();
slc_rsc_path = "";
if len(slc_rsc_paths) < 1:
cmd = "find " + path + " -name \"" + date + ".raw\" -print";
pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
raw_paths = pipe.read().split();
pipe.close();
cmd = "find " + path + " -name \"hdr*"+date+"*.rsc\" -print";
pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
hdr_paths = pipe.read().split();
pipe.close();
if len(raw_paths) < 1:
print("\n***** WARNING, could not find \"" + date + ".raw\", necessary to determine azimuth pixel size\n");
return "-1";
raw_path = raw_paths[0];
if not os.path.exists(raw_path + ".rsc"):
print("\n***** WARNING, could not find \"" + date + ".raw.rsc\", necessary to determine azimuth pixel size\n");
return "-1";
if len(hdr_paths) < 1:
print("\n***** WARNING, could not find \"hdr*" + date + "*.rsc\", necessary to determine azimuth pixel size\n");
return "-1";
hdr_path = hdr_paths[0];
cmd = "\nmkdir " + path + "/" + date + "_APS\n";
cmd += "\ncd " + path + "/" + date + "_APS\n";
cmd += "\nln -s " + raw_path + " " + raw_path[raw_path.rfind("/") + 1 : ] + "\n";
cmd += "\nln -s " + raw_path + ".rsc " + raw_path[raw_path.rfind("/") + 1 : ] + ".rsc\n";
cmd += "\nln -s " + hdr_path + " " + hdr_path[hdr_path.rfind("/") + 1 : ]+"\n";
cmd += "\ndopav.pl . . " + date + " " + date + " \"\"\n";
cmd += "\nroi_prep.pl " + date + " " + orbit + " " + date + "-" + date + "\n";
cmd += "\ncd " + cwd + "\n";
subprocess.call(cmd,shell=True);
slc_rsc_path = path + "/" + date + "_APS/" + date + ".slc.rsc";
else:
slc_rsc_path = slc_rsc_paths[0];
slc_rsc_file = open(slc_rsc_path,"r");
while 1:
line = slc_rsc_file.readline();
if not line:
break;
if line.find("AZIMUTH_PIXEL_SIZE") > -1:
slc_rsc_file.close();
if os.path.exists(path + "/" + date + "_APS"):
shutil.rmtree(path + "/" + date + "_APS");
return line[re.search("\d+\.*\d*",line).start(0) : re.search("\d+\.*\d*",line).end(0)];
slc_rsc_file.close();
print("\n***** WARNING, unable to determine azimuth pixel size, using default value of \"5\"\n");
shutil.rmtree(path + "/" + date + "_APS");
return "-1";
def GCF(num):
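    # Greatest common factor of a list of integers, computed pairwise by repeated
    # subtraction (Euclid's algorithm).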
temp = num[0];
for i in range(len(num)-1):
num1 = temp;
num2 = num[i+1];
if num1 < num2:
num1,num2=num2,num1;
while num1 - num2:
num3 = num1 - num2;
num1 = max(num2,num3);
num2 = min(num2,num3);
temp = num1;
return num1;
def has_value(self, value):
return value in self.values();
def LCM(num):
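    # Least common multiple of a list of integers, accumulated pairwise as
    # num1 * num2 / GCF(num1, num2).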
temp = num[0];
for i in range(len(num)-1):
num1 = temp;
num2 = num[i+1];
t_gcf = GCF([num1,num2]);
temp = t_gcf * num1/t_gcf * num2/t_gcf;
return temp;
def makeProcFile(path, date2, date1, angle, dem, orbit):
proc_file_path = path + "/int_" + date2 + "_" + date1 + ".proc";
print(proc_file_path);
if os.path.exists(proc_file_path):
print("\n\"" + proc_file_path + "\" already exists, skipping\n");
return;
int_path = path + "/int_" + date2 + "_" + date1;
proc_file = open(proc_file_path,"w");
proc_file.write("SarDir1=" + path + "/" + date2 + "\n");
proc_file.write("SarDir2=" + path + "/" + date1 + "\n");
proc_file.write("IntDir=" + int_path + "\n");
proc_file.write("SimDir=" + int_path + "/SIM\n");
proc_file.write("GeoDir=" + int_path + "/GEO\n");
proc_file.write("flattening=orbit\n");
proc_file.write("DEM=" + dem + "\n");
proc_file.write("OrbitType=" + orbit + "\n");
proc_file.write("Rlooks_sim=1\n");
proc_file.write("Rlooks_unw=1\n");
proc_file.write("Rlooks_geo=1\n");
proc_file.write("Rlooks_int=1\n");
    pixel_ratio = "-1";
if re.search("\d+", angle):
azimuth_pixel_size = findAzimuthPixelSize(path, date1, orbit);
range_pixel_size = "-1";
if azimuth_pixel_size != "-1":
cmd = "\nfind " + path + " -name \"" + date1 + ".raw.rsc\" -print\n";
pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
raw_rsc_paths = pipe.read().split();
pipe.close();
if len(raw_rsc_paths) > 0:
raw_rsc_file = open(raw_rsc_paths[0],"r");
while 1:
line = raw_rsc_file.readline();
if not line:
break;
if line.find("RANGE_PIXEL_SIZE") > -1:
raw_rsc_file.close();
range_pixel_size = line[re.search("\d+\.*\d*",line).start(0) : re.search("\d+\.*\d*",line).end(0)];
pixel_ratio = str(round(float(range_pixel_size) / math.sin(math.radians(float(angle))) / float(azimuth_pixel_size)));
pixel_ratio = pixel_ratio[0 : pixel_ratio.rfind(".")];
break;
raw_rsc_file.close();
if pixel_ratio != "-1":
proc_file.write("pixel_ratio=" + pixel_ratio + "\n");
proc_file.close();
def getPixelRatios(path):
return;
def readProcFile(path,date2,date1):
procCmd = "find " + path + " -name \"*" + date2 + "*" + date1 + "*.proc\" -print";
    procStream = subprocess.Popen(procCmd, shell=True, stdout=subprocess.PIPE).stdout;
procOutput = procStream.read();
procFilePath = procOutput.strip().split();
if len(procFilePath) < 1:
print("\n***** ERROR, no proc file found for dates \"" + date2 + ", " + date1 + "\" in \"" + path + "\"\n");
sys.exit();
if len(procFilePath) > 1:
print("\n***** WARNING, found more than one proc file for dates \"" + date2 + ", " + date1 + "\", using \"" + procFilePath[0] + "\"\n");
procStream.close();
procFile = open(procFilePath[0],"r");
procHash = {};
while 1:
line = procFile.readline();
if not line:
break;
line = line.strip();
name = "";
value = "";
elements = line.split("=");
if len(elements) < 2 or len(elements[0]) < 1 or len(elements[1]) < 1:
print("\n***** ERROR, proc file line format is \"varName=varValue\", \"" + line + "\" does not conform to this format\n");
sys.exit();
procHash[elements[0]] = elements[1];
procFile.close();
return procHash;
def gausshpfilt(data,kernel):
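    # Convolves `data` with `kernel` after mirror-padding the borders by half the kernel size
    # (corners are filled by averaging); the filtering behaviour depends on the kernel supplied.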
padSize = numpy.size(kernel,axis=0) / 2;
temp = numpy.zeros((numpy.size(data,axis=0)+2*padSize,numpy.size(data,axis=1)+2*padSize));
#fill temp with data values
for i in range(padSize,numpy.size(temp,axis=0)-padSize):
for j in range(padSize,numpy.size(temp,axis=1)-padSize):
temp[i,j] = data[i-padSize,j-padSize];
#pad left
for i in range(0,padSize):
for j in range(padSize,padSize+numpy.size(data,axis=0)):
temp[j,padSize-1-i] = data[j-padSize,i];
#pad top
for i in range(0,padSize):
for j in range(padSize,padSize+numpy.size(data,axis=1)):
temp[padSize-1-i,j] = data[i,j-padSize];
#pad right
for i in range(0,padSize):
for j in range(padSize,padSize+numpy.size(data,axis=0)):
temp[j,numpy.size(temp,axis=1)-padSize+i] = data[j-padSize,numpy.size(data,axis=1)-1-i];
#pad bottom
for i in range(0,padSize):
for j in range(padSize,padSize+numpy.size(data,axis=1)):
temp[numpy.size(temp,axis=0)-padSize+i,j] = data[numpy.size(data,axis=0)-1-i,j-padSize];
#fill top-left corner
for i in range(0,padSize):
for j in range(0, padSize):
temp[padSize-i-1,padSize-j-1] = int((temp[padSize-i-1,padSize-j] + temp[padSize-i,padSize-j-1]) / 2);
#fill top-right corner
for i in range(0,padSize):
for j in range(0, padSize):
temp[padSize-i-1,numpy.size(temp,axis=1)-padSize+j] = int((temp[padSize-i-1,numpy.size(temp,axis=1)-padSize+j-1] + temp[padSize-i,numpy.size(temp,axis=1)-padSize+j]) / 2);
#fill bottom-right corner
for i in range(0,padSize):
for j in range(0, padSize):
temp[numpy.size(temp,axis=0)-padSize+i,numpy.size(temp,axis=1)-padSize+j] = int((temp[numpy.size(temp,axis=0)-padSize+i,numpy.size(temp,axis=1)-padSize+j-1] + temp[numpy.size(temp,axis=0)-padSize+i-1,numpy.size(temp,axis=1)-padSize+j]) / 2);
#fill bottom-left corner
for i in range(0,padSize):
for j in range(0, padSize):
temp[numpy.size(temp,axis=0)-padSize+i,padSize-j-1] = (temp[numpy.size(temp,axis=0)-padSize+i,padSize-j] + temp[numpy.size(temp,axis=0)-padSize+i-1,padSize-j-1]) / 2;
#perform convolution
ghp_data = numpy.zeros((numpy.size(data,axis=0),numpy.size(data,axis=1)));
for i in range(numpy.size(ghp_data,axis=0)):
for j in range(numpy.size(ghp_data,axis=1)):
ghp_data[i,j] = numpy.sum(kernel*temp[i:i+numpy.size(kernel,axis=0),j:j+numpy.size(kernel,axis=1)]);
return ghp_data;
def geocode(path, rwin, awin, search_x, search_y, wsamp, orbit, dem_path):
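    # For each azimuth/range offset .unw matching the window/search parameters, builds the
    # geocoding lookup table (make_geomap.pl) and geocodes the offsets; the radar-ramp removal
    # branch for range offsets is effectively disabled as written (the "blalbalb" pattern).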
import fnmatch;
cwd = os.getcwd();
azo_unw_paths = [];
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, "*.unw"):
if re.search("r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin) / int(wsamp)), filename):
azo_unw_paths.append(root + "/" + filename);
ld_range = str(int(rwin) / int(wsamp));
ld_azimuth = str(int(awin) / int(wsamp));
for azo_unw_path in azo_unw_paths:
index = re.search("\d{6}_\d{6}", azo_unw_path).start(0);
later_date = azo_unw_path[index : index + 6];
early_date = azo_unw_path[index + 7 : index + 13];
print(azo_unw_path);
azo_unw_dir = ".";
index = azo_unw_path.rfind("/");
if index > -1:
azo_unw_dir = azo_unw_path[ : index];
azo_unw_name = azo_unw_path[index + 1 : ];
os.chdir(azo_unw_dir);
geo_unw = "geo_" + azo_unw_name[ : azo_unw_name.find("_")] + "_" + later_date + "-" + early_date + "_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + ld_range + "rlks.unw";
if os.path.exists(geo_unw):
print("\n**** WARNING, \"" + geo_unw + "\" already exists in \"" + azo_unw_dir + "\", skipping " + azo_unw_name + "...\n");
elif geo_unw.find("range") > -1 and os.path.exists(geo_unw.replace("range", "adj_range")):
print("\n**** WARNING, \"" + geo_unw.replace("range", "adj_range") + "\" already exists in \"" + azo_unw_dir + "\", skipping " + azo_unw_name + "...\n");
radar_name = "radar_" + orbit + ".unw";
radar_rsc_name = radar_name + ".rsc";
if not os.path.exists(radar_name):
print("\n**** WARNING, \"" + radar_name + "\" not found in \"" + azo_unw_dir + "\", skipping range ramp-removal for this pair...\n");
if not os.path.exists(radar_rsc_name):
print("\n***** WARNING, \"" + radar_rsc_name + "\" not found in \"" + azo_unw_dir + "\", skipping range ramp-removal for this pair...\n");
if re.search("^blalbalbrange", azo_unw_name) and os.path.exists(radar_name) and os.path.exists(radar_name + ".rsc"):
cmd = "\nlook.pl " + radar_name + " " + ld_range + " " + ld_azimuth + "\n";
subprocess.call(cmd, shell=True);
radar_ld_name = "radar_" + orbit + "_" + ld_range + "rlks";
radar_ld_unw = "radar_" + orbit + "_" + ld_range + "rlks.unw";
width = "";
wavelength = "";
radar_rsc_file = open(radar_ld_unw + ".rsc", "r");
while 1:
line = radar_rsc_file.readline();
if not line:
break;
if line.find("WIDTH") > -1:
elements = line.split();
width = elements[1];
if line.find("WAVELENGTH") > -1:
elements = line.split();
wavelength = elements[1];
radar_rsc_file.close();
if width == "":
print("\n***** WARNING, could not find \"WIDTH\" in \"" + radar_ld_unw + ".rsc\", skipping range ramp-removal for \"" + azo_unw_dir + "\"...\n");
continue;
if wavelength == "":
print("\n***** WARNING, could not find \"WAVELENGTH\" in \"" + radar_ld_unw + ".rsc\", skipping range ramp-removal for \"" + azo_unw_dir + "\"...\n");
continue;
cmd = "\nrmg2mag_phs " + radar_ld_unw + " " + radar_ld_name + ".mag " + radar_ld_name + ".phs " + width + "\n";
subprocess.call(cmd, shell=True);
adj_radar_ld_phs = adjustPhase(radar_ld_name + ".phs", str(100 * float(wavelength)), width);
cmd = "\nmag_phs2rmg " + radar_ld_name + ".mag " + adj_radar_ld_phs + " " + radar_ld_unw + " " + width + "\n";
subprocess.call(cmd, shell=True);
adj_range_unw_name = "adj_" + azo_unw_name;
cmd = "\nadd_rmg.pl " + azo_unw_name + " " + radar_ld_unw + " " + adj_range_unw_name + " -1 1\n";
subprocess.call(cmd, shell=True);
azo_unw_name = adj_range_unw_name;
cmd = "";
if not os.path.exists(azo_unw_dir + "/" + later_date + "_" + ld_range + "rlks.slc.rsc"):
cmd += "\nlook.pl " + later_date + ".slc " + ld_range + " " + ld_azimuth + "\n";
cmd += "\ncp -pr " + later_date + "_" + ld_range + "rlks.slc.rsc " + azo_unw_path + ".rsc\n";
cmd += "\nmake_geomap.pl ./GEO " + azo_unw_name + " azm.trans " + orbit + " " + dem_path + " " + later_date + "-" + early_date + "_SIM.aff " + ld_range + " " + later_date + " yes ../SIM\n";
cmd += "\ngeocode.pl ./GEO/azm.trans " + azo_unw_name + " geo_" + azo_unw_name[ : azo_unw_name.find("_")] + "_" + later_date + "-" + early_date + "_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + ld_range + "rlks.unw\n";
subprocess.call(cmd,shell=True);
os.chdir(cwd);
return;
def generateProfiles(path):
currentDir = os.getcwd();
profilesCmd = "find " + path + " -name \"*.distance\" -print";
    profilesStream = subprocess.Popen(profilesCmd, shell=True, stdout=subprocess.PIPE).stdout;
profilesOutput = profilesStream.read();
profilesStream.close();
profiles = profilesOutput.split();
xyzCmd = "find " + path + " -name \"northxyz.txt\" -print";
    xyzStream = subprocess.Popen(xyzCmd, shell=True, stdout=subprocess.PIPE).stdout;
xyzOutput = xyzStream.read();
xyzStream.close();
xyzCmd = "find " + path + " -name \"eastxyz.txt\" -print";
    xyzStream = subprocess.Popen(xyzCmd, shell=True, stdout=subprocess.PIPE).stdout;
xyzOutput = xyzOutput + xyzStream.read();
xyzStream.close();
xyzCmd = "find " + path + " -name \"magxyz.txt\" -print";
    xyzStream = subprocess.Popen(xyzCmd, shell=True, stdout=subprocess.PIPE).stdout;
xyzOutput = xyzOutput + xyzStream.read();
xyzStream.close();
xyzFileList = xyzOutput.split();
for i in range(0,len(xyzFileList)):
xyzPath = xyzFileList[i].strip()[0:xyzFileList[i].strip().rfind("/")];
xyzFileName = xyzFileList[i].strip()[xyzFileList[i].strip().rfind("/")+1:];
xyzName = xyzFileName[0:xyzFileName.find(".")];
gridCmd = "";
if not os.path.exists(xyzPath + "/" + xyzName + ".grd"):
gridCmd = gridCmd + "\npython grid.py " + xyzFileList[i].strip() + "\n";
        gridCmdStream = subprocess.Popen(gridCmd, shell=True, stdout=subprocess.PIPE).stdout;
gridCmdOutput = gridCmdStream.read();
gridCmdStream.close();
#for i in range(0,len(profiles)):
# genProfileCmd = "\ncd " + xyzPath + "\ngrdtrack " + profiles[i] + " -G" + xyzName + ".grd > " + profiles[i][profiles[i].rfind("/")+1:profiles[i].find(".")] + "_" + xyzName + ".txt\ncd " + currentDir + "\n";
# print(genProfileCmd);
#genProfileStream = subprocess.Popen(genProfileCmd);
#genProfileStream.close();
def generatePNGs(path):
currentDir = os.getcwd();
findGRDsCmd = "find " + path + " -name \"*.grd\" -print";
    findGRDsStream = subprocess.Popen(findGRDsCmd, shell=True, stdout=subprocess.PIPE).stdout;
findGRDsOutput = findGRDsStream.read().split();
findGRDsStream.close();
pngCmd = "";
for i in range(0,len(findGRDsOutput)):
psName = findGRDsOutput[i][0:findGRDsOutput[i].rfind(".")] + ".ps";
psPath = findGRDsOutput[i][0:findGRDsOutput[i].rfind("/")];
pngName = findGRDsOutput[i][0:findGRDsOutput[i].rfind(".")] + ".png";
if os.path.exists(psName) and not os.path.exists(pngName):
pngCmd += "\ncd " + psPath + "\nps2raster -A -TG " + psName + "\ncd " + currentDir + "\n";
if pngCmd != "":
        subprocess.call(pngCmd, shell=True);
def getAffineTrans(path):
cwd = os.getcwd();
contents = os.listdir(path);
proc_paths = [item for item in contents if ".proc" in item];
if len(proc_paths) < 1:
print("\n***** WARNING, no *.proc files found in " + path + ", not running \"affine\" step...\n");
return;
cmd = "";
for proc_path in proc_paths:
int_vars = readIntProcFile(proc_path);
date1 = int_vars["SarDir1"];
date2 = int_vars["SarDir2"];
int_dir = int_vars["IntDir"];
rlooks = int_vars["Rlooks_geo"];
aff_path = path + "/" + int_dir + "/" + date1 + "-" + date2 + "_" + rlooks + "rlks_SIM.aff";
if os.path.exists(aff_path):
print("\n***** WARNING, " + aff_path + " already exists in " + int_dir + ", skipping...\n");
continue;
cmd += "\ncd " + path + "\n";
cmd += "\nprocess_2pass_glac.pl " + proc_path + " offsets done_sim_removal &\n";
cmd += "\ncd " + cwd + "\n";
print(cmd);
#subprocess.call(cmd,shell=True);
return;
def getGRDCorners(path):
currentDir = os.getcwd();
findGRDsCmd = "find " + path + " -name \"*.grd\" -print";
    findGRDsStream = subprocess.Popen(findGRDsCmd, shell=True, stdout=subprocess.PIPE).stdout;
findGRDsOutput = findGRDsStream.read().split();
findGRDsStream.close();
for i in range(0,len(findGRDsOutput)):
grdPath = findGRDsOutput[i][0:findGRDsOutput[i].rfind("/")];
grdName = findGRDsOutput[i][findGRDsOutput[i].rfind("/")+1:findGRDsOutput[i].rfind(".")];
if not os.path.exists(grdPath + "/" + grdName + "_corners.dat"):
grdinfoCmd = "\ngrdinfo " + findGRDsOutput[i].strip() + "\n";
            grdinfoStream = subprocess.Popen(grdinfoCmd, shell=True, stdout=subprocess.PIPE).stdout;
grdinfoOutput = grdinfoStream.read();
grdinfoStream.close();
x_min = grdinfoOutput[grdinfoOutput.find("x_min:")+6:grdinfoOutput.find("x_max:")].strip();
x_max = grdinfoOutput[grdinfoOutput.find("x_max:")+6:grdinfoOutput.find("x_inc:")].strip();
y_min = grdinfoOutput[grdinfoOutput.find("y_min:")+6:grdinfoOutput.find("y_max:")].strip();
y_max = grdinfoOutput[grdinfoOutput.find("y_max:")+6:grdinfoOutput.find("y_inc:")].strip();
cornersFileName = grdPath + "/" + grdName + "_corners.dat";
cornersFile = open(cornersFileName,"w");
cornersFile.write(x_min + " " + y_min + " LL\n");
cornersFile.write(x_max + " " + y_max + " TR\n");
cornersFile.write(x_min + " " + y_max + " TL\n");
cornersFile.write(x_max + " " + y_min + " LR\n");
cornersFile.close()
def generateKML(path):
findPNGsCmd = "find " + path + " -name \"*.png\" -print";
findPNGsStream = subprocess.Popen(findPNGsCmd);
findPNGsOutput = findPNGsStream.read().split();
findPNGsStream.close();
def createMatlabGetXYZ(matlabPath,ampcorInFilePath):
startRefSample = "";
endRefSample = "";
skipRefSample = "";
startRefLine = "";
endRefLine = "";
skipRefLine = "";
ampcorInFile = open(ampcorInFilePath,"r");
ampoff_dir = ampcorInFilePath[0:ampcorInFilePath.rfind("/")];
ampoff_name = ampcorInFilePath[0:ampcorInFilePath.rfind(".")];
cornersFilePath = ampoff_dir + "/corners.dat";
cornersFile = open(cornersFilePath,"r");
ul_long = "";
ul_lat = "";
while 1:
line = cornersFile.readline();
if not line:
break;
line = line.strip();
if line.find("ul_long") > -1:
ul_long = line.split("=")[1];
elif line.find("ul_lat") > -1:
ul_lat = line.split("=")[1];
cornersFile.close();
while 1:
line = ampcorInFile.readline();
if not line:
break;
if line.find("Start, End and Skip Samples in Reference Image") > -1:
line = line.strip().split("=");
sampleInfo = line[1].split();
startRefSample = sampleInfo[0];
endRefSample = sampleInfo[1];
skipRefSample = sampleInfo[2];
elif line.find("Start, End and Skip Lines in Reference Image") > -1:
line = line.strip().split("=");
lineInfo = line[1].split();
startRefLine = lineInfo[0];
endRefLine = lineInfo[1];
skipRefLine = lineInfo[2];
ampcorInFile.close();
matlabFile = open(matlabPath,"r");
outputMatlabFile = open(ampoff_dir + "/getxyzs.m","w");
while 1:
line = matlabFile.readline();
if not line:
break;
elif re.search("rwin\s*=\s*;",line):
outputMatlabFile.write(line.replace(";",skipRefSample+";"));
break;
else:
outputMatlabFile.write(line);
while 1:
line = matlabFile.readline();
if not line:
break;
elif re.search("awin\s*=\s*;",line):
outputMatlabFile.write(line.replace(";",skipRefLine+";"));
break;
else:
outputMatlabFile.write(line);
while 1:
line = matlabFile.readline();
if not line:
break;
elif re.search("load\s*;",line):
outputMatlabFile.write(line.replace(";",ampoff_name[ampoff_name.rfind("/")+1:]+".off;"));
break;
else:
outputMatlabFile.write(line);
while 1:
line = matlabFile.readline();
if not line:
break;
elif re.search("indat\s*=\s*;",line):
outputMatlabFile.write(line.replace(";",ampoff_name[ampoff_name.rfind("/")+1:]+";"));
break;
else:
outputMatlabFile.write(line);
while 1:
line = matlabFile.readline();
if not line:
break;
elif re.search("width0\s*=\s*;",line):
outputMatlabFile.write(line.replace(";",endRefSample+";"));
break;
else:
outputMatlabFile.write(line);
while 1:
line = matlabFile.readline();
if not line:
break;
elif re.search("length0\s*=\s*;",line):
outputMatlabFile.write(line.replace(";",endRefLine+";"));
break;
else:
outputMatlabFile.write(line);
while 1:
line = matlabFile.readline();
if not line:
break;
elif re.search("ul_long\s*=\s*;",line):
outputMatlabFile.write(line.replace(";",ul_long+";"));
break;
else:
outputMatlabFile.write(line);
while 1:
line = matlabFile.readline();
if not line:
break;
elif re.search("ul_lat\s*=\s*;",line):
outputMatlabFile.write(line.replace(";",ul_lat+";"));
break;
else:
outputMatlabFile.write(line);
while 1:
line = matlabFile.readline();
if not line:
break;
elif re.search("x_step\s*=\s*;",line):
outputMatlabFile.write(line.replace(";",str(15*int(skipRefSample))+";"));
break;
else:
outputMatlabFile.write(line);
while 1:
line = matlabFile.readline();
if not line:
break;
elif re.search("y_step\s*=\s*",line):
outputMatlabFile.write(line.replace(";",str(15*int(skipRefLine))+";"));
else:
outputMatlabFile.write(line);
outputMatlabFile.close();
matlabFile.close();
currentDir = os.getcwd();
getXYZCmd = "\ncd " + ampoff_dir + "\nmatlab -nodesktop -nosplash -r getxyzs\ncd " + currentDir;
    subprocess.call(getXYZCmd, shell=True);
def makeRawALOS(WorkPath):
contents = os.listdir(WorkPath);
cmd = "";
for i in range(0, len(contents)):
if re.search("^\d\d\d\d\d\d$", contents[i]):
date_contents = os.listdir(WorkPath + "/" + contents[i]);
for item in date_contents:
if item.find("LED") > -1:
fbd2fbs = "NO";
img_path = item.replace("LED", "IMG-HH");
img_full_path = os.readlink(WorkPath + "/" + contents[i] + "/" + img_path);
img_alt_path = img_full_path.replace("HH","HV");
if os.path.exists(img_alt_path):
fbd2fbs = "FBD2FBS";
cwd = os.getcwd();
cmd = cmd + "\ncd " + WorkPath + "/" + contents[i] + "\nmake_raw_alos.pl IMG " + contents[i] + " " + fbd2fbs + "\ncd " + cwd + "\n";
break;
subprocess.call(cmd,shell=True);
return;
def makeRawENVISAT(WorkPath, orbit):
contents = os.listdir(WorkPath);
cmd = "";
for i in range(0, len(contents)):
if re.search("^\d\d\d\d\d\d$", contents[i]):
date_contents = os.listdir(WorkPath + "/" + contents[i]);
for item in date_contents:
if item.find("ASA_") > -1:
cwd = os.getcwd();
cmd = cmd + "\ncd " + WorkPath + "/" + contents[i] + "\nmake_raw_envi.pl " + item + " " + orbit + " " + contents[i] + "\ncd " + cwd + "\n";
break;
subprocess.call(cmd,shell=True);
return;
def makeRawERS(WorkPath, orbit):
contents = os.listdir(WorkPath);
cmd = "";
for i in range(0, len(contents)):
if re.search("^\d\d\d\d\d\d$", contents[i]):
date_contents = os.listdir(WorkPath + "/" + contents[i]);
for item in date_contents:
if item.find("SARLEADER") > -1:
cwd = os.getcwd();
# cmd = cmd + "\ncd " + WorkPath + "/" + contents[i] + "\nmake_raw_ASF.pl " + orbit + " " + item + " " + contents[i] + "\ncd " + cwd + "\n";
cmd = cmd + "\ncd " + WorkPath + "/" + contents[i] + "\nmake_raw.pl " + orbit + " " + item + " " + contents[i] + "\ncd " + cwd + "\n";
break;
subprocess.call(cmd,shell=True);
return;
def makeRawTSX(WorkPath):
cwd = os.getcwd();
cmd = "\nfind " + WorkPath + " -name \"TDX*.xml\"\n";
pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
leader_file_paths = pipe.read().split();
pipe.close();
dates = {};
for path in leader_file_paths:
infile = open(path,"r");
for line in infile:
if line.find("timeUTC") > -1:
index = re.search("timeUTC>",line).end(0);
year = line[index + 2 : index + 4];
month = line[index + 5 : index + 7];
day = line[index + 8 : index + 10];
date = year + month + day;
dates[date] = path;
break;
infile.close();
for date in dates:
cmd = "\ncd " + WorkPath + "/" + date + "\n";
cmd += "\nmake_slc_tsx.csh " + dates[date] + " " + date + "\n";
cmd += "\ncp -p " + WorkPath + "/" + date + "/" + date + ".slc.rsc " + WorkPath + "/" + date + "/" + date + ".raw.rsc\n";
cmd += "\ntouch " + WorkPath + "/" + date + "/" + date + ".raw\n";
cmd += "\ncd " + cwd + "\n";
subprocess.call(cmd,shell=True);
return;
def readIntProcFile(proc_path):
assert os.path.exists(proc_path), "***** ERROR: " + proc_path + " not found, cannot read proc file\n";
int_vars = {};
proc_file = open(proc_path,"r");
while 1:
line = proc_file.readline();
if not line:
break;
line = line.strip();
if not line:
continue;
name = "";
value = "";
elements = line.split("=");
if len(elements) < 2 or len(elements[0]) < 1 or len(elements[1]) < 1:
print("\n***** ERROR, proc file line format is \"name = value\", \"" + line + "\" does not conform to this format\n");
sys.exit();
name = elements[0].strip();
value = elements[1].strip();
int_vars[name] = value;
proc_file.close();
return int_vars;
def setupALOS(WorkPath, leader_file_paths):
for leader_path in leader_file_paths:
existingSARLeaderFiles = {};
sarNumber = {};
dateName = "";
extension = leader_path[leader_path.rfind("."):];
leader_name = leader_path[leader_path.rfind("/") + 1 : ];
leaderFile = open(leader_path,"rb");
while 1:
line = leaderFile.readline();
if not line:
break;
searchExp = "\s\d\d\d\d\d\d\d\d";
if re.search(searchExp,line):
index = re.search(searchExp,line).start(0);
dateName = line[index:index+9].strip();
dateName = dateName[2:8];
if not os.path.isdir(WorkPath + "/" + dateName):
cmd = "mkdir " + WorkPath + "/" + dateName;
subprocess.call(cmd,shell=True);
if not existingSARLeaderFiles.has_key(leader_path):
leader_link_path = WorkPath + "/" + dateName + "/" + leader_name;
os.symlink(leader_path, leader_link_path);
existingSARLeaderFiles[leader_path] = leader_link_path;
break;
leaderFile.close();
if re.search("LED-A",leader_path):
raw_path = leader_path.replace("LED","IMG-HH");
raw_alt_path = leader_path.replace("LED","IMG-HV");
raw_name = raw_path[raw_path.rfind("IMG") : ];
raw_alt_name = raw_alt_path[raw_alt_path.rfind("IMG") : ];
raw_link_path = WorkPath + "/" + dateName + "/" + raw_name;
raw_alt_link_path = WorkPath + "/" + dateName + "/" + raw_alt_name;
if os.path.exists(raw_path) and not os.path.exists(raw_link_path):
os.symlink(raw_path, raw_link_path);
# if os.path.exists(raw_alt_path) and not os.path.exists(raw_alt_link_path):
# os.symlink(raw_alt_path, raw_alt_link_path);
if not os.path.exists(raw_path):
print("\n***** WARNING, could not find corresponding raw file for leader file \"" + leader_path + "\"\nPlease make sure the raw file is in the same directory and is named \"IMG-HH*"+leader_path.replace("LED","")+"\"\n");
continue;
return;
def setupTSX(WorkPath, leader_file_paths):
for path in leader_file_paths:
infile = open(path,"r");
for path in leader_file_paths:
print(path);
return;
def setupENVISAT(WorkPath, leader_file_paths):
for path in leader_file_paths:
print(path);
return;
def setupERS(WorkPath, leader_file_paths):
for path in leader_file_paths:
existingSARLeaderFiles = {};
sarNumber = {};
dateName = "";
extension = path[path.rfind("."):];
leaderFile = open(path,"rb");
while 1:
line = leaderFile.readline();
if not line:
break;
searchExp = "\s\d\d\d\d\d\d\d\d";
if re.search(searchExp,line):
index = re.search(searchExp,line).start(0);
dateName = line[index:index+9].strip();
dateName = dateName[2:8];
if not os.path.isdir(WorkPath + "/" + dateName):
cmd = "mkdir " + WorkPath + "/" + dateName;
subprocess.call(cmd,shell=True);
if not existingSARLeaderFiles.has_key(path):
if not sarNumber.has_key(dateName):
sarNumber[dateName] = 1;
else:
sarNumber[dateName] = sarNumber[dateName] + 1;
sarNumberStr = str(sarNumber[dateName])
if sarNumber[dateName] < 10:
sarNumberStr = "0" + sarNumberStr;
tempPath = WorkPath + "/" + dateName + "/SARLEADER" + sarNumberStr;
while has_value(existingSARLeaderFiles,tempPath):
sarNumber[dateName] = sarNumber[dateName] + 1;
sarNumberStr = str(sarNumber[dateName]);
if sarNumber[dateName] < 10:
sarNumberStr = "0" + sarNumberStr;
tempPath = WorkPath + "/" + dateName + "/SARLEADER" + sarNumberStr;
os.symlink(path,tempPath);
existingSARLeaderFiles[path] = tempPath;
break;
leaderFile.close();
rawFileName = "rawness";
if re.search("LEA.*\.001",path):
rawFileName = path.replace("LEA","DAT");
else:
rawFileName = path[0:path.find(".ldr")] + ".raw";
if not os.path.exists(rawFileName):
rawFileName = rawFileName[0:rawFileName.find(".raw")] + ".RAW";
if not os.path.exists(rawFileName):
rawFileName = rawFileName[0:rawFileName.find(".RAW")] + ".Raw";
if not os.path.exists(rawFileName):
            print("\n***** WARNING, could not find corresponding raw file for leader file \"" + path + "\"\nPlease make sure the raw file is in the same directory and has the extension \".raw\"\n");
continue;
tempImagePath = "";
if re.search("SARLEADER", existingSARLeaderFiles[path]):
tempImagePath = existingSARLeaderFiles[path].replace("SARLEADER","IMAGERY");
if not os.path.exists(tempImagePath):
os.symlink(rawFileName, tempImagePath);
return;
def setupTSX(WorkPath, leader_file_paths):
for path in leader_file_paths:
infile = open(path,"r");
for line in infile:
if line.find("timeUTC") > -1:
index = re.search("timeUTC>",line).end(0);
year = line[index + 2 : index + 4];
month = line[index + 5 : index + 7];
day = line[index + 8 : index + 10];
date = year + month + day;
if not os.path.exists(date):
os.mkdir(WorkPath + "/" + date);
break;
infile.close();
return;
| 29.485754 | 245 | 0.615107 |
4a265858cb387fa7e2cf4e16153a25cf47610f0c | 1,896 | py | Python | tests/components/homekit/test_type_triggers.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 7 | 2019-08-15T13:36:58.000Z | 2020-03-18T10:46:29.000Z | tests/components/homekit/test_type_triggers.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 100 | 2020-06-17T22:22:41.000Z | 2022-03-31T06:24:19.000Z | tests/components/homekit/test_type_triggers.py | winning1120xx/home-assistant | 53d4c0ce2d374b5e97bbdc37742656c27adf8eea | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """Test different accessory types: Triggers (Programmable Switches)."""
from unittest.mock import MagicMock
from homeassistant.components.homekit.type_triggers import DeviceTriggerAccessory
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, async_get_device_automations
async def test_programmable_switch_button_fires_on_trigger(
hass, hk_driver, events, demo_cleanup, device_reg, entity_reg
):
"""Test that DeviceTriggerAccessory fires the programmable switch event on trigger."""
hk_driver.publish = MagicMock()
demo_config_entry = MockConfigEntry(domain="domain")
demo_config_entry.add_to_hass(hass)
assert await async_setup_component(hass, "demo", {"demo": {}})
await hass.async_block_till_done()
hass.states.async_set("light.ceiling_lights", STATE_OFF)
await hass.async_block_till_done()
entry = entity_reg.async_get("light.ceiling_lights")
assert entry is not None
device_id = entry.device_id
device_triggers = await async_get_device_automations(hass, "trigger", device_id)
acc = DeviceTriggerAccessory(
hass,
hk_driver,
"DeviceTriggerAccessory",
None,
1,
None,
device_id=device_id,
device_triggers=device_triggers,
)
await acc.run()
await hass.async_block_till_done()
assert acc.entity_id is None
assert acc.device_id is device_id
assert acc.available is True
hk_driver.publish.reset_mock()
hass.states.async_set("light.ceiling_lights", STATE_ON)
await hass.async_block_till_done()
hk_driver.publish.assert_called_once()
hk_driver.publish.reset_mock()
hass.states.async_set("light.ceiling_lights", STATE_OFF)
await hass.async_block_till_done()
hk_driver.publish.assert_called_once()
await acc.stop()
| 32.689655 | 90 | 0.75 |
4a2658eee845a1c3ee9848750128da0c11abf7f7 | 459 | py | Python | venv/Scripts/easy_install-3.7-script.py | derysudrajat/Translate | 7ee7c62d10268166033bcf5c7c6254dcd4c7c055 | [
"MIT"
] | null | null | null | venv/Scripts/easy_install-3.7-script.py | derysudrajat/Translate | 7ee7c62d10268166033bcf5c7c6254dcd4c7c055 | [
"MIT"
] | null | null | null | venv/Scripts/easy_install-3.7-script.py | derysudrajat/Translate | 7ee7c62d10268166033bcf5c7c6254dcd4c7c055 | [
"MIT"
] | null | null | null | #!C:\Users\ASUS\PycharmProjects\Translate\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| 35.307692 | 87 | 0.694989 |
4a265a09a4ecaf80eed578a082070b3972e744e8 | 7,384 | py | Python | thothlibrary/mutation.py | dqprogramming/thoth-client | 616a2286efd562d580568b9d80e439343c19e1fb | [
"MIT"
] | 3 | 2021-07-27T17:46:32.000Z | 2021-10-12T15:46:43.000Z | thothlibrary/mutation.py | dqprogramming/thoth-client | 616a2286efd562d580568b9d80e439343c19e1fb | [
"MIT"
] | null | null | null | thothlibrary/mutation.py | dqprogramming/thoth-client | 616a2286efd562d580568b9d80e439343c19e1fb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
GraphQL client for Thoth
(c) Open Book Publishers, February 2020
This programme is free software; you may redistribute and/or modify
it under the terms of the Apache License v2.0.
"""
import json
import urllib
from .errors import ThothError
class ThothMutation():
"""GraphQL mutation in Thoth
Mutations are specified in the MUTATIONS list, which specifies
their fields and desired return value 'fields' must be a list of
tuples (str, bool) where the string represents the attribute and the
boolean represents whether the values should be enclosed with quotes
and sanitised.
Each mutation must have a return_value. Normally this is the primary key
of that object, but in some cases, when we don't need to use the return
value, we simply specify any field that can be returned in that mutation
(e.g. createContribution).
"""
MUTATIONS = {
"createPublisher": {
"fields": [
("publisherName", True),
("publisherShortname", True),
("publisherUrl", True)
],
"return_value": "publisherId"
},
"createImprint": {
"fields": [
("publisherId", True),
("imprintName", True),
("imprintUrl", True)
],
"return_value": "imprintId"
},
"createWork": {
"fields": [
("workType", False),
("workStatus", False),
("fullTitle", True),
("title", True),
("subtitle", True),
("reference", True),
("edition", False),
("imprintId", True),
("doi", True),
("publicationDate", True),
("place", True),
("width", False),
("height", False),
("pageCount", False),
("pageBreakdown", True),
("imageCount", False),
("tableCount", False),
("audioCount", False),
("videoCount", False),
("license", True),
("copyrightHolder", True),
("landingPage", True),
("lccn", True),
("oclc", True),
("shortAbstract", True),
("longAbstract", True),
("generalNote", True),
("toc", True),
("coverUrl", True),
("coverCaption", True)
],
"return_value": "workId"
},
"createPublication": {
"fields": [
("publicationType", False),
("workId", True),
("isbn", True),
("publicationUrl", True)
],
"return_value": "publicationId"
},
"createPrice": {
"fields": [
("publicationId", True),
("currencyCode", False),
("unitPrice", False)
],
"return_value": "priceId"
},
"createLanguage": {
"fields": [
("workId", True),
("languageCode", False),
("languageRelation", False),
("mainLanguage", False)
],
"return_value": "languageId"
},
"createSubject": {
"fields": [
("workId", True),
("subjectType", False),
("subjectCode", True),
("subjectOrdinal", False)
],
"return_value": "subjectId"
},
"createSeries": {
"fields": [
("imprintId", True),
("seriesType", False),
("seriesName", True),
("issnPrint", True),
("issnDigital", True),
("seriesUrl", True)
],
"return_value": "seriesId"
},
"createIssue": {
"fields": [
("seriesId", True),
("workId", True),
("issueOrdinal", False)
],
"return_value": "issueOrdinal"
},
"createContributor": {
"fields": [
("firstName", True),
("lastName", True),
("fullName", True),
("orcid", True),
("website", True)
],
"return_value": "contributorId"
},
"createContribution": {
"fields": [
("workId", True),
("contributorId", True),
("contributionType", False),
("mainContribution", False),
("biography", True),
("institution", True),
("firstName", True),
("lastName", True),
("fullName", True)
],
"return_value": "workId"
}
}
def __init__(self, mutation_name, mutation_data):
"""Returns new ThothMutation object with specified mutation data
mutation_name: Must match one of the keys found in MUTATIONS.
mutation_data: Dictionary of mutation fields and their values.
"""
self.mutation_name = mutation_name
self.return_value = self.MUTATIONS[mutation_name]["return_value"]
self.mutation_data = mutation_data
self.data_str = self.generate_values()
self.request = self.prepare_request()
def prepare_request(self):
"""Format the mutation request string"""
values = {
"mutation_name": self.mutation_name,
"data": self.data_str,
"return_value": self.return_value
}
payload = """
mutation {
%(mutation_name)s(
data: {
%(data)s
}
) {
%(return_value)s
}
}
"""
return payload % values
def run(self, client):
"""Perform the GraphQL mutation and report any errors"""
result = ""
try:
result = client.execute(self.request)
if "errors" in result:
raise AssertionError
return json.loads(result)[
"data"][self.mutation_name][self.return_value]
except (KeyError, TypeError, ValueError, AssertionError,
json.decoder.JSONDecodeError, urllib.error.HTTPError):
raise ThothError(self.request, result)
def generate_values(self):
"""Returns a set of mutation statements based on object attributes."""
def sanitise(text):
"""Escape quotes and linebreaks"""
tmp = text.replace("\n", "\\n")
return tmp.replace('"', '''\\"''')
values = []
for key, enclose in self.MUTATIONS[self.mutation_name]["fields"]:
value = self.mutation_data[key]
if value is None or not str(value):
continue
if enclose:
statement = "{}: \"{}\"".format(key, sanitise(value))
else:
statement = "{}: {}".format(key, value)
values.append(statement)
return "\n".join(values)
| 32.672566 | 79 | 0.463841 |
4a265a8a40f890ccf8bc0958235d99156edec773 | 2,972 | py | Python | siam_tracker/benchmarks/data/vot.py | microsoft/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
] | 28 | 2020-03-18T04:41:21.000Z | 2022-02-24T16:44:01.000Z | siam_tracker/benchmarks/data/vot.py | HengFan2010/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
] | 1 | 2020-04-05T15:23:22.000Z | 2020-04-07T16:23:12.000Z | siam_tracker/benchmarks/data/vot.py | HengFan2010/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
] | 11 | 2020-03-19T00:30:06.000Z | 2021-11-10T08:22:35.000Z | import os
import numpy as np
from .base_data import Dataset, Sequence
from ..utils import read_file
from ..builder import BENCHMARKS
@BENCHMARKS.register_module
class VOT(Dataset):
def __init__(self,
name='vot',
vot_root='data/benchmark/vot17/',
zip_mode=False):
super(VOT, self).__init__(name=name, zip_mode=zip_mode)
self._load_seqs(vot_root)
def select_tag(self, video_name: str, tag: str, start: int = 0, end: int = 0):
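        # Returns the per-frame flags for a VOT attribute tag (e.g. 'occlusion') over the
        # frame range [start, end); the special tag 'all' yields a list of ones.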
seq = self.seqs[self.get(video_name)]
if tag == 'all':
all_tags = [1] * len(seq)
return all_tags[start:end]
elif hasattr(seq, tag):
return getattr(seq, tag)[start:end]
else:
raise NotImplementedError("Cannot find tag '{}'".format(tag))
def _load_seqs(self, vot_root: str) -> None:
list_file = os.path.join(vot_root, 'list.txt')
seq_name_list = read_file(list_file)
for seq_name in seq_name_list:
seq = self._load_single_seq(os.path.join(vot_root, seq_name))
self._seqs.append(seq)
def _load_single_seq(self, data_dir: str) -> Sequence:
name = os.path.basename(data_dir)
# load ground-truth annotation
gt_rects = np.loadtxt(os.path.join(data_dir, 'groundtruth.txt'), dtype=np.float64, delimiter=',')
# load frames
img_dir = os.path.join(data_dir, 'color')
frames = [os.path.join(img_dir, '{:08d}.jpg'.format(i+1)) for i in range(len(gt_rects))]
kwargs = dict()
for att in ['occlusion', 'illum_change', 'motion_change', 'size_change', 'camera_motion']:
tag_file = os.path.join(data_dir, '{}.tag'.format(att))
if os.path.exists(tag_file):
                att_tags = np.loadtxt(tag_file).astype(bool)
if len(att_tags) < len(frames):
                    _pad = np.zeros((len(frames),), dtype=bool)
_pad[:len(att_tags)] = att_tags
att_tags = _pad
kwargs[att] = att_tags
if self.zip_mode:
seq_name = os.path.basename(data_dir)
zip_path = os.path.join(data_dir, '..', 'zips', '{}.zip'.format(seq_name))
else:
zip_path = None
return Sequence(name, frames, gt_rects, attrs=None, zip_path=zip_path, **kwargs)
@BENCHMARKS.register_module
class VOT17(VOT):
def __init__(self, data_root='data/benchmark/', zip_mode=False):
super(VOT17, self).__init__(name='vot17',
vot_root=os.path.join(data_root, 'vot17'),
zip_mode=zip_mode)
@BENCHMARKS.register_module
class VOT16(VOT):
def __init__(self, data_root='data/benchmark/', zip_mode=False):
super(VOT16, self).__init__(name='vot16',
vot_root=os.path.join(data_root, 'vot16'),
zip_mode=zip_mode)
| 37.620253 | 105 | 0.5821 |
4a265dbfbb52ff2693eeef28547374a23730396c | 2,495 | py | Python | backend/api/python_http_client/test/test_api_report_run_metrics_request.py | FrancisLfg/pipelines | b0466cb9626407f125bf7ce2c9de37991e654a6d | [
"Apache-2.0"
] | null | null | null | backend/api/python_http_client/test/test_api_report_run_metrics_request.py | FrancisLfg/pipelines | b0466cb9626407f125bf7ce2c9de37991e654a6d | [
"Apache-2.0"
] | null | null | null | backend/api/python_http_client/test/test_api_report_run_metrics_request.py | FrancisLfg/pipelines | b0466cb9626407f125bf7ce2c9de37991e654a6d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition. # noqa: E501
The version of the OpenAPI document: 1.0.0-dev.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kfp_server_api
from kfp_server_api.models.api_report_run_metrics_request import ApiReportRunMetricsRequest # noqa: E501
from kfp_server_api.rest import ApiException
class TestApiReportRunMetricsRequest(unittest.TestCase):
"""ApiReportRunMetricsRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ApiReportRunMetricsRequest
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kfp_server_api.models.api_report_run_metrics_request.ApiReportRunMetricsRequest() # noqa: E501
if include_optional :
return ApiReportRunMetricsRequest(
run_id = '0',
metrics = [
kfp_server_api.models.api_run_metric.apiRunMetric(
name = '0',
node_id = '0',
number_value = 1.337,
format = 'UNSPECIFIED', )
]
)
else :
return ApiReportRunMetricsRequest(
)
def testApiReportRunMetricsRequest(self):
"""Test ApiReportRunMetricsRequest"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 33.266667 | 138 | 0.676954 |
4a265f4c5a64df6d722eef18391454eae2ad69a1 | 5,031 | py | Python | tests/test_packages/test_skills_integration/test_coin_price.py | bryanchriswhite/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 1 | 2022-01-23T22:28:43.000Z | 2022-01-23T22:28:43.000Z | tests/test_packages/test_skills_integration/test_coin_price.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | null | null | null | tests/test_packages/test_skills_integration/test_coin_price.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the integration test for the coin price skill."""
import time
from typing import Dict
import pytest
from aea.helpers import http_requests as requests
from aea.test_tools.test_cases import AEATestCaseEmpty
def parse_prometheus_output(prom_data: bytes) -> Dict[str, float]:
"""Convert prometheus text output to a dict of {"metric": value}"""
metrics = {}
for line in prom_data.decode().splitlines():
tokens = line.split()
if tokens[0] != "#":
metrics.update({tokens[0]: float(tokens[1])})
return metrics
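# Illustrative behaviour of parse_prometheus_output (hypothetical payload):
#
#   payload = b"# HELP num_requests Total requests\nnum_requests 1.0\nnum_retrievals 3.0"
#   parse_prometheus_output(payload)
#   # -> {"num_requests": 1.0, "num_retrievals": 3.0}
#
# Lines starting with "#" are skipped; every other line is parsed as "<metric> <value>".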
@pytest.mark.integration
class TestCoinPriceSkill(AEATestCaseEmpty):
"""Test that coin price skill works."""
def test_coin_price(self):
"""Run the coin price skill sequence."""
coin_price_feed_aea_name = self.agent_name
self.generate_private_key()
self.add_private_key()
self.add_item("connection", "fetchai/http_client:0.24.0")
self.add_item("connection", "fetchai/http_server:0.23.0")
self.add_item("connection", "fetchai/prometheus:0.9.0")
self.add_item("skill", "fetchai/advanced_data_request:0.7.0")
self.set_config("agent.default_connection", "fetchai/http_server:0.23.0")
default_routing = {
"fetchai/http:1.1.0": "fetchai/http_client:0.24.0",
"fetchai/prometheus:1.1.0": "fetchai/prometheus:0.9.0",
}
setting_path = "agent.default_routing"
self.nested_set_config(setting_path, default_routing)
# set 'api spec path' *after* comparison with fetched agent.
self.set_config(
"vendor.fetchai.connections.http_server.config.api_spec_path",
"vendor/fetchai/skills/advanced_data_request/api_spec.yaml",
)
self.set_config(
"vendor.fetchai.connections.http_server.config.target_skill_id",
"fetchai/advanced_data_request:0.7.0",
)
self.set_config(
"vendor.fetchai.skills.advanced_data_request.models.advanced_data_request_model.args.use_http_server",
True,
type_="bool",
)
self.set_config(
"vendor.fetchai.skills.advanced_data_request.models.advanced_data_request_model.args.url",
"https://api.coingecko.com/api/v3/simple/price?ids=fetch-ai&vs_currencies=usd",
type_="str",
)
self.set_config(
"vendor.fetchai.skills.advanced_data_request.models.advanced_data_request_model.args.outputs",
'[{"name": "price", "json_path": "fetch-ai.usd"}]',
type_="list",
)
diff = self.difference_to_fetched_agent(
"fetchai/coin_price_feed:0.15.0", coin_price_feed_aea_name
)
assert (
diff == []
), "Difference between created and fetched project for files={}".format(diff)
self.run_install()
process = self.run_agent()
is_running = self.is_running(process)
assert is_running, "AEA not running within timeout!"
time.sleep(6) # we wait a bit longer than the tick rate of the behaviour
response = requests.get("http://127.0.0.1:8000/data")
assert response.status_code == 200, "Failed to get response code 200"
coin_price = response.json()
assert "price" in coin_price, "Response does not contain 'price'"
response = requests.get("http://127.0.0.1:8000")
assert response.status_code == 404
assert response.content == b"", "Get request should not work without valid path"
response = requests.post("http://127.0.0.1:8000/data")
assert response.status_code == 404
assert response.content == b"", "Post not allowed"
# test prometheus metrics
prom_response = requests.get("http://127.0.0.1:9090/metrics")
metrics = parse_prometheus_output(prom_response.content)
assert metrics["num_retrievals"] > 0.0, "num_retrievals metric not updated"
assert metrics["num_requests"] == 1.0, "num_requests metric not equal to 1"
self.terminate_agents()
assert (
self.is_successfully_terminated()
), "Http echo agent wasn't successfully terminated."
| 39.304688 | 114 | 0.636255 |
4a266002989791867cf4545fe65b05f759b2a16f | 1,873 | py | Python | venv/Lib/site-packages/pyrogram/raw/base/payment_charge.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 5 | 2021-09-11T22:01:15.000Z | 2022-03-16T21:33:42.000Z | backend/pyrogram/raw/base/payment_charge.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | null | null | null | backend/pyrogram/raw/base/payment_charge.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | 3 | 2022-01-18T11:06:22.000Z | 2022-02-26T13:39:28.000Z | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
from typing import Union
from pyrogram import raw
from pyrogram.raw.core import TLObject
PaymentCharge = Union[raw.types.PaymentCharge]
# noinspection PyRedeclaration
class PaymentCharge: # type: ignore
"""This base type has 1 constructor available.
Constructors:
.. hlist::
:columns: 2
- :obj:`PaymentCharge <pyrogram.raw.types.PaymentCharge>`
"""
QUALNAME = "pyrogram.raw.base.PaymentCharge"
def __init__(self):
raise TypeError("Base types can only be used for type checking purposes: "
"you tried to use a base type instance as argument, "
"but you need to instantiate one of its constructors instead. "
"More info: https://docs.pyrogram.org/telegram/base/payment-charge")
| 37.46 | 92 | 0.641217 |
4a266023c87aaa921a04897001c3cc745dd37909 | 21,376 | py | Python | tests/integration/package/test_package_command_zip.py | westonsteimel/aws-sam-cli | eeb462e50b41b408da3b8c7df618f5a0e3a4f3dc | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | tests/integration/package/test_package_command_zip.py | westonsteimel/aws-sam-cli | eeb462e50b41b408da3b8c7df618f5a0e3a4f3dc | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | tests/integration/package/test_package_command_zip.py | westonsteimel/aws-sam-cli | eeb462e50b41b408da3b8c7df618f5a0e3a4f3dc | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | import os
import pathlib
import re
from subprocess import Popen, PIPE, TimeoutExpired
import tempfile
from unittest import skipIf
from parameterized import parameterized, param
from samcli.lib.utils.hash import dir_checksum, file_checksum
from samcli.lib.warnings.sam_cli_warning import CodeDeployWarning
from .package_integ_base import PackageIntegBase
from tests.testing_utils import RUNNING_ON_CI, RUNNING_TEST_FOR_MASTER_ON_CI, RUN_BY_CANARY
# Package tests require credentials and CI/CD will only add credentials to the env if the PR is from the same repo.
# This is to restrict package tests to run outside of CI/CD, when the branch is not master and tests are not run by Canary.
SKIP_PACKAGE_TESTS = RUNNING_ON_CI and RUNNING_TEST_FOR_MASTER_ON_CI and not RUN_BY_CANARY
TIMEOUT = 300
@skipIf(SKIP_PACKAGE_TESTS, "Skip package tests in CI/CD only")
class TestPackageZip(PackageIntegBase):
def setUp(self):
super().setUp()
def tearDown(self):
super().tearDown()
@parameterized.expand(["aws-serverless-function.yaml"])
def test_package_template_flag(self, template_file):
template_path = self.test_data_path.joinpath(template_file)
command_list = self.get_command_list(s3_bucket=self.s3_bucket.name, template=template_path)
process = Popen(command_list, stdout=PIPE)
try:
stdout, _ = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stdout = stdout.strip()
self.assertIn("{bucket_name}".format(bucket_name=self.s3_bucket.name), process_stdout.decode("utf-8"))
@parameterized.expand(
[
"aws-serverless-function.yaml",
"aws-serverless-api.yaml",
"aws-serverless-httpapi.yaml",
"aws-appsync-graphqlschema.yaml",
"aws-appsync-resolver.yaml",
"aws-appsync-functionconfiguration.yaml",
"aws-lambda-function.yaml",
"aws-apigateway-restapi.yaml",
"aws-elasticbeanstalk-applicationversion.yaml",
"aws-cloudformation-stack.yaml",
"aws-serverless-application.yaml",
"aws-lambda-layerversion.yaml",
"aws-serverless-layerversion.yaml",
"aws-glue-job.yaml",
"aws-serverlessrepo-application.yaml",
"aws-serverless-statemachine.yaml",
"aws-stepfunctions-statemachine.yaml",
"aws-include-transform.yaml",
]
)
def test_package_barebones(self, template_file):
template_path = self.test_data_path.joinpath(template_file)
command_list = self.get_command_list(s3_bucket=self.s3_bucket.name, template_file=template_path)
process = Popen(command_list, stdout=PIPE)
try:
stdout, _ = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stdout = stdout.strip()
self.assertIn("{bucket_name}".format(bucket_name=self.s3_bucket.name), process_stdout.decode("utf-8"))
def test_package_without_required_args(self):
command_list = self.get_command_list()
process = Popen(command_list, stdout=PIPE)
try:
process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
self.assertNotEqual(process.returncode, 0)
@parameterized.expand(
[
"aws-serverless-function.yaml",
"aws-serverless-api.yaml",
"aws-serverless-httpapi.yaml",
"aws-appsync-graphqlschema.yaml",
"aws-appsync-resolver.yaml",
"aws-appsync-functionconfiguration.yaml",
"aws-lambda-function.yaml",
"aws-apigateway-restapi.yaml",
"aws-elasticbeanstalk-applicationversion.yaml",
"aws-cloudformation-stack.yaml",
"aws-serverless-application.yaml",
"aws-lambda-layerversion.yaml",
"aws-serverless-layerversion.yaml",
"aws-glue-job.yaml",
"aws-serverlessrepo-application.yaml",
"aws-serverless-statemachine.yaml",
"aws-stepfunctions-statemachine.yaml",
]
)
def test_package_with_prefix(self, template_file):
template_path = self.test_data_path.joinpath(template_file)
s3_prefix = "integ_test_prefix"
command_list = self.get_command_list(
s3_bucket=self.s3_bucket.name, template_file=template_path, s3_prefix=s3_prefix
)
process = Popen(command_list, stdout=PIPE)
try:
stdout, _ = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stdout = stdout.strip()
self.assertIn("{bucket_name}".format(bucket_name=self.s3_bucket.name), process_stdout.decode("utf-8"))
self.assertIn("{s3_prefix}".format(s3_prefix=s3_prefix), process_stdout.decode("utf-8"))
@parameterized.expand(
[
"aws-serverless-function.yaml",
"aws-serverless-api.yaml",
"aws-serverless-httpapi.yaml",
"aws-appsync-graphqlschema.yaml",
"aws-appsync-resolver.yaml",
"aws-appsync-functionconfiguration.yaml",
"aws-lambda-function.yaml",
"aws-apigateway-restapi.yaml",
"aws-elasticbeanstalk-applicationversion.yaml",
"aws-cloudformation-stack.yaml",
"aws-serverless-application.yaml",
"aws-lambda-layerversion.yaml",
"aws-serverless-layerversion.yaml",
"aws-glue-job.yaml",
"aws-serverlessrepo-application.yaml",
"aws-serverless-statemachine.yaml",
"aws-stepfunctions-statemachine.yaml",
]
)
def test_package_with_output_template_file(self, template_file):
template_path = self.test_data_path.joinpath(template_file)
s3_prefix = "integ_test_prefix"
with tempfile.NamedTemporaryFile(delete=False) as output_template:
command_list = self.get_command_list(
s3_bucket=self.s3_bucket.name,
template_file=template_path,
s3_prefix=s3_prefix,
output_template_file=output_template.name,
)
process = Popen(command_list, stdout=PIPE)
try:
stdout, _ = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stdout = stdout.strip()
self.assertIn(
bytes(
"Successfully packaged artifacts and wrote output template to file {output_template_file}".format(
output_template_file=str(output_template.name)
),
encoding="utf-8",
),
process_stdout,
)
@parameterized.expand(
[
"aws-serverless-function.yaml",
"aws-serverless-api.yaml",
"aws-serverless-httpapi.yaml",
"aws-appsync-graphqlschema.yaml",
"aws-appsync-resolver.yaml",
"aws-appsync-functionconfiguration.yaml",
"aws-lambda-function.yaml",
"aws-apigateway-restapi.yaml",
"aws-elasticbeanstalk-applicationversion.yaml",
"aws-cloudformation-stack.yaml",
"aws-serverless-application.yaml",
"aws-lambda-layerversion.yaml",
"aws-serverless-layerversion.yaml",
"aws-glue-job.yaml",
"aws-serverlessrepo-application.yaml",
"aws-serverless-statemachine.yaml",
"aws-stepfunctions-statemachine.yaml",
]
)
def test_package_with_json(self, template_file):
template_path = self.test_data_path.joinpath(template_file)
s3_prefix = "integ_test_prefix"
with tempfile.NamedTemporaryFile(delete=False) as output_template:
command_list = self.get_command_list(
s3_bucket=self.s3_bucket.name,
template_file=template_path,
s3_prefix=s3_prefix,
output_template_file=output_template.name,
use_json=True,
)
process = Popen(command_list, stdout=PIPE)
try:
stdout, _ = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stdout = stdout.strip()
self.assertIn(
bytes(
"Successfully packaged artifacts and wrote output template to file {output_template_file}".format(
output_template_file=str(output_template.name)
),
encoding="utf-8",
),
process_stdout,
)
@parameterized.expand(
[
"aws-serverless-function.yaml",
"aws-serverless-api.yaml",
"aws-serverless-httpapi.yaml",
"aws-appsync-graphqlschema.yaml",
"aws-appsync-resolver.yaml",
"aws-appsync-functionconfiguration.yaml",
"aws-lambda-function.yaml",
"aws-apigateway-restapi.yaml",
"aws-elasticbeanstalk-applicationversion.yaml",
"aws-cloudformation-stack.yaml",
"aws-serverless-application.yaml",
"aws-lambda-layerversion.yaml",
"aws-serverless-layerversion.yaml",
"aws-glue-job.yaml",
"aws-serverlessrepo-application.yaml",
"aws-serverless-statemachine.yaml",
"aws-stepfunctions-statemachine.yaml",
]
)
def test_package_with_force_upload(self, template_file):
template_path = self.test_data_path.joinpath(template_file)
s3_prefix = "integ_test_prefix"
with tempfile.NamedTemporaryFile(delete=False) as output_template:
# Upload twice and see the string to have packaged artifacts both times.
for _ in range(2):
command_list = self.get_command_list(
s3_bucket=self.s3_bucket.name,
template_file=template_path,
s3_prefix=s3_prefix,
output_template_file=output_template.name,
force_upload=True,
)
process = Popen(command_list, stdout=PIPE)
try:
stdout, _ = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stdout = stdout.strip()
self.assertIn(
bytes(
"Successfully packaged artifacts and wrote output template to file {output_template_file}".format(
output_template_file=str(output_template.name)
),
encoding="utf-8",
),
process_stdout,
)
@parameterized.expand(
[
"aws-serverless-function.yaml",
"aws-serverless-api.yaml",
"aws-serverless-httpapi.yaml",
"aws-appsync-graphqlschema.yaml",
"aws-appsync-resolver.yaml",
"aws-appsync-functionconfiguration.yaml",
"aws-lambda-function.yaml",
"aws-apigateway-restapi.yaml",
"aws-elasticbeanstalk-applicationversion.yaml",
"aws-cloudformation-stack.yaml",
"aws-serverless-application.yaml",
"aws-lambda-layerversion.yaml",
"aws-serverless-layerversion.yaml",
"aws-glue-job.yaml",
"aws-serverlessrepo-application.yaml",
"aws-serverless-statemachine.yaml",
"aws-stepfunctions-statemachine.yaml",
]
)
def test_package_with_kms_key(self, template_file):
template_path = self.test_data_path.joinpath(template_file)
s3_prefix = "integ_test_prefix"
with tempfile.NamedTemporaryFile(delete=False) as output_template:
command_list = self.get_command_list(
s3_bucket=self.s3_bucket.name,
template_file=template_path,
s3_prefix=s3_prefix,
output_template_file=output_template.name,
force_upload=True,
kms_key_id=self.kms_key,
)
process = Popen(command_list, stdout=PIPE)
try:
stdout, _ = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stdout = stdout.strip()
self.assertIn(
bytes(
"Successfully packaged artifacts and wrote output template to file {output_template_file}".format(
output_template_file=str(output_template.name)
),
encoding="utf-8",
),
process_stdout,
)
@parameterized.expand(
[
"aws-serverless-function.yaml",
"aws-serverless-api.yaml",
"aws-serverless-httpapi.yaml",
"aws-appsync-graphqlschema.yaml",
"aws-appsync-resolver.yaml",
"aws-appsync-functionconfiguration.yaml",
"aws-lambda-function.yaml",
"aws-apigateway-restapi.yaml",
"aws-elasticbeanstalk-applicationversion.yaml",
"aws-cloudformation-stack.yaml",
"aws-serverless-application.yaml",
"aws-lambda-layerversion.yaml",
"aws-serverless-layerversion.yaml",
"aws-glue-job.yaml",
"aws-serverlessrepo-application.yaml",
"aws-serverless-statemachine.yaml",
"aws-stepfunctions-statemachine.yaml",
]
)
def test_package_with_metadata(self, template_file):
template_path = self.test_data_path.joinpath(template_file)
s3_prefix = "integ_test_prefix"
with tempfile.NamedTemporaryFile(delete=False) as output_template:
command_list = self.get_command_list(
s3_bucket=self.s3_bucket.name,
template_file=template_path,
s3_prefix=s3_prefix,
output_template_file=output_template.name,
force_upload=True,
metadata={"integ": "yes"},
)
process = Popen(command_list, stdout=PIPE)
try:
stdout, _ = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stdout = stdout.strip()
self.assertIn(
bytes(
"Successfully packaged artifacts and wrote output template to file {output_template_file}".format(
output_template_file=str(output_template.name)
),
encoding="utf-8",
),
process_stdout,
)
@parameterized.expand(
[
"aws-serverless-function.yaml",
"aws-serverless-api.yaml",
"aws-appsync-graphqlschema.yaml",
"aws-appsync-resolver.yaml",
"aws-appsync-functionconfiguration.yaml",
"aws-lambda-function.yaml",
"aws-apigateway-restapi.yaml",
"aws-elasticbeanstalk-applicationversion.yaml",
"aws-cloudformation-stack.yaml",
"aws-serverless-application.yaml",
"aws-lambda-layerversion.yaml",
"aws-serverless-layerversion.yaml",
"aws-glue-job.yaml",
"aws-serverlessrepo-application.yaml",
"aws-serverless-statemachine.yaml",
"aws-stepfunctions-statemachine.yaml",
]
)
def test_package_with_resolve_s3(self, template_file):
template_path = self.test_data_path.joinpath(template_file)
s3_prefix = "integ_test_prefix"
with tempfile.NamedTemporaryFile(delete=False) as output_template:
command_list = self.get_command_list(
template_file=template_path,
s3_prefix=s3_prefix,
output_template_file=output_template.name,
force_upload=True,
resolve_s3=True,
)
process = Popen(command_list, stdout=PIPE)
try:
stdout, _ = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stdout = stdout.strip()
self.assertIn(
bytes(
"Successfully packaged artifacts and wrote output template to file {output_template_file}".format(
output_template_file=str(output_template.name)
),
encoding="utf-8",
),
process_stdout,
)
@parameterized.expand([(True,), (False,)])
def test_package_with_no_progressbar(self, no_progressbar):
template_path = self.test_data_path.joinpath("aws-serverless-function.yaml")
s3_prefix = "integ_test_prefix"
with tempfile.NamedTemporaryFile(delete=False) as output_template:
command_list = self.get_command_list(
template_file=template_path,
s3_prefix=s3_prefix,
output_template_file=output_template.name,
force_upload=True,
no_progressbar=no_progressbar,
resolve_s3=True,
)
process = Popen(command_list, stdout=PIPE, stderr=PIPE)
try:
_, stderr = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stderr = stderr.strip()
upload_message = bytes("Uploading to", encoding="utf-8")
if no_progressbar:
self.assertNotIn(
upload_message,
process_stderr,
)
else:
self.assertIn(
upload_message,
process_stderr,
)
@parameterized.expand(
[
param("aws-serverless-function-codedeploy-warning.yaml", "CodeDeploy"),
param("aws-serverless-function-codedeploy-condition-warning.yaml", "CodeDeploy DeploymentGroups"),
]
)
def test_package_with_warning_template(self, template_file, warning_keyword):
template_path = self.test_data_path.joinpath(template_file)
command_list = self.get_command_list(s3_bucket=self.s3_bucket.name, template=template_path)
process = Popen(command_list, stdout=PIPE)
try:
stdout, _ = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stdout = stdout.strip().decode("utf-8")
# Not comparing with full warning message because of line ending mismatch on
# windows and non-windows
self.assertIn(warning_keyword, process_stdout)
def test_package_with_deep_nested_template(self):
"""
this template contains two nested stacks:
- root
- FunctionA
- ChildStackX
- FunctionB
- ChildStackY
- FunctionA
- MyLayerVersion
"""
template_file = os.path.join("deep-nested", "template.yaml")
template_path = self.test_data_path.joinpath(template_file)
command_list = self.get_command_list(s3_bucket=self.s3_bucket.name, template=template_path, force_upload=True)
process = Popen(command_list, stdout=PIPE, stderr=PIPE)
try:
_, stderr = process.communicate(timeout=TIMEOUT)
except TimeoutExpired:
process.kill()
raise
process_stderr = stderr.strip().decode("utf-8")
# there are in total 3 function dir, 1 layer dir and 2 child templates to upload
uploads = re.findall(r"Uploading to.+", process_stderr)
self.assertEqual(len(uploads), 6)
# make sure uploads' checksum match the dirs and child templates
build_dir = pathlib.Path(os.path.dirname(__file__)).parent.joinpath("testdata", "package", "deep-nested")
dirs = [
build_dir.joinpath("FunctionA"),
build_dir.joinpath("ChildStackX", "FunctionB"),
build_dir.joinpath("ChildStackX", "ChildStackY", "FunctionA"),
build_dir.joinpath("ChildStackX", "ChildStackY", "MyLayerVersion"),
]
# here we only verify function/layer code dirs' hash
# because templates go through some pre-process before being uploaded and the hash can not be determined
for dir in dirs:
checksum = dir_checksum(dir.absolute())
self.assertIn(checksum, process_stderr)
# verify both child templates are uploaded
uploads = re.findall(r"\.template", process_stderr)
self.assertEqual(len(uploads), 2)
| 38.865455 | 123 | 0.591832 |
4a2660d1dd5fedffca147810343fcd4ed3e4e03c | 4,897 | py | Python | program_slicing/graph/convert/cdg.py | acheshkov/program_slicing | 124d2dcf6c9c2cd8e505b96f4f47f3ea98f0a260 | [
"MIT"
] | 5 | 2021-11-06T04:35:17.000Z | 2022-03-21T09:11:54.000Z | program_slicing/graph/convert/cdg.py | acheshkov/program_slicing | 124d2dcf6c9c2cd8e505b96f4f47f3ea98f0a260 | [
"MIT"
] | 19 | 2021-11-15T14:42:56.000Z | 2022-02-01T14:30:34.000Z | program_slicing/graph/convert/cdg.py | acheshkov/program_slicing | 124d2dcf6c9c2cd8e505b96f4f47f3ea98f0a260 | [
"MIT"
] | null | null | null | __licence__ = 'MIT'
__author__ = 'kuyaki'
__credits__ = ['kuyaki']
__maintainer__ = 'kuyaki'
__date__ = '2021/04/01'
from typing import Dict, List
from program_slicing.graph.cdg import ControlDependenceGraph
from program_slicing.graph.cfg import ControlFlowGraph
from program_slicing.graph.ddg import DataDependenceGraph
from program_slicing.graph.pdg import ProgramDependenceGraph
from program_slicing.graph.basic_block import BasicBlock
from program_slicing.graph.statement import Statement
from program_slicing.graph.convert.cfg import to_ddg as cfg_to_ddg
def to_cfg(cdg: ControlDependenceGraph) -> ControlFlowGraph:
"""
Convert the Control Dependence Graph into a Control Flow Graph.
    The new graph will contain links to the nodes of the original one, so that
    any changes made to the original graph's statements after conversion will affect the converted graph.
    :param cdg: Control Dependence Graph that should be converted.
    :return: Control Flow Graph whose nodes contain the nodes of the Control Dependence Graph on which it is based.
"""
cfg = ControlFlowGraph()
block: Dict[Statement, BasicBlock] = {}
for root in cdg.entry_points:
__to_cfg(root, cdg=cdg, cfg=cfg, block=block)
cfg.set_scope_dependency(cdg.scope_dependency)
return cfg
def to_ddg(cdg: ControlDependenceGraph) -> DataDependenceGraph:
"""
Convert the Control Dependence Graph into a Data Dependence Graph.
    The new graph will contain the same nodes as the original one, so that
    any changes made to the original graph's statements after conversion will affect the converted graph.
    :param cdg: Control Dependence Graph that should be converted.
    :return: Data Dependence Graph whose nodes were present in the Control Dependence Graph on which it is based.
"""
cfg = to_cfg(cdg)
return cfg_to_ddg(cfg)
def to_pdg(cdg: ControlDependenceGraph) -> ProgramDependenceGraph:
"""
Convert the Control Dependence Graph into a Program Dependence Graph.
    The new graph will contain the same nodes as the original one, so that
    any changes made to the original graph's statements after conversion will affect the converted graph.
    :param cdg: Control Dependence Graph that should be converted.
    :return: Program Dependence Graph whose nodes were present in the original Control Dependence Graph.
"""
ddg = to_ddg(cdg)
pdg = ProgramDependenceGraph()
for node in cdg:
pdg.add_node(node)
for cdg_successor in cdg.successors(node):
pdg.add_edge(node, cdg_successor)
if node in ddg:
for ddg_successor in ddg.successors(node):
pdg.add_edge(node, ddg_successor)
for entry_point in cdg.entry_points:
pdg.add_entry_point(entry_point)
pdg.set_scope_dependency(cdg.scope_dependency)
return pdg
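# Minimal usage sketch for the converters above (assumes `cdg` is a
# ControlDependenceGraph built elsewhere in this package):
#
#   cfg = to_cfg(cdg)   # statements grouped into BasicBlocks, linked by control flow
#   ddg = to_ddg(cdg)   # built by first converting to a CFG, then deriving data dependencies
#   pdg = to_pdg(cdg)   # union of control dependence and data dependence edges
#
# All three share Statement objects with the source graph, so later mutations of
# those statements are visible in every converted graph.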
def __to_cfg(
statement: Statement,
cdg: ControlDependenceGraph,
cfg: ControlFlowGraph,
block: Dict[Statement, BasicBlock]) -> None:
f_children: List[Statement] = cdg.control_flow.get(statement, [])
prev_block: BasicBlock = block.get(statement, None)
process_list: List[Statement] = []
for child in f_children:
if child in block:
__process_loop(child, cfg, block, prev_block)
elif len(f_children) > 1:
new_block = BasicBlock(statements=[child])
cfg.add_node(new_block)
if prev_block is None:
cfg.add_entry_point(new_block)
else:
cfg.add_edge(prev_block, new_block)
block[child] = new_block
process_list.append(child)
else:
if prev_block is None:
prev_block = BasicBlock()
cfg.add_node(prev_block)
cfg.add_entry_point(prev_block)
prev_block.append(child)
block[child] = prev_block
process_list.append(child)
for child in process_list:
__to_cfg(child, cdg, cfg, block)
def __process_loop(
child: Statement,
cfg: ControlFlowGraph,
block: Dict[Statement, BasicBlock],
prev_block: BasicBlock) -> None:
old_block: BasicBlock = block[child]
index = old_block.statements.index(child)
if index == 0:
if prev_block is not None:
cfg.add_edge(prev_block, old_block)
return
new_block = old_block.split(index)
for new_block_statement in new_block.statements:
block[new_block_statement] = new_block
cfg.add_node(new_block)
old_successors: List[BasicBlock] = [successor for successor in cfg.successors(old_block)]
for old_successor in old_successors:
cfg.remove_edge(old_block, old_successor)
cfg.add_edge(new_block, old_successor)
cfg.add_edge(old_block, new_block)
if prev_block is not None:
cfg.add_edge(prev_block, new_block)
| 39.813008 | 120 | 0.700837 |
4a266106cdb808a33ff7ef5ee9224b147c18711e | 1,615 | py | Python | tests/conftest.py | adamkusmirek/mkdocs-static-i18n | c8be7dd86a7b0b951414e18b93b2a7b7ea439373 | [
"MIT"
] | null | null | null | tests/conftest.py | adamkusmirek/mkdocs-static-i18n | c8be7dd86a7b0b951414e18b93b2a7b7ea439373 | [
"MIT"
] | null | null | null | tests/conftest.py | adamkusmirek/mkdocs-static-i18n | c8be7dd86a7b0b951414e18b93b2a7b7ea439373 | [
"MIT"
] | null | null | null | import tempfile
import pytest
from mkdocs.config.base import load_config
@pytest.fixture
def config_base():
with tempfile.TemporaryDirectory(prefix="mkdocs_tests_") as site_dir:
return load_config(
"tests/mkdocs_base.yml", docs_dir="../docs/", site_dir=site_dir
)
@pytest.fixture
def config_plugin():
with tempfile.TemporaryDirectory(prefix="mkdocs_tests_") as site_dir:
return load_config(
"tests/mkdocs_i18n.yml", docs_dir="../docs/", site_dir=site_dir
)
@pytest.fixture
def config_plugin_static_nav():
with tempfile.TemporaryDirectory(prefix="mkdocs_tests_") as site_dir:
return load_config(
"tests/mkdocs_i18n_static_nav.yml", docs_dir="../docs/", site_dir=site_dir
)
@pytest.fixture
def config_plugin_no_default_language():
with tempfile.TemporaryDirectory(prefix="mkdocs_tests_") as site_dir:
return load_config(
"tests/mkdocs_i18n_no_default_language.yml",
docs_dir="../docs/",
site_dir=site_dir,
)
@pytest.fixture
def config_plugin_translated_nav():
with tempfile.TemporaryDirectory(prefix="mkdocs_tests_") as site_dir:
return load_config(
"tests/mkdocs_i18n_translated_nav.yml",
docs_dir="../docs/",
site_dir=site_dir,
)
@pytest.fixture
def config_plugin_search():
with tempfile.TemporaryDirectory(prefix="mkdocs_tests_") as site_dir:
return load_config(
"tests/mkdocs_i18n_search.yml",
docs_dir="../docs/",
site_dir=site_dir,
)
| 27.372881 | 86 | 0.668111 |
4a2661efb470c1729b1b26483256e864f7e0395f | 12,169 | py | Python | utils/utils.py | andreasmarxer/keras-yolo3 | b56bf38c830c6a57a299335aa2192fe845830264 | [
"MIT"
] | null | null | null | utils/utils.py | andreasmarxer/keras-yolo3 | b56bf38c830c6a57a299335aa2192fe845830264 | [
"MIT"
] | null | null | null | utils/utils.py | andreasmarxer/keras-yolo3 | b56bf38c830c6a57a299335aa2192fe845830264 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import os
from .bbox import BoundBox, bbox_iou
from scipy.special import expit
def _sigmoid(x):
return expit(x)
def makedirs(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def evaluate(model,
generator,
iou_threshold=0.5,
obj_thresh=0.5,
nms_thresh=0.45,
net_h=416,
net_w=416,
save_path=None):
""" Evaluate a given dataset using a given model.
code originally from https://github.com/fizyr/keras-retinanet
# Arguments
model : The model to evaluate.
generator : The generator that represents the dataset to evaluate.
iou_threshold : The threshold used to consider when a detection is positive or negative.
obj_thresh : The threshold used to distinguish between object and non-object
nms_thresh : The threshold used to determine whether two detections are duplicates
net_h : The height of the input image to the model, higher value results in better accuracy
net_w : The width of the input image to the model
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
"""
# gather all detections and annotations
all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
for i in range(generator.size()):
raw_image = [generator.load_image(i)]
# make the boxes and the labels
pred_boxes = get_yolo_boxes(model, raw_image, net_h, net_w, generator.get_anchors(), obj_thresh, nms_thresh)[0]
score = np.array([box.get_score() for box in pred_boxes])
pred_labels = np.array([box.label for box in pred_boxes])
if len(pred_boxes) > 0:
pred_boxes = np.array([[box.xmin, box.ymin, box.xmax, box.ymax, box.get_score()] for box in pred_boxes])
else:
pred_boxes = np.array([[]])
# sort the boxes and the labels according to scores
score_sort = np.argsort(-score)
pred_labels = pred_labels[score_sort]
pred_boxes = pred_boxes[score_sort]
# copy detections to all_detections
for label in range(generator.num_classes()):
all_detections[i][label] = pred_boxes[pred_labels == label, :]
annotations = generator.load_annotation(i)
# copy detections to all_annotations
for label in range(generator.num_classes()):
all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
# compute mAP by comparing all detections and all annotations
average_precisions = {}
for label in range(generator.num_classes()):
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(generator.size()):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
# no annotations -> AP for this class is 0 (is this correct?)
if num_annotations == 0:
average_precisions[label] = 0
continue
# sort by score
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# compute average precision
average_precision = compute_ap(recall, precision)
average_precisions[label] = average_precision
return average_precisions
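# Illustrative usage of evaluate() (hypothetical model and generator objects):
#
#   average_precisions = evaluate(model, valid_generator, iou_threshold=0.5, obj_thresh=0.5)
#   for label_idx, average_precision in average_precisions.items():
#       print(label_idx, '{:.4f}'.format(average_precision))
#   mAP = sum(average_precisions.values()) / len(average_precisions)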
# stretch the decoded boxes back to the original image size
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)/image_w
else:
        new_h = net_h  # height is the constrained dimension in this branch
new_w = (image_w*net_h)/image_h
#print('correct yolo boxes - new_h: ' +str(new_h))
#print('correct yolo boxes - new_w: ' +str(new_w))
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w #either high or width is let original with offset 0 scale 1
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
        # if/elif added by Andreas: clamp numerically exploded coordinates instead of rescaling them
if abs(boxes[i].xmin) > 99999:
boxes[i].xmin = 9999999999
elif abs(boxes[i].xmax) > 99999:
boxes[i].xmax = 9999999999
else:
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
# non-max supression
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4] = _sigmoid(netout[..., 4])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
row = i // grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[row, col, b, 4]
if(objectness <= obj_thresh): continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[row,col,b,:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[row,col,b,5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
boxes.append(box)
return boxes
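# Worked example of the box decoding above (hypothetical numbers): with
# grid_w = grid_h = 13, net_w = net_h = 416, an anchor of (116, 90) and a cell at
# row = col = 6 whose sigmoid x, y outputs are 0.5 and raw w, h outputs are 0:
#   x = (6 + 0.5) / 13 = 0.5         # box centre, as a fraction of image width
#   w = 116 * exp(0) / 416 ~ 0.279   # box width, as a fraction of image width
# The resulting BoundBox therefore stores corner coordinates normalised to [0, 1].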
def preprocess_input(image, net_h, net_w):
new_h, new_w, _ = image.shape
# determine the new size of the image
if (float(net_w)/new_w) < (float(net_h)/new_h):
new_h = (new_h * net_w)//new_w
new_w = net_w
else:
new_w = (new_w * net_h)//new_h
new_h = net_h
# resize the image to the new size
resized = cv2.resize(image[:,:,::-1]/255., (new_w, new_h))
# embed the image into the standard letter box
new_image = np.ones((net_h, net_w, 3)) * 0.5
new_image[(net_h-new_h)//2:(net_h+new_h)//2, (net_w-new_w)//2:(net_w+new_w)//2, :] = resized
new_image = np.expand_dims(new_image, 0)
return new_image
def normalize(image):
return image/255.
# run the model on a batch of images, decode the YOLO output and rescale the boxes to the original image size
def get_yolo_boxes(model, images, net_h, net_w, anchors, obj_thresh, nms_thresh):
image_h, image_w, _ = images[0].shape
nb_images = len(images)
batch_input = np.zeros((nb_images, net_h, net_w, 3))
# preprocess the input
for i in range(nb_images):
batch_input[i] = preprocess_input(images[i], net_h, net_w)
# run the prediction
batch_output = model.predict_on_batch(batch_input)
batch_boxes = [None]*nb_images
for i in range(nb_images):
yolos = [batch_output[0][i], batch_output[1][i], batch_output[2][i]]
boxes = []
# decode the output of the network
for j in range(len(yolos)):
yolo_anchors = anchors[(2-j)*6:(3-j)*6] # config['model']['anchors']
boxes += decode_netout(yolos[j], yolo_anchors, obj_thresh, net_h, net_w)
# correct the sizes of the bounding boxes to the original image size
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
# suppress non-maximal boxes
do_nms(boxes, nms_thresh)
batch_boxes[i] = boxes
return batch_boxes
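# Illustrative usage of get_yolo_boxes (hypothetical image path, model and anchor values):
#
#   image = cv2.imread('test.jpg')
#   anchors = [55,69, 75,234, 133,240, 136,129, 142,363, 203,290, 228,184, 285,359, 341,260]
#   boxes = get_yolo_boxes(model, [image], 416, 416, anchors, obj_thresh=0.5, nms_thresh=0.45)[0]
#   for box in boxes:
#       print(box.xmin, box.ymin, box.xmax, box.ymax, box.get_score())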
def compute_overlap(a, b):
"""
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
Parameters
----------
a: (N, 4) ndarray of float
b: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])
ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])
iw = np.maximum(iw, 0)
ih = np.maximum(ih, 0)
ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih
ua = np.maximum(ua, np.finfo(float).eps)
intersection = iw * ih
return intersection / ua
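# Worked example (hypothetical boxes): for a = np.array([[0., 0., 10., 10.]]) and
# b = np.array([[5., 5., 15., 15.]]) the intersection is 5 * 5 = 25 and the union is
# 100 + 100 - 25 = 175, so compute_overlap(a, b) returns [[25 / 175]] ~ [[0.143]].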
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
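# Worked example (hypothetical curves): recall = [0.5, 1.0] and precision = [1.0, 0.5]
# are padded to mrec = [0, 0.5, 1.0, 1.0] and mpre = [0, 1.0, 0.5, 0]; the running
# maximum from the right turns mpre into [1.0, 1.0, 0.5, 0], and summing
# (delta recall) * precision over the points where recall changes gives
# 0.5 * 1.0 + 0.5 * 0.5 = 0.75.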
def _softmax(x, axis=-1):
x = x - np.amax(x, axis, keepdims=True)
e_x = np.exp(x)
return e_x / e_x.sum(axis, keepdims=True)
| 36.434132 | 135 | 0.599638 |
4a2663218fac23d533f18bddaa5a4264609e9b6c | 22,805 | py | Python | networkx/readwrite/tests/test_gexf.py | ibraheem-moosa/networkx | fe8ac9f2d9c66e0337e11688078fdaccc90052cd | [
"BSD-3-Clause"
] | 1 | 2020-08-20T03:02:00.000Z | 2020-08-20T03:02:00.000Z | networkx/readwrite/tests/test_gexf.py | hugs314/networkx | 7b4168bdb09275a2ed860b01b9dbb77a151d85c6 | [
"BSD-3-Clause"
] | null | null | null | networkx/readwrite/tests/test_gexf.py | hugs314/networkx | 7b4168bdb09275a2ed860b01b9dbb77a151d85c6 | [
"BSD-3-Clause"
] | null | null | null | import io
import sys
import time
import pytest
import networkx as nx
class TestGEXF:
@classmethod
def setup_class(cls):
_ = pytest.importorskip("xml.etree.ElementTree")
cls.simple_directed_data = """<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft" version="1.2">
<graph mode="static" defaultedgetype="directed">
<nodes>
<node id="0" label="Hello" />
<node id="1" label="Word" />
</nodes>
<edges>
<edge id="0" source="0" target="1" />
</edges>
</graph>
</gexf>
"""
cls.simple_directed_graph = nx.DiGraph()
cls.simple_directed_graph.add_node("0", label="Hello")
cls.simple_directed_graph.add_node("1", label="World")
cls.simple_directed_graph.add_edge("0", "1", id="0")
cls.simple_directed_fh = io.BytesIO(cls.simple_directed_data.encode("UTF-8"))
cls.attribute_data = """<?xml version="1.0" encoding="UTF-8"?>\
<gexf xmlns="http://www.gexf.net/1.2draft" xmlns:xsi="http://www.w3.\
org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.gexf.net/\
1.2draft http://www.gexf.net/1.2draft/gexf.xsd" version="1.2">
<meta lastmodifieddate="2009-03-20">
<creator>Gephi.org</creator>
<description>A Web network</description>
</meta>
<graph defaultedgetype="directed">
<attributes class="node">
<attribute id="0" title="url" type="string"/>
<attribute id="1" title="indegree" type="integer"/>
<attribute id="2" title="frog" type="boolean">
<default>true</default>
</attribute>
</attributes>
<nodes>
<node id="0" label="Gephi">
<attvalues>
<attvalue for="0" value="https://gephi.org"/>
<attvalue for="1" value="1"/>
<attvalue for="2" value="false"/>
</attvalues>
</node>
<node id="1" label="Webatlas">
<attvalues>
<attvalue for="0" value="http://webatlas.fr"/>
<attvalue for="1" value="2"/>
<attvalue for="2" value="false"/>
</attvalues>
</node>
<node id="2" label="RTGI">
<attvalues>
<attvalue for="0" value="http://rtgi.fr"/>
<attvalue for="1" value="1"/>
<attvalue for="2" value="true"/>
</attvalues>
</node>
<node id="3" label="BarabasiLab">
<attvalues>
<attvalue for="0" value="http://barabasilab.com"/>
<attvalue for="1" value="1"/>
<attvalue for="2" value="true"/>
</attvalues>
</node>
</nodes>
<edges>
<edge id="0" source="0" target="1"/>
<edge id="1" source="0" target="2"/>
<edge id="2" source="1" target="0"/>
<edge id="3" source="2" target="1"/>
<edge id="4" source="0" target="3"/>
</edges>
</graph>
</gexf>
"""
cls.attribute_graph = nx.DiGraph()
cls.attribute_graph.graph["node_default"] = {"frog": True}
cls.attribute_graph.add_node(
"0", label="Gephi", url="https://gephi.org", indegree=1, frog=False
)
cls.attribute_graph.add_node(
"1", label="Webatlas", url="http://webatlas.fr", indegree=2, frog=False
)
cls.attribute_graph.add_node(
"2", label="RTGI", url="http://rtgi.fr", indegree=1, frog=True
)
cls.attribute_graph.add_node(
"3",
label="BarabasiLab",
url="http://barabasilab.com",
indegree=1,
frog=True,
)
cls.attribute_graph.add_edge("0", "1", id="0")
cls.attribute_graph.add_edge("0", "2", id="1")
cls.attribute_graph.add_edge("1", "0", id="2")
cls.attribute_graph.add_edge("2", "1", id="3")
cls.attribute_graph.add_edge("0", "3", id="4")
cls.attribute_fh = io.BytesIO(cls.attribute_data.encode("UTF-8"))
cls.simple_undirected_data = """<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft" version="1.2">
<graph mode="static" defaultedgetype="undirected">
<nodes>
<node id="0" label="Hello" />
<node id="1" label="Word" />
</nodes>
<edges>
<edge id="0" source="0" target="1" />
</edges>
</graph>
</gexf>
"""
cls.simple_undirected_graph = nx.Graph()
cls.simple_undirected_graph.add_node("0", label="Hello")
cls.simple_undirected_graph.add_node("1", label="World")
cls.simple_undirected_graph.add_edge("0", "1", id="0")
cls.simple_undirected_fh = io.BytesIO(
cls.simple_undirected_data.encode("UTF-8")
)
def test_read_simple_directed_graphml(self):
G = self.simple_directed_graph
H = nx.read_gexf(self.simple_directed_fh)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(G.edges()) == sorted(H.edges())
assert sorted(G.edges(data=True)) == sorted(H.edges(data=True))
self.simple_directed_fh.seek(0)
def test_write_read_simple_directed_graphml(self):
G = self.simple_directed_graph
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
H = nx.read_gexf(fh)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(G.edges()) == sorted(H.edges())
assert sorted(G.edges(data=True)) == sorted(H.edges(data=True))
self.simple_directed_fh.seek(0)
def test_read_simple_undirected_graphml(self):
G = self.simple_undirected_graph
H = nx.read_gexf(self.simple_undirected_fh)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(sorted(e) for e in G.edges()) == sorted(
sorted(e) for e in H.edges()
)
self.simple_undirected_fh.seek(0)
def test_read_attribute_graphml(self):
G = self.attribute_graph
H = nx.read_gexf(self.attribute_fh)
assert sorted(G.nodes(True)) == sorted(H.nodes(data=True))
ge = sorted(G.edges(data=True))
he = sorted(H.edges(data=True))
for a, b in zip(ge, he):
assert a == b
self.attribute_fh.seek(0)
def test_directed_edge_in_undirected(self):
s = """<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft" version='1.2'>
<graph mode="static" defaultedgetype="undirected" name="">
<nodes>
<node id="0" label="Hello" />
<node id="1" label="Word" />
</nodes>
<edges>
<edge id="0" source="0" target="1" type="directed"/>
</edges>
</graph>
</gexf>
"""
fh = io.BytesIO(s.encode("UTF-8"))
pytest.raises(nx.NetworkXError, nx.read_gexf, fh)
def test_undirected_edge_in_directed(self):
s = """<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft" version='1.2'>
<graph mode="static" defaultedgetype="directed" name="">
<nodes>
<node id="0" label="Hello" />
<node id="1" label="Word" />
</nodes>
<edges>
<edge id="0" source="0" target="1" type="undirected"/>
</edges>
</graph>
</gexf>
"""
fh = io.BytesIO(s.encode("UTF-8"))
pytest.raises(nx.NetworkXError, nx.read_gexf, fh)
def test_key_raises(self):
s = """<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft" version='1.2'>
<graph mode="static" defaultedgetype="directed" name="">
<nodes>
<node id="0" label="Hello">
<attvalues>
<attvalue for='0' value='1'/>
</attvalues>
</node>
<node id="1" label="Word" />
</nodes>
<edges>
<edge id="0" source="0" target="1" type="undirected"/>
</edges>
</graph>
</gexf>
"""
fh = io.BytesIO(s.encode("UTF-8"))
pytest.raises(nx.NetworkXError, nx.read_gexf, fh)
def test_relabel(self):
s = """<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft" version='1.2'>
<graph mode="static" defaultedgetype="directed" name="">
<nodes>
<node id="0" label="Hello" />
<node id="1" label="Word" />
</nodes>
<edges>
<edge id="0" source="0" target="1"/>
</edges>
</graph>
</gexf>
"""
fh = io.BytesIO(s.encode("UTF-8"))
G = nx.read_gexf(fh, relabel=True)
assert sorted(G.nodes()) == ["Hello", "Word"]
def test_default_attribute(self):
G = nx.Graph()
G.add_node(1, label="1", color="green")
nx.add_path(G, [0, 1, 2, 3])
G.add_edge(1, 2, foo=3)
G.graph["node_default"] = {"color": "yellow"}
G.graph["edge_default"] = {"foo": 7}
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(sorted(e) for e in G.edges()) == sorted(
sorted(e) for e in H.edges()
)
# Reading a gexf graph always sets mode attribute to either
# 'static' or 'dynamic'. Remove the mode attribute from the
# read graph for the sake of comparing remaining attributes.
del H.graph["mode"]
assert G.graph == H.graph
def test_serialize_ints_to_strings(self):
G = nx.Graph()
G.add_node(1, id=7, label=77)
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert list(H) == [7]
assert H.nodes[7]["label"] == "77"
# FIXME: We should test xml without caring about their order This is causing a
# problem b/c of a change in Python 3.8
#
# "Prior to Python 3.8, the serialisation order of the XML attributes of
# elements was artificially made predictable by sorting the attributes by their
# name. Based on the now guaranteed ordering of dicts, this arbitrary
# reordering was removed in Python 3.8 to preserve the order in which
# attributes were originally parsed or created by user code."
#
# https://docs.python.org/3.8/library/xml.etree.elementtree.html
# https://bugs.python.org/issue34160
def test_write_with_node_attributes(self):
# Addresses #673.
G = nx.OrderedGraph()
G.add_edges_from([(0, 1), (1, 2), (2, 3)])
for i in range(4):
G.nodes[i]["id"] = i
G.nodes[i]["label"] = i
G.nodes[i]["pid"] = i
G.nodes[i]["start"] = i
G.nodes[i]["end"] = i + 1
if sys.version_info < (3, 8):
expected = f"""<gexf version="1.2" xmlns="http://www.gexf.net/1.2\
draft" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:\
schemaLocation="http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/\
gexf.xsd">
<meta lastmodifieddate="{time.strftime('%Y-%m-%d')}">
<creator>NetworkX {nx.__version__}</creator>
</meta>
<graph defaultedgetype="undirected" mode="dynamic" name="" timeformat="long">
<nodes>
<node end="1" id="0" label="0" pid="0" start="0" />
<node end="2" id="1" label="1" pid="1" start="1" />
<node end="3" id="2" label="2" pid="2" start="2" />
<node end="4" id="3" label="3" pid="3" start="3" />
</nodes>
<edges>
<edge id="0" source="0" target="1" />
<edge id="1" source="1" target="2" />
<edge id="2" source="2" target="3" />
</edges>
</graph>
</gexf>"""
else:
expected = f"""<gexf xmlns="http://www.gexf.net/1.2draft" xmlns:xsi\
="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation=\
"http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/\
gexf.xsd" version="1.2">
<meta lastmodifieddate="{time.strftime('%Y-%m-%d')}">
<creator>NetworkX {nx.__version__}</creator>
</meta>
<graph defaultedgetype="undirected" mode="dynamic" name="" timeformat="long">
<nodes>
<node id="0" label="0" pid="0" start="0" end="1" />
<node id="1" label="1" pid="1" start="1" end="2" />
<node id="2" label="2" pid="2" start="2" end="3" />
<node id="3" label="3" pid="3" start="3" end="4" />
</nodes>
<edges>
<edge source="0" target="1" id="0" />
<edge source="1" target="2" id="1" />
<edge source="2" target="3" id="2" />
</edges>
</graph>
</gexf>"""
obtained = "\n".join(nx.generate_gexf(G))
assert expected == obtained
def test_edge_id_construct(self):
G = nx.Graph()
G.add_edges_from([(0, 1, {"id": 0}), (1, 2, {"id": 2}), (2, 3)])
if sys.version_info < (3, 8):
expected = f"""<gexf version="1.2" xmlns="http://www.gexf.net/\
1.2draft" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:\
schemaLocation="http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/\
gexf.xsd">
<meta lastmodifieddate="{time.strftime('%Y-%m-%d')}">
<creator>NetworkX {nx.__version__}</creator>
</meta>
<graph defaultedgetype="undirected" mode="static" name="">
<nodes>
<node id="0" label="0" />
<node id="1" label="1" />
<node id="2" label="2" />
<node id="3" label="3" />
</nodes>
<edges>
<edge id="0" source="0" target="1" />
<edge id="2" source="1" target="2" />
<edge id="1" source="2" target="3" />
</edges>
</graph>
</gexf>"""
else:
expected = f"""<gexf xmlns="http://www.gexf.net/1.2draft" xmlns:xsi\
="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.\
gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd" version="1.2">
<meta lastmodifieddate="{time.strftime('%Y-%m-%d')}">
<creator>NetworkX {nx.__version__}</creator>
</meta>
<graph defaultedgetype="undirected" mode="static" name="">
<nodes>
<node id="0" label="0" />
<node id="1" label="1" />
<node id="2" label="2" />
<node id="3" label="3" />
</nodes>
<edges>
<edge source="0" target="1" id="0" />
<edge source="1" target="2" id="2" />
<edge source="2" target="3" id="1" />
</edges>
</graph>
</gexf>"""
obtained = "\n".join(nx.generate_gexf(G))
assert expected == obtained
def test_numpy_type(self):
G = nx.path_graph(4)
try:
import numpy
except ImportError:
return
nx.set_node_attributes(G, {n: n for n in numpy.arange(4)}, "number")
G[0][1]["edge-number"] = numpy.float64(1.1)
if sys.version_info < (3, 8):
expected = f"""<gexf version="1.2" xmlns="http://www.gexf.net/1.2draft"\
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation\
="http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd">
<meta lastmodifieddate="{time.strftime('%Y-%m-%d')}">
<creator>NetworkX {nx.__version__}</creator>
</meta>
<graph defaultedgetype="undirected" mode="static" name="">
<attributes class="edge" mode="static">
<attribute id="1" title="edge-number" type="float" />
</attributes>
<attributes class="node" mode="static">
<attribute id="0" title="number" type="int" />
</attributes>
<nodes>
<node id="0" label="0">
<attvalues>
<attvalue for="0" value="0" />
</attvalues>
</node>
<node id="1" label="1">
<attvalues>
<attvalue for="0" value="1" />
</attvalues>
</node>
<node id="2" label="2">
<attvalues>
<attvalue for="0" value="2" />
</attvalues>
</node>
<node id="3" label="3">
<attvalues>
<attvalue for="0" value="3" />
</attvalues>
</node>
</nodes>
<edges>
<edge id="0" source="0" target="1">
<attvalues>
<attvalue for="1" value="1.1" />
</attvalues>
</edge>
<edge id="1" source="1" target="2" />
<edge id="2" source="2" target="3" />
</edges>
</graph>
</gexf>"""
else:
expected = f"""<gexf xmlns="http://www.gexf.net/1.2draft"\
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation\
="http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd"\
version="1.2">
<meta lastmodifieddate="{time.strftime('%Y-%m-%d')}">
<creator>NetworkX {nx.__version__}</creator>
</meta>
<graph defaultedgetype="undirected" mode="static" name="">
<attributes mode="static" class="edge">
<attribute id="1" title="edge-number" type="float" />
</attributes>
<attributes mode="static" class="node">
<attribute id="0" title="number" type="int" />
</attributes>
<nodes>
<node id="0" label="0">
<attvalues>
<attvalue for="0" value="0" />
</attvalues>
</node>
<node id="1" label="1">
<attvalues>
<attvalue for="0" value="1" />
</attvalues>
</node>
<node id="2" label="2">
<attvalues>
<attvalue for="0" value="2" />
</attvalues>
</node>
<node id="3" label="3">
<attvalues>
<attvalue for="0" value="3" />
</attvalues>
</node>
</nodes>
<edges>
<edge source="0" target="1" id="0">
<attvalues>
<attvalue for="1" value="1.1" />
</attvalues>
</edge>
<edge source="1" target="2" id="1" />
<edge source="2" target="3" id="2" />
</edges>
</graph>
</gexf>"""
obtained = "\n".join(nx.generate_gexf(G))
assert expected == obtained
def test_bool(self):
G = nx.Graph()
G.add_node(1, testattr=True)
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert H.nodes[1]["testattr"]
# Test for NaN, INF and -INF
def test_specials(self):
from math import isnan
inf, nan = float("inf"), float("nan")
G = nx.Graph()
G.add_node(1, testattr=inf, strdata="inf", key="a")
G.add_node(2, testattr=nan, strdata="nan", key="b")
G.add_node(3, testattr=-inf, strdata="-inf", key="c")
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
filetext = fh.read()
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert b"INF" in filetext
assert b"NaN" in filetext
assert b"-INF" in filetext
assert H.nodes[1]["testattr"] == inf
assert isnan(H.nodes[2]["testattr"])
assert H.nodes[3]["testattr"] == -inf
assert H.nodes[1]["strdata"] == "inf"
assert H.nodes[2]["strdata"] == "nan"
assert H.nodes[3]["strdata"] == "-inf"
assert H.nodes[1]["networkx_key"] == "a"
assert H.nodes[2]["networkx_key"] == "b"
assert H.nodes[3]["networkx_key"] == "c"
def test_simple_list(self):
G = nx.Graph()
list_value = [(1, 2, 3), (9, 1, 2)]
G.add_node(1, key=list_value)
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert H.nodes[1]["networkx_key"] == list_value
def test_dynamic_mode(self):
G = nx.Graph()
G.add_node(1, label="1", color="green")
G.graph["mode"] = "dynamic"
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(sorted(e) for e in G.edges()) == sorted(
sorted(e) for e in H.edges()
)
def test_multigraph_with_missing_attributes(self):
G = nx.MultiGraph()
G.add_node(0, label="1", color="green")
G.add_node(1, label="2", color="green")
G.add_edge(0, 1, id="0", weight=3, type="undirected", start=0, end=1)
G.add_edge(0, 1)
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(sorted(e) for e in G.edges()) == sorted(
sorted(e) for e in H.edges()
)
def test_missing_viz_attributes(self):
G = nx.Graph()
G.add_node(0, label="1", color="green")
G.nodes[0]["viz"] = {"size": 54}
G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1, "z": 0}
G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256}
G.nodes[0]["viz"]["shape"] = "http://random.url"
G.nodes[0]["viz"]["thickness"] = 2
fh = io.BytesIO()
nx.write_gexf(G, fh, version="1.1draft")
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(sorted(e) for e in G.edges()) == sorted(
sorted(e) for e in H.edges()
)
# Second graph for the other branch
G = nx.Graph()
G.add_node(0, label="1", color="green")
G.nodes[0]["viz"] = {"size": 54}
G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1, "z": 0}
G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256, "a": 0.5}
G.nodes[0]["viz"]["shape"] = "ftp://random.url"
G.nodes[0]["viz"]["thickness"] = 2
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(sorted(e) for e in G.edges()) == sorted(
sorted(e) for e in H.edges()
)
def test_slice_and_spell(self):
# Test spell first, so version = 1.2
G = nx.Graph()
G.add_node(0, label="1", color="green")
G.nodes[0]["spells"] = [(1, 2)]
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(sorted(e) for e in G.edges()) == sorted(
sorted(e) for e in H.edges()
)
G = nx.Graph()
G.add_node(0, label="1", color="green")
G.nodes[0]["slices"] = [(1, 2)]
fh = io.BytesIO()
nx.write_gexf(G, fh, version="1.1draft")
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(sorted(e) for e in G.edges()) == sorted(
sorted(e) for e in H.edges()
)
def test_add_parent(self):
G = nx.Graph()
G.add_node(0, label="1", color="green", parents=[1, 2])
fh = io.BytesIO()
nx.write_gexf(G, fh)
fh.seek(0)
H = nx.read_gexf(fh, node_type=int)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(sorted(e) for e in G.edges()) == sorted(
sorted(e) for e in H.edges()
)
| 34.658055 | 85 | 0.545056 |
4a26648869af28ec0b7f28ff31ae30b04775baa5 | 259 | py | Python | Chapter05/datetimes_01.py | vabyte/Modern-Python-Standard-Library-Cookbook | 4f53e3ab7b61aca1cca9343e7421e170280cd5b5 | [
"MIT"
] | 84 | 2018-08-09T09:30:03.000Z | 2022-01-04T23:20:38.000Z | Chapter05/datetimes_01.py | jiro74/Modern-Python-Standard-Library-Cookbook | 4f53e3ab7b61aca1cca9343e7421e170280cd5b5 | [
"MIT"
] | 1 | 2019-11-04T18:57:40.000Z | 2020-09-07T08:52:25.000Z | Chapter05/datetimes_01.py | jiro74/Modern-Python-Standard-Library-Cookbook | 4f53e3ab7b61aca1cca9343e7421e170280cd5b5 | [
"MIT"
] | 33 | 2018-09-26T11:05:55.000Z | 2022-03-15T10:31:10.000Z | import datetime
def now():
return datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
def astimezone(d, offset):
return d.astimezone(datetime.timezone(datetime.timedelta(hours=offset)))
d = now()
print(d)
d = astimezone(d, 1)
print(d)
| 19.923077 | 76 | 0.733591 |
4a2665379ab538120810c72d37bc108e504ce264 | 23,263 | py | Python | sympy/printing/fcode.py | GayanSandaruwan/sympy | 7b127bdf71a36d85216315f80c1b54d22b060818 | [
"BSD-3-Clause"
] | null | null | null | sympy/printing/fcode.py | GayanSandaruwan/sympy | 7b127bdf71a36d85216315f80c1b54d22b060818 | [
"BSD-3-Clause"
] | null | null | null | sympy/printing/fcode.py | GayanSandaruwan/sympy | 7b127bdf71a36d85216315f80c1b54d22b060818 | [
"BSD-3-Clause"
] | 1 | 2020-01-01T19:49:22.000Z | 2020-01-01T19:49:22.000Z | """
Fortran code printer
The FCodePrinter converts single sympy expressions into single Fortran
expressions, using the functions defined in the Fortran 77 standard where
possible. Some useful pointers to Fortran can be found on wikipedia:
http://en.wikipedia.org/wiki/Fortran
Most of the code below is based on the "Professional Programmer\'s Guide to
Fortran77" by Clive G. Page:
http://www.star.le.ac.uk/~cgp/prof77.html
Fortran is a case-insensitive language. This might cause trouble because
SymPy is case sensitive. The implementation below does not care and leaves
the responsibility for generating properly cased Fortran code to the user.
"""
from __future__ import print_function, division
from collections import defaultdict
from itertools import chain
import string
from sympy.core import S, Add, N, Float
from sympy.core.compatibility import string_types, range
from sympy.core.function import Function
from sympy.core.relational import Eq
from sympy.sets import Range
from sympy.codegen.ast import (Assignment, Declaration, Pointer, Type,
float32, float64, complex64, complex128, intc,
real, integer, bool_, complex_)
from sympy.codegen.ffunctions import isign, dsign, cmplx, merge, literal_dp
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence, PRECEDENCE
known_functions = {
"sin": "sin",
"cos": "cos",
"tan": "tan",
"asin": "asin",
"acos": "acos",
"atan": "atan",
"atan2": "atan2",
"sinh": "sinh",
"cosh": "cosh",
"tanh": "tanh",
"log": "log",
"exp": "exp",
"erf": "erf",
"Abs": "abs",
"conjugate": "conjg"
}
class FCodePrinter(CodePrinter):
"""A printer to convert sympy expressions to strings of Fortran code"""
printmethod = "_fcode"
language = "Fortran"
type_aliases = {
real: float64,
complex_: complex128,
}
type_mappings = {
intc: 'integer(c_int)',
float32: 'real(4)',
float64: 'real(8)',
complex64: 'complex(4)',
complex128: 'complex(8)',
integer: 'integer',
bool_: 'logical'
}
type_modules = {
intc: {'iso_c_binding': 'c_int'}
}
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 17,
'user_functions': {},
'human': True,
'source_format': 'fixed',
'contract': True,
'standard': 77,
}
_operators = {
'and': '.and.',
'or': '.or.',
'xor': '.neqv.',
'equivalent': '.eqv.',
'not': '.not. ',
}
_relationals = {
'!=': '/=',
}
def __init__(self, settings={}):
self.type_aliases = dict(chain(self.type_aliases.items(),
settings.pop('type_aliases', {}).items()))
self.type_mappings = dict(chain(self.type_mappings.items(),
settings.pop('type_mappings', {}).items()))
super(FCodePrinter, self).__init__(settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
# leading columns depend on fixed or free format
standards = {66, 77, 90, 95, 2003, 2008}
if self._settings['standard'] not in standards:
raise ValueError("Unknown Fortran standard: %s" % self._settings[
'standard'])
self.module_uses = defaultdict(set) # e.g.: use iso_c_binding, only: c_int
@property
def _lead(self):
if self._settings['source_format'] == 'fixed':
return {'code': " ", 'cont': " @ ", 'comment': "C "}
elif self._settings['source_format'] == 'free':
return {'code': "", 'cont': " ", 'comment': "! "}
else:
raise ValueError("Unknown source format: %s" % self._settings['source_format'])
def _rate_index_position(self, p):
return -p*5
def _get_statement(self, codestring):
return codestring
def _get_comment(self, text):
return "! {0}".format(text)
def _declare_number_const(self, name, value):
return "parameter ({0} = {1})".format(name, self._print(value))
def _print_NumberSymbol(self, expr):
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr, Float(expr.evalf(self._settings['precision']))))
return str(expr)
def _format_code(self, lines):
return self._wrap_fortran(self.indent_code(lines))
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# fortran arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("do %s = %s, %s" % (var, start, stop))
close_lines.append("end do")
return open_lines, close_lines
def _print_sign(self, expr):
from sympy import Abs
arg, = expr.args
if arg.is_integer:
new_expr = merge(0, isign(1, arg), Eq(arg, 0))
elif arg.is_complex:
new_expr = merge(cmplx(literal_dp(0), literal_dp(0)), arg/Abs(arg), Eq(Abs(arg), literal_dp(0)))
else:
new_expr = merge(literal_dp(0), dsign(literal_dp(1), arg), Eq(arg, literal_dp(0)))
return self._print(new_expr)
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) then" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else")
else:
lines.append("else if (%s) then" % self._print(c))
lines.append(self._print(e))
lines.append("end if")
return "\n".join(lines)
elif self._settings["standard"] >= 95:
# Only supported in F95 and newer:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
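            # For example, Piecewise((a, c1), (b, True)) is rendered by the loop
            # below as the nested intrinsic call merge(a, b, c1).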
pattern = "merge({T}, {F}, {COND})"
code = self._print(expr.args[-1].expr)
terms = list(expr.args[:-1])
while terms:
e, c = terms.pop()
expr = self._print(e)
cond = self._print(c)
code = pattern.format(T=expr, F=code, COND=cond)
return code
else:
# `merge` is not supported prior to F95
raise NotImplementedError("Using Piecewise as an expression using "
"inline operators is not supported in "
"standards earlier than Fortran95.")
def _print_MatrixElement(self, expr):
return "{0}({1}, {2})".format(self.parenthesize(expr.parent,
PRECEDENCE["Atom"], strict=True), expr.i + 1, expr.j + 1)
def _print_Add(self, expr):
# purpose: print complex numbers nicely in Fortran.
# collect the purely real and purely imaginary parts:
pure_real = []
pure_imaginary = []
mixed = []
for arg in expr.args:
if arg.is_number and arg.is_real:
pure_real.append(arg)
elif arg.is_number and arg.is_imaginary:
pure_imaginary.append(arg)
else:
mixed.append(arg)
if len(pure_imaginary) > 0:
if len(mixed) > 0:
PREC = precedence(expr)
term = Add(*mixed)
t = self._print(term)
if t.startswith('-'):
sign = "-"
t = t[1:]
else:
sign = "+"
if precedence(term) < PREC:
t = "(%s)" % t
return "cmplx(%s,%s) %s %s" % (
self._print(Add(*pure_real)),
self._print(-S.ImaginaryUnit*Add(*pure_imaginary)),
sign, t,
)
else:
return "cmplx(%s,%s)" % (
self._print(Add(*pure_real)),
self._print(-S.ImaginaryUnit*Add(*pure_imaginary)),
)
else:
return CodePrinter._print_Add(self, expr)
def _print_Function(self, expr):
# All constant function args are evaluated as floats
prec = self._settings['precision']
args = [N(a, prec) for a in expr.args]
eval_expr = expr.func(*args)
if not isinstance(eval_expr, Function):
return self._print(eval_expr)
else:
return CodePrinter._print_Function(self, expr.func(*args))
def _print_ImaginaryUnit(self, expr):
# purpose: print complex numbers nicely in Fortran.
return "cmplx(0,1)"
def _print_int(self, expr):
return str(expr)
def _print_Mul(self, expr):
# purpose: print complex numbers nicely in Fortran.
if expr.is_number and expr.is_imaginary:
return "cmplx(0,%s)" % (
self._print(-S.ImaginaryUnit*expr)
)
else:
return CodePrinter._print_Mul(self, expr)
def _print_Pow(self, expr):
PREC = precedence(expr)
if expr.exp == -1:
return '1.0/%s' % (self.parenthesize(expr.base, PREC))
elif expr.exp == 0.5:
if expr.base.is_integer:
# Fortran intrinsic sqrt() does not accept integer argument
if expr.base.is_Number:
return 'sqrt(%s.0d0)' % self._print(expr.base)
else:
return 'sqrt(dble(%s))' % self._print(expr.base)
else:
return 'sqrt(%s)' % self._print(expr.base)
else:
return CodePrinter._print_Pow(self, expr)
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return "%d.0d0/%d.0d0" % (p, q)
def _print_Float(self, expr):
printed = CodePrinter._print_Float(self, expr)
e = printed.find('e')
if e > -1:
return "%sd%s" % (printed[:e], printed[e + 1:])
return "%sd0" % printed
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s(%s)" % (self._print(expr.base.label), ", ".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_For(self, expr):
target = self._print(expr.target)
if isinstance(expr.iterable, Range):
start, stop, step = expr.iterable.args
else:
raise NotImplementedError("Only iterable currently supported is Range")
body = self._print(expr.body)
return ('do {target} = {start}, {stop}, {step}\n'
'{body}\n'
'end do').format(target=target, start=start, stop=stop,
step=step, body=body)
def _print_Equality(self, expr):
lhs, rhs = expr.args
return ' == '.join(map(self._print, (lhs, rhs)))
def _print_Unequality(self, expr):
lhs, rhs = expr.args
return ' /= '.join(map(self._print, (lhs, rhs)))
def _print_Type(self, type_):
type_ = self.type_aliases.get(type_, type_)
type_str = self.type_mappings.get(type_, type_.name)
module_uses = self.type_modules.get(type_)
if module_uses:
            for k, v in module_uses.items():
self.module_uses[k].add(v)
return type_str
def _print_Declaration(self, expr):
var, val = expr.variable, expr.value
if isinstance(var, Pointer):
raise NotImplementedError("Pointers are not available by default in Fortran.")
if self._settings["standard"] >= 90:
result = '{t}{vc} :: {s}'.format(
t=self._print(var.type),
vc=', parameter' if var.value_const else '',
s=self._print(var.symbol)
)
if val is not None:
result += ' = %s' % self._print(val)
else:
if var.value_const or val:
raise NotImplementedError("F77 init./parameter statem. req. multiple lines.")
            result = ' '.join([self._print(var.type), self._print(var.symbol)])
return result
def _print_BooleanTrue(self, expr):
return '.true.'
def _print_BooleanFalse(self, expr):
return '.false.'
def _pad_leading_columns(self, lines):
result = []
for line in lines:
if line.startswith('!'):
result.append(self._lead['comment'] + line[1:].lstrip())
else:
result.append(self._lead['code'] + line)
return result
def _wrap_fortran(self, lines):
"""Wrap long Fortran lines
Argument:
lines -- a list of lines (without \\n character)
A comment line is split at white space. Code lines are split with a more
complex rule to give nice results.
"""
# routine to find split point in a code line
my_alnum = set("_+-." + string.digits + string.ascii_letters)
my_white = set(" \t()")
def split_pos_code(line, endpos):
if len(line) <= endpos:
return len(line)
pos = endpos
split = lambda pos: \
(line[pos] in my_alnum and line[pos - 1] not in my_alnum) or \
(line[pos] not in my_alnum and line[pos - 1] in my_alnum) or \
(line[pos] in my_white and line[pos - 1] not in my_white) or \
(line[pos] not in my_white and line[pos - 1] in my_white)
while not split(pos):
pos -= 1
if pos == 0:
return endpos
return pos
# split line by line and add the split lines to result
result = []
if self._settings['source_format'] == 'free':
trailing = ' &'
else:
trailing = ''
for line in lines:
if line.startswith(self._lead['comment']):
# comment line
if len(line) > 72:
pos = line.rfind(" ", 6, 72)
if pos == -1:
pos = 72
hunk = line[:pos]
line = line[pos:].lstrip()
result.append(hunk)
while len(line) > 0:
pos = line.rfind(" ", 0, 66)
if pos == -1 or len(line) < 66:
pos = 66
hunk = line[:pos]
line = line[pos:].lstrip()
result.append("%s%s" % (self._lead['comment'], hunk))
else:
result.append(line)
elif line.startswith(self._lead['code']):
# code line
pos = split_pos_code(line, 72)
hunk = line[:pos].rstrip()
line = line[pos:].lstrip()
if line:
hunk += trailing
result.append(hunk)
while len(line) > 0:
pos = split_pos_code(line, 65)
hunk = line[:pos].rstrip()
line = line[pos:].lstrip()
if line:
hunk += trailing
result.append("%s%s" % (self._lead['cont'], hunk))
else:
result.append(line)
return result
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, string_types):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
free = self._settings['source_format'] == 'free'
code = [ line.lstrip(' \t') for line in code ]
inc_keyword = ('do ', 'if(', 'if ', 'do\n', 'else')
dec_keyword = ('end do', 'enddo', 'end if', 'endif', 'else')
increase = [ int(any(map(line.startswith, inc_keyword)))
for line in code ]
decrease = [ int(any(map(line.startswith, dec_keyword)))
for line in code ]
continuation = [ int(any(map(line.endswith, ['&', '&\n'])))
for line in code ]
level = 0
cont_padding = 0
tabwidth = 3
new_code = []
for i, line in enumerate(code):
if line == '' or line == '\n':
new_code.append(line)
continue
level -= decrease[i]
if free:
padding = " "*(level*tabwidth + cont_padding)
else:
padding = " "*level*tabwidth
line = "%s%s" % (padding, line)
if not free:
line = self._pad_leading_columns([line])[0]
new_code.append(line)
if continuation[i]:
cont_padding = 2*tabwidth
else:
cont_padding = 0
level += increase[i]
if not free:
return self._wrap_fortran(new_code)
return new_code
def fcode(expr, assign_to=None, **settings):
"""Converts an expr to a string of fortran code
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
DEPRECATED. Use type_mappings instead. The precision for numbers such
as pi [default=17].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, cfunction_string)]. See below
for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
source_format : optional
The source format can be either 'fixed' or 'free'. [default='fixed']
standard : integer, optional
The Fortran standard to be followed. This is specified as an integer.
Acceptable standards are 66, 77, 90, 95, 2003, and 2008. Default is 77.
Note that currently the only distinction internally is between
standards before 95, and those 95 and after. This may change later as
more features are added.
Examples
========
>>> from sympy import fcode, symbols, Rational, sin, ceiling, floor
>>> x, tau = symbols("x, tau")
>>> fcode((2*tau)**Rational(7, 2))
' 8*sqrt(2.0d0)*tau**(7.0d0/2.0d0)'
>>> fcode(sin(x), assign_to="s")
' s = sin(x)'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
cfunction_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "floor": [(lambda x: not x.is_integer, "FLOOR1"),
... (lambda x: x.is_integer, "FLOOR2")]
... }
>>> fcode(floor(x) + ceiling(x), user_functions=custom_functions)
' CEIL(x) + FLOOR1(x)'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(fcode(expr, tau))
if (x > 0) then
tau = x + 1
else
tau = x
end if
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> fcode(e.rhs, assign_to=e.lhs, contract=False)
' Dy(i) = (y(i + 1) - y(i))/(t(i + 1) - t(i))'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(fcode(mat, A))
A(1, 1) = x**2
if (x > 0) then
A(2, 1) = x + 1
else
A(2, 1) = x
end if
A(3, 1) = sin(x)
"""
return FCodePrinter(settings).doprint(expr, assign_to)
def print_fcode(expr, **settings):
"""Prints the Fortran representation of the given expression.
See fcode for the meaning of the optional arguments.
"""
print(fcode(expr, **settings))
| 37.042994 | 108 | 0.549155 |
4a2665b628fbe584f2a8e7ced912de880d01d0f4 | 8,512 | py | Python | program/object-detection-tensorrt-loadgen-py/tensorrt_detect_loadgen.py | dsavenko/ck-mlperf | 55ab3d2ed4cd0e11fb3103f76429a6ebec3ea788 | [
"BSD-3-Clause"
] | 35 | 2018-09-20T10:21:43.000Z | 2021-07-28T15:01:48.000Z | program/object-detection-tensorrt-loadgen-py/tensorrt_detect_loadgen.py | dsavenko/ck-mlperf | 55ab3d2ed4cd0e11fb3103f76429a6ebec3ea788 | [
"BSD-3-Clause"
] | 25 | 2018-12-14T07:48:03.000Z | 2021-05-01T13:35:31.000Z | program/object-detection-tensorrt-loadgen-py/tensorrt_detect_loadgen.py | dsavenko/ck-mlperf | 55ab3d2ed4cd0e11fb3103f76429a6ebec3ea788 | [
"BSD-3-Clause"
] | 14 | 2018-12-13T18:57:11.000Z | 2021-04-27T14:32:51.000Z | #!/usr/bin/env python3
import array
import numpy as np
import os
import sys
import time
from coco_helper import (load_image_by_index_and_normalize, image_filenames, original_w_h,
class_labels, num_classes, bg_class_offset, class_map,
MODEL_DATA_LAYOUT, MODEL_USE_DLA, BATCH_SIZE,
MODEL_IMAGE_CHANNELS, MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH, MODEL_INPUT_DATA_TYPE)
from tensorrt_helper import (initialize_predictor, inference_for_given_batch)
import mlperf_loadgen as lg
## LoadGen test properties:
#
LOADGEN_SCENARIO = os.getenv('CK_LOADGEN_SCENARIO', 'SingleStream')
LOADGEN_MODE = os.getenv('CK_LOADGEN_MODE', 'AccuracyOnly')
LOADGEN_BUFFER_SIZE = int(os.getenv('CK_LOADGEN_BUFFER_SIZE')) # set to how many samples are you prepared to keep in memory at once
LOADGEN_DATASET_SIZE = int(os.getenv('CK_LOADGEN_DATASET_SIZE')) # set to how many total samples to choose from (0 = full set)
LOADGEN_COUNT_OVERRIDE = os.getenv('CK_LOADGEN_COUNT_OVERRIDE', '') # if not set, use value from LoadGen's config file
LOADGEN_MULTISTREAMNESS = os.getenv('CK_LOADGEN_MULTISTREAMNESS', '') # if not set, use value from LoadGen's config file
MLPERF_CONF_PATH = os.environ['CK_ENV_MLPERF_INFERENCE_MLPERF_CONF']
USER_CONF_PATH = os.environ['CK_LOADGEN_USER_CONF']
MODEL_NAME = os.getenv('ML_MODEL_MODEL_NAME', 'unknown_model')
## Model properties:
#
MODEL_MAX_PREDICTIONS = int(os.getenv('ML_MODEL_MAX_PREDICTIONS', 100))
## Misc
#
VERBOSITY_LEVEL = int(os.getenv('CK_VERBOSE', '0'))
# Load preprocessed image filepaths:
LOADGEN_DATASET_SIZE = LOADGEN_DATASET_SIZE or len(image_filenames)
def tick(letter, quantity=1):
if VERBOSITY_LEVEL:
print(letter + (str(quantity) if quantity>1 else ''), end='')
# Currently loaded preprocessed images are stored in pre-allocated numpy arrays:
preprocessed_image_buffer = None
preprocessed_image_map = np.empty(LOADGEN_DATASET_SIZE, dtype=int)   # this type should be able to hold indices in range 0:LOADGEN_DATASET_SIZE
def load_query_samples(sample_indices): # 0-based indices in our whole dataset
global preprocessed_image_buffer
if VERBOSITY_LEVEL > 1:
print("load_query_samples({})".format(sample_indices))
len_sample_indices = len(sample_indices)
tick('B', len_sample_indices)
if preprocessed_image_buffer is None: # only do this once, once we know the expected size of the buffer
preprocessed_image_buffer = np.empty((len_sample_indices, MODEL_IMAGE_CHANNELS, MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH), dtype=MODEL_INPUT_DATA_TYPE)
for buffer_index, sample_index in zip(range(len_sample_indices), sample_indices):
preprocessed_image_map[sample_index] = buffer_index
preprocessed_image_buffer[buffer_index] = np.array( load_image_by_index_and_normalize(sample_index) )
tick('l')
if VERBOSITY_LEVEL:
print('')
def unload_query_samples(sample_indices):
#print("unload_query_samples({})".format(sample_indices))
tick('U')
if VERBOSITY_LEVEL:
print('')
def issue_queries(query_samples):
global BATCH_SIZE
if VERBOSITY_LEVEL > 2:
printable_query = [(qs.index, qs.id) for qs in query_samples]
print("issue_queries( {} )".format(printable_query))
tick('Q', len(query_samples))
for j in range(0, len(query_samples), BATCH_SIZE):
batch = query_samples[j:j+BATCH_SIZE] # NB: the last one may be shorter than BATCH_SIZE in length
batch_data = preprocessed_image_buffer[preprocessed_image_map[ [qs.index for qs in batch] ]]
trimmed_batch_results, inference_time_s = inference_for_given_batch(batch_data)
actual_batch_size = len(trimmed_batch_results)
if VERBOSITY_LEVEL > 1:
print("[batch of {}] inference={:.2f} ms".format(actual_batch_size, inference_time_s*1000))
tick('p', len(batch))
if VERBOSITY_LEVEL > 2:
print("predicted_batch_results = {}".format(trimmed_batch_results))
response = []
response_array_refs = [] # This is needed to guarantee that the individual buffers to which we keep extra-Pythonian references, do not get garbage-collected.
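        # Layout of each per-sample result buffer produced by the detector wrapper:
        # entries [i*7:(i+1)*7] hold (image_id, ymin, xmin, ymax, xmax, score, class)
        # for the i-th detection, and the float at index MODEL_MAX_PREDICTIONS*7,
        # reinterpreted as int32 below, stores the number of valid detections.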
for qs, all_boxes_for_this_sample in zip(batch, trimmed_batch_results):
num_active_boxes_for_this_sample = all_boxes_for_this_sample[MODEL_MAX_PREDICTIONS*7].view('int32')
global_image_index = qs.index
width_orig, height_orig = original_w_h[global_image_index]
reformed_active_boxes_for_this_sample = []
for i in range(num_active_boxes_for_this_sample):
(image_id, ymin, xmin, ymax, xmax, confidence_score, class_number) = all_boxes_for_this_sample[i*7:(i+1)*7]
if class_map:
class_number = float(class_map[int(class_number)])
reformed_active_boxes_for_this_sample += [
float(global_image_index), ymin, xmin, ymax, xmax, confidence_score, class_number ]
response_array = array.array("B", np.array(reformed_active_boxes_for_this_sample, np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(qs.id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
#tick('R', len(response))
sys.stdout.flush()
def flush_queries():
pass
def process_latencies(latencies_ns):
print("LG called process_latencies({})".format(latencies_ns))
latencies_size = len(latencies_ns)
latencies_avg = int(sum(latencies_ns)/latencies_size)
latencies_sorted = sorted(latencies_ns)
latencies_p50 = int(latencies_size * 0.5);
latencies_p90 = int(latencies_size * 0.9);
print("--------------------------------------------------------------------")
print("| LATENCIES (in nanoseconds and fps) |")
print("--------------------------------------------------------------------")
print("Number of queries run: {:9d}".format(latencies_size))
print("Min latency: {:9d} ns ({:.3f} fps)".format(latencies_sorted[0], 1e9/latencies_sorted[0]))
print("Median latency: {:9d} ns ({:.3f} fps)".format(latencies_sorted[latencies_p50], 1e9/latencies_sorted[latencies_p50]))
print("Average latency: {:9d} ns ({:.3f} fps)".format(latencies_avg, 1e9/latencies_avg))
print("90 percentile latency: {:9d} ns ({:.3f} fps)".format(latencies_sorted[latencies_p90], 1e9/latencies_sorted[latencies_p90]))
print("Max latency: {:9d} ns ({:.3f} fps)".format(latencies_sorted[-1], 1e9/latencies_sorted[-1]))
print("--------------------------------------------------------------------")
def benchmark_using_loadgen():
"Perform the benchmark using python API for the LoadGen library"
pycuda_context, max_batch_size, input_volume, output_volume, num_layers = initialize_predictor()
scenario = {
'SingleStream': lg.TestScenario.SingleStream,
'MultiStream': lg.TestScenario.MultiStream,
'Server': lg.TestScenario.Server,
'Offline': lg.TestScenario.Offline,
}[LOADGEN_SCENARIO]
mode = {
'AccuracyOnly': lg.TestMode.AccuracyOnly,
'PerformanceOnly': lg.TestMode.PerformanceOnly,
'SubmissionRun': lg.TestMode.SubmissionRun,
}[LOADGEN_MODE]
ts = lg.TestSettings()
ts.FromConfig(MLPERF_CONF_PATH, MODEL_NAME, LOADGEN_SCENARIO)
ts.FromConfig(USER_CONF_PATH, MODEL_NAME, LOADGEN_SCENARIO)
ts.scenario = scenario
ts.mode = mode
if LOADGEN_MULTISTREAMNESS:
ts.multi_stream_samples_per_query = int(LOADGEN_MULTISTREAMNESS)
if LOADGEN_COUNT_OVERRIDE:
ts.min_query_count = int(LOADGEN_COUNT_OVERRIDE)
ts.max_query_count = int(LOADGEN_COUNT_OVERRIDE)
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(LOADGEN_DATASET_SIZE, LOADGEN_BUFFER_SIZE, load_query_samples, unload_query_samples)
log_settings = lg.LogSettings()
log_settings.enable_trace = False
lg.StartTestWithLogSettings(sut, qsl, ts, log_settings)
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
pycuda_context.pop()
try:
benchmark_using_loadgen()
except Exception as e:
print('{}'.format(e))
| 40.923077 | 168 | 0.683153 |
4a2665f145842f18f5531d92dce8b3114a3f788c | 37,026 | py | Python | dependency_miner/ltminer.py | AshwiniJogbhat/dependency-miner-pm4py | 2b05781dd30d7462adce5dbd30cac5b6365e43fa | [
"MIT"
] | null | null | null | dependency_miner/ltminer.py | AshwiniJogbhat/dependency-miner-pm4py | 2b05781dd30d7462adce5dbd30cac5b6365e43fa | [
"MIT"
] | null | null | null | dependency_miner/ltminer.py | AshwiniJogbhat/dependency-miner-pm4py | 2b05781dd30d7462adce5dbd30cac5b6365e43fa | [
"MIT"
] | null | null | null | import copy
import os
from os import listdir
from os.path import isfile, join
import dependency_miner.settings as settings
#import Miner.to_petri_net_bordered as discover_net
#from Miner.helper_functions import *
from pm4py.objects.log.importer.xes import importer as xes_importer
import pm4py.objects.process_tree as pt
from pm4py.analysis import check_soundness
from pm4py.algo.evaluation.precision import algorithm as precision_evaluator
from pm4py.algo.evaluation.replay_fitness import algorithm as replay_fitness_evaluator
from pm4py.visualization.petri_net import visualizer as pn_visualizer
from pm4py.objects.process_tree.utils import bottomup as b
from pm4py.algo.discovery.inductive import algorithm as inductive_miner
from pm4py.visualization.process_tree import visualizer as pt_visualizer
from pm4py.objects.petri_net.exporter import exporter as pnml_exporter
from pm4py.objects.petri_net.importer import importer as pnml_importer
from pm4py.objects.petri_net.obj import PetriNet, Marking
from pm4py.statistics.traces.generic.log import case_statistics
from pm4py.objects.process_tree.obj import Operator as pt_op
from pm4py.objects.process_tree.utils import generic as util
from pm4py.objects.petri_net import obj
from pm4py.objects.petri_net.utils import petri_utils as pn_util
log_attributes = {}
def miner(log_path, support, confidence, lift, sound):
"""
    Extracts long-term dependencies between the tasks using the given input thresholds.
    Parameters:
        log_path (str): Path of event log
        support (str): Threshold value for support
        confidence (str) : Threshold value for confidence
        lift (str): Threshold value for lift
sound (str) : sound model requirement Yes/No
Returns:
rules (dict): Rules added to the repaired precise net
precision (float): Improved precision value
fitness (float): Fitness of the repaired Petri net
net_path (str) : Path of the .SVG file generated for the repaired net
pnml_path (str) : Path of the .pnml file generated for the repaired net
"""
settings.init()
eventlogs, attributes, log, tree, net, im, fm = set_event_log(log_path)
#process_tree_path = display_process_tree()
if sound == 'Yes' or sound == 'yes':
sound = 'on'
net_path, precision, fitness, rules, pnml_path = repair_petri_net(support, confidence, lift, sound)
print("Added rules in the repaired Petri net", rules)
print("Precision of the repaired Petri net", precision)
print("Fitness of the repaired Petri net", fitness)
print("Saved Path of the repaired Petri net in .SVG format", net_path)
print("Saved Path of the repaired Petri net in .pnml format", pnml_path)
return rules, precision, fitness, net_path, pnml_path
def set_event_log(file_path):
"""
Given an input event log, the function imports the log and returns discovered process tree
and petri net model along with the details of the log.
Parameters:
file_path (str): Path of event log
Returns:
eventlogs (list): List of event logs
log_attributes (dict): Details of the log
log (EventLog) : Imported event log
tree (processtree): Discovered Process tree from the given event log
net (PetriNet) : Discovered Petri net from the given event log
im (Marking) : Initial marking of the generated Petri net
fm (Marking) : Final marking of the generated Petri net
"""
filename = os.path.basename(file_path)
settings.EVENT_LOG_NAME = filename
settings.EVENT_LOG_PATH = file_path
log = import_event_log(file_path)
settings.EVENT_LOG = log
no_traces = len(log)
no_events = sum([len(trace) for trace in log])
log_attributes['no_traces'] = no_traces
log_attributes['no_events'] = no_events
#discover Tree
tree = None
tree = discover_process_tree(log)
#discover net
net = None
im = None
fm = None
settings.RULES_DICT = {}
settings.RULES = {}
settings.PRECISION = None
settings.FITNESS = None
net, im, fm = discover_petri_net(tree)
pnml_path = export_pnml(net, im, fm)
# disover rules
rules_dict = {}
xor_tree = []
rules_dicts, xor_tree = findAsociationRules()
settings.RULES_DICT = copy.deepcopy(rules_dicts)
settings.XOR_TREES = copy.deepcopy(xor_tree)
#eventlogs = [f for f in listdir(file_path) if isfile(join(file_path, f))]
eventlogs = ""
return eventlogs, log_attributes, log, tree, net, im, fm
def findAsociationRules():
"""
This function mines the long-term dependency rules between XOR branches of the process tree.
Parameters:
Returns:
Rules (dict) : Discovered rules between XOR branches of the process tree
XOR blocks (dict) : Candidate XOR blocks present in the process tree
"""
tree = settings.PROCESS_TREE
log = settings.EVENT_LOG
# Explore Log
total_traces = 0
xor_tree = {}
rules_dict = {}
variants_count = case_statistics.get_variant_statistics(log)
variants_count = sorted(variants_count, key=lambda x: x['count'], reverse=True)
rules_values = {}
for ele in variants_count:
total_traces += ele['count']
rule_dicti = {}
## Firstly, get all XOR tree list if it has no tau at the leaves.
xor_tree = {}
xor_tree = get_xor_trees(tree)
## find all valid XOR combinations
for i in range(1, len(xor_tree)):
for j in range(i+1, len(xor_tree)+1):
max_v = 0
rules_values = {}
LCA = util.common_ancestor(xor_tree[f'X{i}'], xor_tree[f'X{j}'])
if LCA.operator == pt_op.SEQUENCE and (pt_op.XOR not in get_ancestors_operator(xor_tree[f'X{i}'], LCA)) and (pt_op.XOR not in get_ancestors_operator(xor_tree[f'X{j}'], LCA)) and (pt_op.LOOP not in get_ancestors_operator(xor_tree[f'X{i}'], LCA)) and (pt_op.LOOP not in get_ancestors_operator(xor_tree[f'X{j}'], LCA)):
xor_children = []
source, target = get_candidates(xor_tree[f'X{i}'], xor_tree[f'X{j}'])
for s in source:
for t in target:
values = []
support = get_support_updated([s,t], variants_count, total_traces, source, target)
conf_value = get_confidence([s,t], support[tuple(s), tuple(t)], variants_count, total_traces)
lift_value = get_lift([s, t], conf_value, variants_count, total_traces)
values.append(support[tuple(s), tuple(t)])
values.append(conf_value)
values.append(lift_value)
l = [s,t]
rules_values[(f"{s}", f"{t}")] = values
if values[2] > max_v:
max_v = values[2]
rules_values['Max'] = max_v
rule_dicti[(f"X{i}", f"X{j}")] = rules_values
sorted_rule_dict = dict(sorted(rule_dicti.items(), key=lambda item: item[1]['Max'], reverse=True))
return sorted_rule_dict, xor_tree
def import_event_log(log_path):
EVENT_LOG = xes_importer.apply(log_path)
return EVENT_LOG
def discover_petri_net(tree):
"""
Given a process tree, the function generates the corresponding petri net.
Parameters:
tree (ProcessTree): The discovered process tree from the given event log
Returns:
net (PetriNet): Generated Petri net of the log
im (Marking) : Initial marking of the generated Petri net
fm (Marking) : Final marking of the generated Petri net
"""
orig_net = None
im = None
fm = None
settings.sink_dict = {}
settings.src_dict = {}
orig_net, im, fm = apply(tree)
settings.PETRI_NET_ORIG = orig_net
settings.I_MARKS_ORIG = im
settings.F_MARKS_ORIG = fm
settings.PETRI_NET = orig_net
return orig_net, im, fm
def discover_process_tree(log):
"""
Given an event log, the function discovers the process tree using inductive miner algorithm.
Parameters:
log (EventLog): Given event log
Returns:
tree (ProcessTree): The generated Process tree from the log
"""
tree = inductive_miner.apply_tree(log)
settings.PROCESS_TREE = tree
return tree
def export_pnml(precise_net, im, fm, net_name=None):
"""
The function exports the Petri net in pnml format and saves it in current directory
Parameter:
precise_net (PetriNet) : Petri net model to be stored in .pnml format
im (Marking) : Initial marking of the generated Petri net
fm (Marking) : Final marking of the generated Petri net
net_name (str) : Any prefered name to be stored, by default it is the log name
Returns:
pnml_path (str): The path of the saved Petri net model in .pnml form
"""
if net_name == None:
net_name = f"{settings.EVENT_LOG_NAME}"
net_name = net_name.rsplit('.', 1)[0]
net_name = net_name+".pnml"
settings.PNML_PATH = None
log_path = os.path.dirname(copy.deepcopy(settings.EVENT_LOG_PATH))
pnml_path = os.path.join(log_path, net_name)
pnml_exporter.apply(precise_net, im, pnml_path)
pnml_exporter.apply(precise_net, im, pnml_path , final_marking=fm)
settings.PNML_PATH = pnml_path
return pnml_path
def get_xor_trees(pt, xor_tree = None):
"""
    Given a process tree, it extracts all XOR blocks from the process tree
    Parameter:
        pt (ProcessTree) : Generated process tree from the log
        xor_tree (dict) : Already collected XOR blocks, empty at the beginning
    Returns:
        xor_tree (dict): All extracted XOR blocks from the process tree
"""
xor_tree = xor_tree if xor_tree is not None else {}
if pt.operator != None:
for node in pt.children:
if node.operator != None and node.operator == pt_op.XOR and not check_for_tau(node):
xor_tree[f'X{len(xor_tree)+1}'] = node
else:
xor_tree = get_xor_trees(node, xor_tree)
return xor_tree
def get_candidates(node1, node2):
"""
    Given two XOR blocks, checks whether they are in sequential order and, if so,
    collects the branch activities of each block.
    Parameter:
        node1 (ProcessTree) : Source XOR block
        node2 (ProcessTree) : Target XOR block
Returns:
XOR_source (list) : source branches of the candidate XOR blocks pair
XOR_target (list) : target branches of the candidate XOR blocks pair
"""
XOR_source = []
XOR_target = []
if util.common_ancestor(node1, node2).operator == pt_op.SEQUENCE:
XOR_source = get_xor_children(node1, XOR_source)
XOR_target = get_xor_children(node2, XOR_target)
return XOR_source, XOR_target
def get_ancestors_operator(t, until, include_until = True):
"""
Given an XOR block and lowest common ancestor(LCA),
the method returns all operators present in the path from XOR branch to LCA, adapted from PM4Py.
Parameter:
t (ProcessTree) : Source XOR block
until (ProcessTree) : LCA of those XOR blocks
Returns:
ancestors (Operator): All operators present in the path from XOR block to the given LCA
"""
ancestors = list()
if t == until:
return ancestors
parent = t.parent
while parent != until:
ancestors.append(parent.operator)
parent = parent.parent
if parent is None:
return None
if include_until:
ancestors.append(until.operator)
return ancestors
def get_lift(pair, confidence, variants, total):
"""
    Given a long-term dependency rule and the confidence value of the rule,
    it calculates the lift of the rule.
Parameter:
pair (dict) : Long-term dependency rules
confidence (str) : Confidence of the rule
variants (dict) : Unique traces with the count of those traces
total (str) : Total number of traces in Event log
Returns:
lift (str): Lift value of the rule
"""
rhs_c = 0
for item in variants:
for i in range(0, len(pair[1])):
if not repr(pair[1][i]) in item['variant']:
continue
else:
rhs_c += item['count']
break
sup_c = round((rhs_c / total),3)
lift = round((confidence / sup_c), 3)
return lift
def get_support_updated(pair, variants, total, source, target):
"""
    Given a long-term dependency rule, the variants of the event log and the total number of traces,
    it calculates the support value of the rule.
Parameter:
pair (dict) : Long-term dependency rules
variants (dict) : Unique traces with the count of those traces
total (str) : Total number of traces in Event log
source (list) : All source XOR branches
target (list) : All target XOR branches
Returns:
sup (dict): Support value of the rule
"""
lhs_c = 0
rule_count = 0
l_found = 0
r_found = 0
sup = {}
for item in variants:
trace = item['variant'].split(",")
#added line
temp_src = [str(i) for i in pair[0]]
temp_tgt = [str(i) for i in pair[1]]
for i in range(0, len(trace)):
if not str(trace[i]) in temp_src:#repr(pair[0]):
continue
else:
l_found = 1
track = 0
for j in range(i, len(trace)):
track = j
if str(trace[j]) in temp_tgt:#repr(pair[1]):
if l_found:
r_found = 1
rule_count += item['count']
i = j
break
else:
if str(trace[j]) in list(str(source)) and str(trace[j]) not in temp_src: #repr(pair[0]):
l_found = 0
break
if track == len(trace) - 1:
break
if l_found and r_found:
break
sup[tuple(pair[0]), tuple(pair[1])] = round((rule_count / total), 3)
return sup
def get_confidence(pair, sup, variants, total):
"""
    Given a long-term dependency rule, the variants of the event log and the total number of traces,
    it calculates the confidence value of the rule.
Parameter:
pair (dict) : Long-term dependency rules
support (dict) : support of the rule
variants (dict) : Unique traces with the count of those traces
total (str) : Total number of traces in Event log
Returns:
conf (str): Confidence value of the rule
"""
lhs_c = 0
for item in variants:
trace = item['variant'].split(",")
for i in range(0, len(pair[0])):
if not repr(pair[0][i]) in trace:#item['variant']:
continue
else:
lhs_c += item['count']
break
sup_c = round((lhs_c / total),3)
conf = round((sup / sup_c), 3)
return conf
def check_for_tau(tree):
"""
    Given a process tree, this function checks whether an invisible (tau) leaf exists in the tree.
Parameter:
tree (ProcessTree): Generated Process tree for the given log
Returns:
bool : True if tau exists, else False
"""
for node in tree.children:
leaves = util.get_leaves(node)
if len(leaves) == 1:
for leaf in leaves:
if util.is_tau_leaf(leaf):
return True
def get_xor_children(node, xor_list=None):
"""
    Given an XOR block, this function collects the leaf activities of each of its branches.
    Parameter:
        node (ProcessTree): XOR block whose branches are inspected
        xor_list (list): Already collected branch leaf lists, empty at the beginning
    Returns:
        xor_list (list) : One list of leaf activities per XOR branch
    """
    xor_list = xor_list if xor_list is not None else []
for child in node.children:
if len(get_xor_leaves(child)) > 0:
xor_list.append(get_xor_leaves(child))
return xor_list
def get_xor_leaves(xor_tree, leaves=None):
"""
    Given an XOR tree, this function returns the leaves of the tree.
Parameter:
xor_tree (ProcessTree): XOR block
Returns:
leaves (list) : Leaves of the XOR trees
"""
tau_exist = 0
leaves = leaves if leaves is not None else []
if len(xor_tree.children) == 0:
if xor_tree.label is not None:
leaves.append(xor_tree)
else:
for c in xor_tree.children:
leaves = get_xor_leaves(c, leaves)
return leaves
def repair_petri_net(support, confidence, lift, sound):
"""
    Given a Petri net and threshold values, the function repairs the free-choice Petri net
Parameter:
support (str): Threshold value for support
confidence (str) : Threshold value for confidence
lift (str): Threshold value for confidence
sound (str) : sound model requirement Yes/No
Returns:
net_path (str) : Path of the .SVG file generated for the repaired net
precision (float): Improved precision value
fitness (float): Fitness of the repaired Petri net
rules (dict): Rules added to the repaired precise net
pnml_path (str) : Path of the .pnml file generated for the repaired net
"""
p_net = None
im = None
fm = None
repaired_net = None
sound_net = None
p_net, im, fm = discover_petri_net(settings.PROCESS_TREE)
rules_dict = dict(settings.RULES_DICT)
if sound == 'on':
print("Sound Model Requirement is On")
rules_dict_sound = soundness_at_XOR_tree(rules_dict)
else:
print("Sound Model Requirement is Off")
rules_dict_sound = rules_dict
repair_net = 1
rules_dicti = {}
if rules_dict_sound != {}:
for pair, value in rules_dict_sound.items():
rules_dicti.update(value)
if sound == 'on':
maxi = list()
for key, value in rules_dict_sound.items():
for k, v in value.items():
if k == 'Max':
maxi.append(v)
if max(maxi) < float(lift):
repair_net = 0
settings.RULES = {}
del rules_dicti['Max']
if repair_net:
repaired_net = None
if sound == 'on':
sound_net = discover_sound_petrinet(rules_dicti, p_net)
repaired_net, rules = repair_sound_Model(sound_net, rules_dicti, support, confidence, lift, sound)
check_soundness(repaired_net, im, fm)
else:
repaired_net, rules = repair_unsound_model(p_net, rules_dicti, support, confidence, lift)
settings.PETRI_NET = None
settings.PETRI_NET = repaired_net
settings.RULES = rules
    precision = get_precision(settings.PETRI_NET, im, fm)
fitness = get_fitness(settings.PETRI_NET, im, fm)
net_path = display_petri_net(settings.PETRI_NET)
pnml_path = export_pnml(settings.PETRI_NET, im,fm)
return net_path, round(precision,2), round(fitness['average_trace_fitness'], 2), settings.RULES, pnml_path
def get_soundness():
"""
Returns the soundness of the model.
"""
return check_soundness(settings.PETRI_NET, settings.I_MARKS_ORIG, settings.F_MARKS_ORIG)
def get_precision(pn_net, im, fm):
"""
Returns the precision of the model.
Parameter:
net (PetriNet): Generated Petri net of the log
im (Marking) : Initial marking of the generated Petri net
fm (Marking) : Final marking of the generated Petri net
Return:
Precision (float) : Precision value measured using pm4py
"""
log = settings.EVENT_LOG
prec = precision_evaluator.apply(log, pn_net, im, fm, variant=precision_evaluator.Variants.ALIGN_ETCONFORMANCE)
return prec
def get_fitness(net, im, fm):
"""
Returns the precision of the model.
Parameter:
net (PetriNet): Generated Petri net of the log
im (Marking) : Initial marking of the generated Petri net
fm (Marking) : Final marking of the generated Petri net
Return:
Fitness (float) : Fitness value measured using pm4py
"""
log = settings.EVENT_LOG
fitness = replay_fitness_evaluator.apply(log, net, im, fm, variant=replay_fitness_evaluator.Variants.ALIGNMENT_BASED)
return fitness
def repair_unsound_model(net, rules_dict, support, confidence, lift):
"""
Repairing a bordered Petri net generated from Process tree to include long-term dependencies in it and
create a precise Petri net. Soundness parameter is not given.
Parameter:
net (PetriNet): Generated Petri net of the log
rules (dict) : Discovered rules with the association rule metrics values
support (str): Threshold value for support
confidence (str) : Threshold value for confidence
        lift (str): Threshold value for lift
Return:
net (PetriNet): Repaired Petri net of the log
rules (dict) : Added rules to the net with their association rule metrics values
"""
rules = {}
for pair, value in rules_dict.items():
        if value[2] > float(lift) and value[0] > float(support) and value[1] > float(confidence) and value[2] > 1.001:
rules[pair] = value
trans_exist = 0
#if the place already exists, We do not need to add new places, just use existing ones
tau_t = PetriNet.Transition(f"tau_{pair[0]}{pair[1]}", None)
for trans in net.transitions:
if str(trans) == str(tau_t):
trans_exist = 1
break
if(trans_exist == 0):
net.transitions.add(tau_t)
s_place = f"ps_{pair[0]}"
t_place = f"pt_{pair[1]}"
source_found = 0
target_found = 0
for place in net.places:
if place.name == s_place:
source_found = 1
pn_util.add_arc_from_to(place, tau_t, net)
elif place.name == t_place:
target_found = 1
pn_util.add_arc_from_to(tau_t, place, net)
if (source_found and target_found):
break
## Handle Source Side
# Adding new place after source
if (not source_found):
source = PetriNet.Place(s_place)
net.places.add(source)
pn_util.add_arc_from_to(source, tau_t, net)
all_src = pair[0][1:-1].split(", ")
for k,v in settings.sink_dict.items():
if all(item in list(map(str,settings.sink_dict[k])) for item in list(all_src)):
for t in net.transitions:
if str(t) == str(k):
pn_util.add_arc_from_to(t, source, net)
break
if (not target_found):
target = PetriNet.Place(t_place)
net.places.add(target)
pn_util.add_arc_from_to(tau_t, target, net)
all_tgt = pair[1][1:-1].split(", ")
for k,v in settings.src_dict.items():
if all(item in list(map(str,settings.src_dict[k])) for item in list(all_tgt)):
for t in net.transitions:
if str(t) == str(k):
pn_util.add_arc_from_to(target, t, net)
break
return net, rules
def soundness_at_XOR_tree(rules):
"""
Preserving Soundness between XOR blocks based on the highest lift value.
Parameters:
rules (dict) : Discovered rules and their XOR blocks
Return:
Sound XOR blocks pair (dict) : Sound XOR block pairs to be used for generating sound Precise net
"""
sound_xor_rule = {}
keys_to_be_removed = []
key_copy = tuple(rules.keys())
for i in range(len(rules.keys())):
if len(rules.keys()) != 0:
sound_xor_rule[next(iter(rules))] = rules[next(iter(rules))]
for k,v in rules.items():
if k[0] == list(sound_xor_rule.items())[len(sound_xor_rule)-1][0][0]:
keys_to_be_removed.append(k)
elif k[1] == list(sound_xor_rule.items())[len(sound_xor_rule)-1][0][1]:
keys_to_be_removed.append(k)
for k in keys_to_be_removed:
if k in rules.keys():
del rules[k]
return sound_xor_rule
def discover_sound_petrinet(rules_dict, net):
"""
Discover Intermediate Petri net which preserves soundness between XOR branches.
Parameter:
rules (dict) : Discovered rules with the association rule metrics values
net (PetriNet): Generated Petri net of the log
Return:
net (PetriNet): Intermediate Petri net of the log
"""
for pair in rules_dict:
trans_exist = 0
#if the place already exists, We do not need to add new places, just use existing ones
tau_t = PetriNet.Transition(f"tau_{pair[0]}{pair[1]}", None)
for trans in net.transitions:
if str(trans) == str(tau_t):
trans_exist = 1
break
if(trans_exist == 0):
net.transitions.add(tau_t)
s_place = f"ps_{pair[0]}"
t_place = f"pt_{pair[1]}"
source_found = 0
target_found = 0
for place in net.places:
if place.name == s_place:
source_found = 1
pn_util.add_arc_from_to(place, tau_t, net)
elif place.name == t_place:
target_found = 1
pn_util.add_arc_from_to(tau_t, place, net)
if (source_found and target_found):
break
## Handle Source Side
# Adding new place after source
if (not source_found):
source = PetriNet.Place(s_place)
net.places.add(source)
pn_util.add_arc_from_to(source, tau_t, net)
all_src = pair[0][1:-1].split(", ")
for k,v in settings.sink_dict.items():
if all(item in list(map(str,settings.sink_dict[k])) for item in list(all_src)):
for t in net.transitions:
if str(t) == str(k):
pn_util.add_arc_from_to(t, source, net)
break
if (not target_found):
target = PetriNet.Place(t_place)
net.places.add(target)
pn_util.add_arc_from_to(tau_t, target, net)
all_tgt = pair[1][1:-1].split(", ")
for k,v in settings.src_dict.items():
if all(item in list(map(str,settings.src_dict[k])) for item in list(all_tgt)):
for t in net.transitions:
if str(t) == str(k):
pn_util.add_arc_from_to(target, t, net)
break
return net
def repair_sound_Model(s_net, rules_dict, support, confidence, lift, sound=1):
"""
    Repair a bordered Petri net generated from a process tree to include long-term dependencies in it and
    create a precise Petri net. Soundness is treated as a given requirement.
    Parameters:
        s_net (PetriNet): Generated Petri net of the log
        rules_dict (dict) : Discovered rules with the association rule metric values
        support (str): Threshold value for support
        confidence (str) : Threshold value for confidence
        lift (str): Threshold value for lift
        sound : Soundness flag ('on' to preserve soundness; None to skip the soundness check)
    Return:
        s_net (PetriNet): Repaired sound Petri net of the log
        rules (dict) : Rules added to the net with their association rule metric values
"""
rules = {}
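    # Process candidate rules in ascending order of lift (value = [support, confidence, lift])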
rules_dict = dict(sorted(rules_dict.items(), key=lambda item: item[1][2]))
for pair, value in rules_dict.items():
trans = None
        # Compare the metrics numerically; comparing stringified numbers would be lexicographic
        if value[2] < 1.001 or value[2] < float(lift) or value[0] < float(support) or value[1] < float(confidence):
tau_t = f"tau_{pair[0]}{pair[1]}"
for t in s_net.transitions:
s_place_valid = 0
t_place_valid = 0
if str(t) == str(tau_t):
trans = t
source_places = set([x.source for x in t.in_arcs])
for p in source_places:
s_place = f"ps_{pair[0]}"
if str(p) == s_place:
if sound == 'on' and len(p.out_arcs) > 1:
s_place_valid = 1
                        elif sound is None:
s_place_valid = -1
if len(p.out_arcs) == 1:
pn_util.remove_place(s_net, p)
if sound == 'on' and len(p.out_arcs) == 1:
rules[pair] = value
target_places = set([x.target for x in t.out_arcs])
for p in target_places:
t_place = f"pt_{pair[1]}"
if str(p) == t_place:
if sound == 'on' and len(p.in_arcs) > 1:
t_place_valid = 1
                        elif sound is None:
t_place_valid = -1
if len(p.in_arcs) == 1:
pn_util.remove_place(s_net, p)
if sound == 'on' and len(p.in_arcs) == 1:
rules[pair] = value
if s_place_valid==1 and t_place_valid==1:
s_net = pn_util.remove_transition(s_net, trans)
break
elif s_place_valid == -1 and t_place_valid == -1:
s_net = pn_util.remove_transition(s_net, trans)
break
else:
rules[pair] = value
return s_net, rules
def display_petri_net(net=None):
"""
    Export the Petri net in .SVG format and save it in the directory of the event log.
    Parameter:
        net (PetriNet) : Petri net model to be stored in .SVG format
    Returns:
        image_path (str): The path of the saved Petri net image in .SVG format
"""
if net == None :
net = settings.PETRI_NET
im = settings.I_MARKS_ORIG
fm = settings.F_MARKS_ORIG
parameters = {pn_visualizer.Variants.WO_DECORATION.value.Parameters.FORMAT: "SVG"}
gviz = pn_visualizer.apply(net, im, fm, parameters=parameters)
log_name = settings.EVENT_LOG_NAME
log_name = log_name.rsplit('.', 1)[0]
log_name = log_name.replace(" ", "")
log_path = os.path.dirname(copy.deepcopy(settings.EVENT_LOG_PATH))
image_path = os.path.join(log_path, f"{log_name}.SVG")
pn_visualizer.save(gviz, image_path)
return image_path
def apply(tree, parameters=None):
'''
Generation of bordered Petri net from the given tree, adapted from PM4Py
Only supports loops with 2 children!
:param tree:
:return:
'''
net = obj.PetriNet(name=str(tree))
if len(tree.children) == 0:
pn_util.add_transition(net, label=tree.label, name=str(id(tree)))
else:
sub_nets = list()
for c in tree.children:
sub_net, ini, fin = apply(c)
sub_nets.append(sub_net)
pn_util.merge(net, sub_nets)
switch = {
pt_op.SEQUENCE: construct_sequence_pattern,
pt_op.XOR: construct_xor_pattern,
pt_op.PARALLEL: construct_and_pattern,
pt_op.LOOP: construct_loop_pattern
}
net, ini, fin = switch[tree.operator](net, sub_nets)
if tree.parent is None:
p_ini = pn_util.add_place(net)
p_fin = pn_util.add_place(net)
pn_util.add_arc_from_to(p_ini, _get_src_transition(net), net)
pn_util.add_arc_from_to(_get_sink_transition(net), p_fin, net)
return net, obj.Marking({p_ini: 1}), obj.Marking({p_fin: 1})
return net, obj.Marking(), obj.Marking()
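# --- Hedged usage sketch (illustrative only) ---
# `tree` is assumed to be a pm4py ProcessTree, e.g. one discovered by the inductive miner;
# the exact import path for the miner depends on the pm4py version in use:
#   tree = inductive_miner.apply_tree(log)   # hypothetical discovery step
#   net, im, fm = apply(tree)                # bordered Petri net plus initial/final markings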
def _get_src_transition(sub_net):
for t in sub_net.transitions:
if len(pn_util.pre_set(t)) == 0:
return t
return None
def _get_sink_transition(sub_net):
for t in sub_net.transitions:
if len(pn_util.post_set(t)) == 0:
return t
return None
def _add_src_sink_transitions(net, p_s, p_t):
src = pn_util.add_transition(net)
pn_util.add_arc_from_to(src, p_s, net)
sink = pn_util.add_transition(net)
pn_util.add_arc_from_to(p_t, sink, net)
return net, obj.Marking(), obj.Marking()
def construct_sequence_pattern(net, sub_nets):
places = [None] * (len(sub_nets) + 1)
for i in range(len(sub_nets) + 1):
places[i] = pn_util.add_place(net)
for i in range(len(sub_nets)):
pn_util.add_arc_from_to(places[i], _get_src_transition(sub_nets[i]), net)
pn_util.add_arc_from_to(_get_sink_transition(sub_nets[i]), places[i + 1], net)
src = pn_util.add_transition(net)
pn_util.add_arc_from_to(src, places[0], net)
sink = pn_util.add_transition(net)
pn_util.add_arc_from_to(places[len(places) - 1], sink, net)
return net, obj.Marking(), obj.Marking()
def construct_xor_pattern(net, sub_nets):
p_s = pn_util.add_place(net)
p_o = pn_util.add_place(net)
for n in sub_nets:
#settings.src_dict[tuple(n.transitions)] = _get_src_transition(n)
#settings.sink_dict[tuple(n.transitions)] = _get_sink_transition(n)
settings.src_dict[_get_src_transition(n)] = n.transitions
settings.sink_dict[_get_sink_transition(n)] = n.transitions
pn_util.add_arc_from_to(p_s, _get_src_transition(n), net)
pn_util.add_arc_from_to(_get_sink_transition(n), p_o, net)
return _add_src_sink_transitions(net, p_s, p_o)
def construct_and_pattern(net, sub_nets):
p_s = [None] * len(sub_nets)
p_t = [None] * len(sub_nets)
for i in range(len(sub_nets)):
p_s[i] = pn_util.add_place(net)
p_t[i] = pn_util.add_place(net)
pn_util.add_arc_from_to(p_s[i], _get_src_transition(sub_nets[i]), net)
pn_util.add_arc_from_to(_get_sink_transition(sub_nets[i]), p_t[i], net)
src = pn_util.add_transition(net)
for p in p_s:
pn_util.add_arc_from_to(src, p, net)
sink = pn_util.add_transition(net)
for p in p_t:
pn_util.add_arc_from_to(p, sink, net)
return net, obj.Marking(), obj.Marking()
def construct_loop_pattern(net, sub_nets):
assert (len(sub_nets) == 2)
p_s = pn_util.add_place(net)
p_t = pn_util.add_place(net)
pn_util.add_arc_from_to(p_s, _get_src_transition(sub_nets[0]), net)
pn_util.add_arc_from_to(p_t, _get_src_transition(sub_nets[1]), net)
pn_util.add_arc_from_to(_get_sink_transition(sub_nets[0]), p_t, net)
pn_util.add_arc_from_to(_get_sink_transition(sub_nets[1]), p_s, net)
net, ini, fin = _add_src_sink_transitions(net, p_s, p_t)
return net, obj.Marking(), obj.Marking()
def deepcopy_net():
im = obj.Marking()
fm = obj.Marking()
p_net = copy.deepcopy(settings.PETRI_NET_ORIG)
for place in p_net.places:
for p_ini in settings.I_MARKS_ORIG:
if str(p_ini) == str(place):
im = obj.Marking({place : 1})
for p_f in settings.F_MARKS_ORIG:
if str(p_f) == str(place):
fm = obj.Marking({place : 1})
return p_net, im, fm
if __name__ == "__main__":
file_path = "<path>\<file>.xes"
support = "0.2"
confidence = "0.3"
lift = "1.0"
sound = "No"
rules, precision, fitness, net_path, pnml_path = miner(file_path, support, confidence, lift, sound)
| 36.732143 | 328 | 0.590747 |
4a26663ae2d365a402e0ba02ec71cf6f10d75768 | 3,587 | py | Python | samples/main_menu_simple.py | Werxzy/ursina | c897ab329e5c5489ac4073a3b4a26ed162f771c6 | [
"MIT"
] | 2 | 2018-01-27T14:25:22.000Z | 2018-05-17T20:06:04.000Z | samples/main_menu_simple.py | pokepetter/pandaeditor | 7c47279f7492ad2b70c6310600fd8a40a10848fb | [
"MIT"
] | null | null | null | samples/main_menu_simple.py | pokepetter/pandaeditor | 7c47279f7492ad2b70c6310600fd8a40a10848fb | [
"MIT"
] | null | null | null | from ursina import *
# Main Menu Example, or it can be any kind of menu, like Inventory, Quest journal, etc.
# Created by Doctor
# 09 Feb 21
# Edited version by @jasonheller on 07 Apr 22
# Class of game menu
class MenuMenu(Entity):
def __init__(self, **kwargs):
super().__init__(parent=camera.ui, ignore_paused=True)
        # Create empty entities that will be parents of our menus' content
self.main_menu = Entity(parent=self, enabled=True)
self.options_menu = Entity(parent=self, enabled=False)
self.help_menu = Entity(parent=self, enabled=False)
        # Add a background. You can change 'shore' to a different texture if you'd like.
self.background = Sprite('shore', color=color.dark_gray, z=1)
# [MAIN MENU] WINDOW START
# Title of our menu
Text("MAIN MENU", parent=self.main_menu, y=0.4, x=0, origin=(0,0))
def switch(menu1, menu2):
menu1.enable()
menu2.disable()
# Button list
ButtonList(button_dict={
"Start": Func(print_on_screen,"You clicked on Start button!", position=(0,.1), origin=(0,0)),
"Options": Func(lambda: switch(self.options_menu, self.main_menu)),
"Help": Func(lambda: switch(self.help_menu, self.main_menu)),
"Exit": Func(lambda: application.quit())
},y=0,parent=self.main_menu)
# [MAIN MENU] WINDOW END
# [OPTIONS MENU] WINDOW START
# Title of our menu
Text ("OPTIONS MENU", parent=self.options_menu, y=0.4, x=0, origin=(0, 0))
# Button
Button("Back",parent=self.options_menu,y=-0.3,scale=(0.1,0.05),color=rgb(50,50,50),
on_click=lambda: switch(self.main_menu, self.options_menu))
# [OPTIONS MENU] WINDOW END
# [HELP MENU] WINDOW START
# Title of our menu
Text ("HELP MENU", parent=self.help_menu, y=0.4, x=0, origin=(0, 0))
# Button list
ButtonList (button_dict={
"Gameplay": Func(print_on_screen,"You clicked on Gameplay help button!", position=(0,.1), origin=(0,0)),
"Battle": Func(print_on_screen,"You clicked on Battle help button!", position=(0,.1), origin=(0,0)),
"Control": Func(print_on_screen,"You clicked on Control help button!", position=(0,.1), origin=(0,0)),
"Back": Func (lambda: switch(self.main_menu, self.help_menu))
}, y=0, parent=self.help_menu)
# [HELP MENU] WINDOW END
        # Here we can change attributes of this class when it is called
for key, value in kwargs.items ():
setattr (self, key, value)
# Input function that check if key pressed on keyboard
def input(self, key):
# And if you want use same keys on different windows
# Like [Escape] or [Enter] or [Arrows]
# Just write like that:
# If our main menu enabled and we press [Escape]
if self.main_menu.enabled and key == "escape":
application.quit()
elif self.options_menu.enabled and key == "escape":
self.main_menu.enable()
self.options_menu.disable()
elif self.help_menu.enabled and key == "escape":
self.main_menu.enable()
self.help_menu.disable()
    # Update function that checks something every frame
    # You can use it similarly to input, checking
    # which menu is currently enabled
def update(self):
pass
# Init application
app = Ursina(title='Main Menu Tutorial')
# Call our menu
main_menu = MenuMenu()
# Run application
app.run()
| 36.979381 | 116 | 0.620574 |
4a26663cdd82019c971f03bf313d8b7fc4852002 | 3,390 | py | Python | app/auth/forms.py | gzxultra/FlaskLoginManagement | 0a36a36fa1322e91a35735280bb119c94d016592 | [
"MIT"
] | null | null | null | app/auth/forms.py | gzxultra/FlaskLoginManagement | 0a36a36fa1322e91a35735280bb119c94d016592 | [
"MIT"
] | null | null | null | app/auth/forms.py | gzxultra/FlaskLoginManagement | 0a36a36fa1322e91a35735280bb119c94d016592 | [
"MIT"
] | null | null | null | from flask_wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from wtforms.fields import RadioField, SelectField
from ..models import User
class LoginMethodForm(Form):
method = RadioField(u'Login Method',
                        choices=[('email', 'Email'), ('weibo', 'Weibo'), ('qzone', 'Qzone')])
class LoginForm(Form):
email = StringField('Email', validators=[DataRequired(), Length(1, 64), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Login')
class RegistrationForm(Form):
email = StringField('Email', validators=[DataRequired(), Length(1, 64), Email()])
username = StringField('Username', validators=[
DataRequired(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Username must have only letters,'
'numbers, dots or underscores')])
password = PasswordField('Password', validators=[
DataRequired(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm password', validators=[DataRequired()])
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
user = User.query.filter_by(username=field.data).first()
if user is not None and user.weibo_id is None:
raise ValidationError('Username already in use.')
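    # Note: WTForms runs methods named validate_<fieldname> automatically as inline
    # validators for the matching field, so the two checks above execute during form.validate().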
class ChangePasswordForm(Form):
old_password = PasswordField('Old password', validators=[DataRequired()])
password = PasswordField('New password', validators=[
DataRequired(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm new password', validators=[DataRequired()])
submit = SubmitField('Update Password')
class PasswordResetRequestForm(Form):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
submit = SubmitField('Reset Password')
class PasswordResetForm(Form):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
password = PasswordField('New Password', validators=[
DataRequired(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm password', validators=[DataRequired()])
submit = SubmitField('Reset Password')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first() is None:
raise ValidationError('Unknown email address.')
class ChangeEmailForm(Form):
email = StringField('New Email', validators=[DataRequired(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Update Email Address')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.') | 44.025974 | 94 | 0.651327 |
4a26665270fd70e722a05d2a74779dd8e5474cde | 455 | py | Python | src/evaluation.py | skeptycal/twde-datalab | ba2038e0e9d9f05b53e2723bc18f93fdb73f462a | [
"Apache-2.0"
] | 53 | 2017-12-12T17:18:13.000Z | 2020-02-10T10:21:24.000Z | src/evaluation.py | thoughtworks/twde-datalab | 00702081a41ac0868d518eeea95a165c19ccd24e | [
"Apache-2.0"
] | 27 | 2017-12-08T10:11:57.000Z | 2018-09-03T11:36:47.000Z | src/evaluation.py | skeptycal/twde-datalab | ba2038e0e9d9f05b53e2723bc18f93fdb73f462a | [
"Apache-2.0"
] | 14 | 2018-01-28T10:41:39.000Z | 2020-01-08T19:56:48.000Z | import numpy as np
import pandas as pd
def nwrmsle(predictions, targets, weights):
predictions = np.array([x if x > 0 else 0 for x in list(predictions)])
targetsf = targets.astype(float)
targetsf = np.array([x if x > 0 else 0 for x in list(targetsf)])
weights = 1 + 0.25 * weights
log_square_errors = (np.log(predictions + 1) - np.log(targetsf + 1)) ** 2
return(np.sqrt(np.sum(weights * log_square_errors) / np.sum(weights)))
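if __name__ == "__main__":
    # Illustrative check, not part of the original module: NWRMSLE is the weighted root mean
    # squared logarithmic error, with weights rescaled as 1 + 0.25 * weights (e.g. a
    # perishable-item flag). The numbers below are hypothetical.
    preds = np.array([10.0, 0.0, 5.0])
    actual = np.array([12.0, 0.0, 4.0])
    flags = np.array([1.0, 0.0, 0.0])
    print(nwrmsle(preds, actual, flags))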
| 30.333333 | 77 | 0.668132 |
4a26667a776374657bc0715ab94124e21b7b5228 | 5,768 | py | Python | __dump__/_x_segslice.py | fakegit/mo-han-toolbox | 9d5bbc1fe7f12040715d3a0d3f320a1ad617aed8 | [
"MIT"
] | 24 | 2019-12-08T03:56:32.000Z | 2021-10-02T13:26:37.000Z | __dump__/_x_segslice.py | fakegit/mo-han-toolbox | 9d5bbc1fe7f12040715d3a0d3f320a1ad617aed8 | [
"MIT"
] | 2 | 2020-04-27T14:20:01.000Z | 2020-07-17T06:05:33.000Z | __dump__/_x_segslice.py | fakegit/mo-han-toolbox | 9d5bbc1fe7f12040715d3a0d3f320a1ad617aed8 | [
"MIT"
] | 10 | 2019-08-06T01:11:28.000Z | 2021-07-19T08:45:11.000Z | #!/usr/bin/env python3
# encoding=utf8
class Formatter:
def __init__(self, fmt_func=lambda x: x, mark=None):
self._fmt_func = fmt_func
self._mark = mark
def __call__(self, *args, **kwargs):
return self._fmt_func(*args, **kwargs)
def __hash__(self):
return hash(self._fmt_func)
def __eq__(self, other):
return hash(self) == hash(other) and self.mark == other.mark
@property
def mark(self):
return self._mark
class Segment:
def __init__(self, value, formatter: Formatter):
self._value = value
self._formatter = formatter
self._form = self._formatter(self._value)
def __str__(self):
return 'segment({}, {})'.format(self.signature, self.value)
def __eq__(self, other):
return self.value == other.value and self.formatter == other.formatter
def __hash__(self):
return hash(self.value) + hash(self.formatter)
@property
def mark(self):
return self.formatter.mark
@property
def value(self):
return self._value
@value.setter
def value(self, new):
self._value = new
if self._formatter:
self._form = self._formatter(self._value)
else:
self._form = self._value
@property
def formatter(self):
return self._formatter
@formatter.setter
def formatter(self, new):
self._formatter = new
if self._formatter:
self._form = self._formatter(self._value)
else:
self._form = self._value
@property
def signature(self):
return self.formatter.mark
@property
def repr(self):
return self._form
class BaseSegments:
def __init__(self, whole=None, segments: list = None):
if whole:
self._whole = whole
self.redivide()
elif segments:
self._segments = segments
self.reunion()
else:
self._whole = ''
self._segments = []
def __getitem__(self, item):
return self.segments[item]
def redivide(self):
self.segments = [Segment(self.whole, Formatter())]
def reunion(self):
segments = self.segments
whole = segments[0].repr
for e in segments[1:]:
whole += e.repr
self.whole = whole
@property
def is_original(self):
return len(self.segments) == 1 and (self.segments[0].formatter is None or self.segments[0].value == self.whole)
@property
def whole(self):
return self._whole
@whole.setter
def whole(self, new):
self._whole = new
self.redivide()
@property
def segments(self):
return self._segments
@segments.setter
def segments(self, new):
self._segments = new
self.reunion()
def __repr__(self):
head = super(BaseSegments, self).__repr__()
body = '\n'.join(seg.repr for seg in self.segments)
return '{}\n{}'.format(head, body)
class BracketedSegments(BaseSegments):
def __init__(self, brackets: tuple, whole=None, segments: list = None):
left, right = brackets
def fmt_func(value):
return left + value + right
self._left = left
self._right = right
self._formatter = Formatter(fmt_func, left + right)
super(BracketedSegments, self).__init__(whole=whole, segments=segments)
@property
def left_mark(self):
return self._left
@property
def right_mark(self):
return self._right
@property
def formatter(self):
return self._formatter
def redivide(self):
empty_fmt = Formatter()
i = preamble_stop = start = stop = stage = 0
left = self.left_mark
left_n = len(left)
right = self.right_mark
right_n = len(right)
w = self.whole
seg_l = []
while w:
while i < len(w):
if stage == 0:
search_l = w[i:i + left_n]
if search_l == left:
preamble_stop = i
start = i + left_n
i += left_n
stage += 1
else:
i += 1
elif stage == 1:
search_r = w[i:i + right_n]
if search_r == right:
stop = i
i += right_n
stage += 1
else:
i += 1
elif stage == 2:
search_l = w[i:i + left_n]
search_r = w[i:i + right_n]
if search_l == left:
if preamble_stop:
seg_l.append(Segment(w[:preamble_stop], empty_fmt))
seg_l.append(Segment(w[start:stop], self.formatter))
w = w[stop + right_n:]
i = stage = preamble_stop = start = stop = 0
break
elif search_r == right:
stop = i
i += right_n
else:
i += 1
else:
if stop:
if preamble_stop:
seg_l.append(Segment(w[:preamble_stop], empty_fmt))
seg_l.append(Segment(w[start:stop], self.formatter))
w = w[stop + right_n:]
else:
seg_l.append(Segment(w, empty_fmt))
w = None
break
i = stage = preamble_stop = start = stop = 0
self._segments = seg_l
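if __name__ == '__main__':
    # Hedged demonstration (not in the original file): split a string into plain and
    # bracketed segments, then rebuild the original string from the segment forms.
    segs = BracketedSegments(('[', ']'), whole='intro [tag1] middle [tag2] end')
    for seg in segs:
        print(seg)
    segs.reunion()
    print(segs.whole)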
| 27.730769 | 119 | 0.509362 |
4a2666a7a4c10964001e6425d84360da643336ef | 1,435 | py | Python | ibis/sql/sqlite/api.py | hjoo/ibis | 72ece317337fb7d329337f20db930845a669ce85 | [
"Apache-2.0"
] | 5 | 2018-04-26T17:42:14.000Z | 2020-10-14T19:02:59.000Z | ibis/sql/sqlite/api.py | hjoo/ibis | 72ece317337fb7d329337f20db930845a669ce85 | [
"Apache-2.0"
] | 12 | 2018-04-07T03:13:34.000Z | 2020-07-13T15:45:34.000Z | ibis/sql/sqlite/api.py | ian-r-rose/ibis | c2323b8dfd7b56db821426513c379de38203b332 | [
"Apache-2.0"
] | 1 | 2020-10-01T18:48:01.000Z | 2020-10-01T18:48:01.000Z | # Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.sql.sqlite.client import SQLiteClient
from ibis.sql.sqlite.compiler import dialect, rewrites # noqa: F401
def compile(expr, params=None):
"""
Force compilation of expression for the SQLite target
"""
from ibis.sql.alchemy import to_sqlalchemy
return to_sqlalchemy(expr, dialect.make_context(params=params))
def connect(path=None, create=False):
"""
Create an Ibis client connected to a SQLite database.
Multiple database files can be created using the attach() method
Parameters
----------
path : string, default None
File path to the SQLite database file. If None, creates an in-memory
transient database and you can use attach() to add more files
create : boolean, default False
      If the file does not exist, create it
"""
return SQLiteClient(path, create=create)
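# --- Hedged usage sketch (illustrative only, not part of the original module) ---
#   con = connect('example.db', create=True)   # hypothetical database file
#   expr = con.table('my_table').count()       # assumes the table already exists
#   print(compile(expr))                       # SQL emitted for the SQLite target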
| 31.195652 | 76 | 0.725436 |
4a2667126470fba36ed8973ffd0e62fe87264e5e | 23,575 | py | Python | gui/kivy/uix/dialogs/installwizard.py | recryptproject/recrypt-electrum | 4898f1b2b265b4a5980893471a3ed40c92e44193 | [
"MIT"
] | null | null | null | gui/kivy/uix/dialogs/installwizard.py | recryptproject/recrypt-electrum | 4898f1b2b265b4a5980893471a3ed40c92e44193 | [
"MIT"
] | null | null | null | gui/kivy/uix/dialogs/installwizard.py | recryptproject/recrypt-electrum | 4898f1b2b265b4a5980893471a3ed40c92e44193 | [
"MIT"
] | null | null | null |
from functools import partial
import threading
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from recrypt_electrum.base_wizard import BaseWizard
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: 0, 0, 0, .9
Rectangle:
size: Window.size
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add recrypt_electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From %d cosigners')%n.value
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require %d signatures')%m.value
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from recrypt_electrum.mnemonic import Mnemonic
from recrypt_electrum.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and last_word
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.is_valid = kwargs['is_valid']
self.title = kwargs['title']
self.message = kwargs['message']
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
        `on_wizard_complete` Fired when the wizard is done creating/restoring
            wallet(s).
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, **kwargs):
self.dispatch('on_wizard_complete', self.wallet)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
        kwargs['message'] = _("Please paste your cosigner's master public key, or scan it using the camera button.")
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def password_dialog(self, message, callback):
popup = PasswordDialog()
popup.init(message, callback)
popup.open()
def request_password(self, run_next, force_disable_encrypt_cb=False):
def callback(pin):
if pin:
self.run('confirm_password', pin, run_next)
else:
run_next(None, None)
self.password_dialog('Choose a PIN code', callback)
def confirm_password(self, pin, run_next):
def callback(conf):
if conf == pin:
run_next(pin, False)
else:
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
self.password_dialog('Confirm your PIN code', callback)
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
| 28.541162 | 125 | 0.562587 |
4a266847a6ed732b2115cd32026c7f35425a7d06 | 4,921 | py | Python | faceTk.py | snaily16/FaceFilters | 466e27041e53549d81ded3f8e099e5a1a696e6e9 | [
"MIT"
] | null | null | null | faceTk.py | snaily16/FaceFilters | 466e27041e53549d81ded3f8e099e5a1a696e6e9 | [
"MIT"
] | null | null | null | faceTk.py | snaily16/FaceFilters | 466e27041e53549d81ded3f8e099e5a1a696e6e9 | [
"MIT"
] | null | null | null | from tkinter import *
import cv2
import threading
import datetime
import os
import time
from PIL import Image, ImageTk
from FaceFilters import FaceFilters
class GUIFace:
def __init__(self, vs, fc, outPath):
self.vs = vs
self.fc = fc
self.outPath = outPath
self.frame = None
self.thread = None
self.stopEvent = None
self.filterChoice = None
# initialize the root window and video panel
self.root = Tk()
self.panel = None
self.center = Frame(self.root, width=150, height=40,padx=10, pady=10)
btm_frame = Frame(self.root, bg='white', width=450, height=45, padx=3, pady=3)
btm_frame2 = Frame(self.root, bg='white', width=450, height=60)
# layout all of the main containers
self.root.grid_rowconfigure(1, weight=1)
self.root.grid_columnconfigure(0, weight=1)
self.center.grid(row=1)
btm_frame.grid(row=3)
btm_frame2.grid(row=4)
# create the center widgets
self.center.grid_rowconfigure(1, weight=1)
self.center.grid_columnconfigure(1, weight=1)
#panel = Frame(center, bg='yellow', width=250, height=210, padx=3, pady=3)
#ctr_mid.grid(row=0, column=1, sticky="nsew")
# create the bottom widgets
btn1 = Button(btm_frame, text='Glasses', command=lambda: self.setFilterChoice(0), width = 20, fg = "black", bg = "pink", bd = 0)
btn2 = Button(btm_frame, text='Sunglasses1', command=lambda: self.setFilterChoice(1), width = 20, fg = "black", bg = "light blue", bd = 0)
btn3 = Button(btm_frame, text='Sunglasses2', command=lambda: self.setFilterChoice(2), width = 20, fg = "black", bg = "aquamarine", bd = 0)
btn4 = Button(btm_frame, text='Sunglasses3', command=lambda: self.setFilterChoice(3), width = 20, fg = "black", bg = "light blue", bd = 0)
btn5 = Button(btm_frame, text='Dog', command=lambda: self.setFilterChoice(4), width = 20, fg = "black", bg = "pink", bd = 0)
btn6 = Button(btm_frame, text='Rabbit', command=lambda: self.setFilterChoice(5), width = 20,fg = "black", bg = "aquamarine", bd = 0)
btn7 = Button(btm_frame, text='Moustache1', command=lambda: self.setFilterChoice(6), width = 20, fg = "black", bg = "pink", bd = 0)
btn8 = Button(btm_frame, text='Moustache2', command=lambda: self.setFilterChoice(7), width = 20, fg = "black", bg = "light blue", bd = 0)
btn9 = Button(btm_frame, text='Ironman', command=lambda: self.setFilterChoice(8), width = 20, fg = "black", bg = "aquamarine", bd = 0)
btn10 = Button(btm_frame, text='Captain America', command=lambda: self.setFilterChoice(9), width = 20, fg = "black", bg = "light blue", bd = 0)
# layout the widgets in bottom frame
btn1.grid(row=0, column=1)
btn2.grid(row=0, column=2)
btn3.grid(row=0, column=3)
btn4.grid(row=0, column=4)
btn5.grid(row=0, column=5)
btn6.grid(row=1, column=1)
btn7.grid(row=1, column=2)
btn8.grid(row=1, column=3)
btn9.grid(row=1, column=4)
btn10.grid(row=1, column=5)
# create the bottom2 widgets
btm_frame2.grid_columnconfigure(1, weight=1)
snapbtn = Button(btm_frame2, text='Snap!', command=self.takeSnapshot, width = 80, height=2, fg = "black", bg = "lime green", bd = 1)
snapbtn.grid(row=0, column=0,columnspan=3)
        # keep polling the video sensor for the most recently read frame
self.stopEvent = threading.Event()
self.videoLoop()
#self.root.geometry('800x610')
self.root.wm_title('Face Filters')
self.root.wm_protocol('WM_DELETE_WINDOW', self.onClose)
self.root.mainloop()
def videoLoop(self):
try:
if not self.stopEvent.is_set():
# keep looping over frames until instructed to stop
self.frame = self.vs.read()
if self.filterChoice!=None:
self.frame = self.fc.applyFilter(self.frame, self.filterChoice)
self.frame = cv2.flip(self.frame, 1)
cv2image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
img = ImageTk.PhotoImage(image=img)
# if panel in not None, we need to initialize it
if self.panel is None:
self.panel = Label(self.center,image=img)
self.panel.image = img
#self.panel.pack(side='left', expand='yes', padx=10, pady=10)
self.panel.grid(row=0, column=1, sticky="nsew")
else:
self.panel.configure(image=img)
self.panel.image = img
self.panel.after(10,self.videoLoop)
except Exception as e:
print("[ERROR] {}".format(e))
def setFilterChoice(self, n):
self.filterChoice = n
print('[INFO] Filter selected: {}'.format(self.fc.filters[n]))
def takeSnapshot(self):
# grab current timestamp and construct the output path
ts = datetime.datetime.now()
filename = '{}.jpg'.format(ts.strftime('%Y%b%d_%H%M%S'))
p = os.path.sep.join((self.outPath, filename))
# save file
cv2.imwrite(p, self.frame.copy())
print("[INFO] saved {}".format(filename))
def onClose(self):
# set stop event, cleanup the camera
# allow rest of the quit process to continue
print("[INFO] closing...")
self.stopEvent.set()
self.vs.stop()
self.root.quit()
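# --- Hedged usage sketch (illustrative only, not part of the original file) ---
# `vs` is assumed to be a started camera stream exposing read()/stop() (for example
# imutils.video.VideoStream) and `fc` a FaceFilters instance; constructor arguments are assumed:
#   vs = VideoStream(src=0).start()
#   time.sleep(2.0)                          # camera warm-up
#   GUIFace(vs, FaceFilters(), "snapshots")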
| 37.853846 | 145 | 0.687462 |
4a26685139d38367e6c8767a7d77fd703eacef36 | 917 | py | Python | Preprocessing/Zhang_psf/src/blackboxrepairers/calculators.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
] | null | null | null | Preprocessing/Zhang_psf/src/blackboxrepairers/calculators.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
] | null | null | null | Preprocessing/Zhang_psf/src/blackboxrepairers/calculators.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
] | null | null | null | from copy import deepcopy
def get_median(values):
"""
    Given an unsorted list of numeric values, return the median value.
    Note that in the case of even-length lists of values, we take the value to
    the left of the center as the median (so that the median is always one of
    the values in the list).
Eg: get_median([1,2,3,4]) == 2, not 2.5.
"""
if not values:
raise Exception("Cannot calculate median of list with no values!")
sorted_values = deepcopy(values)
sorted_values.sort() # Not calling `sorted` b/c `sorted_values` may not be list.
if len(values) % 2 == 0:
return sorted_values[len(values)/2-1]
else:
return sorted_values[len(values)/2]
def test():
test_median()
def test_median():
feature_values = [4,1,3,2]
correct_median = 2
print "median value is correct?", get_median(feature_values) == correct_median
if __name__=="__main__":
test()
| 26.2 | 82 | 0.697928 |
4a266a2d8174cd3d49aebe2e67aa92ba5bdb69af | 1,966 | py | Python | test/unit/mysql_class/fetch_global_var.py | deepcoder42/mysql-lib | d3d2459e0476fdbc4465e1d9389612e58d36fb25 | [
"MIT"
] | 1 | 2022-03-23T04:53:19.000Z | 2022-03-23T04:53:19.000Z | test/unit/mysql_class/fetch_global_var.py | deepcoder42/mysql-lib | d3d2459e0476fdbc4465e1d9389612e58d36fb25 | [
"MIT"
] | null | null | null | test/unit/mysql_class/fetch_global_var.py | deepcoder42/mysql-lib | d3d2459e0476fdbc4465e1d9389612e58d36fb25 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Classification (U)
"""Program: fetch_global_var.py
Description: Unit testing of fetch_global_var in mysql_class.py.
Usage:
test/unit/mysql_class/fetch_global_var.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mysql_class
import version
__version__ = version.__version__
class Server(object):
"""Class: Server
Description: Class stub holder for Server class.
Methods:
__init__ -> Class initialization.
        vert_sql -> Stub holder for Server.vert_sql method.
"""
def __init__(self):
"""Method: __init__
Description: Class initialization.
Arguments:
"""
self.cmd = None
self.var = None
def vert_sql(self, cmd, var):
"""Method: vert_sql
Description: Stub holder for Server.vert_sql method.
Arguments:
(input) cmd -> Query command.
(input) var -> Global variable name.
"""
self.cmd = cmd
self.var = var
return True
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp -> Initialize testing environment.
test_fetch_global_var -> Test fetch_global_var function.
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.server = Server()
def test_fetch_global_var(self):
"""Function: test_fetch_global_var
Description: Test fetch_global_var function.
Arguments:
"""
self.assertTrue(mysql_class.fetch_global_var(self.server, "Variable"))
if __name__ == "__main__":
unittest.main()
| 16.661017 | 78 | 0.620549 |
4a266a4aa2f2e727fa4178292e36bee6e400ea04 | 2,653 | py | Python | gdal/swig/python/samples/classify.py | jpapadakis/gdal | f07aa15fd65af36b04291303cc6834c87f662814 | [
"MIT"
] | 3,100 | 2015-01-02T10:33:40.000Z | 2022-03-31T02:06:51.000Z | gdal/swig/python/samples/classify.py | jpapadakis/gdal | f07aa15fd65af36b04291303cc6834c87f662814 | [
"MIT"
] | 3,496 | 2015-01-06T16:53:30.000Z | 2022-03-31T20:18:51.000Z | gdal/swig/python/samples/classify.py | jpapadakis/gdal | f07aa15fd65af36b04291303cc6834c87f662814 | [
"MIT"
] | 2,036 | 2015-01-08T20:22:12.000Z | 2022-03-31T10:24:08.000Z | #!/usr/bin/env python3
# ******************************************************************************
#
# Project: GDAL
# Purpose: Example doing range based classification
# Author: Frank Warmerdam, [email protected]
#
# ******************************************************************************
# Copyright (c) 2008, Frank Warmerdam <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import sys
import numpy as np
from osgeo import gdal, gdal_array
def doit(src_filename, dst_filename):
class_defs = [(1, 10, 20),
(2, 20, 30),
(3, 128, 255)]
src_ds = gdal.Open(src_filename)
xsize = src_ds.RasterXSize
ysize = src_ds.RasterYSize
src_image = gdal_array.LoadFile(src_filename)
dst_image = np.zeros((ysize, xsize))
for class_info in class_defs:
class_id = class_info[0]
class_start = class_info[1]
class_end = class_info[2]
class_value = np.ones((ysize, xsize)) * class_id
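        # mask marks pixels whose value lies in [class_start, class_end]; np.choose then
        # writes class_id there and keeps the previous dst_image value everywhere else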
mask = np.bitwise_and(
np.greater_equal(src_image, class_start),
np.less_equal(src_image, class_end))
dst_image = np.choose(mask, (dst_image, class_value))
gdal_array.SaveArray(dst_image, dst_filename)
def main(argv):
src_filename = 'utm.tif'
dst_filename = 'classes.tif'
if len(argv) > 1:
src_filename = argv[1]
if len(argv) > 2:
dst_filename = argv[2]
return doit(src_filename, dst_filename)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 34.454545 | 80 | 0.634376 |
4a266b65476d12f075878e9efbfbcf37fcb76ba4 | 2,722 | py | Python | mangopi/metasite.py | BFTeck/mangopi | 8598c8b35c38f9bf2a0880c93af5c1d3ae5728be | [
"MIT"
] | 24 | 2015-01-03T00:47:06.000Z | 2020-11-27T14:58:32.000Z | mangopi/metasite.py | BFTeck/mangopi | 8598c8b35c38f9bf2a0880c93af5c1d3ae5728be | [
"MIT"
] | 4 | 2015-03-14T14:00:21.000Z | 2020-12-30T07:15:20.000Z | mangopi/metasite.py | BFTeck/mangopi | 8598c8b35c38f9bf2a0880c93af5c1d3ae5728be | [
"MIT"
] | 5 | 2015-02-04T00:44:08.000Z | 2018-08-13T21:59:47.000Z | from collections import OrderedDict
from underscore import _
from mangopi.helper.decorators import memoize
from mangopi.helper.util import Util
class MetaSite(object):
def __init__(self, modules=[], options={}):
self.modules = modules
self.options = options
@memoize
def series(self, name):
"""
:type name: str
:rtype: MetaSeries
"""
return MetaSite.MetaSeries(self, name)
class MetaSeries(object):
def __init__(self, site, name):
"""
:type site: MetaSite
:type name: str
"""
self.site = site
self.name = name
@property
@memoize
def chapters(self):
"""
:rtype: OrderedDict of (str, MetaChapter)
"""
all_chapters = _.flatten([
site.series(self.name).chapters for site in self.site.modules
])
chapter_map = OrderedDict(
Util.natural_sort(
_.groupBy(all_chapters, lambda chapter, index: chapter.chapter).items(),
key=lambda t: t[0]
)
)
return OrderedDict(
(chapter, MetaSite.MetaChapter(self, chapter, choices)) for chapter, choices in
chapter_map.items())
class MetaChapter(object):
def __init__(self, series, chapter, choices):
"""
:type series: MetaSeries
:type chapter: str
:type choices: list of site.mangasite.MangaSite.Chapter
"""
self.series = series
self.chapter = chapter
self.choices = choices
@property
@memoize
def title(self):
"""
:rtype: str
"""
return (_(self.choices).chain()
.map(lambda chapter, *args: chapter.title)
.sortBy(lambda title, *args: len(title))
.last().value()
)
@property
@memoize
def first_available_choice(self):
"""
:rtype: site.mangasite.MangaSite.Chapter
"""
return _(self.choices).find(
lambda chapter, *args: (_(chapter.pages).chain()
.map(lambda page, *args: page.image)
.all(lambda image, *args: image is not None).value()
)
)
@property
@memoize
def pages(self):
"""
:rtype: list of site.mangasite.MangaSite.Page
"""
return self.first_available_choice.pages
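# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# `modules` is assumed to be a list of mangopi site classes exposing series()/chapters:
#   site = MetaSite(modules=[MangaHere, MangaFox])       # hypothetical site modules
#   series = site.series('some title')
#   for number, chapter in series.chapters.items():
#       print(number, chapter.title, len(chapter.pages))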
| 28.652632 | 95 | 0.490816 |
4a266b93125f2d17c5864a04e94f97e47e3221dd | 234 | py | Python | unet_ce_hard_per_im_c13_1.py | pykao/BraTS2018-tumor-segmentation | 6c81ab670f7bd035312f7ccd729776c5c05c47a3 | [
"MIT"
] | 86 | 2018-09-07T08:45:19.000Z | 2022-03-26T18:26:24.000Z | unet_ce_hard_per_im_c13_1.py | pykao/BraTS2018-tumor-segmentation | 6c81ab670f7bd035312f7ccd729776c5c05c47a3 | [
"MIT"
] | 5 | 2018-09-16T07:59:47.000Z | 2020-12-16T07:20:26.000Z | unet_ce_hard_per_im_c13_1.py | pykao/BraTS2018-tumor-segmentation | 6c81ab670f7bd035312f7ccd729776c5c05c47a3 | [
"MIT"
] | 27 | 2018-09-11T05:08:40.000Z | 2021-12-22T16:02:35.000Z | from subprocess import call
model = 'unet_ce_hard_per_im_c13'
seeds = ['_s5042', '_s6437', '_s7859', '_s8074', '_s9829']
for seed in seeds:
cfg_name = model+seed
call(['python', 'train_unet.py', '--gpu', '3', '--cfg', cfg_name])
| 23.4 | 67 | 0.662393 |
4a266c4d635d789ab00624c41443d2e3e227ce7e | 12,165 | py | Python | heat/tests/test_neutron_autoscaling.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | heat/tests/test_neutron_autoscaling.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | heat/tests/test_neutron_autoscaling.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from neutronclient.v2_0 import client as neutronclient
import uuid
import mox
from oslo.config import cfg
from heat.common import template_format
from heat.db import api as db_api
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine import parser
from heat.engine.resources import instance
from heat.engine import template
from heat.tests.common import HeatTestCase
from heat.tests import utils
from heat.tests.v1_1 import fakes as v1fakes
as_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "AutoScaling Test",
"Parameters" : {
"ImageId": {"Type": "String"},
"KeyName": {"Type": "String"},
"SubnetId": {"Type": "String"}
},
"Resources" : {
"SvrGrp" : {
"Type" : "AWS::AutoScaling::AutoScalingGroup",
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "1",
"MaxSize" : "5",
"DesiredCapacity": "1",
"VPCZoneIdentifier": [ { "Ref": "SubnetId" } ],
"LoadBalancerNames" : [ { "Ref" : "ElasticLoadBalancer" } ]
}
},
"myMonitor": {
"Type": "OS::Neutron::HealthMonitor",
"Properties": {
"type": "HTTP",
"delay": 3,
"max_retries": 5,
"timeout": 10
}
},
"myPool": {
"Type": "OS::Neutron::Pool",
"Properties": {
"description": "Test Pool",
"lb_method": "ROUND_ROBIN",
"monitors": [ { "Ref": "myMonitor" } ],
"name": "Test_Pool",
"protocol": "HTTP",
"subnet_id": { "Ref": "SubnetId" },
"vip": {
"description": "Test VIP",
"connection_limit": 1000,
"address": "10.0.3.121",
"protocol_port": 80,
"name": "test_vip"
}
}
},
"ElasticLoadBalancer" : {
'Type': 'OS::Neutron::LoadBalancer',
'Properties': {
'protocol_port': 8080,
'pool_id': { "Ref": "myPool" }
}
},
"LaunchConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
"ImageId" : {"Ref": "ImageId"},
"InstanceType" : "bar",
}
}
}
}
'''
class AutoScalingTest(HeatTestCase):
params = {'KeyName': 'test', 'ImageId': 'foo'}
def setUp(self):
super(AutoScalingTest, self).setUp()
self.ctx = utils.dummy_context()
self.fc = v1fakes.FakeClient()
cfg.CONF.set_default('heat_waitcondition_server_url',
'http://server.test:8000/v1/waitcondition')
self.stub_keystoneclient()
self.m.StubOutWithMock(neutronclient.Client,
'create_health_monitor')
self.m.StubOutWithMock(neutronclient.Client,
'associate_health_monitor')
self.m.StubOutWithMock(neutronclient.Client, 'create_pool')
self.m.StubOutWithMock(neutronclient.Client, 'create_vip')
self.m.StubOutWithMock(neutronclient.Client, 'show_pool')
self.m.StubOutWithMock(neutronclient.Client, 'show_vip')
self.m.StubOutWithMock(neutronclient.Client, 'create_member')
self.m.StubOutWithMock(neutronclient.Client, 'list_members')
self.m.StubOutWithMock(nova.NovaClientPlugin, 'server_to_ipaddress')
self.m.StubOutWithMock(parser.Stack, 'validate')
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
self.m.StubOutWithMock(glance.ImageConstraint, "validate")
def test_lb(self):
tmpl = template_format.parse(as_template)
network_body = {
"network": {
"id": str(uuid.uuid4()),
"name": "testnet",
"admin_state_up": True
}
}
subnet_body = {
"subnet": {
"name": "testsubnet",
"id": str(uuid.uuid4()),
"network_id": network_body['network']['id'],
"ip_version": 4,
"cidr": "10.0.3.0/24",
"allocation_pools": [
{
"start": "10.0.3.20",
"end": "10.0.3.150"
}
],
"gateway_ip": "10.0.3.1"
}
}
self.params["SubnetId"] = subnet_body['subnet']['id']
mon_block = {
'health_monitor': tmpl['Resources']['myMonitor']['Properties']
}
mon_block['health_monitor']['admin_state_up'] = True
mon_ret_block = copy.deepcopy(mon_block)
mon_ret_block['health_monitor']['id'] = str(uuid.uuid4())
mon_ret_block['health_monitor']['status'] = 'ACTIVE'
pool_block = {'pool': {}}
tmp_pool_block = tmpl['Resources']['myPool']['Properties']
for val in ['lb_method', 'protocol', 'name', 'description']:
pool_block['pool'][val] = tmp_pool_block[val]
pool_block['pool']['admin_state_up'] = True
pool_block['pool']['subnet_id'] = self.params['SubnetId']
pool_block['pool']['admin_state_up'] = True
pool_ret_block = copy.deepcopy(pool_block)
pool_ret_block['pool']['id'] = str(uuid.uuid4())
pool_ret_block['pool']['status'] = 'ACTIVE'
tmp_vip_block = tmp_pool_block.pop('vip')
vip_block = {
'vip': {
'protocol': pool_block['pool']['protocol'],
'description': tmp_vip_block['description'],
'admin_state_up': True,
'subnet_id': self.params['SubnetId'],
'connection_limit': tmp_vip_block['connection_limit'],
'pool_id': pool_ret_block['pool']['id'],
'address': tmp_vip_block['address'],
'protocol_port': tmp_vip_block['protocol_port'],
'name': tmp_vip_block['name']
}
}
vip_ret_block = copy.deepcopy(vip_block)
vip_ret_block['vip']['id'] = str(uuid.uuid4())
vip_ret_block['vip']['status'] = 'ACTIVE'
port_block = {
'port': {
'network_id': network_body['network']['id'],
'fixed_ips': [
{
'subnet_id': subnet_body['subnet']['id'],
}
],
'admin_state_up': True
}
}
port_ret_block = copy.deepcopy(port_block)
port_ret_block['port']['id'] = str(uuid.uuid4())
membera_block = {
'member': {
'protocol_port': 8080,
'pool_id': pool_ret_block['pool']['id'],
'address': '1.2.3.4'
}
}
membera_ret_block = copy.deepcopy(membera_block)
membera_ret_block['member']['id'] = str(uuid.uuid4())
memberb_block = {
'member': {
'protocol_port': 8080,
'pool_id': pool_ret_block['pool']['id'],
'address': '1.2.3.5'
}
}
memberb_ret_block = copy.deepcopy(memberb_block)
memberb_ret_block['member']['id'] = str(uuid.uuid4())
memberc_block = {
'member': {
'protocol_port': 8080,
'pool_id': pool_ret_block['pool']['id'],
'address': '1.2.3.6'
}
}
memberc_ret_block = copy.deepcopy(memberc_block)
memberc_ret_block['member']['id'] = str(uuid.uuid4())
class id_type(object):
def __init__(self, id, name):
self.id = id
self.name = name
instances = {}
neutronclient.Client.create_health_monitor(mon_block).\
AndReturn(mon_ret_block)
neutronclient.Client.create_pool(pool_block).\
AndReturn(pool_ret_block)
neutronclient.Client.associate_health_monitor(
pool_ret_block['pool']['id'],
{'health_monitor': {
'id': mon_ret_block['health_monitor']['id']
}}).AndReturn(None)
neutronclient.Client.create_vip(vip_block).\
AndReturn(vip_ret_block)
neutronclient.Client.show_pool(pool_ret_block['pool']['id']).\
AndReturn(pool_ret_block)
neutronclient.Client.show_vip(vip_ret_block['vip']['id']).\
AndReturn(vip_ret_block)
parser.Stack.validate()
instid = str(uuid.uuid4())
instance.Instance.handle_create().AndReturn(instid)
instance.Instance.check_create_complete(mox.IgnoreArg())\
.AndReturn(False)
instance.Instance.check_create_complete(mox.IgnoreArg())\
.AndReturn(True)
glance.ImageConstraint.validate(
mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(True)
nova.NovaClientPlugin.server_to_ipaddress(
mox.IgnoreArg()).AndReturn('1.2.3.4')
neutronclient.Client.create_member(membera_block).\
AndReturn(membera_ret_block)
instances[instid] = membera_ret_block['member']['id']
# Start of update
parser.Stack.validate()
instid = str(uuid.uuid4())
instance.Instance.handle_create().AndReturn(instid)
instance.Instance.check_create_complete(mox.IgnoreArg())\
.AndReturn(False)
instance.Instance.check_create_complete(mox.IgnoreArg())\
.AndReturn(True)
instances[instid] = memberb_ret_block['member']['id']
instid = str(uuid.uuid4())
instance.Instance.handle_create().AndReturn(instid)
instance.Instance.check_create_complete(mox.IgnoreArg())\
.AndReturn(False)
instance.Instance.check_create_complete(mox.IgnoreArg())\
.AndReturn(True)
nova.NovaClientPlugin.server_to_ipaddress(
mox.IgnoreArg()).AndReturn('1.2.3.5')
neutronclient.Client.create_member(memberb_block).\
AndReturn(memberb_ret_block)
nova.NovaClientPlugin.server_to_ipaddress(
mox.IgnoreArg()).AndReturn('1.2.3.6')
neutronclient.Client.create_member(memberc_block).\
AndReturn(memberc_ret_block)
self.m.ReplayAll()
# Start of stack create
env = {'parameters': self.params}
tmpl = template_format.parse(as_template)
stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
environment.Environment(env))
stack.store()
stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
stack.state)
# Start of stack update
stack2 = parser.Stack.load(self.ctx, stack_id=stack.id)
tmpl2 = copy.deepcopy(tmpl)
tmpl2['Resources']['SvrGrp']['Properties']['DesiredCapacity'] = '3'
update_stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl2),
environment.Environment(env))
stack2.update(update_stack)
self.assertEqual((parser.Stack.UPDATE, parser.Stack.COMPLETE),
stack2.state)
members = db_api.resource_data_get_all(stack['ElasticLoadBalancer'])
self.assertEqual(3, len(members.keys()))
self.m.VerifyAll()
| 34.461756 | 78 | 0.563337 |
4a266c62f676ba2c74b510484819887db52cc18c | 1,275 | py | Python | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/16-filtering_sequence_elements/main.py | ordinary-developer/book_python_cookbook_3_ed_d_beazley_b_k_jones | 677c2bcd12c2223e731908114ae081bcbc32077d | ["MIT"] | 1 | 2017-05-04T08:23:46.000Z | 2017-05-04T08:23:46.000Z | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/16-filtering_sequence_elements/main.py | ordinary-developer/book_python_cookbook_3_ed_d_beazley_b_k_jones | 677c2bcd12c2223e731908114ae081bcbc32077d | ["MIT"] | null | null | null | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/16-filtering_sequence_elements/main.py | ordinary-developer/book_python_cookbook_3_ed_d_beazley_b_k_jones | 677c2bcd12c2223e731908114ae081bcbc32077d | ["MIT"] | null | null | null |
def example_1():
mylist = [1, 4, -5, 10, -7, 2, 3, -1]
print([n for n in mylist if n > 0])
print([n for n in mylist if n < 0])
def example_2():
mylist = [1, 4, -5, 10, -7, 2, 3, -1]
positive_numbers = (n for n in mylist if n > 0)
for number in positive_numbers:
print(number)
def example_3():
values = ['1', '2', '-3', '4', 'N/A', '5']
def is_int(value):
try:
x = int(value)
return True
except ValueError:
return False
print(list(filter(is_int, values)))
def example_4():
mylist = [1, 4, -5, 10, -7, 2, 3, -1]
clip_neg = [n if n > 0 else 0 for n in mylist]
clip_pos = [n if n < 0 else 0 for n in mylist]
print(clip_neg)
print(clip_pos)
def example_5():
addresses = [
'5412 N CLARK',
'5148 N CLARK',
'5800 E 58TH',
'2122 N CLARK',
'5645 N RAVENSWOOD',
'1060 W ADDISON',
'4801 N BROADWAY',
'1039 W GRANVILLE'
]
counts = [0, 3, 10, 4, 1, 7, 6, 1]
from itertools import compress
more5 = [n > 5 for n in counts]
print(list(compress(addresses, more5)))
if __name__ == '__main__':
example_1()
example_2()
example_3()
example_4()
example_5()
| 20.238095 | 51 | 0.522353 |
4a266dc46c2aee6f2a4c5e07b7a52309177bdfeb | 2,082 | py | Python | mars/tensor/expressions/arithmetic/rint.py | ChenQuan/mars | 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | ["Apache-2.0"] | null | null | null | mars/tensor/expressions/arithmetic/rint.py | ChenQuan/mars | 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | ["Apache-2.0"] | null | null | null | mars/tensor/expressions/arithmetic/rint.py | ChenQuan/mars | 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .... import operands
from ..utils import infer_dtype
from .core import TensorUnaryOp
from .utils import arithmetic_operand
@arithmetic_operand(sparse_mode='unary')
class TensorRint(operands.Rint, TensorUnaryOp):
pass
@infer_dtype(np.rint)
def rint(x, out=None, where=None, **kwargs):
"""
Round elements of the tensor to the nearest integer.
Parameters
----------
x : array_like
Input tensor.
out : Tensor, None, or tuple of Tensor and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated tensor is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone.
**kwargs
Returns
-------
out : Tensor or scalar
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> mt.rint(a).execute()
array([-2., -2., -0., 0., 2., 2., 2.])
"""
op = TensorRint(**kwargs)
return op(x, out=out, where=where)
| 30.617647 | 79 | 0.664745 |
4a266e5b985ff54396048cf5cfc3a53340147958 | 598 | py | Python | apps/02-guess-number-app/you_try/program.py | alexjj/python-jumpstart-course-demos | 962bd8cbe412a385158fc74fb7529825c2b01471 | ["MIT"] | null | null | null | apps/02-guess-number-app/you_try/program.py | alexjj/python-jumpstart-course-demos | 962bd8cbe412a385158fc74fb7529825c2b01471 | ["MIT"] | null | null | null | apps/02-guess-number-app/you_try/program.py | alexjj/python-jumpstart-course-demos | 962bd8cbe412a385158fc74fb7529825c2b01471 | ["MIT"] | null | null | null |
import random
print('-' * 32)
print(' Guess a number game')
print('-' * 32)
print()
the_number = random.randint(0, 100)
guess: int = -1
name = input('What is your name? ')
while guess != the_number:
guess_text = input('Guess a number between 0 and 100: ')
guess = int(guess_text)
if guess < the_number:
print('Sorry {1}, Your guess of {0} is too low'.format(guess, name))
elif guess > the_number:
print('Sorry {1}, Your guess of {0} is too high'.format(guess, name))
else:
print('Well done {1}! Your guess of {0} is right!'.format(guess, name))
| 24.916667 | 79 | 0.620401 |
4a266f9e74db7e4bff4939db449806378bbd77de | 4,687 | py | Python | basic_samples/SDS/Python/SDSPy/Python3/Dataview.py | hanhossain/OCS-Samples | 6f0f7878e6d9bccc32b6d663446678e070859d14 | ["Apache-2.0"] | null | null | null | basic_samples/SDS/Python/SDSPy/Python3/Dataview.py | hanhossain/OCS-Samples | 6f0f7878e6d9bccc32b6d663446678e070859d14 | ["Apache-2.0"] | null | null | null | basic_samples/SDS/Python/SDSPy/Python3/Dataview.py | hanhossain/OCS-Samples | 6f0f7878e6d9bccc32b6d663446678e070859d14 | ["Apache-2.0"] | null | null | null |
# Dataview.py
#
# Copyright (C) 2018 OSIsoft, LLC. All rights reserved.
#
# THIS SOFTWARE CONTAINS CONFIDENTIAL INFORMATION AND TRADE SECRETS OF
# OSIsoft, LLC. USE, DISCLOSURE, OR REPRODUCTION IS PROHIBITED WITHOUT
# THE PRIOR EXPRESS WRITTEN PERMISSION OF OSIsoft, LLC.
#
# RESTRICTED RIGHTS LEGEND
# Use, duplication, or disclosure by the Government is subject to restrictions
# as set forth in subparagraph (c)(1)(ii) of the Rights in Technical Data and
# Computer Software clause at DFARS 252.227.7013
#
# OSIsoft, LLC
# 1600 Alvarado St, San Leandro, CA 94577
import json
from DataviewQuery import DataviewQuery
from DataviewMapping import DataviewMapping
from DataviewIndexConfig import DataviewIndexConfig
from DataviewGroupRule import DataviewGroupRule
class Dataview(object):
"""Sds dataview definition"""
@property
def Id(self):
return self.__id
@Id.setter
def Id(self, id):
self.__id = id
@property
def Name(self):
return self.__name
@Name.setter
def Name(self, name):
self.__name = name
@property
def Description(self):
return self.__description
@Description.setter
def Description(self, description):
self.__description = description
@property
def Queries(self):
return self.__queries
@Queries.setter
def Queries(self, queries):
self.__queries = queries
@property
def Mappings(self):
return self.__mappings
@Mappings.setter
def Mappings(self, mappings):
self.__mappings = mappings
@property
def IndexConfig(self):
return self.__indexConfig
@IndexConfig.setter
def IndexConfig(self, indexConfig):
self.__indexConfig = indexConfig
@property
def IndexDataType(self):
return self.__indexDataType
@IndexDataType.setter
def IndexDataType(self, indexDataType):
self.__indexDataType = indexDataType
@property
def GroupRules(self):
return self.__groupRules
@GroupRules.setter
def GroupRules(self, groupRules):
self.__groupRules = groupRules
def toJson(self):
return json.dumps(self.toDictionary())
def toDictionary(self):
# required properties
dictionary = { 'Id' : self.Id}
dictionary['Queries'] = []
for value in self.Queries:
dictionary['Queries'].append(value.toDictionary())
# optional properties
if hasattr(self, 'Name'):
dictionary['Name'] = self.Name
if hasattr(self, 'Description'):
dictionary['Description'] = self.Description
if hasattr(self, 'Mappings'):
dictionary['Mappings'] = self.Mappings.toDictionary()
if hasattr(self, 'IndexConfig'):
dictionary['IndexConfig'] = self.IndexConfig.toDictionary()
if hasattr(self, 'IndexDataType'):
dictionary['IndexDataType'] = self.IndexDataType
if hasattr(self, 'GroupRules'):
dictionary['GroupRules'] = []
for value in self.GroupRules:
dictionary['GroupRules'].append(value.toDictionary())
return dictionary
@staticmethod
def fromJson(jsonObj):
return Dataview.fromDictionary(jsonObj)
@staticmethod
def fromDictionary(content):
dataview = Dataview()
if len(content) == 0:
return dataview
if 'Id' in content:
dataview.Id = content['Id']
if 'Name' in content:
dataview.Name = content['Name']
if 'Description' in content:
dataview.Description = content['Description']
if 'Queries' in content:
queries = content['Queries']
if queries is not None and len(queries) > 0:
dataview.Queries = []
for value in queries:
dataview.Queries.append(DataviewQuery.fromDictionary(value))
if 'Mappings' in content:
dataview.Mappings = DataviewMapping.fromDictionary(content['Mappings'])
if 'IndexConfig' in content:
dataview.IndexConfig = DataviewIndexConfig.fromDictionary(content['IndexConfig'])
if 'IndexDataType' in content:
dataview.IndexDataType = content['IndexDataType']
if 'GroupRules' in content:
groupRules = content['GroupRules']
if groupRules is not None and len(groupRules) > 0:
dataview.GroupRules = []
for value in groupRules:
dataview.GroupRules.append(DataviewGroupRule.fromDictionary(value))
return dataview
| 28.406061 | 93 | 0.632814 |
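A minimal usage sketch for the Dataview class in the preceding record, assuming the sibling Dataview* modules it imports (DataviewQuery, DataviewMapping, DataviewIndexConfig, DataviewGroupRule) are importable from the same directory; the id and name values below are placeholders, not values from the record:

# Sketch: round-trip a Dataview through its dictionary/JSON helpers.
# Assumes the Dataview* sibling modules are on the import path.
from Dataview import Dataview

dv = Dataview()
dv.Id = 'sample-dataview'   # placeholder id
dv.Name = 'Sample'          # placeholder name
dv.Queries = []             # toDictionary() iterates Queries, so it must be set

print(dv.toJson())          # -> {"Id": "sample-dataview", "Queries": [], "Name": "Sample"}

# fromJson()/fromDictionary() both take an already-parsed dict.
copy = Dataview.fromJson({'Id': 'sample-dataview', 'Name': 'Sample'})
print(copy.Id, copy.Name)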
4a26708b266dcd2768cef363b9ab044bf3cc0cb4 | 9,174 | py | Python | sdk/python/pulumi_azure/kusto/database_principal.py | davidobrien1985/pulumi-azure | 811beeea473bd798d77354521266a87a2fac5888 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_azure/kusto/database_principal.py | davidobrien1985/pulumi-azure | 811beeea473bd798d77354521266a87a2fac5888 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_azure/kusto/database_principal.py | davidobrien1985/pulumi-azure | 811beeea473bd798d77354521266a87a2fac5888 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class DatabasePrincipal(pulumi.CustomResource):
app_id: pulumi.Output[str]
"""
The app id, if not empty, of the principal.
"""
client_id: pulumi.Output[str]
"""
The Client ID that owns the specified `object_id`. Changing this forces a new resource to be created.
"""
cluster_name: pulumi.Output[str]
"""
Specifies the name of the Kusto Cluster this database principal will be added to. Changing this forces a new resource to be created.
"""
database_name: pulumi.Output[str]
"""
Specified the name of the Kusto Database this principal will be added to. Changing this forces a new resource to be created.
"""
email: pulumi.Output[str]
"""
The email, if not empty, of the principal.
"""
fully_qualified_name: pulumi.Output[str]
"""
The fully qualified name of the principal.
"""
name: pulumi.Output[str]
"""
The name of the Kusto Database Principal.
"""
object_id: pulumi.Output[str]
"""
An Object ID of a User, Group, or App. Changing this forces a new resource to be created.
"""
resource_group_name: pulumi.Output[str]
"""
Specifies the Resource Group where the Kusto Database Principal should exist. Changing this forces a new resource to be created.
"""
role: pulumi.Output[str]
"""
Specifies the permissions the Principal will have. Valid values include `Admin`, `Ingestor`, `Monitor`, `UnrestrictedViewers`, `User`, `Viewer`. Changing this forces a new resource to be created.
"""
type: pulumi.Output[str]
"""
Specifies the type of object the principal is. Valid values include `App`, `Group`, `User`. Changing this forces a new resource to be created.
"""
def __init__(__self__, resource_name, opts=None, client_id=None, cluster_name=None, database_name=None, object_id=None, resource_group_name=None, role=None, type=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a Kusto (also known as Azure Data Explorer) Database Principal
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] client_id: The Client ID that owns the specified `object_id`. Changing this forces a new resource to be created.
:param pulumi.Input[str] cluster_name: Specifies the name of the Kusto Cluster this database principal will be added to. Changing this forces a new resource to be created.
:param pulumi.Input[str] database_name: Specified the name of the Kusto Database this principal will be added to. Changing this forces a new resource to be created.
:param pulumi.Input[str] object_id: An Object ID of a User, Group, or App. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: Specifies the Resource Group where the Kusto Database Principal should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] role: Specifies the permissions the Principal will have. Valid values include `Admin`, `Ingestor`, `Monitor`, `UnrestrictedViewers`, `User`, `Viewer`. Changing this forces a new resource to be created.
:param pulumi.Input[str] type: Specifies the type of object the principal is. Valid values include `App`, `Group`, `User`. Changing this forces a new resource to be created.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if client_id is None:
raise TypeError("Missing required property 'client_id'")
__props__['client_id'] = client_id
if cluster_name is None:
raise TypeError("Missing required property 'cluster_name'")
__props__['cluster_name'] = cluster_name
if database_name is None:
raise TypeError("Missing required property 'database_name'")
__props__['database_name'] = database_name
if object_id is None:
raise TypeError("Missing required property 'object_id'")
__props__['object_id'] = object_id
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if role is None:
raise TypeError("Missing required property 'role'")
__props__['role'] = role
if type is None:
raise TypeError("Missing required property 'type'")
__props__['type'] = type
__props__['app_id'] = None
__props__['email'] = None
__props__['fully_qualified_name'] = None
__props__['name'] = None
super(DatabasePrincipal, __self__).__init__(
'azure:kusto/databasePrincipal:DatabasePrincipal',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, app_id=None, client_id=None, cluster_name=None, database_name=None, email=None, fully_qualified_name=None, name=None, object_id=None, resource_group_name=None, role=None, type=None):
"""
Get an existing DatabasePrincipal resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_id: The app id, if not empty, of the principal.
:param pulumi.Input[str] client_id: The Client ID that owns the specified `object_id`. Changing this forces a new resource to be created.
:param pulumi.Input[str] cluster_name: Specifies the name of the Kusto Cluster this database principal will be added to. Changing this forces a new resource to be created.
:param pulumi.Input[str] database_name: Specified the name of the Kusto Database this principal will be added to. Changing this forces a new resource to be created.
:param pulumi.Input[str] email: The email, if not empty, of the principal.
:param pulumi.Input[str] fully_qualified_name: The fully qualified name of the principal.
:param pulumi.Input[str] name: The name of the Kusto Database Principal.
:param pulumi.Input[str] object_id: An Object ID of a User, Group, or App. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: Specifies the Resource Group where the Kusto Database Principal should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] role: Specifies the permissions the Principal will have. Valid values include `Admin`, `Ingestor`, `Monitor`, `UnrestrictedViewers`, `User`, `Viewer`. Changing this forces a new resource to be created.
:param pulumi.Input[str] type: Specifies the type of object the principal is. Valid values include `App`, `Group`, `User`. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["app_id"] = app_id
__props__["client_id"] = client_id
__props__["cluster_name"] = cluster_name
__props__["database_name"] = database_name
__props__["email"] = email
__props__["fully_qualified_name"] = fully_qualified_name
__props__["name"] = name
__props__["object_id"] = object_id
__props__["resource_group_name"] = resource_group_name
__props__["role"] = role
__props__["type"] = type
return DatabasePrincipal(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 55.939024 | 234 | 0.686178 |
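A brief sketch of how the DatabasePrincipal resource from the preceding record might be declared in a Pulumi program; all IDs and names are placeholders, and the snippet is only meaningful when executed by the Pulumi engine (e.g. via `pulumi up`) with the Azure provider configured:

import pulumi_azure as azure

# Placeholder values throughout; the required arguments mirror the checks
# in DatabasePrincipal.__init__ above.
principal = azure.kusto.DatabasePrincipal(
    "example-principal",
    client_id="00000000-0000-0000-0000-000000000000",
    cluster_name="examplecluster",
    database_name="exampledb",
    object_id="11111111-1111-1111-1111-111111111111",
    resource_group_name="example-resources",
    role="Viewer",
    type="User",
)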
4a2670cc49b03f95d2155dc6e7e0b5d4ba67dad7 | 4,925 | py | Python | models/seq2seq-pytorch/run.py | heyLinsir/cotk | 95f2b671779adfe8fb46e82b18e368867a5899bd | ["Apache-2.0"] | null | null | null | models/seq2seq-pytorch/run.py | heyLinsir/cotk | 95f2b671779adfe8fb46e82b18e368867a5899bd | ["Apache-2.0"] | null | null | null | models/seq2seq-pytorch/run.py | heyLinsir/cotk | 95f2b671779adfe8fb46e82b18e368867a5899bd | ["Apache-2.0"] | null | null | null |
# coding:utf-8
def run(*argv):
import argparse
import time
from utils import Storage
parser = argparse.ArgumentParser(description='A seq2seq model with GRU encoder and decoder. Attention, beamsearch,\
dropout and batchnorm is supported.')
args = Storage()
parser.add_argument('--name', type=str, default=None,
help='The name of your model, used for tensorboard, etc. Default: runXXXXXX_XXXXXX (initialized by current time)')
parser.add_argument('--restore', type=str, default=None,
help='Checkpoints name to load. \
"NAME_last" for the last checkpoint of model named NAME. "NAME_best" means the best checkpoint. \
You can also use "last" and "best", defaultly use last model you run. \
Attention: "NAME_last" and "NAME_best" are not guaranteed to work when 2 models with same name run in the same time. \
"last" and "best" are not guaranteed to work when 2 models run in the same time.\
Default: None (don\'t load anything)')
parser.add_argument('--mode', type=str, default="train",
help='"train" or "test". Default: train')
parser.add_argument('--eh_size', type=int, default=200,
help='Size of encoder GRU')
parser.add_argument('--dh_size', type=int, default=200,
help='Size of decoder GRU')
parser.add_argument('--droprate', type=float, default=0,
help='The probability to be zerod in dropout. 0 indicates for don\'t use dropout')
parser.add_argument('--batchnorm', action='store_true',
help='Use bathnorm')
parser.add_argument('--decode_mode', type=str, choices=['max', 'sample', 'gumbel', 'samplek', 'beam'], default='beam',
help='The decode strategy when freerun. Choices: max, sample, gumbel(=sample), \
samplek(sample from topk), beam(beamsearch). Default: beam')
parser.add_argument('--top_k', type=int, default=10,
help='The top_k when decode_mode == "beam" or "samplek"')
parser.add_argument('--length_penalty', type=float, default=0.7,
help='The beamsearch penalty for short sentences. The penalty will get larger when this becomes smaller.')
parser.add_argument('--dataset', type=str, default='OpenSubtitles',
help='Dataloader class. Default: OpenSubtitles')
parser.add_argument('--datapath', type=str, default='OpenSubtitles',
help='Directory for data set. Default: OpenSubtitles')
parser.add_argument('--epoch', type=int, default=100,
help="Epoch for trainning. Default: 100")
parser.add_argument('--wvclass', type=str, default='Glove',
help="Wordvector class, none for not using pretrained wordvec. Default: Glove")
parser.add_argument('--wvpath', type=str, default="resources://Glove300d",
help="Directory for pretrained wordvector. Default: resources://Glove300d")
parser.add_argument('--out_dir', type=str, default="./output",
help='Output directory for test output. Default: ./output')
parser.add_argument('--log_dir', type=str, default="./tensorboard",
help='Log directory for tensorboard. Default: ./tensorboard')
parser.add_argument('--model_dir', type=str, default="./model",
help='Checkpoints directory for model. Default: ./model')
parser.add_argument('--cache_dir', type=str, default="./cache",
help='Checkpoints directory for cache. Default: ./cache')
parser.add_argument('--cpu', action="store_true",
help='Use cpu.')
parser.add_argument('--debug', action='store_true',
help='Enter debug mode (using ptvsd).')
parser.add_argument('--cache', action='store_true',
help='Use cache for speeding up load data and wordvec. (It may cause problems when you switch dataset.)')
cargs = parser.parse_args(argv)
# Editing following arguments to bypass command line.
args.name = cargs.name or time.strftime("run%Y%m%d_%H%M%S", time.localtime())
args.restore = cargs.restore
args.mode = cargs.mode
args.dataset = cargs.dataset
args.datapath = cargs.datapath
args.epochs = cargs.epoch
args.wvclass = cargs.wvclass
args.wvpath = cargs.wvpath
args.out_dir = cargs.out_dir
args.log_dir = cargs.log_dir
args.model_dir = cargs.model_dir
args.cache_dir = cargs.cache_dir
args.debug = cargs.debug
args.cache = cargs.cache
args.cuda = not cargs.cpu
# The following arguments are not controlled by command line.
args.restore_optimizer = True
load_exclude_set = []
restoreCallback = None
args.batch_per_epoch = 1500
args.embedding_size = 300
args.eh_size = cargs.eh_size
args.dh_size = cargs.dh_size
args.decode_mode = cargs.decode_mode
args.top_k = cargs.top_k
args.length_penalty = cargs.length_penalty
args.droprate = cargs.droprate
args.batchnorm = cargs.batchnorm
args.lr = 1e-3
args.batch_size = 64
args.batch_num_per_gradient = 4
args.grad_clip = 5
args.show_sample = [0] # show which batch when evaluating at tensorboard
args.max_sent_length = 50
args.checkpoint_steps = 20
args.checkpoint_max_to_keep = 5
import random
random.seed(0)
from main import main
main(args, load_exclude_set, restoreCallback)
if __name__ == '__main__':
import sys
run(*sys.argv[1:])
| 40.368852 | 121 | 0.73665 |
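The run() entry point in the preceding record can also be driven programmatically, since it simply forwards argv-style flags to argparse. The flag values below are illustrative only, and the call works only inside the repository checkout where utils.Storage and main.main are importable:

from run import run

# Illustrative flags; see the argparse definitions above for the full list.
run('--name', 'demo_run',
    '--mode', 'train',
    '--dataset', 'OpenSubtitles',
    '--datapath', 'OpenSubtitles',
    '--epoch', '2',
    '--cpu')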
4a2670f2929357b84c72362e5a993a84e853da18 | 2,304 | py | Python | config/urls.py | Michaelwwgo/V2X_Project | d26f476329dd7f6083e9275e01e2748d38918afc | ["MIT"] | 1 | 2021-02-03T08:15:59.000Z | 2021-02-03T08:15:59.000Z | config/urls.py | Michaelwwgo/V2X_Project | d26f476329dd7f6083e9275e01e2748d38918afc | ["MIT"] | null | null | null | config/urls.py | Michaelwwgo/V2X_Project | d26f476329dd7f6083e9275e01e2748d38918afc | ["MIT"] | null | null | null |
from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/",
TemplateView.as_view(template_name="pages/about.html"),
name="about",
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path(
"rest-auth/", include('rest_auth.urls')
),
path(
"rest-auth/registration/", include('rest_auth.registration.urls')
),
path(
"users/",
include("v2x_solution.users.urls", namespace="users"),
),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
path(
"roads/",
include("v2x_solution.road.urls", namespace="road"),
),
path(
"events/",
include("v2x_solution.event.urls", namespace="event"),
),
path(
"cars/",
include("v2x_solution.car.urls", namespace="car"),
),
path(
"boards/",
include("v2x_solution.board.urls", namespace="board"),
)
] + static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 30.315789 | 85 | 0.613715 |
4a26715dfd6c73c52f1d9e51f3c67b812864b3d1 | 1,796 | py | Python | JuckPae.py | AINukeHere/SCBot-DiscordBot | 821997f19dd0affc3a65aa56f23696496803dd13 | ["MIT"] | null | null | null | JuckPae.py | AINukeHere/SCBot-DiscordBot | 821997f19dd0affc3a65aa56f23696496803dd13 | ["MIT"] | null | null | null | JuckPae.py | AINukeHere/SCBot-DiscordBot | 821997f19dd0affc3a65aa56f23696496803dd13 | ["MIT"] | null | null | null |
CommentDict={
'기사단장':[],
'버서커':[],
'아크메이지':[],
'배틀메이지':[],
'스나이퍼':[],
'트랩퍼':[],
'클레릭':[],
'프리스트':[],
'레인저':[],
'런처':[],
'닌자':[],
'어쌔신':[],
'발로그':[],
'디아블로':[],
'메카니즘마스터':[],
'폭발물제조사':[],
'소울브레이커':[],
'대지도자':[],
'버프빌더':[],
'모스트빌더':[],
'네크로맨서':[],
'신앙자':[],
'검신':[],
'자객':[],
'크루세이더':[],
'샤이닝엠페러스':[],
'갓핸드':[],
'파일럿':[],
'광기의살육자':[],
'의문의사나이':[],
}
class Comment():
def __init__(self, author, content):
self.author = author
self.content = content
def getCharNames():
res = ''
for charname in list(CommentDict.keys()):
res += charname+'\n'
return res
def isCharacter(name):
return name in CommentDict
def addComment(name,comment):
CommentDict[name].append(comment)
print('추가됨')
saveComment(name)
def getComments(name):
res = ''
for comment in CommentDict[name]:
res += f'[{comment.author}] : {comment.content}\n'
return res
def saveComment(name):
with open('TeampleRPG Data\\'+name+'.dat','wt',encoding="utf-8") as f:
for comment in CommentDict[name]:
f.writelines(comment.author + '\n')
f.writelines(comment.content + '\n')
def loadAllComment():
for key in list(CommentDict.keys()):
loadComment(key)
def loadComment(name):
try:
with open('TeampleRPG Data\\'+name+'.dat','rt',encoding="utf-8") as f:
data = f.readlines()
print(data,data.__len__())
i = 0
while i < data.__len__():
addComment(name, Comment(data[i].strip(),data[i+1].strip()))
i+=2
except FileNotFoundError:
pass
except:
import traceback
        traceback.print_exc()
| 22.45 | 78 | 0.508352 |
4a2672176ed1084bb34cac9a25db1646bc262584 | 3,534 | py | Python | find_best_sampled_weight_rec.py | gugarosa/synthetic_rbms | a86c323e165a893810ac5bf79213f603ed86a8b3 | ["MIT"] | null | null | null | find_best_sampled_weight_rec.py | gugarosa/synthetic_rbms | a86c323e165a893810ac5bf79213f603ed86a8b3 | ["MIT"] | null | null | null | find_best_sampled_weight_rec.py | gugarosa/synthetic_rbms | a86c323e165a893810ac5bf79213f603ed86a8b3 | ["MIT"] | null | null | null |
import argparse
import numpy as np
import torch
import utils.stream as s
def get_arguments():
"""Gets arguments from the command line.
Returns:
A parser with the input arguments.
"""
# Creates the ArgumentParser
parser = argparse.ArgumentParser(
usage='Finds the best sampled weight by reconstructing an RBM over a validation set with original and sampled weights.')
# Adds a dataset argument with pre-defined choices
parser.add_argument('dataset', help='Dataset identifier', choices=[
'mnist', 'fmnist', 'kmnist'])
# Adds an identifier argument to the desired pre-trained model path
parser.add_argument(
'input_model', help='Input name for the pre-trained RBM', type=str)
# Adds an identifier argument to the desired weights file
parser.add_argument(
'input_weight', help='Input name for the weight file', type=str)
# Adds an identifier argument to the desired sampled weights file
parser.add_argument(
'input_sampled', help='Input name for the sampled weight file', type=str)
return parser.parse_args()
if __name__ == '__main__':
# Gathers the input arguments
args = get_arguments()
# Gathering variables from arguments
dataset = args.dataset
input_model = args.input_model
input_weight = args.input_weight
input_sampled = args.input_sampled
# Loads the validation data
_, val, _ = s.load_dataset(name=dataset)
# Loads the pre-trained model
model = torch.load(f'models/{input_model}.pth')
# Loading original and sampled weights
W = np.load(f'weights/{input_weight}.npy')
W_sampled = np.load(f'weights/{input_sampled}.npy')
# Reshaping weights to correct dimension
W = np.reshape(W, [model.n_visible, model.n_hidden])
W_sampled = np.reshape(W_sampled, [W_sampled.shape[0], model.n_visible, model.n_hidden])
# Resetting biases for fair comparison
model.a = torch.nn.Parameter(torch.zeros(model.n_visible))
model.b = torch.nn.Parameter(torch.zeros(model.n_hidden))
# Applying original weights
model.W = torch.nn.Parameter(torch.from_numpy(W))
# Checking model device type
if model.device == 'cuda':
# Applying its parameters as cuda again
model = model.cuda()
# Reconstructs the original RBM
original_mse, _ = model.reconstruct(val)
# Defining best sampled MSE as a high value
best_sampled_mse = 9999999
# Iterating over all possible epochs
for e in range(W_sampled.shape[0]):
print(f'Weights from GAN epoch {e+1}/{W_sampled.shape[0]}')
# Resetting biases for fair comparison
model.a = torch.nn.Parameter(torch.zeros(model.n_visible))
model.b = torch.nn.Parameter(torch.zeros(model.n_hidden))
# Applying sampled weights
model.W = torch.nn.Parameter(torch.from_numpy(W_sampled[e]))
# Checking model device type
if model.device == 'cuda':
# Applying its parameters as cuda again
model = model.cuda()
# Reconstructs an RBM
sampled_mse, _ = model.reconstruct(val)
# Checking if current sampled MSE was better than previous one
if sampled_mse < best_sampled_mse:
# Saving best MSE and best epoch values
best_sampled_mse, best_epoch = sampled_mse, e
print(f'Validation finished and best RBM found.')
print(f'Original MSE: {original_mse} | Best Sampled MSE: {best_sampled_mse} | Epoch: {best_epoch+1}')
| 32.722222 | 128 | 0.680249 |
4a26725c918112afe7a3713fbc27417d2030184f | 5,331 | py | Python | core/match.py | yycho0108/monovo | 9f2b5cf15f97e467c8e6e94ee16bb785ed6c7edd | ["MIT"] | null | null | null | core/match.py | yycho0108/monovo | 9f2b5cf15f97e467c8e6e94ee16bb785ed6c7edd | ["MIT"] | null | null | null | core/match.py | yycho0108/monovo | 9f2b5cf15f97e467c8e6e94ee16bb785ed6c7edd | ["MIT"] | null | null | null |
import cv2
import numpy as np
from utils import vmath as M
import viz as V
import time
class Matcher(object):
PRESET_HARD=dict(
lowe=0.75,
maxd=32.0,
cross=True,
fold=True
)
PRESET_SOFT=dict(
lowe=1.0,
maxd=128.0,
cross=False,
fold=True
)
def __init__(self, des):
# define un-exported enums from OpenCV
FLANN_INDEX_LINEAR = 0
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_KMEANS = 2
FLANN_INDEX_COMPOSITE = 3
FLANN_INDEX_KDTREE_SINGLE = 4
FLANN_INDEX_HIERARCHICAL = 5
FLANN_INDEX_LSH = 6
FLANN_INDEX_SAVED = 254
FLANN_INDEX_AUTOTUNED = 255
# TODO : figure out what to set for
# search_params
search_params = dict(checks=50)
# or pass empty dictionary
# build flann matcher
self.des_t_ = (np.uint8 if des.descriptorType() == cv2.CV_8U
else np.float32)
#if isinstance(des, cv2.ORB) or isinstance(des, cv2.AKAZE):
if self.des_t_ == np.uint8:
# probably hamming
# ~HAMMING
index_params= dict(algorithm = FLANN_INDEX_LSH,
table_number = 12,#6, # 12
key_size = 20,#12, # 20
multi_probe_level = 2) #2
flann = cv2.FlannBasedMatcher(index_params,search_params)
self.match_ = flann
#bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
#self.match_ = bf
else:
# Euclidean
index_params = dict(
algorithm = FLANN_INDEX_KDTREE,
trees = 5)
flann = cv2.FlannBasedMatcher(index_params,search_params)
self.match_ = flann
def match(self, a, b):
""" search k matches in b for a """
return self.match_.knnMatch(
self.des_t_(a), self.des_t_(b), k=2)
def filter(self, match, lowe, maxd):
"""
Apply lowe + distance filter.
# TODO : set threshold for lowe's filter
# TODO : set reasonable maxd for GFTT, for instance.
"""
good = []
for e in match:
if not len(e) == 2:
continue
(m, n) = e
if not (m.distance <= lowe * n.distance):
continue
#print m.distance
if not (m.distance <= maxd):
continue
# passed all checks
good.append(m)
return good
def __call__(self, des1, des2,
lowe=0.75,
maxd=64.0,
cross=True,
fold=True
):
# soft fail in case des1/des2 is empty
if len(des1) <= 0 or len(des2) <= 0:
return np.int32([]), np.int32([])
i1, i2 = None, None
if cross:
# check bidirectional
i1_ab, i2_ab = self(des1, des2,
lowe, maxd, cross=False)
if fold:
# opt1 : apply match on pre-filtered data ( faster )
i2_ba, i1_ba = self(des2[i2_ab], des1[i1_ab],
lowe, maxd, cross=False)
i1, i2 = i1_ab[i1_ba], i2_ab[i2_ba]
else:
# opt2 : apply the same operation reversed ( slower, maybe more robust ?? )
i2_ba, i1_ba = self(des2, des1,
lowe, maxd, cross=False)
m1 = np.stack([i1_ab, i2_ab], axis=-1)
m2 = np.stack([i1_ba, i2_ba], axis=-1)
m = M.intersect2d(m1, m2)
i1, i2 = m.T
else:
# check unidirectional (des1->des2)
if len(des1) < 2:
# insufficient # of descriptors
return np.int32([]), np.int32([])
match = self.match(des1, des2)
match = self.filter(match, lowe, maxd)
# extract indices
i1, i2 = np.int32([
(m.queryIdx, m.trainIdx) for m in match
]).reshape(-1,2).T
return i1, i2
def main():
np.random.seed( 0 )
orb = cv2.ORB_create(
nfeatures=1024,
scaleFactor=1.2,
nlevels=8,
# NOTE : scoretype here influences response-based filters.
scoreType=cv2.ORB_FAST_SCORE,
#scoreType=cv2.ORB_HARRIS_SCORE,
)
match = Matcher(orb)
img1 = np.random.randint(0, 255, size=(6,8,3), dtype=np.uint8)
img1 = cv2.resize(img1, (640,480), interpolation=cv2.INTER_NEAREST)
#img2 = np.random.randint(0, 255, size=(480,640,3), dtype=np.uint8)
roll_i = np.random.randint( 64 )
roll_j = np.random.randint( 64 )
img2 = np.roll(img1, roll_i, axis=0)
img2 = np.roll(img2, roll_j, axis=1)
kpt1, des1 = orb.detectAndCompute(img1, None)
kpt2, des2 = orb.detectAndCompute(img2, None)
pt1 = cv2.KeyPoint.convert(kpt1)
pt2 = cv2.KeyPoint.convert(kpt2)
i1, i2 = match(des1, des2)
mim = V.draw_matches(img1, img2,
pt1[i1], pt2[i2])
d = (pt1[i1] - pt2[i2]) + (roll_j, roll_i)
tol = 5.0
msk = (np.linalg.norm(d, axis=-1) < tol)
print msk.sum(), msk.size
cv2.imshow('win', mim)
cv2.waitKey(0)
if __name__ == "__main__":
main()
| 30.994186 | 91 | 0.516789 |
4a26738fff337115f0b116b5960b1cff27985620 | 6,920 | py | Python | tfx/orchestration/beam/beam_dag_runner.py | jkim1014/tfx-async | efb007cc8fb884f600b3f7ad2b43268058015ce7 | ["Apache-2.0"] | null | null | null | tfx/orchestration/beam/beam_dag_runner.py | jkim1014/tfx-async | efb007cc8fb884f600b3f7ad2b43268058015ce7 | ["Apache-2.0"] | null | null | null | tfx/orchestration/beam/beam_dag_runner.py | jkim1014/tfx-async | efb007cc8fb884f600b3f7ad2b43268058015ce7 | ["Apache-2.0"] | null | null | null |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of Beam TFX runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from typing import Any, Iterable, List, Optional, Text, Type
import absl
import apache_beam as beam
from tfx.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import config_utils
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import docker_component_launcher
from tfx.orchestration.launcher import looped_component_launcher
from tfx.orchestration.launcher import base_component_launcher_2
from tfx.utils import telemetry_utils
# TODO(jyzhao): confirm it's re-executable, add test case.
@beam.typehints.with_input_types(Any)
@beam.typehints.with_output_types(Any)
class _ComponentAsDoFn(beam.DoFn):
"""Wrap component as beam DoFn."""
def __init__(self, component: base_node.BaseNode,
component_launcher_class: Type[
base_component_launcher_2.BaseComponentLauncher2],
component_config: base_component_config.BaseComponentConfig,
tfx_pipeline: pipeline.Pipeline):
"""Initialize the _ComponentAsDoFn.
Args:
component: Component that to be executed.
component_launcher_class: The class of the launcher to launch the
component.
component_config: component config to launch the component.
tfx_pipeline: Logical pipeline that contains pipeline related information.
"""
driver_args = data_types.DriverArgs(enable_cache=tfx_pipeline.enable_cache)
metadata_connection = metadata.Metadata(
tfx_pipeline.metadata_connection_config)
self._component_launcher = component_launcher_class.create(
component=component,
pipeline_info=tfx_pipeline.pipeline_info,
driver_args=driver_args,
metadata_connection=metadata_connection,
beam_pipeline_args=tfx_pipeline.beam_pipeline_args,
additional_pipeline_args=tfx_pipeline.additional_pipeline_args,
component_config=component_config)
self._component_id = component.id
def process(self, element: Any, *signals: Iterable[Any]) -> None:
"""Executes component based on signals.
Args:
element: a signal element to trigger the component.
*signals: side input signals indicate completeness of upstream components.
"""
for signal in signals:
assert not list(signal), 'Signal PCollection should be empty.'
self._run_component()
def _run_component(self) -> None:
absl.logging.info('Component %s is running.', self._component_id)
self._component_launcher.launch()
absl.logging.info('Component %s is finished.', self._component_id)
class BeamDagRunner(tfx_runner.TfxRunner):
"""Tfx runner on Beam."""
def __init__(self,
beam_orchestrator_args: Optional[List[Text]] = None,
config: Optional[pipeline_config.PipelineConfig] = None):
"""Initializes BeamDagRunner as a TFX orchestrator.
Args:
beam_orchestrator_args: beam args for the beam orchestrator. Note that
this is different from the beam_pipeline_args within
additional_pipeline_args, which is for beam pipelines in components.
config: Optional pipeline config for customizing the launching of each
component. Defaults to pipeline config that supports
InProcessComponentLauncher and DockerComponentLauncher.
"""
if config is None:
config = pipeline_config.PipelineConfig(
supported_launcher_classes=[
looped_component_launcher.LoopedComponentLauncher,
docker_component_launcher.DockerComponentLauncher,
],
)
super(BeamDagRunner, self).__init__(config)
self._beam_orchestrator_args = beam_orchestrator_args
def run(self, tfx_pipeline: pipeline.Pipeline) -> None:
"""Deploys given logical pipeline on Beam.
Args:
tfx_pipeline: Logical pipeline containing pipeline args and components.
"""
# For CLI, while creating or updating pipeline, pipeline_args are extracted
# and hence we avoid deploying the pipeline.
if 'TFX_JSON_EXPORT_PIPELINE_ARGS_PATH' in os.environ:
return
tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):
with beam.Pipeline(argv=self._beam_orchestrator_args) as p:
# Uses for triggering the component DoFns.
root = p | 'CreateRoot' >> beam.Create([None])
# Stores mapping of component to its signal.
signal_map = {}
# pipeline.components are in topological order.
for component in tfx_pipeline.components:
component_id = component.id
# Signals from upstream components.
signals_to_wait = []
if component.upstream_nodes:
for upstream_node in component.upstream_nodes:
assert upstream_node in signal_map, ('Components is not in '
'topological order')
signals_to_wait.append(signal_map[upstream_node])
absl.logging.info('Component %s depends on %s.', component_id,
[s.producer.full_label for s in signals_to_wait])
(component_launcher_class,
component_config) = config_utils.find_component_launch_info(
self._config, component)
# Each signal is an empty PCollection. AsIter ensures component will
# be triggered after upstream components are finished.
signal_map[component] = (
root
| 'Run[%s]' % component_id >> beam.ParDo(
_ComponentAsDoFn(component, component_launcher_class,
component_config, tfx_pipeline),
*[beam.pvalue.AsIter(s) for s in signals_to_wait]))
absl.logging.info('Component %s is scheduled.', component_id)
| 41.437126 | 80 | 0.717775 |
4a2674c51c61c1c312a453f374bf295b0c814999 | 14,995 | py | Python | synapse/models/telco.py | vishalbelsare/synapse | c0f0d318cc5d3098b3a8d80222e2b0b1d19c5740 | ["Apache-2.0"] | null | null | null | synapse/models/telco.py | vishalbelsare/synapse | c0f0d318cc5d3098b3a8d80222e2b0b1d19c5740 | ["Apache-2.0"] | null | null | null | synapse/models/telco.py | vishalbelsare/synapse | c0f0d318cc5d3098b3a8d80222e2b0b1d19c5740 | ["Apache-2.0"] | null | null | null |
import logging
import synapse.exc as s_exc
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lookup.phonenum as s_l_phone
logger = logging.getLogger(__name__)
def digits(text):
return ''.join([c for c in text if c.isdigit()])
def chop_imei(imei):
valu = int(imei)
tac = int(imei[0:8])
snr = int(imei[8:14])
cd = int(imei[14:15])
return valu, {'subs': {'tac': tac, 'serial': snr, 'cd': cd}}
class Phone(s_types.Str):
def postTypeInit(self):
s_types.Str.postTypeInit(self)
self.opts['globsuffix'] = True
self.setNormFunc(str, self._normPyStr)
self.setNormFunc(int, self._normPyInt)
def _normPyStr(self, valu):
digs = digits(valu)
if not digs:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='requires a digit string')
subs = {}
try:
info = s_l_phone.getPhoneInfo(int(digs))
except Exception as e: # pragma: no cover
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Failed to get phone info') from None
cc = info.get('cc')
if cc is not None:
subs['loc'] = cc
# TODO prefix based validation?
return digs, {'subs': subs}
def _normPyInt(self, valu):
if valu < 1:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='phone int must be greater than 0')
return self._normPyStr(str(valu))
def repr(self, valu):
# XXX geo-aware reprs are practically a function of cc which
# XXX the raw value may only have after doing a s_l_phone lookup
if valu[0] == '1' and len(valu) == 11:
area = valu[1:4]
pref = valu[4:7]
numb = valu[7:11]
return '+1 (%s) %s-%s' % (area, pref, numb)
return '+' + valu
def imeicsum(text):
'''
Calculate the imei check byte.
'''
digs = []
for i in range(14):
v = int(text[i])
if i % 2:
v *= 2
[digs.append(int(x)) for x in str(v)]
chek = 0
valu = sum(digs)
remd = valu % 10
if remd != 0:
chek = 10 - remd
return str(chek)
class Imsi(s_types.Int):
def postTypeInit(self):
self.opts['size'] = 8
self.opts['signed'] = False
return s_types.Int.postTypeInit(self)
def _normPyInt(self, valu):
imsi = str(valu)
ilen = len(imsi)
if ilen > 15:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='invalid imsi len: %d' % (ilen,))
mcc = imsi[0:3]
# TODO full imsi analysis tree
return valu, {'subs': {'mcc': mcc}}
# TODO: support pre 2004 "old" imei format
class Imei(s_types.Int):
def postTypeInit(self):
self.opts['size'] = 8
self.opts['signed'] = False
return s_types.Int.postTypeInit(self)
def _normPyInt(self, valu):
imei = str(valu)
ilen = len(imei)
# we are missing our optional check digit
# lets add it for consistency...
if ilen == 14:
imei += imeicsum(imei)
return chop_imei(imei)
# if we *have* our check digit, lets check it
elif ilen == 15:
if imeicsum(imei) != imei[-1]:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='invalid imei checksum byte')
return chop_imei(imei)
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Failed to norm IMEI')
class TelcoModule(s_module.CoreModule):
def getModelDefs(self):
modl = {
'ctors': (
('tel:mob:imei', 'synapse.models.telco.Imei', {}, {
'ex': '490154203237518',
'doc': 'An International Mobile Equipment Id.'}),
('tel:mob:imsi', 'synapse.models.telco.Imsi', {}, {
'ex': '310150123456789',
'doc': 'An International Mobile Subscriber Id.'}),
('tel:phone', 'synapse.models.telco.Phone', {}, {
'ex': '+15558675309',
'doc': 'A phone number.'}),
),
'types': (
('tel:call', ('guid', {}), {
'doc': 'A guid for a telephone call record.'}),
('tel:txtmesg', ('guid', {}), {
'doc': 'A guid for an individual text message.'}),
('tel:mob:tac', ('int', {}), {
'ex': '49015420',
'doc': 'A mobile Type Allocation Code.'}),
('tel:mob:imid', ('comp', {'fields': (('imei', 'tel:mob:imei'), ('imsi', 'tel:mob:imsi'))}), {
'ex': '(490154203237518, 310150123456789)',
'doc': 'Fused knowledge of an IMEI/IMSI used together.'}),
('tel:mob:imsiphone', ('comp', {'fields': (('imsi', 'tel:mob:imsi'), ('phone', 'tel:phone'))}), {
'ex': '(310150123456789, "+7(495) 124-59-83")',
'doc': 'Fused knowledge of an IMSI assigned phone number.'}),
('tel:mob:telem', ('guid', {}), {
'doc': 'A single mobile telemetry measurement.'}),
('tel:mob:mcc', ('str', {'regex': '^[0-9]{3}$', 'strip': 1}), {
'doc': 'ITU Mobile Country Code.',
}),
('tel:mob:mnc', ('str', {'regex': '^[0-9]{2,3}$', 'strip': 1}), {
'doc': 'ITU Mobile Network Code.',
}),
('tel:mob:carrier', ('comp', {'fields': (('mcc', 'tel:mob:mcc'), ('mnc', 'tel:mob:mnc'))}), {
'doc': 'The fusion of a MCC/MNC.'
}),
('tel:mob:cell', ('comp', {'fields': (('carrier', 'tel:mob:carrier'),
('lac', ('int', {})),
('cid', ('int', {})))}), {
'doc': 'A mobile cell site which a phone may connect to.'
}),
),
'forms': (
('tel:phone', {}, (
('loc', ('loc', {}), {
'doc': 'The location associated with the number.',
}),
)),
('tel:call', {}, (
('src', ('tel:phone', {}), {
'doc': 'The source phone number for a call.'
}),
('dst', ('tel:phone', {}), {
'doc': 'The destination phone number for a call.'
}),
('time', ('time', {}), {
'doc': 'The time the call was initiated.'
}),
('duration', ('int', {}), {
'doc': 'The duration of the call in seconds.'
}),
('connected', ('bool', {}), {
'doc': 'Indicator of whether the call was connected.',
}),
('text', ('str', {}), {
'doc': 'The text transcription of the call.',
'disp': {'hint': 'text'},
}),
('file', ('file:bytes', {}), {
'doc': 'A file containing related media.',
}),
)),
('tel:txtmesg', {}, (
('from', ('tel:phone', {}), {
'doc': 'The phone number assigned to the sender.'
}),
('to', ('tel:phone', {}), {
'doc': 'The phone number assigned to the primary recipient.'
}),
('recipients', ('array', {'type': 'tel:phone', 'uniq': True, 'sorted': True}), {
'doc': 'An array of phone numbers for additional recipients of the message.',
}),
('svctype', ('str', {'enums': 'sms,mms,rcs', 'strip': 1, 'lower': 1}), {
'doc': 'The message service type (sms, mms, rcs).',
}),
('time', ('time', {}), {
'doc': 'The time the message was sent.'
}),
('text', ('str', {}), {
'doc': 'The text of the message.',
'disp': {'hint': 'text'},
}),
('file', ('file:bytes', {}), {
'doc': 'A file containing related media.',
}),
)),
('tel:mob:tac', {}, (
('org', ('ou:org', {}), {
'doc': 'The org guid for the manufacturer.',
}),
('manu', ('str', {'lower': 1}), {
'doc': 'The TAC manufacturer name.',
}),
('model', ('str', {'lower': 1}), {
'doc': 'The TAC model name.',
}),
('internal', ('str', {'lower': 1}), {
'doc': 'The TAC internal model name.',
}),
)),
('tel:mob:imei', {}, (
('tac', ('tel:mob:tac', {}), {
'ro': True,
'doc': 'The Type Allocate Code within the IMEI.'
}),
('serial', ('int', {}), {
'ro': True,
'doc': 'The serial number within the IMEI.',
})
)),
('tel:mob:imsi', {}, (
('mcc', ('tel:mob:mcc', {}), {
'ro': True,
'doc': 'The Mobile Country Code.',
}),
)),
('tel:mob:imid', {}, (
('imei', ('tel:mob:imei', {}), {
'ro': True,
'doc': 'The IMEI for the phone hardware.'
}),
('imsi', ('tel:mob:imsi', {}), {
'ro': True,
'doc': 'The IMSI for the phone subscriber.'
}),
)),
('tel:mob:imsiphone', {}, (
('phone', ('tel:phone', {}), {
'ro': True,
'doc': 'The phone number assigned to the IMSI.'
}),
('imsi', ('tel:mob:imsi', {}), {
'ro': True,
'doc': 'The IMSI with the assigned phone number.'
}),
)),
('tel:mob:mcc', {}, (
('loc', ('loc', {}), {'doc': 'Location assigned to the MCC.'}),
)),
('tel:mob:carrier', {}, (
('mcc', ('tel:mob:mcc', {}), {
'ro': True,
}),
('mnc', ('tel:mob:mnc', {}), {
'ro': True,
}),
('org', ('ou:org', {}), {
'doc': 'Organization operating the carrier.'
}),
('loc', ('loc', {}), {
'doc': 'Location the carrier operates from.'
}),
)),
('tel:mob:cell', {}, (
('carrier', ('tel:mob:carrier', {}), {'doc': 'Mobile carrier.'}),
('carrier:mcc', ('tel:mob:mcc', {}), {'doc': 'Mobile Country Code.'}),
('carrier:mnc', ('tel:mob:mnc', {}), {'doc': 'Mobile Network Code.'}),
('lac', ('int', {}), {'doc': 'Location Area Code. LTE networks may call this a TAC.'}),
('cid', ('int', {}), {'doc': 'The Cell ID.'}),
('radio', ('str', {'lower': 1, 'onespace': 1}), {'doc': 'Cell radio type.'}),
('latlong', ('geo:latlong', {}), {'doc': 'Last known location of the cell site.'}),
('loc', ('loc', {}), {
'doc': 'Location at which the cell is operated.'}),
('place', ('geo:place', {}), {
'doc': 'The place associated with the latlong property.'}),
)),
('tel:mob:telem', {}, (
('time', ('time', {}), {}),
('latlong', ('geo:latlong', {}), {}),
('http:request', ('inet:http:request', {}), {
'doc': 'The HTTP request that the telemetry was extracted from.',
}),
('host', ('it:host', {}), {
'doc': 'The host that generated the mobile telemetry data.'}),
('place', ('geo:place', {}), {
'doc': 'The place representing the location of the mobile telemetry sample.'}),
('loc', ('loc', {}), {
'doc': 'The geo-political location of the mobile telemetry sample.',
}),
('accuracy', ('geo:dist', {}), {
'doc': 'The reported accuracy of the latlong telemetry reading.',
}),
# telco specific data
('cell', ('tel:mob:cell', {}), {}),
('cell:carrier', ('tel:mob:carrier', {}), {}),
('imsi', ('tel:mob:imsi', {}), {}),
('imei', ('tel:mob:imei', {}), {}),
('phone', ('tel:phone', {}), {}),
# inet protocol addresses
('mac', ('inet:mac', {}), {}),
('ipv4', ('inet:ipv4', {}), {}),
('ipv6', ('inet:ipv6', {}), {}),
('wifi', ('inet:wifi:ap', {}), {}),
('wifi:ssid', ('inet:wifi:ssid', {}), {}),
('wifi:bssid', ('inet:mac', {}), {}),
# host specific data
('adid', ('it:adid', {}), {}),
('aaid', ('it:os:android:aaid', {}), {}),
('idfa', ('it:os:ios:idfa', {}), {}),
# User related data
('name', ('ps:name', {}), {}),
('email', ('inet:email', {}), {}),
('acct', ('inet:web:acct', {}), {}),
# reporting related data
('app', ('it:prod:softver', {}), {}),
('data', ('data', {}), {}),
# any other fields may be refs...
)),
)
}
name = 'tel'
return ((name, modl),)
| 38.350384 | 113 | 0.377659 |
4a267c471e4fa52ecdee9fea097a03087cd2bd55 | 85 | py | Python | preprocessing/exported_midi_chord_recognition/mir/extractors/__init__.py | Dsqvival/hierarchical-structure-analysis | 8ef0b47db1687c97b5a8cd8794b9a344fb1ee13c | [
"MIT"
] | 39 | 2020-12-22T17:55:05.000Z | 2022-03-27T07:03:19.000Z | preprocessing/exported_midi_chord_recognition/mir/extractors/__init__.py | Dsqvival/hierarchical-structure-analysis | 8ef0b47db1687c97b5a8cd8794b9a344fb1ee13c | [
"MIT"
] | 3 | 2021-06-28T16:32:11.000Z | 2021-12-27T10:58:34.000Z | preprocessing/exported_midi_chord_recognition/mir/extractors/__init__.py | Dsqvival/hierarchical-structure-analysis | 8ef0b47db1687c97b5a8cd8794b9a344fb1ee13c | [
"MIT"
] | 4 | 2021-01-03T03:13:26.000Z | 2022-03-04T07:40:51.000Z | from mir.extractors.extractor_base import ExtractorBase
__all__ =['ExtractorBase'] | 28.333333 | 56 | 0.823529 |
4a267c62c9002f9e03b7cea838c7e2e54df31f46 | 6,243 | py | Python | custom_components/aarlo/pyaarlo/cfg.py | PysX/home-assistant-conf | 614c5a67314b6c7b8af4237a918a437b79ab460d | [
"MIT"
] | 28 | 2019-05-31T12:30:15.000Z | 2022-03-10T18:54:57.000Z | custom_components/aarlo/pyaarlo/cfg.py | PysX/home-assistant-conf | 614c5a67314b6c7b8af4237a918a437b79ab460d | [
"MIT"
] | 5 | 2020-08-14T17:43:48.000Z | 2021-01-08T21:12:45.000Z | custom_components/aarlo/pyaarlo/cfg.py | PysX/home-assistant-config | 614c5a67314b6c7b8af4237a918a437b79ab460d | [
"MIT"
] | 2 | 2021-03-31T08:27:19.000Z | 2021-04-30T15:13:24.000Z | from .constant import (
DEFAULT_AUTH_HOST,
DEFAULT_HOST,
PRELOAD_DAYS,
TFA_CONSOLE_SOURCE,
TFA_DEFAULT_HOST,
TFA_DELAY,
TFA_EMAIL_TYPE,
TFA_RETRIES,
)
class ArloCfg(object):
"""Helper class to get at Arlo configuration options.
I got sick of adding in variables each time the config changed so I moved it all here. Config
is passed in a kwarg and parsed out by the property methods.
"""
def __init__(self, arlo, **kwargs):
"""The constructor.
Args:
kwargs (kwargs): Configuration options.
"""
self._arlo = arlo
self._kw = kwargs
self._arlo.debug("Cfg started")
@property
def storage_dir(self):
return self._kw.get("storage_dir", "/tmp/.aarlo")
@property
def name(self):
return self._kw.get("name", "aarlo")
@property
def username(self):
return self._kw.get("username", "unknown")
@property
def password(self):
return self._kw.get("password", "unknown")
@property
def host(self):
return self._kw.get("host", DEFAULT_HOST)
@property
def auth_host(self):
return self._kw.get("auth_host", DEFAULT_AUTH_HOST)
@property
def dump(self):
return self._kw.get("dump", False)
@property
def max_days(self):
return self._kw.get("max_days", 365)
@property
def db_motion_time(self):
return self._kw.get("db_motion_time", 30)
@property
def db_ding_time(self):
return self._kw.get("db_ding_time", 10)
@property
def request_timeout(self):
return self._kw.get("request_timeout", 60)
@property
def stream_timeout(self):
return self._kw.get("stream_timeout", 0)
@property
def recent_time(self):
return self._kw.get("recent_time", 600)
@property
def last_format(self):
return self._kw.get("last_format", "%m-%d %H:%M")
@property
def no_media_upload(self):
return self._kw.get("no_media_upload", False)
@property
def media_retry(self):
retries = self._kw.get("media_retry", [])
if not retries and self.no_media_upload:
retries = [0, 5, 10]
return retries
@property
def snapshot_checks(self):
checks = self._kw.get("snapshot_checks", [])
if not checks:
return [1, 5]
return checks
@property
def user_agent(self):
return self._kw.get("user_agent", "arlo")
@property
def mode_api(self):
return self._kw.get("mode_api", "auto")
@property
def refresh_devices_every(self):
return self._kw.get("refresh_devices_every", 0) * 60 * 60
@property
def refresh_modes_every(self):
return self._kw.get("refresh_modes_every", 0) * 60
@property
def http_connections(self):
return self._kw.get("http_connections", 20)
@property
def http_max_size(self):
return self._kw.get("http_maz_size", 10)
@property
def reconnect_every(self):
return self._kw.get("reconnect_every", 0) * 60
@property
def snapshot_timeout(self):
return self._kw.get("snapshot_timeout", 60)
@property
def verbose(self):
return self._kw.get("verbose_debug", False)
@property
def hide_deprecated_services(self):
return self._kw.get("hide_deprecated_services", False)
@property
def tfa_source(self):
return self._kw.get("tfa_source", TFA_CONSOLE_SOURCE)
@property
def tfa_type(self):
return self._kw.get("tfa_type", TFA_EMAIL_TYPE).lower()
@property
def tfa_delay(self):
return self._kw.get("tfa_delay", TFA_DELAY)
@property
def tfa_retries(self):
return self._kw.get("tfa_retries", TFA_RETRIES)
@property
def tfa_timeout(self):
return self._kw.get("tfa_timeout", 3)
@property
def tfa_total_timeout(self):
return self._kw.get("tfa_total_timeout", 60)
@property
def tfa_host(self):
h = self._kw.get("tfa_host", TFA_DEFAULT_HOST).split(":")
return h[0]
@property
def tfa_port(self):
h = self._kw.get("tfa_host", TFA_DEFAULT_HOST).split(":")
if len(h) == 1:
return 993
else:
return h[1]
@property
def tfa_username(self):
u = self._kw.get("tfa_username", None)
if u is None:
u = self.username
return u
@property
def tfa_password(self):
p = self._kw.get("tfa_password", None)
if p is None:
p = self.password
return p
@property
def wait_for_initial_setup(self):
return self._kw.get("wait_for_initial_setup", True)
@property
def save_state(self):
return self._kw.get("save_state", True)
@property
def state_file(self):
if self.save_state:
return self.storage_dir + "/" + self.name + ".pickle"
return None
@property
def session_file(self):
return self.storage_dir + "/session.pickle"
@property
def save_session(self):
return self._kw.get("save_session", True)
@property
def dump_file(self):
if self.dump:
return self.storage_dir + "/" + "packets.dump"
return None
@property
def library_days(self):
return self._kw.get("library_days", PRELOAD_DAYS)
@property
def synchronous_mode(self):
return self._kw.get("synchronous_mode", False)
@property
def user_stream_delay(self):
return self._kw.get("user_stream_delay", 1)
@property
def serial_ids(self):
return self._kw.get("serial_ids", False)
@property
def stream_snapshot(self):
return self._kw.get("stream_snapshot", False)
@property
def stream_snapshot_stop(self):
return self._kw.get("stream_snapshot_stop", 10)
@property
def save_updates_to(self):
return self._kw.get("save_updates_to", "")
@property
def save_media_to(self):
return self._kw.get("save_media_to", "")
@property
def no_unicode_squash(self):
return self._kw.get("no_unicode_squash", True)
| 23.91954 | 97 | 0.61605 |
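A minimal usage sketch for the ArloCfg class above, added for illustration only; the import path, the stub object, and the credential/TFA values are assumptions, not part of the original module. Any option that is not passed falls back to the defaults shown in the property methods.

from pyaarlo.cfg import ArloCfg  # hypothetical import path; adjust to the actual package layout


class StubArlo:
    """Stand-in for the PyArlo object; ArloCfg only calls its debug() method."""

    def debug(self, msg):
        print(msg)


cfg = ArloCfg(StubArlo(), username="user@example.com", password="secret",
              tfa_host="imap.example.com:993", save_state=False)
print(cfg.host)        # DEFAULT_HOST, since no "host" kwarg was supplied
print(cfg.tfa_port)    # "993", parsed from the host:port string above
print(cfg.state_file)  # None, because save_state=False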
4a267cfa2e8ea502f12a011cbbfc79a9efc48c35 | 711 | py | Python | algorithms/warmup/birthday_cake_candles/harisonmg_submission.py | mazaosoko/hackerrank | d9db66a5812ab68e88953c88c511242484af4990 | [
"MIT"
] | null | null | null | algorithms/warmup/birthday_cake_candles/harisonmg_submission.py | mazaosoko/hackerrank | d9db66a5812ab68e88953c88c511242484af4990 | [
"MIT"
] | null | null | null | algorithms/warmup/birthday_cake_candles/harisonmg_submission.py | mazaosoko/hackerrank | d9db66a5812ab68e88953c88c511242484af4990 | [
"MIT"
] | 1 | 2020-11-18T19:23:05.000Z | 2020-11-18T19:23:05.000Z | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'birthdayCakeCandles' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY candles as parameter.
#
def birthdayCakeCandles(candles):
height_tallest = max(candles)
num_tallest = candles.count(height_tallest)
return num_tallest
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
candles_count = int(input().strip())
candles = list(map(int, input().rstrip().split()))
result = birthdayCakeCandles(candles)
fptr.write(str(result) + '\n')
fptr.close()
| 20.911765 | 59 | 0.673699 |
4a267ddccf08306e119b672f733dadffec5ea494 | 731 | py | Python | boost_adaptbx/tests/tst_std_pair.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | boost_adaptbx/tests/tst_std_pair.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | boost_adaptbx/tests/tst_std_pair.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
import boost.std_pair
from libtbx.test_utils import Exception_expected
def run():
  assert boost.std_pair.exercise(( 1, 2)) == ( 2, 1.)
  assert boost.std_pair.exercise((-2, 4)) == (-4, 2.)
try:
boost.std_pair.exercise(1)
except Exception, err:
assert err.__class__.__name__ == 'ArgumentError'
else:
raise Exception_expected
try:
boost.std_pair.exercise((1,))
except Exception, err:
assert err.__class__.__name__ == 'ArgumentError'
else:
raise Exception_expected
try:
boost.std_pair.exercise((1,2,3,))
except Exception, err:
assert err.__class__.__name__ == 'ArgumentError'
else:
raise Exception_expected
print "OK"
if __name__ == '__main__':
run()
| 21.5 | 52 | 0.689466 |
4a267de3faec4d4240d12b0faffd88895be6f6ac | 3,640 | py | Python | dataset.py | sbharadwajj/point-based-method | 2f778cdb4e4ed218bff4aa33e57be2d5d993a77b | [
"Apache-2.0"
] | null | null | null | dataset.py | sbharadwajj/point-based-method | 2f778cdb4e4ed218bff4aa33e57be2d5d993a77b | [
"Apache-2.0"
] | null | null | null | dataset.py | sbharadwajj/point-based-method | 2f778cdb4e4ed218bff4aa33e57be2d5d993a77b | [
"Apache-2.0"
] | null | null | null | # import open3d as o3d
import torch
import numpy as np
import torch.utils.data as data
import torchvision.transforms as transforms
import os
import random
from utils.data_utils import load_h5, pad_cloudN, augment_cloud
#from utils import *
class Kitti360(data.Dataset):
def __init__(self, dataset_path, train = True, weights = False, npoints_partial = 1024, npoints = 2048):
self.train = train
self.npoints = npoints
self.weights = weights
if self.train:
self.inp = os.path.join(dataset_path, "train", "partial")
self.gt = os.path.join(dataset_path, "train", "gt")
self.X = os.listdir(self.inp)
self.Y = os.listdir(self.gt)
# sort_y = sorted(self.Y)[0::2000] # choose 10 for visualization
# sort_y = sorted(self.Y)[0::200]
            # self.Y = sort_y
self.len = len(self.Y)
else:
self.inp = os.path.join(dataset_path, "val", "partial")
self.gt = os.path.join(dataset_path, "val", "gt")
self.X = os.listdir(self.inp)
self.Y = os.listdir(self.gt)
# sort_y = sorted(self.Y)[0::200] # choose for visualization
# self.Y = sort_y
self.len = len(self.Y)
# print(self.inp)
# print(self.gt)
'''
        loads poses into a dictionary, keyed by drive name, for later lookup
'''
self.pose = '/home/bharadwaj/dataset/KITTI-360/data_poses'
pose_dict = {}
poses = os.listdir(self.pose)
pose_folders = [os.path.join('/home/bharadwaj/dataset/KITTI-360/data_poses', folder) for folder in poses]
self.pose_dict = {path.split("/")[-1]:np.loadtxt(path+"/poses.txt") for path in pose_folders}
def get_weight_vec(self, points_z, percent, array_pcd, axis):
thresh = np.quantile(points_z, percent)
bottom = array_pcd[:, axis] < thresh
top = array_pcd[:, axis] > thresh
weights_array = (np.ones((self.npoints)).astype(float)) #* 2.0
weights_array[bottom] = 0.1 # WEIGHTS FOR BOTTOM 60 %
assert(weights_array[top] == 1.0).all()
return weights_array
def get_translation_vec(self, model_id, poses):
'''
gets poses from pose.txt for each file
'''
id = float(model_id)
vec = np.squeeze(poses[poses[:,0] == id])
reshaped = vec[1:].reshape(3,4)
return reshaped[:,3:].astype(np.float64)
def read_pcd(self, filename, center):
'''
reads pcd and normalizes
'''
point_set = np.load(filename) # .astype(np.float64) saved as float already
point_set = point_set - center
dist = np.max(np.sqrt(np.sum(point_set ** 2, axis = 1)),0)
pcd = point_set / dist # scale
return pcd #.astype(np.float)
def __getitem__(self, index):
model_id = self.Y[index]
split_list = model_id.split("_")
file_name = split_list[-1].split(".")[0]
drive_name = "_".join(split_list[:6])
center = self.get_translation_vec(file_name, self.pose_dict[drive_name]).transpose()
partial = self.read_pcd(os.path.join(self.inp, model_id), center)
complete = self.read_pcd(os.path.join(self.gt, model_id), center)
# DATA AUGMENTATION
if self.train:
complete, partial = augment_cloud([complete, partial])
if self.weights:
model_id = self.get_weight_vec(complete[:,2], 0.6, complete, axis=2) #z axis
return model_id, partial.astype(np.float32), complete.astype(np.float32)
def __len__(self):
return self.len | 38.315789 | 113 | 0.595879 |
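A hedged usage sketch for the Kitti360 dataset above, added for illustration; the dataset root is a placeholder and must contain train|val/partial and train|val/gt folders of .npy clouds named <drive>_<frame>.npy. Note that the pose directory is hard-coded inside __init__, so this only runs where that path exists (or after editing it).

import torch.utils.data

from dataset import Kitti360  # the module above

dset = Kitti360("/path/to/kitti360_completion", train=False, weights=True)  # placeholder root
loader = torch.utils.data.DataLoader(dset, batch_size=4, shuffle=False, num_workers=2)
for weight_vec, partial, complete in loader:
    # weight_vec holds per-point weights when weights=True, otherwise the file name
    print(partial.shape, complete.shape)  # e.g. (4, 1024, 3) and (4, 2048, 3) float32 tensors
    break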
4a267f00a1f84b70c230aadc8d2c6a2f0af55bb6 | 2,054 | py | Python | homeassistant/components/rpi_gpio/switch.py | SimonCardaire/core | 1ea3f824da6afd9adee030d5b6bccfb2cc73d46d | [
"Apache-2.0"
] | null | null | null | homeassistant/components/rpi_gpio/switch.py | SimonCardaire/core | 1ea3f824da6afd9adee030d5b6bccfb2cc73d46d | [
"Apache-2.0"
] | null | null | null | homeassistant/components/rpi_gpio/switch.py | SimonCardaire/core | 1ea3f824da6afd9adee030d5b6bccfb2cc73d46d | [
"Apache-2.0"
] | null | null | null | """Allows to configure a switch using RPi GPIO."""
from homeassistant.components import rpi_gpio
from homeassistant.components.rpi_gpio.const import (
CONF_SWITCH,
CONF_SWITCH_INVERT_LOGIC,
CONF_SWITCH_PORTS,
DOMAIN,
PLATFORMS,
)
from homeassistant.const import DEVICE_DEFAULT_NAME
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.reload import setup_reload_service
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Raspberry PI GPIO devices."""
setup_reload_service(hass, DOMAIN, PLATFORMS)
config_switch = hass.data[DOMAIN][CONF_SWITCH]
invert_logic = config_switch[CONF_SWITCH_INVERT_LOGIC]
switches = []
ports = config_switch[CONF_SWITCH_PORTS]
for port, name in ports.items():
switches.append(RPiGPIOSwitch(name, port, invert_logic))
add_entities(switches)
class RPiGPIOSwitch(ToggleEntity):
"""Representation of a Raspberry Pi GPIO."""
def __init__(self, name, port, invert_logic):
"""Initialize the pin."""
self._name = name or DEVICE_DEFAULT_NAME
self._port = port
self._invert_logic = invert_logic
self._state = False
rpi_gpio.setup_output(self._port)
rpi_gpio.write_output(self._port, 1 if self._invert_logic else 0)
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the device on."""
rpi_gpio.write_output(self._port, 0 if self._invert_logic else 1)
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
rpi_gpio.write_output(self._port, 1 if self._invert_logic else 0)
self._state = False
self.schedule_update_ha_state()
| 30.205882 | 73 | 0.685005 |
4a267f31c413612a25d20abe2648bcb99eb23053 | 1,149 | py | Python | preprocess.py | devinaarvind/tensorflow-efe | 7b3001ca6f885d6c94b198d24560e2004bbd356e | [
"MIT"
] | 13 | 2017-07-28T17:30:04.000Z | 2021-03-13T01:43:18.000Z | preprocess.py | devinaarvind/tensorflow-efe | 7b3001ca6f885d6c94b198d24560e2004bbd356e | [
"MIT"
] | 3 | 2017-12-05T14:52:59.000Z | 2018-03-02T09:56:14.000Z | preprocess.py | devinaarvind/tensorflow-efe | 7b3001ca6f885d6c94b198d24560e2004bbd356e | [
"MIT"
] | 7 | 2018-05-27T02:55:13.000Z | 2019-07-16T11:12:59.000Z | import pandas as pd
from optparse import OptionParser
import config
from utils import data_utils
def preprocess(data_name):
dataset = data_utils.DataSet(config.DATASET[data_name])
df_train, df_valid, df_test = dataset.load_raw_data()
df_all = pd.concat([df_train, df_valid, df_test], ignore_index=True)
train_size = df_train.shape[0]
test_size = df_test.shape[0]
e2id = dataset.save_e2id(set(list(df_all.e1) + list(df_all.e2)))
r2id = dataset.save_r2id(set(list(df_all.r)))
df_all.e1 = df_all.e1.map(e2id)
df_all.e2 = df_all.e2.map(e2id)
df_all.r = df_all.r.map(r2id)
df_train = df_all[:train_size]
df_valid = df_all[train_size:-test_size]
df_test = df_all[-test_size:]
dataset.save_data(df_train, df_valid, df_test)
def parse_args(parser):
parser.add_option("-d", "--data", type="string", dest="data_name", default="wn18")
options, args = parser.parse_args()
return options, args
def main(options):
data_name = options.data_name
preprocess(data_name)
if __name__ == "__main__":
parser = OptionParser()
options, args = parse_args(parser)
main(options)
| 28.725 | 86 | 0.704091 |
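A short usage note for the preprocessing script above, added for illustration; it assumes config.DATASET defines the named dataset. The script is normally run from the command line, for example "python preprocess.py --data wn18", or driven programmatically:

from preprocess import preprocess  # the script above

# Builds entity/relation-to-id maps from all splits, remaps the triples to integer
# ids, and saves the processed train/valid/test splits via the DataSet helper.
preprocess("wn18")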
4a2680a821dfa87a84b5c5766d0017e9633b53a3 | 1,096 | py | Python | user/migrations/0001_initial.py | superior-prog/DevBlog | 5c82e60f0360c0bad243d67d924d1ed591f9fb4b | [
"MIT"
] | null | null | null | user/migrations/0001_initial.py | superior-prog/DevBlog | 5c82e60f0360c0bad243d67d924d1ed591f9fb4b | [
"MIT"
] | null | null | null | user/migrations/0001_initial.py | superior-prog/DevBlog | 5c82e60f0360c0bad243d67d924d1ed591f9fb4b | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2020-12-10 16:31
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('date_joined', models.DateTimeField(auto_now_add=True)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('is_admin', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
]
| 34.25 | 114 | 0.569343 |
4a26811698f183ef7486aa6beb68a2ab131fd039 | 3,524 | py | Python | imaging/src/radiomics_utils.py | bhuwanupadhyay/machine-learning-pipelines-for-multimodal-health-data | 19cb8faa3d2c10c5ffd13fbd1c5219d8e9fd7f10 | [
"MIT-0"
] | 7 | 2021-06-02T13:47:44.000Z | 2022-02-11T17:13:47.000Z | imaging/src/radiomics_utils.py | bhuwanupadhyay/machine-learning-pipelines-for-multimodal-health-data | 19cb8faa3d2c10c5ffd13fbd1c5219d8e9fd7f10 | [
"MIT-0"
] | 2 | 2021-12-13T16:36:47.000Z | 2022-02-17T18:52:55.000Z | imaging/src/radiomics_utils.py | bhuwanupadhyay/machine-learning-pipelines-for-multimodal-health-data | 19cb8faa3d2c10c5ffd13fbd1c5219d8e9fd7f10 | [
"MIT-0"
] | 8 | 2021-06-10T18:54:31.000Z | 2022-02-23T16:00:40.000Z | import pandas as pd
import time
import numpy as np
from radiomics import featureextractor
import boto3
import sagemaker
from sagemaker.session import Session
from sagemaker.feature_store.feature_group import FeatureGroup
from sagemaker import get_execution_role
sagemaker_session = sagemaker.Session()
region = sagemaker_session.boto_region_name
print('region is %s' % region)
boto_session = boto3.Session(region_name=region)
role = get_execution_role()
print('role is %s' % role)
sagemaker_client = boto3.client(service_name='sagemaker', region_name=region)
featurestore_runtime = boto3.client('sagemaker-featurestore-runtime', region_name=region)
feature_store_session = Session(
boto_session=boto_session,
sagemaker_client=sagemaker_client,
sagemaker_featurestore_runtime_client=featurestore_runtime
)
def cast_object_to_string(data_frame):
for label in data_frame.columns:
if data_frame.dtypes[label] == 'object':
data_frame[label] = data_frame[label].astype("str").astype("string")
def compute_features(imageName, maskName):
extractor = featureextractor.RadiomicsFeatureExtractor()
featureVector = extractor.execute(imageName, maskName)
new_dict={}
for featureName in featureVector.keys():
print("Computed %s: %s" % (featureName, featureVector[featureName]))
print(type(featureVector[featureName]))
if isinstance(featureVector[featureName], np.ndarray):
new_dict[featureName]=float(featureVector[featureName])
else:
new_dict[featureName]=featureVector[featureName]
df=pd.DataFrame.from_dict(new_dict, orient='index').T
df=df.convert_dtypes(convert_integer=False)
df['imageName']=imageName
df['maskName']=maskName
return df
def check_feature_group(feature_group_name):
feature_group = FeatureGroup(name=feature_group_name, sagemaker_session=feature_store_session)
status = None
try:
status = feature_group.describe()['FeatureGroupStatus']
except:
pass
if status == 'Created':
return feature_group
elif status is None:
return False
else:
wait_for_feature_group_creation_complete(feature_group)
return feature_group
def create_feature_group(feature_group_name, dataframe, s3uri, record_id = 'Subject', event_time = 'EventTime',
enable_online_store = True):
print(feature_group_name)
print(feature_store_session)
feature_group = FeatureGroup(name=feature_group_name, sagemaker_session=feature_store_session)
feature_group.load_feature_definitions(data_frame=dataframe)
feature_group.create(s3_uri=s3uri,
record_identifier_name=record_id,
event_time_feature_name=event_time,
role_arn=role,
enable_online_store=enable_online_store)
wait_for_feature_group_creation_complete(feature_group)
return feature_group
def wait_for_feature_group_creation_complete(feature_group):
status = feature_group.describe()['FeatureGroupStatus']
while status == "Creating":
print("Waiting for Feature Group Creation")
time.sleep(5)
status = feature_group.describe()['FeatureGroupStatus']
if status != "Created":
raise RuntimeError(f"Failed to create feature group {feature_group.name}")
print(f"FeatureGroup {feature_group.name} successfully created.") | 36.329897 | 112 | 0.719069 |