max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
tests/mock/tests/settings.py | magicjoey/django-knowledge | 199 | 19146 | <gh_stars>100-1000
from mock.tests.base import TestCase
from django.test.client import Client
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from knowledge import settings
from knowledge.models import Question, Response
from knowledge.forms import QuestionForm, ResponseForm
class BasicSettingsTest(TestCase):
def test_ALLOW_ANONYMOUS(self):
self.assertFalse(settings.ALLOW_ANONYMOUS)
self.assertEqual(
None,
QuestionForm(self.anon)
)
self.assertEqual(
None,
ResponseForm(self.anon, self.question)
)
############# flip setting ##############
settings.ALLOW_ANONYMOUS = not settings.ALLOW_ANONYMOUS
############# flip setting ##############
self.assertNotEqual(
None,
QuestionForm(self.anon)
)
self.assertNotEqual(
None,
ResponseForm(self.anon, self.question)
)
form = QuestionForm(self.anon)
self.assertNotIn('status', form.fields.keys())
# missing the name/email...
QUESTION_POST = {
'title': 'This is a title friend!',
'body': 'This is the body friend!'
}
form = QuestionForm(self.anon, QUESTION_POST)
self.assertFalse(form.is_valid())
QUESTION_POST = {
'name': '<NAME>',
'email': '<EMAIL>',
'title': 'This is a title friend!',
'body': 'This is the body friend!'
}
form = QuestionForm(self.anon, QUESTION_POST)
self.assertTrue(form.is_valid())
question = form.save()
# question has no user and is public by default
self.assertFalse(question.user)
self.assertEquals(question.name, '<NAME>')
self.assertEquals(question.email, '<EMAIL>')
self.assertEquals(question.status, 'public')
############# flip setting ##############
settings.ALLOW_ANONYMOUS = not settings.ALLOW_ANONYMOUS
############# flip setting ##############
def test_AUTO_PUBLICIZE(self):
self.assertFalse(settings.AUTO_PUBLICIZE)
QUESTION_POST = {
'title': 'This is a title friend!',
'body': 'This is the body friend!',
'status': 'private'
}
question = QuestionForm(self.joe, QUESTION_POST).save()
self.assertEquals(question.status, 'private')
############# flip setting ##############
settings.AUTO_PUBLICIZE = not settings.AUTO_PUBLICIZE
############# flip setting ##############
question = QuestionForm(self.joe, QUESTION_POST).save()
self.assertEquals(question.status, 'public')
############# flip setting ##############
settings.AUTO_PUBLICIZE = not settings.AUTO_PUBLICIZE
############# flip setting ##############
def test_FREE_RESPONSE(self):
self.assertTrue(settings.FREE_RESPONSE)
# joe authored the question; it is private, but with FREE_RESPONSE on, any authenticated user can respond...
self.assertFalse(ResponseForm(self.anon, self.question))
self.assertTrue(ResponseForm(self.bob, self.question))
self.assertTrue(ResponseForm(self.joe, self.question))
self.assertTrue(ResponseForm(self.admin, self.question))
############# flip setting ##############
settings.FREE_RESPONSE = not settings.FREE_RESPONSE
############# flip setting ##############
# ...now bob can't respond!
self.assertFalse(ResponseForm(self.anon, self.question))
self.assertFalse(ResponseForm(self.bob, self.question))
self.assertTrue(ResponseForm(self.joe, self.question))
self.assertTrue(ResponseForm(self.admin, self.question))
############# flip setting ##############
settings.FREE_RESPONSE = not settings.FREE_RESPONSE
############# flip setting ##############
def test_SLUG_URLS(self):
self.assertTrue(settings.SLUG_URLS)
c = Client()
self.question.public()
question_url = reverse('knowledge_thread', args=[self.question.id, slugify(self.question.title)])
r = c.get(reverse('knowledge_thread', args=[self.question.id, 'a-big-long-slug']))
self.assertEquals(r.status_code, 301)
r = c.get(question_url)
self.assertEquals(r.status_code, 200)
############# flip setting ##############
settings.SLUG_URLS = not settings.SLUG_URLS
############# flip setting ##############
r = c.get(reverse('knowledge_thread', args=[self.question.id, 'a-big-long-slug']))
self.assertEquals(r.status_code, 301)
r = c.get(question_url)
self.assertEquals(r.status_code, 301)
r = c.get(reverse('knowledge_thread_no_slug', args=[self.question.id]))
self.assertEquals(r.status_code, 200)
############# flip setting ##############
settings.SLUG_URLS = not settings.SLUG_URLS
############# flip setting ##############
|
library/oci_dhcp_options.py | slmjy/oci-ansible-modules | 106 | 19168 | <reponame>slmjy/oci-ansible-modules
#!/usr/bin/python
# Copyright (c) 2017, 2018, 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_dhcp_options
short_description: Create, update and delete OCI Dhcp Options
description:
- Creates OCI Dhcp Options
- Update OCI Dhcp Options, if present, with a new display name
- Update OCI Dhcp Options, if present, by appending new options to existing options
- Update OCI Dhcp Options, if present, by purging existing options and replacing them with
specified ones
- Delete OCI Dhcp Options, if present.
version_added: "2.5"
options:
compartment_id:
description: Identifier of the compartment under which this
Dhcp Options would be created. Mandatory for create
operation. Optional for delete and update. Mutually exclusive
with dhcp_id.
required: false
vcn_id:
description: Identifier of the Virtual Cloud Network to which the
Dhcp Options should be attached. Mandatory for create
operation. Optional for delete and update. Mutually exclusive
with dhcp_id.
required: false
dhcp_id:
description: Identifier of the Dhcp Options. Mandatory for delete and update.
required: false
aliases: ['id']
display_name:
description: Name of the Dhcp Options. A user friendly name. Does not have to be unique,
and could be changed. If not specified, a default name would be provided.
required: false
aliases: ['name']
options:
description: A set of DHCP options. Mandatory for create and update.
required: false
suboptions:
type:
description: The specific DHCP option.
required: true
choices: ['DomainNameServer', 'SearchDomain']
server_type:
description: Applicable only for the I(type='DomainNameServer'). Describes the
type of the server.
required: true
choices: ['VcnLocalPlusInternet', 'CustomDnsServer']
custom_dns_servers:
description: Applicable only for the I(type='DomainNameServer') and I(server_type='CustomDnsServer').
A maximum of three DNS server IPs is allowed as part of this option.
required: false
search_domain_names:
description: Applicable only for the I(type='SearchDomain'). A single search domain name
according to RFC 952 and RFC 1123. Do not include this option with an empty
list of search domain names, or with an empty string as the value for any search
domain name.
required: true
purge_dhcp_options:
description: Purge existing Dhcp Options which are not present in the provided
Dhcp Options. If I(purge_dhcp_options=no), provided options would be
appended to existing options. I(purge_dhcp_options) and I(delete_dhcp_options)
are mutually exclusive.
required: false
default: 'yes'
type: bool
delete_dhcp_options:
description: Delete existing Dhcp Options which are present in the Dhcp Options provided by
I(options). If I(delete_dhcp_options=yes), options provided by I(options) would be
deleted from existing options, if they are part of existing dhcp options.
If they are not part of existing dhcp options, they will be ignored.
I(delete_dhcp_options) and I(purge_dhcp_options) are mutually exclusive.
required: false
default: 'no'
type: bool
state:
description: Create, update or delete Dhcp Options. For I(state=present), if it
does not exist, it gets created. If it exists, it gets updated.
required: false
default: 'present'
choices: ['present','absent']
author:
- "<NAME>(@debayan_gupta)"
extends_documentation_fragment: [ oracle, oracle_creatable_resource, oracle_wait_options, oracle_tags ]
"""
EXAMPLES = """
#Note: These examples do not set authentication details.
#Create/update Dhcp Options
- name: Create Dhcp options
oci_dhcp_options:
compartment_id: 'ocid1.compartment..xdsc'
name: 'ansible_dhcp_options'
vcn_id: 'ocid1.vcn..aaaa'
options:
- type: 'DomainNameServer'
server_type: 'VcnLocalPlusInternet'
custom_dns_servers: []
- type: 'SearchDomain'
search_domain_names: ['ansibletestvcn.oraclevcn.com']
freeform_tags:
region: 'east'
defined_tags:
features:
capacity: 'medium'
state: 'present'
# Update Dhcp Options by appending new options
- name: Update Dhcp Options by appending new options
oci_dhcp_options:
id: 'ocid1.dhcpoptions.oc1.aaa'
purge_dhcp_options: 'no'
options:
- type: 'DomainNameServer'
server_type: 'CustomDnsServer'
custom_dns_servers: ['10.0.0.8']
- type: 'SearchDomain'
search_domain_names: ['ansibletestvcn.oraclevcn.com']
state: 'present'
# Update Dhcp Options by purging existing options
- name: Update Dhcp Options by purging existing options
oci_dhcp_options:
dhcp_id: 'ocid1.dhcpoptions.oc1.aaa'
options:
- type: 'DomainNameServer'
server_type: 'CustomDnsServer'
custom_dns_servers: ['10.0.0.8', '10.0.0.10', '10.0.0.12']
- type: 'SearchDomain'
search_domain_names: ['ansibletestvcn.oraclevcn.com']
state: 'present'
# Update Dhcp Options by deleting existing options
- name: Update Dhcp Options by deleting existing options
oci_dhcp_options:
dhcp_id: 'ocid1.dhcpoptions.oc1.aaa'
options:
- type: 'DomainNameServer'
server_type: 'CustomDnsServer'
custom_dns_servers: ['10.0.0.8', '10.0.0.10', '10.0.0.12']
delete_dhcp_options: 'yes'
state: 'present'
#Delete Dhcp Options
- name: Delete Dhcp Options
oci_dhcp_options:
dhcp_id: 'ocid1.dhcpoptions..xdsc'
state: 'absent'
"""
RETURN = """
dhcp_options:
description: Attributes of the created/updated Dhcp Options.
For delete, deleted Dhcp Options description will
be returned.
returned: success
type: complex
contains:
compartment_id:
description: The identifier of the compartment containing the Dhcp Options
returned: always
type: string
sample: ocid1.compartment.oc1.xzvf..oifds
display_name:
description: Name assigned to the Dhcp Options during creation
returned: always
type: string
sample: ansible_dhcp_options
id:
description: Identifier of the Dhcp Options
returned: always
type: string
sample: ocid1.dhcpoptions.oc1.axdf
vcn_id:
description: Identifier of the Virtual Cloud Network to which the
Dhcp Options is attached.
returned: always
type: string
sample: ocid1.vcn..ixcd
lifecycle_state:
description: The current state of the Dhcp Options
returned: always
type: string
sample: AVAILABLE
options:
description: A list of dhcp options.
returned: always
type: list
sample: [{"custom_dns_servers": [],"server_type": "CustomDnsServer","type": "DomainNameServer"},
{"search_domain_names": ["myansiblevcn.oraclevcn.com"],"type": "SearchDomain"}]
time_created:
description: Date and time when the Dhcp Options was created, in
the format defined by RFC3339
returned: always
type: datetime
sample: 2016-08-25T21:10:29.600Z
sample: {
"compartment_id":"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
"freeform_tags":{"region":"east"},
"defined_tags":{"features":{"capacity":"medium"}},
"display_name":"ansible_dhcp_options",
"id":"ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
"lifecycle_state":"AVAILABLE",
"options":[
{
"custom_dns_servers":[],
"server_type":"VcnLocalPlusInternet",
"type":"DomainNameServer"
},
{
"search_domain_names":["ansibletestvcn.oraclevcn.com"],
"type":"SearchDomain"
},
{
"custom_dns_servers":["10.0.0.8"],
"server_type":"CustomDnsServer",
"type":"DomainNameServer"
}
],
"time_created":"2017-11-26T16:41:06.996000+00:00",
"vcn_id":"ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.core import VirtualNetworkClient
from oci.exceptions import ServiceError, MaximumWaitTimeExceeded, ClientError
from oci.util import to_dict
from oci.core.models import (
CreateDhcpDetails,
DhcpDnsOption,
UpdateDhcpDetails,
DhcpSearchDomainOption,
)
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
def create_or_update_dhcp_options(virtual_network_client, module):
result = dict(changed=False, dhcp_options="")
dhcp_id = module.params.get("dhcp_id")
exclude_attributes = {"display_name": True}
try:
if dhcp_id:
existing_dhcp_options = oci_utils.get_existing_resource(
virtual_network_client.get_dhcp_options, module, dhcp_id=dhcp_id
)
result = update_dhcp_options(
virtual_network_client, existing_dhcp_options, module
)
else:
result = oci_utils.check_and_create_resource(
resource_type="dhcp_options",
create_fn=create_dhcp_options,
kwargs_create={
"virtual_network_client": virtual_network_client,
"module": module,
},
list_fn=virtual_network_client.list_dhcp_options,
kwargs_list={
"compartment_id": module.params.get("compartment_id"),
"vcn_id": module.params.get("vcn_id"),
},
module=module,
exclude_attributes=exclude_attributes,
model=CreateDhcpDetails(),
)
except ServiceError as ex:
module.fail_json(msg=ex.message)
except MaximumWaitTimeExceeded as ex:
module.fail_json(msg=str(ex))
except ClientError as ex:
module.fail_json(msg=ex.args[0])
return result
def create_dhcp_options(virtual_network_client, module):
options = get_options_objects(module.params["options"])
create_dhcp_details = CreateDhcpDetails()
for attribute in create_dhcp_details.attribute_map:
create_dhcp_details.__setattr__(attribute, module.params.get(attribute))
create_dhcp_details.options = options
result = oci_utils.create_and_wait(
resource_type="dhcp_options",
create_fn=virtual_network_client.create_dhcp_options,
kwargs_create={"create_dhcp_details": create_dhcp_details},
client=virtual_network_client,
get_fn=virtual_network_client.get_dhcp_options,
get_param="dhcp_id",
module=module,
)
return result
def update_dhcp_options(virtual_network_client, existing_dhcp_options, module):
if existing_dhcp_options is None:
raise ClientError(
Exception(
"No Dhcp Options with id "
+ module.params.get("dhcp_id")
+ " is found for update"
)
)
result = dict(dhcp_options=to_dict(existing_dhcp_options), changed=False)
name_tag_changed = False
options_changed = False
input_options = module.params.get("options")
update_dhcp_details = UpdateDhcpDetails()
existing_options = existing_dhcp_options.options
attributes_to_compare = ["display_name", "freeform_tags", "defined_tags"]
for attribute in attributes_to_compare:
name_tag_changed = oci_utils.check_and_update_attributes(
update_dhcp_details,
attribute,
module.params.get(attribute),
getattr(existing_dhcp_options, attribute),
name_tag_changed,
)
if input_options is not None:
if input_options:
options, options_changed = oci_utils.get_component_list_difference(
get_options_objects(input_options),
get_hashed_options(existing_options),
module.params.get("purge_dhcp_options"),
module.params.get("delete_dhcp_options"),
)
if options_changed:
update_dhcp_details.options = options
else:
update_dhcp_details.options = existing_options
if name_tag_changed or options_changed:
result = oci_utils.update_and_wait(
resource_type="dhcp_options",
update_fn=virtual_network_client.update_dhcp_options,
kwargs_update={
"dhcp_id": existing_dhcp_options.id,
"update_dhcp_details": update_dhcp_details,
},
client=virtual_network_client,
get_fn=virtual_network_client.get_dhcp_options,
get_param="dhcp_id",
module=module,
)
return result
def get_hashed_options(options):
hashed_options = []
if options is None:
return hashed_options
for option in options:
dhcp_option = None
if option.type == "DomainNameServer":
dhcp_option = oci_utils.create_hashed_instance(DhcpDnsOption)
dhcp_option.type = "DomainNameServer"
server_type = option.server_type
dhcp_option.server_type = server_type
if server_type == "CustomDnsServer":
dhcp_option.custom_dns_servers = option.custom_dns_servers
else:
dhcp_option.custom_dns_servers = []
elif option.type == "SearchDomain":
dhcp_option = oci_utils.create_hashed_instance(DhcpSearchDomainOption)
dhcp_option.type = "SearchDomain"
dhcp_option.search_domain_names = option.search_domain_names
hashed_options.append(dhcp_option)
return hashed_options
def get_options_objects(options):
dhcp_options = []
for option in options:
dhcp_option = None
if option["type"] == "DomainNameServer":
dhcp_option = oci_utils.create_hashed_instance(DhcpDnsOption)
dhcp_option.type = "DomainNameServer"
server_type = option["server_type"]
dhcp_option.server_type = server_type
if server_type == "CustomDnsServer":
dhcp_option.custom_dns_servers = option.get("custom_dns_servers", None)
else:
dhcp_option.custom_dns_servers = []
elif option["type"] == "SearchDomain":
dhcp_option = oci_utils.create_hashed_instance(DhcpSearchDomainOption)
dhcp_option.type = "SearchDomain"
search_domain_names = option["search_domain_names"]
if search_domain_names:
dhcp_option.search_domain_names = option["search_domain_names"]
else:
raise ClientError("search_domain_names field should not be empty")
dhcp_options.append(dhcp_option)
return dhcp_options
def delete_dhcp_options(virtual_network_client, module):
return oci_utils.delete_and_wait(
resource_type="dhcp_options",
client=virtual_network_client,
get_fn=virtual_network_client.get_dhcp_options,
kwargs_get={"dhcp_id": module.params["dhcp_id"]},
delete_fn=virtual_network_client.delete_dhcp_options,
kwargs_delete={"dhcp_id": module.params["dhcp_id"]},
module=module,
)
def main():
module_args = oci_utils.get_taggable_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
compartment_id=dict(type="str", required=False),
display_name=dict(type="str", required=False, aliases=["name"]),
vcn_id=dict(type="str", required=False),
dhcp_id=dict(type="str", required=False, aliases=["id"]),
state=dict(
type="str",
required=False,
default="present",
choices=["present", "absent"],
),
options=dict(type="list", required=False),
purge_dhcp_options=dict(type="bool", required=False, default=True),
delete_dhcp_options=dict(type="bool", required=False, default=False),
)
)
module = AnsibleModule(
argument_spec=module_args,
mutually_exclusive=[["purge_dhcp_options", "delete_dhcp_options"]],
)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module")
virtual_network_client = oci_utils.create_service_client(
module, VirtualNetworkClient
)
state = module.params["state"]
if state == "present":
result = create_or_update_dhcp_options(virtual_network_client, module)
elif state == "absent":
result = delete_dhcp_options(virtual_network_client, module)
module.exit_json(**result)
if __name__ == "__main__":
main()
|
homeassistant/components/hardware/const.py | liangleslie/core | 30,023 | 19170 | <filename>homeassistant/components/hardware/const.py
"""Constants for the Hardware integration."""
DOMAIN = "hardware"
|
cmsplugin_cascade/migrations/0009_cascadepage.py | teklager/djangocms-cascade | 139 | 19172 | <gh_stars>100-1000
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cms', '0013_urlconfrevision'),
('cmsplugin_cascade', '0008_sortableinlinecascadeelement'),
]
operations = [
migrations.CreateModel(
name='CascadePage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('settings', models.JSONField(blank=True, default={}, help_text='User editable settings for this page.')),
('glossary', models.JSONField(blank=True, default={}, help_text='Store for arbitrary page data.')),
('extended_object', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='cms.Page')),
('public_extension', models.OneToOneField(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='draft_extension', to='cmsplugin_cascade.CascadePage')),
],
options={
'db_table': 'cmsplugin_cascade_page',
'verbose_name': 'Cascade Page Settings',
'verbose_name_plural': 'Cascade Page Settings',
},
),
]
|
Week11/765.py | bobsingh149/LeetCode | 101 | 19175 | from typing import List
class Solution:
    # LeetCode 765 (Couples Holding Hands), solved with union-find over people:
    # the answer is n/2 minus the number of connected components formed by the
    # couple links and the seat-pair links.
    def minSwapsCouples(self, row: List[int]) -> int:
        # Start with each couple in one set: person 2k+1 points to person 2k.
        parent = [i for i in range(len(row))]
        for i in range(1, len(row), 2):
            parent[i] -= 1
        def findpath(u, parent):
            # Find the set root with path compression.
            if parent[u] != u:
                parent[u] = findpath(parent[u], parent)
            return parent[u]
        # Union the two people sharing each pair of adjacent seats.
        for i in range(0, len(row), 2):
            u_parent = findpath(row[i], parent)
            v_parent = findpath(row[i + 1], parent)
            parent[u_parent] = v_parent
        # Each component keeps exactly one even-numbered root, so the sum counts
        # components; required swaps = n/2 - number of components.
        return (len(row) // 2) - sum([1 for i in range(0, len(row), 2) if parent[i] == parent[i + 1] == i])
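# Illustrative sanity checks (not part of the original submission): couples (0, 1)
# and (2, 3) seated as [0, 2, 1, 3] need exactly one swap, while [0, 1, 2, 3] is
# already correctly paired.
if __name__ == "__main__":
    assert Solution().minSwapsCouples([0, 2, 1, 3]) == 1
    assert Solution().minSwapsCouples([0, 1, 2, 3]) == 0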
|
apps/hosts/views.py | kaustubh-s1/EvalAI | 1,470 | 19215 | <gh_stars>1000+
from django.contrib.auth.models import User
from rest_framework import permissions, status
from rest_framework.decorators import (
api_view,
authentication_classes,
permission_classes,
throttle_classes,
)
from rest_framework.response import Response
from rest_framework_expiring_authtoken.authentication import (
ExpiringTokenAuthentication,
)
from rest_framework.throttling import UserRateThrottle
from rest_framework_simplejwt.authentication import JWTAuthentication
from accounts.permissions import HasVerifiedEmail
from base.utils import get_model_object, team_paginated_queryset
from .filters import HostTeamsFilter
from .models import ChallengeHost, ChallengeHostTeam
from .serializers import (
ChallengeHostSerializer,
ChallengeHostTeamSerializer,
InviteHostToTeamSerializer,
HostTeamDetailSerializer,
)
from .utils import is_user_part_of_host_team
get_challenge_host_model = get_model_object(ChallengeHost)
@api_view(["GET", "POST"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes(
(
JWTAuthentication,
ExpiringTokenAuthentication,
)
)
def challenge_host_team_list(request):
if request.method == "GET":
challenge_host_team_ids = ChallengeHost.objects.filter(
user=request.user
).values_list("team_name", flat=True)
challenge_host_teams = ChallengeHostTeam.objects.filter(
id__in=challenge_host_team_ids
).order_by("-id")
filtered_teams = HostTeamsFilter(
request.GET, queryset=challenge_host_teams
)
paginator, result_page = team_paginated_queryset(
filtered_teams.qs, request
)
serializer = HostTeamDetailSerializer(result_page, many=True)
response_data = serializer.data
return paginator.get_paginated_response(response_data)
elif request.method == "POST":
serializer = ChallengeHostTeamSerializer(
data=request.data, context={"request": request}
)
if serializer.is_valid():
serializer.save()
response_data = serializer.data
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET", "PUT", "PATCH"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((JWTAuthentication, ExpiringTokenAuthentication))
def challenge_host_team_detail(request, pk):
try:
challenge_host_team = ChallengeHostTeam.objects.get(pk=pk)
except ChallengeHostTeam.DoesNotExist:
response_data = {"error": "ChallengeHostTeam does not exist"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
if request.method == "GET":
serializer = HostTeamDetailSerializer(challenge_host_team)
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
elif request.method in ["PUT", "PATCH"]:
if request.method == "PATCH":
serializer = ChallengeHostTeamSerializer(
challenge_host_team,
data=request.data,
context={"request": request},
partial=True,
)
else:
serializer = ChallengeHostTeamSerializer(
challenge_host_team,
data=request.data,
context={"request": request},
)
if serializer.is_valid():
serializer.save()
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
else:
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
@api_view(["GET", "POST"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((JWTAuthentication, ExpiringTokenAuthentication))
def challenge_host_list(request, challenge_host_team_pk):
try:
challenge_host_team = ChallengeHostTeam.objects.get(
pk=challenge_host_team_pk
)
except ChallengeHostTeam.DoesNotExist:
response_data = {"error": "ChallengeHostTeam does not exist"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
if request.method == "GET":
challenge_host_status = request.query_params.get("status", None)
filter_condition = {
"team_name": challenge_host_team,
"user": request.user,
}
if challenge_host_status:
challenge_host_status = challenge_host_status.split(",")
filter_condition.update({"status__in": challenge_host_status})
challenge_host = ChallengeHost.objects.filter(
**filter_condition
).order_by("-id")
paginator, result_page = team_paginated_queryset(
challenge_host, request
)
serializer = ChallengeHostSerializer(result_page, many=True)
response_data = serializer.data
return paginator.get_paginated_response(response_data)
elif request.method == "POST":
serializer = ChallengeHostSerializer(
data=request.data,
context={
"challenge_host_team": challenge_host_team,
"request": request,
},
)
if serializer.is_valid():
serializer.save()
response_data = serializer.data
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET", "PUT", "PATCH", "DELETE"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((JWTAuthentication, ExpiringTokenAuthentication))
def challenge_host_detail(request, challenge_host_team_pk, pk):
try:
challenge_host_team = ChallengeHostTeam.objects.get(
pk=challenge_host_team_pk
)
except ChallengeHostTeam.DoesNotExist:
response_data = {"error": "ChallengeHostTeam does not exist"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
challenge_host = get_challenge_host_model(pk)
if request.method == "GET":
serializer = ChallengeHostSerializer(challenge_host)
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
elif request.method in ["PUT", "PATCH"]:
if request.method == "PATCH":
serializer = ChallengeHostSerializer(
challenge_host,
data=request.data,
context={
"challenge_host_team": challenge_host_team,
"request": request,
},
partial=True,
)
else:
serializer = ChallengeHostSerializer(
challenge_host,
data=request.data,
context={
"challenge_host_team": challenge_host_team,
"request": request,
},
)
if serializer.is_valid():
serializer.save()
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
else:
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
elif request.method == "DELETE":
challenge_host.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(["POST"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((JWTAuthentication, ExpiringTokenAuthentication))
def create_challenge_host_team(request):
serializer = ChallengeHostTeamSerializer(
data=request.data, context={"request": request}
)
if serializer.is_valid():
serializer.save()
response_data = serializer.data
challenge_host_team = serializer.instance
challenge_host = ChallengeHost(
user=request.user,
status=ChallengeHost.SELF,
permissions=ChallengeHost.ADMIN,
team_name=challenge_host_team,
)
challenge_host.save()
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["DELETE"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((JWTAuthentication, ExpiringTokenAuthentication))
def remove_self_from_challenge_host_team(request, challenge_host_team_pk):
"""
A user can remove himself from the challenge host team.
"""
try:
ChallengeHostTeam.objects.get(pk=challenge_host_team_pk)
except ChallengeHostTeam.DoesNotExist:
response_data = {"error": "ChallengeHostTeam does not exist"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
try:
challenge_host = ChallengeHost.objects.filter(
user=request.user.id, team_name__pk=challenge_host_team_pk
)
challenge_host.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except: # noqa E722
response_data = {"error": "Sorry, you do not belong to this team."}
return Response(response_data, status=status.HTTP_401_UNAUTHORIZED)
@api_view(["POST"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((JWTAuthentication, ExpiringTokenAuthentication))
def invite_host_to_team(request, pk):
try:
challenge_host_team = ChallengeHostTeam.objects.get(pk=pk)
except ChallengeHostTeam.DoesNotExist:
response_data = {"error": "Host Team does not exist"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
email = request.data.get("email")
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
response_data = {
"error": "User does not exist with this email address!"
}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# Check if the user requesting this API is part of host team
if not is_user_part_of_host_team(request.user, challenge_host_team):
response_data = {"error": "You are not a member of this team!"}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
host = ChallengeHost.objects.filter(
team_name=challenge_host_team, user=user
)
if host.exists():
response_data = {"error": "User is already part of the team!"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
serializer = InviteHostToTeamSerializer(
data=request.data,
context={
"challenge_host_team": challenge_host_team,
"request": request,
},
)
if serializer.is_valid():
serializer.save()
response_data = {
"message": "User has been added successfully to the host team"
}
return Response(response_data, status=status.HTTP_202_ACCEPTED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
airbyte-integrations/connectors/source-square/source_square/utils.py | OTRI-Unipd/OTRI-airbyte | 6,215 | 19246 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from typing import Tuple, Union
def separate_by_count(total_length: int, part_count: int) -> Tuple[int, int]:
"""
Calculates parts needed to separate count by part_count value
For example: separate_by_count(total_length=196582, part_count=10000) returns (19, 6582) -> 19*10000 + 6582=196582
:param total_length: total number of items to split
:param part_count: number of items per part
:return: Returns the total_parts and last part count
"""
total_parts = total_length // part_count
last_part = total_length - (part_count * total_parts)
return total_parts, last_part
def separate_items_by_count(item_list: Union[list, tuple], part_count: int) -> list:
if not item_list:
return []
total_parts, _ = separate_by_count(len(item_list), part_count)
result_list = []
for i in range(total_parts):
result_list.append(item_list[part_count * i : part_count * (i + 1)])
if len(item_list) % part_count != 0:
result_list.append(item_list[total_parts * part_count :])
return result_list
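# Minimal usage sketch (illustrative, not part of the original module): chunking seven
# items into parts of three keeps the remainder as a final, shorter part.
if __name__ == "__main__":
    assert separate_items_by_count(list(range(7)), 3) == [[0, 1, 2], [3, 4, 5], [6]]
    assert separate_items_by_count([], 3) == []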
|
examples/model_zoo/test_binaries.py | Embracing/unrealcv | 1,617 | 19313 | <filename>examples/model_zoo/test_binaries.py
import subprocess, os
win_binary_path = 'UE4Binaries/{project_name}/WindowsNoEditor/{project_name}.exe'
linux_binary_path = './UE4Binaries/{project_name}/LinuxNoEditor/{project_name}/Binaries/Linux/{project_name}'
mac_binary_path = './UE4Binaries/{project_name}/MacNoEditor/{project_name}.app'
project_names = [
'RealisticRendering', 'ArchinteriorsVol2Scene1', 'ArchinteriorsVol2Scene2',
'ArchinteriorsVol2Scene3', 'UrbanCity', 'Matinee', 'PhotorealisticCharacter'
]
binaries = []
binaries += [linux_binary_path.format(project_name = v) for v in project_names]
binaries += [win_binary_path.format(project_name = v) for v in project_names]
binaries += [mac_binary_path.format(project_name = v) for v in project_names]
if __name__ == '__main__':
if not os.path.isdir('output'):
os.mkdir('output')
for binary_path in binaries:
project_name = os.path.basename(binary_path).split('.')[0]
output_folder = os.path.join('output', project_name)
if not os.path.isfile(binary_path) and not os.path.isdir(binary_path):
print('Can not find binary "%s", skip' % binary_path)
continue
print('Testing %s ..., output will be saved to "%s"' % (binary_path, output_folder))
subprocess.call([
'python', 'examples/commands_demo.py',
binary_path, '--output', output_folder
])
|
bindings/python/examples/05b_get_output.py | GoldenPedro/iota.rs | 256 | 19315 | import iota_client
client = iota_client.Client()
print(
client.get_output("a22cba0667c922cbb1f8bdcaf970b2a881ccd6e88e2fcce50374de2aac7c37720000")
)
|
aiida/storage/psql_dos/migrations/versions/django_0040_data_migration_legacy_process_attributes.py | mkrack/aiida-core | 153 | 19327 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Migrate some legacy process attributes.
Attribute keys that are renamed:
* `_sealed` -> `sealed`
Attribute keys that are removed entirely:
* `_finished`
* `_failed`
* `_aborted`
* `_do_abort`
Finally, after these first migrations, any remaining process nodes that still do not have a sealed attribute have
it set to `True`, excluding the nodes that have a `process_state` attribute of one of the active states `created`,
`running` or `waiting`, because those are actual valid active processes that are not yet sealed.
This is identical to migration e734dd5e50d7
Revision ID: django_0040
Revises: django_0039
"""
from alembic import op
import sqlalchemy as sa
revision = 'django_0040'
down_revision = 'django_0039'
branch_labels = None
depends_on = None
def upgrade():
"""Migrations for the upgrade."""
conn = op.get_bind()
statement = sa.text(
"""
UPDATE db_dbnode
SET attributes = jsonb_set(attributes, '{"sealed"}', attributes->'_sealed')
WHERE attributes ? '_sealed' AND node_type LIKE 'process.%';
-- Copy `_sealed` -> `sealed`
UPDATE db_dbnode SET attributes = attributes - '_sealed'
WHERE attributes ? '_sealed' AND node_type LIKE 'process.%';
-- Delete `_sealed`
UPDATE db_dbnode SET attributes = attributes - '_finished'
WHERE attributes ? '_finished' AND node_type LIKE 'process.%';
-- Delete `_finished`
UPDATE db_dbnode SET attributes = attributes - '_failed'
WHERE attributes ? '_failed' AND node_type LIKE 'process.%';
-- Delete `_failed`
UPDATE db_dbnode SET attributes = attributes - '_aborted'
WHERE attributes ? '_aborted' AND node_type LIKE 'process.%';
-- Delete `_aborted`
UPDATE db_dbnode SET attributes = attributes - '_do_abort'
WHERE attributes ? '_do_abort' AND node_type LIKE 'process.%';
-- Delete `_do_abort`
UPDATE db_dbnode
SET attributes = jsonb_set(attributes, '{"sealed"}', to_jsonb(True))
WHERE
node_type LIKE 'process.%' AND
NOT (attributes ? 'sealed') AND
attributes->>'process_state' NOT IN ('created', 'running', 'waiting');
-- Set `sealed=True` for process nodes that do not yet have a `sealed` attribute AND are not in an active state
"""
)
conn.execute(statement)
def downgrade():
"""Migrations for the downgrade."""
raise NotImplementedError('Downgrade of django_0040.')
|
src/scenic/simulators/gta/map.py | cahartsell/Scenic | 141 | 19333 |
# stub to allow changing the map without having to alter gta_model.sc
import os
mapPath = 'map.npz'
def setLocalMap(module, relpath):
global mapPath
base = os.path.dirname(module)
mapPath = os.path.join(base, relpath)
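# Typical use from a scenario module (an illustrative sketch; the exact call site is an
# assumption, though the default filename matches mapPath above):
#   setLocalMap(__file__, 'map.npz')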
|
bigflow_python/python/bigflow/pipeline/test/testdata/columns/columns/column_sum.py | advancedxy/bigflow_python | 1,236 | 19347 | #!/usr/bin/env python
# encoding: utf-8
########################################################################
#
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
########################################################################
from bigflow import transforms
def column_sum(pcollection, columns):
"""
    For the input PCollection, compute the column-wise sum of all elements over the specified columns.
    Args:
        pcollection (PCollection): the input PCollection
        columns (list): the columns to sum over
    Returns:
        PObject: the aggregated result
>>> import columns
>>> _p = _pipeline.parallelize([(1, 1, 1), (1, 2, 2), (1, 3, 1)])
>>> columns.column_sum(_p, [0, 1]).get()
[3, 6]
"""
cols = columns
def _get_columns(record):
return [record[column] for column in cols]
return pcollection.map(_get_columns) \
.reduce(lambda x, y: [a + b for a, b in zip(x, y)])
|
build-a-django-content-aggregator/source_code_step_2/podcasts/tests.py | syberflea/materials | 3,682 | 19376 | <filename>build-a-django-content-aggregator/source_code_step_2/podcasts/tests.py
from django.test import TestCase
from django.utils import timezone
from .models import Episode
class PodCastsTests(TestCase):
def setUp(self):
self.episode = Episode.objects.create(
title="My Awesome Podcast Episode",
description="Look mom, I made it!",
pub_date=timezone.now(),
link="https://myawesomeshow.com",
image="https://image.myawesomeshow.com",
podcast_name="My Python Podcast",
guid="de194720-7b4c-49e2-a05f-432436d3fetr",
)
def test_episode_content(self):
self.assertEqual(self.episode.description, "Look mom, I made it!")
self.assertEqual(self.episode.link, "https://myawesomeshow.com")
self.assertEqual(
self.episode.guid, "de194720-7b4c-49e2-a05f-432436d3fetr"
)
def test_episode_str_representation(self):
self.assertEqual(
str(self.episode), "My Python Podcast: My Awesome Podcast Episode"
)
|
superset/migrations/versions/070c043f2fdb_add_granularity_to_charts_where_missing.py | razzius/superset | 18,621 | 19377 | <reponame>razzius/superset
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add granularity to charts where missing
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2021-02-04 09:34:13.608891
"""
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "4<PASSWORD>"
import json
from alembic import op
from sqlalchemy import and_, Boolean, Column, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
from superset import db
Base = declarative_base()
class Slice(Base):
__tablename__ = "slices"
id = Column(Integer, primary_key=True)
params = Column(Text)
datasource_id = Column(Integer)
datasource_type = Column(String(200))
class SqlaTable(Base):
__tablename__ = "tables"
id = Column(Integer, primary_key=True)
main_dttm_col = Column(String(250))
class TableColumn(Base):
__tablename__ = "table_columns"
id = Column(Integer, primary_key=True)
table_id = Column(Integer)
is_dttm = Column(Boolean)
column_name = Column(String(255))
def upgrade():
"""
Adds the granularity param to charts without it populated. This is required for
time range filtering to work properly. Uses the following approach:
- Find all charts without a granularity or granularity_sqla param.
- Get the dataset that backs the chart.
- If the dataset has the main dttm column set, use it.
- Otherwise, find all the dttm columns in the dataset and use the first one (this
matches the behavior of Explore view on the frontend)
- If no dttm columns exist in the dataset, don't change the chart.
"""
bind = op.get_bind()
session = db.Session(bind=bind)
slices_changed = 0
for slc in (
session.query(Slice)
.filter(
and_(
Slice.datasource_type == "table", Slice.params.notlike('%"granularity%')
)
)
.all()
):
try:
params = json.loads(slc.params)
if "granularity" in params or "granularity_sqla" in params:
continue
table = session.query(SqlaTable).get(slc.datasource_id)
if not table:
continue
if table.main_dttm_col:
params["granularity"] = table.main_dttm_col
slc.params = json.dumps(params, sort_keys=True)
print(f"Set granularity for slice {slc.id} to {table.main_dttm_col}")
slices_changed += 1
continue
table_columns = (
session.query(TableColumn)
.filter(TableColumn.table_id == table.id)
.filter(TableColumn.is_dttm == True)
.all()
)
if len(table_columns):
params["granularity"] = table_columns[0].column_name
slc.params = json.dumps(params, sort_keys=True)
print(
f"Set granularity for slice {slc.id} to {table_columns[0].column_name}"
)
slices_changed += 1
except Exception as e:
print(e)
print(f"Parsing params for slice {slc.id} failed.")
pass
print(f"{slices_changed} slices altered")
session.commit()
session.close()
def downgrade():
"""
It's impossible to downgrade this migration.
"""
pass
|
habitat_baselines/motion_planning/robot_target.py | srama2512/habitat-api | 355 | 19416 | <filename>habitat_baselines/motion_planning/robot_target.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import attr
import magnum as mn
import numpy as np
@attr.s(auto_attribs=True, slots=True)
class RobotTarget:
"""
Data class to define the target needed as input for the motion planner.
"""
# End-effector in world coordinate frame.
ee_target_pos: np.ndarray = None
obj_id_target: int = None
joints_target: np.ndarray = None
is_guess: bool = False
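# Illustrative construction (not part of the original module): every field has a
# default, so a target can be specified by an end-effector position alone, e.g.
#   RobotTarget(ee_target_pos=np.array([0.5, 0.0, 0.3]))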
@attr.s(auto_attribs=True, slots=True)
class ObjectGraspTarget:
"""
Data class to define the target needed as input for the grasp planner.
"""
# Bounding Box
bb: mn.Range3D
translation: mn.Matrix4
|
demo_scripts/charts/bar_chart_index_translator_demo.py | webclinic017/qf-lib | 198 | 19424 | <filename>demo_scripts/charts/bar_chart_index_translator_demo.py
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import pandas as pd
from qf_lib.common.enums.orientation import Orientation
from qf_lib.plotting.charts.bar_chart import BarChart
from qf_lib.plotting.decorators.data_element_decorator import DataElementDecorator
from qf_lib.plotting.helpers.index_translator import IndexTranslator
index = ['constant', 'b', 'c', 'd']
# index = [0, 4, 5, 6]
labels_to_locations_dict = {
'constant': 0,
'b': 4,
'c': 5,
'd': 6
}
colors = ['orange'] + ['forestgreen'] * 3
def main():
# using automatic mapping between labels and locations
bar_chart2 = BarChart(orientation=Orientation.Horizontal, index_translator=IndexTranslator(),
thickness=1.0, color=colors, align='center')
bar_chart2.add_decorator(DataElementDecorator(pd.Series(data=[1, 2, 3, 4], index=index)))
bar_chart2.add_decorator(DataElementDecorator(pd.Series(data=[3, 1, 2, 4], index=index)))
bar_chart2.plot()
# using custom mapping between labels and locations
bar_chart = BarChart(orientation=Orientation.Horizontal, index_translator=IndexTranslator(labels_to_locations_dict),
thickness=1.0, color=colors, align='center')
bar_chart.add_decorator(DataElementDecorator(pd.Series(data=[1, 2, 3, 4], index=index)))
bar_chart.add_decorator(DataElementDecorator(pd.Series(data=[3, 1, 2, 4], index=index)))
bar_chart.plot()
plt.show(block=True)
if __name__ == '__main__':
main()
|
src/oic/oauth2/util.py | alanbuxey/pyoidc | 290 | 19444 | import logging
from http import cookiejar as http_cookiejar
from http.cookiejar import http2time # type: ignore
from typing import Any # noqa
from typing import Dict # noqa
from urllib.parse import parse_qs
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from oic.exception import UnSupported
from oic.oauth2.exception import TimeFormatError
from oic.utils.sanitize import sanitize
logger = logging.getLogger(__name__)
__author__ = "roland"
URL_ENCODED = "application/x-www-form-urlencoded"
JSON_ENCODED = "application/json"
DEFAULT_POST_CONTENT_TYPE = URL_ENCODED
PAIRS = {
"port": "port_specified",
"domain": "domain_specified",
"path": "path_specified",
}
ATTRS = {
"version": None,
"name": "",
"value": None,
"port": None,
"port_specified": False,
"domain": "",
"domain_specified": False,
"domain_initial_dot": False,
"path": "",
"path_specified": False,
"secure": False,
"expires": None,
"discard": True,
"comment": None,
"comment_url": None,
"rest": "",
"rfc2109": True,
} # type: Dict[str, Any]
def get_or_post(
uri, method, req, content_type=DEFAULT_POST_CONTENT_TYPE, accept=None, **kwargs
):
"""
    Construct the HTTP request path, body and keyword arguments from a request message.

    :param uri: The target URI of the request
    :param method: The HTTP method to use (GET, DELETE, POST or PUT)
    :param req: The request Message instance to serialize
    :param content_type: Body serialization for POST/PUT (urlencoded or JSON)
    :param accept: Optional value for the Accept header
    :param kwargs: Extra keyword arguments, e.g. extra headers
    :return: Tuple of (path, body, kwargs)
"""
if method in ["GET", "DELETE"]:
if req.keys():
_req = req.copy()
comp = urlsplit(str(uri))
if comp.query:
_req.update(parse_qs(comp.query))
_query = str(_req.to_urlencoded())
path = urlunsplit(
(comp.scheme, comp.netloc, comp.path, _query, comp.fragment)
)
else:
path = uri
body = None
elif method in ["POST", "PUT"]:
path = uri
if content_type == URL_ENCODED:
body = req.to_urlencoded()
elif content_type == JSON_ENCODED:
body = req.to_json()
else:
raise UnSupported("Unsupported content type: '%s'" % content_type)
header_ext = {"Content-Type": content_type}
if accept:
header_ext = {"Accept": accept}
if "headers" in kwargs.keys():
kwargs["headers"].update(header_ext)
else:
kwargs["headers"] = header_ext
else:
raise UnSupported("Unsupported HTTP method: '%s'" % method)
return path, body, kwargs
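# Hedged usage sketch (not part of the original module): `req` is assumed to be a
# Message-like object exposing keys()/to_urlencoded()/to_json(). For a POST with the
# default content type and no `accept` value, the call returns the URI unchanged, the
# urlencoded body, and kwargs whose headers carry the form-encoded Content-Type:
#   path, body, kwargs = get_or_post("https://op.example.com/token", "POST", token_request)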
def set_cookie(cookiejar, kaka):
"""
    Place a cookie (an http_cookiejar.Cookie based on a set-cookie header line) in the cookie jar.
    Always choose the shortest expires time.
:param cookiejar:
:param kaka: Cookie
"""
# default rfc2109=False
# max-age, httponly
for cookie_name, morsel in kaka.items():
std_attr = ATTRS.copy()
std_attr["name"] = cookie_name
_tmp = morsel.coded_value
if _tmp.startswith('"') and _tmp.endswith('"'):
std_attr["value"] = _tmp[1:-1]
else:
std_attr["value"] = _tmp
std_attr["version"] = 0
attr = ""
# copy attributes that have values
try:
for attr in morsel.keys():
if attr in ATTRS:
if morsel[attr]:
if attr == "expires":
std_attr[attr] = http2time(morsel[attr])
else:
std_attr[attr] = morsel[attr]
elif attr == "max-age":
if morsel[attr]:
std_attr["expires"] = http2time(morsel[attr])
except TimeFormatError:
# Ignore cookie
logger.info(
"Time format error on %s parameter in received cookie"
% (sanitize(attr),)
)
continue
for att, spec in PAIRS.items():
if std_attr[att]:
std_attr[spec] = True
if std_attr["domain"] and std_attr["domain"].startswith("."):
std_attr["domain_initial_dot"] = True
if morsel["max-age"] == 0:
try:
cookiejar.clear(
domain=std_attr["domain"],
path=std_attr["path"],
name=std_attr["name"],
)
except ValueError:
pass
else:
# Fix for Microsoft cookie error
if "version" in std_attr:
try:
std_attr["version"] = std_attr["version"].split(",")[0]
except (TypeError, AttributeError):
pass
new_cookie = http_cookiejar.Cookie(**std_attr) # type: ignore
cookiejar.set_cookie(new_cookie)
def match_to_(val, vlist):
if isinstance(vlist, str):
if vlist.startswith(val):
return True
else:
for v in vlist:
if v.startswith(val):
return True
return False
def verify_header(reqresp, body_type):
logger.debug("resp.headers: %s" % (sanitize(reqresp.headers),))
logger.debug("resp.txt: %s" % (sanitize(reqresp.text),))
if body_type == "":
_ctype = reqresp.headers["content-type"]
if match_to_("application/json", _ctype):
body_type = "json"
elif match_to_("application/jwt", _ctype):
body_type = "jwt"
elif match_to_(URL_ENCODED, _ctype):
body_type = "urlencoded"
else:
body_type = "txt" # reasonable default ??
elif body_type == "json":
if not match_to_("application/json", reqresp.headers["content-type"]):
if match_to_("application/jwt", reqresp.headers["content-type"]):
body_type = "jwt"
else:
raise ValueError(
"content-type: %s" % (reqresp.headers["content-type"],)
)
elif body_type == "jwt":
if not match_to_("application/jwt", reqresp.headers["content-type"]):
raise ValueError(
"Wrong content-type in header, got: {} expected "
"'application/jwt'".format(reqresp.headers["content-type"])
)
elif body_type == "urlencoded":
if not match_to_(DEFAULT_POST_CONTENT_TYPE, reqresp.headers["content-type"]):
if not match_to_("text/plain", reqresp.headers["content-type"]):
raise ValueError("Wrong content-type")
else:
raise ValueError("Unknown return format: %s" % body_type)
return body_type
|
src/tfi/publish.py | ajbouh/tfi | 160 | 19456 | import decimal
import hashlib
import json
import requests
import tempfile
import uuid
import os
from tqdm import tqdm
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
def sha256_for_file(f, buf_size=65536):
pos = f.tell()
dgst = hashlib.sha256()
while True:
data = f.read(buf_size)
if not data:
break
dgst.update(data)
size = f.tell() - pos
f.seek(pos)
return size, dgst.hexdigest()
namespace = "default"
fission_url = os.environ["FISSION_URL"]
def post(rel_url, data):
response = requests.post(
"%s%s" % (fission_url, rel_url),
data=json.dumps(data),
headers={"Content-Type": "application/json"})
# print("POST", rel_url)
# print(response, response.text)
if response.status_code in [404, 409]:
return response.status_code, None
if response.status_code == 500:
raise Exception(response.text)
return response.status_code, response.json()
def get(rel_url, params=None):
response = requests.get(
"%s%s" % (fission_url, rel_url),
params=params)
if response.status_code == 404:
return response.status_code, None
if response.status_code == 500:
raise Exception(response.text)
return response.status_code, response.json()
def format_bytes(count):
label_ix = 0
labels = ["B", "KiB", "MiB", "GiB"]
    while label_ix < len(labels) - 1 and count / 1024. > 1:  # stop at the largest defined unit instead of indexing past the label list
count = count / 1024.
label_ix += 1
count = decimal.Decimal(count)
count = count.to_integral() if count == count.to_integral() else round(count.normalize(), 2)
return "%s %s" % (count, labels[label_ix])
def lazily_define_package(environment, file):
filesize, archive_sha256 = sha256_for_file(file)
base_archive_url = "%s/proxy/storage/v1/archive" % fission_url
status_code, response = get("/v2/packages/%s" % archive_sha256)
if status_code == 200:
print("Already uploaded", flush=True)
return archive_sha256, response
progress = tqdm(
total=filesize,
desc="Uploading",
unit="B",
unit_scale=True,
unit_divisor=1024,
leave=True)
last_bytes_read = 0
def update_progress(monitor):
# Your callback function
nonlocal last_bytes_read
progress.update(monitor.bytes_read - last_bytes_read)
last_bytes_read = monitor.bytes_read
e = MultipartEncoder(fields={'uploadfile': ('uploaded', file, 'text/plain')})
m = MultipartEncoderMonitor(e, update_progress)
archive_response = requests.post(base_archive_url,
data=m,
headers={
"X-File-Size": str(filesize),
'Content-Type': m.content_type})
archive_id = archive_response.json()['id']
print(" done", flush=True)
archive_url = "%s?id=%s" % (base_archive_url, archive_id)
package = {
"metadata": {
"name": archive_sha256,
"namespace": namespace,
},
"spec": {
"environment": environment,
"deployment": {
"type": "url",
"url": archive_url,
"checksum": {
"type": "sha256",
"sum": archive_sha256,
},
},
},
"status": {
"buildstatus": "succeeded",
},
}
return archive_sha256, post("/v2/packages", package)[1]
def lazily_define_function(environment, f):
archive_sha256, package_ref = lazily_define_package(environment, f)
print("Registering ...", end='', flush=True)
function_name = archive_sha256[:8]
status_code, response = get("/v2/functions/%s" % function_name)
if status_code == 200:
return function_name
status_code, r = post("/v2/functions", {
"metadata": {
"name": function_name,
"namespace": namespace,
},
"spec": {
"environment": environment,
"package": {
"functionName": function_name,
"packageref": package_ref,
},
},
})
if status_code == 409 or status_code == 201:
print(" done", flush=True)
return function_name
print(" error", flush=True)
    raise Exception(r)  # post() already returns the decoded JSON body, not a Response
def lazily_define_trigger2(function_name, http_method, host, relativeurl):
trigger_name = "%s-%s-%s" % (
host.replace('.', '-'),
relativeurl.replace(':.*', '').replace('{', '').replace('}', '').replace('/', '-'),
http_method.lower())
status_code, response = get("/v2/triggers/http/%s" % trigger_name)
if status_code == 200:
return
status_code, r = post("/v2/triggers/http", {
"metadata": {
"name": trigger_name,
"namespace": namespace,
},
"spec": {
"host": host,
"relativeurl": relativeurl,
"method": http_method,
"functionref": {
"Type": "name",
"Name": function_name,
},
},
})
if status_code == 409 or status_code == 201:
return
    raise Exception(r)  # post() already returns the decoded JSON body, not a Response
def publish(environment_name, f):
environment = {
"namespace": namespace,
"name": environment_name,
}
function_name = lazily_define_function(environment, f)
host = "%s.tfi.gcp.tesserai.com" % function_name
lazily_define_trigger2(function_name, "POST", host, "/{path-info:.*}")
lazily_define_trigger2(function_name, "GET", host, "/{path-info:.*}")
lazily_define_trigger2(function_name, "GET", host, "/")
return "http://%s" % host
|
examples/images/autoencoder.py | jjpalacio/tflearn | 10,882 | 19461 | # -*- coding: utf-8 -*-
""" Auto Encoder Example.
Using an auto encoder on MNIST handwritten digits.
References:
<NAME>, <NAME>, <NAME>, and <NAME>. "Gradient-based
learning applied to document recognition." Proceedings of the IEEE,
86(11):2278-2324, November 1998.
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import tflearn
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)
# Building the encoder
encoder = tflearn.input_data(shape=[None, 784])
encoder = tflearn.fully_connected(encoder, 256)
encoder = tflearn.fully_connected(encoder, 64)
# Building the decoder
decoder = tflearn.fully_connected(encoder, 256)
decoder = tflearn.fully_connected(decoder, 784, activation='sigmoid')
# Regression, with mean square error
net = tflearn.regression(decoder, optimizer='adam', learning_rate=0.001,
loss='mean_square', metric=None)
# Training the auto encoder
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, X, n_epoch=20, validation_set=(testX, testX),
run_id="auto_encoder", batch_size=256)
# Encoding X[0] for test
print("\nTest encoding of X[0]:")
# New model, re-using the same session, for weights sharing
encoding_model = tflearn.DNN(encoder, session=model.session)
print(encoding_model.predict([X[0]]))
# Testing the image reconstruction on new data (test set)
print("\nVisualizing results after being encoded and decoded:")
testX = tflearn.data_utils.shuffle(testX)[0]
# Applying encode and decode over test set
encode_decode = model.predict(testX)
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(10):
temp = [[ii, ii, ii] for ii in list(testX[i])]
a[0][i].imshow(np.reshape(temp, (28, 28, 3)))
temp = [[ii, ii, ii] for ii in list(encode_decode[i])]
a[1][i].imshow(np.reshape(temp, (28, 28, 3)))
f.show()
plt.draw()
plt.waitforbuttonpress()
|
src/asphalt/core/concurrent.py | agronholm/asphalt | 226 | 19485 | from __future__ import annotations
__all__ = ("executor",)
import inspect
import sys
from asyncio import get_running_loop
from concurrent.futures import Executor
from functools import partial, wraps
from typing import Awaitable, Callable, TypeVar, overload
from asphalt.core import Context
if sys.version_info >= (3, 10):
from typing import Concatenate, ParamSpec
else:
from typing_extensions import Concatenate, ParamSpec
T_Retval = TypeVar("T_Retval")
P = ParamSpec("P")
@overload
def executor(
func_or_executor: Executor | str,
) -> Callable[
[Callable[Concatenate[Context, P], T_Retval]],
Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]],
]:
...
@overload
def executor(
func_or_executor: Callable[Concatenate[Context, P], T_Retval]
) -> Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]]:
...
def executor(
func_or_executor: Executor | str | Callable[Concatenate[Context, P], T_Retval]
) -> (
Callable[
[Callable[Concatenate[Context, P], T_Retval]],
Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]],
]
| Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]]
):
"""
Decorate a function to run in an executor.
If no executor (or ``None``) is given, the current event loop's default executor is
used. Otherwise, the argument must be a PEP 3148 compliant thread pool executor or
the name of an :class:`~concurrent.futures.Executor` instance.
If a decorated callable is called in a worker thread, the executor argument is
ignored and the wrapped function is called directly.
Callables wrapped with this decorator must be used with ``await`` when called in the
event loop thread.
Example use with the default executor (``None``)::
@executor
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
With a named :class:`~concurrent.futures.Executor` resource::
@executor('special_ops')
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
:param func_or_executor: either a callable (when used as a decorator), an executor
instance or the name of an :class:`~concurrent.futures.Executor` resource
"""
def outer(
func: Callable[Concatenate[Context, P], T_Retval]
) -> Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]]:
def wrapper(
ctx: Context, *args: P.args, **kwargs: P.kwargs
) -> T_Retval | Awaitable[T_Retval]:
try:
loop = get_running_loop()
except RuntimeError:
# Event loop not available -- we're in a worker thread
return func(ctx, *args, **kwargs)
# Resolve the executor resource name to an Executor instance
_executor: Executor | None
if isinstance(executor, str):
_executor = ctx.require_resource(Executor, executor)
else:
_executor = executor
callback = partial(func, ctx, *args, **kwargs)
return loop.run_in_executor(_executor, callback)
assert not inspect.iscoroutinefunction(
func
), "Cannot wrap coroutine functions to be run in an executor"
return wraps(func)(wrapper)
executor: Executor | str | None = None
if isinstance(func_or_executor, (str, Executor)):
executor = func_or_executor
return outer
else:
return outer(func_or_executor)
|
pyexcel_xlsx/__init__.py | pyexcel/pyexcel-xlsx | 101 | 19506 | <gh_stars>100-1000
"""
pyexcel_xlsx
~~~~~~~~~~~~~~~~~~~
The lower level xlsx file format handler using openpyxl
:copyright: (c) 2015-2019 by Onni Software Ltd & its contributors
:license: New BSD License
"""
from pyexcel_io.io import get_data as read_data
from pyexcel_io.io import isstream
from pyexcel_io.io import save_data as write_data
from pyexcel_io.plugins import IOPluginInfoChainV2
__FILE_TYPE__ = "xlsx"
IOPluginInfoChainV2(__name__).add_a_reader(
relative_plugin_class_path="xlsxr.XLSXBook",
locations=["file", "memory"],
file_types=[__FILE_TYPE__, "xlsm"],
stream_type="binary",
).add_a_reader(
relative_plugin_class_path="xlsxr.XLSXBookInContent",
locations=["content"],
file_types=[__FILE_TYPE__, "xlsm"],
stream_type="binary",
).add_a_writer(
relative_plugin_class_path="xlsxw.XLSXWriter",
locations=["file", "memory"],
file_types=[__FILE_TYPE__, "xlsm"],
stream_type="binary",
)
def save_data(afile, data, file_type=None, **keywords):
"""standalone module function for writing module supported file type"""
if isstream(afile) and file_type is None:
file_type = __FILE_TYPE__
write_data(afile, data, file_type=file_type, **keywords)
def get_data(afile, file_type=None, **keywords):
"""standalone module function for reading module supported file type"""
if isstream(afile) and file_type is None:
file_type = __FILE_TYPE__
return read_data(afile, file_type=file_type, **keywords)
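# Usage sketch (the file name below is illustrative, not part of this module):
#   save_data("example.xlsx", {"Sheet 1": [[1, 2], [3, 4]]})
#   sheets = get_data("example.xlsx")
#   # sheets maps sheet names to lists of rows, e.g. {"Sheet 1": [[1, 2], [3, 4]]}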
|
torch_geometric_temporal/signal/__init__.py | tforgaard/pytorch_geometric_temporal | 1,410 | 19522 | <filename>torch_geometric_temporal/signal/__init__.py
from .dynamic_graph_temporal_signal import *
from .dynamic_graph_temporal_signal_batch import *
from .static_graph_temporal_signal import *
from .static_graph_temporal_signal_batch import *
from .dynamic_graph_static_signal import *
from .dynamic_graph_static_signal_batch import *
from .train_test_split import *
|
packages/grid/apps/worker/src/main/core/database/groups/groups.py | exityan/PySyft | 425 | 19570 | # grid relative
from .. import BaseModel
from .. import db
class Group(BaseModel):
__tablename__ = "group"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
name = db.Column(db.String(255))
def __str__(self):
return f"<Group id: {self.id}, name: {self.name}>"
|
solution/data_structure2/1302/main.py | jungyoonoh/baekjoon-1 | 2,236 | 19572 | <reponame>jungyoonoh/baekjoon-1
# Authored by : gusdn3477
# Co-authored by : -
# Link : http://boj.kr/8adc986ae26b461eadd65abdff3cfba9
import sys
def input():
return sys.stdin.readline().rstrip()
N = int(input())
book = {}
for i in range(N):
name = input()
if name not in book:
book[name] = 1
else:
book[name] += 1
book = list(book.items())
book.sort(key = lambda x : (-x[1],x[0]))
print(book[0][0])
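# Example (hand-checked): for N=3 and the titles "icecream", "peanut", "icecream",
# book becomes [("icecream", 2), ("peanut", 1)] after sorting, so "icecream"
# is printed as the bestseller.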
|
helper/evaluator.py | manipopopo/TC-ResNet | 185 | 19584 | import csv
import sys
from pathlib import Path
from abc import abstractmethod
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import common.tf_utils as tf_utils
import metrics.manager as metric_manager
from common.model_loader import Ckpt
from common.utils import format_text
from common.utils import get_logger
from helper.base import AudioBase
from metrics.summaries import BaseSummaries
from metrics.summaries import Summaries
class Evaluator(object):
def __init__(self, model, session, args, dataset, dataset_name, name):
self.log = get_logger(name)
self.model = model
self.session = session
self.args = args
self.dataset = dataset
self.dataset_name = dataset_name
if Path(self.args.checkpoint_path).is_dir():
latest_checkpoint = tf.train.latest_checkpoint(self.args.checkpoint_path)
if latest_checkpoint is not None:
self.args.checkpoint_path = latest_checkpoint
self.log.info(f"Get latest checkpoint and update to it: {self.args.checkpoint_path}")
self.watch_path = self._build_watch_path()
self.session.run(tf.global_variables_initializer())
self.session.run(tf.local_variables_initializer())
self.ckpt_loader = Ckpt(
session=session,
include_scopes=args.checkpoint_include_scopes,
exclude_scopes=args.checkpoint_exclude_scopes,
ignore_missing_vars=args.ignore_missing_vars,
use_ema=self.args.use_ema,
ema_decay=self.args.ema_decay,
)
@abstractmethod
def setup_metric_manager(self):
raise NotImplementedError
@abstractmethod
def setup_metric_ops(self):
raise NotImplementedError
@abstractmethod
def build_non_tensor_data_from_eval_dict(self, eval_dict, **kwargs):
raise NotImplementedError
@abstractmethod
def setup_dataset_iterator(self):
raise NotImplementedError
def _build_watch_path(self):
if Path(self.args.checkpoint_path).is_dir():
return Path(self.args.checkpoint_path)
else:
return Path(self.args.checkpoint_path).parent
def build_evaluation_step(self, checkpoint_path):
if "-" in checkpoint_path and checkpoint_path.split("-")[-1].isdigit():
return int(checkpoint_path.split("-")[-1])
else:
return 0
def build_checkpoint_paths(self, checkpoint_path):
checkpoint_glob = Path(checkpoint_path + "*")
checkpoint_path = Path(checkpoint_path)
return checkpoint_glob, checkpoint_path
def build_miscellaneous_path(self, name):
target_dir = self.watch_path / "miscellaneous" / self.dataset_name / name
if not target_dir.exists():
target_dir.mkdir(parents=True)
return target_dir
def setup_best_keeper(self):
metric_with_modes = self.metric_manager.get_best_keep_metric_with_modes()
self.log.debug(metric_with_modes)
self.best_keeper = tf_utils.BestKeeper(
metric_with_modes,
self.dataset_name,
self.watch_path,
self.log,
)
def evaluate_once(self, checkpoint_path):
self.log.info("Evaluation started")
self.setup_dataset_iterator()
self.ckpt_loader.load(checkpoint_path)
step = self.build_evaluation_step(checkpoint_path)
checkpoint_glob, checkpoint_path = self.build_checkpoint_paths(checkpoint_path)
self.session.run(tf.local_variables_initializer())
eval_metric_dict = self.run_evaluation(step, is_training=False)
best_keep_metric_dict = self.metric_manager.filter_best_keep_metric(eval_metric_dict)
is_keep, metrics_keep = self.best_keeper.monitor(self.dataset_name, best_keep_metric_dict)
if self.args.save_best_keeper:
meta_info = {
"step": step,
"model_size": self.model.total_params,
}
self.best_keeper.remove_old_best(self.dataset_name, metrics_keep)
self.best_keeper.save_best(self.dataset_name, metrics_keep, checkpoint_glob)
self.best_keeper.remove_temp_dir()
self.best_keeper.save_scores(self.dataset_name, metrics_keep, best_keep_metric_dict, meta_info)
self.metric_manager.write_evaluation_summaries(step=step,
collection_keys=[BaseSummaries.KEY_TYPES.DEFAULT])
self.metric_manager.log_metrics(step=step)
self.log.info("Evaluation finished")
if step >= self.args.max_step_from_restore:
self.log.info("Evaluation stopped")
sys.exit()
def build_train_directory(self):
if Path(self.args.checkpoint_path).is_dir():
return str(self.args.checkpoint_path)
else:
return str(Path(self.args.checkpoint_path).parent)
@staticmethod
def add_arguments(parser):
g = parser.add_argument_group("(Evaluator) arguments")
g.add_argument("--valid_type", default="loop", type=str, choices=["loop", "once"])
g.add_argument("--max_outputs", default=5, type=int)
g.add_argument("--maximum_num_labels_for_metric", default=10, type=int,
help="Maximum number of labels for using class-specific metrics(e.g. precision/recall/f1score)")
g.add_argument("--no-save_best_keeper", dest="save_best_keeper", action="store_false")
g.add_argument("--save_best_keeper", dest="save_best_keeper", action="store_true")
g.set_defaults(save_best_keeper=True)
g.add_argument("--no-flatten_output", dest="flatten_output", action="store_false")
g.add_argument("--flatten_output", dest="flatten_output", action="store_true")
g.set_defaults(flatten_output=False)
g.add_argument("--max_step_from_restore", default=1e20, type=int)
class SingleLabelAudioEvaluator(Evaluator, AudioBase):
def __init__(self, model, session, args, dataset, dataset_name):
super().__init__(model, session, args, dataset, dataset_name, "SingleLabelAudioEvaluator")
self.setup_dataset_related_attr()
self.setup_metric_manager()
self.setup_metric_ops()
self.setup_best_keeper()
def setup_dataset_related_attr(self):
assert len(self.dataset.label_names) == self.args.num_classes
self.use_class_metrics = len(self.dataset.label_names) < self.args.maximum_num_labels_for_metric
def setup_metric_manager(self):
self.metric_manager = metric_manager.AudioMetricManager(
is_training=False,
use_class_metrics=self.use_class_metrics,
exclude_metric_names=self.args.exclude_metric_names,
summary=Summaries(
session=self.session,
train_dir=self.build_train_directory(),
is_training=False,
base_name=self.dataset.dataset_split_name,
max_summary_outputs=self.args.max_summary_outputs,
),
)
def setup_metric_ops(self):
losses = self.build_basic_loss_ops()
self.metric_tf_op = self.metric_manager.build_metric_ops({
"dataset_split_name": self.dataset_name,
"label_names": self.dataset.label_names,
"losses": losses,
"learning_rate": None,
"wavs": self.model.audio_original,
})
def build_non_tensor_data_from_eval_dict(self, eval_dict, **kwargs):
return {
"dataset_split_name": self.dataset.dataset_split_name,
"label_names": self.dataset.label_names,
"predictions_onehot": eval_dict["predictions_onehot"],
"labels_onehot": eval_dict["labels_onehot"],
}
def setup_dataset_iterator(self):
self.dataset.setup_iterator(
self.session,
self.dataset.placeholders,
self.dataset.data,
)
|
components/PyTorch/pytorch-kfp-components/setup.py | nostro-im/pipelines | 2,860 | 19617 | <reponame>nostro-im/pipelines
#!/usr/bin/env/python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script."""
import importlib
import os
import types
from setuptools import setup, find_packages
def make_required_install_packages():
return [
"pytorch-lightning>=1.4.0",
"torch>=1.7.1",
"torch-model-archiver",
]
def make_required_test_packages():
return make_required_install_packages() + [
"mock>=4.0.0",
"flake8>=3.0.0",
"pylint",
"pytest>=6.0.0",
"wget",
"pandas",
"minio"
]
def make_dependency_links():
return []
def detect_version(base_path):
loader = importlib.machinery.SourceFileLoader(
fullname="version",
path=os.path.join(base_path,
"pytorch_kfp_components/__init__.py"),
)
version = types.ModuleType(loader.name)
loader.exec_module(version)
return version.__version__
if __name__ == "__main__":
relative_directory = os.path.relpath(
os.path.dirname(os.path.abspath(__file__)))
version = detect_version(relative_directory)
setup(
name="pytorch-kfp-components",
version=version,
description="PyTorch Kubeflow Pipeline",
url="https://github.com/kubeflow/pipelines/tree/master/components/PyTorch/pytorch-kfp-components/",
author="The PyTorch Kubeflow Pipeline Components authors",
author_email="<EMAIL>",
license="Apache License 2.0",
        extras_require={"tests": make_required_test_packages()},
include_package_data=True,
python_requires=">=3.6",
install_requires=make_required_install_packages(),
dependency_links=make_dependency_links(),
keywords=[
"Kubeflow Pipelines",
"KFP",
"ML workflow",
"PyTorch",
],
classifiers=[
"Development Status :: 4 - Beta",
"Operating System :: Unix",
"Operating System :: MacOS",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
package_dir={
"pytorch_kfp_components":
os.path.join(relative_directory, "pytorch_kfp_components")
},
packages=find_packages(where=relative_directory),
)
|
Kernels/Research/FFT/config/fft.py | WoodData/EndpointAI | 190 | 19648 | #
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sympy.ntheory import factorint
import numpy as np
from sympy.combinatorics import Permutation
import io
import math
from config.strtools import *
import itertools
import struct
import config.formats
# Conversion of double to fixed point values
#
# - 8000 gives 8000 in C (int16)
# So when it is multiplied it will give the wrong sign for the result
# of the multiplication except if DSPE instructions with saturation are used
# to compute the negate (and we should get 7FFF).
#
# So for cortex-m without DSP extension, we should try to use 8001
# It is done but not yet tested.
def to_q63(v,dspe):
r = int(round(v * 2**63))
if (r > 0x07FFFFFFFFFFFFFFF):
r = 0x07FFFFFFFFFFFFFFF
if (r < -0x08000000000000000):
if dspe:
r = -0x08000000000000000
else:
r = -0x07FFFFFFFFFFFFFFF
return ("0x%s" % format(struct.unpack('<Q', struct.pack('<q', r))[0],'016X'))
def to_q31(v,dspe):
r = int(round(v * 2**31))
if (r > 0x07FFFFFFF):
r = 0x07FFFFFFF
if (r < -0x080000000):
if dspe:
r = -0x080000000
else:
r = -0x07FFFFFFF
return ("0x%s" % format(struct.unpack('<I', struct.pack('<i', r))[0],'08X'))
def to_q15(v,dspe):
r = int(round(v * 2**15))
if (r > 0x07FFF):
r = 0x07FFF
if (r < -0x08000):
if dspe:
r = -0x08000
else:
r = -0x07FFF
return ("0x%s" % format(struct.unpack('<H', struct.pack('<h', r))[0],'04X'))
def to_q7(v,dspe):
r = int(round(v * 2**7))
if (r > 0x07F):
r = 0x07F
    if (r < -0x080):
if dspe:
r = -0x080
else:
r = -0x07F
return ("0x%s" % format(struct.unpack('<B', struct.pack('<b', r))[0],'02X'))
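# Quick sanity checks of the conversion helpers (values worked out by hand):
#   to_q15(0.5, True)   -> "0x4000"
#   to_q15(0.25, False) -> "0x2000"
#   to_q31(0.5, True)   -> "0x40000000"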
Q7=1
Q15=2
Q31=3
F16=4
F32=5
F64=6
# In the final C++ code, we have a loop for a given radix.
# The input list here has not grouped the factors yet.
# The list needs to be transformed into a list of pairs,
# each pair being (radix,exponent).
def groupFactors(factors):
n = 0
current=-1
result=[]
for f in factors:
if f != current:
if current != -1:
result = result + [current,n]
current=f
n=1
else:
n=n+1
result = result + [current,n]
return(result)
# Compute the grouped factors for the FFT length originaln
# where the only possible radixes are in the primitiveFactors list.
def getFactors(primitiveFactors,originaln):
factors=[]
length=[]
primitiveFactors.sort(reverse=True)
n = originaln
while (n > 1) and primitiveFactors:
if (n % primitiveFactors[0] == 0):
factors.append(primitiveFactors[0])
n = n // primitiveFactors[0]
else:
primitiveFactors=primitiveFactors[1:]
    # When the lowest factors are at the beginning (like 2)
    # we use a special implementation of the loopcore template
    # and it removes some cycles.
# So, we will get (for instance) 2x8x8x8 instead of 8x8x8x2
factors.reverse()
for f in factors:
originaln = originaln // f
length.append(originaln)
groupedfactors=groupFactors(factors)
return(groupedfactors,factors,length)
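# Example (hand-checked): decomposing a length-16 FFT when the supported
# radixes are [8, 4, 2]:
#   getFactors([8, 4, 2], 16) -> ([2, 1, 8, 1], [2, 8], [8, 1])
# i.e. one radix-2 stage then one radix-8 stage, the remaining length after
# each stage being 8 and then 1.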
# Apply the radix decomposition to compute the input -> output permutation
# computed by the FFT.
def radixReverse(f,n):
a=np.array(range(0,n)).reshape(f)
r = list(range(0,len(f)))
r.reverse()
r = tuple(r)
a = np.transpose(a,r)
return(a.reshape(n))
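# Example: radixReverse([2, 2], 4) -> array([0, 2, 1, 3]), the usual
# bit-reversed ordering of a length-4 radix-2 FFT.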
def radixPermutation(factors,n):
a = radixReverse(factors,n)
tps = []
vectorizable=True
for c in Permutation.from_sequence(a).cyclic_form:
if (len(c)>2):
vectorizable = False
for i in range(len(c)-1,0,-1):
# 2 because those are indexes in an array of complex numbers but
# with a real type.
tps.append([2*c[i], 2*c[i-1]])
return(np.array(tps,dtype=int).flatten(),vectorizable)
# CFFT Twiddle table
def cfft_twiddle(n):
a=2.0*math.pi*np.linspace(0,n,num=n,endpoint=False)/n
c=np.cos(-a)
s=np.sin(-a)
r = np.empty((c.size + s.size,), dtype=c.dtype)
r[0::2] = c
r[1::2] = s
return(r)
# RFFT twiddle for the merge and split steps.
def rfft_twiddle(n):
a=2.0j*math.pi*np.linspace(0,n//2,num=n // 2,endpoint=False)/n
z=-1.0j * np.exp(-a)
r = z.view(dtype=np.float64)
return(r)
# Compute the twiddle tables
def twiddle(transform,n):
if transform=="CFFT":
return(cfft_twiddle(n))
if transform=="RFFT":
return(rfft_twiddle(n))
return(None)
NB_ELEMS_PER_LINE=3
# Generate C array content for a given datatype
def printFloat64Array(f,n):
nb=0
for s in n:
print("%.20f, " % s,end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
def printFloat32Array(f,n):
nb=0
for s in n:
print("%.20ff, " % s,end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
def printFloat16Array(f,n):
nb=0
for s in n:
print("%.8ff16, " % s,end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
def printQ31Array(f,mode,n):
DSPE=False
if mode == "DSP":
DSPE=True
nb=0
for s in n:
print(to_q31(s,DSPE) + ", ",end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
def printQ15Array(f,mode,n):
DSPE=False
if mode == "DSP":
DSPE=True
nb=0
for s in n:
print(to_q15(s,DSPE) + ", ",end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
def printQ7Array(f,mode,n):
DSPE=False
if mode == "DSP":
DSPE=True
nb=0
for s in n:
print(to_q7(s,DSPE) + ", ",end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
# Print a C array
# Using the type, dspe mode, name
# (dspe mode is for knowing if 0x8000 must be generated as 8000 or 8001
# to avoid sign issues when multiplying with the twiddles)
def printArray(f,ctype,mode,name,a):
nbSamples = len(a)
define = "NB_" + name.upper()
n = a.reshape(len(a))
print("__ALIGNED(8) const %s %s[%s]={" % (ctype,name,define),file=f)
if ctype == "float64_t":
printFloat64Array(f,n)
if ctype == "float32_t":
printFloat32Array(f,n)
if ctype == "float16_t":
printFloat16Array(f,n)
if ctype == "Q31":
printQ31Array(f,mode,n)
if ctype == "Q15":
printQ15Array(f,mode,n)
if ctype == "Q7":
printQ7Array(f,mode,n)
print("};",file=f)
# Convert a float value to a given datatype.
def convertToDatatype(r,ctype,mode):
DSPE=False
if mode == "DSP":
DSPE=True
if ctype == "float64_t":
result = "%.20f" % r
if ctype == "float32_t":
result = "%.20ff" % r
if ctype == "float16_t":
result = "%.20ff16" % r
if ctype == "Q31":
result = "Q31(%s)" % to_q31(r,DSPE)
if ctype == "Q15":
result = "Q15(%s)" % to_q15(r,DSPE)
if ctype == "Q7":
result = "Q7(%s)" % to_q7(r,DSPE)
return(result)
def printArrayHeader(f,ctype,name,nbSamples):
define = "NB_" + name.upper()
print("#define %s %d" % (define, nbSamples),file=f)
print("extern __ALIGNED(8) const %s %s[%s];\n" % (ctype,name,define),file=f)
# Print UINT arrays for permutations.
def printUInt32Array(f,name,a):
nbSamples = len(a)
define = "NB_" + name.upper()
n = a.reshape(len(a))
print("__ALIGNED(8) const uint32_t %s[%s]={" % (name,define),file=f)
nb=0
for s in n:
print("%d, " % s,end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
print("};",file=f)
def printUInt16Array(f,name,a):
nbSamples = len(a)
define = "NB_" + name.upper()
n = a.reshape(len(a))
print("__ALIGNED(8) const uint16_t %s[%s]={" % (name,define),file=f)
nb=0
for s in n:
print("%d, " % s,end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
print("};",file=f)
def printUInt32ArrayHeader(f,name,a):
nbSamples = len(a)
define = "NB_" + name.upper()
n = a.reshape(len(a))
print("#define %s %d" % (define, nbSamples),file=f)
print("extern __ALIGNED(8) const uint32_t %s[%s];\n" % (name,define),file=f)
def printUInt16ArrayHeader(f,name,a):
nbSamples = len(a)
define = "NB_" + name.upper()
n = a.reshape(len(a))
print("#define %s %d" % (define, nbSamples),file=f)
print("extern __ALIGNED(8) const uint16_t %s[%s];\n" % (name,define),file=f)
def getCtype(t):
if t == 'f64':
return("float64_t")
if t == 'f32':
return("float32_t")
if t == 'f16':
return("float16_t")
if t == 'q31':
return("Q31")
if t == 'q15':
return("Q15")
if t == 'q7':
return("Q7")
return("void")
# Configuration structures for CFFT and RFFT
cfftconfig = """cfftconfig<%s> config%d={
.normalization=%s,
.nbPerms=%s,
.perms=perm%d,
.nbTwiddle=%s,
.twiddle=twiddle%d,
.nbGroupedFactors=%d,
.nbFactors=%d,
.factors=factors%d,
.lengths=lengths%d,
.format=%d,
.reversalVectorizable=%d
};"""
rfftconfig = """rfftconfig<%s> config%d={
.nbTwiddle=%s,
.twiddle=twiddle%d
};"""
fftconfigHeader = """extern %sconfig<%s> config%d;"""
fftFactorArray = """const uint16_t factors%d[%d]=%s;\n"""
fftLengthArray = """const uint16_t lengths%d[%d]=%s;\n"""
# Description of a permutation
class Perm:
PermID = 0
# Grouped factors and factors.
def getFactors(core,nb,datatype):
_groupedFactors,_factors,_lens=getFactors(core.radix(datatype,nb),nb)
return(_factors)
def __init__(self,core,nb,datatype):
Perm.PermID = Perm.PermID + 1
self._nb=nb
self._id = Perm.PermID
self._radixUsed=set([])
self._groupedFactors,self._factors,self._lens=getFactors(core.radix(datatype,nb),nb)
self._perms = None
self._core=core
self._isvectorizable=False
def permutations(self):
_permFactors=list(itertools.chain(*[self._core.getPermFactor(x) for x in self._factors]))
#print(_permFactors)
self._perms,self._isvectorizable = radixPermutation(_permFactors[::-1],self._nb)
@property
def isVectorizable(self):
return(self._isvectorizable)
@property
def permID(self):
return(self._id)
@property
def perms(self):
if self._perms is not None:
return(self._perms)
else:
self.permutations()
return(self._perms)
@property
def factors(self):
return(self._factors)
@property
def nbGroupedFactors(self):
return(int(len(self._groupedFactors)/2))
@property
def nbFactors(self):
return(len(self._factors))
def writePermHeader(self,h):
printUInt16ArrayHeader(h,"perm%d" % self.permID,self.perms)
def writePermCode(self,c):
printUInt16Array(c,"perm%d" % self.permID,self.perms)
def writeFactorDesc(self,c):
radixList="{%s}" % joinStr([str(x) for x in self._groupedFactors])
lengthList="{%s}" % joinStr([str(x) for x in self._lens])
print(fftFactorArray % (self.permID,2*self.nbGroupedFactors,radixList),file=c);
print(fftLengthArray % (self.permID,len(self._lens),lengthList),file=c);
class Twiddle:
TwiddleId = 0
def __init__(self,transform,nb,datatype,mode):
Twiddle.TwiddleId = Twiddle.TwiddleId + 1
self._id = Twiddle.TwiddleId
self._datatype = datatype
self._nb=nb
self._twiddle = None
self._transform=transform
self._mode=mode
@property
def twiddleID(self):
return(self._id)
@property
def datatype(self):
return(self._datatype)
@property
def samples(self):
if self._twiddle is None:
self._twiddle=twiddle(self._transform,self._nb)
return(self._twiddle)
@property
def nbSamples(self):
return(self._nb)
@property
def nbTwiddles(self):
if self._transform=="RFFT":
return(self._nb // 2)
else:
return(self._nb)
def writeTwidHeader(self,h):
ctype=getCtype(self.datatype)
# Twiddle is a complex array so 2*nbSamples must be used
printArrayHeader(h,ctype,"twiddle%d" % self.twiddleID,2*self.nbTwiddles)
def writeTwidCode(self,c):
ctype=getCtype(self.datatype)
printArray(c,ctype,self._mode,"twiddle%d" % self.twiddleID,self.samples)
class Config:
ConfigID = 0
def __init__(self,transform,twiddle,perms,coreMode):
Config.ConfigID = Config.ConfigID + 1
self._id = Config.ConfigID
self._twiddle=twiddle
self._perms=perms
self._transform=transform
self._coreMode=coreMode
@property
def transform(self):
return(self._transform)
@property
def configID(self):
return(self._id)
@property
def perms(self):
return(self._perms)
@property
def twiddle(self):
return(self._twiddle)
@property
def nbSamples(self):
return(self.twiddle.nbSamples)
def writeConfigHeader(self,c):
ctype=getCtype(self.twiddle.datatype)
print(fftconfigHeader % (self.transform.lower(),ctype,self.configID),file=c)
def writeConfigCode(self,c):
ctype=getCtype(self.twiddle.datatype)
twiddleLen = "NB_" + ("twiddle%d"% self.twiddle.twiddleID).upper()
if self.transform == "RFFT":
print(rfftconfig % (ctype,self.configID,twiddleLen,self.twiddle.twiddleID),file=c)
else:
normfactor = 1.0 / self.twiddle.nbSamples
normFactorStr = convertToDatatype(normfactor,ctype,self._coreMode)
permsLen = "NB_" + ("perm%d"% self.perms.permID).upper()
outputFormat = 0
#print(self.twiddle.datatype)
#print(self.twiddle.nbSamples)
#print(self.perms.factors)
# For fixed point, each stage will change the output format.
            # We need to compute the final format of the FFT
# and record it in the initialization structure
# so that the user can easily know how to recover the
# input format (q31, q15). It is encoded as a shift value.
# The shift to apply to recover the input format
# But applying this shift will saturate the result in general.
if self.twiddle.datatype == "q15" or self.twiddle.datatype == "q31":
for f in self.perms.factors:
#print(f,self.twiddle.datatype,self._coreMode)
                    # The file "formats.py" describes the format of each radix
# and is used to compute the format of the FFT based
# on the decomposition of its length.
#
# Currently (since there is no vector version for fixed point)
# this is not taking into account the format change that may
# be implied by the vectorization in case it may be different
# from the scalar version.
formatForSize = config.formats.formats[f][self._coreMode]
outputFormat += formatForSize[self.twiddle.datatype]
vectorizable=0
if self.perms.isVectorizable:
vectorizable = 1
print(cfftconfig % (ctype,self.configID,normFactorStr,permsLen,self.perms.permID,
twiddleLen,self.twiddle.twiddleID,self.perms.nbGroupedFactors,self.perms.nbFactors,
self.perms.permID,self.perms.permID,outputFormat,vectorizable
),file=c)
|
tests/tensorflow/pruning/test_tensor_processor.py | MaximProshin/nncf | 136 | 19666 | <reponame>MaximProshin/nncf<gh_stars>100-1000
import pytest
import tensorflow as tf
from nncf.tensorflow.tensor import TFNNCFTensor
from nncf.tensorflow.pruning.tensor_processor import TFNNCFPruningTensorProcessor
@pytest.mark.parametrize('device', ("CPU", 'GPU'))
def test_create_tensor(device):
if not tf.config.list_physical_devices('GPU'):
if device == 'GPU':
pytest.skip('There are no available CUDA devices')
shape = [1, 3, 10, 100]
tensor = TFNNCFPruningTensorProcessor.ones(shape, device)
assert tf.is_tensor(tensor.tensor)
assert tensor.tensor.device.split('/')[-1].split(':')[1] == device
assert list(tensor.tensor.shape) == shape
def test_repeat():
tensor_data = [0., 1.]
repeats = 5
tensor = TFNNCFTensor(tf.Variable(tensor_data))
repeated_tensor = TFNNCFPruningTensorProcessor.repeat(tensor, repeats=repeats)
ref_repeated = []
for val in tensor_data:
for _ in range(repeats):
ref_repeated.append(val)
assert tf.reduce_all(repeated_tensor.tensor == tf.Variable(ref_repeated))
def test_concat():
tensor_data = [0., 1.]
tensors = [TFNNCFTensor(tf.Variable(tensor_data)) for _ in range(3)]
concatenated_tensor = TFNNCFPruningTensorProcessor.concatenate(tensors, axis=0)
assert tf.reduce_all(concatenated_tensor.tensor == tf.Variable(tensor_data * 3))
@pytest.mark.parametrize('all_close', [False, True])
def test_assert_all_close(all_close):
tensor_data = [0., 1.]
tensors = [TFNNCFTensor(tf.Variable(tensor_data)) for _ in range(3)]
if not all_close:
tensors.append(TFNNCFTensor(tf.Variable(tensor_data[::-1])))
with pytest.raises(tf.errors.InvalidArgumentError):
TFNNCFPruningTensorProcessor.assert_allclose(tensors)
else:
TFNNCFPruningTensorProcessor.assert_allclose(tensors)
@pytest.mark.parametrize('all_close', [False, True])
def test_elementwise_mask_propagation(all_close):
tensor_data = [0., 1.]
tensors = [TFNNCFTensor(tf.Variable(tensor_data)) for _ in range(3)]
if not all_close:
tensors.append(TFNNCFTensor(tf.Variable(tensor_data[::-1])))
with pytest.raises(tf.errors.InvalidArgumentError):
TFNNCFPruningTensorProcessor.elementwise_mask_propagation(tensors)
else:
result = TFNNCFPruningTensorProcessor.elementwise_mask_propagation(tensors)
for t in tensors:
tf.debugging.assert_near(result.tensor, t.tensor)
|
languages/python/software_engineering_logging4.py | Andilyn/learntosolveit | 136 | 19672 | <filename>languages/python/software_engineering_logging4.py
import logging
logger1 = logging.getLogger('package1.module1')
logger2 = logging.getLogger('package1.module2')
logging.basicConfig(level=logging.WARNING)
logger1.warning('This is a warning message')
logger2.warning('This is another warning message')
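# Expected console output with the default basicConfig format:
#   WARNING:package1.module1:This is a warning message
#   WARNING:package1.module2:This is another warning message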
|
stackoverflow/venv/lib/python3.6/site-packages/scrapy/utils/markup.py | zhi-xianwei/learn_python3_spider | 9,953 | 19675 | <filename>stackoverflow/venv/lib/python3.6/site-packages/scrapy/utils/markup.py
"""
Transitional module for moving to the w3lib library.
For new code, always import from w3lib.html instead of this module
"""
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from w3lib.html import *
warnings.warn("Module `scrapy.utils.markup` is deprecated. "
"Please import from `w3lib.html` instead.",
ScrapyDeprecationWarning, stacklevel=2) |
applications/FemToDemApplication/python_scripts/MainFEM_for_coupling.py | lkusch/Kratos | 778 | 19702 | <reponame>lkusch/Kratos<gh_stars>100-1000
import KratosMultiphysics
import KratosMultiphysics.FemToDemApplication.MainFemDem as MainFemDem
import KratosMultiphysics.FemToDemApplication as KratosFemDem
import KratosMultiphysics.DEMApplication as DEM
import KratosMultiphysics.DemStructuresCouplingApplication as DEM_Structures
# Python script created to modify the existing one due to the coupling of the DEM app in 2D
class FEM_for_coupling_Solution(MainFemDem.FEM_Solution):
def Info(self):
print("FEM part of the FEMDEM application")
def Initialize(self):
#### INITIALIZE ####
# Add variables (always before importing the model part)
self.solver.AddVariables()
# For remeshing purposes
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.NODAL_STRESS_VECTOR)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_AREA)
self.main_model_part.AddNodalSolutionStepVariable(DEM.DEM_NODAL_AREA)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_H)
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.EQUIVALENT_NODAL_STRESS)
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.EQUIVALENT_NODAL_STRESS_GRADIENT)
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.NODAL_DAMAGE)
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.EQUIVALENT_STRESS_VM)
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.DISPLACEMENT_INCREMENT)
# For the DE-FE contact model
self.main_model_part.AddNodalSolutionStepVariable(DEM.DEM_PRESSURE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.TOTAL_FORCES)
self.main_model_part.AddNodalSolutionStepVariable(DEM.DELTA_DISPLACEMENT)
self.main_model_part.AddNodalSolutionStepVariable(DEM.CONTACT_FORCES)
self.main_model_part.AddNodalSolutionStepVariable(DEM.ELASTIC_FORCES)
self.main_model_part.AddNodalSolutionStepVariable(DEM.TANGENTIAL_ELASTIC_FORCES)
self.main_model_part.AddNodalSolutionStepVariable(DEM.SHEAR_STRESS)
# For the Substepping
self.main_model_part.AddNodalSolutionStepVariable(DEM_Structures.BACKUP_LAST_STRUCTURAL_VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(DEM_Structures.BACKUP_LAST_STRUCTURAL_DISPLACEMENT)
self.main_model_part.AddNodalSolutionStepVariable(DEM_Structures.SMOOTHED_STRUCTURAL_VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(DEM.CONTACT_IMPULSE)
# Read model_part (note: the buffer_size is set here) (restart is read here)
self.solver.ImportModelPart()
# Add dofs (always after importing the model part)
if((self.main_model_part.ProcessInfo).Has(KratosMultiphysics.IS_RESTARTED)):
if(self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED] == False):
self.solver.AddDofs()
else:
self.solver.AddDofs()
# Add materials (assign material to model_parts if Materials.json exists)
self.AddMaterials()
# Add processes
self.model_processes = self.AddProcesses()
self.model_processes.ExecuteInitialize()
# Print model_part and properties
if(self.echo_level > 1):
print("")
print(self.main_model_part)
for properties in self.main_model_part.Properties:
print(properties)
#### START SOLUTION ####
self.computing_model_part = self.solver.GetComputingModelPart()
if (self.ProjectParameters["solver_settings"]["strategy_type"].GetString() == "arc_length"):
neighbour_elemental_finder = KratosMultiphysics.FindElementalNeighboursProcess(self.main_model_part, 2, 5)
neighbour_elemental_finder.Execute()
self.InitializeIntegrationPointsVariables()
self.model_processes.ExecuteBeforeSolutionLoop()
self.model_processes.ExecuteInitializeSolutionStep()
self.using_arc_length = True
else:
self.using_arc_length = False
## Sets strategies, builders, linear solvers, schemes and solving info, and fills the buffer
self.solver.Initialize()
#self.solver.InitializeStrategy()
self.solver.SetEchoLevel(self.echo_level)
# Initialize GiD I/O (gid outputs, file_lists)
self.SetGraphicalOutput()
self.GraphicalOutputExecuteInitialize()
print(" ")
print("=================================================")
print(" - Kratos FemDem Application Calculation Start - ")
print("=================================================")
self.model_processes.ExecuteBeforeSolutionLoop()
self.GraphicalOutputExecuteBeforeSolutionLoop()
# Set time settings
self.step = self.main_model_part.ProcessInfo[KratosMultiphysics.STEP]
self.time = self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]
self.end_time = self.ProjectParameters["problem_data"]["end_time"].GetDouble()
self.delta_time = self.ComputeDeltaTime()
#============================================================================================================================
def ComputeDeltaTime(self):
if self.ProjectParameters["problem_data"].Has("time_step"):
return self.ProjectParameters["problem_data"]["time_step"].GetDouble()
elif self.ProjectParameters["problem_data"].Has("variable_time_steps"):
current_time = self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]
for key in self.ProjectParameters["problem_data"]["variable_time_steps"].keys():
interval_settings = self.ProjectParameters["problem_data"]["variable_time_steps"][key]
interval = KratosMultiphysics.IntervalUtility(interval_settings)
# Getting the time step of the interval
if interval.IsInInterval(current_time):
return interval_settings["time_step"].GetDouble()
# If we arrive here we raise an error because the intervals are not well defined
raise Exception("::[MechanicalSolver]:: Time stepping not well defined!")
else:
raise Exception("::[MechanicalSolver]:: Time stepping not defined!")
#============================================================================================================================
def InitializeIntegrationPointsVariables(self):
utils = KratosMultiphysics.VariableUtils()
elements = self.main_model_part.Elements
self.domain_size = self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]
nodes = self.main_model_part.Nodes
utils.SetNonHistoricalVariable(KratosFemDem.GENERATE_DEM, False, elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRESS_THRESHOLD, 0.0, elements)
utils.SetNonHistoricalVariable(KratosFemDem.DAMAGE_ELEMENT, 0.0, elements)
utils.SetNonHistoricalVariable(KratosFemDem.PRESSURE_EXPANDED, 0, elements)
utils.SetNonHistoricalVariable(KratosFemDem.IS_SKIN, 0, elements)
utils.SetNonHistoricalVariable(KratosFemDem.SMOOTHING, 0, elements)
utils.SetNonHistoricalVariable(KratosFemDem.RECOMPUTE_NEIGHBOURS, True, elements)
if self.domain_size == 3:
utils.SetNonHistoricalVariable(KratosFemDem.VOLUME_COUNTED, False, elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR, [0.0,0.0,0.0,0.0,0.0,0.0], elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRAIN_VECTOR, [0.0,0.0,0.0,0.0,0.0,0.0], elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR_INTEGRATED, [0.0,0.0,0.0,0.0,0.0,0.0], elements)
else: # 2D
utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR, [0.0,0.0,0.0], elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRAIN_VECTOR, [0.0,0.0,0.0], elements)
utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR_INTEGRATED, [0.0, 0.0, 0.0], elements)
# if self.PressureLoad:
# utils.SetNonHistoricalVariable(KratosFemDem.PRESSURE_ID, 0, nodes) |
aries_cloudagent/wallet/tests/test_key_pair.py | kuraakhilesh8230/aries-cloudagent-python | 247 | 19813 | <reponame>kuraakhilesh8230/aries-cloudagent-python
from asynctest import TestCase as AsyncTestCase
import json
from ...storage.error import StorageNotFoundError
from ..util import bytes_to_b58
from ..key_type import KeyType
from ...core.in_memory import InMemoryProfile
from ...storage.in_memory import InMemoryStorage
from ..key_pair import KeyPairStorageManager, KEY_PAIR_STORAGE_TYPE
class TestKeyPairStorageManager(AsyncTestCase):
test_public_key = b"somepublickeybytes"
test_secret = b"verysecretkey"
async def setUp(self):
self.profile = InMemoryProfile.test_profile()
self.store = InMemoryStorage(self.profile)
self.key_pair_mgr = KeyPairStorageManager(self.store)
async def test_create_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert record.tags == {"verkey": verkey, "key_type": KeyType.ED25519.key_type}
assert value["verkey"] == verkey
assert value["secret_key"] == bytes_to_b58(self.test_secret)
assert value["metadata"] == {}
assert value["key_type"] == KeyType.ED25519.key_type
async def test_get_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
key_pair = await self.key_pair_mgr.get_key_pair(verkey)
assert key_pair["verkey"] == verkey
assert key_pair["secret_key"] == bytes_to_b58(self.test_secret)
assert key_pair["metadata"] == {}
assert key_pair["key_type"] == KeyType.ED25519.key_type
async def test_get_key_pair_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.get_key_pair("not_existing_verkey")
async def test_delete_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
await self.key_pair_mgr.delete_key_pair(verkey)
# should be deleted now
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.delete_key_pair(verkey)
async def test_delete_key_pair_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.delete_key_pair("non_existing_verkey")
async def test_update_key_pair_metadata(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
metadata={"some": "data"},
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert value["metadata"] == {"some": "data"}
await self.key_pair_mgr.update_key_pair_metadata(verkey, {"some_other": "data"})
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert value["metadata"] == {"some_other": "data"}
async def test_update_key_pair_metadata_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.update_key_pair_metadata("non_existing_verkey", {})
|
LeetCode/0005_Longest_Palindromic_Substring.py | Achyut-sudo/PythonAlgorithms | 144 | 19837 | <filename>LeetCode/0005_Longest_Palindromic_Substring.py
'''
Problem:-
Given a string s, find the longest palindromic substring in s.
You may assume that the maximum length of s is 1000.
Example 1:
Input: "babad"
Output: "bab"
Note: "aba" is also a valid answer.
'''
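# Approach: expand around every center (odd- and even-length), keeping the
# longest palindrome seen so far; O(n^2) time, O(1) extra space.
# e.g. Solution().longestPalindrome("babad") -> "bab"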
class Solution:
def longestPalindrome(self, s: str) -> str:
res = ""
resLen = 0
for i in range(len(s)):
# odd length
l, r = i, i
while l >= 0 and r < len(s) and s[l] == s[r]:
if (r - l + 1) > resLen:
res = s[l:r + 1]
resLen = r - l + 1
l -= 1
r += 1
# even length
l, r = i, i + 1
while l >= 0 and r < len(s) and s[l] == s[r]:
if (r - l + 1) > resLen:
res = s[l:r + 1]
resLen = r - l + 1
l -= 1
r += 1
return res |
cortex/export/__init__.py | mvdoc/pycortex | 423 | 19853 | <filename>cortex/export/__init__.py
from .save_views import save_3d_views
from .panels import plot_panels
from ._default_params import (
params_inflatedless_lateral_medial_ventral,
params_flatmap_lateral_medial,
params_occipital_triple_view,
params_inflated_dorsal_lateral_medial_ventral,
)
__all__ = [
"save_3d_views",
"plot_panels",
"params_flatmap_lateral_medial",
"params_occipital_triple_view",
"params_inflatedless_lateral_medial_ventral",
"params_inflated_dorsal_lateral_medial_ventral",
]
|
setup.py | may-ank/hocr-tools | 200 | 19869 | <filename>setup.py
#!/usr/bin/env python
__version__ = '1.3.0'
import glob
from setuptools import setup
setup(
name="hocr-tools",
version=__version__,
description='Advanced tools for hOCR integration',
author='<NAME>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/tmbdev/hocr-tools',
download_url='https://github.com/tmbdev/hocr-tools/tarball/v'
+ __version__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Multimedia :: Graphics :: Graphics Conversion',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Utilities',
],
install_requires=[
'Pillow',
'lxml',
'reportlab',
],
scripts=[c for c in glob.glob("hocr-*")]
)
|
pair-ranking-cnn/utils.py | shinoyuki222/torch-light | 310 | 19918 | import const
def corpora2idx(sents, ind2idx):
return [[ind2idx[w] if w in ind2idx else const.UNK for w in s] for s in sents]
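# Example (assuming const.UNK is the out-of-vocabulary index):
#   corpora2idx([["hello", "oov"]], {"hello": 4}) -> [[4, const.UNK]]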
|
2020/02/07/An Introduction to Sessions in Flask/flask_session_example/app.py | kenjitagawa/youtube_video_code | 492 | 19927 | from flask import Flask, render_template, session, redirect, url_for
app = Flask(__name__)
app.config['SECRET_KEY'] = '<PASSWORD>'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/set-background/<mode>')
def set_background(mode):
session['mode'] = mode
return redirect(url_for('index'))
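# e.g. visiting /set-background/dark stores {'mode': 'dark'} in the signed
# session cookie; templates can read it back via session.get('mode').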
@app.route('/drop-session')
def drop_session():
session.pop('mode', None)
return redirect(url_for('index')) |
typed_python/compiler/type_wrappers/ref_to_wrapper.py | APrioriInvestments/typed_python | 105 | 19940 | <reponame>APrioriInvestments/typed_python
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python.compiler.type_wrappers.wrapper import Wrapper
from typed_python.compiler.typed_expression import TypedExpression
from typed_python._types import refTo
import typed_python.compiler.native_ast as native_ast
import typed_python.compiler
typeWrapper = lambda t: typed_python.compiler.python_object_representation.typedPythonTypeToTypeWrapper(t)
class RefToObjectWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__(refTo)
def getNativeLayoutType(self):
return native_ast.Type.Void()
@Wrapper.unwrapOneOfAndValue
def convert_call(self, context, expr, args, kwargs):
if len(args) != 1 or kwargs:
return super().convert_call(context, expr, args, kwargs)
return args[0].expr_type.convert_refTo(context, args[0])
class RefToWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self, t):
super().__init__(t)
self.layoutType = typeWrapper(t.ElementType).getNativeLayoutType().pointer()
def underlyingTypeWrapper(self):
return typeWrapper(self.typeRepresentation.ElementType)
def getNativeLayoutType(self):
return self.layoutType
def convert_assign(self, context, target, toStore):
assert target.isReference
context.pushEffect(
target.expr.store(toStore.nonref_expr)
)
def convert_copy_initialize(self, context, target, toStore):
assert target.isReference
context.pushEffect(
target.expr.store(toStore.nonref_expr)
)
def deref(self, instance):
return TypedExpression(
instance.context,
instance.nonref_expr,
typeWrapper(self.typeRepresentation.ElementType),
True
)
def convert_destroy(self, context, instance):
pass
def _can_convert_to_type(self, targetType, conversionLevel):
return self.underlyingTypeWrapper._can_convert_to_type(targetType, conversionLevel)
def convert_to_type_with_target(self, context, instance, targetVal, conversionLevel, mayThrowOnFailure=False):
return self.deref(instance).convert_to_type_with_target(targetVal, conversionLevel)
def convert_bin_op(self, context, left, op, right, inplace):
return self.deref(left).convert_bin_op(op, right, inplace)
def convert_unary_op(self, context, left, op):
return self.deref(left).convert_unary_op(op)
def convert_attribute(self, context, instance, attr):
return self.deref(instance).convert_attribute(attr)
def convert_getitem(self, context, instance, key):
return self.deref(instance).convert_getitem(key)
def convert_setitem(self, context, instance, key, val):
return self.deref(instance).convert_setitem(key, val)
def convert_method_call(self, context, instance, methodname, args, kwargs):
return self.deref(instance).convert_method_call(methodname, args, kwargs)
def convert_set_attribute(self, context, instance, attribute, value):
return self.deref(instance).convert_set_attribute(attribute, value)
def convert_hash(self, context, expr):
return self.deref(expr).convert_hash()
def convert_call(self, context, expr, args, kwargs):
self.deref(expr).convert_call(args, kwargs)
def convert_len(self, context, expr):
self.deref(expr).convert_len()
def convert_abs(self, context, expr):
self.deref(expr).convert_abs()
def convert_repr(self, context, expr):
self.deref(expr).convert_repr()
def convert_builtin(self, f, context, expr, a1=None):
self.deref(expr).convert_builtin(a1)
def convert_comparison(self, context, l, op, r):
self.deref(l).convert_comparison(op, r)
def convert_bin_op_reverse(self, context, r, op, l, inplace):
self.deref(r).convert_bin_op_reverse(op, l, inplace)
|
tests/unit/utils/test_win_system.py | markgras/salt | 9,425 | 19947 | import os
import salt.utils.platform
from tests.support.mock import patch
from tests.support.unit import TestCase, skipIf
try:
import salt.utils.win_system as win_system
except Exception as exc: # pylint: disable=broad-except
win_system = exc
class WinSystemImportTestCase(TestCase):
"""
Simply importing should not raise an error, especially on Linux
"""
def test_import(self):
if isinstance(win_system, Exception):
raise Exception(
"Importing win_system caused traceback: {}".format(win_system)
)
@skipIf(not salt.utils.platform.is_windows(), "Only test on Windows systems")
class WinSystemTestCase(TestCase):
"""
Test cases for salt.utils.win_system
"""
def test_get_computer_name(self):
"""
Should return the computer name
"""
with patch("win32api.GetComputerNameEx", return_value="FAKENAME"):
self.assertEqual(win_system.get_computer_name(), "FAKENAME")
def test_get_computer_name_fail(self):
"""
If it fails, it returns False
"""
with patch("win32api.GetComputerNameEx", return_value=None):
self.assertFalse(win_system.get_computer_name())
def test_get_pending_computer_name(self):
"""
Will return the pending computer name if one is pending
"""
expected = "PendingName"
patch_value = {"vdata": expected}
with patch("salt.utils.win_reg.read_value", return_value=patch_value):
result = win_system.get_pending_computer_name()
self.assertEqual(expected, result)
def test_get_pending_computer_name_none(self):
"""
Will return the None if the pending computer is the current name
"""
patch_value = {"vdata": os.environ.get("COMPUTERNAME")}
with patch("salt.utils.win_reg.read_value", return_value=patch_value):
self.assertIsNone(win_system.get_pending_computer_name())
def test_get_pending_computer_name_false(self):
"""
Will return False if there is no pending computer name
"""
with patch("salt.utils.win_reg.read_value", return_value=False):
self.assertIsNone(win_system.get_pending_computer_name())
def test_get_pending_component_servicing(self):
"""
If none of the keys exist, should return False
"""
with patch("salt.utils.win_reg.key_exists", return_value=False):
self.assertFalse(win_system.get_pending_component_servicing())
def test_get_pending_component_servicing_true_1(self):
"""
If the RebootPending key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
self.assertTrue(win_system.get_pending_component_servicing())
def test_get_pending_component_servicing_true_2(self):
"""
If the RebootInProgress key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
self.assertTrue(win_system.get_pending_component_servicing())
def test_get_pending_component_servicing_true_3(self):
"""
If the PackagesPending key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, False, True]):
self.assertTrue(win_system.get_pending_component_servicing())
def test_get_pending_domain_join(self):
"""
If none of the keys exist, should return False
"""
with patch("salt.utils.win_reg.key_exists", return_value=False):
self.assertFalse(win_system.get_pending_domain_join())
def test_get_pending_domain_join_true_1(self):
"""
If the AvoidSpnSet key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
self.assertTrue(win_system.get_pending_domain_join())
def test_get_pending_domain_join_true_2(self):
"""
If the JoinDomain key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
self.assertTrue(win_system.get_pending_domain_join())
def test_get_pending_file_rename_false_1(self):
"""
If none of the value names exist, should return False
"""
patched_return = {"success": False}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertFalse(win_system.get_pending_file_rename())
def test_get_pending_file_rename_false_2(self):
"""
If one of the value names exists but is not set, should return False
"""
patched_return = {"success": True, "vdata": "(value not set)"}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertFalse(win_system.get_pending_file_rename())
def test_get_pending_file_rename_true_1(self):
"""
If one of the value names exists and is set, should return True
"""
patched_return = {"success": True, "vdata": "some value"}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertTrue(win_system.get_pending_file_rename())
def test_get_pending_servermanager_false_1(self):
"""
If the CurrentRebootAttempts value name does not exist, should return
False
"""
patched_return = {"success": False}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertFalse(win_system.get_pending_servermanager())
def test_get_pending_servermanager_false_2(self):
"""
If the CurrentRebootAttempts value name exists but is not an integer,
should return False
"""
patched_return = {"success": True, "vdata": "(value not set)"}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            self.assertFalse(win_system.get_pending_servermanager())
def test_get_pending_servermanager_true(self):
"""
If the CurrentRebootAttempts value name exists and is an integer,
should return True
"""
patched_return = {"success": True, "vdata": 1}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            self.assertTrue(win_system.get_pending_servermanager())
def test_get_pending_dvd_reboot(self):
"""
If the DVDRebootSignal value name does not exist, should return False
"""
with patch("salt.utils.win_reg.value_exists", return_value=False):
self.assertFalse(win_system.get_pending_dvd_reboot())
def test_get_pending_dvd_reboot_true(self):
"""
If the DVDRebootSignal value name exists, should return True
"""
with patch("salt.utils.win_reg.value_exists", return_value=True):
self.assertTrue(win_system.get_pending_dvd_reboot())
def test_get_pending_update(self):
"""
        If none of the keys exist and there are no subkeys, should return False
"""
with patch("salt.utils.win_reg.key_exists", return_value=False), patch(
"salt.utils.win_reg.list_keys", return_value=[]
):
self.assertFalse(win_system.get_pending_update())
def test_get_pending_update_true_1(self):
"""
If the RebootRequired key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
self.assertTrue(win_system.get_pending_update())
def test_get_pending_update_true_2(self):
"""
If the PostRebootReporting key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
self.assertTrue(win_system.get_pending_update())
def test_get_reboot_required_witnessed_false_1(self):
"""
The ``Reboot Required`` value name does not exist, should return False
"""
patched_data = {"vdata": None}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_reboot_required_witnessed())
def test_get_reboot_required_witnessed_false_2(self):
"""
The ``Reboot required`` value name is set to 0, should return False
"""
patched_data = {"vdata": 0}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_reboot_required_witnessed())
def test_get_reboot_required_witnessed_true(self):
"""
The ``Reboot required`` value name is set to 1, should return True
"""
patched_data = {"vdata": 1}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertTrue(win_system.get_reboot_required_witnessed())
def test_set_reboot_required_witnessed(self):
"""
The call to ``set_value`` should return True and should be called with
the specified parameters
"""
with patch("salt.utils.win_reg.set_value", return_value=True) as sv:
self.assertTrue(win_system.set_reboot_required_witnessed())
sv.assert_called_once_with(
hive="HKLM",
key=win_system.MINION_VOLATILE_KEY,
volatile=True,
vname=win_system.REBOOT_REQUIRED_NAME,
vdata=1,
vtype="REG_DWORD",
)
def test_get_pending_update_exe_volatile_false_1(self):
"""
If UpdateExeVolatile value name is 0, should return False
"""
patched_data = {"success": True, "vdata": 0}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_pending_update_exe_volatile())
def test_get_pending_update_exe_volatile_false_2(self):
"""
If UpdateExeVolatile value name is not present, should return False
"""
patched_data = {"success": False}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_pending_update_exe_volatile())
def test_get_pending_update_exe_volatile_true_1(self):
"""
If UpdateExeVolatile value name is not 0, should return True
"""
patched_data = {"success": True, "vdata": 1}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertTrue(win_system.get_pending_update_exe_volatile())
def test_get_pending_reboot(self):
"""
If all functions return Falsy data, should return False
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name", return_value=None
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=False
):
self.assertFalse(win_system.get_pending_reboot())
def test_get_pending_reboot_true_1(self):
"""
If any boolean returning functions return True, should return True
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name", return_value=None
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=True
):
self.assertTrue(win_system.get_pending_reboot())
def test_get_pending_reboot_true_2(self):
"""
If a computer name is returned, should return True
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name",
return_value="pending name",
):
self.assertTrue(win_system.get_pending_reboot())
def test_get_pending_reboot_details(self):
"""
All items False should return a dictionary with all items False
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name", return_value=None
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=False
):
expected = {
"Pending Component Servicing": False,
"Pending Computer Rename": False,
"Pending DVD Reboot": False,
"Pending File Rename": False,
"Pending Join Domain": False,
"Pending ServerManager": False,
"Pending Update": False,
"Pending Windows Update": False,
"Reboot Required Witnessed": False,
"Volatile Update Exe": False,
}
result = win_system.get_pending_reboot_details()
self.assertDictEqual(expected, result)
def test_get_pending_reboot_details_true(self):
"""
All items True should return a dictionary with all items True
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=True
), patch("salt.utils.win_update.needs_reboot", return_value=True), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=True
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=True
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=True
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=True
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=True
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=True
), patch(
"salt.utils.win_system.get_pending_computer_name",
return_value="pending name",
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=True
):
expected = {
"Pending Component Servicing": True,
"Pending Computer Rename": True,
"Pending DVD Reboot": True,
"Pending File Rename": True,
"Pending Join Domain": True,
"Pending ServerManager": True,
"Pending Update": True,
"Pending Windows Update": True,
"Reboot Required Witnessed": True,
"Volatile Update Exe": True,
}
result = win_system.get_pending_reboot_details()
self.assertDictEqual(expected, result)
|
src/genie/libs/parser/junos/tests/ShowOspfStatistics/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 19965 | expected_output = {
"ospf-statistics-information": {
"ospf-statistics": {
"dbds-retransmit": "203656",
"dbds-retransmit-5seconds": "0",
"flood-queue-depth": "0",
"lsas-acknowledged": "225554974",
"lsas-acknowledged-5seconds": "0",
"lsas-flooded": "66582263",
"lsas-flooded-5seconds": "0",
"lsas-high-prio-flooded": "375568998",
"lsas-high-prio-flooded-5seconds": "0",
"lsas-nbr-transmit": "3423982",
"lsas-nbr-transmit-5seconds": "0",
"lsas-requested": "3517",
"lsas-requested-5seconds": "0",
"lsas-retransmit": "8064643",
"lsas-retransmit-5seconds": "0",
"ospf-errors": {
"subnet-mismatch-error": "12"
},
"packet-statistics": [
{
"ospf-packet-type": "Hello",
"packets-received": "5703920",
"packets-received-5seconds": "3",
"packets-sent": "6202169",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "DbD",
"packets-received": "185459",
"packets-received-5seconds": "0",
"packets-sent": "212983",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "LSReq",
"packets-received": "208",
"packets-received-5seconds": "0",
"packets-sent": "214",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "LSUpdate",
"packets-received": "16742100",
"packets-received-5seconds": "0",
"packets-sent": "15671465",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "LSAck",
"packets-received": "2964236",
"packets-received-5seconds": "0",
"packets-sent": "5229203",
"packets-sent-5seconds": "0"
}
],
"total-database-summaries": "0",
"total-linkstate-request": "0",
"total-retransmits": "0"
}
}
}
|
norbert/__init__.py | AppleHolic/norbert | 142 | 19982 | import numpy as np
import itertools
from .contrib import compress_filter, smooth, residual_model
from .contrib import reduce_interferences
def expectation_maximization(y, x, iterations=2, verbose=0, eps=None):
r"""Expectation maximization algorithm, for refining source separation
estimates.
    This algorithm improves source separation results by enforcing
    multichannel consistency of the estimates. This usually means
    better perceptual quality in terms of spatial artifacts.
The implementation follows the details presented in [1]_, taking
inspiration from the original EM algorithm proposed in [2]_ and its
weighted refinement proposed in [3]_, [4]_.
It works by iteratively:
* Re-estimate source parameters (power spectral densities and spatial
covariance matrices) through :func:`get_local_gaussian_model`.
* Separate again the mixture with the new parameters by first computing
the new modelled mixture covariance matrices with :func:`get_mix_model`,
prepare the Wiener filters through :func:`wiener_gain` and apply them
      with :func:`apply_filter`.
References
----------
.. [1] <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME>, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] <NAME> and <NAME> and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
.. [3] <NAME> and <NAME> and <NAME>. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [4] <NAME> and <NAME> and <NAME>. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [5] <NAME> and <NAME> and <NAME> "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Parameters
----------
y: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
initial estimates for the sources
x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
complex STFT of the mixture signal
iterations: int [scalar]
number of iterations for the EM algorithm.
verbose: boolean
display some information if True
eps: float or None [scalar]
The epsilon value to use for regularization and filters.
If None, the default will use the epsilon of np.real(x) dtype.
Returns
-------
y: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
estimated sources after iterations
v: np.ndarray [shape=(nb_frames, nb_bins, nb_sources)]
estimated power spectral densities
R: np.ndarray [shape=(nb_bins, nb_channels, nb_channels, nb_sources)]
estimated spatial covariance matrices
Note
-----
* You need an initial estimate for the sources to apply this
algorithm. This is precisely what the :func:`wiener` function does.
* This algorithm *is not* an implementation of the "exact" EM
      proposed in [1]_. In particular, it does not compute the posterior
covariance matrices the same (exact) way. Instead, it uses the
simplified approximate scheme initially proposed in [5]_ and further
refined in [3]_, [4]_, that boils down to just take the empirical
covariance of the recent source estimates, followed by a weighted
average for the update of the spatial covariance matrix. It has been
empirically demonstrated that this simplified algorithm is more
robust for music separation.
Warning
-------
It is *very* important to make sure `x.dtype` is `np.complex`
if you want double precision, because this function will **not**
do such conversion for you from `np.complex64`, in case you want the
smaller RAM usage on purpose.
    It is usually better in terms of quality to have double
precision, by e.g. calling :func:`expectation_maximization`
with ``x.astype(np.complex)``.
This is notably needed if you let common deep learning frameworks like
PyTorch or TensorFlow do the STFT, because this usually happens in
single precision.
"""
# to avoid dividing by zero
if eps is None:
eps = np.finfo(np.real(x[0]).dtype).eps
# dimensions
(nb_frames, nb_bins, nb_channels) = x.shape
nb_sources = y.shape[-1]
# allocate the spatial covariance matrices and PSD
R = np.zeros((nb_bins, nb_channels, nb_channels, nb_sources), x.dtype)
v = np.zeros((nb_frames, nb_bins, nb_sources))
if verbose:
print('Number of iterations: ', iterations)
regularization = np.sqrt(eps) * (
np.tile(np.eye(nb_channels, dtype=np.complex64),
(1, nb_bins, 1, 1)))
for it in range(iterations):
# constructing the mixture covariance matrix. Doing it with a loop
        # to avoid ever storing the whole 6D tensor in RAM
if verbose:
print('EM, iteration %d' % (it+1))
for j in range(nb_sources):
# update the spectrogram model for source j
v[..., j], R[..., j] = get_local_gaussian_model(
y[..., j],
eps)
for t in range(nb_frames):
Cxx = get_mix_model(v[None, t, ...], R)
Cxx += regularization
inv_Cxx = _invert(Cxx, eps)
# separate the sources
for j in range(nb_sources):
W_j = wiener_gain(v[None, t, ..., j], R[..., j], inv_Cxx)
y[t, ..., j] = apply_filter(x[None, t, ...], W_j)[0]
return y, v, R
def wiener(v, x, iterations=1, use_softmask=True, eps=None):
"""Wiener-based separation for multichannel audio.
The method uses the (possibly multichannel) spectrograms `v` of the
sources to separate the (complex) Short Term Fourier Transform `x` of the
mix. Separation is done in a sequential way by:
* Getting an initial estimate. This can be done in two ways: either by
directly using the spectrograms with the mixture phase, or
by using :func:`softmask`.
    * Refining these initial estimates through a call to
:func:`expectation_maximization`.
This implementation also allows to specify the epsilon value used for
regularization. It is based on [1]_, [2]_, [3]_, [4]_.
References
----------
.. [1] <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME>, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] <NAME> and <NAME> and <NAME>. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [3] <NAME> and <NAME> and <NAME>. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [4] <NAME> and <NAME> and <NAME> "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Parameters
----------
v: np.ndarray [shape=(nb_frames, nb_bins, {1,nb_channels}, nb_sources)]
spectrograms of the sources. This is a nonnegative tensor that is
usually the output of the actual separation method of the user. The
spectrograms may be mono, but they need to be 4-dimensional in all
cases.
x: np.ndarray [complex, shape=(nb_frames, nb_bins, nb_channels)]
STFT of the mixture signal.
iterations: int [scalar]
number of iterations for the EM algorithm
use_softmask: boolean
* if `False`, then the mixture phase will directly be used with the
spectrogram as initial estimates.
* if `True`, a softmasking strategy will be used as described in
:func:`softmask`.
eps: {None, float}
Epsilon value to use for computing the separations. This is used
whenever division with a model energy is performed, i.e. when
softmasking and when iterating the EM.
It can be understood as the energy of the additional white noise
that is taken out when separating.
        If `None`, the default value is taken as `np.finfo(np.real(x[0]).dtype).eps`.
Returns
-------
y: np.ndarray
[complex, shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
STFT of estimated sources
Note
----
* Be careful that you need *magnitude spectrogram estimates* for the
case `softmask==False`.
    * We recommend using `softmask=False` only if your spectrogram model is
pretty good, e.g. when the output of a deep neural net. In the case
it is not so great, opt for an initial softmasking strategy.
* The epsilon value will have a huge impact on performance. If it's large,
only the parts of the signal with a significant energy will be kept in
the sources. This epsilon then directly controls the energy of the
reconstruction error.
Warning
-------
As in :func:`expectation_maximization`, we recommend converting the
mixture `x` to double precision `np.complex` *before* calling
:func:`wiener`.
"""
if use_softmask:
y = softmask(v, x, eps=eps)
else:
y = v * np.exp(1j*np.angle(x[..., None]))
if not iterations:
return y
# we need to refine the estimates. Scales down the estimates for
# numerical stability
max_abs = max(1, np.abs(x).max()/10.)
x /= max_abs
y = expectation_maximization(y/max_abs, x, iterations, eps=eps)[0]
return y*max_abs
def softmask(v, x, logit=None, eps=None):
"""Separates a mixture with a ratio mask, using the provided sources
spectrograms estimates. Additionally allows compressing the mask with
a logit function for soft binarization.
The filter does *not* take multichannel correlations into account.
The masking strategy can be traced back to the work of <NAME> in the
case of *power* spectrograms [1]_. In the case of *fractional* spectrograms
    like magnitude, this filter is often referred to as a "ratio mask", and
has been shown to be the optimal separation procedure under alpha-stable
assumptions [2]_.
References
----------
.. [1] <NAME>,"Extrapolation, Inerpolation, and Smoothing of Stationary
Time Series." 1949.
.. [2] <NAME> and <NAME>. "Generalized Wiener filtering with
fractional power spectrograms." 2015 IEEE International Conference on
Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2015.
Parameters
----------
v: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
spectrograms of the sources
x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
mixture signal
logit: {None, float between 0 and 1}
enable a compression of the filter. If not None, it is the threshold
value for the logit function: a softmask above this threshold is
brought closer to 1, and a softmask below is brought closer to 0.
Returns
-------
ndarray, shape=(nb_frames, nb_bins, nb_channels, nb_sources)
estimated sources
"""
# to avoid dividing by zero
if eps is None:
eps = np.finfo(np.real(x[0]).dtype).eps
total_energy = np.sum(v, axis=-1, keepdims=True)
filter = v/(eps + total_energy.astype(x.dtype))
if logit is not None:
filter = compress_filter(filter, eps, thresh=logit, multichannel=False)
return filter * x[..., None]
def _invert(M, eps):
"""
Invert matrices, with special fast handling of the 1x1 and 2x2 cases.
Will generate errors if the matrices are singular: user must handle this
through his own regularization schemes.
Parameters
----------
M: np.ndarray [shape=(..., nb_channels, nb_channels)]
matrices to invert: must be square along the last two dimensions
eps: [scalar]
regularization parameter to use _only in the case of matrices
bigger than 2x2
Returns
-------
invM: np.ndarray, [shape=M.shape]
inverses of M
"""
nb_channels = M.shape[-1]
if nb_channels == 1:
# scalar case
invM = 1.0/(M+eps)
elif nb_channels == 2:
# two channels case: analytical expression
det = (
M[..., 0, 0]*M[..., 1, 1] -
M[..., 0, 1]*M[..., 1, 0])
invDet = 1.0/(det)
invM = np.empty_like(M)
invM[..., 0, 0] = invDet*M[..., 1, 1]
invM[..., 1, 0] = -invDet*M[..., 1, 0]
invM[..., 0, 1] = -invDet*M[..., 0, 1]
invM[..., 1, 1] = invDet*M[..., 0, 0]
else:
# general case : no use of analytical expression (slow!)
invM = np.linalg.pinv(M, eps)
return invM
def wiener_gain(v_j, R_j, inv_Cxx):
"""
Compute the wiener gain for separating one source, given all parameters.
It is the matrix applied to the mix to get the posterior mean of the source
as in [1]_
References
----------
.. [1] <NAME> and <NAME> and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
Parameters
----------
v_j: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
power spectral density of the target source.
R_j: np.ndarray [shape=(nb_bins, nb_channels, nb_channels)]
spatial covariance matrix of the target source
inv_Cxx: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
inverse of the mixture covariance matrices
Returns
-------
G: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
wiener filtering matrices, to apply to the mix, e.g. through
:func:`apply_filter` to get the target source estimate.
"""
(_, nb_channels) = R_j.shape[:2]
# computes multichannel Wiener gain as v_j R_j inv_Cxx
G = np.zeros_like(inv_Cxx)
for (i1, i2, i3) in itertools.product(*(range(nb_channels),)*3):
G[..., i1, i2] += (R_j[None, :, i1, i3] * inv_Cxx[..., i3, i2])
G *= v_j[..., None, None]
return G
def apply_filter(x, W):
"""
Applies a filter on the mixture. Just corresponds to a matrix
multiplication.
Parameters
----------
x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
STFT of the signal on which to apply the filter.
W: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
filtering matrices, as returned, e.g. by :func:`wiener_gain`
Returns
-------
y_hat: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
filtered signal
"""
nb_channels = W.shape[-1]
# apply the filter
y_hat = 0+0j
for i in range(nb_channels):
y_hat += W[..., i] * x[..., i, None]
return y_hat
def get_mix_model(v, R):
"""
Compute the model covariance of a mixture based on local Gaussian models.
simply adds up all the v[..., j] * R[..., j]
Parameters
----------
v: np.ndarray [shape=(nb_frames, nb_bins, nb_sources)]
Power spectral densities for the sources
R: np.ndarray [shape=(nb_bins, nb_channels, nb_channels, nb_sources)]
Spatial covariance matrices of each sources
Returns
-------
Cxx: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
Covariance matrix for the mixture
"""
nb_channels = R.shape[1]
(nb_frames, nb_bins, nb_sources) = v.shape
Cxx = np.zeros((nb_frames, nb_bins, nb_channels, nb_channels), R.dtype)
for j in range(nb_sources):
Cxx += v[..., j, None, None] * R[None, ..., j]
return Cxx
def _covariance(y_j):
"""
Compute the empirical covariance for a source.
Parameters
----------
y_j: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)].
complex stft of the source.
Returns
-------
Cj: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
just y_j * conj(y_j.T): empirical covariance for each TF bin.
"""
(nb_frames, nb_bins, nb_channels) = y_j.shape
Cj = np.zeros((nb_frames, nb_bins, nb_channels, nb_channels),
y_j.dtype)
for (i1, i2) in itertools.product(*(range(nb_channels),)*2):
Cj[..., i1, i2] += y_j[..., i1] * np.conj(y_j[..., i2])
return Cj
def get_local_gaussian_model(y_j, eps=1.):
r"""
Compute the local Gaussian model [1]_ for a source given the complex STFT.
First get the power spectral densities, and then the spatial covariance
matrix, as done in [1]_, [2]_
References
----------
.. [1] <NAME> and <NAME> and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
.. [2] <NAME> and <NAME> and <NAME>. "Low bitrate informed
source separation of realistic mixtures." 2013 IEEE International
Conference on Acoustics, Speech and Signal Processing. IEEE, 2013.
Parameters
----------
y_j: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
complex stft of the source.
eps: float [scalar]
regularization term
Returns
-------
v_j: np.ndarray [shape=(nb_frames, nb_bins)]
power spectral density of the source
    R_j: np.ndarray [shape=(nb_bins, nb_channels, nb_channels)]
Spatial covariance matrix of the source
"""
v_j = np.mean(np.abs(y_j)**2, axis=2)
# updates the spatial covariance matrix
nb_frames = y_j.shape[0]
R_j = 0
weight = eps
for t in range(nb_frames):
R_j += _covariance(y_j[None, t, ...])
weight += v_j[None, t, ...]
R_j /= weight[..., None, None]
return v_j, R_j
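def _demo():  # pragma: no cover
    # Hedged usage sketch (not part of the original library): run multichannel
    # Wiener filtering on random data, e.g. via `import norbert; norbert._demo()`.
    # Shapes follow the docstrings above; real inputs would be the mixture STFT
    # and the spectrogram estimates produced by a separation model.
    rng = np.random.RandomState(0)
    nb_frames, nb_bins, nb_channels, nb_sources = 10, 65, 2, 2
    # complex STFT of the mixture (double precision, as recommended above)
    x = rng.randn(nb_frames, nb_bins, nb_channels) \
        + 1j * rng.randn(nb_frames, nb_bins, nb_channels)
    # nonnegative spectrogram estimates, one per source
    v = np.abs(rng.randn(nb_frames, nb_bins, nb_channels, nb_sources))
    y = wiener(v, x, iterations=1)
    print("separated STFTs:", y.shape)  # (10, 65, 2, 2)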
|
lightnn/base/__init__.py | tongluocq/lightnn | 131 | 19997 | <gh_stars>100-1000
#!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .activations import *
from .losses import *
from .initializers import *
from .optimizers import *
|
2009/plotting_data_monitor/_distrib.py | mikiec84/code-for-blog | 1,199 | 20000 | from eblib import libcollect
# Create a LibCollect object
lc = libcollect.LibCollect()
# Prepare arguments for do_collect
#
# Path to the script (can be absolute or relative)
scriptname = 'plotting_data_monitor.pyw'
# Ask the resulting distribution to be placed in
# directory distrib
targetdir = 'distrib'
# Specify which libraries to exclude from the
# distribution (because you know they're installed
# on the target machine)
excludes = ["PyQt4",
"numpy",
"serial",
"pywin",
"win32api",
"win32com"]
# This does the actual work
# See the documentation of LibCollect for more options
#
lc.do_collect( scriptname,
targetdir,
excludes,
verbose=True)
|
qf_lib/backtesting/events/time_event/regular_date_time_rule.py | webclinic017/qf-lib | 198 | 20016 | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta
class RegularDateTimeRule(object):
"""
RegularDateTimeRule is a helper class for TimeEvents. It has a convenience method for calculating
next trigger time for events which occur on certain date/time on regular basis (e.g. each day at 9:30,
each first day of a month, etc.).
"""
def __init__(self, year: int = None, month: int = None, day: int = None, weekday: int = None, hour: int = None,
minute: int = None, second: int = None, microsecond: int = None):
self.trigger_time = RelativeDelta(
year=year, month=month, day=day, weekday=weekday, hour=hour, minute=minute,
second=second, microsecond=microsecond
)
def next_trigger_time(self, now: datetime) -> datetime:
next_trigger_time = now + self.trigger_time
# check if next_trigger_time is in the past and if it is, it needs to be adjusted so that it's in the future
if next_trigger_time <= now:
next_trigger_time = self._get_next_trigger_time_after(next_trigger_time)
return next_trigger_time
def _get_next_trigger_time_after(self, start_time: datetime):
# calculate proper adjustment (time shift):
        # if the month is important for the trigger time, then we should go to the next year
        # to get the next occurrence; if it is unimportant but the day is important,
        # then we should go to the next month, etc.
time_adjustment = None
if self.trigger_time.year is not None:
# nothing can be done if the year is important. No way of getting next occurrence (there will never be
# the same year again)
raise ArithmeticError(
"Cannot get next occurrence of the event with `year` specified "
"(there will never be the same year again)."
)
elif self.trigger_time.month is not None:
time_adjustment = RelativeDelta(years=1)
elif self.trigger_time.day is not None:
time_adjustment = RelativeDelta(months=1)
elif self.trigger_time.weekday is not None:
time_adjustment = RelativeDelta(weeks=1)
elif self.trigger_time.hour is not None:
time_adjustment = RelativeDelta(days=1)
elif self.trigger_time.minute is not None:
time_adjustment = RelativeDelta(hours=1)
elif self.trigger_time.second is not None:
time_adjustment = RelativeDelta(minutes=1)
elif self.trigger_time.microsecond is not None:
time_adjustment = RelativeDelta(seconds=1)
next_trigger_time = start_time + time_adjustment
return next_trigger_time
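if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): a rule that fires
    # every day at 09:30; the date below is an arbitrary example.
    daily_rule = RegularDateTimeRule(hour=9, minute=30, second=0, microsecond=0)
    now = datetime(2020, 1, 15, 10, 0, 0)
    # 09:30 has already passed on that day, so the next trigger is the day after
    print(daily_rule.next_trigger_time(now))  # expected: 2020-01-16 09:30:00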
|
commands/limit.py | nstra111/autovc | 177 | 20024 | <filename>commands/limit.py
import utils
import functions as func
from commands.base import Cmd
help_text = [
[
("Usage:", "<PREFIX><COMMAND>\n"
"<PREFIX><COMMAND> `N`"),
("Description:",
"Use when already in a channel - Limit the number of users allowed in your channel to either the current "
"number of users, or the specified number.\n\n"
"Use *<PREFIX>un<COMMAND>* to remove the limit."),
("Example:", "<PREFIX><COMMAND> 4"),
]
]
async def execute(ctx, params):
params_str = ' '.join(params)
guild = ctx['guild']
settings = ctx['settings']
limit = utils.strip_quotes(params_str)
author = ctx['message'].author
vc = ctx['voice_channel']
if limit:
try:
limit = abs(int(limit))
except ValueError:
return False, "`{}` is not a number.".format(limit)
else:
limit = len(vc.members)
if limit > 99:
return False, "The user limit cannot be higher than 99."
await vc.edit(user_limit=limit)
if limit != 0:
log_msg = "👪 {} (`{}`) set the user limit of \"**{}**\" (`{}`) to {}".format(
func.user_hash(author), author.id, func.esc_md(vc.name), vc.id, limit
)
else:
log_msg = "👨👩👧👦 {} (`{}`) removed the user limit of \"**{}**\" (`{}`)".format(
func.user_hash(author), author.id, func.esc_md(vc.name), vc.id
)
await func.server_log(guild, log_msg, 2, settings)
return True, None
command = Cmd(
execute=execute,
help_text=help_text,
params_required=0,
admin_required=False,
voice_required=True,
creator_only=True,
)
|
notes/migrations/0005_auto_20160130_0015.py | nicbou/markdown-notes | 121 | 20053 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('notes', '0004_auto_20151022_1517'),
]
operations = [
migrations.CreateModel(
name='Notebook',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['title'],
},
bases=(models.Model,),
),
migrations.AddField(
model_name='note',
name='notebook',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='notes.Notebook', null=True),
preserve_default=True,
),
]
|
flask_youku/__init__.py | xiaoyh121/program | 176 | 20069 | <filename>flask_youku/__init__.py
from flask import Blueprint, Markup
from flask import render_template
class Youku(object):
"""Flask-Youku extents."""
def __init__(self, app=None, **kwargs):
"""Init Flask-Youku's instance via app object"""
if app:
self.init_app(app)
def init_app(self, app):
"""Init Flask-Youku's instance via app object"""
self.register_blueprint(app)
# Create the Jinja function `youku`
app.add_template_global(youku)
def register_blueprint(self, app):
"""Register the youku blueprint into app object."""
module = Blueprint(
'youku',
__name__,
template_folder='templates')
app.register_blueprint(module)
return module
class Video(object):
"""Receive the youku_id to rendering the video.html"""
def __init__(self, video_id, cls='youku'):
self.video_id = video_id
self.cls = cls
def render(self, *args, **kwargs):
return render_template(*args, **kwargs)
@property
def html(self):
"""Tag the HTML as security string."""
return Markup(
self.render('youku/video.html', video=self))
def youku(*args, **kwargs):
"""Define the Jinja function."""
video = Video(*args, **kwargs)
return video.html
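if __name__ == "__main__":
    # Hedged usage sketch (not part of the original package): wire the extension
    # into a throwaway Flask app and render the `youku` Jinja global inline.
    # 'XMzE2MzQ5NDQ4' is a placeholder video id, and the package's bundled
    # 'youku/video.html' template must be available for rendering to work.
    from flask import Flask, render_template_string
    demo_app = Flask(__name__)
    Youku(demo_app)
    @demo_app.route('/demo')
    def demo():
        return render_template_string("{{ youku('XMzE2MzQ5NDQ4') }}")
    demo_app.run(debug=True)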
|
Common_3/Tools/ForgeShadingLanguage/generators/d3d.py | divecoder/The-Forge | 3,058 | 20078 | """ GLSL shader generation """
from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace
from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl
import os, sys, importlib, re
from shutil import copyfile
def pssl(fsl, dst, rootSignature=None):
return d3d(fsl, dst, pssl=True, d3d12=False, rootSignature=rootSignature)
def prospero(fsl, dst):
return d3d(fsl, dst, pssl=True, prospero=True)
def xbox(fsl, dst, rootSignature=None):
return d3d(fsl, dst, xbox=True, d3d12=True, rootSignature=rootSignature)
def d3d12(fsl, dst):
return d3d(fsl, dst, d3d12=True)
def scarlett(fsl, dst, rootSignature=None):
return xbox(fsl, dst, rootSignature)
def d3d(fsl, dst, pssl=False, prospero=False, xbox=False, rootSignature=None, d3d12=False):
shader = getShader(fsl, dst)
shader_src = getHeader(fsl)
if not (d3d12 or pssl or xbox):
shader_src += ['#define DIRECT3D11\n']
if prospero:
import prospero
pssl = prospero
shader_src += ['#define PROSPERO\n']
shader_src += prospero.preamble()
elif pssl:
import orbis
pssl = orbis
shader_src += ['#define ORBIS\n']
shader_src += orbis.preamble()
if xbox:
import xbox
shader_src += ['#define XBOX\n']
shader_src += xbox.preamble()
if d3d12:
shader_src += ['#define DIRECT3D12\n']
shader_src += ['#define STAGE_', shader.stage.name, '\n']
if shader.enable_waveops:
shader_src += ['#define ENABLE_WAVEOPS()\n']
# directly embed d3d header in shader
header_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'includes', 'd3d.h')
header_lines = open(header_path).readlines()
shader_src += header_lines + ['\n']
nonuniformresourceindex = None
    # tessellation
pcf_returnType = None
# for SV_PrimitiveID usage in pixel shaders, generate a pass-through gs
passthrough_gs = False
if pssl and shader.stage == Stages.FRAG:
for dtype, dvar in shader.flat_args:
if getMacroName(dtype).upper() == 'SV_PRIMITIVEID':
passthrough_gs = True
if prospero:
prospero.gen_passthrough_gs(shader, dst.replace('frag', 'geom'))
else:
orbis.gen_passthrough_gs(shader, dst.replace('frag', 'geom'))
last_res_decl = 0
explicit_res_decl = None
srt_resources = { descriptor_set.name: [] for descriptor_set in DescriptorSets }
srt_free_resources = []
srt_references = []
defineLoc = len(shader_src)
parsing_struct = None
skip_semantics = False
struct_elements = []
srt_redirections = set()
for line in shader.lines:
def get_uid(name):
return name + '_' + str(len(shader_src))
# dont process commented lines
if line.strip().startswith('//'):
shader_src += [line]
continue
if is_groupshared_decl(line):
dtype, dname = getMacro(line)
basename = getArrayBaseName(dname)
shader_src += ['#define srt_'+basename+' '+basename+'\n']
if not pssl:
line = 'groupshared '+dtype+' '+dname+';\n'
else:
line = 'thread_group_memory '+dtype+' '+dname+';\n'
if 'DECLARE_RESOURCES' in line:
explicit_res_decl = len(shader_src) + 1
line = '//' + line
if line.strip().startswith('STRUCT(') or line.strip().startswith('CBUFFER(') or line.strip().startswith('PUSH_CONSTANT('):
parsing_struct = getMacro(line)
struct_name = parsing_struct[0]
struct_elements = []
if pssl and 'PUSH_CONSTANT' in line:
skip_semantics = True
macro = get_uid(struct_name)
shader_src += ['#define ', macro, '\n']
srt_free_resources += [(macro, pssl.declare_rootconstant(struct_name))]
if pssl and 'CBUFFER' in line:
skip_semantics = True
res_freq = parsing_struct[1]
macro = get_uid(struct_name)
shader_src += ['#define ', macro, '\n']
if 'rootcbv' in struct_name:
srt_free_resources += [(macro, pssl.declare_cbuffer(struct_name))]
else:
srt_resources[res_freq] += [(macro, pssl.declare_cbuffer(struct_name))]
if parsing_struct and line.strip().startswith('DATA('):
data_decl = getMacro(line)
if skip_semantics or data_decl[-1] == 'None':
line = get_whitespace(line) + data_decl[0] + ' ' + data_decl[1] + ';\n'
if pssl and type(parsing_struct) is not str:
basename = getArrayBaseName(data_decl[1])
macro = 'REF_' + get_uid(basename)
shader_src += ['#define ', macro, '\n']
init, ref = pssl.declare_element_reference(shader, parsing_struct, data_decl)
shader_src += [*init, '\n']
srt_redirections.add(basename)
struct_elements += [(macro, ref)]
srt_references += [(macro, (init, ref))]
shader_src += [line]
continue
if parsing_struct and '};' in line:
# if this shader is the receiving end of a passthrough_gs, insert the necessary inputs
if passthrough_gs and shader.struct_args[0][0] == parsing_struct:
shader_src += ['\tDATA(FLAT(uint), PrimitiveID, TEXCOORD8);\n']
shader_src += [line]
skip_semantics = False
if type(parsing_struct) is not str:
last_res_decl = len(shader_src)+1
parsing_struct = None
continue
resource_decl = None
if line.strip().startswith('RES('):
resource_decl = getMacro(line)
last_res_decl = len(shader_src)+1
if pssl and resource_decl:
# shader_src += ['// ', line.strip(), '\n']
_, res_name, res_freq, _, _ = resource_decl
basename = getArrayBaseName(res_name)
macro = get_uid(basename)
# shader_src += ['#define ', macro, ' //', line.strip(), '\n']
shader_src += ['#define ', macro, '\n']
srt_resources[res_freq] += [(macro, pssl.declare_resource(resource_decl))]
# macro = 'REF_' + macro
# shader_src += ['#define ', macro, '\n']
init, ref = pssl.declare_reference(shader, resource_decl)
shader_src += [*init, '\n']
srt_references += [(macro, (init, ref))]
srt_redirections.add(basename)
last_res_decl = len(shader_src)+1
# continue
if 'TESS_VS_SHADER(' in line and prospero:
vs_filename = getMacro(line).strip('"')
vs_fsl_path = os.path.join(os.path.dirname(fsl), vs_filename)
ls_vs_filename = 'ls_'+vs_filename.replace('.fsl', '')
vs_pssl = os.path.join(os.path.dirname(dst), ls_vs_filename)
d3d(vs_fsl_path, vs_pssl, pssl=True, prospero=True)
shader_src += [
'#undef VS_MAIN\n',
'#define VS_MAIN vs_main\n',
'#include "', ls_vs_filename, '"\n'
]
continue
if '_MAIN(' in line and shader.stage == Stages.TESC and prospero:
shader_src += pssl.insert_tesc('vs_main')
if '_MAIN(' in line and shader.returnType:
if shader.returnType not in shader.structs:
if shader.stage == Stages.FRAG:
if not 'SV_DEPTH' in shader.returnType.upper():
line = line[:-1] + ': SV_TARGET\n'
else:
line = line[:-1] + ': SV_DEPTH\n'
if shader.stage == Stages.VERT:
line = line[:-1] + ': SV_POSITION\n'
# manually transform Type(var) to Type var (necessary for DX11/fxc)
if '_MAIN(' in line:
for dtype, var in shader.struct_args:
line = line.replace(dtype+'('+var+')', dtype + ' ' + var)
for dtype, dvar in shader.flat_args:
sem = getMacroName(dtype).upper()
innertype = getMacro(dtype)
ldtype = line.find(dtype)
line = line[:ldtype]+innertype+line[ldtype+len(dtype):]
l0 = line.find(' '+dvar, ldtype) + len(dvar)+1
line = line[:l0]+' : '+sem+line[l0:]
# if this shader is the receiving end of a passthrough_gs, get rid of the PrimitiveID input
if passthrough_gs:
for dtype, dvar in shader.flat_args:
if 'SV_PRIMITIVEID' in dtype.upper():
upper_line = line.upper()
l0 = upper_line.find('SV_PRIMITIVEID')
l1 = upper_line.rfind(',', 0, l0)
line = line.replace(line[l1: l0+len('SV_PRIMITIVEID')], '')
if pssl:
for dtype, darg in shader.flat_args:
if 'SV_INSTANCEID' in dtype.upper():
shader_src += pssl.set_indirect_draw()
if '_MAIN(' in line and (pssl or xbox) and rootSignature:
l0 = rootSignature.find('SrtSignature')
l1 = rootSignature.find('{', l0)
srt_name = rootSignature[l0: l1].split()[-1]
res_sig = 'RootSignature' if xbox else 'SrtSignature'
shader_src += ['[', res_sig, '(', srt_name, ')]\n', line]
continue
# if 'INIT_MAIN' in line:
# if pssl:
# shader_src += ['\tinit_global_references();\n']
if 'INIT_MAIN' in line and shader.returnType:
# mName = getMacroName(shader.returnType)
# mArg = getMacro(shader.returnType)
# line = line.replace('INIT_MAIN', '{} {}'.format(mName, mArg))
line = get_whitespace(line)+'//'+line.strip()+'\n'
# if this shader is the receiving end of a passthrough_gs, copy the PrimitiveID from GS output
if passthrough_gs:
for dtype, dvar in shader.flat_args:
if 'SV_PRIMITIVEID' in dtype.upper():
shader_src += ['uint ', dvar, ' = ', shader.struct_args[0][1], '.PrimitiveID;\n']
if 'BeginNonUniformResourceIndex(' in line:
index, max_index = getMacro(line), None
assert index != [], 'No index provided for {}'.format(line)
if type(index) == list:
max_index = index[1]
index = index[0]
nonuniformresourceindex = index
if pssl:
shader_src += pssl.begin_nonuniformresourceindex(nonuniformresourceindex, max_index)
continue
else:
line = '#define {0} NonUniformResourceIndex({0})\n'.format(nonuniformresourceindex)
if 'EndNonUniformResourceIndex()' in line:
assert nonuniformresourceindex, 'EndNonUniformResourceIndex: BeginNonUniformResourceIndex not called/found'
if pssl:
shader_src += pssl.end_nonuniformresourceindex(nonuniformresourceindex)
continue
else:
line = '#undef {}\n'.format(nonuniformresourceindex)
nonuniformresourceindex = None
elif re.match('\s*RETURN', line):
if shader.returnType:
line = line.replace('RETURN', 'return ')
else:
line = line.replace('RETURN()', 'return')
        # tessellation
if shader.pcf and shader.pcf in line and not pcf_returnType:
loc = line.find(shader.pcf)
pcf_returnType = line[:loc].strip()
# line = getMacroName(pcf_returnType) + ' ' + line[loc:]
for dtype, dvar in shader.pcf_arguments:
if not 'INPUT_PATCH' in dtype and not 'OUTPUT_PATCH' in dtype:
line = line.replace(dtype, getMacro(dtype))
line = line.replace(dvar, dvar+': '+getMacroName(dtype))
if pcf_returnType and re.match('\s*PCF_INIT', line):
# line = line.replace('PCF_INIT', getMacroName(pcf_returnType) + ' ' + getMacro(pcf_returnType))
line = line.replace('PCF_INIT', '')
if pcf_returnType and 'PCF_RETURN' in line:
line = line.replace('PCF_RETURN', 'return ')
# line = line.replace('PCF_RETURN', '{ return ' + getMacro(pcf_returnType) + ';}')
if 'INDIRECT_DRAW(' in line:
if pssl:
shader_src += pssl.set_indirect_draw()
line = '//' + line
if 'SET_OUTPUT_FORMAT(' in line:
if pssl:
shader_src += pssl.set_output_format(getMacro(line))
line = '//' + line
if 'PS_ZORDER_EARLYZ(' in line:
if xbox:
shader_src += xbox.set_ps_zorder_earlyz()
line = '//' + line
shader_src += [line]
if pssl:
if explicit_res_decl:
last_res_decl = explicit_res_decl
        if last_res_decl > 0: # skip srt altogether if no declared resources or not requested
srt = pssl.gen_srt(srt_resources, srt_free_resources, srt_references)
open(dst + '.srt.h', 'w').write(srt)
shader_src.insert(last_res_decl, '\n#include \"' + os.path.basename(dst) + '.srt.h\"\n')
# insert root signature at the end (not sure whether that will work for xbox)
if rootSignature and pssl:
shader_src += [_line+'\n' for _line in rootSignature.splitlines()]# + shader.lines
if rootSignature and xbox:
shader_src += rootSignature + ['\n']# + shader.lines
open(dst, 'w').writelines(shader_src)
return 0 |
tests/integration/test_between_tags.py | liorbass/pydriller | 583 | 20081 | # Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pydriller.repository import Repository
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
def test_between_revisions():
from_tag = 'tag1'
to_tag = 'tag3'
lc = list(Repository('test-repos/tags',
from_tag=from_tag,
to_tag=to_tag).traverse_commits())
assert len(lc) == 5
assert '6bb9e2c6a8080e6b5b34e6e316c894b2ddbf7fcd' == lc[0].hash
assert 'f1a90b8d7b151ceefd3e3dfc0dc1d0e12b5f48d0' == lc[1].hash
assert '4638730126d40716e230c2040751a13153fb1556' == lc[2].hash
assert 'a26f1438bd85d6b22497c0e5dae003812becd0bc' == lc[3].hash
assert '627e1ad917a188a861c9fedf6e5858b79edbe439' == lc[4].hash
def test_multiple_repos_with_tags():
from_tag = 'tag2'
to_tag = 'tag3'
repos = [
'test-repos/tags',
'test-repos/tags',
'test-repos/tags'
]
lc = list(Repository(path_to_repo=repos,
from_tag=from_tag,
to_tag=to_tag).traverse_commits())
assert len(lc) == 9
|
examples/dialogs.py | tgolsson/appJar | 666 | 20094 | from appJar import gui
def press(btn):
if btn == "info": app.infoBox("Title Here", "Message here...")
if btn == "error": app.errorBox("Title Here", "Message here...")
if btn == "warning": app.warningBox("Title Here", "Message here...")
if btn == "yesno": app.yesNoBox("Title Here", "Message here...")
if btn == "question": app.questionBox("Title Here", "Message here...")
if btn == "ok": app.okBox("Title Here", "Message here...")
if btn == "retry": app.retryBox("Title Here", "Message here...")
if btn == "text": app.textBox("Title Here", "Message here...")
if btn == "number": app.numberBox("Title Here", "Message here...")
app=gui()
app.addButtons(["info", "error", "warning", "yesno", "question"], press)
app.addButtons(["ok", "retry", "text", "number"], press)
app.go()
|
tests/polynomials.py | mernst/cozy | 188 | 20098 | <filename>tests/polynomials.py
import unittest
from cozy.polynomials import Polynomial
class TestPolynomials(unittest.TestCase):
def test_sorting(self):
self.assertLess(Polynomial([2019, 944, 95]), Polynomial([2012, 945, 95]))
self.assertGreater(Polynomial([2012, 945, 95]), Polynomial([2019, 944, 95]))
|
examples/twisted/websocket/auth_persona/server.py | rapyuta-robotics/autobahn-python | 1,670 | 20109 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
import json
import urllib
import Cookie
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
import autobahn
from autobahn.util import newid, utcnow
from autobahn.websocket import http
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
class PersonaServerProtocol(WebSocketServerProtocol):
"""
WebSocket server protocol that tracks WebSocket connections using HTTP cookies,
and authenticates WebSocket connections using Mozilla Persona.
"""
def onConnect(self, request):
# This is called during the initial WebSocket opening handshake.
protocol, headers = None, {}
# our cookie tracking ID
self._cbtid = None
# see if there already is a cookie set ..
if 'cookie' in request.headers:
try:
cookie = Cookie.SimpleCookie()
cookie.load(str(request.headers['cookie']))
except Cookie.CookieError:
pass
else:
if 'cbtid' in cookie:
cbtid = cookie['cbtid'].value
if cbtid in self.factory._cookies:
self._cbtid = cbtid
log.msg("Cookie already set: %s" % self._cbtid)
# if no cookie is set, create a new one ..
if self._cbtid is None:
self._cbtid = newid()
maxAge = 86400
cbtData = {'created': utcnow(),
'authenticated': None,
'maxAge': maxAge,
'connections': set()}
self.factory._cookies[self._cbtid] = cbtData
# do NOT add the "secure" cookie attribute! "secure" refers to the
# scheme of the Web page that triggered the WS, not WS itself!!
##
headers['Set-Cookie'] = 'cbtid=%s;max-age=%d' % (self._cbtid, maxAge)
log.msg("Setting new cookie: %s" % self._cbtid)
# add this WebSocket connection to the set of connections
# associated with the same cookie
self.factory._cookies[self._cbtid]['connections'].add(self)
# accept the WebSocket connection, speaking subprotocol `protocol`
# and setting HTTP headers `headers`
return (protocol, headers)
def onOpen(self):
# This is called when initial WebSocket opening handshake has
# been completed.
# see if we are authenticated ..
authenticated = self.factory._cookies[self._cbtid]['authenticated']
if not authenticated:
# .. if not, send authentication request
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_REQUIRED'}))
else:
# .. if yes, send info on authenticated user
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATED', 'email': authenticated}))
def onClose(self, wasClean, code, reason):
# This is called when WebSocket connection is gone
# remove this connection from list of connections associated with
# same cookie
self.factory._cookies[self._cbtid]['connections'].remove(self)
# if list gets empty, possibly do something ..
if not self.factory._cookies[self._cbtid]['connections']:
log.msg("All connections for {} gone".format(self._cbtid))
def onMessage(self, payload, isBinary):
# This is called when we receive a WebSocket message
if not isBinary:
msg = json.loads(payload)
if msg['cmd'] == 'AUTHENTICATE':
                # The client did its Mozilla Persona authentication thing
# and now wants to verify the authentication and login.
assertion = msg.get('assertion')
audience = msg.get('audience')
# To verify the authentication, we need to send a HTTP/POST
# to Mozilla Persona. When successful, Persona will send us
# back something like:
# {
# "audience": "http://192.168.1.130:8080/",
# "expires": 1393681951257,
# "issuer": "gmail.login.persona.org",
# "email": "<EMAIL>",
# "status": "okay"
# }
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
body = urllib.urlencode({'audience': audience, 'assertion': assertion})
from twisted.web.client import getPage
d = getPage(url="https://verifier.login.persona.org/verify",
method='POST',
postdata=body,
headers=headers)
log.msg("Authentication request sent.")
def done(res):
res = json.loads(res)
if res['status'] == 'okay':
# Mozilla Persona successfully authenticated the user
# remember the user's email address. this marks the cookie as
# authenticated
self.factory._cookies[self._cbtid]['authenticated'] = res['email']
# inform _all_ WebSocket connections of the successful auth.
msg = json.dumps({'cmd': 'AUTHENTICATED', 'email': res['email']})
for proto in self.factory._cookies[self._cbtid]['connections']:
proto.sendMessage(msg)
log.msg("Authenticated user {}".format(res['email']))
else:
log.msg("Authentication failed: {}".format(res.get('reason')))
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_FAILED', 'reason': res.get('reason')}))
self.sendClose()
def error(err):
log.msg("Authentication request failed: {}".format(err.value))
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_FAILED', 'reason': str(err.value)}))
self.sendClose()
d.addCallbacks(done, error)
elif msg['cmd'] == 'LOGOUT':
# user wants to logout ..
if self.factory._cookies[self._cbtid]['authenticated']:
self.factory._cookies[self._cbtid]['authenticated'] = False
# inform _all_ WebSocket connections of the logout
msg = json.dumps({'cmd': 'LOGGED_OUT'})
for proto in self.factory._cookies[self._cbtid]['connections']:
proto.sendMessage(msg)
else:
log.msg("unknown command {}".format(msg))
class PersonaServerFactory(WebSocketServerFactory):
"""
WebSocket server factory with cookie/sessions map.
"""
protocol = PersonaServerProtocol
def __init__(self, url):
WebSocketServerFactory.__init__(self, url)
# map of cookies
self._cookies = {}
if __name__ == '__main__':
log.startLogging(sys.stdout)
print("Running Autobahn|Python {}".format(autobahn.version))
# our WebSocket server factory
factory = PersonaServerFactory("ws://127.0.0.1:8080")
# we serve static files under "/" ..
root = File(".")
# .. and our WebSocket server under "/ws" (note that Twisted uses
# bytes for URIs)
resource = WebSocketResource(factory)
root.putChild(b"ws", resource)
# run both under one Twisted Web Site
site = Site(root)
site.log = lambda _: None # disable any logging
reactor.listenTCP(8080, site)
reactor.run()
|
booknlp/common/calc_coref_metrics.py | ishine/booknlp | 539 | 20111 | import subprocess, re, sys
def get_coref_score(metric, path_to_scorer, gold=None, preds=None):
output=subprocess.check_output(["perl", path_to_scorer, metric, preds, gold]).decode("utf-8")
output=output.split("\n")[-3]
matcher=re.search("Coreference: Recall: \(.*?\) (.*?)% Precision: \(.*?\) (.*?)% F1: (.*?)%", output)
if matcher is not None:
recall=float(matcher.group(1))
precision=float(matcher.group(2))
f1=float(matcher.group(3))
return recall, precision, f1
def get_conll(path_to_scorer, gold=None, preds=None):
bcub_r, bcub_p, bcub_f=get_coref_score("bcub", path_to_scorer, gold, preds)
muc_r, muc_p, muc_f=get_coref_score("muc", path_to_scorer, gold, preds)
ceaf_r, ceaf_p, ceaf_f=get_coref_score("ceafe", path_to_scorer, gold, preds)
print("bcub:\t%.1f" % bcub_f)
print("muc:\t%.1f" % muc_f)
print("ceaf:\t%.1f" % ceaf_f)
avg=(bcub_f + muc_f + ceaf_f)/3.
print("Average F1: %.1f" % (avg))
# Generate Latex table
# print("%.1f&%.1f&%.1f&%.1f" % (bcub_f, muc_f, ceaf_f, avg))
return bcub_f, avg
if __name__ == "__main__":
goldFile=sys.argv[1]
predFile=sys.argv[2]
scorer=sys.argv[3]
bcub_f, avg=get_conll(scorer, gold=goldFile, preds=predFile)
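# Hedged usage note (not part of the original module): example invocation with
# placeholder paths; the last argument is the official CoNLL reference scorer
# script, which get_coref_score() shells out to via perl:
#
#     python calc_coref_metrics.py gold.conll predictions.conll scorer.pl
#
# or, programmatically:
#
#     bcub_f, conll_avg = get_conll("scorer.pl", gold="gold.conll", preds="predictions.conll")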
|
src/python/compressao_huffman.py | willisnou/Algoritmos-e-Estruturas-de-Dados | 653 | 20114 | <reponame>willisnou/Algoritmos-e-Estruturas-de-Dados
# Huffman tree
class node:
def __init__(self, freq, symbol, left=None, right=None):
        # symbol frequency
self.freq = freq
        # symbol (character)
self.symbol = symbol
        # node to the left of the current node
self.left = left
        # node to the right of the current node
self.right = right
        # tree direction (0/1)
self.huff = ''
# Utility function to print
# the Huffman codes of all symbols
# in the new Huffman tree that will be created
def printNodes(node, val=''):
    # Huffman code for the current node
newVal = val + str(node.huff)
    # if the node is not a leaf of the
    # tree, then walk down into it
    # until reaching the leaves
if(node.left):
printNodes(node.left, newVal)
if(node.right):
printNodes(node.right, newVal)
    # If the node is a leaf of the tree,
    # then print its Huffman code
if(not node.left and not node.right):
print(f"{node.symbol} -> {newVal}")
# characters for the Huffman tree
chars = ['a', 'b', 'c', 'd', 'e', 'f']
# frequency of each character
freq = [5, 9, 12, 13, 16, 45]
# list holding the not-yet-merged nodes
nodes = []
if __name__ == '__main__':
    # convert the characters and frequencies
    # into Huffman tree nodes
    for x in range(len(chars)):
        nodes.append(node(freq[x], chars[x]))
    while len(nodes) > 1:
        # sort all nodes in ascending order
        # by their frequency
        nodes = sorted(nodes, key=lambda x: x.freq)
        # pick the two smallest nodes
        left = nodes[0]
        right = nodes[1]
        # assign a direction value to these
        # nodes (left or right)
        left.huff = 0
        right.huff = 1
        # merge the two smallest nodes into a new
        # parent node for them
        newNode = node(
            left.freq +
            right.freq,
            left.symbol +
            right.symbol,
            left,
            right)
        # remove the two merged nodes and add the new
        # parent node back among the others
        nodes.remove(left)
        nodes.remove(right)
        nodes.append(newNode)
    # <NAME> tree is ready!
    printNodes(nodes[0])
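# Added worked example (not in the original): with the frequencies above and
# Python's stable sort there are no ties, so the merges happen in the order
# (a,b), (c,d), (ab,e), (cd,abe), (f,cdabe) and the printed prefix codes are:
#   f -> 0, c -> 100, d -> 101, a -> 1100, b -> 1101, e -> 111
# i.e. the most frequent symbol receives the shortest code.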
|
Tests/subset/svg_test.py | ThomasRettig/fonttools | 2,705 | 20120 | from string import ascii_letters
import textwrap
from fontTools.misc.testTools import getXML
from fontTools import subset
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib import TTFont, newTable
from fontTools.subset.svg import NAMESPACES, ranges
import pytest
etree = pytest.importorskip("lxml.etree")
@pytest.fixture
def empty_svg_font():
glyph_order = [".notdef"] + list(ascii_letters)
pen = TTGlyphPen(glyphSet=None)
pen.moveTo((0, 0))
pen.lineTo((0, 500))
pen.lineTo((500, 500))
pen.lineTo((500, 0))
pen.closePath()
glyph = pen.glyph()
glyphs = {g: glyph for g in glyph_order}
fb = FontBuilder(unitsPerEm=1024, isTTF=True)
fb.setupGlyphOrder(glyph_order)
fb.setupCharacterMap({ord(c): c for c in ascii_letters})
fb.setupGlyf(glyphs)
fb.setupHorizontalMetrics({g: (500, 0) for g in glyph_order})
fb.setupHorizontalHeader()
fb.setupOS2()
fb.setupPost()
fb.setupNameTable({"familyName": "TestSVG", "styleName": "Regular"})
svg_table = newTable("SVG ")
svg_table.docList = []
fb.font["SVG "] = svg_table
return fb.font
def new_svg(**attrs):
return etree.Element("svg", {"xmlns": NAMESPACES["svg"], **attrs})
def _lines(s):
return textwrap.dedent(s).splitlines()
@pytest.mark.parametrize(
"gids, retain_gids, expected_xml",
[
# keep four glyphs in total, don't retain gids, which thus get remapped
(
"2,4-6",
False,
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph1" d="M2,2"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="2" startGlyphID="2">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph2" d="M4,4"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="3" startGlyphID="3">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph3" d="M5,5"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="4" startGlyphID="4">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph4" d="M6,6"/></svg>]]>
</svgDoc>
"""
),
),
# same four glyphs, but we now retain gids
(
"2,4-6",
True,
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="2">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph2" d="M2,2"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="4" startGlyphID="4">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph4" d="M4,4"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="5" startGlyphID="5">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph5" d="M5,5"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="6" startGlyphID="6">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph6" d="M6,6"/></svg>]]>
</svgDoc>
"""
),
),
],
)
def test_subset_single_glyph_per_svg(
empty_svg_font, tmp_path, gids, retain_gids, expected_xml
):
font = empty_svg_font
svg_docs = font["SVG "].docList
for i in range(1, 11):
svg = new_svg()
etree.SubElement(svg, "path", {"id": f"glyph{i}", "d": f"M{i},{i}"})
svg_docs.append((etree.tostring(svg).decode(), i, i))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
subset.main(
[
str(svg_font_path),
f"--output-file={subset_path}",
f"--gids={gids}",
"--retain_gids" if retain_gids else "--no-retain_gids",
]
)
subset_font = TTFont(subset_path)
assert getXML(subset_font["SVG "].toXML, subset_font) == expected_xml
# This contains a bunch of cross-references between glyphs, paths, gradients, etc.
# Note the path coordinates are completely made up and not meant to be rendered.
# We only care about the tree structure, not its visual content.
COMPLEX_SVG = """\
<svg xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
<path id="p1" d="M3,3"/>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph3">
<use xlink:href="#p1"/>
</g>
<use id="glyph4" xlink:href="#glyph1" x="10"/>
<use id="glyph5" xlink:href="#glyph2" y="-10"/>
<g id="glyph6">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
<g id="group1">
<g id="glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph7">
<path d="M4,4"/>
</g>
<g id="glyph8">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
<path id="M6,6"/>
</g>
<path d="M7,7"/>
</g>
<g id="glyph9">
<use xlink:href="#p2"/>
</g>
<g id="glyph10">
<use xlink:href="#p3"/>
</g>
</g>
<g id="glyph11">
<path d="M7,7" fill="url(#rg4)"/>
</g>
<g id="glyph12">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
"""
@pytest.mark.parametrize(
"subset_gids, expected_xml",
[
# we only keep gid=2, with 'glyph2' defined inside 'glyph1': 'glyph2'
# is renamed 'glyph1' to match the new subset indices, and the old 'glyph1'
# is kept (as it contains 'glyph2') but renamed '.glyph1' to avoid clash
(
"2",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id=".glyph1">
<g id="glyph1">
<path d="M0,0"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
# we keep both gid 1 and 2: the glyph elements' ids stay as they are (only the
# range endGlyphID change); a gradient is kept since it's referenced by glyph1
(
"1,2",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# both gid 3 and 6 refer (via <use xlink:href="#...") to path 'p1', which
# is thus kept in <defs>; the glyph ids and range start/end are renumbered.
"3,6",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path id="p1" d="M3,3"/>
</defs>
<g id="glyph1">
<use xlink:href="#p1"/>
</g>
<g id="glyph2">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph4' uses the whole 'glyph1' element (translated); we keep the latter
# renamed to avoid clashes with new gids
"3-4",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<path id="p1" d="M3,3"/>
</defs>
<g id=".glyph1">
<g id=".glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph1">
<use xlink:href="#p1"/>
</g>
<use id="glyph2" xlink:href="#.glyph1" x="10"/>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph9' uses a path 'p2' defined inside 'glyph7', the latter is excluded
# from our subset, thus gets renamed '.glyph7'; an unrelated element with
# same id=".glyph7" doesn't clash because it was dropped.
# Similarly 'glyph10' uses path 'p3' defined inside 'glyph8', also excluded
# from subset and prefixed with '.'. But since an id=".glyph8" is already
# used in the doc, we append a .{digit} suffix to disambiguate.
"9,10",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="group1">
<g id=".glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph8.1">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
</g>
</g>
<g id="glyph1">
<use xlink:href="#p2"/>
</g>
<g id="glyph2">
<use xlink:href="#p3"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph11' uses gradient 'rg4' which inherits from 'rg3', which inherits
# from 'rg2', etc.
"11",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
</defs>
<g id="glyph1">
<path d="M7,7" fill="url(#rg4)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph12' contains a style attribute with inline CSS declarations that
# contains references to a gradient fill and a clipPath: we keep those
"12",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
],
)
def test_subset_svg_with_references(
empty_svg_font, tmp_path, subset_gids, expected_xml
):
font = empty_svg_font
font["SVG "].docList.append((COMPLEX_SVG, 1, 12))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
subset.main(
[
str(svg_font_path),
f"--output-file={subset_path}",
f"--gids={subset_gids}",
"--pretty-svg",
]
)
subset_font = TTFont(subset_path)
if expected_xml is not None:
assert getXML(subset_font["SVG "].toXML, subset_font) == expected_xml
else:
assert "SVG " not in subset_font
def test_subset_svg_empty_table(empty_svg_font, tmp_path):
font = empty_svg_font
svg = new_svg()
etree.SubElement(svg, "rect", {"id": "glyph1", "x": "1", "y": "2"})
font["SVG "].docList.append((etree.tostring(svg).decode(), 1, 1))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
# there's no gid=2 in SVG table, drop the empty table
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=2"])
assert "SVG " not in TTFont(subset_path)
def test_subset_svg_missing_glyph(empty_svg_font, tmp_path):
font = empty_svg_font
svg = new_svg()
etree.SubElement(svg, "rect", {"id": "glyph1", "x": "1", "y": "2"})
font["SVG "].docList.append(
(
etree.tostring(svg).decode(),
1,
            # the range endGlyphID=2 declares two glyphs; however, our svg contains
            # only one glyph element with id="glyph1", the "glyph2" one is absent.
            # Technically this would be invalid according to the OT-SVG spec.
2,
)
)
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
# make sure we don't crash when we don't find the expected "glyph2" element
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=1"])
subset_font = TTFont(subset_path)
assert getXML(subset_font["SVG "].toXML, subset_font) == [
'<svgDoc endGlyphID="1" startGlyphID="1">',
' <![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><rect id="glyph1" x="1" y="2"/></svg>]]>',
"</svgDoc>",
]
# ignore the missing gid even if included in the subset; in this test case we
# end up with an empty svg document--which is dropped, along with the empty table
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=2"])
assert "SVG " not in TTFont(subset_path)
@pytest.mark.parametrize(
"ints, expected_ranges",
[
((), []),
((0,), [(0, 0)]),
((0, 1), [(0, 1)]),
((1, 1, 1, 1), [(1, 1)]),
((1, 3), [(1, 1), (3, 3)]),
((4, 2, 1, 3), [(1, 4)]),
((1, 2, 4, 5, 6, 9, 13, 14, 15), [(1, 2), (4, 6), (9, 9), (13, 15)]),
],
)
def test_ranges(ints, expected_ranges):
assert list(ranges(ints)) == expected_ranges
|
tests/components/tectonics/test_listric_kinematic_extender.py | amanaster2/landlab | 257 | 20129 | <reponame>amanaster2/landlab<filename>tests/components/tectonics/test_listric_kinematic_extender.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 08:42:24 2021
@author: gtucker
"""
from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_raises
from landlab import HexModelGrid, RadialModelGrid, RasterModelGrid
from landlab.components import Flexure, ListricKinematicExtender
def test_hangingwall_nodes():
"""Test the correct identification of hangingwall nodes."""
grid = RasterModelGrid((3, 7), xy_spacing=2500.0)
grid.add_zeros("topographic__elevation", at="node")
extender = ListricKinematicExtender(grid, fault_location=2500.0)
assert_array_equal(
extender._hangwall, [2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20]
)
def test_subsidence_and_horiz_shift():
"""Test that elev subsides then shifts after 2 time steps."""
grid = RasterModelGrid((3, 7), xy_spacing=2500.0)
topo = grid.add_zeros("topographic__elevation", at="node")
extender = ListricKinematicExtender(
grid, extension_rate=0.01, fault_location=2500.0
)
# Run long enough to extend by half a grid cell
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[0.0, 0.0, -1404.156819, -910.66907, -590.616478, -383.045648, -248.425118],
)
# Now extend another half cell, so cumulative extension is one cell and
# elevations should get shifted by one cell
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[0.0, 0.0, -3514.477461, -2808.313638, -1821.338140, -1181.232956, -766.091296],
)
# Another step, and this time the hangingwall edge has moved by one cell,
# so the first 3 cells in this row should not further subside.
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[
0.0,
0.0,
-3514.477461,
-3718.982708,
-2411.954617,
-1564.278603,
-1014.516414,
],
)
def test_with_hex_grid():
grid = HexModelGrid((5, 5), node_layout="rect")
grid.add_zeros("topographic__elevation", at="node")
ListricKinematicExtender(grid)
ListricKinematicExtender(grid, fault_location=2.0)
grid = HexModelGrid((5, 5), node_layout="rect", orientation="vertical")
grid.add_zeros("topographic__elevation", at="node")
assert_raises(NotImplementedError, ListricKinematicExtender, grid)
def test_with_flexure():
"""Test integrating with flexure."""
crust_density = 2700.0 # density of crustal column, kg/m3
dx = 2500.0 # grid spacing, m
dt = 125000.0 # time step, y
upper_crust_base_depth = 10000.0 # m
grid = RasterModelGrid((3, 7), xy_spacing=dx)
topo = grid.add_zeros("topographic__elevation", at="node")
load = grid.add_zeros("lithosphere__overlying_pressure_increment", at="node")
thickness = grid.add_zeros("upper_crust_thickness", at="node")
upper_crust_base = grid.add_zeros("upper_crust_base__elevation", at="node")
extender = ListricKinematicExtender(
grid,
extension_rate=0.01,
fault_location=2500.0,
track_crustal_thickness=True,
)
flexer = Flexure(grid, eet=5000.0, method="flexure")
deflection = grid.at_node["lithosphere_surface__elevation_increment"]
topo[
grid.x_of_node <= 7500.0
] = 1000.0 # this will force thickness to be 1 km greater at left
upper_crust_base[:] = -upper_crust_base_depth
thickness[:] = topo - upper_crust_base
unit_wt = crust_density * flexer.gravity
load[:] = unit_wt * thickness # loading pressure
# Get the initial deflection, which we'll need to calculate total current
# deflection
flexer.update()
init_deflection = deflection.copy()
# Run extension for half a grid cell. Elevations change, but thickness
# doesn't, so deflection should not change. We should be able to recover
# elevation from:
#
# topo = thickness + crust base - (deflection + subsidence)
#
extender.run_one_step(dt=dt)
flexer.update()
net_deflection = deflection - init_deflection
assert_array_almost_equal(
net_deflection[7:14],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
)
test_topo = thickness + upper_crust_base - (net_deflection + extender._cum_subs)
assert_array_almost_equal(topo, test_topo)
# Now extend for another half cell, which should force a shift. The
# cumulative subsidence will be subtracted from the thickness field,
# representing thinning as the hangingwall slides to the "right". This
# will cause net upward isostatic deflection.
extender.run_one_step(dt=dt)
load[:] = unit_wt * thickness
flexer.update()
net_deflection = deflection - init_deflection
assert_array_almost_equal(
thickness[7:14],
[
11000.0,
11000.0,
8191.686362, # greatest subsidence: lost nearly 3 km
9178.66186,
9818.767044, # thicker because shifted (only lost <200 m)
9233.908704,
9503.149763,
],
)
assert_array_almost_equal(
net_deflection[7:14],
[
-59.497362,
-65.176276,
-69.222531,
-70.334462,
-68.608952,
-64.912352,
-59.743080,
],
)
def test_error_handling():
radial_grid = RadialModelGrid(
n_rings=1, nodes_in_first_ring=8
) # , xy_of_center=(0., 0.))
assert_raises(TypeError, ListricKinematicExtender, radial_grid)
hex_grid = HexModelGrid((3, 3))
assert_raises(TypeError, ListricKinematicExtender, hex_grid)
grid = RasterModelGrid((3, 7))
grid.add_zeros("topographic__elevation", at="node")
assert_raises(
KeyError, ListricKinematicExtender, grid, track_crustal_thickness=True
)
|
tests/test_polygon.py | tilezen/mapbox-vector-tile | 121 | 20137 | # -*- coding: utf-8 -*-
"""
Tests for vector_tile/polygon.py
"""
import unittest
from mapbox_vector_tile.polygon import make_it_valid
from shapely import wkt
import os
class TestPolygonMakeValid(unittest.TestCase):
def test_dev_errors(self):
test_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(test_dir, 'errors.wkt')) as fh:
for line in fh:
geom = wkt.loads(line)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertTrue(fixed.area > 0.9 * abs(geom.area))
def test_multipolygon_with_flipped_ring(self):
geom = wkt.loads("""MULTIPOLYGON(
((0 0, 0 4, 4 4, 4 0, 0 0), (1 1, 1 3, 3 3, 3 1, 1 1)),
((5 0, 9 0, 9 4, 5 4, 5 0), (6 1, 6 3, 8 3, 8 1, 6 1))
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(24, fixed.area)
def test_polygon_self_touching(self):
geom = wkt.loads("""POLYGON(
(1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(21, fixed.area)
def test_polygon_self_touching_inner(self):
geom = wkt.loads("""POLYGON(
(-1 -1, -1 6, 6 6, 6 -1, -1 -1),
(1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(28, fixed.area)
def test_polygon_inners_touching(self):
geom = wkt.loads("""POLYGON(
(0 0, 6 0, 6 6, 0 6, 0 0),
(1 1, 1 3, 3 3, 3 1, 1 1),
(3 3, 3 5, 5 5, 5 3, 3 3)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(28, fixed.area)
def test_polygon_inner_touching_outer(self):
geom = wkt.loads("""POLYGON(
(0 0, 3 0, 3 3, 0 3, 0 0),
(1 1, 2 3, 2 1, 1 1)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(8, fixed.area)
def test_polygon_two_inners_touching_outer(self):
geom = wkt.loads("""POLYGON(
(0 0, 6 0, 6 3, 0 3, 0 0),
(1 1, 2 3, 2 1, 1 1),
(4 1, 5 3, 5 1, 4 1)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(16, fixed.area)
def test_polygon_inners_touching_colinear(self):
geom = wkt.loads("""POLYGON(
(0 0, 6 0, 6 6, 0 6, 0 0),
(1 1, 1 3, 3 4, 3 1, 1 1),
(3 2, 3 5, 5 5, 5 3, 3 2)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(26, fixed.area)
def test_polygon_inner_colinear_outer(self):
geom = wkt.loads("""POLYGON(
(0 0, 3 0, 3 3, 0 3, 0 0),
(1 1, 1 3, 2 3, 2 1, 1 1)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(7, fixed.area)
def test_polygon_many_inners_touching(self):
geom = wkt.loads("""POLYGON(
(0 0, 5 0, 5 5, 0 5, 0 0),
(1 1, 1 2, 3 2, 1 1),
(3 1, 3 3, 4 1, 3 1),
(2 2, 1 4, 2 4, 2 2),
(2 3, 4 4, 4 3, 2 3)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(21, fixed.area)
def test_polygon_inner_spike(self):
geom = wkt.loads("""POLYGON(
(0 0, 3 0, 3 4, 0 4, 0 0),
(1 1, 1 3, 2 3, 2 2, 1 2, 2 2, 2 1, 1 1)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(10, fixed.area)
def test_polygon_disconnected_inner(self):
geom = wkt.loads("""POLYGON(
(0 0, 5 0, 5 5, 0 5, 0 0),
(1 1, 1 2, 2 2, 1 1),
(2 1, 2 2, 3 2, 2 1),
(3 1, 3 2, 4 2, 3 1),
(1 2, 1 3, 2 3, 1 2),
(2 2, 2 3, 3 3, 2 2),
(3 2, 3 3, 4 3, 3 2),
(1 3, 1 4, 2 4, 1 3),
(2 3, 2 4, 3 4, 2 3),
(3 3, 3 4, 4 4, 3 3)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(20.5, fixed.area)
def test_polygon_disconnected_outer(self):
geom = wkt.loads("""POLYGON(
(0 0, 4 0, 4 3, 3 3, 3 2, 2 3, 1 2, 1 3, 0 3, 0 0),
(1 1, 1 2, 3 2, 3 1, 1 1)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(9, fixed.area)
def test_polygon_ring_of_inners(self):
geom = wkt.loads("""POLYGON(
(0 0, 4 0, 4 4, 0 4, 0 0),
(1 1, 1 2, 2 1, 1 1),
(1 2, 1 3, 2 3, 1 2),
(2 3, 3 3, 3 2, 2 3),
(2 1, 3 2, 3 1, 2 1)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(14, fixed.area)
def test_polygon_ring_of_inners_2(self):
geom = wkt.loads("""POLYGON(
(0 0, 5 0, 5 5, 0 5, 0 0),
(1 3, 1 4, 2 4, 1 3),
(3 3, 4 3, 4 2, 3 3),
(1 1, 1 2, 2 1, 1 1),
(1 2, 1 3, 2 3, 1 2),
(2 3, 3 3, 3 2, 2 3),
(2 1, 3 2, 3 1, 2 1)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(22, fixed.area)
def test_polygon_inners_crossing_outer(self):
geom = wkt.loads("""POLYGON (
(2325 1015, 2329 1021, 2419 1057, 2461 944, 2369 907, 2325 1015),
(2329 1012, 2370 909, 2457 944, 2417 1050, 2329 1012),
(2410 1053, 2410 1052, 2412 1053, 2411 1054, 2410 1053),
(2378 1040, 2378 1039, 2379 1040, 2379 1041, 2378 1040),
(2369 1037, 2370 1036, 2371 1036, 2371 1038, 2369 1037),
(2361 1034, 2362 1033, 2363 1033, 2363 1034, 2361 1034),
(2353 1031, 2354 1029, 2355 1030, 2354 1031, 2353 1031),
(2337 1024, 2338 1023, 2339 1023, 2338 1025, 2337 1024)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
# different versions of GEOS hit this bug in slightly different ways,
# meaning that some inners get included and some don't, depending on
# the version. therefore, we need quite a wide range of acceptable
# answers.
#
# the main part of this polygon (outer - largest inner) has area 1551,
# and the smaller inners sum up to area 11, so we'll take +/-6 from
# 1545.
self.assertAlmostEqual(1545, fixed.area, delta=6)
|
xp/build/scripts/gg_post_process_xcode_project.py | vladcorneci/golden-gate | 262 | 20170 | <gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2017-2020 Fitbit, Inc
# SPDX-License-Identifier: Apache-2.0
#####################################################################
# This script post-processes the XCode project generated
# by CMake, so that it no longer contains absolute paths.
# It also remaps UUIDs so that they are stable across invocations
# of this script, which allows the generated project to be put under
# source code control.
#####################################################################
#####################################################################
# Imports
#####################################################################
import sys
import re
import os
import shutil
#####################################################################
# Constants
#####################################################################
XCODE_PROJECT_FILE_NAME = "project.pbxproj"
#####################################################################
def print_usage_and_exit():
sys.stderr.write("""\
Usage: gg_post_process_xcode_project.py <project_file_in> <project_file_out> <gg_root> <gg_variant>
Where <project_file_in> is the XCode project generated by CMake,
<project_file_out> is the post-processed XCode project generated by
this script, <gg_root> is the directory where the GG repo is checked
out, and <gg_variant> is 'iOS' or 'macOS'
""")
sys.exit(1)
#####################################################################
def print_error(error):
sys.stderr.write("ERROR: %s\n" % (error))
#####################################################################
def replace_substrings(original, replacements):
cursor = 0
segments = []
for replacement in replacements:
start, end, string = replacement
segments.append(original[cursor:start])
segments.append(string)
cursor = end
segments.append(original[cursor:])
return "".join(segments)
#####################################################################
# Even after making paths relative, we still have some include paths
# path point to CMake-generated directories.
# They have the form: xp/build/cmake/<platform>
# We replace them by an equivalent, pointing to the `generated` subdir
# of xp/build
#####################################################################
def fix_header_search_paths(match):
return match.group(1) + match.group(2).replace('xp/build/cmake', 'xp/build/generated')
#####################################################################
def process_project_file(input_file, output_file, gg_root, uuid_prefix):
# Read the entire project file
project = open(os.path.join(input_file, XCODE_PROJECT_FILE_NAME), "r").read()
# Remove SYMROOT entries, so that we use the default location for XCode
project = re.sub(r'(SYMROOT = )', r'// Removed by GG script \1', project)
# Remove CONFIGURATION_BUILD_DIR entries
project = re.sub(r'(CONFIGURATION_BUILD_DIR = )', r'// Removed by GG script \1', project)
    # Change defaultConfigurationName from Debug to Release
project = re.sub(r'(defaultConfigurationName = Debug)', r'defaultConfigurationName = Release', project)
# Compute the relative path from the output project to the GG root
abs_output_dir_path = os.path.abspath(os.path.dirname(output_file))
abs_gg_root_path = os.path.abspath(gg_root)
abs_gg_xp_root_path = os.path.join(abs_gg_root_path, "xp")
gg_xp_root_relpath = os.path.relpath(abs_gg_xp_root_path, abs_output_dir_path)
# Rewrite the `projectDirPath` definition in the project
project_dir_path = "projectDirPath = " + gg_xp_root_relpath + ";"
project = re.sub(r'projectDirPath = \S+;', project_dir_path, project, 1)
# Replace absolute paths with paths relative to `projectDirPath`
project = re.sub(abs_gg_root_path, '..', project)
# Replace references to object files and libraries.
# They have the form: ../xp/<some-path>/<prefix>$(EFFECTIVE_PLATFORM_NAME)/<build-variant>/<object-name>
# We replace them with just the object name, relative to the built products directory.
# NOTE: those entries can end with a quote, or a whitespace
project = re.sub(r'(\.\./xp/\S+\$\(EFFECTIVE_PLATFORM_NAME\)/[^/ ]+/)([^/" ]+[" ])', r'$(BUILT_PRODUCTS_DIR)/\2', project)
# Scan for all entity IDs and store them in a map, associating them with
    # a number equal to their order of appearance in the file
# Entity IDs generated by CMake: we're looking for a block of 24 uppercase hex chars
# preceded by whitespace and followed by whitespace or a separator
entity_id_pattern = re.compile(re.compile(r'(\s)([0-9A-F]{24})(\s|[,;])'))
entity_id_map = {}
entity_ids = entity_id_pattern.findall(project)
for (_, entity_id, _) in entity_ids:
if entity_id not in entity_id_map:
entity_id_map[entity_id] = "%s%022X" % (uuid_prefix, len(entity_id_map))
# Replace IDs with their mapped value
project = entity_id_pattern.sub(
lambda match: match.group(1) + entity_id_map[match.group(2)] + match.group(3), project)
# Fix HEADER_SEARCH_PATHS elements
# Look for: HEADER_SEARCH_PATHS = (...)
project = re.sub(r'(HEADER_SEARCH_PATHS\s*=\s*\()([^\(\)]+)', fix_header_search_paths, project)
# Fix Info.plist references
project = re.sub(r'(INFOPLIST_FILE\s*=\s*)"(.*GoldenGateXP\.dir/Info.plist)"',
r'\1"bundle/Info.plist"',
project)
# Replace the shell script generated by CMake for the gg-common target
# For simplicity, we just look for a `shellScript` entry with the term `gg-common` in it
gg_common_shell_script = 'shellScript = "$PROJECT_DIR/build/scripts/gg_process_version_info_header.py \\\"$PROJECT_FILE_PATH/..\\\"";'
gg_common_input_paths = 'inputPaths = ( "$(BUILT_PRODUCTS_DIR)" );'
gg_common_output_paths = 'outputPaths = ();'
project = re.sub(r'shellScript\s*=\s*".*gg-common_preBuildCommands.*";',
gg_common_shell_script + "\n" + gg_common_input_paths + "\n" + gg_common_output_paths,
project)
# Replace the ALL_BUILD shell script so that it doesn't depend on a CMake-generated script
# We use a script file that's just a comment, because we don't need to actually do anything
all_build_shell_script = 'shellScript = "# replaced by gg_post_process_xcode_project.py";'
project = re.sub(r'shellScript\s*=\s*".*ALL_BUILD_cmakeRulesBuildPhase.*";',
all_build_shell_script,
project)
open(os.path.join(output_file, XCODE_PROJECT_FILE_NAME), "w+").write(project)
#####################################################################
def copy_generated_files(gg_root, gg_variant_dir):
for filename in ["config/lwipopts.h"]:
src = os.path.join(gg_root, "xp/build/cmake", gg_variant_dir, filename)
dst = os.path.join(gg_root, "xp/build/generated", gg_variant_dir, filename)
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
shutil.copyfile(src, dst)
#####################################################################
# main
#####################################################################
def main():
if len(sys.argv) != 5:
print_error("ERROR: invalid/missing arguments")
print_usage_and_exit()
# Assign the parameters
input_file = sys.argv[1]
output_file = sys.argv[2]
gg_root = sys.argv[3]
gg_variant = sys.argv[4]
# Check that the input and output project files are XCode projects (XCode Project files are directories that
# contain a project.pbxproj file, and other files). For the output, it is Ok that the project.pbxproj file
# doesn't yet exist, since we will be writing it
if not os.path.isfile(os.path.join(input_file, XCODE_PROJECT_FILE_NAME)):
print_error("ERROR: input file is not a valid XCode project")
return 1
if not os.path.isdir(output_file):
print_error("ERROR: output file is not a valid XCode project")
return 1
if not os.path.isdir(gg_root):
print_error("ERROR: Golden Gate root isn't a directory")
return 1
# Pick a UUID prefix based on the variant, to try and avoid having the same UUID in two
# different project files.
uuid_prefix_map = {
'iOS': '01',
'macOS': '02'
}
uuid_prefix = uuid_prefix_map.get(gg_variant, '00')
process_project_file(input_file, output_file, gg_root, uuid_prefix)
gg_variant_dir = 'xcode-' + gg_variant
copy_generated_files(gg_root, gg_variant_dir)
return 0
if __name__ == '__main__':
sys.exit(main())
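# Example invocation (added note; the project names and paths below are purely
# illustrative, assuming the CMake-generated project lives under
# xp/build/cmake/xcode-iOS):
#
#   python gg_post_process_xcode_project.py \
#       xp/build/cmake/xcode-iOS/GoldenGateXP.xcodeproj \
#       xp/GoldenGateXP.xcodeproj \
#       /path/to/golden-gate \
#       iOS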
|
uq_benchmark_2019/imagenet/end_to_end_test.py | deepneuralmachine/google-research | 23,901 | 20175 | <gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""End-to-end test for ImageNet.
Tests for imagenet.resnet50_train, run_predict, run_temp_scaling, and
run_metrics. Real data doesn't work under blaze, so execute the test binary
directly.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
from uq_benchmark_2019.imagenet import resnet50_train # pylint: disable=line-too-long
from uq_benchmark_2019.imagenet import run_metrics
from uq_benchmark_2019.imagenet import run_predict
from uq_benchmark_2019.imagenet import run_temp_scaling
gfile = tf.io.gfile
flags.DEFINE_bool('fake_data', True, 'Use dummy random data.')
flags.DEFINE_bool('fake_training', True, 'Train with trivial number of steps.')
DATA_NAMES = ['train', 'test', 'corrupt-static-gaussian_noise-2', 'celeb_a']
METHODS = ['vanilla', 'll_dropout', 'll_svi', 'dropout']
class EndToEndTest(parameterized.TestCase):
@parameterized.parameters(*[(d, m) for d in DATA_NAMES for m in METHODS]) # pylint: disable=g-complex-comprehension
def test_end_to_end_train(self, data_name, method):
with tempfile.TemporaryDirectory() as model_dir:
metrics = ['sparse_categorical_crossentropy']
if flags.FLAGS.fake_data and (data_name != 'test'):
pass
else:
temp_model_dir = os.path.join(model_dir, data_name, method)
resnet50_train.run(
method, temp_model_dir, task_number=0, use_tpu=False, tpu=None,
metrics=metrics, fake_data=flags.FLAGS.fake_data,
fake_training=flags.FLAGS.fake_training)
run_predict.run(
data_name, temp_model_dir, batch_size=8, predictions_per_example=4,
max_examples=44, output_dir=temp_model_dir,
fake_data=flags.FLAGS.fake_data)
tmpl = os.path.join(temp_model_dir, '*_small_*')
glob_results = gfile.glob(tmpl)
path = glob_results[0]
if data_name == 'valid':
          # NOTE (assumption): run_temp_scaling is a module, so it is invoked here
          # through a run() helper analogous to run_predict.run / run_metrics.run.
          run_temp_scaling.run(path)
run_metrics.run(path, path, model_dir_ensemble=None,
use_temp_scaling=False)
if __name__ == '__main__':
absltest.main()
|
src/robot/parsing/parser/parser.py | bhirsz/robotframework | 7,073 | 20182 | <gh_stars>1000+
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..lexer import Token, get_tokens, get_resource_tokens, get_init_tokens
from ..model import Statement
from .fileparser import FileParser
def get_model(source, data_only=False, curdir=None):
"""Parses the given source to a model represented as an AST.
How to use the model is explained more thoroughly in the general
documentation of the :mod:`robot.parsing` module.
:param source: The source where to read the data. Can be a path to
a source file as a string or as ``pathlib.Path`` object, an already
        opened file object, or Unicode text containing the data directly.
Source files must be UTF-8 encoded.
:param data_only: When ``False`` (default), returns all tokens. When set
to ``True``, omits separators, comments, continuation markers, and
other non-data tokens. Model like this cannot be saved back to
file system.
:param curdir: Directory where the source file exists. This path is used
to set the value of the built-in ``${CURDIR}`` variable during parsing.
        When not given, the variable is left as-is. Should be given
only if the model will be executed afterwards. If the model is saved
back to disk, resolving ``${CURDIR}`` is typically not a good idea.
Use :func:`get_resource_model` or :func:`get_init_model` when parsing
resource or suite initialization files, respectively.
"""
return _get_model(get_tokens, source, data_only, curdir)
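# Minimal usage sketch (added for illustration; the file path is hypothetical):
#
#   model = get_model('tests/login.robot')
#   for section in model.sections:
#       print(type(section).__name__)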
def get_resource_model(source, data_only=False, curdir=None):
"""Parses the given source to a resource file model.
Otherwise same as :func:`get_model` but the source is considered to be
a resource file. This affects, for example, what settings are valid.
"""
return _get_model(get_resource_tokens, source, data_only, curdir)
def get_init_model(source, data_only=False, curdir=None):
"""Parses the given source to a init file model.
Otherwise same as :func:`get_model` but the source is considered to be
a suite initialization file. This affects, for example, what settings are
valid.
"""
return _get_model(get_init_tokens, source, data_only, curdir)
def _get_model(token_getter, source, data_only=False, curdir=None):
tokens = token_getter(source, data_only)
statements = _tokens_to_statements(tokens, curdir)
model = _statements_to_model(statements, source)
model.validate_model()
return model
def _tokens_to_statements(tokens, curdir=None):
statement = []
EOS = Token.EOS
for t in tokens:
if curdir and '${CURDIR}' in t.value:
t.value = t.value.replace('${CURDIR}', curdir)
if t.type != EOS:
statement.append(t)
else:
yield Statement.from_tokens(statement)
statement = []
def _statements_to_model(statements, source=None):
parser = FileParser(source=source)
model = parser.model
stack = [parser]
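    # Feed statements through a stack of block parsers: pop parsers that cannot
    # handle the next statement, and push any nested parser returned for a new block.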
for statement in statements:
while not stack[-1].handles(statement):
stack.pop()
parser = stack[-1].parse(statement)
if parser:
stack.append(parser)
return model
|
qf_lib/backtesting/order/order_factory.py | webclinic017/qf-lib | 198 | 20219 | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Mapping, Dict, List
from qf_lib.backtesting.broker.broker import Broker
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.backtesting.contract.contract_to_ticker_conversion.base import ContractTickerMapper
from qf_lib.backtesting.order.execution_style import ExecutionStyle
from qf_lib.backtesting.order.order import Order
from qf_lib.backtesting.order.time_in_force import TimeInForce
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.utils.logging.qf_parent_logger import qf_logger
from qf_lib.common.utils.miscellaneous.function_name import get_function_name
from qf_lib.data_providers.data_provider import DataProvider
class OrderFactory:
""" Creates Orders.
Parameters
----------
broker: Broker
broker used to access the portfolio
data_provider: DataProvider
data provider used to download prices. In case of backtesting, the DataHandler wrapper should be used.
contract_to_ticker_mapper: ContractTickerMapper
object mapping contracts to tickers
"""
def __init__(self, broker: Broker, data_provider: DataProvider, contract_to_ticker_mapper: ContractTickerMapper):
self.broker = broker
self.data_provider = data_provider
self.contract_to_ticker_mapper = contract_to_ticker_mapper
self.logger = qf_logger.getChild(self.__class__.__name__)
def orders(self, quantities: Mapping[Contract, int], execution_style: ExecutionStyle,
time_in_force: TimeInForce) -> List[Order]:
"""
Creates a list of Orders for given numbers of shares for each given asset.
Orders requiring 0 shares will be removed from resulting order list
Parameters
----------
quantities: Mapping[Contract, int]
mapping of a Contract to an amount of shares which should be bought/sold.
If number is positive then asset will be bought. Otherwise it will be sold.
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
order_list = []
for contract, quantity in quantities.items():
if quantity != 0:
order_list.append(Order(contract, quantity, execution_style, time_in_force))
return order_list
def target_orders(self, target_quantities: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, tolerance_quantities: Mapping[Contract, float] = None) -> List[Order]:
"""
Creates a list of Orders from a dictionary of desired target number of shares (number of shares which should be
present in the portfolio after executing the Order).
If the position doesn't already exist, the new Order is placed for the :target_quantity of shares.
If the position does exist the Order for the difference between the target number of shares
and the current number of shares is placed.
Parameters
----------
target_quantities: Mapping[Contract, int]
mapping of a Contract to a target number of shares which should be present in the portfolio after the Order
is executed. After comparing with tolerance the math.floor of the quantity will be taken.
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_quantities: None, Mapping[Contract, int]
            tells what the tolerance for the target_quantities is (in both directions) for each Contract.
            The tolerance is expressed in shares.
            For example: assume that the portfolio currently contains 100 shares of asset A.
            Then calling target_orders({A: 101}, ..., tolerance_quantities={A: 2}) will not generate any trades, as
            the tolerance of 2 allows the allocation to stay at 100 while the target value is 101.
            Another example: assume that the portfolio currently contains 100 shares of asset A.
            Then calling target_orders({A: 103}, ..., tolerance_quantities={A: 2}) will generate a BUY order
            for 3 shares.
            Assets are bought or sold to match the target only if abs(target - actual) > tolerance.
            If the tolerance for a specific contract is not provided, it is assumed to be 0.
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
# Dict of Contract -> Quantities of shares to buy/sell
quantities = dict()
if tolerance_quantities is None:
tolerance_quantities = {}
contract_to_positions = {position.contract(): position for position in self.broker.get_positions()}
for contract, target_quantity in target_quantities.items():
position = contract_to_positions.get(contract, None)
tolerance_quantity = tolerance_quantities.get(contract, 0)
if position is not None:
current_quantity = position.quantity()
else:
current_quantity = 0
quantity = target_quantity - current_quantity
if abs(quantity) > tolerance_quantity and quantity != 0: # tolerance_quantity can be 0
quantities[contract] = math.floor(quantity) # type: int
return self.orders(quantities, execution_style, time_in_force)
def value_orders(self, values: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, frequency: Frequency = None) -> List[Order]:
"""
Creates a list of Orders by specifying the amount of money which should be spent on each asset rather
than the number of shares to buy/sell.
Parameters
----------
values: Mapping[Contract, int]
mapping of a Contract to the amount of money which should be spent on the asset (expressed in the currency
in which the asset is traded)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
frequency: Frequency
frequency for the last available price sampling
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
quantities, _ = self._calculate_target_shares_and_tolerances(values, frequency=frequency)
int_quantities = {contract: math.floor(quantity) for contract, quantity in quantities.items()}
return self.orders(int_quantities, execution_style, time_in_force)
def percent_orders(self, percentages: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, frequency: Frequency = None) -> List[Order]:
"""
Creates a list of Orders by specifying the percentage of the current portfolio value which should be spent
on each asset.
Parameters
----------
percentages: Mapping[Contract, int]
mapping of a Contract to a percentage value of the current portfolio which should be allocated in the asset.
This is specified as a decimal value (e.g. 0.5 means 50%)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
portfolio_value = self.broker.get_portfolio_value()
values = {contract: portfolio_value * fraction for contract, fraction in percentages.items()}
return self.value_orders(values, execution_style, time_in_force, frequency)
def target_value_orders(self, target_values: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, tolerance_percentage: float = 0.0, frequency: Frequency = None)\
-> List[Order]:
"""
Creates a list of Orders by specifying how much should be allocated in each asset after the Orders
have been executed.
        For example, if we already have 10M invested in 'SPY US Equity' and this method is called with a target value
        of 11M, then only 1M will be spent on this asset.
Parameters
----------
target_values: Mapping[Contract, int]
mapping of a Contract to a value which should be allocated in the asset after the Order has been executed
(expressed in the currency in which the asset is traded)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_percentage: float
            tells us what the tolerance for the target_values is (in both directions).
            The tolerance is expressed as a percentage of the target_values.
            For example: assume that the portfolio currently contains asset A with an allocation of 10 000$.
            Then calling target_value_orders({A: 10 500}, ..., tolerance_percentage=0.05) will not generate any trades,
            as the tolerance of 0.05 allows the allocation to stay at 10 000$ while the target value is 10 500$ (the
            tolerance value would be equal to 0.05 * 10 500 = 525$ and the difference between the current and target
            value would be < 525$).
            Another example: assume that the portfolio currently contains asset A with an allocation of 10 000$.
            Then calling target_value_orders({A: 13 000}, ..., tolerance_percentage=0.1) will generate a BUY order
            corresponding to 3000$ of shares, since the tolerance of 0.1 (i.e. 1300$) does not cover a difference
            of 3000$. Trades are generated only if abs(target - actual) > tolerance_percentage * target_value.
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
assert 0.0 <= tolerance_percentage < 1.0, "The tolerance_percentage should belong to [0, 1) interval"
target_quantities, tolerance_quantities = \
self._calculate_target_shares_and_tolerances(target_values, tolerance_percentage, frequency)
return self.target_orders(target_quantities, execution_style, time_in_force, tolerance_quantities)
def target_percent_orders(self, target_percentages: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, tolerance_percentage: float = 0.0, frequency: Frequency = None) \
-> List[Order]:
"""
Creates an Order adjusting a position to a value equal to the given percentage of the portfolio.
Parameters
----------
target_percentages: Mapping[Contract, int]
mapping of a Contract to a percentage of a current portfolio value which should be allocated in each asset
after the Order has been carried out
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_percentage: float
            tells us what the tolerance for the target_percentages is (in both directions). The tolerance is expressed
            in percentage points (0.02 corresponds to 2pp of the target_value). For more details, look at the
            description of target_value_orders.
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
assert 0.0 <= tolerance_percentage < 1.0, "The tolerance_percentage should belong to [0, 1) interval"
portfolio_value = self.broker.get_portfolio_value()
target_values = {
contract: portfolio_value * target_percent for contract, target_percent in target_percentages.items()}
return self.target_value_orders(target_values, execution_style, time_in_force, tolerance_percentage, frequency)
def _calculate_target_shares_and_tolerances(
self, contract_to_amount_of_money: Mapping[Contract, float], tolerance_percentage: float = 0.0,
frequency: Frequency = None) -> (Mapping[Contract, float], Mapping[Contract, float]):
"""
Returns
----------
Tuple(Mapping[Contract, float], Mapping[Contract, float])
Tells how many shares of each asset we should have in order to match the target and what is the tolerance
(in number of shares) for each asset
"""
tickers_to_contract_and_amount_of_money = self._make_tickers_to_contract_and_amount_of_money(
contract_to_amount_of_money)
tickers = list(tickers_to_contract_and_amount_of_money.keys())
# In case of live trading the get_last_available_price will use datetime.now() as the current time to obtain
# last price and in case of a backtest - it will use the data handlers timer to compute the date
current_prices = self.data_provider.get_last_available_price(tickers, frequency)
# Contract -> target number of shares
target_quantities = dict() # type: Dict[Contract, float]
# Contract -> tolerance expressed as number of shares
tolerance_quantities = dict() # type: Dict[Contract, float]
for ticker, (contract, amount_of_money) in tickers_to_contract_and_amount_of_money.items():
current_price = current_prices.loc[ticker]
divisor = (current_price * contract.contract_size)
target_quantity = amount_of_money / divisor # type: float
target_quantities[contract] = target_quantity
tolerance_quantity = target_quantity * tolerance_percentage
tolerance_quantities[contract] = tolerance_quantity
return target_quantities, tolerance_quantities
def _make_tickers_to_contract_and_amount_of_money(self, contract_to_amount_of_money):
tickers_to_contract_and_amount_of_money = dict()
for contract, amount_of_money in contract_to_amount_of_money.items():
ticker = self.contract_to_ticker_mapper.contract_to_ticker(contract)
tickers_to_contract_and_amount_of_money[ticker] = contract, amount_of_money
return tickers_to_contract_and_amount_of_money
def _log_function_call(self, params_dict):
if 'self' in params_dict:
del params_dict['self']
fn_name_level_above = get_function_name(1)
log_message = "Function call: '{}' with parameters:".format(fn_name_level_above)
for key, value in params_dict.items():
if isinstance(value, dict) and value:
value_str = ""
for inner_k, inner_v in value.items():
value_str += "\n\t\t{}: {}".format(inner_k, inner_v)
else:
value_str = str(value)
log_message += "\n\t{}: {}".format(key, value_str)
self.logger.debug(log_message)
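# Usage sketch (added for illustration; the broker, data provider, mapper and
# contract instances are assumed to be constructed elsewhere):
#
#   from qf_lib.backtesting.order.execution_style import MarketOrder
#   from qf_lib.backtesting.order.time_in_force import TimeInForce
#
#   factory = OrderFactory(broker, data_provider, contract_to_ticker_mapper)
#   orders = factory.target_percent_orders(
#       {contract: 0.5}, MarketOrder(), TimeInForce.DAY, tolerance_percentage=0.01)
#   broker.place_orders(orders)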
|
test/test_auth.py | tjones-commits/server-client-python | 470 | 20233 | import unittest
import os.path
import requests_mock
import tableauserverclient as TSC
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
SIGN_IN_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in.xml')
SIGN_IN_IMPERSONATE_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_impersonate.xml')
SIGN_IN_ERROR_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_error.xml')
class AuthTests(unittest.TestCase):
def setUp(self):
self.server = TSC.Server('http://test')
self.baseurl = self.server.auth.baseurl
def test_sign_in(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.TableauAuth('testuser', 'password', site_id='Samples')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_sign_in_with_personal_access_tokens(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken',
personal_access_token='<PASSWORD>', site_id='Samples')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_sign_in_impersonate(self):
with open(SIGN_IN_IMPERSONATE_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.TableauAuth('testuser', 'password',
user_id_to_impersonate='dd2239f6-ddf1-4107-981a-4cf94e415794')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('MJonFA6HDyy2C3oqR13fRGqE6cmgz<PASSWORD>', self.server.auth_token)
self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67', self.server.site_id)
self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', self.server.user_id)
def test_sign_in_error(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.TableauAuth('testuser', '<PASSWORD>')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_in_invalid_token(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken', personal_access_token='invalid')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_in_without_auth(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.TableauAuth('', '')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_out(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
m.post(self.baseurl + '/signout', text='')
tableau_auth = TSC.TableauAuth('testuser', 'password')
self.server.auth.sign_in(tableau_auth)
self.server.auth.sign_out()
self.assertIsNone(self.server._auth_token)
self.assertIsNone(self.server._site_id)
self.assertIsNone(self.server._user_id)
def test_switch_site(self):
self.server.version = '2.6'
baseurl = self.server.auth.baseurl
site_id, user_id, auth_token = list('<PASSWORD>')
self.server._set_auth(site_id, user_id, auth_token)
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(baseurl + '/switchSite', text=response_xml)
site = TSC.SiteItem('Samples', 'Samples')
self.server.auth.switch_site(site)
self.assertEqual('eIX6mvFsq<PASSWORD>4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('<PASSWORD>-8120<PASSWORD>', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_revoke_all_server_admin_tokens(self):
self.server.version = "3.10"
baseurl = self.server.auth.baseurl
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(baseurl + '/signin', text=response_xml)
m.post(baseurl + '/revokeAllServerAdminTokens', text='')
tableau_auth = TSC.TableauAuth('testuser', 'password')
self.server.auth.sign_in(tableau_auth)
self.server.auth.revoke_all_server_admin_tokens()
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('<PASSWORD>ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
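# Illustrative sketch (not part of the test suite): the non-mocked sign-in flow that these
# tests exercise. The server URL, token name and secret below are placeholders.
#
#     import tableauserverclient as TSC
#
#     server = TSC.Server('https://my-tableau-server.example.com')   # placeholder URL
#     auth = TSC.PersonalAccessTokenAuth(token_name='mytoken',
#                                        personal_access_token='<secret>',  # placeholder secret
#                                        site_id='Samples')
#     server.auth.sign_in(auth)      # populates server.auth_token / site_id / user_id
#     try:
#         pass                       # ... issue REST calls against the signed-in server ...
#     finally:
#         server.auth.sign_out()     # clears the cached auth state, as asserted above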
|
awx/main/migrations/0082_v360_webhook_http_method.py | Avinesh/awx | 11,396 | 20247 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def add_webhook_notification_template_fields(apps, schema_editor):
# loop over all existing webhook notification templates and make
# sure they have the new "http_method" field filled in with "POST"
NotificationTemplate = apps.get_model('main', 'notificationtemplate')
webhooks = NotificationTemplate.objects.filter(notification_type='webhook')
for w in webhooks:
w.notification_configuration['http_method'] = 'POST'
w.save()
class Migration(migrations.Migration):
dependencies = [
('main', '0081_v360_notify_on_start'),
]
operations = [
migrations.RunPython(add_webhook_notification_template_fields, migrations.RunPython.noop),
]
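# Illustrative sketch (assumed configuration shape, not part of the migration): the forward
# function only adds the new key to each webhook template's configuration dict.
#
#     before = {'url': 'https://hooks.example.com/awx', 'headers': {}}                          # assumed
#     after  = {'url': 'https://hooks.example.com/awx', 'headers': {}, 'http_method': 'POST'}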
|
runtests.py | resurrexi/django-restql | 545 | 20249 | #!/usr/bin/env python
import os
import sys
import subprocess
from django.core.management import execute_from_command_line
FLAKE8_ARGS = ['django_restql', 'tests', 'setup.py', 'runtests.py']
WARNING_COLOR = '\033[93m'
END_COLOR = '\033[0m'
def flake8_main(args):
print('Running flake8 code linting')
ret = subprocess.call(['flake8'] + args)
msg = (
WARNING_COLOR + 'flake8 failed\n' + END_COLOR
if ret else 'flake8 passed\n'
)
print(msg)
return ret
def runtests():
ret = flake8_main(FLAKE8_ARGS)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
argv = sys.argv[:1] + ['test'] + sys.argv[1:]
execute_from_command_line(argv)
sys.exit(ret) # Fail build if code linting fails
if __name__ == '__main__':
runtests()
|
src/oci/log_analytics/models/query_details.py | Manny27nyc/oci-python-sdk | 249 | 20289 | <gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class QueryDetails(object):
"""
    Input arguments for running a log analytics query. If the request is set to run in asynchronous mode
then shouldIncludeColumns and shouldIncludeFields can be overwritten when retrieving the results.
"""
#: A constant which can be used with the sub_system property of a QueryDetails.
#: This constant has a value of "LOG"
SUB_SYSTEM_LOG = "LOG"
#: A constant which can be used with the async_mode property of a QueryDetails.
#: This constant has a value of "FOREGROUND"
ASYNC_MODE_FOREGROUND = "FOREGROUND"
#: A constant which can be used with the async_mode property of a QueryDetails.
#: This constant has a value of "BACKGROUND"
ASYNC_MODE_BACKGROUND = "BACKGROUND"
def __init__(self, **kwargs):
"""
Initializes a new QueryDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this QueryDetails.
:type compartment_id: str
:param compartment_id_in_subtree:
The value to assign to the compartment_id_in_subtree property of this QueryDetails.
:type compartment_id_in_subtree: bool
:param saved_search_id:
The value to assign to the saved_search_id property of this QueryDetails.
:type saved_search_id: str
:param query_string:
The value to assign to the query_string property of this QueryDetails.
:type query_string: str
:param sub_system:
The value to assign to the sub_system property of this QueryDetails.
Allowed values for this property are: "LOG"
:type sub_system: str
:param max_total_count:
The value to assign to the max_total_count property of this QueryDetails.
:type max_total_count: int
:param time_filter:
The value to assign to the time_filter property of this QueryDetails.
:type time_filter: oci.log_analytics.models.TimeRange
:param scope_filters:
The value to assign to the scope_filters property of this QueryDetails.
:type scope_filters: list[oci.log_analytics.models.ScopeFilter]
:param query_timeout_in_seconds:
The value to assign to the query_timeout_in_seconds property of this QueryDetails.
:type query_timeout_in_seconds: int
:param should_run_async:
The value to assign to the should_run_async property of this QueryDetails.
:type should_run_async: bool
:param async_mode:
The value to assign to the async_mode property of this QueryDetails.
Allowed values for this property are: "FOREGROUND", "BACKGROUND"
:type async_mode: str
:param should_include_total_count:
The value to assign to the should_include_total_count property of this QueryDetails.
:type should_include_total_count: bool
:param should_include_columns:
The value to assign to the should_include_columns property of this QueryDetails.
:type should_include_columns: bool
:param should_include_fields:
The value to assign to the should_include_fields property of this QueryDetails.
:type should_include_fields: bool
:param should_use_acceleration:
The value to assign to the should_use_acceleration property of this QueryDetails.
:type should_use_acceleration: bool
"""
self.swagger_types = {
'compartment_id': 'str',
'compartment_id_in_subtree': 'bool',
'saved_search_id': 'str',
'query_string': 'str',
'sub_system': 'str',
'max_total_count': 'int',
'time_filter': 'TimeRange',
'scope_filters': 'list[ScopeFilter]',
'query_timeout_in_seconds': 'int',
'should_run_async': 'bool',
'async_mode': 'str',
'should_include_total_count': 'bool',
'should_include_columns': 'bool',
'should_include_fields': 'bool',
'should_use_acceleration': 'bool'
}
self.attribute_map = {
'compartment_id': 'compartmentId',
'compartment_id_in_subtree': 'compartmentIdInSubtree',
'saved_search_id': 'savedSearchId',
'query_string': 'queryString',
'sub_system': 'subSystem',
'max_total_count': 'maxTotalCount',
'time_filter': 'timeFilter',
'scope_filters': 'scopeFilters',
'query_timeout_in_seconds': 'queryTimeoutInSeconds',
'should_run_async': 'shouldRunAsync',
'async_mode': 'asyncMode',
'should_include_total_count': 'shouldIncludeTotalCount',
'should_include_columns': 'shouldIncludeColumns',
'should_include_fields': 'shouldIncludeFields',
'should_use_acceleration': 'shouldUseAcceleration'
}
self._compartment_id = None
self._compartment_id_in_subtree = None
self._saved_search_id = None
self._query_string = None
self._sub_system = None
self._max_total_count = None
self._time_filter = None
self._scope_filters = None
self._query_timeout_in_seconds = None
self._should_run_async = None
self._async_mode = None
self._should_include_total_count = None
self._should_include_columns = None
self._should_include_fields = None
self._should_use_acceleration = None
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this QueryDetails.
        Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this QueryDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this QueryDetails.
        Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this QueryDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def compartment_id_in_subtree(self):
"""
Gets the compartment_id_in_subtree of this QueryDetails.
Flag to search all child compartments of the compartment Id specified in the compartmentId query parameter.
:return: The compartment_id_in_subtree of this QueryDetails.
:rtype: bool
"""
return self._compartment_id_in_subtree
@compartment_id_in_subtree.setter
def compartment_id_in_subtree(self, compartment_id_in_subtree):
"""
Sets the compartment_id_in_subtree of this QueryDetails.
Flag to search all child compartments of the compartment Id specified in the compartmentId query parameter.
:param compartment_id_in_subtree: The compartment_id_in_subtree of this QueryDetails.
:type: bool
"""
self._compartment_id_in_subtree = compartment_id_in_subtree
@property
def saved_search_id(self):
"""
Gets the saved_search_id of this QueryDetails.
Saved search OCID for this query if known.
:return: The saved_search_id of this QueryDetails.
:rtype: str
"""
return self._saved_search_id
@saved_search_id.setter
def saved_search_id(self, saved_search_id):
"""
Sets the saved_search_id of this QueryDetails.
Saved search OCID for this query if known.
:param saved_search_id: The saved_search_id of this QueryDetails.
:type: str
"""
self._saved_search_id = saved_search_id
@property
def query_string(self):
"""
**[Required]** Gets the query_string of this QueryDetails.
        Query to perform. Must conform to the logging analytics query language syntax. Syntax errors will be returned if present.
:return: The query_string of this QueryDetails.
:rtype: str
"""
return self._query_string
@query_string.setter
def query_string(self, query_string):
"""
Sets the query_string of this QueryDetails.
        Query to perform. Must conform to the logging analytics query language syntax. Syntax errors will be returned if present.
:param query_string: The query_string of this QueryDetails.
:type: str
"""
self._query_string = query_string
@property
def sub_system(self):
"""
**[Required]** Gets the sub_system of this QueryDetails.
Default subsystem to qualify fields with in the queryString if not specified.
Allowed values for this property are: "LOG"
:return: The sub_system of this QueryDetails.
:rtype: str
"""
return self._sub_system
@sub_system.setter
def sub_system(self, sub_system):
"""
Sets the sub_system of this QueryDetails.
Default subsystem to qualify fields with in the queryString if not specified.
:param sub_system: The sub_system of this QueryDetails.
:type: str
"""
allowed_values = ["LOG"]
if not value_allowed_none_or_none_sentinel(sub_system, allowed_values):
raise ValueError(
"Invalid value for `sub_system`, must be None or one of {0}"
.format(allowed_values)
)
self._sub_system = sub_system
@property
def max_total_count(self):
"""
Gets the max_total_count of this QueryDetails.
Maximum number of results to count. Note a maximum of 2001 will be enforced; that is, actualMaxTotalCountUsed = Math.min(maxTotalCount, 2001).
:return: The max_total_count of this QueryDetails.
:rtype: int
"""
return self._max_total_count
@max_total_count.setter
def max_total_count(self, max_total_count):
"""
Sets the max_total_count of this QueryDetails.
Maximum number of results to count. Note a maximum of 2001 will be enforced; that is, actualMaxTotalCountUsed = Math.min(maxTotalCount, 2001).
:param max_total_count: The max_total_count of this QueryDetails.
:type: int
"""
self._max_total_count = max_total_count
@property
def time_filter(self):
"""
Gets the time_filter of this QueryDetails.
:return: The time_filter of this QueryDetails.
:rtype: oci.log_analytics.models.TimeRange
"""
return self._time_filter
@time_filter.setter
def time_filter(self, time_filter):
"""
Sets the time_filter of this QueryDetails.
:param time_filter: The time_filter of this QueryDetails.
:type: oci.log_analytics.models.TimeRange
"""
self._time_filter = time_filter
@property
def scope_filters(self):
"""
Gets the scope_filters of this QueryDetails.
List of filters to be applied when the query executes. More than one filter per field is not permitted.
:return: The scope_filters of this QueryDetails.
:rtype: list[oci.log_analytics.models.ScopeFilter]
"""
return self._scope_filters
@scope_filters.setter
def scope_filters(self, scope_filters):
"""
Sets the scope_filters of this QueryDetails.
List of filters to be applied when the query executes. More than one filter per field is not permitted.
:param scope_filters: The scope_filters of this QueryDetails.
:type: list[oci.log_analytics.models.ScopeFilter]
"""
self._scope_filters = scope_filters
@property
def query_timeout_in_seconds(self):
"""
Gets the query_timeout_in_seconds of this QueryDetails.
Amount of time, in seconds, allowed for a query to execute. If this time expires before the query is complete, any partial results will be returned.
:return: The query_timeout_in_seconds of this QueryDetails.
:rtype: int
"""
return self._query_timeout_in_seconds
@query_timeout_in_seconds.setter
def query_timeout_in_seconds(self, query_timeout_in_seconds):
"""
Sets the query_timeout_in_seconds of this QueryDetails.
Amount of time, in seconds, allowed for a query to execute. If this time expires before the query is complete, any partial results will be returned.
:param query_timeout_in_seconds: The query_timeout_in_seconds of this QueryDetails.
:type: int
"""
self._query_timeout_in_seconds = query_timeout_in_seconds
@property
def should_run_async(self):
"""
Gets the should_run_async of this QueryDetails.
Option to run the query asynchronously. This will lead to a LogAnalyticsQueryJobWorkRequest being submitted and the {workRequestId} will be returned to use for fetching the results.
:return: The should_run_async of this QueryDetails.
:rtype: bool
"""
return self._should_run_async
@should_run_async.setter
def should_run_async(self, should_run_async):
"""
Sets the should_run_async of this QueryDetails.
Option to run the query asynchronously. This will lead to a LogAnalyticsQueryJobWorkRequest being submitted and the {workRequestId} will be returned to use for fetching the results.
:param should_run_async: The should_run_async of this QueryDetails.
:type: bool
"""
self._should_run_async = should_run_async
@property
def async_mode(self):
"""
Gets the async_mode of this QueryDetails.
Execution mode for the query if running asynchronously i.e (shouldRunAsync is set to true).
Allowed values for this property are: "FOREGROUND", "BACKGROUND"
:return: The async_mode of this QueryDetails.
:rtype: str
"""
return self._async_mode
@async_mode.setter
def async_mode(self, async_mode):
"""
Sets the async_mode of this QueryDetails.
Execution mode for the query if running asynchronously i.e (shouldRunAsync is set to true).
:param async_mode: The async_mode of this QueryDetails.
:type: str
"""
allowed_values = ["FOREGROUND", "BACKGROUND"]
if not value_allowed_none_or_none_sentinel(async_mode, allowed_values):
raise ValueError(
"Invalid value for `async_mode`, must be None or one of {0}"
.format(allowed_values)
)
self._async_mode = async_mode
@property
def should_include_total_count(self):
"""
Gets the should_include_total_count of this QueryDetails.
Include the total number of results from the query. Note, this value will always be equal to or less than maxTotalCount.
:return: The should_include_total_count of this QueryDetails.
:rtype: bool
"""
return self._should_include_total_count
@should_include_total_count.setter
def should_include_total_count(self, should_include_total_count):
"""
Sets the should_include_total_count of this QueryDetails.
Include the total number of results from the query. Note, this value will always be equal to or less than maxTotalCount.
:param should_include_total_count: The should_include_total_count of this QueryDetails.
:type: bool
"""
self._should_include_total_count = should_include_total_count
@property
def should_include_columns(self):
"""
Gets the should_include_columns of this QueryDetails.
Include columns in response
:return: The should_include_columns of this QueryDetails.
:rtype: bool
"""
return self._should_include_columns
@should_include_columns.setter
def should_include_columns(self, should_include_columns):
"""
Sets the should_include_columns of this QueryDetails.
Include columns in response
:param should_include_columns: The should_include_columns of this QueryDetails.
:type: bool
"""
self._should_include_columns = should_include_columns
@property
def should_include_fields(self):
"""
Gets the should_include_fields of this QueryDetails.
Include fields in response
:return: The should_include_fields of this QueryDetails.
:rtype: bool
"""
return self._should_include_fields
@should_include_fields.setter
def should_include_fields(self, should_include_fields):
"""
Sets the should_include_fields of this QueryDetails.
Include fields in response
:param should_include_fields: The should_include_fields of this QueryDetails.
:type: bool
"""
self._should_include_fields = should_include_fields
@property
def should_use_acceleration(self):
"""
Gets the should_use_acceleration of this QueryDetails.
Controls if query should ignore pre-calculated results if available and only use raw data. If set and no acceleration data is found it will fallback to raw data.
:return: The should_use_acceleration of this QueryDetails.
:rtype: bool
"""
return self._should_use_acceleration
@should_use_acceleration.setter
def should_use_acceleration(self, should_use_acceleration):
"""
Sets the should_use_acceleration of this QueryDetails.
Controls if query should ignore pre-calculated results if available and only use raw data. If set and no acceleration data is found it will fallback to raw data.
:param should_use_acceleration: The should_use_acceleration of this QueryDetails.
:type: bool
"""
self._should_use_acceleration = should_use_acceleration
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
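# Illustrative usage sketch (not part of the SDK module): QueryDetails is initialized from
# keyword arguments; the compartment OCID and query string below are placeholders.
#
#     details = QueryDetails(
#         compartment_id='ocid1.compartment.oc1..example',                    # placeholder OCID
#         query_string="'Log Source' = 'Linux Syslog Logs' | stats count",    # placeholder query
#         sub_system='LOG',
#         should_run_async=True,
#         async_mode='BACKGROUND',
#         should_include_total_count=True)
#
# attribute_map above defines the wire names used when the SDK serializes this object,
# e.g. query_string is sent as 'queryString' and async_mode as 'asyncMode'.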
|
ledfx/color.py | broccoliboy/LedFx | 524 | 20310 | from collections import namedtuple
RGB = namedtuple("RGB", "red, green, blue")
COLORS = {
"red": RGB(255, 0, 0),
"orange-deep": RGB(255, 40, 0),
"orange": RGB(255, 120, 0),
"yellow": RGB(255, 200, 0),
"yellow-acid": RGB(160, 255, 0),
"green": RGB(0, 255, 0),
"green-forest": RGB(34, 139, 34),
"green-spring": RGB(0, 255, 127),
"green-teal": RGB(0, 128, 128),
"green-turquoise": RGB(0, 199, 140),
"green-coral": RGB(0, 255, 50),
"cyan": RGB(0, 255, 255),
"blue": RGB(0, 0, 255),
"blue-light": RGB(65, 105, 225),
"blue-navy": RGB(0, 0, 128),
"blue-aqua": RGB(0, 255, 255),
"purple": RGB(128, 0, 128),
"pink": RGB(255, 0, 178),
"magenta": RGB(255, 0, 255),
"black": RGB(0, 0, 0),
"white": RGB(255, 255, 255),
"brown": RGB(139, 69, 19),
"gold": RGB(255, 215, 0),
"hotpink": RGB(255, 105, 180),
"lightblue": RGB(173, 216, 230),
"lightgreen": RGB(152, 251, 152),
"lightpink": RGB(255, 182, 193),
"lightyellow": RGB(255, 255, 224),
"maroon": RGB(128, 0, 0),
"mint": RGB(189, 252, 201),
"olive": RGB(85, 107, 47),
"peach": RGB(255, 100, 100),
"plum": RGB(221, 160, 221),
"sepia": RGB(94, 38, 18),
"skyblue": RGB(135, 206, 235),
"steelblue": RGB(70, 130, 180),
"tan": RGB(210, 180, 140),
"violetred": RGB(208, 32, 144),
}
GRADIENTS = {
"Rainbow": {
"colors": [
"red",
"orange",
"yellow",
"green",
"green-turquoise",
"blue",
"purple",
"pink",
]
},
"Dancefloor": {"colors": ["red", "pink", "blue"]},
"Plasma": {"colors": ["blue", "purple", "red", "orange-deep", "yellow"]},
"Ocean": {"colors": ["blue-aqua", "blue"]},
"Viridis": {"colors": ["purple", "blue", "green-teal", "green", "yellow"]},
"Jungle": {"colors": ["green", "green-forest", "orange"]},
"Spring": {"colors": ["pink", "orange-deep", "yellow"]},
"Winter": {"colors": ["green-turquoise", "green-coral"]},
"Frost": {"colors": ["blue", "blue-aqua", "purple", "pink"]},
"Sunset": {"colors": ["blue-navy", "orange", "red"]},
"Borealis": {
"colors": [
"orange-deep",
"purple",
"green-turquoise",
"green",
]
},
"Rust": {"colors": ["orange-deep", "red"]},
"Christmas": {
"colors": [
"red",
"red",
"red",
"red",
"red",
"green",
"green",
"green",
"green",
"green",
],
"method": "repeat",
},
"Winamp": {
"colors": [
"green",
"yellow",
"orange",
"orange-deep",
"red",
]
},
}
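# Illustrative sketch (not part of the module): COLORS values are RGB namedtuples and
# GRADIENTS entries are ordered colour-name lists, so the two tables compose directly.
#
#     >>> COLORS['orange-deep'].red, COLORS['orange-deep'].green, COLORS['orange-deep'].blue
#     (255, 40, 0)
#     >>> [COLORS[name] for name in GRADIENTS['Sunset']['colors']]
#     [RGB(red=0, green=0, blue=128), RGB(red=255, green=120, blue=0), RGB(red=255, green=0, blue=0)]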
|
DeepAlignmentNetwork/menpofit/lk/result.py | chiawei-liu/DeepAlignmentNetwork | 220 | 20325 | from menpofit.result import (ParametricIterativeResult,
MultiScaleParametricIterativeResult)
class LucasKanadeAlgorithmResult(ParametricIterativeResult):
r"""
Class for storing the iterative result of a Lucas-Kanade Image Alignment
optimization algorithm.
Parameters
----------
shapes : `list` of `menpo.shape.PointCloud`
The `list` of shapes per iteration. The first and last members
correspond to the initial and final shapes, respectively.
homogeneous_parameters : `list` of ``(n_parameters,)`` `ndarray`
The `list` of parameters of the homogeneous transform per iteration.
The first and last members correspond to the initial and final
shapes, respectively.
initial_shape : `menpo.shape.PointCloud` or ``None``, optional
The initial shape from which the fitting process started. If
``None``, then no initial shape is assigned.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
costs : `list` of `float` or ``None``, optional
The `list` of cost per iteration. If ``None``, then it is assumed that
the cost function cannot be computed for the specific algorithm.
"""
def __init__(self, shapes, homogeneous_parameters, initial_shape=None,
image=None, gt_shape=None, costs=None):
super(LucasKanadeAlgorithmResult, self).__init__(
shapes=shapes, shape_parameters=homogeneous_parameters,
initial_shape=initial_shape, image=image, gt_shape=gt_shape,
costs=costs)
self._homogeneous_parameters = homogeneous_parameters
@property
def homogeneous_parameters(self):
r"""
Returns the `list` of parameters of the homogeneous transform
obtained at each iteration of the fitting process. The `list`
includes the parameters of the `initial_shape` (if it exists) and
`final_shape`.
:type: `list` of ``(n_params,)`` `ndarray`
"""
return self._shape_parameters
class LucasKanadeResult(MultiScaleParametricIterativeResult):
r"""
Class for storing the multi-scale iterative fitting result of an ATM. It
holds the shapes, shape parameters and costs per iteration.
Parameters
----------
    results : `list` of :map:`LucasKanadeAlgorithmResult`
The `list` of optimization results per scale.
scales : `list` or `tuple`
The `list` of scale values per scale (low to high).
affine_transforms : `list` of `menpo.transform.Affine`
The list of affine transforms per scale that transform the shapes into
the original image space.
scale_transforms : `list` of `menpo.shape.Scale`
The list of scaling transforms per scale.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
"""
def __init__(self, results, scales, affine_transforms, scale_transforms,
image=None, gt_shape=None):
super(LucasKanadeResult, self).__init__(
results=results, scales=scales, affine_transforms=affine_transforms,
scale_transforms=scale_transforms, image=image, gt_shape=gt_shape)
# Create parameters list
self._homogeneous_parameters = []
for r in results:
self._homogeneous_parameters += r.homogeneous_parameters
# Correct n_iters
self._n_iters -= len(scales)
@property
def homogeneous_parameters(self):
r"""
Returns the `list` of parameters of the homogeneous transform
obtained at each iteration of the fitting process. The `list`
includes the parameters of the `initial_shape` (if it exists) and
`final_shape`.
:type: `list` of ``(n_params,)`` `ndarray`
"""
return self._homogeneous_parameters
@property
def shape_parameters(self):
# Use homogeneous_parameters instead.
raise AttributeError
|
bob-ross/cluster-paintings.py | h4ckfu/data | 16,124 | 20348 | <reponame>h4ckfu/data<filename>bob-ross/cluster-paintings.py
"""
Clusters Bob Ross paintings by features.
By <NAME> <<EMAIL>>
See http://fivethirtyeight.com/features/a-statistical-analysis-of-the-work-of-bob-ross/
"""
import numpy as np
from scipy.cluster.vq import vq, kmeans, whiten
import math
import csv
def main():
# load data into vectors of 1s and 0s for each tag
with open('elements-by-episode.csv','r') as csvfile:
reader = csv.reader(csvfile)
reader.next() # skip header
data = []
for row in reader:
data.append(map(lambda x: int(x), row[2:])) # exclude EPISODE and TITLE columns
# convert to numpy matrix
matrix = np.array(data)
# remove colums that have been tagged less than 5 times
columns_to_remove = []
for col in range(np.shape(matrix)[1]):
if sum(matrix[:,col]) <= 5:
columns_to_remove.append(col)
matrix = np.delete(matrix, columns_to_remove, axis=1)
# normalize according to stddev
whitened = whiten(matrix)
output = kmeans(whitened, 10)
print "episode", "distance", "cluster"
# determine distance between each of 403 vectors and each centroid, find closest neighbor
for i, v in enumerate(whitened):
# distance between centroid 0 and feature vector
distance = math.sqrt(sum((v - output[0][0]) ** 2))
# group is the centroid it is closest to so far, set initally to centroid 0
group = 0
closest_match = (distance, group)
# test the vector i against the 10 centroids, find nearest neighbor
for x in range (0, 10):
dist_x = math.sqrt(sum((v - output[0][x]) ** 2))
if dist_x < closest_match[0]:
closest_match = (dist_x, x)
print i+1, closest_match[0], closest_match[1]
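# Note (sketch, not used above): the manual nearest-centroid search in the loop is what
# scipy.cluster.vq.vq computes directly; an equivalent call would be roughly:
#
#     codes, distances = vq(whitened, output[0])   # cluster index and distance per painting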
if __name__ == "__main__":
main() |
alipay/aop/api/domain/MybankCreditSceneprodCommonQueryModel.py | antopen/alipay-sdk-python-all | 213 | 20355 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankCreditSceneprodCommonQueryModel(object):
def __init__(self):
self._app_seq_no = None
self._ext_param = None
self._operation_type = None
self._org_code = None
self._product_code = None
self._seq_no = None
@property
def app_seq_no(self):
return self._app_seq_no
@app_seq_no.setter
def app_seq_no(self, value):
self._app_seq_no = value
@property
def ext_param(self):
return self._ext_param
@ext_param.setter
def ext_param(self, value):
self._ext_param = value
@property
def operation_type(self):
return self._operation_type
@operation_type.setter
def operation_type(self, value):
self._operation_type = value
@property
def org_code(self):
return self._org_code
@org_code.setter
def org_code(self, value):
self._org_code = value
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
@property
def seq_no(self):
return self._seq_no
@seq_no.setter
def seq_no(self, value):
self._seq_no = value
def to_alipay_dict(self):
params = dict()
if self.app_seq_no:
if hasattr(self.app_seq_no, 'to_alipay_dict'):
params['app_seq_no'] = self.app_seq_no.to_alipay_dict()
else:
params['app_seq_no'] = self.app_seq_no
if self.ext_param:
if hasattr(self.ext_param, 'to_alipay_dict'):
params['ext_param'] = self.ext_param.to_alipay_dict()
else:
params['ext_param'] = self.ext_param
if self.operation_type:
if hasattr(self.operation_type, 'to_alipay_dict'):
params['operation_type'] = self.operation_type.to_alipay_dict()
else:
params['operation_type'] = self.operation_type
if self.org_code:
if hasattr(self.org_code, 'to_alipay_dict'):
params['org_code'] = self.org_code.to_alipay_dict()
else:
params['org_code'] = self.org_code
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
if self.seq_no:
if hasattr(self.seq_no, 'to_alipay_dict'):
params['seq_no'] = self.seq_no.to_alipay_dict()
else:
params['seq_no'] = self.seq_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankCreditSceneprodCommonQueryModel()
if 'app_seq_no' in d:
o.app_seq_no = d['app_seq_no']
if 'ext_param' in d:
o.ext_param = d['ext_param']
if 'operation_type' in d:
o.operation_type = d['operation_type']
if 'org_code' in d:
o.org_code = d['org_code']
if 'product_code' in d:
o.product_code = d['product_code']
if 'seq_no' in d:
o.seq_no = d['seq_no']
return o
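# Illustrative round-trip sketch (assumed values, not part of the SDK): to_alipay_dict()
# and from_alipay_dict() are symmetric for plain string fields.
#
#     model = MybankCreditSceneprodCommonQueryModel()
#     model.org_code = 'ORG001'           # placeholder
#     model.product_code = 'PROD001'      # placeholder
#     model.seq_no = '20200101000001'     # placeholder
#     payload = model.to_alipay_dict()    # {'org_code': 'ORG001', 'product_code': 'PROD001', 'seq_no': '20200101000001'}
#     restored = MybankCreditSceneprodCommonQueryModel.from_alipay_dict(payload)
#     assert restored.seq_no == model.seq_no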
|
examples/plugin_example/setup.py | linshoK/pysen | 423 | 20367 | from setuptools import setup
setup(
name="example-advanced-package", version="0.0.0", packages=[],
)
|
venv/lib/python3.9/site-packages/py2app/recipes/PIL/prescript.py | dequeb/asmbattle | 193 | 20376 | def _recipes_pil_prescript(plugins):
try:
import Image
have_PIL = False
except ImportError:
from PIL import Image
have_PIL = True
import sys
def init():
if Image._initialized >= 2:
return
if have_PIL:
try:
import PIL.JpegPresets
sys.modules["JpegPresets"] = PIL.JpegPresets
except ImportError:
pass
for plugin in plugins:
try:
if have_PIL:
try:
# First try absolute import through PIL (for
# Pillow support) only then try relative imports
m = __import__("PIL." + plugin, globals(), locals(), [])
m = getattr(m, plugin)
sys.modules[plugin] = m
continue
except ImportError:
pass
__import__(plugin, globals(), locals(), [])
except ImportError:
print("Image: failed to import")
if Image.OPEN or Image.SAVE:
Image._initialized = 2
return 1
Image.init = init
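# Illustrative sketch (assumed plugin names, not part of py2app): the prescript is meant to
# be invoked with the list of PIL plugin module names bundled by the recipe, e.g.
#
#     _recipes_pil_prescript(['JpegImagePlugin', 'PngImagePlugin', 'GifImagePlugin'])
#
# After this call, Image.init is the patched initializer that imports each listed plugin and
# marks PIL as fully initialized once at least one OPEN/SAVE handler has been registered.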
|
turkish_morphology/validate_test.py | nogeeky/turkish-morphology | 157 | 20384 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for turkish_morphology.validate."""
import os
from turkish_morphology import analysis_pb2
from turkish_morphology import validate
from absl.testing import absltest
from absl.testing import parameterized
from google.protobuf import text_format
_TESTDATA_DIR = "turkish_morphology/testdata"
def _read_file(path):
with open(path, "r") as f:
read = f.read()
return read
def _read_analysis(basename):
path = os.path.join(_TESTDATA_DIR, f"{basename}.pbtxt")
return text_format.Parse(_read_file(path), analysis_pb2.Analysis())
class AnalysisTest(parameterized.TestCase):
@parameterized.named_parameters([
{
"testcase_name": "SingleInflectionalGroupsWithProperFeature",
"basename": "araba_with_proper",
},
{
"testcase_name": "SingleInflectionalGroupsWithoutProperFeature",
"basename": "araba_without_proper",
},
{
"testcase_name": "MultipleInflectionalGroupsWithProperFeature",
"basename": "yasa_with_proper",
},
{
"testcase_name": "MultipleInflectionalGroupsWithoutProperFeature",
"basename": "yasa_without_proper",
},
])
def test_success(self, basename):
analysis = _read_analysis(basename)
actual = validate.analysis(analysis)
self.assertIsNone(actual)
@parameterized.named_parameters([
{
"testcase_name": "AnalysisMissingInflectionalGroups",
"basename": "invalid_empty_analysis",
"message": "Analysis is missing inflectional groups",
},
{
"testcase_name": "InflectionalGroupMissingPartOfSpeechTag",
"basename": "invalid_ig_missing_pos",
"message": "Inflectional group 2 is missing part-of-speech tag",
},
{
"testcase_name": "InflectionalGroupEmptyPartOfSpeechTag",
"basename": "invalid_ig_empty_pos",
"message": "Inflectional group 2 part-of-speech tag is empty",
},
{
"testcase_name": "FirstInflectionalGroupMissingRoot",
"basename": "invalid_first_ig_missing_root",
"message": "Inflectional group 1 is missing root",
},
{
"testcase_name": "DerivedInflectionalGroupMissingDerivation",
"basename": "invalid_derived_ig_missing_derivation",
"message": "Inflectional group 2 is missing derivational affix",
},
{
"testcase_name": "AffixMissingFeature",
"basename": "invalid_affix_missing_feature",
"message": "Affix is missing feature",
},
{
"testcase_name": "DerivationalAffixMissingMetaMorpheme",
"basename": "invalid_derivational_affix_missing_meta_morpheme",
"message": "Derivational affix is missing meta-morpheme",
},
{
"testcase_name": "DerivationalAffixEmptyMetaMorpheme",
"basename": "invalid_derivational_affix_empty_meta_morpheme",
"message": "Derivational affix meta-morpheme is empty",
},
{
"testcase_name": "FeatureMissingCategory",
"basename": "invalid_feature_missing_category",
"message": "Feature is missing category",
},
{
"testcase_name": "FeatureEmptyCategory",
"basename": "invalid_feature_empty_category",
"message": "Feature category is empty",
},
{
"testcase_name": "FeatureMissingValue",
"basename": "invalid_feature_missing_value",
"message": "Feature is missing value",
},
{
"testcase_name": "FeatureEmptyValue",
"basename": "invalid_feature_empty_value",
"message": "Feature value is empty",
},
{
"testcase_name": "RootMissingMorpheme",
"basename": "invalid_root_missing_morpheme",
"message": "Root is missing morpheme",
},
{
"testcase_name": "RootEmptyMorpheme",
"basename": "invalid_root_empty_morpheme",
"message": "Root morpheme is empty",
},
])
def test_raises_exception(self, basename, message):
analysis = _read_analysis(basename)
with self.assertRaisesRegexp(validate.IllformedAnalysisError, message):
validate.analysis(analysis)
if __name__ == "__main__":
absltest.main()
|
news-category-classifcation/build_vocab.py | lyeoni/pytorch-nlp-tutorial | 1,433 | 20392 | <filename>news-category-classifcation/build_vocab.py
import argparse
import pickle
from tokenization import Vocab, Tokenizer
TOKENIZER = ('treebank', 'mecab')
def argparser():
p = argparse.ArgumentParser()
# Required parameters
p.add_argument('--corpus', default=None, type=str, required=True)
p.add_argument('--vocab', default=None, type=str, required=True)
# Other parameters
p.add_argument('--pretrained_vectors', default=None, type=str)
p.add_argument('--is_sentence', action='store_true',
help='Whether the corpus is already split into sentences')
p.add_argument('--tokenizer', default='treebank', type=str,
help='Tokenizer used for input corpus tokenization: ' + ', '.join(TOKENIZER))
p.add_argument('--max_seq_length', default=1024, type=int,
help='The maximum total input sequence length after tokenization')
p.add_argument('--unk_token', default='<unk>', type=str,
help='The representation for any unknown token')
p.add_argument('--pad_token', default='<pad>', type=str,
help='The representation for the special token of padding token')
p.add_argument('--bos_token', default='<bos>', type=str,
help='The representation for the special token of beginning-of-sequence token')
p.add_argument('--eos_token', default='<eos>', type=str,
help='The representation for the special token of end-of-sequence token')
p.add_argument('--min_freq', default=3, type=int,
help='The minimum frequency required for a token')
p.add_argument('--lower', action='store_true',
help='Whether to convert the texts to lowercase')
config = p.parse_args()
return config
def load_pretrained(fname):
"""
Load pre-trained FastText word vectors
:param fname: text file containing the word vectors, one per line.
"""
fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
    print('Loading {} word vectors (dim={})...'.format(n, d))
word2vec_dict = {}
for line in fin:
tokens = line.rstrip().split(' ')
word2vec_dict[tokens[0]] = list(map(float, tokens[1:]))
print('#pretrained_word_vectors:', len(word2vec_dict))
return word2vec_dict
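# Illustrative command line (file paths are placeholders, not part of the script):
#
#     python build_vocab.py --corpus data/news_category.txt --vocab data/vocab.pkl \
#         --tokenizer treebank --min_freq 3 --lower \
#         --pretrained_vectors wiki-news-300d-1M.vec
#
# The corpus is expected to be tab-separated with the text in the columns after the first
# one, matching the parsing in the __main__ block below.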
if __name__=='__main__':
config = argparser()
print(config)
# Select tokenizer
config.tokenizer = config.tokenizer.lower()
if config.tokenizer==TOKENIZER[0]:
from nltk.tokenize import word_tokenize
tokenization_fn = word_tokenize
elif config.tokenizer ==TOKENIZER[1]:
from konlpy.tag import Mecab
tokenization_fn = Mecab().morphs
tokenizer = Tokenizer(tokenization_fn=tokenization_fn,
is_sentence=config.is_sentence,
max_seq_length=config.max_seq_length)
# Tokenization & read tokens
list_of_tokens = []
    with open(config.corpus, 'r', encoding='utf-8', errors='ignore') as reader:
for li, line in enumerate(reader):
text = ' '.join(line.split('\t')[1:]).strip()
list_of_tokens += tokenizer.tokenize(text)
# Build vocabulary
vocab = Vocab(list_of_tokens=list_of_tokens,
unk_token=config.unk_token,
pad_token=config.pad_token,
bos_token=config.bos_token,
eos_token=config.eos_token,
min_freq=config.min_freq,
lower=config.lower)
vocab.build()
if config.pretrained_vectors:
pretrained_vectors = load_pretrained(fname=config.pretrained_vectors)
vocab.from_pretrained(pretrained_vectors=pretrained_vectors)
print('Vocabulary size: ', len(vocab))
# Save vocabulary
with open(config.vocab, 'wb') as writer:
pickle.dump(vocab, writer)
print('Vocabulary saved to', config.vocab) |
tests/encryption/aes_decrypter.py | dfjxs/dfvfs | 176 | 20394 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the AES decrypter object."""
import unittest
from dfvfs.encryption import aes_decrypter
from dfvfs.lib import definitions
from tests.encryption import test_lib
class AESDecrypterTestCase(test_lib.DecrypterTestCase):
"""Tests for the AES decrypter object."""
_AES_INITIALIZATION_VECTOR = b'This is an IV456'
_AES_KEY = b'This is a key123'
def testInitialization(self):
"""Tests the initialization method."""
# Test missing arguments.
with self.assertRaises(ValueError):
aes_decrypter.AESDecrypter()
# Test unsupported block cipher mode.
with self.assertRaises(ValueError):
aes_decrypter.AESDecrypter(
cipher_mode='bogus', key=self._AES_KEY)
# Test missing initialization vector.
with self.assertRaises(ValueError):
aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_CBC, key=self._AES_KEY)
# Test missing initialization vector with valid block cipher mode.
aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_ECB, key=self._AES_KEY)
# Test incorrect key size.
with self.assertRaises(ValueError):
aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_ECB, key=b'Wrong key size')
# Test incorrect initialization vector type.
with self.assertRaises(TypeError):
aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_CBC,
initialization_vector='Wrong IV type', key=self._AES_KEY)
# Test incorrect initialization vector size.
with self.assertRaises(ValueError):
aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_CBC,
initialization_vector=b'Wrong IV size', key=self._AES_KEY)
def testDecrypt(self):
"""Tests the Decrypt method."""
decrypter = aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_CBC,
initialization_vector=self._AES_INITIALIZATION_VECTOR,
key=self._AES_KEY)
# Test full decryption.
expected_decrypted_data = b'This is secret encrypted text!!!'
decrypted_data, remaining_encrypted_data = decrypter.Decrypt(
b'2|\x7f\xd7\xff\xbay\xf9\x95?\x81\xc7\xaafV\xceB\x01\xdb8E7\xfe'
b'\x92j\xf0\x1d(\xb9\x9f\xad\x13', finalize=True)
self.assertEqual(decrypted_data, expected_decrypted_data)
self.assertEqual(remaining_encrypted_data, b'')
# Reset decrypter.
decrypter = aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_CBC,
initialization_vector=self._AES_INITIALIZATION_VECTOR,
key=self._AES_KEY)
# Test partial decryption.
partial_encrypted_data = (
b'2|\x7f\xd7\xff\xbay\xf9\x95?\x81\xc7\xaafV\xceB\x01\xdb8E7\xfe')
decrypted_data, remaining_encrypted_data = decrypter.Decrypt(
partial_encrypted_data)
self.assertEqual(decrypted_data, b'')
self.assertEqual(remaining_encrypted_data, partial_encrypted_data)
if __name__ == '__main__':
unittest.main()
|
Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py | heimlich1024/OD_CopyPasteExternal | 278 | 20399 | <filename>Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py
################################################################################
#
# cmd_copyToExternal.py
#
# Author: <NAME> | <NAME>
#
# Description: Copies Geo/Weights/Morphs/UV's to External File
#
# Last Update:
#
################################################################################
import lx
import lxifc
import lxu.command
from od_copy_paste_external import copy_to_external
class ODCopyToExternal(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
def cmd_Flags(self):
return lx.symbol.fCMD_MODEL | lx.symbol.fCMD_UNDO
def basic_Enable(self, msg):
return True
def cmd_Interact(self):
pass
def basic_Execute(self, msg, flags):
# TODO: Disable reload for release
reload(copy_to_external)
copy_to_external.execute()
def cmd_Query(self, index, vaQuery):
lx.notimpl()
lx.bless(ODCopyToExternal, "OD_CopyToExternal")
|
mayan/apps/mimetype/apps.py | eshbeata/open-paperless | 2,743 | 20441 | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from common import MayanAppConfig
from .licenses import * # NOQA
class MIMETypesApp(MayanAppConfig):
name = 'mimetype'
verbose_name = _('MIME types')
def ready(self, *args, **kwargs):
super(MIMETypesApp, self).ready(*args, **kwargs)
|
trove/tests/unittests/taskmanager/test_galera_clusters.py | a4913994/openstack_trove | 244 | 20457 | <reponame>a4913994/openstack_trove
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest.mock import Mock
from unittest.mock import patch
from trove.cluster.models import ClusterTasks as ClusterTaskStatus
from trove.cluster.models import DBCluster
from trove.common.exception import GuestError
from trove.common.strategies.cluster.experimental.galera_common.taskmanager \
import GaleraCommonClusterTasks
from trove.common.strategies.cluster.experimental.galera_common.taskmanager \
import GaleraCommonTaskManagerStrategy
from trove.datastore import models as datastore_models
from trove.instance.models import BaseInstance
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import InstanceTasks
from trove.instance.service_status import ServiceStatuses
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
class GaleraClusterTasksTest(trove_testtools.TestCase):
def setUp(self):
super(GaleraClusterTasksTest, self).setUp()
util.init_db()
self.cluster_id = "1232"
self.cluster_name = "Cluster-1234"
self.tenant_id = "6789"
self.db_cluster = DBCluster(ClusterTaskStatus.NONE,
id=self.cluster_id,
created=str(datetime.date),
updated=str(datetime.date),
name=self.cluster_name,
task_id=ClusterTaskStatus.NONE._code,
tenant_id=self.tenant_id,
datastore_version_id="1",
deleted=False)
self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1",
compute_instance_id="compute-1",
task_id=InstanceTasks.NONE._code,
task_description=InstanceTasks.NONE._db_text,
volume_id="volume-1",
datastore_version_id="1",
cluster_id=self.cluster_id,
type="member")
self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2",
compute_instance_id="compute-2",
task_id=InstanceTasks.NONE._code,
task_description=InstanceTasks.NONE._db_text,
volume_id="volume-2",
datastore_version_id="1",
cluster_id=self.cluster_id,
type="member")
self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="member3",
compute_instance_id="compute-3",
task_id=InstanceTasks.NONE._code,
task_description=InstanceTasks.NONE._db_text,
volume_id="volume-3",
datastore_version_id="1",
cluster_id=self.cluster_id,
type="member")
mock_ds1 = Mock()
mock_ds1.name = 'pxc'
mock_dv1 = Mock()
mock_dv1.name = '7.1'
self.clustertasks = GaleraCommonClusterTasks(
Mock(), self.db_cluster, datastore=mock_ds1,
datastore_version=mock_dv1)
self.cluster_context = {
'replication_user': {
'name': "name",
'password': "password",
},
'cluster_name': self.cluster_name,
'admin_password': "<PASSWORD>"
}
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch.object(DBInstance, 'find_by')
@patch.object(InstanceServiceStatus, 'find_by')
@patch('trove.taskmanager.models.LOG')
def test_all_instances_ready_with_server_error(self,
mock_logging, mock_find,
mock_db_find, mock_update):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.NEW
(mock_db_find.return_value.
get_task_status.return_value) = InstanceTasks.BUILDING_ERROR_SERVER
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
mock_update.assert_called_with(self.cluster_id, None)
self.assertFalse(ret_val)
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch.object(DBInstance, 'find_by')
@patch.object(InstanceServiceStatus, 'find_by')
@patch('trove.taskmanager.models.LOG')
def test_all_instances_ready_bad_status(self, mock_logging,
mock_find, mock_db_find,
mock_update):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.FAILED
(mock_db_find.return_value.
get_task_status.return_value) = InstanceTasks.NONE
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
mock_update.assert_called_with(self.cluster_id, None)
self.assertFalse(ret_val)
@patch.object(DBInstance, 'find_by')
@patch.object(InstanceServiceStatus, 'find_by')
def test_all_instances_ready(self, mock_find, mock_db_find):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.INSTANCE_READY
(mock_db_find.return_value.
get_task_status.return_value) = InstanceTasks.NONE
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
self.assertTrue(ret_val)
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch.object(GaleraCommonClusterTasks, '_all_instances_ready',
return_value=False)
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
@patch('trove.common.strategies.cluster.experimental.galera_common.'
'taskmanager.LOG')
def test_create_cluster_instance_not_ready(self, mock_logging, mock_dv,
mock_ds, mock_find_all,
mock_load, mock_ready,
mock_update):
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_load.return_value = BaseInstance(Mock(),
self.dbinst1, Mock(),
InstanceServiceStatus(
ServiceStatuses.NEW))
self.clustertasks.create_cluster(Mock(), self.cluster_id)
mock_update.assert_called_with(self.cluster_id)
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch.object(GaleraCommonClusterTasks, 'reset_task')
@patch.object(GaleraCommonClusterTasks, 'get_ip')
@patch.object(GaleraCommonClusterTasks, '_all_instances_ready')
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
@patch('trove.common.strategies.cluster.experimental.galera_common.'
'taskmanager.LOG')
def test_create_cluster_fail(self, mock_logging, mock_dv, mock_ds,
mock_find_all, mock_load, mock_ready, mock_ip,
mock_reset_task, mock_update_status):
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_load.return_value = BaseInstance(Mock(),
self.dbinst1, Mock(),
InstanceServiceStatus(
ServiceStatuses.NEW))
mock_ip.return_value = "10.0.0.2"
guest_client = Mock()
guest_client.install_cluster = Mock(side_effect=GuestError("Error"))
with patch.object(GaleraCommonClusterTasks, 'get_guest',
return_value=guest_client):
self.clustertasks.create_cluster(Mock(), self.cluster_id)
mock_update_status.assert_called_with('1232')
mock_reset_task.assert_called_with()
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch('trove.common.strategies.cluster.experimental.galera_common.'
'taskmanager.LOG')
def test_grow_cluster_does_not_exist(self, mock_logging,
mock_update_status):
context = Mock()
bad_cluster_id = '1234'
new_instances = [Mock(), Mock()]
self.clustertasks.grow_cluster(context, bad_cluster_id, new_instances)
mock_update_status.assert_called_with(
'1234',
status=InstanceTasks.GROWING_ERROR)
@patch.object(GaleraCommonClusterTasks, '_check_cluster_for_root')
@patch.object(GaleraCommonClusterTasks, 'reset_task')
@patch.object(GaleraCommonClusterTasks, '_render_cluster_config')
@patch.object(GaleraCommonClusterTasks, 'get_ip')
@patch.object(GaleraCommonClusterTasks, 'get_guest')
@patch.object(GaleraCommonClusterTasks, '_all_instances_ready',
return_value=True)
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
def test_grow_cluster_successs(self, mock_dv, mock_ds, mock_find_all,
mock_load, mock_ready, mock_guest, mock_ip,
mock_render, mock_reset_task,
mock_check_root):
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_ip.return_value = "10.0.0.2"
context = Mock()
new_instances = [Mock(), Mock()]
mock_guest.get_cluster_context = Mock(
return_value=self.cluster_context)
mock_guest.reset_admin_password = <PASSWORD>()
self.clustertasks.grow_cluster(context, self.cluster_id,
new_instances)
mock_reset_task.assert_called_with()
@patch.object(GaleraCommonClusterTasks, 'reset_task')
@patch.object(Instance, 'load')
@patch.object(Instance, 'delete')
@patch.object(DBInstance, 'find_all')
@patch.object(GaleraCommonClusterTasks, 'get_guest')
@patch.object(GaleraCommonClusterTasks, 'get_ip')
@patch.object(GaleraCommonClusterTasks, '_render_cluster_config')
def test_shrink_cluster_success(self, mock_render, mock_ip, mock_guest,
mock_find_all, mock_delete, mock_load,
mock_reset_task):
mock_find_all.return_value.all.return_value = [self.dbinst1]
context = Mock()
remove_instances = [Mock()]
mock_ip.return_value = "10.0.0.2"
mock_guest.get_cluster_context = Mock(
return_value=self.cluster_context)
self.clustertasks.shrink_cluster(context, self.cluster_id,
remove_instances)
mock_reset_task.assert_called_with()
@patch.object(Instance, 'load')
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch('trove.common.strategies.cluster.experimental.galera_common.'
'taskmanager.LOG')
def test_shrink_cluster_does_not_exist(self, mock_logging,
mock_update_status,
mock_load):
context = Mock()
bad_cluster_id = '1234'
remove_instances = [Mock()]
self.clustertasks.shrink_cluster(context, bad_cluster_id,
remove_instances)
mock_update_status.assert_called_with(
'1234',
status=InstanceTasks.SHRINKING_ERROR)
class GaleraTaskManagerStrategyTest(trove_testtools.TestCase):
def test_task_manager_cluster_tasks_class(self):
strategy = GaleraCommonTaskManagerStrategy()
self.assertFalse(
hasattr(strategy.task_manager_cluster_tasks_class,
'rebuild_cluster'))
self.assertTrue(callable(
strategy.task_manager_cluster_tasks_class.create_cluster))
def test_task_manager_api_class(self):
strategy = GaleraCommonTaskManagerStrategy()
self.assertFalse(hasattr(strategy.task_manager_api_class,
'add_new_node'))
|
dataloaders/voc.py | psui3905/CCT | 308 | 20474 | from base import BaseDataSet, BaseDataLoader
from utils import pallete
import numpy as np
import os
import scipy
import torch
from PIL import Image
import cv2
from torch.utils.data import Dataset
from torchvision import transforms
import json
class VOCDataset(BaseDataSet):
def __init__(self, **kwargs):
self.num_classes = 21
self.palette = pallete.get_voc_pallete(self.num_classes)
super(VOCDataset, self).__init__(**kwargs)
def _set_files(self):
self.root = os.path.join(self.root, 'VOCdevkit/VOC2012')
if self.split == "val":
file_list = os.path.join("dataloaders/voc_splits", f"{self.split}" + ".txt")
elif self.split in ["train_supervised", "train_unsupervised"]:
file_list = os.path.join("dataloaders/voc_splits", f"{self.n_labeled_examples}_{self.split}" + ".txt")
else:
raise ValueError(f"Invalid split name {self.split}")
file_list = [line.rstrip().split(' ') for line in tuple(open(file_list, "r"))]
self.files, self.labels = list(zip(*file_list))
def _load_data(self, index):
image_path = os.path.join(self.root, self.files[index][1:])
image = np.asarray(Image.open(image_path), dtype=np.float32)
image_id = self.files[index].split("/")[-1].split(".")[0]
if self.use_weak_lables:
label_path = os.path.join(self.weak_labels_output, image_id+".png")
else:
label_path = os.path.join(self.root, self.labels[index][1:])
label = np.asarray(Image.open(label_path), dtype=np.int32)
return image, label, image_id
class VOC(BaseDataLoader):
def __init__(self, kwargs):
self.MEAN = [0.485, 0.456, 0.406]
self.STD = [0.229, 0.224, 0.225]
self.batch_size = kwargs.pop('batch_size')
kwargs['mean'] = self.MEAN
kwargs['std'] = self.STD
kwargs['ignore_index'] = 255
        shuffle = kwargs.pop('shuffle', False)
num_workers = kwargs.pop('num_workers')
self.dataset = VOCDataset(**kwargs)
super(VOC, self).__init__(self.dataset, self.batch_size, shuffle, num_workers, val_split=None)
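# Illustrative usage sketch: only batch_size, shuffle and num_workers are consumed here;
# the remaining keys (dataset root, split, labelled-example count, ...) are assumptions
# about what BaseDataSet expects and are passed through to VOCDataset unchanged.
#
#     loader = VOC({
#         'data_dir': '/datasets/VOC',        # hypothetical key/path for the VOCdevkit root
#         'split': 'train_supervised',
#         'n_labeled_examples': 1464,         # hypothetical labelled-split size
#         'batch_size': 8,
#         'num_workers': 4,
#         'shuffle': True,
#     })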
|
scripts/pretty-printers/gdb/install.py | tobireinhard/cbmc | 412 | 20484 | <gh_stars>100-1000
#!/usr/bin/env python3
import os
from shutil import copyfile
def create_gdbinit_file():
"""
Create and insert into a .gdbinit file the python code to set-up cbmc pretty-printers.
"""
print("Attempting to enable cbmc-specific pretty-printers.")
home_folder = os.path.expanduser("~")
if not home_folder:
print(home_folder + " is an invalid home folder, can't auto-configure .gdbinit.")
return
# This is the code that should be copied if you're applying the changes by hand.
gdb_directory = os.path.dirname(os.path.abspath(__file__))
code_block_start = "cbmc_printers_folder = "
code_block = \
[
"{0}'{1}'".format(code_block_start, gdb_directory),
"if os.path.exists(cbmc_printers_folder):",
" sys.path.insert(1, cbmc_printers_folder)",
" from pretty_printers import load_cbmc_printers",
" load_cbmc_printers()",
]
gdbinit_file = os.path.join(home_folder, ".gdbinit")
lines = []
imports = { "os", "sys" }
if os.path.exists(gdbinit_file):
with open(gdbinit_file, 'r') as file:
lines = [ line.rstrip() for line in file ]
line_no = 0
while line_no < len(lines):
if lines[line_no].startswith('import '):
imports.add(lines[line_no][len("import "):].strip())
lines.pop(line_no)
else:
if lines[line_no].startswith(code_block_start):
print(".gdbinit already contains our pretty printers, not changing it")
return
line_no += 1
while len(lines) != 0 and (lines[0] == "" or lines[0] == "python"):
lines.pop(0)
backup_file = os.path.join(home_folder, "backup.gdbinit")
if os.path.exists(backup_file):
print("backup.gdbinit file already exists. Type 'y' if you would like to overwrite it or any other key to exit.")
choice = input().lower()
if choice != 'y':
return
print("Backing up {0}".format(gdbinit_file))
copyfile(gdbinit_file, backup_file)
lines = [ "python" ] + list(map("import {}".format, sorted(imports))) + [ "", "" ] + code_block + [ "", "" ] + lines + [ "" ]
print("Adding pretty-print commands to {0}.".format(gdbinit_file))
try:
with open(gdbinit_file, 'w+') as file:
file.write('\n'.join(lines))
print("Commands added.")
except:
print("Exception occured writing to file. Please apply changes manually.")
if __name__ == "__main__":
create_gdbinit_file()
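# For reference, after a successful run the top of ~/.gdbinit contains a block
# like the one below, assembled from `code_block` above ("<this-directory>"
# stands for the folder holding this script):
#
#   python
#   import os
#   import sys
#
#
#   cbmc_printers_folder = '<this-directory>'
#   if os.path.exists(cbmc_printers_folder):
#       sys.path.insert(1, cbmc_printers_folder)
#       from pretty_printers import load_cbmc_printers
#       load_cbmc_printers()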
|
losses/dice_loss.py | CharlesAuthier/geo-deep-learning | 121 | 20493 | import torch
import torch.nn as nn
import torch.nn.functional as F
def soft_dice_score(
output: torch.Tensor, target: torch.Tensor, smooth: float = 0.0, eps: float = 1e-7, dims=None) -> torch.Tensor:
assert output.size() == target.size()
if dims is not None:
intersection = torch.sum(output * target, dim=dims)
cardinality = torch.sum(output + target, dim=dims)
# print('cardinality', cardinality, 'intersection', intersection)
else:
intersection = torch.sum(output * target)
cardinality = torch.sum(output + target)
dice_score = (2.0 * intersection + smooth) / (cardinality + smooth).clamp_min(eps)
# print('dice_score', dice_score)
return dice_score
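# Worked example (added for exposition): with smooth=0 and dims=None, a perfect
# prediction (output == target == torch.ones(8)) gives intersection = 8 and
# cardinality = 16, so dice = 2 * 8 / 16 = 1.0; an all-zero prediction against
# the same target gives dice = 0 / 8 = 0.0.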
class DiceLoss(nn.Module):
def __init__(self, smooth=1.0, eps=1e-7, ignore_index=None, weight=None, mode='MULTICLASS_MODE'):
"""Implementation of Dice loss for image segmentation task.
https://github.com/qubvel/segmentation_models.pytorch
"""
super().__init__()
self.smooth = smooth
self.eps = eps
self.ignore_index = ignore_index
self.weight = weight
self.mode = mode
def forward(self, output, target):
bs = target.size(0)
num_classes = output.size(1)
dims = (0, 2)
# print(self.mode, self.ignore_index)
if self.mode == 'MULTICLASS_MODE':
output = output.log_softmax(dim=1).exp()
else:
output = F.logsigmoid(output).exp()
# output = output.log_softmax(dim=1).exp()
if self.mode == 'BINARY_MODE':
target = target.view(bs, 1, -1)
output = output.view(bs, 1, -1)
if self.ignore_index is not None:
mask = target != self.ignore_index
output = output * mask
target = target * mask
else:
target = target.view(bs, -1)
output = output.view(bs, num_classes, -1)
if self.ignore_index is not None:
mask = target != self.ignore_index
output = output * mask.unsqueeze(1)
target = F.one_hot((target * mask).to(torch.long), num_classes) # N,H*W -> N,H*W, C
target = target.permute(0, 2, 1) * mask.unsqueeze(1)
else:
target = F.one_hot(target, num_classes) # N,H*W -> N,H*W, C
                target = target.permute(0, 2, 1) # N, C, H*W
scores = soft_dice_score(output, target.type_as(output), smooth=self.smooth, eps=self.eps, dims=dims)
loss = 1.0 - scores
mask = target.sum(dims) > 0
loss *= mask.to(loss.dtype)
return loss.mean()
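# Illustrative smoke test (added for exposition, not part of the original
# module): 4-class logits of shape (N, C, H, W) against integer targets of
# shape (N, H, W). Run this file directly to print a single loss value.
if __name__ == "__main__":
    logits = torch.randn(2, 4, 8, 8, requires_grad=True)
    target = torch.randint(0, 4, (2, 8, 8))
    criterion = DiceLoss(smooth=1.0, ignore_index=None, mode='MULTICLASS_MODE')
    loss = criterion(logits, target)
    loss.backward()
    print(f"dice loss: {loss.item():.4f}")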
|
modules/cudaobjdetect/misc/python/test/test_cudaobjdetect.py | ptelang/opencv_contrib | 7,158 | 20497 | #!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class cudaobjdetect_test(NewOpenCVTests):
def setUp(self):
super(cudaobjdetect_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
@unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ,
"OPENCV_TEST_DATA_PATH is not defined")
def test_hog(self):
img_path = os.environ['OPENCV_TEST_DATA_PATH'] + '/gpu/caltech/image_00000009_0.png'
npMat = cv.cvtColor(cv.imread(img_path),cv.COLOR_BGR2BGRA)
cuMat = cv.cuda_GpuMat(npMat)
cuHog = cv.cuda.HOG_create()
cuHog.setSVMDetector(cuHog.getDefaultPeopleDetector())
loc, conf = cuHog.detect(cuMat)
self.assertTrue(len(loc) == len(conf) and len(loc) > 0 and len(loc[0]) == 2)
loc = cuHog.detectWithoutConf(cuMat)
self.assertTrue(len(loc) > 0 and len(loc[0]) == 2)
loc = cuHog.detectMultiScaleWithoutConf(cuMat)
self.assertTrue(len(loc) > 0 and len(loc[0]) == 4)
cuHog.setGroupThreshold(0)
loc, conf = cuHog.detectMultiScale(cuMat)
self.assertTrue(len(loc) == len(conf) and len(loc) > 0 and len(loc[0]) == 4)
if __name__ == '__main__':
NewOpenCVTests.bootstrap() |
test/ryu/vsw-602_mp_port_desc.py | iMasaruOki/lagopus | 281 | 20505 | <gh_stars>100-1000
from ryu.base.app_manager import RyuApp
from ryu.controller.ofp_event import EventOFPSwitchFeatures
from ryu.controller.ofp_event import EventOFPPortDescStatsReply
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto.ofproto_v1_2 import OFPG_ANY
from ryu.ofproto.ofproto_v1_3 import OFP_VERSION
from ryu.lib.mac import haddr_to_bin
class App(RyuApp):
OFP_VERSIONS = [OFP_VERSION]
def __init__(self, *args, **kwargs):
super(App, self).__init__(*args, **kwargs)
@set_ev_cls(EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
[self.install_sample(datapath, n) for n in [0]]
def create_meter_mod(self, datapath, command, flags_, meter_id, bands):
ofproto = datapath.ofproto
ofp_parser = datapath.ofproto_parser
meter_mod = ofp_parser.OFPMeterMod(datapath, command, flags_,
meter_id, bands)
return meter_mod
def install_sample(self, datapath, table_id):
parser = datapath.ofproto_parser
ofproto = datapath.ofproto
req = parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
@set_ev_cls(EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
ports = []
for p in ev.msg.body:
ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
'state=0x%08x curr=0x%08x advertised=0x%08x '
'supported=0x%08x peer=0x%08x curr_speed=%d '
'max_speed=%d' %
(p.port_no, p.hw_addr,
p.name, p.config,
p.state, p.curr, p.advertised,
p.supported, p.peer, p.curr_speed,
p.max_speed))
self.logger.info('OFPPortDescStatsReply received: %s', ports)
|
conda/update_versions.py | PicoJr/StereoPipeline | 323 | 20519 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
'''
Use dependency versions from a conda environment .yaml file to update
a recipe/meta.yaml file of a given package. Such an input file can
be created from the given environment with:
conda env export > myenv.yaml
'''
import sys, os, re
if len(sys.argv) < 3:
print("Usage: " + os.path.basename(sys.argv[0]) + " input.yaml mypackage-feedstock")
sys.exit(1)
inFile = sys.argv[1]
outDir = sys.argv[2]
outFile = outDir + "/recipe/meta.yaml"
if not os.path.exists(outFile):
print("Cannot open file: " + outFile)
sys.exit(1)
# parse the versions from the conda env
conda_env = {}
print("Reading: " + inFile)
inHandle = open(inFile, 'r')
lines = inHandle.readlines()
for line in lines:
# Wipe comments
    m = re.match(r'^(.*?)\#', line)
if m:
line = m.group(1)
# Match the package
    m = re.match(r'^\s*-\s*(.*?)\s*=+\s*(.*?)(=|\s|$)', line)
if not m:
continue
package = m.group(1)
version = m.group(2)
    if re.match(r'^\s*$', package):
continue # ignore empty lines
conda_env[package] = version
#print("got ", package, version)
# Update the lines in the output file
outHandle = open(outFile, 'r')
lines = outHandle.readlines()
for it in range(len(lines)):
line = lines[it]
# Ignore comments
    m = re.match(r'^\#', line)
if m:
continue
# Match the package
    m = re.match(r'^(\s+-[\t ]+)([^\s]+)(\s*)(.*?)$', line)
if not m:
continue
pre = m.group(1)
package = m.group(2)
spaces = m.group(3).rstrip("\n")
old_version = m.group(4).rstrip("\n")
if spaces == "":
# Ensure there's at least one space
spaces = " "
if old_version == "":
# If there was no version before, don't put one now
continue
if not package in conda_env:
continue
version = conda_env[package]
if old_version != version:
if ('[linux]' in old_version) or ('[osx]' in old_version):
# In this case the user better take a closer look
print("For package " + package + ", not replacing " +
old_version + " with " + version + ", a closer look is suggested.")
else:
print("For package " + package + ", replacing version "
+ old_version + " with " + version)
            lines[it] = pre + package + spaces + version + "\n"
# Save the updated lines to disk
print("Updating: " + outFile)
outHandle = open(outFile, "w")
outHandle.writelines(lines)
outHandle.close()
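# Worked example (illustrative package name): if input.yaml contains
#   "  - libgdal=3.4.1=h123abc_0"
# and recipe/meta.yaml contains
#   "    - libgdal   3.3.0"
# then the recipe line is rewritten to
#   "    - libgdal   3.4.1"
# while entries carrying "[linux]"/"[osx]" selectors are only reported, not modified.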
|
test.py | uuidd/SimilarCharacter | 199 | 20520 | import cv2
import ProcessWithCV2
img1 = cv2.imread("D:/py/chinese/7.png")
img2 = cv2.imread("D:/py/chinese/8.png")
a = ProcessWithCV2.dHash(img1, img2, 1)
print(a)
|
Medium/valid-ip-addresses.py | SaumyaRai2010/algoexpert-data-structures-algorithms | 152 | 20558 |
# VALID IP ADDRESSES
# O(1) time and space
def validIPAddresses(string):
# Write your code here.
validIPAddresses = []
if len(string) < 4:
return []
for i in range(3):
if not isValidPart(string[:i+1]):
continue
for j in range(i+1, i+4):
if not isValidPart(string[i+1:j+1]):
continue
for k in range(j+1, j+4):
if not isValidPart(string[j+1:k+1]) or not isValidPart(string[k+1:]):
continue
validIP = string[:i+1] + "." + string[i+1:j+1] + "." + string[j+1:k+1] + "." + string[k+1:]
validIPAddresses.append(validIP)
return validIPAddresses
def isValidPart(string):
if len(string) == 1:
return True
if not 0 < len(string) < 4 or string[0] == "0":
return False
return 0 <= int(string) <= 255
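# Illustrative checks (added for exposition):
#   validIPAddresses("1921680")  # includes "1.92.168.0", "19.216.8.0", "192.16.8.0"
#   validIPAddresses("123")      # -> [] (fewer than four characters)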
|
src/packagedcode/windows.py | Siddhant-K-code/scancode-toolkit | 1,511 | 20560 | <reponame>Siddhant-K-code/scancode-toolkit
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import attr
import xmltodict
from packagedcode import models
from commoncode import filetype
# Tracing flags
TRACE = False
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
@attr.s()
class MicrosoftUpdatePackage(models.Package, models.PackageManifest):
extensions = ('.mum',)
filetypes = ('xml 1.0 document',)
mimetypes = ('text/xml',)
default_type = 'windows-update'
@attr.s()
class MicrosoftUpdateManifest(MicrosoftUpdatePackage, models.PackageManifest):
@classmethod
def is_manifest(cls, location):
"""
Return True if the file at ``location`` is likely a manifest of this type.
"""
return filetype.is_file(location) and location.endswith('.mum')
@classmethod
def recognize(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
        with open(location, 'rb') as loc:
parsed = xmltodict.parse(loc)
if TRACE:
logger_debug('parsed:', parsed)
if not parsed:
return
assembly = parsed.get('assembly', {})
description = assembly.get('@description', '')
company = assembly.get('@company', '')
copyright = assembly.get('@copyright', '')
support_url = assembly.get('@supportInformation', '')
assembly_identity = assembly.get('assemblyIdentity', {})
name = assembly_identity.get('@name', '')
version = assembly_identity.get('@version', '')
parties = []
if company:
parties.append(
models.Party(
name=company,
type=models.party_org,
role='owner',
)
)
yield cls(
name=name,
version=version,
description=description,
homepage_url=support_url,
parties=parties,
copyright=copyright,
)
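# Minimal usage sketch (added for exposition; the .mum path below is hypothetical):
#   for manifest in MicrosoftUpdateManifest.recognize('Package_for_KB000000.mum'):
#       print(manifest.name, manifest.version, manifest.homepage_url)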
|
platform-tools/systrace/catapult/devil/devil/utils/run_tests_helper.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 1,894 | 20566 | <gh_stars>1000+
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions common to native, java and host-driven test runners."""
import collections
import logging
from devil.utils import logging_common
CustomFormatter = logging_common.CustomFormatter
_WrappedLoggingArgs = collections.namedtuple('_WrappedLoggingArgs',
['verbose', 'quiet'])
def SetLogLevel(verbose_count, add_handler=True):
"""Sets log level as |verbose_count|.
Args:
verbose_count: Verbosity level.
add_handler: If true, adds a handler with |CustomFormatter|.
"""
logging_common.InitializeLogging(
_WrappedLoggingArgs(verbose_count, 0),
handler=None if add_handler else logging.NullHandler())
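# Example (added for exposition): raise the logging verbosity and attach the
# default CustomFormatter handler.
#   SetLogLevel(verbose_count=2)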
|
main/models/sign.py | fakegit/gxgk-wechat-server | 1,564 | 20572 | <reponame>fakegit/gxgk-wechat-server
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import db
class Sign(db.Model):
__table_args__ = {
'mysql_engine': 'InnoDB',
'mysql_charset': 'utf8mb4'
}
openid = db.Column(db.String(32), primary_key=True, unique=True,
nullable=False)
lastsigntime = db.Column(db.BigInteger, default=0, nullable=False)
totaldays = db.Column(db.SmallInteger, default=0, nullable=False)
keepdays = db.Column(db.SmallInteger, default=0, nullable=False)
def __init__(self, openid, lastsigntime, totaldays, keepdays):
self.openid = openid
self.lastsigntime = lastsigntime
self.totaldays = totaldays
self.keepdays = keepdays
def __repr__(self):
return '<openid %r>' % self.openid
def save(self):
db.session.add(self)
db.session.commit()
return self
def update(self):
db.session.commit()
return self
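# Illustrative usage sketch (added for exposition; assumes an application context
# with the database initialised):
#   record = Sign(openid='example-openid', lastsigntime=0, totaldays=0, keepdays=0)
#   record.save()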
|
src/oscar/apps/customer/__init__.py | QueoLda/django-oscar | 4,639 | 20591 | default_app_config = 'oscar.apps.customer.apps.CustomerConfig'
|
Game22/modules/online/__init__.py | ttkaixin1998/pikachupythongames | 4,013 | 20631 | '''初始化'''
from .server import gobangSever
from .client import gobangClient
from .playOnline import playOnlineUI |
TrainingExtensions/tensorflow/src/python/aimet_tensorflow/utils/op/fusedbatchnorm.py | quic-ykota/aimet | 945 | 20663 | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" utilities for fused batchnorm op """
from typing import Union
import numpy as np
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge
from aimet_common.utils import AimetLogger
from aimet_tensorflow.utils import constants
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
_BN_STRUCTURE_ERROR_MSG = "BN op doesn't have the expected structure"
class BNUtils:
""" Batch Norm/ fused Batch Norm op related utils"""
@staticmethod
def skip_bn_op(sess: tf.compat.v1.Session, bn_op: tf.Operation, in_tensor: tf.Tensor, out_tensor: tf.Tensor):
"""
Skip given bn op specified (fused batch norm op).
Note: supports only Fused bn op types.
:param sess: Tensorflow session
:param bn_op: Batchnorm op to be skipped
:param in_tensor: Input tensor to the batchnorm op
:param out_tensor: Output tensor of the batchnorm op
"""
if in_tensor is None or out_tensor is None:
logger.error("Error, input and output tensors must be provided for skipping the op")
assert False
else:
with sess.graph.as_default():
if bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
ge.detach_outputs(in_tensor.op)
ge.reroute_ts(in_tensor, out_tensor)
BNUtils.remove_bn_op_from_update_ops(sess, bn_op)
else:
logger.error("Error, Unknown BN op")
assert False
@staticmethod
def _get_tensor_read_var_op_trainable_bn_op(input_tensor: tf.Tensor) -> tf.Tensor:
"""
Generic helper to find a read op tensor associated with input tensor that can be evaluated, when the bn op is
marked trainable.
:param input_tensor: Input tensor to find corresponding read op tensor that can be evaluated
:return: read var op type tensor as tf.Tensor type.
"""
logger.debug('Fetching params from trainable BN op type')
assert input_tensor.op.inputs[0].op.inputs is not None
# inputs of 0 is beta tensor , get readVarOp associated with it
var_tensor = input_tensor.op.inputs[0].op.inputs[0]
assert var_tensor.op.outputs is not None
assert len(var_tensor.consumers()) >= 3
tensor_consumers = var_tensor.consumers()
var_read_tensor = None
# get read variable op tensor from these consumers
# do not pick the one with _1 , it is not fetch-able
for consumer in tensor_consumers:
if consumer.type == 'ReadVariableOp' and 'ReadVariableOp_1' not in consumer.name:
assert consumer.outputs is not None
var_read_tensor = consumer.outputs[0]
break
assert var_read_tensor is not None
return var_read_tensor
@staticmethod
def get_beta_read_op(bn_op: tf.Operation) -> tf.Operation:
"""
Get beta read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
:return: beta read op
"""
if bn_op.type in ['Mul']:
# For regular BN
# mul_1 -> add_1 <-- sub <-- beta_read
assert len(bn_op.outputs) >= 1, _BN_STRUCTURE_ERROR_MSG
add_1 = bn_op.outputs[0].consumers()[0]
assert len(add_1.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
sub = add_1.inputs[1].op
assert len(sub.inputs) >= 1, _BN_STRUCTURE_ERROR_MSG
beta_read = sub.inputs[0].op
elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
assert len(bn_op.inputs) == 5
beta_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['beta']].op
if beta_read.type == 'Switch': # tf slim bn using training tensor form
beta_read = beta_read.inputs[0].op
assert 'read' in beta_read.name
else:
logger.error("Error, unknown BN op")
assert False
assert beta_read.type in ['ReadVariableOp', 'Identity'] # Will be identity for tf slim BNs
return beta_read
@staticmethod
def _get_beta_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
"""
Get beta readVariableOp tensor from BN op specified.
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op beta readVariableOp type, as tf.Tensor
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
beta_read_tensor = BNUtils.get_beta_read_op(bn_op).outputs[0]
assert beta_read_tensor is not None
if beta_read_tensor.op.inputs[0].op.type == 'Switch':
logger.debug('Fetching params from trainable BN op type')
beta_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(beta_read_tensor)
return beta_read_tensor
@staticmethod
def get_beta_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get beta readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op beta readVariableOp type, as tf.Tensor
"""
try:
# try name based tensor look up for Keras layers
beta_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.beta)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
beta_read_tensor = BNUtils._get_beta_read_var_op_tensor_using_structure(bn_op)
return beta_read_tensor
@staticmethod
def get_beta_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get beta param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op as tf.Operation
:return: beta tensor as numpy data
"""
beta_tensor = BNUtils.get_beta_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(beta_tensor)
return numpy_data
@staticmethod
def get_gamma_as_read_op(bn_op: tf.Operation) -> tf.Operation:
"""
Get gamma read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
:return: gamma read op
"""
if bn_op.type in ['Mul']:
# For regular BN
# mul_1 <-- mul <-- gamma_read <-- gamma_tensor
assert len(bn_op.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
mul = bn_op.inputs[1].op
assert len(mul.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
gamma_read = mul.inputs[1].op
elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
assert len(bn_op.inputs) == 5
gamma_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['gamma']].op
if gamma_read.type == 'Switch': # tf slim bn using training tensor form
gamma_read = gamma_read.inputs[0].op
assert 'read' in gamma_read.name or gamma_read.type == 'Const'
else:
logger.error("Error, unknown BN op")
assert False
assert gamma_read.type in ['ReadVariableOp', 'Identity', 'Const'] # Will be identity for tf slim BNs
return gamma_read
@staticmethod
def _get_gamma_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
"""
Get the gamma read var op tensor associated with the batchnorm op.
:param bn_op: Batchnorm op to get gamma read var op tensor from
:return: Gamma read var op tensor associated with bn_op
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
gamma_read_tensor = BNUtils.get_gamma_as_read_op(bn_op).outputs[0]
assert gamma_read_tensor is not None
if gamma_read_tensor.op.inputs and gamma_read_tensor.op.inputs[0].op.type == 'Switch':
logger.debug('Fetching params from trainable BN op type')
gamma_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(gamma_read_tensor)
return gamma_read_tensor
@staticmethod
def get_gamma_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get the gamma read var op tensor associated with the batchnorm op.
:param graph: TensorFlow graph
:param bn_op: Batchnorm op to get gamma read var op tensor from
:return: Gamma read var op tensor associated with bn_op
"""
try:
# try name based tensor look up for Keras layers
gamma_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.gamma)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
gamma_read_tensor = BNUtils._get_gamma_read_var_op_tensor_using_structure(bn_op)
return gamma_read_tensor
@staticmethod
def get_gamma_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get gamma param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
:return: gamma as numpy data
"""
gamma_tensor = BNUtils.get_gamma_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(gamma_tensor)
return numpy_data
@staticmethod
def _bn_op_var_struct_1(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_variance op corresponding to batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
merge_op = add_op.inputs[0].op
assert merge_op.type == 'Merge'
read_op = merge_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_var_struct_2(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_variance op corresponding to batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
squeeze_1_op = add_op.inputs[0].op
assert squeeze_1_op.type == 'Squeeze'
sub_op = squeeze_1_op.outputs[0].consumers()[0]
assert sub_op.type == 'Sub'
read_op = sub_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_var_struct_3(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_variance op corresponding to batchnorm with training=False.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
read_op = add_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def get_moving_variance_as_read_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Get moving variance read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving variance as read op
"""
# register handlers for different structures
bn_op_struct_for_variance_handlers = [BNUtils._bn_op_var_struct_1,
BNUtils._bn_op_var_struct_2,
BNUtils._bn_op_var_struct_3]
if bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
assert len(bn_op.inputs) == 5
moving_var_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['movingvariance']].op
if moving_var_read.type == 'Switch': # tf slim bn using training tensor form
moving_var_read = moving_var_read.inputs[0].op
assert 'read' in moving_var_read.name
elif bn_op.type in ['Mul']:
# For regular BN
moving_var_read = None
# try all handlers available
for handler in bn_op_struct_for_variance_handlers:
if moving_var_read is None:
moving_var_read = handler(bn_op)
else:
break
assert moving_var_read is not None, _BN_STRUCTURE_ERROR_MSG
else:
logger.error("Error, unknown BN op")
assert False
if moving_var_read.type == 'Identity':
assert len(moving_var_read.inputs) == 1, _BN_STRUCTURE_ERROR_MSG
assert moving_var_read.type in ['ReadVariableOp', 'Const', 'Identity']
return moving_var_read
@staticmethod
def _get_moving_variance_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
"""
Get moving variance readVariableOp tensor from BN op specified.
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving variance readVariableOp type, as tf.Tensor
"""
# only support fused BN
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
moving_var_read_tensor = BNUtils.get_moving_variance_as_read_op(bn_op).outputs[0]
assert moving_var_read_tensor is not None
if moving_var_read_tensor.op.type == 'Const':
logger.debug("BN op has const type op for moving variance")
# get the sub_1 op associated with moving variance read op
            assert len(bn_op.outputs) >= 3
moving_avg_1_sub_1 = bn_op.outputs[2].consumers()[0]
all_inputs = moving_avg_1_sub_1.inputs
# among inputs figure out the read var op type that can be "evaluated"
for input_t in all_inputs:
if input_t.op.type == 'ReadVariableOp':
moving_var_read_tensor = input_t
elif input_t.op.type == 'Identity' and 'read:0' in input_t.name: # tf slim form
moving_var_read_tensor = input_t
elif moving_var_read_tensor.op.inputs[0].op.type == 'Switch':
logger.debug("Fetch moving var from a trainable BN op structure")
moving_var_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(moving_var_read_tensor)
return moving_var_read_tensor
@staticmethod
def get_moving_variance_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get moving variance readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving variance readVariableOp type, as tf.Tensor
"""
try:
# try name based tensor look up for Keras layers
moving_var_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.moving_variance)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
moving_var_read_tensor = BNUtils._get_moving_variance_read_var_op_tensor_using_structure(bn_op)
return moving_var_read_tensor
@staticmethod
def get_moving_variance_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get moving variance param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving variance as numpy data
"""
moving_var_tensor = BNUtils.get_moving_variance_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(moving_var_tensor)
return numpy_data
@staticmethod
def _bn_op_mean_struct_1(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_mean op corresponding to batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_mean
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
merge_op = mul_2_op.inputs[0].op
assert merge_op.type == 'Merge'
read_op = merge_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_mean_struct_2(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_mean op corresponding to batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_mean
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
squeeze_op = mul_2_op.inputs[0].op
assert squeeze_op.type == 'Squeeze'
sub_op = squeeze_op.outputs[0].consumers()[0]
assert sub_op.type == 'Sub'
read_op = sub_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_mean_struct_3(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_mean op corresponding to batchnorm with training=False.
:param bn_op: bn_op obtained from connected graph using get_modules
a mul_1 op inside BN scope.
:return: Read operation for moving_mean
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
read_op = mul_2_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def get_moving_mean_as_read_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Get moving mean read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving mean read op
"""
if bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
assert len(bn_op.inputs) == 5
moving_mean_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['movingmean']].op
if moving_mean_read.type == 'Switch': # tf slim bn using training tensor form
moving_mean_read = moving_mean_read.inputs[0].op
assert 'read' in moving_mean_read.name
elif bn_op.type in ['Mul']:
# For regular BN
            # mul_1 <-- mul --> mul_2 <-- cond/merge <-- switch_2 <-- moving_mean_read <-- moving_mean_tensor
            # inputs[1] is mul; its op.inputs[1] is the gamma read op, whose input is the gamma VariableV2 tensor
# register handlers for different structures
bn_op_struct_for_mean_handlers = [BNUtils._bn_op_mean_struct_1,
BNUtils._bn_op_mean_struct_2,
BNUtils._bn_op_mean_struct_3]
moving_mean_read = None
# try all handlers available
for handler in bn_op_struct_for_mean_handlers:
if moving_mean_read is None:
moving_mean_read = handler(bn_op)
else:
break
assert moving_mean_read is not None, _BN_STRUCTURE_ERROR_MSG
else:
logger.error("Error, unknown BN op")
assert False
if moving_mean_read.type == 'Identity':
assert len(moving_mean_read.inputs) == 1, _BN_STRUCTURE_ERROR_MSG
assert moving_mean_read.type in ['ReadVariableOp', 'Const', 'Identity']
return moving_mean_read
@staticmethod
def _get_moving_mean_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
"""
Get moving mean readVariableOp tensor from BN op specified.
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving mean readVariableOp type, as tf.Tensor
"""
# only support fused BN
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
moving_mean_read_tensor = BNUtils.get_moving_mean_as_read_op(bn_op).outputs[0]
assert moving_mean_read_tensor is not None
if moving_mean_read_tensor.op.type == 'Const':
logger.debug("BN op has const type op for moving variance")
# get the read var type from bn op
# get the sub_1 op associated with moving mean read op
assert len(bn_op.outputs) > 1
moving_avg_sub_1 = bn_op.outputs[1].consumers()[0]
all_inputs = moving_avg_sub_1.inputs
# among inputs figure out the read var op type that can be "evaluated"
for input_t in all_inputs:
if input_t.op.type == 'ReadVariableOp':
moving_mean_read_tensor = input_t
elif input_t.op.type == 'Identity' and 'read:0' in input_t.name: # tf slim form
moving_mean_read_tensor = input_t
elif moving_mean_read_tensor.op.inputs[0].op.type == 'Switch':
logger.debug("Fetch moving var from a trainable BN op structure")
moving_mean_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(moving_mean_read_tensor)
return moving_mean_read_tensor
@staticmethod
def get_moving_mean_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get moving mean readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving mean readVariableOp type, as tf.Tensor
"""
try:
# try name based tensor look up for Keras layers
moving_mean_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.moving_mean)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
moving_mean_read_tensor = BNUtils._get_moving_mean_read_var_op_tensor_using_structure(bn_op)
return moving_mean_read_tensor
@staticmethod
def get_moving_mean_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get moving mean param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving mean as numpy data
"""
moving_mean_tensor = BNUtils.get_moving_mean_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(moving_mean_tensor)
return numpy_data
@staticmethod
def get_epsilon(bn_op: tf.Operation) -> float:
"""
Returns epsilon extracted from given bn op.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: epsilon value
"""
if bn_op.type in ['Mul']:
assert len(bn_op.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
mul = bn_op.inputs[1].op
assert len(mul.inputs) >= 1, _BN_STRUCTURE_ERROR_MSG
rsqrt = mul.inputs[0].op
assert len(rsqrt.inputs) >= 1, _BN_STRUCTURE_ERROR_MSG
add = rsqrt.inputs[0].op
assert len(add.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
epsilon = add.inputs[1].op
numpy_epsilon = epsilon.get_attr('value').float_val[0]
elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
# epsilon can be derived as attribute value
numpy_epsilon = bn_op.get_attr("epsilon")
else:
logger.error("Error, unknown BN op")
assert False
return numpy_epsilon
@staticmethod
def get_assign_moving_avg_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Get assign_moving_avg op corresponding with the bn_op, if it exists.
:param bn_op: Batchnorm op to search for corresponding assign_moving_avg op
:return: assign_moving_op corresponding with the bn op, or None if it does not exist.
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']
assert len(bn_op.outputs) == 6 or len(bn_op.outputs) == 5
if bn_op.outputs[1].consumers():
child_op = bn_op.outputs[1].consumers()[0]
if child_op.type == 'Merge':
sub_op = child_op.outputs[0].consumers()[0]
else:
sub_op = child_op
assert sub_op.type == 'Sub'
mul_op = sub_op.outputs[0].consumers()[0]
assert mul_op.type == 'Mul'
assign_moving_avg_op = mul_op.outputs[0].consumers()[0]
assert assign_moving_avg_op.type in ['AssignSub', 'AssignSubVariableOp']
return assign_moving_avg_op
return None
@staticmethod
def get_assign_moving_avg_1_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Get assign_moving_avg_1 op corresponding with the bn_op, if it exists.
:param bn_op: Batchnorm op to search for corresponding assign_moving_avg_1 op
:return: assign_moving_avg_1 corresponding with the bn op, or None if it does not exist.
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']
assert len(bn_op.outputs) == 6 or len(bn_op.outputs) == 5
if bn_op.outputs[2].consumers():
child_op = bn_op.outputs[2].consumers()[0]
if child_op.type == 'Merge':
sub_op = child_op.outputs[0].consumers()[0]
else:
sub_op = child_op
assert sub_op.type == 'Sub'
mul_op = sub_op.outputs[0].consumers()[0]
assert mul_op.type == 'Mul'
assign_moving_avg_op = mul_op.outputs[0].consumers()[0]
assert assign_moving_avg_op.type in ['AssignSub', 'AssignSubVariableOp']
return assign_moving_avg_op
return None
@staticmethod
def remove_bn_op_from_update_ops(sess: tf.compat.v1.Session, bn_op: tf.Operation):
"""
Remove batchnorm assign_moving_avg and assign_moving_avg_1 ops from update ops.
:param sess: tf.compat.v1.Session
:param bn_op: BatchNorm operation whose assign_moving_avg and assign_moving_avg_1 ops should be removed.
"""
with sess.graph.as_default():
update_ops = tf.compat.v1.get_collection_ref(tf.compat.v1.GraphKeys.UPDATE_OPS)
assign_moving_avg_op = BNUtils.get_assign_moving_avg_op(bn_op)
assign_moving_avg_op_1 = BNUtils.get_assign_moving_avg_1_op(bn_op)
if assign_moving_avg_op and assign_moving_avg_op in update_ops:
update_ops.remove(assign_moving_avg_op)
logger.debug('Removed %s from update ops', assign_moving_avg_op.name)
if assign_moving_avg_op_1 and assign_moving_avg_op_1 in update_ops:
update_ops.remove(assign_moving_avg_op_1)
logger.debug('Removed %s from update ops', assign_moving_avg_op_1.name)
@staticmethod
def _get_bn_param_tensor_using_name(graph: tf.Graph, bn_op: tf.Operation, param_type: constants.BNOpParamType):
"""
Helper to get BN op param read tensor.
:param graph: TensorFlow graph
:param bn_op: BN op from which param read tensor is to be extracted
:param param_type: param type for which param tensor is to be extracted, as constants.BNOpParamType (supported
types are beta, gamma, moving_mean or moving_variance)
:return: param read tensor
"""
if param_type not in vars(constants.BNOpParamType).values():
            assert 0, 'Error, _get_bn_param_tensor_using_name() invalid param type requested'
# name of the fused bn contains bn_name/FusedBatchNormV3 or
# bn_name/cond/FusedBatchNormV3_1
# we need only the bn_name to make param tensor names
op_name = bn_op.name.split('/')[0]
param_tensor_name = op_name + constants.BN_OP_PARAM_NAME_SUFFIX[param_type]
param_tensor = graph.get_tensor_by_name(param_tensor_name)
return param_tensor
@staticmethod
def _bn_op_momentum_struct_1(bn_op: tf.Operation) -> Union[float, None]:
"""
Return momentum value corresponding to batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: momentum value
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
merge_op = mul_2_op.inputs[0].op
assert merge_op.type == 'Merge'
switch_1_op = merge_op.outputs[0].consumers()[0]
assert switch_1_op.type == 'Switch'
sub_op = switch_1_op.outputs[1].consumers()[0]
assert sub_op.type == 'Sub'
assign_moving_avg_mul_op = sub_op.outputs[0].consumers()[0]
assert assign_moving_avg_mul_op.type == 'Mul'
decay_op = assign_moving_avg_mul_op.inputs[1].op
assert decay_op.type == 'Const'
decay = decay_op.get_attr('value').float_val[0]
return 1 - decay
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_momentum_struct_2(bn_op: tf.Operation) -> Union[float, None]:
"""
Return momentum value corresponding to batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: momentum value
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
squeeze_op = mul_2_op.inputs[0].op
assert squeeze_op.type == 'Squeeze'
sub_op = squeeze_op.outputs[0].consumers()[0]
assert sub_op.type == 'Sub'
assign_moving_avg_mul_op = sub_op.outputs[0].consumers()[0]
assert assign_moving_avg_mul_op.type == 'Mul'
decay_op = assign_moving_avg_mul_op.inputs[1].op
assert decay_op.type == 'Const'
decay = decay_op.get_attr('value').float_val[0]
return 1 - decay
except: # pylint: disable=bare-except
return None
@staticmethod
def _fused_bn_op_momentum_struct_1(bn_op: tf.Operation) -> Union[float, None]:
"""
Return momentum value corresponding to fused batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: momentum value
"""
try:
merge_1_op = bn_op.outputs[1].consumers()[0]
assert merge_1_op.type == 'Merge'
sub_op = merge_1_op.outputs[0].consumers()[0]
assert sub_op.type == 'Sub'
mul_op = sub_op.outputs[0].consumers()[0]
assert mul_op.type == 'Mul'
sub_2_op = mul_op.inputs[1].op
assert sub_2_op.type == 'Sub'
merge_op = sub_2_op.inputs[1].op
assert merge_op.type == 'Merge'
decay_op = merge_op.inputs[1].op
assert decay_op.type == 'Const'
decay = decay_op.get_attr('value').float_val[0]
return decay
except: # pylint: disable=bare-except
return None
@staticmethod
def _fused_bn_op_momentum_struct_2(bn_op: tf.Operation) -> Union[float, None]:
"""
Return momentum value corresponding to fused batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: momentum value
"""
try:
sub_op = bn_op.outputs[1].consumers()[0]
assert sub_op.type == 'Sub'
mul_op = sub_op.outputs[0].consumers()[0]
assert mul_op.type == 'Mul'
sub_2_op = mul_op.inputs[1].op
assert sub_2_op.type == 'Sub'
decay_op = sub_2_op.inputs[1].op
assert decay_op.type == 'Const'
decay = decay_op.get_attr('value').float_val[0]
return decay
except: # pylint: disable=bare-except
return None
@staticmethod
def get_momentum(bn_op: tf.Operation) -> float:
"""
Returns momentum extracted from given bn op. If bn op is training=False mode, momentum will be none.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: momentum value
"""
# register handlers for different structures
bn_op_struct_for_momentum_handlers = [BNUtils._bn_op_momentum_struct_1,
BNUtils._bn_op_momentum_struct_2]
fused_bn_op_struct_for_momentum_handlers = [BNUtils._fused_bn_op_momentum_struct_1,
BNUtils._fused_bn_op_momentum_struct_2]
decay = None
if bn_op.type in ['Mul']:
# try all handlers available
for handler in bn_op_struct_for_momentum_handlers:
if decay is None:
decay = handler(bn_op)
else:
break
elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
# try all handlers available
for handler in fused_bn_op_struct_for_momentum_handlers:
if decay is None:
decay = handler(bn_op)
else:
break
else:
logger.error("Error, unknown BN op")
assert False
return decay
@staticmethod
def get_training(bn_op: tf.Operation) -> Union[None, bool, tf.Tensor]:
"""
Returns either a boolean of whether the BN op training mode is True or False, or the is_training tensor
feeding into the BN op if it is using a tensor to determine the mode dynamically.
:param bn_op: bn_op obtained in the connected graph
:return: True or False for training mode, or tf.Tensor that determines the mode dynamically.
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
if bn_op.type == 'FusedBatchNormV3' or bn_op.type == 'FusedBatchNorm':
if 'FusedBatchNormV3_1' in bn_op.name:
switch_op = bn_op.inputs[0].op
pred_id_op = switch_op.inputs[1].op
training = pred_id_op.inputs[0]
else:
training = bn_op.get_attr('is_training')
return training
# Non fused batchnorm case
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
add_input_op = add_op.inputs[0].op
if add_input_op.type == 'Squeeze':
return True
if add_input_op.type == 'ReadVariableOp':
return False
if add_input_op.type == 'Merge':
switch_op = add_input_op.inputs[1].op
assert switch_op.type == 'Switch'
pred_id_op = switch_op.inputs[1].op
assert pred_id_op.type == 'Identity'
return pred_id_op.inputs[0]
logger.error('Error, unknown BN structure')
return None
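# Illustrative usage sketch (added for exposition; 'bn/FusedBatchNormV3' is a
# hypothetical op name and `sess` an existing tf.compat.v1.Session):
#   bn_op = sess.graph.get_operation_by_name('bn/FusedBatchNormV3')
#   gamma = BNUtils.get_gamma_as_numpy_data(sess, bn_op)
#   beta = BNUtils.get_beta_as_numpy_data(sess, bn_op)
#   mean = BNUtils.get_moving_mean_as_numpy_data(sess, bn_op)
#   var = BNUtils.get_moving_variance_as_numpy_data(sess, bn_op)
#   eps = BNUtils.get_epsilon(bn_op)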
|
release/alert.py | 77loopin/ray | 21,382 | 20675 | <filename>release/alert.py<gh_stars>1000+
import argparse
from collections import defaultdict, Counter
from typing import Any, List, Tuple, Mapping, Optional
import datetime
import hashlib
import json
import logging
import os
import requests
import sys
import boto3
from e2e import GLOBAL_CONFIG
from alerts.default import handle_result as default_handle_result
from alerts.rllib_tests import handle_result as rllib_tests_handle_result
from alerts.long_running_tests import handle_result as \
long_running_tests_handle_result
from alerts.tune_tests import handle_result as tune_tests_handle_result
from alerts.xgboost_tests import handle_result as xgboost_tests_handle_result
SUITE_TO_FN = {
"long_running_tests": long_running_tests_handle_result,
"rllib_tests": rllib_tests_handle_result,
"tune_tests": tune_tests_handle_result,
"xgboost_tests": xgboost_tests_handle_result,
}
GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"] = "alert_state"
GLOBAL_CONFIG["SLACK_WEBHOOK"] = os.environ.get("SLACK_WEBHOOK", "")
GLOBAL_CONFIG["SLACK_CHANNEL"] = os.environ.get("SLACK_CHANNEL",
"#oss-test-cop")
RESULTS_LIMIT = 120
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def maybe_fetch_slack_webhook():
if GLOBAL_CONFIG["SLACK_WEBHOOK"] in [None, ""]:
print("Missing SLACK_WEBHOOK, retrieving from AWS secrets store")
GLOBAL_CONFIG["SLACK_WEBHOOK"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"release-automation/"
"slack-webhook-Na0CFP")["SecretString"]
def _obj_hash(obj: Any) -> str:
json_str = json.dumps(obj, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
def fetch_latest_alerts(rds_data_client):
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"]
sql = (f"""
SELECT DISTINCT ON (category, test_suite, test_name)
category, test_suite, test_name, last_result_hash,
last_notification_dt
FROM {schema}
ORDER BY category, test_suite, test_name, last_notification_dt DESC
LIMIT {RESULTS_LIMIT}
""")
result = rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
)
for row in result["records"]:
category, test_suite, test_name, last_result_hash, \
last_notification_dt = (
r["stringValue"]
if "stringValue" in r else None
for r in row
)
last_notification_dt = datetime.datetime.strptime(
last_notification_dt, "%Y-%m-%d %H:%M:%S")
yield category, test_suite, test_name, last_result_hash, \
last_notification_dt
def fetch_latest_results(rds_data_client,
fetch_since: Optional[datetime.datetime] = None):
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]
sql = (f"""
SELECT DISTINCT ON (category, test_suite, test_name)
created_on, category, test_suite, test_name, status, results,
artifacts, last_logs
FROM {schema} """)
parameters = []
if fetch_since is not None:
sql += "WHERE created_on >= :created_on "
parameters = [
{
"name": "created_on",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": fetch_since.strftime("%Y-%m-%d %H:%M:%S")
},
},
]
sql += "ORDER BY category, test_suite, test_name, created_on DESC "
sql += f"LIMIT {RESULTS_LIMIT}"
result = rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
parameters=parameters,
)
for row in result["records"]:
created_on, category, test_suite, test_name, status, results, \
artifacts, last_logs = (
r["stringValue"] if "stringValue" in r else None for r in row)
# Calculate hash before converting strings to objects
result_obj = (created_on, category, test_suite, test_name, status,
results, artifacts, last_logs)
result_json = json.dumps(result_obj)
result_hash = _obj_hash(result_json)
# Convert some strings to python objects
created_on = datetime.datetime.strptime(created_on,
"%Y-%m-%d %H:%M:%S")
results = json.loads(results)
artifacts = json.loads(artifacts)
yield result_hash, created_on, category, test_suite, test_name, \
status, results, artifacts, last_logs
def mark_as_handled(rds_data_client, update: bool, category: str,
test_suite: str, test_name: str, result_hash: str,
last_notification_dt: datetime.datetime):
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"]
if not update:
sql = (f"""
INSERT INTO {schema}
(category, test_suite, test_name,
last_result_hash, last_notification_dt)
VALUES (:category, :test_suite, :test_name,
:last_result_hash, :last_notification_dt)
""")
else:
sql = (f"""
UPDATE {schema}
SET last_result_hash=:last_result_hash,
last_notification_dt=:last_notification_dt
WHERE category=:category AND test_suite=:test_suite
AND test_name=:test_name
""")
rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
parameters=[
{
"name": "category",
"value": {
"stringValue": category
}
},
{
"name": "test_suite",
"value": {
"stringValue": test_suite or ""
}
},
{
"name": "test_name",
"value": {
"stringValue": test_name
}
},
{
"name": "last_result_hash",
"value": {
"stringValue": result_hash
}
},
{
"name": "last_notification_dt",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": last_notification_dt.strftime(
"%Y-%m-%d %H:%M:%S")
},
},
],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
)
def post_alerts_to_slack(channel: str, alerts: List[Tuple[str, str, str, str]],
non_alerts: Mapping[str, int]):
if len(alerts) == 0:
logger.info("No alerts to post to slack.")
return
markdown_lines = [
f"* {len(alerts)} new release test failures found!*",
"",
]
category_alerts = defaultdict(list)
for (category, test_suite, test_name, alert) in alerts:
category_alerts[category].append(
f" *{test_suite}/{test_name}* failed: {alert}")
for category, alert_list in category_alerts.items():
markdown_lines.append(f"Branch: *{category}*")
markdown_lines.extend(alert_list)
markdown_lines.append("")
total_non_alerts = sum(n for n in non_alerts.values())
non_alert_detail = [f"{n} on {c}" for c, n in non_alerts.items()]
markdown_lines += [
f"Additionally, {total_non_alerts} tests passed successfully "
f"({', '.join(non_alert_detail)})."
]
slack_url = GLOBAL_CONFIG["SLACK_WEBHOOK"]
resp = requests.post(
slack_url,
json={
"text": "\n".join(markdown_lines),
"channel": channel,
"username": "Fail Bot",
"icon_emoji": ":red_circle:",
},
)
print(resp.status_code)
print(resp.text)
def post_statistics_to_slack(channel: str,
alerts: List[Tuple[str, str, str, str]],
non_alerts: Mapping[str, int]):
total_alerts = len(alerts)
category_alerts = defaultdict(list)
for (category, test_suite, test_name, alert) in alerts:
category_alerts[category].append(f"`{test_suite}/{test_name}`")
alert_detail = [f"{len(a)} on {c}" for c, a in category_alerts.items()]
total_non_alerts = sum(n for n in non_alerts.values())
non_alert_detail = [f"{n} on {c}" for c, n in non_alerts.items()]
markdown_lines = [
"*Periodic release test report*", "", f"In the past 24 hours, "
f"*{total_non_alerts}* release tests finished successfully, and "
f"*{total_alerts}* release tests failed."
]
markdown_lines.append("")
if total_alerts:
markdown_lines.append(f"*Failing:* {', '.join(alert_detail)}")
for c, a in category_alerts.items():
markdown_lines.append(f" *{c}*: {', '.join(sorted(a))}")
else:
markdown_lines.append("*Failing:* None")
markdown_lines.append("")
if total_non_alerts:
markdown_lines.append(f"*Passing:* {', '.join(non_alert_detail)}")
else:
markdown_lines.append("*Passing:* None")
slack_url = GLOBAL_CONFIG["SLACK_WEBHOOK"]
resp = requests.post(
slack_url,
json={
"text": "\n".join(markdown_lines),
"channel": channel,
"username": "Fail Bot",
"icon_emoji": ":red_circle:",
},
)
print(resp.status_code)
print(resp.text)
def handle_results_and_get_alerts(
rds_data_client,
fetch_since: Optional[datetime.datetime] = None,
always_try_alert: bool = False,
no_status_update: bool = False):
# First build a map of last notifications
last_notifications_map = {}
for category, test_suite, test_name, last_result_hash, \
last_notification_dt in fetch_latest_alerts(rds_data_client):
last_notifications_map[(category, test_suite,
test_name)] = (last_result_hash,
last_notification_dt)
alerts = []
non_alerts = Counter()
# Then fetch latest results
for result_hash, created_on, category, test_suite, test_name, status, \
results, artifacts, last_logs in fetch_latest_results(
rds_data_client, fetch_since=fetch_since):
key = (category, test_suite, test_name)
try_alert = always_try_alert
if key in last_notifications_map:
# If we have an alert for this key, fetch info
last_result_hash, last_notification_dt = last_notifications_map[
key]
if last_result_hash != result_hash:
# If we got a new result, handle new result
try_alert = True
# Todo: maybe alert again after some time?
else:
try_alert = True
if try_alert:
handle_fn = SUITE_TO_FN.get(test_suite, None)
if not handle_fn:
logger.warning(f"No handle for suite {test_suite}")
alert = default_handle_result(created_on, category, test_suite,
test_name, status, results,
artifacts, last_logs)
else:
alert = handle_fn(created_on, category, test_suite, test_name,
status, results, artifacts, last_logs)
if alert:
logger.warning(
f"Alert raised for test {test_suite}/{test_name} "
f"({category}): {alert}")
alerts.append((category, test_suite, test_name, alert))
else:
logger.debug(
f"No alert raised for test {test_suite}/{test_name} "
f"({category})")
non_alerts[category] += 1
if not no_status_update:
mark_as_handled(rds_data_client, key in last_notifications_map,
category, test_suite, test_name, result_hash,
datetime.datetime.now())
return alerts, non_alerts
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--stats",
action="store_true",
default=False,
help="Finish quickly for training.")
args = parser.parse_args()
maybe_fetch_slack_webhook()
rds_data_client = boto3.client("rds-data", region_name="us-west-2")
if args.stats:
# Only update last 24 hour stats
fetch_since = datetime.datetime.now() - datetime.timedelta(days=1)
alerts, non_alerts = handle_results_and_get_alerts(
rds_data_client,
fetch_since=fetch_since,
always_try_alert=True,
no_status_update=True)
post_statistics_to_slack(GLOBAL_CONFIG["SLACK_CHANNEL"], alerts,
non_alerts)
else:
alerts, non_alerts = handle_results_and_get_alerts(rds_data_client)
post_alerts_to_slack(GLOBAL_CONFIG["SLACK_CHANNEL"], alerts,
non_alerts)
|
d3rlpy/algos/torch/td3_impl.py | ningyixue/AIPI530_Final_Project | 565 | 20717 | from typing import Optional, Sequence
import torch
from ...gpu import Device
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch
from .ddpg_impl import DDPGImpl
class TD3Impl(DDPGImpl):
_target_smoothing_sigma: float
_target_smoothing_clip: float
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
target_smoothing_sigma: float,
target_smoothing_clip: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._target_smoothing_sigma = target_smoothing_sigma
self._target_smoothing_clip = target_smoothing_clip
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._targ_policy is not None
assert self._targ_q_func is not None
with torch.no_grad():
action = self._targ_policy(batch.next_observations)
# smoothing target
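            # Target policy smoothing (as in TD3): clipped Gaussian noise is added
            # to the target action so the bootstrapped Q-value target is less
            # sensitive to errors in the deterministic target policy.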
noise = torch.randn(action.shape, device=batch.device)
scaled_noise = self._target_smoothing_sigma * noise
clipped_noise = scaled_noise.clamp(
-self._target_smoothing_clip, self._target_smoothing_clip
)
smoothed_action = action + clipped_noise
clipped_action = smoothed_action.clamp(-1.0, 1.0)
return self._targ_q_func.compute_target(
batch.next_observations,
clipped_action,
reduction=self._target_reduction_type,
)
|
docs/modelserving/detect/aif/germancredit/simulate_predicts.py | chinhuang007/website | 1,146 | 20768 | <reponame>chinhuang007/website
import sys
import json
import time
import requests
if len(sys.argv) < 3:
raise Exception("No endpoint specified. ")
endpoint = sys.argv[1]
headers = {
'Host': sys.argv[2]
}
with open('input.json') as file:
sample_file = json.load(file)
inputs = sample_file["instances"]
# Split inputs into chunks of size 15 and send them to the predict server
print("Sending prediction requests...")
time_before = time.time()
res = requests.post(endpoint, json={"instances": inputs}, headers=headers)
for x in range(0, len(inputs), 15):
query_inputs = inputs[x: x+20]
payload = {"instances": query_inputs}
res = requests.post(endpoint, json=payload, headers=headers)
print(res)
if not res.ok:
res.raise_for_status()
print("TIME TAKEN: ", time.time() - time_before)
print("Last response: ", res.json())
|
sdc/ysdc_dataset_api/utils/serialization.py | sty61010/shifts | 156 | 20769 | import io
import zlib
import numpy as np
def maybe_compress(data, compress):
    return zlib.compress(data) if compress else data
def maybe_decompress(data, decompress):
    return zlib.decompress(data) if decompress else data
def serialize_numpy(arr: np.ndarray, compress: bool = False) -> bytes:
    """Serializes a numpy array to bytes with optional zlib compression.
    Args:
        arr (np.ndarray): Numpy array to serialize.
        compress (bool, optional): Whether to compress the resulting bytes with zlib or not.
            Defaults to False.
    Returns:
        bytes: serialized bytes
    """
buf = io.BytesIO()
assert isinstance(arr, np.ndarray)
np.save(buf, arr)
result = buf.getvalue()
return maybe_compress(result, compress)
def deserialize_numpy(serialized_string: bytes, decompress: bool = False) -> np.ndarray:
    """Deserializes a numpy array from a (possibly compressed) byte string.
    Args:
        serialized_string (bytes): Serialized numpy array
        decompress (bool, optional): Whether to decompress the input with zlib before loading.
            Defaults to False.
Returns:
np.ndarray: deserialized numpy array
"""
    data = maybe_decompress(serialized_string, decompress)
    buf = io.BytesIO(data)
return np.load(buf)
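# Minimal round-trip sketch (illustrative only, not part of the original module):
# compress an array to bytes, then restore it and verify the contents match.
if __name__ == "__main__":
    original = np.arange(12, dtype=np.float32).reshape(3, 4)
    payload = serialize_numpy(original, compress=True)
    restored = deserialize_numpy(payload, decompress=True)
    assert np.array_equal(original, restored)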
|
hubspot/discovery/crm/extensions/videoconferencing/discovery.py | fakepop/hubspot-api-python | 117 | 20770 | <filename>hubspot/discovery/crm/extensions/videoconferencing/discovery.py
import hubspot.crm.extensions.videoconferencing as api_client
from ....discovery_base import DiscoveryBase
class Discovery(DiscoveryBase):
@property
def settings_api(self) -> api_client.SettingsApi:
return self._configure_api_client(api_client, "SettingsApi")
|
tests/inputs/config.py | hsh-nids/python-betterproto | 708 | 20808 | # Test cases that are expected to fail, e.g. unimplemented features or bug-fixes.
# Remove from list when fixed.
xfail = {
"namespace_keywords", # 70
"googletypes_struct", # 9
"googletypes_value", # 9
"import_capitalized_package",
"example", # This is the example in the readme. Not a test.
}
services = {
"googletypes_response",
"googletypes_response_embedded",
"service",
"service_separate_packages",
"import_service_input_message",
"googletypes_service_returns_empty",
"googletypes_service_returns_googletype",
"example_service",
"empty_service",
}
# Indicate json sample messages to skip when testing that json (de)serialization
# is symmetrical because some cases legitimately are not symmetrical.
# Each key references the name of the test scenario and the values in the tuple
# Are the names of the json files.
non_symmetrical_json = {"empty_repeated": ("empty_repeated",)}
|
dp/kadane.py | williamsmj/prakhar1989-algorithms | 2,797 | 20835 | """
Problem: The maximum subarray problem is the task of finding the
contiguous subarray within a one-dimensional array of numbers
(containing at least one positive number) which has the largest sum.
Solution:
The recurrence relation that we solve at each step is the following -
Let S[i] be the maximum sum of a contiguous subsequence ending at the ith element
of the array.
Then S[i] = max(A[i], A[i] + S[i - 1])
At each step, we have two options
1) We add the ith element to the best sum ending at the (i-1)th element
2) We start a new array starting at i
We take a max of both these options and accordingly build up the array.
"""
def max_value_contigous_subsequence(arr):
A = [arr[0]] + [0] * (len(arr) - 1)
max_to_here = arr[0]
for i in range(1, len(arr)):
A[i] = max(arr[i], arr[i] + A[i-1])
max_to_here = max(max_to_here, A[i])
return max_to_here
if __name__ == "__main__":
x = [-2, -3, 4, -1, -2, 1, 5, -3]
y = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
z = [-1, 3, -5, 4, 6, -1, 2, -7, 13, -3]
    print(list(map(max_value_contigous_subsequence, [x, y, z])))
|
venv/Lib/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py | EkremBayar/bayar | 603 | 20851 | import numpy as np
from matplotlib import _api
from .axes_divider import make_axes_locatable, Size
from .mpl_axes import Axes
@_api.delete_parameter("3.3", "add_all")
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True, **kwargs):
"""
Parameters
----------
pad : float
Fraction of the axes height.
"""
divider = make_axes_locatable(ax)
pad_size = pad * Size.AxesY(ax)
xsize = ((1-2*pad)/3) * Size.AxesX(ax)
ysize = ((1-2*pad)/3) * Size.AxesY(ax)
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
if axes_class is None:
try:
axes_class = ax._axes_class
except AttributeError:
axes_class = type(ax)
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(), ax.get_position(original=True),
sharex=ax, sharey=ax, **kwargs)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
t.set_visible(False)
try:
for axis in ax1.axis.values():
axis.major_ticklabels.set_visible(False)
except AttributeError:
pass
ax_rgb.append(ax1)
if add_all:
fig = ax.get_figure()
for ax1 in ax_rgb:
fig.add_axes(ax1)
return ax_rgb
@_api.deprecated("3.3", alternative="ax.imshow(np.dstack([r, g, b]))")
def imshow_rgb(ax, r, g, b, **kwargs):
return ax.imshow(np.dstack([r, g, b]), **kwargs)
class RGBAxes:
"""
4-panel imshow (RGB, R, G, B).
Layout:
+---------------+-----+
| | R |
+ +-----+
| RGB | G |
+ +-----+
| | B |
+---------------+-----+
Subclasses can override the ``_defaultAxesClass`` attribute.
Attributes
----------
RGB : ``_defaultAxesClass``
The axes object for the three-channel imshow.
R : ``_defaultAxesClass``
The axes object for the red channel imshow.
G : ``_defaultAxesClass``
The axes object for the green channel imshow.
B : ``_defaultAxesClass``
The axes object for the blue channel imshow.
"""
_defaultAxesClass = Axes
@_api.delete_parameter("3.3", "add_all")
def __init__(self, *args, pad=0, add_all=True, **kwargs):
"""
Parameters
----------
pad : float, default: 0
fraction of the axes height to put as padding.
add_all : bool, default: True
Whether to add the {rgb, r, g, b} axes to the figure.
This parameter is deprecated.
axes_class : matplotlib.axes.Axes
*args
Unpacked into axes_class() init for RGB
**kwargs
Unpacked into axes_class() init for RGB, R, G, B axes
"""
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
self.RGB = ax = axes_class(*args, **kwargs)
if add_all:
ax.get_figure().add_axes(ax)
else:
kwargs["add_all"] = add_all # only show deprecation in that case
self.R, self.G, self.B = make_rgb_axes(
ax, pad=pad, axes_class=axes_class, **kwargs)
# Set the line color and ticks for the axes.
for ax1 in [self.RGB, self.R, self.G, self.B]:
ax1.axis[:].line.set_color("w")
ax1.axis[:].major_ticks.set_markeredgecolor("w")
@_api.deprecated("3.3")
def add_RGB_to_figure(self):
"""Add red, green and blue axes to the RGB composite's axes figure."""
self.RGB.get_figure().add_axes(self.R)
self.RGB.get_figure().add_axes(self.G)
self.RGB.get_figure().add_axes(self.B)
def imshow_rgb(self, r, g, b, **kwargs):
"""
Create the four images {rgb, r, g, b}.
Parameters
----------
r, g, b : array-like
The red, green, and blue arrays.
kwargs : imshow kwargs
kwargs get unpacked into the imshow calls for the four images.
Returns
-------
rgb : matplotlib.image.AxesImage
r : matplotlib.image.AxesImage
g : matplotlib.image.AxesImage
b : matplotlib.image.AxesImage
"""
if not (r.shape == g.shape == b.shape):
raise ValueError(
f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match')
RGB = np.dstack([r, g, b])
R = np.zeros_like(RGB)
R[:, :, 0] = r
G = np.zeros_like(RGB)
G[:, :, 1] = g
B = np.zeros_like(RGB)
B[:, :, 2] = b
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
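# Illustrative usage sketch (not part of this module); assumes a standard
# matplotlib figure and three equally shaped channel arrays:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     from mpl_toolkits.axes_grid1.axes_rgb import RGBAxes
#
#     fig = plt.figure()
#     ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8], pad=0.02)
#     r, g, b = np.random.rand(3, 16, 16)
#     ax.imshow_rgb(r, g, b)
#     plt.show()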
@_api.deprecated("3.3", alternative="RGBAxes")
class RGBAxesBase(RGBAxes):
pass
|
weasyl/test/test_http.py | hyena/weasyl | 111 | 20883 | <reponame>hyena/weasyl
import pytest
from weasyl import http
@pytest.mark.parametrize(('wsgi_env', 'expected'), [
({}, {}),
({'PATH_INFO': '/search', 'QUERY_STRING': 'q=example'}, {}),
({'HTTP_ACCEPT': '*/*'}, {'Accept': '*/*'}),
(
{'CONTENT_LENGTH': '', 'HTTP_ACCEPT_ENCODING': 'gzip', 'HTTP_UPGRADE_INSECURE_REQUESTS': '1'},
{'Accept-Encoding': 'gzip', 'Upgrade-Insecure-Requests': '1'},
),
])
def test_get_headers(wsgi_env, expected):
assert http.get_headers(wsgi_env) == expected
|