hexsha (stringlengths 40) | size (int64 1-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-239) | max_stars_repo_name (stringlengths 5-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24 ⌀) | max_issues_repo_path (stringlengths 3-239) | max_issues_repo_name (stringlengths 5-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24 ⌀) | max_forks_repo_path (stringlengths 3-239) | max_forks_repo_name (stringlengths 5-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24 ⌀) | content (stringlengths 1-1.03M) | avg_line_length (float64 1-958k) | max_line_length (int64 1-1.03M) | alphanum_fraction (float64 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a20a485df7f275a72f3399ed3c46ee21273835e | 2,707 | py | Python | vectors_simple_arithmetics.py | pavlovcoder/linear-algebra-python-library | 8c56d94c38a4318e69b2292f8a781aed00a3cb2c | ["MIT"] | null | null | null | vectors_simple_arithmetics.py | pavlovcoder/linear-algebra-python-library | 8c56d94c38a4318e69b2292f8a781aed00a3cb2c | ["MIT"] | null | null | null | vectors_simple_arithmetics.py | pavlovcoder/linear-algebra-python-library | 8c56d94c38a4318e69b2292f8a781aed00a3cb2c | ["MIT"] | null | null | null |
print(
'-----------------------------------------\n'\
'Python Library for Linear Algebra Operations | Vectors Initialization:\n'\
'-----------------------------------------\n'
)
print(
'Task:\n'\
'-----------------------------------------\n'\
    'Write a simple Python module for adding, subtracting and scalar-multiplying a few default vectors.\n'
)
print(
'Solution:\n'\
'-----------------------------------------'\
)
#Vector module for defining a new vector on the system:
class Vector(object):
def __init__(self, coordinates):
try:
if not coordinates:
raise ValueError
self.coordinates = tuple(coordinates)
self.dimension = len(coordinates)
except ValueError:
raise ValueError('The coordinates must be nonempty')
except TypeError:
raise TypeError('The coordinates must be an iterable')
def plus(self, v):
new_coordinates = [x+y for x,y in zip(self.coordinates, v.coordinates)]
return Vector(new_coordinates)
def minus(self, v):
new_coordinates = [x-y for x,y in zip(self.coordinates, v.coordinates)]
return Vector(new_coordinates)
def times_scalar(self, c):
new_coordinates = [c*x for x in self.coordinates]
return Vector(new_coordinates)
def __str__(self):
return 'Vector: {}'.format(self.coordinates)
def __eq__(self, v):
return self.coordinates == v.coordinates
#Default function for handling execution loop:
def execution_loop():
data = int(input("Do you want to try again ? Enter [1] - for continue / [0] - for quit :\n>>>"))
if data == 1:
return True
elif data == 0:
return False
else:
print("Error: you entered incorrect command. Please, try again...")
        return execution_loop()
#Default parameters for handling execution loop:
again_exec = True
counter_exec = 0
#Default loop for handling execution:
while again_exec:
A = Vector([8.218, -9.341])
B = Vector([-1.129, 2.111])
print('Adding vectors: {0} + {1} = {2}'.format(A, B, A.plus(B)))
A = Vector([7.119, 8.215])
B = Vector([-8.223, 0.878])
print('Subtracting vectors: {0} - {1} = {2}'.format(A, B, A.minus(B)))
A = 7.41
B = Vector([1.671, -1.012, -0.318])
print('Scalar multiply vector by value: {0} * {1} = {2}'.format(A, B, B.times_scalar(A)))
again_exec = execution_loop()
counter_exec = counter_exec + 1
#The end of execution:
if again_exec == False:
print("Program was executed: ",counter_exec, ' times.')
break
print(
'\n-----------------------------------------\n'\
'Copyright 2019 Vladimir Pavlov. All Rights Reserved.\n'\
'-----------------------------------------'
) | 30.077778 | 98 | 0.576653 |
4a20a4a3840f8e0be87449347523d3d285af4b9f | 1,161 | py | Python | tests/test_pitch_spelling.py | CPJKU/partitura | b1053d663fc10fc839a3c9c81c9b3dbf53d9e4b2 | ["Apache-2.0"] | 56 | 2021-01-12T10:11:54.000Z | 2022-03-14T04:37:38.000Z | tests/test_pitch_spelling.py | mgrachten/partitura | b1053d663fc10fc839a3c9c81c9b3dbf53d9e4b2 | ["Apache-2.0"] | 57 | 2020-12-06T18:53:38.000Z | 2022-03-23T12:07:38.000Z | tests/test_pitch_spelling.py | mgrachten/partitura | b1053d663fc10fc839a3c9c81c9b3dbf53d9e4b2 | ["Apache-2.0"] | 6 | 2021-03-09T07:05:16.000Z | 2021-11-13T20:01:38.000Z |
import numpy as np
import unittest
from partitura import EXAMPLE_MUSICXML
from partitura import load_musicxml
from partitura.musicanalysis import estimate_spelling
def compare_spelling(spelling, notes):
comparisons = np.zeros((len(spelling), 3))
for i, (n, s) in enumerate(zip(notes, spelling)):
comparisons[i, 0] = int(n.step == s["step"])
if n.alter is None and s["alter"] == 0:
comparisons[i, 1] = 1
else:
comparisons[i, 1] = int(n.alter == s["alter"])
comparisons[i, 2] = int(n.octave == s["octave"])
return comparisons
class TestKeyEstimation(unittest.TestCase):
"""
    Test pitch spelling estimation
"""
score = load_musicxml(EXAMPLE_MUSICXML)
def test_part(self):
spelling = estimate_spelling(self.score)
comparisons = compare_spelling(spelling, self.score.notes)
self.assertTrue(np.all(comparisons), "Incorrect spelling")
def test_note_array(self):
spelling = estimate_spelling(self.score.note_array)
comparisons = compare_spelling(spelling, self.score.notes)
self.assertTrue(np.all(comparisons), "Incorrect spelling")
| 31.378378 | 66 | 0.671835 |
4a20a56adc692940f9e414a30ed4bfa2c9e1a542 | 6,285 | py | Python | tests/instruments/test_manifest.py | jerjohste/exopy | 0fe3eb94f440ead88c396a1abccf7c22dd633a61 | ["BSD-3-Clause"] | 16 | 2018-03-20T09:06:23.000Z | 2021-09-08T18:46:15.000Z | tests/instruments/test_manifest.py | jerjohste/exopy | 0fe3eb94f440ead88c396a1abccf7c22dd633a61 | ["BSD-3-Clause"] | 118 | 2015-05-13T07:50:04.000Z | 2018-02-14T17:37:20.000Z | tests/instruments/test_manifest.py | jerjohste/exopy | 0fe3eb94f440ead88c396a1abccf7c22dd633a61 | ["BSD-3-Clause"] | 11 | 2018-03-02T11:17:26.000Z | 2021-06-23T22:25:40.000Z |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tests for the instrument manager manifest.
"""
import enaml
from enaml.widgets.api import MultilineField
from exopy.app.errors.widgets import BasicErrorsDisplay
from exopy.instruments.infos import DriverInfos
from exopy.testing.util import handle_dialog, show_and_close_widget
with enaml.imports():
from .contributors import InstrContributor1
def test_driver_validation_error_handler(exopy_qtbot, instr_workbench):
"""Test the error handler dedicated to driver validation issues.
"""
core = instr_workbench.get_plugin('enaml.workbench.core')
p = instr_workbench.get_plugin('exopy.instruments')
d = DriverInfos(starter='starter', connections={'c1': {}, 'c2': {}},
settings={'s2': {}, 's3': {}})
cmd = 'exopy.app.errors.signal'
def check_dialog(bot, dial):
w = dial.errors['exopy.driver-validation']
assert 'd' in w.errors
for err in ('starter', 'connections', 'settings'):
assert err in w.errors['d']
with handle_dialog(exopy_qtbot, 'accept', check_dialog):
core.invoke_command(cmd, {'kind': 'exopy.driver-validation',
'details': {'d': d.validate(p)[1]}})
def test_reporting_on_extension_errors(exopy_qtbot, instr_workbench):
"""Check reporting extension errors.
"""
plugin = instr_workbench.get_plugin('exopy.app.errors')
handler = plugin._errors_handlers.contributions['exopy.driver-validation']
widget = handler.report(instr_workbench)
assert isinstance(widget, MultilineField)
show_and_close_widget(exopy_qtbot, widget)
handler.errors = {'test': 'msg'}
widget = handler.report(instr_workbench)
assert isinstance(widget, BasicErrorsDisplay)
show_and_close_widget(exopy_qtbot, widget)
def test_validate_runtime_dependencies_driver(instr_workbench):
"""Test the validation of drivers as runtime dependencies.
"""
instr_workbench.register(InstrContributor1())
d_p = instr_workbench.get_plugin('exopy.app.dependencies')
d_c = d_p.run_deps_collectors.contributions['exopy.instruments.drivers']
err = {}
d_c.validate(instr_workbench, ('tests.test.FalseDriver', 'dummy'), err)
assert len(err) == 1
assert ('instruments.test.FalseDriver' not in
err['unknown-drivers'])
assert 'dummy' in err['unknown-drivers']
def test_collect_runtime_dependencies_driver(instr_workbench):
"""Test the collection of drivers as runtime dependencies.
"""
instr_workbench.register(InstrContributor1())
d_p = instr_workbench.get_plugin('exopy.app.dependencies')
d_c = d_p.run_deps_collectors.contributions['exopy.instruments.drivers']
dep = dict.fromkeys(('instruments.test.FalseDriver', 'dummy'))
err = {}
un = set()
d_c.collect(instr_workbench, 'tests', dep, un, err)
assert len(err) == 1
assert 'instruments.test.FalseDriver' not in err['unknown-drivers']
assert 'dummy' in err['unknown-drivers']
assert not un
assert dep['instruments.test.FalseDriver'] is not None
assert dep['dummy'] is None
def test_validate_runtime_dependencies_profiles(prof_plugin):
"""Test the validation of profiles as runtime dependencies.
"""
w = prof_plugin.workbench
d_p = w.get_plugin('exopy.app.dependencies')
d_c = d_p.run_deps_collectors.contributions['exopy.instruments.profiles']
err = {}
d_c.validate(w, ('fp1', 'dummy'), err)
assert len(err) == 1
assert 'fp1' not in err['unknown-profiles']
assert 'dummy' in err['unknown-profiles']
def test_collect_release_runtime_dependencies_profiles(prof_plugin):
"""Test the collection and release of profiles as runtime dependencies.
"""
w = prof_plugin.workbench
d_p = w.get_plugin('exopy.app.dependencies')
d_c = d_p.run_deps_collectors.contributions['exopy.instruments.profiles']
dep = dict.fromkeys(('fp1', 'dummy'))
err = {}
un = set()
d_c.collect(w, 'tests', dep, un, err)
assert len(err) == 1
assert 'dummy' in err['unknown-profiles']
assert not un
assert dep['fp1'] is not None
assert dep['dummy'] is None
assert 'fp1' in prof_plugin.used_profiles
d_c.release(w, 'tests', list(dep))
assert not prof_plugin.used_profiles
prof_plugin.used_profiles = {'fp2': 'tests2'}
dep = dict.fromkeys(('fp1', 'fp2', 'dummy'))
err = {}
un = set()
d_c.collect(w, 'tests', dep, un, err)
assert len(err) == 1
assert 'dummy' in err['unknown-profiles']
assert 'fp2' in un
assert dep['fp1'] is None
assert dep['fp2'] is None
assert dep['dummy'] is None
def test_select_instrument_profile_command(exopy_qtbot, prof_plugin):
"""Test selecting an instrument profile.
"""
core = prof_plugin.workbench.get_plugin('enaml.workbench.core')
with handle_dialog(exopy_qtbot, 'reject'):
res = core.invoke_command('exopy.instruments.select_instrument')
assert res is None
with handle_dialog(exopy_qtbot, 'accept'):
res = core.invoke_command('exopy.instruments.select_instrument',
dict(profile='fp1',
driver='instruments.test.FalseDriver',
connection='false_connection3',
settings='false_settings3'))
assert res == ('fp1', 'instruments.test.FalseDriver', 'false_connection3',
'false_settings3')
def test_open_browser_command(exopy_qtbot, prof_plugin):
"""Test opening the browsing window.
"""
with enaml.imports():
from enaml.workbench.ui.ui_manifest import UIManifest
prof_plugin.workbench.register(UIManifest())
core = prof_plugin.workbench.get_plugin('enaml.workbench.core')
with handle_dialog(exopy_qtbot):
core.invoke_command('exopy.instruments.open_browser')
| 32.066327 | 79 | 0.661893 |
4a20a5b9361d950b1d1bc1198cb5df60fc54d50c | 5,788 | py | Python | brambling/tests/functional/test_organizer_forms.py | Shivanjain023/django-brambling | 17539b82df37f22bd2b4293e73142b887c916344 | ["BSD-3-Clause"] | 8 | 2015-05-06T18:26:15.000Z | 2018-02-07T22:18:32.000Z | brambling/tests/functional/test_organizer_forms.py | Shivanjain023/django-brambling | 17539b82df37f22bd2b4293e73142b887c916344 | ["BSD-3-Clause"] | 578 | 2015-01-05T21:37:17.000Z | 2018-02-14T16:43:50.000Z | brambling/tests/functional/test_organizer_forms.py | Shivanjain023/django-brambling | 17539b82df37f22bd2b4293e73142b887c916344 | ["BSD-3-Clause"] | 1 | 2015-08-20T16:59:32.000Z | 2015-08-20T16:59:32.000Z |
from datetime import datetime, date, timedelta
from django.test import TestCase, RequestFactory
from django.utils import timezone
from brambling.forms.organizer import ManualPaymentForm, EventCreateForm
from brambling.models import Transaction, Event, OrganizationMember
from brambling.tests.factories import (
OrderFactory,
PersonFactory,
EventFactory,
ItemFactory,
ItemImageFactory,
ItemOptionFactory,
DiscountFactory,
SavedReportFactory,
CustomFormFactory,
CustomFormFieldFactory,
)
class ManualPaymentFormTestCase(TestCase):
def test_creation(self):
order = OrderFactory()
user = PersonFactory()
form = ManualPaymentForm(order=order, user=user, data={'amount': 10, 'method': Transaction.FAKE})
self.assertFalse(form.errors)
self.assertTrue(form.is_bound)
txn = form.save()
self.assertEqual(txn.amount, 10)
self.assertEqual(txn.order, order)
self.assertEqual(txn.event, order.event)
self.assertEqual(txn.transaction_type, Transaction.PURCHASE)
self.assertEqual(txn.method, Transaction.FAKE)
self.assertEqual(txn.created_by, user)
self.assertEqual(txn.is_confirmed, True)
self.assertEqual(txn.api_type, order.event.api_type)
class EventCreateFormTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
def test_adjust_date(self):
request = self.factory.get('/')
request.user = PersonFactory()
td = timedelta(days=3)
old_date = date(2015, 10, 2)
old_event = EventFactory(start_date=date(2015, 10, 5))
new_event = EventFactory(start_date=old_event.start_date + td)
form = EventCreateForm(request)
new_date = form._adjust_date(old_event, new_event, old_date)
self.assertEqual(new_date, old_date + td)
def test_adjust_datetime(self):
request = self.factory.get('/')
request.user = PersonFactory()
td = timedelta(days=3)
old_date = datetime(2015, 10, 2, 5, 5, 5)
old_event = EventFactory(start_date=date(2015, 10, 5))
new_event = EventFactory(start_date=old_event.start_date + td)
form = EventCreateForm(request)
new_date = form._adjust_date(old_event, new_event, old_date)
self.assertEqual(new_date, old_date + td)
def test_duplication(self):
"""Passing in a template_event should result in settings and relevant related objects being copied"""
threedays = timedelta(days=3)
template = EventFactory(
start_date=timezone.now() - threedays,
end_date=timezone.now() - threedays,
)
item = ItemFactory(event=template)
ItemOptionFactory(item=item)
ItemImageFactory(item=item)
DiscountFactory(event=template)
SavedReportFactory(event=template)
custom_form = CustomFormFactory(event=template)
CustomFormFieldFactory(form=custom_form)
request = self.factory.post('/', {
'name': 'New event',
'slug': 'new-event',
'start_date': timezone.now().strftime("%Y-%m-%d"),
'end_date': timezone.now().strftime("%Y-%m-%d"),
'organization': str(template.organization.pk),
'template_event': str(template.pk),
})
owner = PersonFactory()
OrganizationMember.objects.create(
person=owner,
organization=template.organization,
role=OrganizationMember.OWNER,
)
request.user = owner
form = EventCreateForm(request, data=request.POST)
self.assertFalse(form.errors)
event = form.save()
# Get a refreshed version from the db
event = Event.objects.get(pk=event.pk)
fields = (
'description', 'website_url', 'banner_image', 'city',
'state_or_province', 'country', 'timezone', 'currency',
'has_dances', 'has_classes', 'liability_waiver', 'privacy',
'collect_housing_data', 'collect_survey_data', 'cart_timeout',
'check_postmark_cutoff', 'transfers_allowed', 'facebook_url',
)
self.assertEqual(
dict((f, getattr(event, f)) for f in fields),
dict((f, getattr(template, f)) for f in fields)
)
# Make sure things haven't been moved off old events.
self.assertEqual(template.items.count(), 1)
item = template.items.all()[0]
self.assertEqual(item.options.count(), 1)
self.assertEqual(template.discount_set.count(), 1)
self.assertEqual(template.savedreport_set.count(), 1)
self.assertEqual(template.forms.count(), 1)
custom_form = template.forms.all()[0]
self.assertEqual(custom_form.fields.count(), 1)
# Make sure things have been copied to new event.
self.assertEqual(event.items.count(), 1)
item = event.items.all()[0]
self.assertEqual(item.options.count(), 1)
self.assertEqual(event.discount_set.count(), 1)
self.assertEqual(event.savedreport_set.count(), 1)
self.assertEqual(event.forms.count(), 1)
custom_form = event.forms.all()[0]
self.assertEqual(custom_form.fields.count(), 1)
# Check that dates have been adjusted properly.
old_item = template.items.all()[0]
old_option = old_item.options.all()[0]
new_item = event.items.all()[0]
new_option = new_item.options.all()[0]
self.assertEqual(new_option.available_start - old_option.available_start, threedays)
old_discount = template.discount_set.all()[0]
new_discount = event.discount_set.all()[0]
self.assertEqual(new_discount.available_start - old_discount.available_start, threedays)
| 40.194444 | 109 | 0.65152 |
4a20a635f5e037078a0e577c56c6a289da083b87 | 3,856 | py | Python | Spy-Games/code.py | Saradwata-Bandyopadhyay/greyatom-python-for-data-science | fe15b5bb27b928b0f7fe151e630d89ab07a95a0b | ["MIT"] | 1 | 2020-07-02T07:38:07.000Z | 2020-07-02T07:38:07.000Z | Spy-Games/code.py | Saradwata-Bandyopadhyay/greyatom-python-for-data-science | fe15b5bb27b928b0f7fe151e630d89ab07a95a0b | ["MIT"] | null | null | null | Spy-Games/code.py | Saradwata-Bandyopadhyay/greyatom-python-for-data-science | fe15b5bb27b928b0f7fe151e630d89ab07a95a0b | ["MIT"] | null | null | null |
# --------------
#Code starts here
#Function to read file
def read_file(path):
#Opening of the file located in the path in 'read' mode
file=open(path,'r')
#Reading of the first line of the file and storing it in a variable
sentence=file.readline()
#Closing of the file
file.close()
#Returning the first line of the file
return sentence
#Calling the function to read file
sample_message=read_file(file_path)
#Printing the line of the file
print(sample_message)
#Function to fuse message
def fuse_msg(message_a,message_b):
#Integer division of two numbers
quotient=int(message_b)//int(message_a)
#Returning the quotient in string format
return str(quotient)
#Calling the function to read file
message_1=read_file(file_path_1)
#Calling the function to read file
message_2=read_file(file_path_2)
#Calling the function 'fuse_msg'
secret_msg_1=fuse_msg(message_1,message_2)
#Printing the secret message
print(message_1)
print(message_2)
#Function to substitute the message
def substitute_msg(message_c):
#If-else to compare the contents of the file
if message_c == 'Red':
sub='Army General'
elif message_c == 'Green':
sub='Data Scientist'
else :
sub= 'Marine Biologist'
#Returning the substitute of the message
return sub
#Calling the function to read file
message_3=read_file(file_path_3)
print(message_3)
#Calling the function 'substitute_msg'
secret_msg_2=substitute_msg(message_3)
#Printing the secret message
print(secret_msg_2)
#Function to compare message
def compare_msg(message_d,message_e):
#Splitting the message into a list
a_list=message_d.split()
#Splitting the message into a list
b_list=message_e.split()
#Comparing the elements from both the lists
c_list=[i for i in a_list if i not in b_list ]
#Combining the words of a list back to a single string sentence
final_msg=" ".join(c_list)
#Returning the sentence
return final_msg
#Calling the function to read file
message_4=read_file(file_path_4)
#Calling the function to read file
message_5=read_file(file_path_5)
#Calling the function 'compare messages'
secret_msg_3=compare_msg(message_4,message_5)
#Printing the secret message
print(message_4)
print(message_5)
#Function to filter message
def extract_msg(message_f):
#Splitting the message into a list
a_list=message_f.split()
#Creating the lambda function to identify even length words
even_word=lambda x:(len(x)%2==0)
#Splitting the message into a list
b_list=list(filter(even_word,a_list))
#Combining the words of a list back to a single string sentence
final_msg=" ".join(b_list)
#Returning the sentence
return final_msg
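#Illustrative example (not from the original task data): extract_msg('meet agent at dawn')
#keeps only the even-length words and returns 'meet at dawn'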
#Calling the function to read file
message_6=read_file(file_path_6)
#Calling the function 'extract_msg'
secret_msg_4=extract_msg(message_6)
#Printing the secret message
print(message_6)
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
# define the path where you
final_path= user_data_dir + '/secret_message.txt'
#Combine the secret message parts into a single complete secret message
secret_msg=" ".join(message_parts)
#Function to write inside a file
def write_file(secret_msg,path):
#Opening a file named 'secret_message' in 'write' mode
file1=open(path,'a+')
#Writing to the file
file1.write(secret_msg)
#Closing the file
file1.close()
#Calling the function to write inside the file
write_file(secret_msg,final_path)
#Printing the entire secret message
print(secret_msg)
#Code ends here
| 25.202614 | 72 | 0.706432 |
4a20a699f17fc64adfa91e1d5de7bb9340ba55e2 | 11,418 | py | Python | salesforce/fields.py | bryancolligan/django-salesforce | cec08115f97d75d9b7b96bb34c40e48974c7269f | ["MIT"] | null | null | null | salesforce/fields.py | bryancolligan/django-salesforce | cec08115f97d75d9b7b96bb34c40e48974c7269f | ["MIT"] | null | null | null | salesforce/fields.py | bryancolligan/django-salesforce | cec08115f97d75d9b7b96bb34c40e48974c7269f | ["MIT"] | null | null | null |
# django-salesforce
#
# by Phil Christensen
# (c) 2012-2013 Freelancers Union (http://www.freelancersunion.org)
# See LICENSE.md for details
#
"""
Customized fields for Salesforce, especially the primary key. (like django.db.models.fields)
"""
import warnings
from decimal import Decimal
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django.db.models import fields
from django.db.models import PROTECT, DO_NOTHING # NOQA pylint:disable=unused-import
from django.db import models
from django.utils.encoding import smart_text
from six import string_types
from salesforce.backend.operations import DefaultedOnCreate
# None of field types defined in this module need a "deconstruct" method,
# in Django 1.7+, because their parameters only describe fixed nature of SF
# standard objects that can not be modified no ways by no API or spell.
FULL_WRITABLE = 0
NOT_UPDATEABLE = 1
NOT_CREATEABLE = 2
READ_ONLY = 3 # (NOT_UPDATEABLE & NOT_CREATEABLE)
DEFAULTED_ON_CREATE = DefaultedOnCreate()
SF_PK = getattr(settings, 'SF_PK', 'id')
if SF_PK not in ('id', 'Id'):
raise ImproperlyConfigured("Value of settings.SF_PK must be 'id' or 'Id' or undefined.")
class SalesforceAutoField(fields.AutoField):
"""
An AutoField that works with Salesforce primary keys.
It is used by SalesforceModel as a custom primary key. It doesn't convert
its value to int.
"""
description = _("Text")
default_error_messages = {
'invalid': _('This value must be a valid Salesforce ID.'),
}
def to_python(self, value):
if isinstance(value, string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
return self.to_python(value)
def contribute_to_class(self, cls, name, **kwargs):
name = name if self.name is None else self.name
# we can't require "self.auto_created==True" due to backward compatibility
# with old migrations created before v0.6. Other conditions are enough.
if name != SF_PK or not self.primary_key:
raise ImproperlyConfigured(
"SalesforceAutoField must be a primary key"
"with the name '%s' (configurable by settings)." % SF_PK)
if cls._meta.auto_field:
# pylint:disable=unidiomatic-typecheck
if not (type(self) == type(cls._meta.auto_field) and self.model._meta.abstract and # NOQA type eq
cls._meta.auto_field.name == SF_PK):
raise ImproperlyConfigured(
"The model %s can not have more than one AutoField, "
"but currently: (%s=%s, %s=%s)" % (
cls,
cls._meta.auto_field.name, cls._meta.auto_field,
name, self
)
)
# A model is created that inherits fields from more abstract classes
# with the same default SalesforceAutoFieldy. Therefore the second should be
# ignored.
return
super(SalesforceAutoField, self).contribute_to_class(cls, name, **kwargs)
cls._meta.auto_field = self
class SfField(models.Field):
"""
Add support of 'sf_read_only' and 'custom' parameters to Salesforce fields.
sf_read_only=3 (READ_ONLY): The field can not be specified neither on insert or update.
e.g. LastModifiedDate (the most frequent type of read only)
sf_read_only=1 (NOT_UPDATEABLE): The field can be specified on insert but can not be later never modified.
e.g. ContactId in User object (relative frequent)
sf_read_only=2 (NOT_CREATEABLE): The field can not be specified on insert but can be later modified.
e.g. RecordType.IsActive or Lead.EmailBouncedReason
sf_read_only=0: normal writable (default)
custom=True : Add '__c' to the column name if no db_column is defined.
"""
def __init__(self, *args, **kwargs):
self.sf_read_only = kwargs.pop('sf_read_only', 0)
self.sf_custom = kwargs.pop('custom', None)
self.sf_namespace = ''
super(SfField, self).__init__(*args, **kwargs)
def get_attname_column(self):
"""
Get the database column name automatically in most cases.
"""
# See "A guide to Field parameters": django/db/models/fields/__init__.py
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
attname = self.get_attname()
if self.db_column is not None:
# explicit name
column = self.db_column
else:
if not self.name.islower():
# a Salesforce style name e.g. 'LastName' or 'MyCustomField'
column = self.name
else:
# a Django style name like 'last_name' or 'my_custom_field'
column = self.name.title().replace('_', '')
# Fix custom fields
if self.sf_custom:
column = self.sf_namespace + column + '__c'
return attname, column
def contribute_to_class(self, cls, name, private_only=False, **kwargs):
# Different arguments are in Django 1.11 vs. 2.0, therefore we use universal **kwargs
# pylint:disable=arguments-differ
super(SfField, self).contribute_to_class(cls, name, private_only=private_only, **kwargs)
if self.sf_custom is None and hasattr(cls._meta, 'sf_custom'):
# Only custom fields in models explicitly marked by
# Meta custom=True are recognized automatically - for
# backward compatibility reasons.
self.sf_custom = cls._meta.sf_custom
if self.sf_custom and '__' in cls._meta.db_table[:-3]:
self.sf_namespace = cls._meta.db_table.split('__')[0] + '__'
self.set_attributes_from_name(name)
# pylint:disable=unnecessary-pass,too-many-ancestors
class CharField(SfField, models.CharField):
"""CharField with sf_read_only attribute for Salesforce."""
pass
class EmailField(SfField, models.EmailField):
"""EmailField with sf_read_only attribute for Salesforce."""
pass
class URLField(SfField, models.URLField):
"""URLField with sf_read_only attribute for Salesforce."""
pass
class TextField(SfField, models.TextField):
"""TextField with sf_read_only attribute for Salesforce."""
pass
class IntegerField(SfField, models.IntegerField):
"""IntegerField with sf_read_only attribute for Salesforce."""
pass
class BigIntegerField(SfField, models.BigIntegerField):
"""BigIntegerField with sf_read_only attribute for Salesforce."""
# important for other database backends, e.g. in tests
# The biggest exact value is +-(2 ** 53 -1 ), approx. 9.007E15
pass
class SmallIntegerField(SfField, models.SmallIntegerField):
"""SmallIntegerField with sf_read_only attribute for Salesforce."""
pass
class DecimalField(SfField, models.DecimalField):
"""
DecimalField with sf_read_only attribute for Salesforce.
Salesforce has only one numeric type xsd:double, but no integer.
Even a numeric field with declared zero decimal_places can contain
pi=3.14159265358979 in the database accidentally, but if also the value
is integer,then it is without '.0'.
DecimalField is the default numeric type used by itrospection inspectdb.
"""
def to_python(self, value):
if str(value) == 'DEFAULTED_ON_CREATE':
return value
ret = super(DecimalField, self).to_python(value)
if ret is not None and self.decimal_places == 0:
# this is because Salesforce has no numeric integer type
if ret == int(ret):
ret = Decimal(int(ret))
return ret
# parameter "context" is for Django 1.11 and older (the same is in more classes here)
def from_db_value(self, value, expression, connection, context=None):
# pylint:disable=unused-argument
# TODO refactor and move to the driver like in other backends
if isinstance(value, float):
value = str(value)
return self.to_python(value)
class FloatField(SfField, models.FloatField):
"""FloatField for Salesforce.
It is Float in Python and the same as DecimalField in the database.
"""
pass
class BooleanField(SfField, models.BooleanField):
"""BooleanField with sf_read_only attribute for Salesforce."""
def __init__(self, default=False, **kwargs):
super(BooleanField, self).__init__(default=default, **kwargs)
def to_python(self, value):
if isinstance(value, DefaultedOnCreate):
return value
return super(BooleanField, self).to_python(value)
class DateTimeField(SfField, models.DateTimeField):
"""DateTimeField with sf_read_only attribute for Salesforce."""
class DateField(SfField, models.DateField):
"""DateField with sf_read_only attribute for Salesforce."""
def from_db_value(self, value, expression, connection, context=None):
# pylint:disable=unused-argument
return self.to_python(value)
class TimeField(SfField, models.TimeField):
"""TimeField with sf_read_only attribute for Salesforce."""
def from_db_value(self, value, expression, connection, context=None):
# pylint:disable=unused-argument
return self.to_python(value)
class ForeignKey(SfField, models.ForeignKey):
"""ForeignKey with sf_read_only attribute and acceptable by Salesforce."""
def __init__(self, to, on_delete, *args, **kwargs):
# Checks parameters before call to ancestor.
if on_delete.__name__ not in ('PROTECT', 'DO_NOTHING'):
# The option CASCADE (currently fails) would be unsafe after a fix
# of on_delete because Cascade delete is not usually enabled in SF
# for safety reasons for most fields objects, namely for Owner,
# CreatedBy etc. Some related objects are deleted automatically
# by SF even with DO_NOTHING in Django, e.g. for
# Campaign/CampaignMember
warnings.warn(
"Only foreign keys with on_delete = PROTECT or "
"DO_NOTHING are currently supported, not %s related to %s"
% (on_delete, to))
super(ForeignKey, self).__init__(to, on_delete, *args, **kwargs)
def get_attname(self):
if self.name.islower(): # pylint:disable=no-else-return
# the same as django.db.models.fields.related.ForeignKey.get_attname
return '%s_id' % self.name
else:
return '%sId' % self.name
def get_attname_column(self):
attname, column = super(ForeignKey, self).get_attname_column()
if self.db_column is None and not self.sf_custom:
column += 'Id'
return attname, column
class OneToOneField(ForeignKey, models.OneToOneField):
"""OneToOneField with sf_read_only attribute and acceptable by Salesforce."""
pass
AutoField = SalesforceAutoField
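# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of this module): how the sf_read_only and
# custom options documented on SfField are typically attached to model fields.
# "SalesforceModel" and the "from salesforce import models" import path are
# assumed from the SalesforceAutoField docstring above; the model and field
# names below are hypothetical.
#
# from salesforce import models
#
# class Account(models.SalesforceModel):
#     name = models.CharField(max_length=255)
#     # read only on both insert and update, e.g. audit fields:
#     last_modified_date = models.DateTimeField(sf_read_only=READ_ONLY)
#     # custom=True appends '__c', so this maps to the column "Score__c":
#     score = models.DecimalField(max_digits=10, decimal_places=2, custom=True)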
| 38.969283 | 115 | 0.664039 |
4a20a7ecd9a50e4b719b00927fd375db2aa83df5 | 5,610 | py | Python | evalml/pipelines/components/estimators/classifiers/catboost_classifier.py | Mahesh1822/evalml | aa0ec2379aeba12bbd0dcaaa000f9a2a62064169 | ["BSD-3-Clause"] | null | null | null | evalml/pipelines/components/estimators/classifiers/catboost_classifier.py | Mahesh1822/evalml | aa0ec2379aeba12bbd0dcaaa000f9a2a62064169 | ["BSD-3-Clause"] | null | null | null | evalml/pipelines/components/estimators/classifiers/catboost_classifier.py | Mahesh1822/evalml | aa0ec2379aeba12bbd0dcaaa000f9a2a62064169 | ["BSD-3-Clause"] | null | null | null |
"""CatBoost Classifier, a classifier that uses gradient-boosting on decision trees. CatBoost is an open-source library and natively supports categorical features."""
import copy
import warnings
import numpy as np
from skopt.space import Integer, Real
from evalml.model_family import ModelFamily
from evalml.pipelines.components.estimators import Estimator
from evalml.pipelines.components.transformers import LabelEncoder
from evalml.problem_types import ProblemTypes
from evalml.utils import import_or_raise, infer_feature_types
class CatBoostClassifier(Estimator):
"""CatBoost Classifier, a classifier that uses gradient-boosting on decision trees. CatBoost is an open-source library and natively supports categorical features.
For more information, check out https://catboost.ai/
Args:
n_estimators (float): The maximum number of trees to build. Defaults to 10.
eta (float): The learning rate. Defaults to 0.03.
max_depth (int): The maximum tree depth for base learners. Defaults to 6.
bootstrap_type (string): Defines the method for sampling the weights of objects. Available methods are 'Bayesian', 'Bernoulli', 'MVS'. Defaults to None.
silent (boolean): Whether to use the "silent" logging mode. Defaults to True.
allow_writing_files (boolean): Whether to allow writing snapshot files while training. Defaults to False.
n_jobs (int or None): Number of jobs to run in parallel. -1 uses all processes. Defaults to -1.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "CatBoost Classifier"
hyperparameter_ranges = {
"n_estimators": Integer(4, 100),
"eta": Real(0.000001, 1),
"max_depth": Integer(4, 10),
}
"""{
"n_estimators": Integer(4, 100),
"eta": Real(0.000001, 1),
"max_depth": Integer(4, 10),
}"""
model_family = ModelFamily.CATBOOST
"""ModelFamily.CATBOOST"""
supported_problem_types = [
ProblemTypes.BINARY,
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]
"""[
ProblemTypes.BINARY,
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]"""
def __init__(
self,
n_estimators=10,
eta=0.03,
max_depth=6,
bootstrap_type=None,
silent=True,
allow_writing_files=False,
random_seed=0,
n_jobs=-1,
**kwargs,
):
parameters = {
"n_estimators": n_estimators,
"eta": eta,
"max_depth": max_depth,
"bootstrap_type": bootstrap_type,
"silent": silent,
"allow_writing_files": allow_writing_files,
}
if kwargs.get("thread_count", None) is not None:
warnings.warn(
"Parameter 'thread_count' will be ignored. To use parallel threads, use the 'n_jobs' parameter instead."
)
parameters.update(kwargs)
cb_error_msg = (
"catboost is not installed. Please install using `pip install catboost.`"
)
catboost = import_or_raise("catboost", error_msg=cb_error_msg)
self._label_encoder = None
# catboost will choose an intelligent default for bootstrap_type, so only set if provided
cb_parameters = copy.copy(parameters)
if bootstrap_type is None:
cb_parameters.pop("bootstrap_type")
cb_parameters["thread_count"] = n_jobs
cb_classifier = catboost.CatBoostClassifier(
**cb_parameters, random_seed=random_seed
)
parameters["n_jobs"] = n_jobs
super().__init__(
parameters=parameters, component_obj=cb_classifier, random_seed=random_seed
)
def fit(self, X, y=None):
"""Fits CatBoost classifier component to data.
Args:
X (pd.DataFrame): The input training data of shape [n_samples, n_features].
y (pd.Series): The target training data of length [n_samples].
Returns:
self
"""
X = infer_feature_types(X)
cat_cols = list(X.ww.select("category", return_schema=True).columns)
self.input_feature_names = list(X.columns)
X, y = super()._manage_woodwork(X, y)
# For binary classification, catboost expects numeric values, so encoding before.
if y.nunique() <= 2:
self._label_encoder = LabelEncoder()
y = self._label_encoder.fit_transform(None, y)[1]
self._component_obj.fit(X, y, silent=True, cat_features=cat_cols)
return self
def predict(self, X):
"""Make predictions using the fitted CatBoost classifier.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.DataFrame: Predicted values.
"""
X = infer_feature_types(X)
predictions = self._component_obj.predict(X)
if predictions.ndim == 2 and predictions.shape[1] == 1:
predictions = predictions.flatten()
if self._label_encoder:
predictions = self._label_encoder.inverse_transform(
predictions.astype(np.int64)
)
predictions = infer_feature_types(predictions)
predictions.index = X.index
return predictions
@property
def feature_importance(self):
"""Feature importance of fitted CatBoost classifier."""
return self._component_obj.get_feature_importance()
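if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch, not part of the evalml source: a tiny invented
    # dataset exercising only the constructor, fit, predict and
    # feature_importance defined above (requires evalml and catboost installed).
    import pandas as pd
    X = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [1.5, 0.2, 3.1, 0.4, 2.2, 0.1]})
    y = pd.Series([0, 1, 0, 1, 0, 1])
    clf = CatBoostClassifier(n_estimators=10, eta=0.03, max_depth=4)
    clf.fit(X, y)
    print(clf.predict(X))
    print(clf.feature_importance)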
| 38.163265 | 166 | 0.64902 |
4a20a83a84dfbe7805d4d1459a3050f5bebfae75 | 13,362 | py | Python | injection_curve_analysis_shenzhen/main.py | shixin2013/python-study | acedb1d25bfd2ee53d5bf12c66df4305138359b0 | ["MIT"] | null | null | null | injection_curve_analysis_shenzhen/main.py | shixin2013/python-study | acedb1d25bfd2ee53d5bf12c66df4305138359b0 | ["MIT"] | null | null | null | injection_curve_analysis_shenzhen/main.py | shixin2013/python-study | acedb1d25bfd2ee53d5bf12c66df4305138359b0 | ["MIT"] | null | null | null |
#Python has mutable and immutable structures; the mutable ones are essentially the complex structures (list, dict/map, etc.). Assignment and argument passing of complex structures pass references
# in "for it in list", it is read-only: rebinding it does not change the list values
# ctrl+[ , ctrl+] : indent / outdent code
import os
import datetime
import shutil
import configparser
import sys
import numpy as np
from scipy.optimize import leastsq
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import pandas as pd
import subprocess
import time
import threading
import math
import xlrd
def pres_loss_friction(L,D,rou,Q,visc,e):
if Q<1e-3 :
return 0
Cf = 4.343e-15 #metric
#get reynolds number
Cr = 0.01474 #metric
Re = Cr*rou*Q/(D*visc)
#get fanning friction factor
if Re<4000:
f = 16/Re
else:
t=pow(e/(3.7*D),10/9)
t=t+6.9/Re
t = math.log10(t)*(-3.6)
f = 1/t/t
return Cf*f*L*rou*Q*Q/pow(D,5)*0.1 #bar to MPa
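#Illustrative sanity check (not part of the original script; figures taken from the
#commented matching block further below): 3500 m of 4.7-inch pipe, water at
#1000 kg/m3 and 0.7 cp, 0.3 mm roughness, 8 bpm (~1832 m3/day):
# pres_loss_friction(3500, 4.7*0.0254, 1000, 8*228.9417, 0.7, 0.3*0.001)
#gives roughly 1.3 MPa, which multiplied by the fitted coefficient ~5.4 matches the ~7 MPa measured at 8 bpm.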
def pres_loss_friction_array_ver(L,D,rou,Q,visc,e):
Cf = 4.343e-15 #metric
Cr = 0.01474 #metric
rev = Q.copy()
for i in range(len(Q)):
if Q[i]<1e-3 :
rev[i]=0
else:
#get reynolds number
Re = Cr*rou*Q[i]/(D*visc)
#get fanning friction factor
if Re<4000:
f = 16/Re
else:
t=pow(e/(3.7*D),10/9)
t=t+6.9/Re
t = math.log10(t)*(-3.6)
f = 1/t/t
rev[i] = Cf*f*L*rou*Q[i]*Q[i]/pow(D,5)*0.1 #bar to MPa
return rev
def pres_loss_head(H,rou):
return rou*9.8*H*1e-6
def pres_loss_singleICD(fld,q,strength):
global fluid_wat
dens_cal = fluid_wat.dens
visc_cal = fluid_wat.visc
K = strength # / dens_cal =1.0 #str:bars/(rm3/day)2
dp = pow((dens_cal/fld.dens)*(fld.visc/visc_cal),0.25)*fld.dens/dens_cal*K*q*q
return dp*0.1 #bar 2 MPa
class Fluid:
def __init__(self,rou,visc):
self.dens = rou
self.visc = visc
class Well:
def __init__(self,l,l2,h,d,e,wn,fluid,coff,strength=0.00021,PI=1.0): #ecl typical value
self.length_v = l
self.length_h = l2-l
self.tvd = h
self.diam = d*0.0254
self.rough = e*0.001
self.name = wn
self.fluid = fluid
self.coff = coff
self.strength = strength
self.PI = PI
def get_pres_loss_icd(self,Q,f):
global icd_interval
nIcds = (int)(self.length_h/icd_interval)
#print(Q)
dp = pres_loss_singleICD(f,Q/nIcds,self.strength)
return dp
def get_bhp(self,Q,thp):
bhp = thp+pres_loss_head( self.tvd, self.fluid.dens)- \
self.coff*pres_loss_friction(self.length_v,self.diam,self.fluid.dens,Q,self.fluid.visc,self.rough)- \
self.get_pres_loss_icd(Q,fluid)
#pres_loss_local(self.fluid.dens,self.coff,Q,self.diam) #3.7
# if bhp<resP:
# bhp = resP
return bhp
def get_effective_pres(self,Q,thp,resP):
tmp = self.get_bhp(Q,thp)-resP
if tmp<0:
tmp=0
return tmp
def read_thp_curve(self,fname):
wb = xlrd.open_workbook(fname)
st = wb.sheet_by_name(self.name)
self.timeT = st.col_values(0)[1:]
self.presT = st.col_values(1)[1:]
        self.injT = st.col_values(2)[1:] #a list, unlike an array, cannot be multiplied element-wise: "*" repeats the list to build a new one
self.injT = [i*1e-3*60*24 for i in self.injT]
self.retT = st.col_values(3)[1:]
self.retT = [abs(i)*1e-3*60*24 for i in self.retT]
        #average the rates to avoid sharp oscillations: backward moving average over the preceding steps (see the ave_rate call below)
self.ave_rate(self.injT,200)
self.effec_pres = [i for i in range(len(self.timeT))]
global resp, plot_icd_pl,plot_pres_loss
self.effec_PI = self.effec_pres.copy()
for i in range(len(self.timeT)):
if not plot_icd_pl and not plot_pres_loss:
self.effec_pres[i] = self.get_effective_pres(abs(self.injT[i]),self.presT[i],resp)
if self.effec_pres[i]>0.1:
self.effec_PI[i]= (self.injT[i]-self.retT[i])/self.effec_pres[i]/10
if self.effec_PI[i]>self.PI:
self.effec_PI[i]=self.PI
elif self.effec_PI[i]<0:
self.effec_PI[i]=0
else:
self.effec_PI[i]= 0
elif plot_pres_loss:
self.effec_pres[i] = self.coff*pres_loss_friction(self.length_v,self.diam,self.fluid.dens,abs(self.injT[i]),self.fluid.visc,self.rough)+ \
self.get_pres_loss_icd(abs(self.injT[i]),fluid)
else:
self.effec_pres[i] = self.get_pres_loss_icd(abs(self.injT[i]),self.fluid)
        #normalize the times: handle wrap-around (values that decrease) and shift so the series starts at zero
for i in range(len(self.timeT)):
if i==0 :
continue
if self.timeT[i]<self.timeT[i-1]:
self.timeT[i:] = [it+1.0 for it in self.timeT[i:]]
self.timeT = [it-self.timeT[0] for it in self.timeT]
def read_wat_test_dat(self,fname,ithst=0):
wb = xlrd.open_workbook(fname)
st = wb.sheet_by_index(ithst)
start_line = 1
self.wattest_tT = st.col_values(1)[start_line:]
self.wattest_pT = st.col_values(2)[start_line:] #MPa
self.wattest_inT = st.col_values(3)[start_line:] #L/min
self.wattest_outT = st.col_values(4)[start_line:] #L/min
self.wattest_pT = np.array( self.wattest_pT )
self.wattest_inT = np.array( [i*1e-3*60*24 for i in self.wattest_inT] )#L/min to m3/day
self.wattest_outT = np.array( [abs(i)*1e-3*60*24 for i in self.wattest_outT] )#L/min to m3/day
def ave_rate(self,tarT,istep):
#average rate value to avoid huge oscillation. for water test data only for now
tmpl = tarT.copy()
for i in range(len(tmpl)):
j=i
n=0
total = 0.0
while j>=0 and n<istep:
total+=tmpl[j]
j-=1
n+=1
tarT[i] = total / n
def wat_test_calc_p(self):
global resp,fluid_wat
intoResT = self.wattest_inT - self.wattest_outT
#intoResT[intoResT<0] = 0
#q = pi(bhp-resp)
bhp = intoResT/self.PI*0.1 + resp
#thp = bhp-head+dp
dp_icd = self.get_pres_loss_icd(self.wattest_inT,fluid_wat)
dp_fric = self.coff*pres_loss_friction_array_ver(self.length_v,self.diam,fluid_wat.dens,self.wattest_inT,fluid_wat.visc,self.rough)
dp_head = pres_loss_head( self.tvd, fluid_wat.dens)
self.wattest_calcpT = bhp-dp_head+dp_icd+dp_fric
self.wattest_DP = dp_icd+dp_fric
def set_PI(self,pi):
self.PI = pi
def set_K(self,k):
self.strength = k
def set_coff(self,c):
self.coff = c
def pres_loss(coff,L,D,rou,Q,visc,e):
return pres_loss_friction(L,D,rou,Q,visc,e)*coff
def wat_test_thp_calc(p,well):
#well.set_coff(p[0])
#well.set_K(p)
well.set_PI(p[1])
well.wat_test_calc_p()
return well.wattest_calcpT
def error_thp_calc(p,well,y):
return wat_test_thp_calc(p,well)-y
#Script workflow: 1) use the deviated-section length to fit the measured friction of A4H5 (coefficient 5.4)
# 2) use the fitted friction coefficient to compute the pressure loss along the deviated section; 3) assume the rate is distributed uniformly over the ICDs with the same pressure drop across each (equivalent to fixing the ICD parameters), so computing the pressure drop of the first ICD is enough.
# Total injection / number of ICDs gives the rate entering each ICD; the pressure drop is computed at that rate.
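# e.g. (hypothetical total rate): A4H5 has an 800 m horizontal section, so with icd_interval = 10 m
# there are 80 ICDs; a total injection of 1800 m3/day then feeds 1800/80 = 22.5 m3/day to each ICD,
# and that per-ICD rate is what pres_loss_singleICD() receives.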
# WELL V_MD H_MD
# A4H5 3500 4300
# C3H4 4000 5068
# A3H3 1650 2500
# A5H1 1483 2205
#region match
# v=[8,7,6,5,4,2] #in bpm, 1 bpm = 1800 m3/day
# bpm_2_m3_day = 228.9417
# p2=[7,5.4,4.5,3.1,2,0.65] #in MPa
# v = [it*bpm_2_m3_day for it in v]
# L = 3500 #m
# D = 4.7*0.0254 #inch 2 m
# rou = 1000 # kg/m3
# visc = 0.7 #cp
# e = 0.3*0.001 #mm 2 m
# p_test = [pres_loss(5.4,L,D,rou,it,visc,e) for it in v]
# plt.plot(v,p2,'o',label='实测阻力')
# plt.plot(v,p_test,'r',label='计算阻力')
# plt.legend(loc='upper left')
# #plt.set_xlabel(xlabel='流速(m3/day)',ylabel='压力损失(MPa)')
# plt.xlabel('流速(m3/day)')
# plt.ylabel('压力损失(MPa)')
# plt.show()
# # # y = np.array(p2)
# # # x = np.array(v)
# # # pset = [L,D,rou,visc,e]
# # # para =leastsq(error, 3.7, args=(x,y)) # 进行拟合
# # # p_match = [pres_loss(para,L,D,rou,it,visc,e) for it in v]
# # # plt.plot(v,p_match,'g',label='拟合阻力')
# sys.exit()
#endregion
dat_file_nam = R"C:\Users\shixi\Desktop\workContracts\ShenZhenLh11\WorkSpace\inje_dat_for_reading.xlsx"
resp = 12.4 #Mpa
dens = 1050
visc = 2.0
fluid = Fluid(dens,visc)
fluid_wat = Fluid(1000,0.7)
#a4h5 len tvd diam_in rough_mm
# wA4H5 = Well(3450,1240,4.7,0.3,'A4H5',fluid)
# wA4H5.read_thp_curve(dat_file_nam)
# wA5H1 = Well(3450,1240,4.7,0.3,'A5H1',fluid)
# wA5H1.read_thp_curve(dat_file_nam)
# wA4H5 = Well(3450,1240,4.7,0.3,'A4H5',fluid)
# wA4H5.read_thp_curve(dat_file_nam)
# wA4H5 = Well(3450,1240,4.7,0.3,'A4H5',fluid)
# wA4H5.read_thp_curve(dat_file_nam)
# #plot
# plt.plot(wA4H5.timeT,wA4H5.effec_pres,label='有效压力')
# plt.plot(wA4H5.timeT,wA4H5.presT,label='泵注压力')
# plt.legend(loc='upper right')
# plt.show()
icd_interval=10 #m, to be confirmed; K also needs to be confirmed (open injection?)
plot_icd_pl = False
plot_wat_test_match = False
plot_pres_loss = True
wlist = []
# Friction coefficient fitted against the fresh-water test: 5.4
# vlen hlen tvd diam_in rough_mm, coff, K PI(40-100),
wlist.append(Well(3500,4300, 1240,4.7, 0.3, 'A4H5',fluid, 2.37,0.021,60))
wlist.append(Well(4000,5068, 1246,4.7, 0.3, 'C3H4',fluid, 2.37, 0.021,100 )) #at most ~5 MPa friction; given their ~4 MPa estimate for A4H5 and that C3H4 is longer, this is basically reasonable
wlist.append(Well(1650,2500, 1214,4.7, 0.3, 'A3H3',fluid, 2.37, 0.021,94))#1.7
wlist.append(Well(1483,2205, 1240,4.7, 0.3, 'A5H1',fluid, 2.37, 0.021,55 ))#3.4 is too large, somewhat far-fetched
# wlist.append(Well(2200,3000, 1240,4.7, 0.3, 'test',fluid, 2.37, 0.021,55 ))#3.4 is too large, somewhat far-fetched
# qqq = 1200*1e-3*60*24
# dp1 = -pres_loss_head( wlist[0].tvd, wlist[0].fluid.dens)
# dp2 = wlist[0].coff*pres_loss_friction(wlist[0].length_v,wlist[0].diam,wlist[0].fluid.dens,qqq,wlist[0].fluid.visc,wlist[0].rough)
# dp3 = wlist[0].get_pres_loss_icd(qqq,fluid)
# dp = dp1+dp2+dp3+resp
# print(dp1,'',dp2,'',dp3,'',dp)
# exit(0)
nfig = 4
fig, axs = plt.subplots(nfig)
plt.figure()
i=0
if plot_wat_test_match:
root = R"C:\Users\shixi\Desktop\workContracts\深圳礁灰岩\资料\4连续封隔体控水\清水实验资料\\"
root = os.path.dirname(root)
ave_step = 1
for w in wlist:
fnam = os.path.join(root, w.name+'井清水测试.xlsx')
        for i in range(1): #comment/uncomment to switch between one well with several test datasets and one well with a single dataset
#w.read_wat_test_dat(fnam)
#print('reading sheet 0 of c3h4/a5h1')
w.read_wat_test_dat(fnam,2)
print('reading sheet 1-6 of a3h3')
#axs[i].plot(w.wattest_tT,w.wattest_inT,'go',label=w.name+'ori in')
#axs[i].plot(w.wattest_tT,w.wattest_outT,label=w.name+'ori out')
w.ave_rate(w.wattest_inT,ave_step)
w.ave_rate(w.wattest_outT,ave_step)
#axs[i].plot(w.wattest_tT,w.wattest_inT,'r',label=w.name+'ave in')
#axs[i].plot(w.wattest_tT,w.wattest_outT,label=w.name+'ave out')
#axs[i].legend(loc='upper left')
axs[i].set(ylabel='MPa')
axs[i].plot(w.wattest_tT,w.wattest_pT,label=w.name+'记录井口压力')
w.wat_test_calc_p()
axs[i].plot(w.wattest_tT,w.wattest_calcpT,label=w.name+'计算井口压力')
axs[i].plot(w.wattest_tT,w.wattest_DP,label=w.name+'压力损失')
axs[i].grid()
#match
# p0 = [2.37,100] # 拟合的初始参数设置
# para =leastsq(error_thp_calc, p0, args=(w,w.wattest_pT)) # 进行拟合
# y_fitted = wat_test_thp_calc(para[0],w) # 画出拟合后的曲线
# axs[i].plot(w.wattest_tT,y_fitted,label=w.name+'拟合c,pi:'+str(para[0][0])+' '+str(para[0][1]))
# w.set_K(0.015) #w.set_PI(100)
# w.wat_test_calc_p()
# axs[i].plot(w.wattest_tT,w.wattest_calcpT,label=w.name+'计算井口压力'+str(w.strength))
# w.set_K(0.01) #w.set_PI(100)
# w.wat_test_calc_p()
# axs[i].plot(w.wattest_tT,w.wattest_calcpT,label=w.name+'计算井口压力'+str(w.strength))
# ax2 = axs[i].twinx()
# ax2.plot(w.wattest_tT,w.wattest_inT,'g',label=w.name+'入口流量')
# ax2.plot(w.wattest_tT,w.wattest_inT-w.wattest_outT,'b',label=w.name+'净注入量')
# ax2.set(ylabel='m3/day')
axs[i].legend(loc='upper left')
#dataframe = pd.DataFrame({'Time':w.wattest_tT,'Caculate Pres(MPa)':w.wattest_calcpT,'Record Pres(MPa)':w.wattest_pT})
#dataframe.to_csv(os.path.join(root, w.name+'_'+str(i)+'th_wat_test.csv'),index=False,sep=',')
i+=1
else:
root = os.path.dirname(dat_file_nam)
for w in wlist:
print(w.name+'begin')
w.read_thp_curve(dat_file_nam)
axs[i].plot(w.timeT,w.effec_pres,'r',label=w.name+'有效压力')
axs[i].plot(w.timeT,w.presT,'b',label=w.name+'泵注压力')
ax2 = axs[i].twinx()
ax2.plot(w.timeT,w.effec_PI,'g',label=w.name+'有效PI')
axs[i].legend(loc='upper right')
axs[i].set(ylabel='MPa')
ax2.set(ylabel='PI')
ax2.legend(loc='upper right')
plt.plot(w.timeT,w.effec_pres,label=w.name+'有效压力')
#plt.plot(w.timeT,w.effec_PI,label=w.name+'有效PI')
plt.legend(loc='upper right')
#plt.set(ylabel='MPa')
#output csv
#dataframe = pd.DataFrame({'Time':w.timeT,'Effective_pressure':w.effec_pres})
#dataframe.to_csv(os.path.join(root, w.name+'_effcPres.csv'),index=False,sep=',')
i+=1
plt.show()
| 34.349614 | 154 | 0.592277 |
4a20a86e7d839bab12b45f3485abd44eeb5ea4f3 | 186 | py | Python | src/xliff/apps.py | Integreat/integreat-cms | b3f80964a6182d714f26ac229342b47e1c7c4f29 | ["Apache-2.0"] | 14 | 2020-12-03T07:56:30.000Z | 2021-10-30T13:09:50.000Z | src/xliff/apps.py | Integreat/integreat-cms | b3f80964a6182d714f26ac229342b47e1c7c4f29 | ["Apache-2.0"] | 367 | 2020-11-20T00:34:20.000Z | 2021-12-14T15:20:42.000Z | src/xliff/apps.py | Integreat/integreat-cms | b3f80964a6182d714f26ac229342b47e1c7c4f29 | ["Apache-2.0"] | 3 | 2021-02-09T18:46:52.000Z | 2021-12-07T10:41:39.000Z |
"""
Configuration of XLIFF app
"""
from django.apps import AppConfig
class XLIFFConfig(AppConfig):
"""
XLIFF config inheriting the django AppConfig
"""
name = "xliff"
| 14.307692 | 48 | 0.672043 |
4a20a8bf59ab3c96c69f1159acc44ff088d1d57d | 305 | py | Python | assignmenthub/page_edits/migrations/0011_delete_privacypolicy.py | webspace95/assignment_hub_prototype | bf1b588fa817e1d622f965cb9536c986a4a965cf | ["MIT"] | null | null | null | assignmenthub/page_edits/migrations/0011_delete_privacypolicy.py | webspace95/assignment_hub_prototype | bf1b588fa817e1d622f965cb9536c986a4a965cf | ["MIT"] | null | null | null | assignmenthub/page_edits/migrations/0011_delete_privacypolicy.py | webspace95/assignment_hub_prototype | bf1b588fa817e1d622f965cb9536c986a4a965cf | ["MIT"] | null | null | null |
# Generated by Django 3.0.4 on 2022-03-11 09:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('page_edits', '0010_howweworktext_body'),
]
operations = [
migrations.DeleteModel(
name='PrivacyPolicy',
),
]
| 17.941176 | 50 | 0.616393 |
4a20a8c427f7355818383d0c14289c7ed04fc51c | 319 | py | Python | tests/test_utils.py | matt-carr/cloudstorage | 6bcf731f14678ad1469234311a7748691be529cd | ["MIT"] | null | null | null | tests/test_utils.py | matt-carr/cloudstorage | 6bcf731f14678ad1469234311a7748691be529cd | ["MIT"] | null | null | null | tests/test_utils.py | matt-carr/cloudstorage | 6bcf731f14678ad1469234311a7748691be529cd | ["MIT"] | null | null | null |
from cloudstorage.utils import rgetattr, rsetattr
def test_rgetattr():
b = type('B', (), {'c': True})()
a = type('A', (), {'b': b})()
assert rgetattr(a, 'b.c')
def test_rsetattr():
b = type('B', (), {'c': True})()
a = type('A', (), {'b': b})()
rsetattr(a, 'b.c', False)
assert not a.b.c
| 21.266667 | 49 | 0.495298 |
4a20aa3496f82f952cc332c68731c9339df26881 | 12,525 | py | Python | peering/tables.py | zimage/peering-manager | 8765c276b01b182d267d9be81413087a1c6f7ce9 | ["Apache-2.0"] | null | null | null | peering/tables.py | zimage/peering-manager | 8765c276b01b182d267d9be81413087a1c6f7ce9 | ["Apache-2.0"] | null | null | null | peering/tables.py | zimage/peering-manager | 8765c276b01b182d267d9be81413087a1c6f7ce9 | ["Apache-2.0"] | null | null | null |
import django_tables2 as tables
from .models import (
AutonomousSystem,
Community,
ConfigurationTemplate,
DirectPeeringSession,
InternetExchange,
InternetExchangePeeringSession,
Router,
RoutingPolicy,
)
from peeringdb.models import PeerRecord
from peering_manager import settings
from utils.tables import ActionsColumn, BaseTable, SelectColumn
from utils.templatetags.helpers import markdown
AUTONOMOUS_SYSTEM_ACTIONS = """
{% if perms.peering.change_autonomoussystem %}
<a href="{% url 'peering:autonomous_system_edit' asn=record.asn %}" class="btn btn-xs btn-warning"><i class="fas fa-edit"></i></a>
{% endif %}
"""
AUTONOMOUS_SYSTEM_HAS_POTENTIAL_IX_PEERING_SESSIONS = """
{% if record.has_potential_ix_peering_sessions %}
<span class="text-right" data-toggle="tooltip" data-placement="left" title="Potential Peering Sessions">
<i class="fas fa-exclamation-circle text-warning"></i>
</span>
{% endif %}
"""
BGPSESSION_STATUS = """
{% if record.enabled %}
<i class="fas fa-check-square text-success"></i>
{% else %}
<i class="fas fa-times text-danger"></i>
{% endif %}
"""
BGP_RELATIONSHIP = "{{ record.get_relationship_html }}"
COMMUNITY_ACTIONS = """
{% if perms.peering.change_community %}
<a href="{% url 'peering:community_edit' pk=record.pk %}" class="btn btn-xs btn-warning"><i class="fas fa-edit"></i></a>
{% endif %}
"""
COMMUNITY_TYPE = "{{ record.get_type_html }}"
CONFIGURATION_TEMPLATE_ACTIONS = """
{% if perms.peering.change_configurationtemplate %}
<a href="{% url 'peering:configuration_template_edit' pk=record.pk %}" class="btn btn-xs btn-warning"><i class="fas fa-edit"></i></a>
{% endif %}
"""
DIRECT_PEERING_SESSION_ACTIONS = """
{% load helpers %}
{% if record.comment %}
<button type="button" class="btn btn-xs btn-info popover-hover" data-toggle="popover" data-html="true" title="Peering Session Comments" data-content="{{ record.comment | markdown }}"><i class="fas fa-comment"></i></button>
{% endif %}
{% if record.autonomous_system.comment %}
<button type="button" class="btn btn-xs btn-info popover-hover" data-toggle="popover" data-html="true" title="Autonomous System Comments" data-content="{{ record.autonomous_system.comment | markdown }}"><i class="fas fa-comments"></i></button>
{% endif %}
{% if perms.peering.change_directpeeringsession %}
<a href="{% url 'peering:direct_peering_session_edit' pk=record.pk %}" class="btn btn-xs btn-warning"><i class="fas fa-edit"></i></a>
{% endif %}
"""
INTERNET_EXCHANGE_ACTIONS = """
{% if perms.peering.change_internetexchange %}
<a href="{% url 'peering:internet_exchange_edit' slug=record.slug %}" class="btn btn-xs btn-warning"><i class="fas fa-edit"></i></a>
{% endif %}
"""
INTERNET_EXCHANGE_PEERING_SESSION_ACTIONS = """
{% load helpers %}
{% if record.comment %}
<button type="button" class="btn btn-xs btn-info popover-hover" data-toggle="popover" data-html="true" title="Peering Session Comments" data-content="{{ record.comment | markdown }}"><i class="fas fa-comment"></i></button>
{% endif %}
{% if record.autonomous_system.comment %}
<button type="button" class="btn btn-xs btn-info popover-hover" data-toggle="popover" data-html="true" title="Autonomous System Comments" data-content="{{ record.autonomous_system.comment | markdown }}"><i class="fas fa-comments"></i></button>
{% endif %}
{% if record.internet_exchange.comment %}
<button type="button" class="btn btn-xs btn-info popover-hover" data-toggle="popover" data-html="true" title="Internet Exchange Comments" data-content="{{ record.internet_exchange.comment | markdown }}"><i class="fas fa-comment-dots"></i></button>
{% endif %}
{% if perms.peering.change_internetexchangepeeringsession %}
<a href="{% url 'peering:internet_exchange_peering_session_edit' pk=record.pk %}" class="btn btn-xs btn-warning"><i class="fas fa-edit"></i></a>
{% endif %}
"""
INTERNET_EXCHANGE_PEERING_SESSION_IS_ROUTE_SERVER = """
{% if record.is_route_server %}
<i class="fas fa-check-square text-success"></i>
{% else %}
<i class="fas fa-times text-danger"></i>
{% endif %}
"""
ROUTER_ACTIONS = """
{% if perms.peering.change_router %}
<a href="{% url 'peering:router_edit' pk=record.pk %}" class="btn btn-xs btn-warning"><i class="fas fa-edit"></i></a>
{% endif %}
"""
ROUTER_ENCRYPT_PASSWORD = """
{% if record.encrypt_passwords %}
<i class="fas fa-check-square text-success"></i>
{% else %}
<i class="fas fa-times text-danger"></i>
{% endif %}
"""
ROUTING_POLICY_ACTIONS = """
{% if perms.peering.change_routingpolicy %}
<a href="{% url 'peering:routing_policy_edit' pk=record.pk %}" class="btn btn-xs btn-warning"><i class="fas fa-edit"></i></a>
{% endif %}
"""
ROUTING_POLICY_TYPE = "{{ record.get_type_html }}"
class BGPSessionStateColumn(tables.TemplateColumn):
def __init__(self, *args, **kwargs):
default = kwargs.pop("default", "")
visible = kwargs.pop(
"visible", settings.NAPALM_USERNAME and settings.NAPALM_PASSWORD
)
verbose_name = kwargs.pop("verbose_name", "State")
template_code = kwargs.pop("template_code", "{{ record.get_bgp_state_html }}")
super().__init__(
*args,
default=default,
verbose_name=verbose_name,
template_code=template_code,
visible=visible,
**kwargs
)
class AutonomousSystemTable(BaseTable):
"""
Table for AutonomousSystem lists
"""
pk = SelectColumn()
asn = tables.Column(verbose_name="ASN")
name = tables.LinkColumn()
irr_as_set = tables.Column(verbose_name="IRR AS-SET", orderable=False)
ipv6_max_prefixes = tables.Column(verbose_name="IPv6 Max Prefixes")
ipv4_max_prefixes = tables.Column(verbose_name="IPv4 Max Prefixes")
has_potential_ix_peering_sessions = tables.TemplateColumn(
verbose_name="",
orderable=False,
template_code=AUTONOMOUS_SYSTEM_HAS_POTENTIAL_IX_PEERING_SESSIONS,
)
actions = ActionsColumn(template_code=AUTONOMOUS_SYSTEM_ACTIONS)
class Meta(BaseTable.Meta):
model = AutonomousSystem
fields = (
"pk",
"asn",
"name",
"irr_as_set",
"ipv6_max_prefixes",
"ipv4_max_prefixes",
"actions",
)
class CommunityTable(BaseTable):
"""
Table for Community lists
"""
pk = SelectColumn()
name = tables.LinkColumn()
type = tables.TemplateColumn(template_code=COMMUNITY_TYPE)
actions = ActionsColumn(template_code=COMMUNITY_ACTIONS)
class Meta(BaseTable.Meta):
model = Community
fields = ("pk", "name", "value", "type", "actions")
class ConfigurationTemplateTable(BaseTable):
"""
Table for ConfigurationTemplate lists
"""
pk = SelectColumn()
name = tables.LinkColumn()
actions = ActionsColumn(template_code=CONFIGURATION_TEMPLATE_ACTIONS)
class Meta(BaseTable.Meta):
model = ConfigurationTemplate
fields = ("pk", "name", "updated", "actions")
class DirectPeeringSessionTable(BaseTable):
"""
Table for DirectPeeringSession lists
"""
pk = SelectColumn()
local_asn = tables.Column(verbose_name="Local ASN")
autonomous_system = tables.LinkColumn(verbose_name="AS")
ip_address = tables.LinkColumn(verbose_name="IP Address")
relationship = tables.TemplateColumn(
verbose_name="Relationship", template_code=BGP_RELATIONSHIP
)
enabled = tables.TemplateColumn(
verbose_name="Status", template_code=BGPSESSION_STATUS
)
session_state = BGPSessionStateColumn(accessor="bgp_state")
router = tables.RelatedLinkColumn(verbose_name="Router", accessor="router")
actions = ActionsColumn(template_code=DIRECT_PEERING_SESSION_ACTIONS)
class Meta(BaseTable.Meta):
model = DirectPeeringSession
fields = (
"pk",
"local_asn",
"autonomous_system",
"ip_address",
"relationship",
"enabled",
"session_state",
"router",
"actions",
)
class InternetExchangeTable(BaseTable):
"""
Table for InternetExchange lists
"""
pk = SelectColumn()
name = tables.LinkColumn()
ipv6_address = tables.Column(verbose_name="IPv6 Address")
ipv4_address = tables.Column(verbose_name="IPv4 Address")
configuration_template = tables.RelatedLinkColumn(
verbose_name="Template", accessor="configuration_template"
)
router = tables.RelatedLinkColumn(verbose_name="Router", accessor="router")
actions = ActionsColumn(template_code=INTERNET_EXCHANGE_ACTIONS)
class Meta(BaseTable.Meta):
model = InternetExchange
fields = (
"pk",
"name",
"ipv6_address",
"ipv4_address",
"configuration_template",
"router",
"actions",
)
class InternetExchangePeeringSessionTable(BaseTable):
"""
Table for InternetExchangePeeringSession lists
"""
pk = SelectColumn()
asn = tables.Column(verbose_name="ASN", accessor="autonomous_system.asn")
autonomous_system = tables.RelatedLinkColumn(
verbose_name="AS Name",
accessor="autonomous_system",
text=lambda record: record.autonomous_system.name,
)
internet_exchange = tables.RelatedLinkColumn(
verbose_name="IX Name", accessor="internet_exchange"
)
ip_address = tables.LinkColumn(verbose_name="IP Address")
is_route_server = tables.TemplateColumn(
verbose_name="Route Server",
template_code=INTERNET_EXCHANGE_PEERING_SESSION_IS_ROUTE_SERVER,
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
enabled = tables.TemplateColumn(
verbose_name="Enabled",
template_code=BGPSESSION_STATUS,
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
session_state = BGPSessionStateColumn(accessor="bgp_state")
actions = ActionsColumn(template_code=INTERNET_EXCHANGE_PEERING_SESSION_ACTIONS)
class Meta(BaseTable.Meta):
model = InternetExchangePeeringSession
fields = (
"pk",
"asn",
"autonomous_system",
"internet_exchange",
"ip_address",
"is_route_server",
"enabled",
"session_state",
"actions",
)
class PeerRecordTable(BaseTable):
"""
Table for PeerRecord lists
"""
empty_text = "No available peers found."
pk = SelectColumn()
asn = tables.Column(verbose_name="ASN", accessor="network.asn")
name = tables.Column(verbose_name="AS Name", accessor="network.name")
irr_as_set = tables.Column(
verbose_name="IRR AS-SET", accessor="network.irr_as_set", orderable=False
)
ipv6_max_prefixes = tables.Column(
verbose_name="IPv6", accessor="network.info_prefixes6"
)
ipv4_max_prefixes = tables.Column(
verbose_name="IPv4", accessor="network.info_prefixes4"
)
ipv6_address = tables.Column(
verbose_name="IPv6 Address", accessor="network_ixlan.ipaddr6"
)
ipv4_address = tables.Column(
verbose_name="IPv4 Address", accessor="network_ixlan.ipaddr4"
)
class Meta(BaseTable.Meta):
model = PeerRecord
fields = (
"pk",
"asn",
"name",
"irr_as_set",
"ipv6_max_prefixes",
"ipv4_max_prefixes",
"ipv6_address",
"ipv4_address",
)
class RouterTable(BaseTable):
"""
Table for Router lists
"""
pk = SelectColumn()
name = tables.LinkColumn()
encrypt_passwords = tables.TemplateColumn(
verbose_name="Encrypt Password",
template_code=ROUTER_ENCRYPT_PASSWORD,
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
actions = ActionsColumn(template_code=ROUTER_ACTIONS)
class Meta(BaseTable.Meta):
model = Router
fields = ("pk", "name", "hostname", "platform", "encrypt_passwords", "actions")
class RoutingPolicyTable(BaseTable):
"""
Table for RoutingPolicy lists
"""
pk = SelectColumn()
name = tables.LinkColumn()
type = tables.TemplateColumn(template_code=ROUTING_POLICY_TYPE)
actions = ActionsColumn(template_code=ROUTING_POLICY_ACTIONS)
class Meta(BaseTable.Meta):
model = RoutingPolicy
fields = ("pk", "name", "type", "weight", "address_family", "actions")
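# Usage sketch (not part of this module): these are standard django-tables2 tables, so a
# view typically instantiates one with a queryset and lets RequestConfig wire up sorting
# and pagination. The import paths and template name below are assumptions.
#
#     from django_tables2 import RequestConfig
#     from django.shortcuts import render
#     from peering.models import AutonomousSystem
#
#     def autonomous_system_list(request):
#         table = AutonomousSystemTable(AutonomousSystem.objects.all())
#         RequestConfig(request, paginate={"per_page": 25}).configure(table)
#         return render(request, "peering/as/list.html", {"table": table})
#
#     # in the template: {% load django_tables2 %} ... {% render_table table %}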
| 34.409341 | 247 | 0.663393 |
4a20aa63561c3f9e86f385d00836641dc6ddc374 | 1,487 | py | Python | src/utils.py | YechielK/rcp2 | 7135d4fe2538dd5e7ac7e61a7351ed09473c95dc | [
"MIT"
] | 10 | 2019-02-22T03:48:16.000Z | 2022-02-06T00:23:53.000Z | src/utils.py | YechielK/rcp2 | 7135d4fe2538dd5e7ac7e61a7351ed09473c95dc | [
"MIT"
] | 82 | 2019-02-19T20:28:00.000Z | 2022-03-20T16:55:45.000Z | src/utils.py | YechielK/rcp2 | 7135d4fe2538dd5e7ac7e61a7351ed09473c95dc | [
"MIT"
] | 30 | 2019-02-18T17:25:31.000Z | 2022-01-22T14:38:29.000Z | """General project utilities.
The following top-level variables make it easier to reference specific
directories in other scripts and notebooks:
- :data:`src.utils.ROOT`: The project root (usually named ``rcp2/``).
- :data:`src.utils.DATA`: The project data directory and subdirectories
- :data:`src.utils.GEOID`: GEOIDs are the ID system for census geographies;
  the dict stores ``'geography_name': geoid_length`` key-value pairs
.. note::
The ``DATA`` variable assumes that you have downloaded and set up the data
directory as follows: ::
Data/
├── 03_Shapefiles
│ ├── 2010_Census_shapefiles
│ └── SVI2016_US_shapefiles
├── Master Project Data
├── processed
└── raw
"""
import pathlib
# The project root diretory.
ROOT = pathlib.Path(__file__, "..", "..").resolve()
# Paths to project data directories.
DATA = {
"data": ROOT / "Data",
"raw": ROOT / "Data" / "raw",
"interim": ROOT / "Data" / 'interim',
"master": ROOT / "Data" / "Master Project Data",
"model-outputs": ROOT / "Data" / "Model Outputs",
"processed": ROOT / "Data" / "processed",
"shapefiles": ROOT / "Data" / "03_Shapefiles",
"shapefiles-census": ROOT / "Data" / "03_Shapefiles" / "2010_Census_shapefiles",
"shapefiles-svi": ROOT / "Data" / "03_Shapefiles" / "SVI2016_US_shapefiles",
'acs' : ROOT /'Data' / 'Model Outputs' / 'ACS_Extraction'
}
GEOID = {
'state': 2,
'county': 5,
'tract' : 11,
'block_group' : 12
}
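# Minimal usage example: build a path into the raw-data directory and truncate a
# block-group GEOID to its county prefix using the lengths above. The CSV file name is
# only illustrative.
if __name__ == "__main__":
    incidents_csv = DATA["raw"] / "incidents.csv"    # hypothetical file
    block_group = "170312301001"                     # 12-digit block-group GEOID
    county = block_group[:GEOID["county"]]           # -> "17031" (state 17, county 031)
    print(incidents_csv, county)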
| 28.056604 | 84 | 0.635508 |
4a20aa651cd49e38411e9cd47333c4b5633f2b0a | 6,443 | py | Python | homeassistant/components/opentherm_gw/sensor.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 6 | 2017-08-02T19:26:39.000Z | 2020-03-14T22:47:41.000Z | homeassistant/components/opentherm_gw/sensor.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 58 | 2020-08-03T07:33:02.000Z | 2022-03-31T06:02:05.000Z | homeassistant/components/opentherm_gw/sensor.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Support for OpenTherm Gateway sensors."""
import logging
from pprint import pformat
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from homeassistant.const import CONF_ID
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.entity_registry import async_get_registry
from . import DOMAIN
from .const import (
DATA_GATEWAYS,
DATA_OPENTHERM_GW,
DEPRECATED_SENSOR_SOURCE_LOOKUP,
SENSOR_INFO,
TRANSLATE_SOURCE,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the OpenTherm Gateway sensors."""
sensors = []
deprecated_sensors = []
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]]
ent_reg = await async_get_registry(hass)
for var, info in SENSOR_INFO.items():
device_class = info[0]
unit = info[1]
friendly_name_format = info[2]
status_sources = info[3]
for source in status_sources:
sensors.append(
OpenThermSensor(
gw_dev,
var,
source,
device_class,
unit,
friendly_name_format,
)
)
old_style_entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, f"{var}_{gw_dev.gw_id}", hass=gw_dev.hass
)
old_ent = ent_reg.async_get(old_style_entity_id)
if old_ent and old_ent.config_entry_id == config_entry.entry_id:
if old_ent.disabled:
ent_reg.async_remove(old_style_entity_id)
else:
deprecated_sensors.append(
DeprecatedOpenThermSensor(
gw_dev,
var,
device_class,
unit,
friendly_name_format,
)
)
sensors.extend(deprecated_sensors)
if deprecated_sensors:
_LOGGER.warning(
"The following sensor entities are deprecated and may no "
"longer behave as expected. They will be removed in a future "
"version. You can force removal of these entities by disabling "
"them and restarting Home Assistant.\n%s",
pformat([s.entity_id for s in deprecated_sensors]),
)
async_add_entities(sensors)
class OpenThermSensor(Entity):
"""Representation of an OpenTherm Gateway sensor."""
def __init__(self, gw_dev, var, source, device_class, unit, friendly_name_format):
"""Initialize the OpenTherm Gateway sensor."""
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, f"{var}_{source}_{gw_dev.gw_id}", hass=gw_dev.hass
)
self._gateway = gw_dev
self._var = var
self._source = source
self._value = None
self._device_class = device_class
self._unit = unit
if TRANSLATE_SOURCE[source] is not None:
friendly_name_format = (
f"{friendly_name_format} ({TRANSLATE_SOURCE[source]})"
)
self._friendly_name = friendly_name_format.format(gw_dev.name)
self._unsub_updates = None
async def async_added_to_hass(self):
"""Subscribe to updates from the component."""
_LOGGER.debug("Added OpenTherm Gateway sensor %s", self._friendly_name)
self._unsub_updates = async_dispatcher_connect(
self.hass, self._gateway.update_signal, self.receive_report
)
async def async_will_remove_from_hass(self):
"""Unsubscribe from updates from the component."""
_LOGGER.debug("Removing OpenTherm Gateway sensor %s", self._friendly_name)
self._unsub_updates()
@property
def available(self):
"""Return availability of the sensor."""
return self._value is not None
@property
def entity_registry_enabled_default(self):
"""Disable sensors by default."""
return False
@callback
def receive_report(self, status):
"""Handle status updates from the component."""
value = status[self._source].get(self._var)
if isinstance(value, float):
value = f"{value:2.1f}"
self._value = value
self.async_write_ha_state()
@property
def name(self):
"""Return the friendly name of the sensor."""
return self._friendly_name
@property
def device_info(self):
"""Return device info."""
return {
"identifiers": {(DOMAIN, self._gateway.gw_id)},
"name": self._gateway.name,
"manufacturer": "Schelte Bron",
"model": "OpenTherm Gateway",
"sw_version": self._gateway.gw_version,
}
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._gateway.gw_id}-{self._source}-{self._var}"
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def state(self):
"""Return the state of the device."""
return self._value
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def should_poll(self):
"""Return False because entity pushes its state."""
return False
class DeprecatedOpenThermSensor(OpenThermSensor):
"""Represent a deprecated OpenTherm Gateway Sensor."""
# pylint: disable=super-init-not-called
def __init__(self, gw_dev, var, device_class, unit, friendly_name_format):
"""Initialize the OpenTherm Gateway sensor."""
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, f"{var}_{gw_dev.gw_id}", hass=gw_dev.hass
)
self._gateway = gw_dev
self._var = var
self._source = DEPRECATED_SENSOR_SOURCE_LOOKUP[var]
self._value = None
self._device_class = device_class
self._unit = unit
self._friendly_name = friendly_name_format.format(gw_dev.name)
self._unsub_updates = None
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._gateway.gw_id}-{self._var}"
| 32.872449 | 86 | 0.625485 |
4a20aadc2e223c042e4faf492ce59ccaae0cddfd | 1,881 | py | Python | lib/_included_packages/plexnet/mediachoice.py | aleenator/plex-for-kodi | e6610e42ce1afd115cf59632b949e18597625323 | [
"BSD-3-Clause"
] | 233 | 2016-12-01T10:00:33.000Z | 2022-03-26T21:43:47.000Z | lib/_included_packages/plexnet/mediachoice.py | aleenator/plex-for-kodi | e6610e42ce1afd115cf59632b949e18597625323 | [
"BSD-3-Clause"
] | 327 | 2016-11-30T18:27:32.000Z | 2022-03-13T20:49:27.000Z | lib/_included_packages/plexnet/mediachoice.py | aleenator/plex-for-kodi | e6610e42ce1afd115cf59632b949e18597625323 | [
"BSD-3-Clause"
] | 140 | 2016-12-01T18:41:24.000Z | 2022-03-08T17:08:44.000Z | from __future__ import absolute_import
from . import plexstream
from . import util
class MediaChoice(object):
SUBTITLES_DEFAULT = 0
SUBTITLES_BURN = 1
SUBTITLES_SOFT_DP = 2
SUBTITLES_SOFT_ANY = 3
def __init__(self, media=None, partIndex=0):
self.media = media
self.part = None
self.forceTranscode = False
self.isDirectPlayable = False
self.videoStream = None
self.audioStream = None
self.subtitleStream = None
self.isSelected = False
self.subtitleDecision = self.SUBTITLES_DEFAULT
self.sorts = util.AttributeDict()
if media:
self.indirectHeaders = media.indirectHeaders
self.part = media.parts[partIndex]
if self.part:
# We generally just rely on PMS to have told us selected streams, so
# initialize our streams accordingly.
self.videoStream = self.part.getSelectedStreamOfType(plexstream.PlexStream.TYPE_VIDEO)
self.audioStream = self.part.getSelectedStreamOfType(plexstream.PlexStream.TYPE_AUDIO)
self.subtitleStream = self.part.getSelectedStreamOfType(plexstream.PlexStream.TYPE_SUBTITLE)
else:
util.WARN_LOG("Media does not contain a valid part")
util.LOG("Choice media: {0} part:{1}".format(media, partIndex))
for streamType in ("videoStream", "audioStream", "subtitleStream"):
attr = getattr(self, streamType)
if attr:
util.LOG("Choice {0}: {1}".format(streamType, repr(attr)))
else:
util.WARN_LOG("Could not create media choice for invalid media")
def __str__(self):
return "direct playable={0} version={1}".format(self.isDirectPlayable, self.media)
def __repr__(self):
return self.__str__()
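# Illustrative construction (the `item` accessors below are assumptions about the calling
# code, which normally builds one MediaChoice per playable media version):
#
#     choice = MediaChoice(item.media()[0], partIndex=0)
#     if choice.subtitleStream and choice.subtitleDecision == MediaChoice.SUBTITLES_BURN:
#         ...  # transcode with burned-in subtitles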
| 36.882353 | 108 | 0.633705 |
4a20ab1ebbc9ccce112835135639f5a6355837db | 655 | py | Python | foodData/processFile.py | IBM/mpnn | 218697020de8ecb48b908cc1ed58c4633de398a1 | [
"Apache-2.0"
] | 1 | 2020-08-01T15:52:17.000Z | 2020-08-01T15:52:17.000Z | foodData/processFile.py | IBM/mpnn | 218697020de8ecb48b908cc1ed58c4633de398a1 | [
"Apache-2.0"
] | 6 | 2019-12-16T21:59:48.000Z | 2022-02-10T00:13:21.000Z | foodData/processFile.py | IBM/mpnn | 218697020de8ecb48b908cc1ed58c4633de398a1 | [
"Apache-2.0"
] | 6 | 2019-11-02T05:48:42.000Z | 2021-09-03T03:38:27.000Z | import numpy as np
# The inference file stores records as triplets of lines:
# a character sequence followed by two rows of target angles.
with open("./foodData/doinfer.txt") as f:
    data = f.readlines()
seq = []
ang1 = []
ang2 = []
for i in range(len(data)):
    if i % 3 == 0:
        # Sequence line: one character per position.
        line = np.asarray(list(data[i].strip()))
        seq.append(line)
    elif i % 3 == 1:
        # First angle line: space-separated values, dropping empty tokens.
        ang1s = np.asarray(data[i].strip().split(" "))
        ang1.append(ang1s[np.where(ang1s != '')])
    else:
        # Second angle line: same treatment as the first.
        ang2s = np.asarray(data[i].strip().split(" "))
        ang2.append(ang2s[np.where(ang2s != '')])
# Save the parsed arrays for the inference pipeline.
np.save("doinfer.npy", np.asarray(seq))
np.save("doinfer_ang1_tgt.npy", np.asarray(ang1))
np.save("doinfer_ang2_tgt.npy", np.asarray(ang2))
print(seq)
print(ang1)
print(ang2) | 22.586207 | 52 | 0.610687 |
4a20ab49c67c68a77a93bb079909eaa8f1336f63 | 1,448 | py | Python | runtests.py | Frojd/Fabrik | 9f2edbba97a7fd236b72a9b3010f6e912ab5c001 | [
"MIT"
] | 12 | 2015-11-03T20:41:29.000Z | 2019-02-15T17:13:27.000Z | runtests.py | Frojd/Fabrik | 9f2edbba97a7fd236b72a9b3010f6e912ab5c001 | [
"MIT"
] | 35 | 2015-08-23T17:10:00.000Z | 2017-05-10T12:08:57.000Z | runtests.py | Frojd/Fabrik | 9f2edbba97a7fd236b72a9b3010f6e912ab5c001 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import sys
from fabric.state import env
from fabric.state import output
test_suite = [
"tests.test_api",
"tests.test_ext",
"tests.test_recipes",
"tests.test_cli",
]
def prepare_test():
import logging
from fabrik.logger import logger
# Mute fabric
output["status"] = False
output["aborts"] = False
output["warnings"] = False
output["running"] = False
output["stdout"] = False
output["stderr"] = False
output["exceptions"] = False
output["debug"] = False
# Raise exceptions on errors
env.raise_errors = True
# Disable fabrik logging
logger.setLevel(logging.CRITICAL)
def runtests(test_modules=None):
prepare_test()
# List of modules to test
if (not test_modules):
test_modules = test_suite
# Construct and run test suite
suite = unittest.TestSuite()
for t in test_modules:
try:
mod = __import__(t, globals(), locals(), ["suite"])
suitefn = getattr(mod, "suite")
suite.addTest(suitefn())
except (ImportError, AttributeError):
suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))
result = unittest.TextTestRunner().run(suite)
sys.exit(not result.wasSuccessful())
if __name__ == "__main__":
test_modules = None
if len(sys.argv) > 1:
test_modules = [sys.argv[1]]
runtests(test_modules)
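# Example invocations (from the repository root, assuming the tests package is importable):
#
#     python runtests.py                   # run the default suite listed in test_suite
#     python runtests.py tests.test_api    # run a single test module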
| 21.61194 | 74 | 0.638122 |
4a20abda3b0a642c283108bad2c1a485b3c3c4ec | 1,212 | py | Python | iamheadless_publisher_site_homepages/pydantic_models.py | plain-ie/iamheadless_publisher_site_homepages | f7263de520c895664b508702495b34aac4aed8ae | [
"MIT"
] | null | null | null | iamheadless_publisher_site_homepages/pydantic_models.py | plain-ie/iamheadless_publisher_site_homepages | f7263de520c895664b508702495b34aac4aed8ae | [
"MIT"
] | null | null | null | iamheadless_publisher_site_homepages/pydantic_models.py | plain-ie/iamheadless_publisher_site_homepages | f7263de520c895664b508702495b34aac4aed8ae | [
"MIT"
] | null | null | null | from typing import List, Optional
from django.shortcuts import reverse
from iamheadless_publisher_site.pydantic_models import BaseItemContentsPydanticModel, BaseItemDataPydanticModel, BaseItemPydanticModel
from .conf import settings
from .urls import urlpatterns
class HomepageContentPydanticModel(BaseItemContentsPydanticModel):
title: str
language: str
content: Optional[str]
seo_keywords: Optional[str]
seo_description: Optional[str]
class HomepageDataPydanticModel(BaseItemDataPydanticModel):
contents: List[HomepageContentPydanticModel]
class HomepagePydanticModel(BaseItemPydanticModel):
_content_model = HomepageContentPydanticModel
_data_model = HomepageDataPydanticModel
_display_name_plural = 'homepages'
_display_name_singular = 'homepage'
_item_type = 'homepage'
_searchable = False
_browsable = True
_urlpatterns = urlpatterns
data: HomepageDataPydanticModel
def get_item_url(self, language):
return reverse(
settings.URLNAME_HOMEPAGE,
kwargs={
'language': language
}
)
@property
def CONTENTS(self):
return self.dict()['data']['contents']
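# Construction sketch: the exact required fields depend on the base item models, so the
# keyword arguments below are a minimal, assumption-laden example.
#
#     page = HomepagePydanticModel(
#         data=HomepageDataPydanticModel(
#             contents=[
#                 HomepageContentPydanticModel(title="Home", language="en", content="<p>Hi</p>"),
#             ]
#         )
#     )
#     page.CONTENTS            # -> list of content dicts
#     page.get_item_url("en")  # -> reverse(settings.URLNAME_HOMEPAGE, kwargs={"language": "en"})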
| 26.347826 | 134 | 0.738449 |
4a20ac5c60790cf00cf9c5c037e3f3dc134dc263 | 5,264 | py | Python | ask-sdk-model/ask_sdk_model/events/skillevents/permission_accepted_request.py | nikhilym/alexa-apis-for-python | be6b31529dba4c9b4db7d5e5b7cb509761b0c58d | [
"Apache-2.0"
] | null | null | null | ask-sdk-model/ask_sdk_model/events/skillevents/permission_accepted_request.py | nikhilym/alexa-apis-for-python | be6b31529dba4c9b4db7d5e5b7cb509761b0c58d | [
"Apache-2.0"
] | null | null | null | ask-sdk-model/ask_sdk_model/events/skillevents/permission_accepted_request.py | nikhilym/alexa-apis-for-python | be6b31529dba4c9b4db7d5e5b7cb509761b0c58d | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.events.skillevents.permission_body import PermissionBody
class PermissionAcceptedRequest(Request):
"""
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param body:
:type body: (optional) ask_sdk_model.events.skillevents.permission_body.PermissionBody
:param event_creation_time:
:type event_creation_time: (optional) datetime
:param event_publishing_time:
:type event_publishing_time: (optional) datetime
"""
deserialized_types = {
'object_type': 'str',
'request_id': 'str',
'timestamp': 'datetime',
'body': 'ask_sdk_model.events.skillevents.permission_body.PermissionBody',
'event_creation_time': 'datetime',
'event_publishing_time': 'datetime'
}
attribute_map = {
'object_type': 'type',
'request_id': 'requestId',
'timestamp': 'timestamp',
'body': 'body',
'event_creation_time': 'eventCreationTime',
'event_publishing_time': 'eventPublishingTime'
}
def __init__(self, request_id=None, timestamp=None, body=None, event_creation_time=None, event_publishing_time=None):
# type: (Optional[str], Optional[datetime], Optional[PermissionBody], Optional[datetime], Optional[datetime]) -> None
"""
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param body:
:type body: (optional) ask_sdk_model.events.skillevents.permission_body.PermissionBody
:param event_creation_time:
:type event_creation_time: (optional) datetime
:param event_publishing_time:
:type event_publishing_time: (optional) datetime
"""
self.__discriminator_value = "AlexaSkillEvent.SkillPermissionAccepted"
self.object_type = self.__discriminator_value
super(PermissionAcceptedRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp)
self.body = body
self.event_creation_time = event_creation_time
self.event_publishing_time = event_publishing_time
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, PermissionAcceptedRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
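# Round-trip sketch (hypothetical values; PermissionBody's own fields live in its module
# and are not spelled out here):
#
#     from datetime import datetime
#     from ask_sdk_model.events.skillevents.permission_body import PermissionBody
#
#     request = PermissionAcceptedRequest(
#         request_id="amzn1.echo-api.request.example",
#         timestamp=datetime.utcnow(),
#         body=PermissionBody(),
#         event_creation_time=datetime.utcnow(),
#         event_publishing_time=datetime.utcnow(),
#     )
#     request.to_dict()  # nested dict keyed by the names in attribute_map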
| 38.144928 | 182 | 0.644377 |
4a20aed61d0204e37a37c7f23b6baeb5000eb8a9 | 4,663 | py | Python | google/ads/google_ads/v6/proto/enums/user_list_prepopulation_status_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v6/proto/enums/user_list_prepopulation_status_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v6/proto/enums/user_list_prepopulation_status_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/enums/user_list_prepopulation_status.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/enums/user_list_prepopulation_status.proto',
package='google.ads.googleads.v6.enums',
syntax='proto3',
serialized_options=b'\n!com.google.ads.googleads.v6.enumsB UserListPrepopulationStatusProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v6/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V6.Enums\312\002\035Google\\Ads\\GoogleAds\\V6\\Enums\352\002!Google::Ads::GoogleAds::V6::Enums',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nBgoogle/ads/googleads/v6/enums/user_list_prepopulation_status.proto\x12\x1dgoogle.ads.googleads.v6.enums\x1a\x1cgoogle/api/annotations.proto\"\x87\x01\n\x1fUserListPrepopulationStatusEnum\"d\n\x1bUserListPrepopulationStatus\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\r\n\tREQUESTED\x10\x02\x12\x0c\n\x08\x46INISHED\x10\x03\x12\n\n\x06\x46\x41ILED\x10\x04\x42\xf5\x01\n!com.google.ads.googleads.v6.enumsB UserListPrepopulationStatusProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v6/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V6.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V6\\Enums\xea\x02!Google::Ads::GoogleAds::V6::Enumsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_USERLISTPREPOPULATIONSTATUSENUM_USERLISTPREPOPULATIONSTATUS = _descriptor.EnumDescriptor(
name='UserListPrepopulationStatus',
full_name='google.ads.googleads.v6.enums.UserListPrepopulationStatusEnum.UserListPrepopulationStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REQUESTED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FINISHED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FAILED', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=167,
serialized_end=267,
)
_sym_db.RegisterEnumDescriptor(_USERLISTPREPOPULATIONSTATUSENUM_USERLISTPREPOPULATIONSTATUS)
_USERLISTPREPOPULATIONSTATUSENUM = _descriptor.Descriptor(
name='UserListPrepopulationStatusEnum',
full_name='google.ads.googleads.v6.enums.UserListPrepopulationStatusEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_USERLISTPREPOPULATIONSTATUSENUM_USERLISTPREPOPULATIONSTATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=132,
serialized_end=267,
)
_USERLISTPREPOPULATIONSTATUSENUM_USERLISTPREPOPULATIONSTATUS.containing_type = _USERLISTPREPOPULATIONSTATUSENUM
DESCRIPTOR.message_types_by_name['UserListPrepopulationStatusEnum'] = _USERLISTPREPOPULATIONSTATUSENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UserListPrepopulationStatusEnum = _reflection.GeneratedProtocolMessageType('UserListPrepopulationStatusEnum', (_message.Message,), {
'DESCRIPTOR' : _USERLISTPREPOPULATIONSTATUSENUM,
'__module__' : 'google.ads.googleads.v6.enums.user_list_prepopulation_status_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.enums.UserListPrepopulationStatusEnum)
})
_sym_db.RegisterMessage(UserListPrepopulationStatusEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
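# Usage sketch: the nested enum is exposed as an EnumTypeWrapper on the message class,
# so values can be resolved by name or number.
#
#     _enum = UserListPrepopulationStatusEnum.UserListPrepopulationStatus
#     _enum.Value("REQUESTED")                 # -> 2
#     _enum.Name(3)                            # -> "FINISHED"
#     UserListPrepopulationStatusEnum.FAILED   # value constants are also injected on the class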
| 42.779817 | 707 | 0.802702 |
4a20afc19233a7e37a9e29715e16e35c544240a4 | 2,958 | py | Python | azure-mgmt-hdinsight/azure/mgmt/hdinsight/models/cluster_create_properties_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-hdinsight/azure/mgmt/hdinsight/models/cluster_create_properties_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-hdinsight/azure/mgmt/hdinsight/models/cluster_create_properties_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ClusterCreateProperties(Model):
"""The cluster create parameters.
:param cluster_version: The version of the cluster.
:type cluster_version: str
:param os_type: The type of operating system. Possible values include:
'Windows', 'Linux'
:type os_type: str or ~azure.mgmt.hdinsight.models.OSType
:param tier: The cluster tier. Possible values include: 'Standard',
'Premium'
:type tier: str or ~azure.mgmt.hdinsight.models.Tier
:param cluster_definition: The cluster definition.
:type cluster_definition: ~azure.mgmt.hdinsight.models.ClusterDefinition
:param security_profile: The security profile.
:type security_profile: ~azure.mgmt.hdinsight.models.SecurityProfile
:param compute_profile: The compute profile.
:type compute_profile: ~azure.mgmt.hdinsight.models.ComputeProfile
:param storage_profile: The storage profile.
:type storage_profile: ~azure.mgmt.hdinsight.models.StorageProfile
:param disk_encryption_properties: The disk encryption properties.
:type disk_encryption_properties:
~azure.mgmt.hdinsight.models.DiskEncryptionProperties
"""
_attribute_map = {
'cluster_version': {'key': 'clusterVersion', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'OSType'},
'tier': {'key': 'tier', 'type': 'Tier'},
'cluster_definition': {'key': 'clusterDefinition', 'type': 'ClusterDefinition'},
'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'},
'compute_profile': {'key': 'computeProfile', 'type': 'ComputeProfile'},
'storage_profile': {'key': 'storageProfile', 'type': 'StorageProfile'},
'disk_encryption_properties': {'key': 'diskEncryptionProperties', 'type': 'DiskEncryptionProperties'},
}
def __init__(self, *, cluster_version: str=None, os_type=None, tier=None, cluster_definition=None, security_profile=None, compute_profile=None, storage_profile=None, disk_encryption_properties=None, **kwargs) -> None:
super(ClusterCreateProperties, self).__init__(**kwargs)
self.cluster_version = cluster_version
self.os_type = os_type
self.tier = tier
self.cluster_definition = cluster_definition
self.security_profile = security_profile
self.compute_profile = compute_profile
self.storage_profile = storage_profile
self.disk_encryption_properties = disk_encryption_properties
| 49.3 | 221 | 0.683908 |
4a20afe462651150f3ec887cb82a129364f3bfc5 | 3,433 | py | Python | autotabular/pipeline/components/data_preprocessing/label_encoder/label_encoder.py | jianzhnie/AutoTabular | fb407300adf97532a26d33f7442d2a606fa30512 | [
"Apache-2.0"
] | 48 | 2021-09-06T08:09:26.000Z | 2022-03-28T13:02:54.000Z | autotabular/pipeline/components/data_preprocessing/label_encoder/label_encoder.py | Fanxingye/Autotabular | d630c78290a52f8c73885afb16884e18135c34f6 | [
"Apache-2.0"
] | null | null | null | autotabular/pipeline/components/data_preprocessing/label_encoder/label_encoder.py | Fanxingye/Autotabular | d630c78290a52f8c73885afb16884e18135c34f6 | [
"Apache-2.0"
] | 7 | 2021-09-23T07:28:46.000Z | 2021-10-02T21:15:18.000Z | from decimal import Decimal
from typing import Dict, Optional, Tuple, Union
import numpy as np
from autotabular.pipeline.base import DATASET_PROPERTIES_TYPE, PIPELINE_DATA_DTYPE
from autotabular.pipeline.components.base import AutotabularPreprocessingAlgorithm
from autotabular.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from ConfigSpace.configuration_space import ConfigurationSpace
class LabelEncoder(object):
def __init__(self, try_to_fit_numeric=False):
from sklearn.preprocessing import LabelEncoder
self.lbl = LabelEncoder()
self._try_to_fit_numeric = try_to_fit_numeric
def fit(self, x):
self.lbl.fit(x) # list(x.values))
if self._try_to_fit_numeric:
try:
arr = {Decimal(c): c for c in self.lbl.classes_}
sorted_arr = dict(sorted(arr.items()))
self.lbl.classes_ = np.array(
list(sorted_arr.values()), dtype=self.lbl.classes_.dtype)
except Exception as e:
print(e)
pass
def transform(self, x):
try:
return self.lbl.transform(x) # list(x.values))
except ValueError as ve:
print(ve)
# rescue
classes = np.unique(x) # list(x.values))
diff = np.setdiff1d(classes, self.lbl.classes_)
self.lbl.classes_ = np.concatenate((self.lbl.classes_, diff))
return self.lbl.transform(x) # list(x.values))
def inverse_transform(self, x):
return self.lbl.inverse_transform(x) # (list(x.values))
class LabelEncoderTransformer(AutotabularPreprocessingAlgorithm):
def __init__(
self,
random_state: Optional[np.random.RandomState] = None,
):
self.random_state = random_state
def fit(
self,
X: Optional[PIPELINE_DATA_DTYPE] = None,
y: PIPELINE_DATA_DTYPE = None,
) -> 'LabelEncoderTransformer':
self.preprocessor = LabelEncoder(try_to_fit_numeric=False)
self.preprocessor.fit(y)
return self
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
if self.preprocessor is None:
raise NotImplementedError()
# Notice we are shifting the unseen categories during fit to 1
# from -1, 0, ... to 0,..., cat + 1
# This is done because Category shift requires non negative integers
# Consider removing this if that step is removed
return self.preprocessor.transform(X)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
'shortname': 'LabelEncoderTransformer',
'name': 'LabelEncoder Transformer',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': True,
# TODO find out of this is right!
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (INPUT, ),
}
@staticmethod
def get_hyperparameter_search_space(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> ConfigurationSpace:
return ConfigurationSpace()
| 36.136842 | 82 | 0.635304 |
4a20b1067f827fadd75ce7cab3b1fe45ddca37fd | 634 | py | Python | app/config.py | Berthedusabeyezu/News-Highlight | cd4af3edb031a5bf1013684058434e05d07f5c99 | [
"MIT"
] | null | null | null | app/config.py | Berthedusabeyezu/News-Highlight | cd4af3edb031a5bf1013684058434e05d07f5c99 | [
"MIT"
] | null | null | null | app/config.py | Berthedusabeyezu/News-Highlight | cd4af3edb031a5bf1013684058434e05d07f5c99 | [
"MIT"
] | null | null | null | class Config:
'''
General configuration parent class
'''
NEWS_API_BASE_URL ='https://newsapi.org/v2/sources?category={}&apiKey={}'
ARTICLES_API_BASE_URL = 'https://newsapi.org/v2/everything?sources={}&apiKey={}'
class ProdConfig(Config):
'''
Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
pass
class DevConfig(Config):
'''
Development configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
DEBUG = True
| 21.133333 | 84 | 0.657729 |
4a20b37c735c455139512fa2d7c7f96b1d836065 | 206 | py | Python | services/catalog/src/simcore_service_catalog/db/repositories/_base.py | elisabettai/osparc-simcore | ad7b6e05111b50fe95e49306a992170490a7247f | [
"MIT"
] | null | null | null | services/catalog/src/simcore_service_catalog/db/repositories/_base.py | elisabettai/osparc-simcore | ad7b6e05111b50fe95e49306a992170490a7247f | [
"MIT"
] | 1 | 2021-11-29T13:38:09.000Z | 2021-11-29T13:38:09.000Z | services/catalog/src/simcore_service_catalog/db/repositories/_base.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from sqlalchemy.ext.asyncio import AsyncEngine
@dataclass
class BaseRepository:
"""
Repositories are pulled at every request
"""
db_engine: AsyncEngine
| 15.846154 | 46 | 0.747573 |
4a20b490512902e94929b357933dbd9ce1abcdee | 2,154 | py | Python | trainer.py | vicioux/mobile-semantic-segmentation | 4879e76b274c90323960996c42d8aec916e44ee2 | [
"MIT"
] | 34 | 2019-01-21T14:21:06.000Z | 2022-03-23T12:27:35.000Z | trainer.py | tsingjinyun/mobile-semantic-segmentation | 4879e76b274c90323960996c42d8aec916e44ee2 | [
"MIT"
] | 2 | 2019-10-26T09:19:42.000Z | 2021-07-18T21:59:26.000Z | trainer.py | tsingjinyun/mobile-semantic-segmentation | 4879e76b274c90323960996c42d8aec916e44ee2 | [
"MIT"
] | 8 | 2019-06-12T07:58:58.000Z | 2020-11-06T02:29:56.000Z | import pandas as pd
import torch
class Trainer:
def __init__(self, data_loaders, criterion, device, on_after_epoch=None):
self.data_loaders = data_loaders
self.criterion = criterion
self.device = device
self.history = []
self.on_after_epoch = on_after_epoch
def train(self, model, optimizer, num_epochs):
for epoch in range(num_epochs):
train_epoch_loss = self._train_on_epoch(model, optimizer)
val_epoch_loss = self._val_on_epoch(model, optimizer)
hist = {
'epoch': epoch,
'train_loss': train_epoch_loss,
'val_loss': val_epoch_loss,
}
self.history.append(hist)
if self.on_after_epoch is not None:
self.on_after_epoch(model, pd.DataFrame(self.history))
return pd.DataFrame(self.history)
def _train_on_epoch(self, model, optimizer):
model.train()
data_loader = self.data_loaders[0]
running_loss = 0.0
for inputs, labels in data_loader:
inputs = inputs.to(self.device)
labels = labels.to(self.device)
optimizer.zero_grad()
with torch.set_grad_enabled(True):
outputs = model(inputs)
loss = self.criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item() * inputs.size(0)
epoch_loss = running_loss / len(data_loader.dataset)
return epoch_loss
def _val_on_epoch(self, model, optimizer):
model.eval()
data_loader = self.data_loaders[1]
running_loss = 0.0
for inputs, labels in data_loader:
inputs = inputs.to(self.device)
labels = labels.to(self.device)
optimizer.zero_grad()
with torch.set_grad_enabled(False):
outputs = model(inputs)
loss = self.criterion(outputs, labels)
running_loss += loss.item() * inputs.size(0)
epoch_loss = running_loss / len(data_loader.dataset)
return epoch_loss
| 29.506849 | 77 | 0.589136 |
4a20b4b4ccfbcfefb214f689ab9338ce7e4531b7 | 527 | py | Python | env/lib/python3.8/site-packages/plotly/validators/splom/hoverlabel/_bordercolor.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/splom/hoverlabel/_bordercolor.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/splom/hoverlabel/_bordercolor.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="splom.hoverlabel", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
**kwargs
)
| 32.9375 | 81 | 0.639469 |
4a20b61b2a5c91e76a0264fb45c79c023867c30b | 2,440 | py | Python | apps/project/ActuatorAdaptor.py | GANESHRAMKANAKASABAI1996/CONNECTED-DEVICES-_PYTHON | da231c97bacbc3aa26096b1f94ee9201466678ca | [
"CNRI-Python"
] | null | null | null | apps/project/ActuatorAdaptor.py | GANESHRAMKANAKASABAI1996/CONNECTED-DEVICES-_PYTHON | da231c97bacbc3aa26096b1f94ee9201466678ca | [
"CNRI-Python"
] | null | null | null | apps/project/ActuatorAdaptor.py | GANESHRAMKANAKASABAI1996/CONNECTED-DEVICES-_PYTHON | da231c97bacbc3aa26096b1f94ee9201466678ca | [
"CNRI-Python"
] | null | null | null | '''
Created on Jan 26, 2019
ActuatorAdaptor.py : class to process actuator message and set message to SenseHat
@author: GANESHRAM KANAKASABAI
'''
from project import ActuatorData
from project import SenseHatLedActivator
from project import SmtpClientConnector
class ActuatorAdaptor(object):
'''
ActuatorAdaptor - class to process actuator message, update it and set message to SenseHat
@variable ActuatorData: instance of ActuatorData class
@variable SenseHatLedActivator: instance of SenseHatLedActivator class
'''
ActuatorData = None
SenseHatLedActivator = None
def __init__(self):
'''
ActuatorAdaptor Constructor
'''
self.ActuatorData = ActuatorData.ActuatorData()
self.connector = SmtpClientConnector.SmtpClientConnector()
self.SenseHatLedActivator = SenseHatLedActivator.SenseHatLedActivator()
self.SenseHatLedActivator.setEnableLedFlag(True) #Enable the SenseHatLedActivator Thread
self.SenseHatLedActivator.start() #Start the SenseHatLedActivator Thread
def processMessage(self, key, pActuatorData):
'''
        function to process ActuatorData, update the stored state and scroll a message on the SenseHat
        @param key: actuation type key ("tempact", "preact" or "humidact")
        @param pActuatorData: ActuatorData that needs to be processed
'''
if (key != None):
if(self.ActuatorData != pActuatorData):
if (key == "tempact"):
self.SenseHatLedActivator.setDisplayMessage('temperature set to ' + str(pActuatorData.temperatureactuator) + ' ˚C ');
self.connector.publishMessage('Alert ! Temperature range breached' , 'setting temperature to' + str(abs(pActuatorData.temperatureactuator)) + ' ˚C ')
elif(key == "preact"):
self.SenseHatLedActivator.setDisplayMessage('pressure set to ' + str(pActuatorData.pressureactuator) + ' Pa ');
self.connector.publishMessage('Alert ! Pressure range breached' , 'setting pressure to ' + str(abs(pActuatorData.pressureactuator)) + ' Pa ')
elif(key == "humidact"):
self.SenseHatLedActivator.setDisplayMessage('humidity set to ' + str(pActuatorData.humidityactuator) + ' RH ');
self.connector.publishMessage('Alert ! Humidity range breached', 'setting humidity to ' + str(abs(pActuatorData.humidityactuator)) + ' RH ')
self.ActuatorData.updateData(pActuatorData)
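# Hypothetical wiring (requires Sense HAT hardware plus SMTP configuration; the attribute
# names on ActuatorData mirror the ones read in processMessage above):
#
#     adaptor = ActuatorAdaptor()
#     update = ActuatorData.ActuatorData()
#     update.temperatureactuator = 25
#     adaptor.processMessage("tempact", update)  # scrolls the LED message and e-mails an alert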
| 49.795918 | 169 | 0.684426 |
4a20b6c4205121c200c7e1aab9edb4541d97b347 | 726 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/EXT/stencil_two_side.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/EXT/stencil_two_side.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/EXT/stencil_two_side.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_EXT_stencil_two_side'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_stencil_two_side',error_checker=_errors._error_checker)
GL_ACTIVE_STENCIL_FACE_EXT=_C('GL_ACTIVE_STENCIL_FACE_EXT',0x8911)
GL_STENCIL_TEST_TWO_SIDE_EXT=_C('GL_STENCIL_TEST_TWO_SIDE_EXT',0x8910)
@_f
@_p.types(None,_cs.GLenum)
def glActiveStencilFaceEXT(face):pass
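# Usage sketch (requires a current GL context; glInitStencilTwoSideEXT is PyOpenGL's usual
# availability helper from the non-raw wrapper and is assumed to exist here):
#
#     from OpenGL.GL import glEnable, GL_FRONT, GL_BACK
#     from OpenGL.GL.EXT.stencil_two_side import glInitStencilTwoSideEXT
#
#     if glInitStencilTwoSideEXT():
#         glEnable(GL_STENCIL_TEST_TWO_SIDE_EXT)
#         glActiveStencilFaceEXT(GL_BACK)    # subsequent glStencil* calls affect back faces
#         glActiveStencilFaceEXT(GL_FRONT)   # ...and now front faces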
| 38.210526 | 118 | 0.796143 |
4a20b6dd7221700126c11c2c26569db6c53f35dc | 34,351 | py | Python | baselines/policy/bounded_mlp_policy.py | WolfLo/OPTIMIST | b6b49f34e53b3f626ae764bb1e9672fcabb54d19 | [
"MIT"
] | 5 | 2019-06-17T13:15:56.000Z | 2019-10-16T04:38:00.000Z | baselines/policy/bounded_mlp_policy.py | WolfLo/OPTIMIST | b6b49f34e53b3f626ae764bb1e9672fcabb54d19 | [
"MIT"
] | null | null | null | baselines/policy/bounded_mlp_policy.py | WolfLo/OPTIMIST | b6b49f34e53b3f626ae764bb1e9672fcabb54d19 | [
"MIT"
] | 2 | 2019-08-11T22:38:21.000Z | 2019-11-04T15:25:32.000Z | from baselines.common.mpi_running_mean_std import RunningMeanStd
import baselines.common.tf_util as U
import tensorflow as tf
import gym
from baselines.common import set_global_seeds
from baselines.common.distributions import make_pdtype
import numpy as np
import scipy.stats as sts
#import time
class MlpPolicyBounded(object):
"""Gaussian policy with critic, based on multi-layer perceptron"""
recurrent = False
def __init__(self, name, *args, **kwargs):
# with tf.device('/cpu:0'):
with tf.variable_scope(name):
self._init(*args, **kwargs)
U.initialize()
self.scope = tf.get_variable_scope().name
self._prepare_getsetters()
def _init(self, ob_space, ac_space, hid_size, num_hid_layers,
max_mean=None, min_mean=None, max_std=None, min_std=None,
gaussian_fixed_var=True, trainable_std=True, use_bias=True,
use_critic=True, seed=None,
hidden_W_init=U.normc_initializer(1.0),
std_init=1, gain_init=None):
"""Params:
ob_space: task observation space
ac_space : task action space
hid_size: width of hidden layers
num_hid_layers: depth
gaussian_fixed_var: True->separate parameter for logstd, False->two-headed mlp
use_bias: whether to include bias in neurons
use_critic: whether to learn a value predictor
seed: random seed
max_mean: maximum policy mean
max_std: maximum policy standard deviation
min_mean: minimum policy mean
min_std: minimum policy standard deviation
"""
# Check environment's shapes
assert isinstance(ob_space, gym.spaces.Box)
# Set hidden layers' size
if isinstance(hid_size, list):
num_hid_layers = len(hid_size)
else:
hid_size = [hid_size] * num_hid_layers
# Set seed
if seed is not None:
set_global_seeds(seed)
# Boundaries
# Default values
if max_mean is None:
max_mean = ob_space.high
if min_mean is None:
min_mean = ob_space.low
if min_std is None:
min_std = std_init/np.sqrt(2)
if max_std is None:
max_std = np.sqrt(2) * std_init
# Illegal values
if(max_mean <= min_mean):
raise ValueError("max_mean should be greater than min_mean!")
if(min_std <= 0):
raise ValueError("min_std should be greater than 0!")
if(max_std <= min_std):
raise ValueError("max_std should be greater than min_std!")
if(std_init > max_std or std_init < min_std):
raise ValueError("Initial std out of range!")
self.max_mean = max_mean
self.min_mean = min_mean
self.max_std = max_std
self.min_std = min_std
self.std_init = std_init
self.pdtype = pdtype = make_pdtype(ac_space)
sequence_length = None
ob = U.get_placeholder(name="ob", dtype=tf.float32,
shape=[sequence_length] + list(ob_space.shape))
with tf.variable_scope("obfilter"):
self.ob_rms = RunningMeanStd(shape=ob_space.shape)
# Critic
if use_critic:
with tf.variable_scope('vf'):
obz = tf.clip_by_value(
(ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
last_out = obz
for i in range(num_hid_layers):
last_out = tf.nn.tanh(
tf.layers.dense(
last_out, hid_size[i],
name="fc%i" % (i+1),
kernel_initializer=hidden_W_init))
self.vpred = tf.layers.dense(
last_out, 1, name='final',
kernel_initializer=hidden_W_init)[:, 0]
# Actor
with tf.variable_scope('pol'):
obz = tf.clip_by_value(
(ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
last_out = obz
for i in range(num_hid_layers):
last_out = tf.nn.tanh(
tf.layers.dense(last_out,
hid_size[i],
name='fc%i' % (i+1),
kernel_initializer=hidden_W_init,
use_bias=use_bias))
if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box):
# Bounded mean
mu_range = max_mean - min_mean
                # Squash the mean head through tanh into (min_mean, max_mean); when
                # gain_init is given, invert the squashing so that the initial mean
                # output equals gain_init.
                mean_initializer = None  # fall back to the dense layer's default
                if gain_init is not None:
                    mean_initializer = tf.constant_initializer(
                        np.arctanh(2./mu_range
                                   * (gain_init + mu_range/2. - max_mean)))
                mean = tf.nn.tanh(
                    tf.layers.dense(last_out, pdtype.param_shape()[0]//2,
                                    kernel_initializer=mean_initializer,
                                    use_bias=use_bias))
mean = mean * mu_range/2.
self.mean = mean = tf.add(mean,
- mu_range/2 + max_mean,
name='final')
# Bounded std
logstd_range = np.log(max_std) - np.log(min_std)
std_param_initializer = tf.constant_initializer(
np.arctanh(2./logstd_range * (np.log(std_init)
+ logstd_range/2.
- np.log(max_std))))
std_param = tf.get_variable(
name="std_param", shape=[1, pdtype.param_shape()[0]//2],
initializer=std_param_initializer,
trainable=trainable_std)
logstd = tf.nn.tanh(std_param)
logstd = logstd * logstd_range/2.
logstd = self.logstd = tf.add(logstd,
- logstd_range/2
+ np.log(max_std),
name="pol_logstd")
self.logstd = logstd
pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
else:
raise NotImplementedError
"""
pdparam = tf.layers.dense(last_out, pdtype.param_shape()[0],
name='final',
kernel_initializer=output_W_init)
"""
# Acting
self.pd = pdtype.pdfromflat(pdparam)
self.state_in = []
self.state_out = []
stochastic = tf.placeholder(dtype=tf.bool, shape=())
ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
if use_critic:
self._act = U.function([stochastic, ob], [ac, self.vpred])
else:
self._act = U.function([stochastic, ob], [ac, tf.zeros(1)])
# Evaluating
self.ob = ob
self.ac_in = U.get_placeholder(name="ac_in", dtype=ac_space.dtype,
shape=[sequence_length]
+ list(ac_space.shape))
self.gamma = U.get_placeholder(name="gamma", dtype=tf.float32,
shape=[])
self.rew = U.get_placeholder(name="rew", dtype=tf.float32,
shape=[sequence_length]+[1])
self.logprobs = self.pd.logp(self.ac_in) # [\log\pi(a|s)]
self._get_mean = U.function([ob], [self.mean])
self._get_std = U.function([], [tf.exp(self.logstd)])
self._get_stuff = U.function([ob], [obz, last_out, pdparam])
# Fisher
with tf.variable_scope('pol') as vs:
            self.weights = weights = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=vs.name)
self.flat_weights = flat_weights = tf.concat([tf.reshape(w, [-1]) for w in weights], axis=0)
self.n_weights = flat_weights.shape[0].value
self.score = score = U.flatgrad(self.logprobs, weights) # \nabla\log p(\tau)
self.fisher = tf.einsum('i,j->ij', score, score)
# Performance graph initializations
self._setting = []
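        # Note on the bounding scheme built above: the mean head is tanh(x) rescaled to
        # (min_mean, max_mean) and the log-std parameter is tanh(p) rescaled to
        # (log(min_std), log(max_std)). For example, with min_mean=-1 and max_mean=3,
        #   mean = tanh(x) * 4/2 + (3 - 4/2) = 2*tanh(x) + 1, which lies in (-1, 3);
        # gain_init and std_init are pushed through arctanh so that the network starts
        # at exactly the requested initial mean gain and standard deviation.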
# Acting
def act(self, stochastic, ob):
"""
Actions sampled from the policy
Params:
stochastic: use noise
ob: current state
"""
oneD = len(ob.shape) == 1
if oneD:
ob = ob[None]
ac1, vpred1 = self._act(stochastic, ob)
if oneD:
ac1, vpred1 = ac1[0], vpred1[0]
return ac1, vpred1
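        # Example (shapes only): with a Box(1) action space,
        #   ac, vpred = pi.act(stochastic=True, ob=env.reset())
        # returns a single action and value estimate, while a batch of N stacked
        # observations returns length-N arrays.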
# Distribution parameters
def eval_mean(self, ob):
return self._get_mean(ob)[0]
def eval_std(self):
return self._get_std()[0]
def eval_stuff(self, ob):
return self._get_stuff(ob)
# Divergence
def eval_renyi(self, states, other, order=2):
"""Exponentiated Renyi divergence exp(Renyi(self, other)) for each state
Params:
states: flat list of states
other: other policy
order: order \alpha of the divergence
"""
if order<2:
raise NotImplementedError('Only order>=2 is currently supported')
to_check = order/tf.exp(self.logstd) + (1 - order)/tf.exp(other.logstd)
if not (U.function([self.ob],[to_check])(states)[0] > 0).all():
raise ValueError('Conditions on standard deviations are not met')
detSigma = tf.exp(tf.reduce_sum(self.logstd))
detOtherSigma = tf.exp(tf.reduce_sum(other.logstd))
mixSigma = order*tf.exp(self.logstd) + (1 - order) * tf.exp(other.logstd)
detMixSigma = tf.reduce_prod(mixSigma)
renyi = order/2 * (self.mean - other.mean)/mixSigma*(self.mean - other.mean) - \
1./(2*(order - 1))*(tf.log(detMixSigma) - (1-order)*tf.log(detSigma) - order*tf.log(detOtherSigma))
e_renyi = tf.exp(renyi)
fun = U.function([self.ob],[e_renyi])
return fun(states)[0]
# Performance evaluation
def eval_J(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99, behavioral=None, per_decision=False,
normalize=False, truncate_at=np.infty):
"""
Performance evaluation, possibly off-policy
Params:
states, actions, rewards as lists, flat wrt time
lens_or_batch_size: list with episode lengths or scalar representing the number of (equally long) episodes
horizon: max task horizon
behavioral: policy used to collect (s, a, r) tuples
per_decision: whether to use Per-Decision IS in place of regular episodic IS
gamma: discount factor
Returns:
            average episodic performance J_hat
"""
# Prepare data
batch_size, horizon, _states, _actions, _rewards, _mask = self._prepare_data(states, actions, rewards, lens_or_batch_size, horizon)
# Build performance evaluation graph (lazy)
assert horizon>0 and batch_size>0
self._build(batch_size, horizon, behavioral, per_decision, normalize, truncate_at)
# Evaluate performance stats
result = self._get_avg_J(_states, _actions, _rewards, gamma, _mask)[0]
return np.asscalar(result)
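        # Example (illustrative): off-policy evaluation of this policy from 50 trajectories
        # of length 100 collected with a behavioral policy `b`:
        #   J_hat = pi.eval_J(states, actions, rewards, lens_or_batch_size=50, horizon=100,
        #                     gamma=0.99, behavioral=b, per_decision=True)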
def eval_var_J(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
behavioral=None, per_decision=False, normalize=False, truncate_at=np.infty):
"""
Performance variance evaluation, possibly off-policy
Params:
states, actions, rewards as lists, flat wrt time
lens_or_batch_size: list with episode lengths or scalar representing the number of (equally long) episodes
horizon: max task horizon
behavioral: policy used to collect (s, a, r) tuples
per_decision: whether to use Per-Decision IS in place of regular episodic IS
gamma: discount factor
Returns:
            sample variance of the episodic performance estimate Var_J_hat
"""
#Prepare data
batch_size, horizon, _states, _actions, _rewards, _mask = self._prepare_data(states, actions, rewards, lens_or_batch_size, horizon)
#Build performance evaluation graph (lazy)
assert horizon>0 and batch_size>0
self._build(batch_size, horizon, behavioral, per_decision, normalize, truncate_at)
#Evaluate performance stats
result = self._get_var_J(_states, _actions, _rewards, gamma, _mask)[0]
return np.asscalar(result)
def eval_iw_stats(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
behavioral=None, per_decision=False, normalize=False, truncate_at=np.infty):
batch_size, horizon, _states, _actions, _rewards, _mask = (
self._prepare_data(states, actions, rewards, lens_or_batch_size, horizon))
self._build(batch_size, horizon, behavioral, per_decision, normalize, truncate_at)
results = self._get_iw_stats(_states, _actions, _rewards, gamma, _mask)
return tuple(map(np.asscalar, results))
def eval_ret_stats(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
behavioral=None, per_decision=False, normalize=False, truncate_at=np.infty):
batch_size, horizon, _states, _actions, _rewards, _mask = self._prepare_data(states, actions, rewards, lens_or_batch_size, horizon)
self._build(batch_size, horizon, behavioral, per_decision, normalize, truncate_at)
results = self._get_ret_stats(_states, _actions, _rewards, gamma, _mask)
return tuple(map(np.asscalar, results))
def eval_grad_J(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
behavioral=None, per_decision=False, normalize=False, truncate_at=np.infty):
"""
Gradients of performance
Params:
states, actions, rewards as lists, flat wrt time
lens_or_batch_size: list with episode lengths or scalar representing the number of (equally long) episodes
horizon: max task horizon
behavioral: policy used to collect (s, a, r) tuples
per_decision: whether to use Per-Decision IS in place of regular episodic IS
gamma: discount factor
Returns:
gradient of average episodic performance wrt actor weights,
"""
#Prepare data
batch_size, horizon, _states, _actions, _rewards, _mask = self._prepare_data(states, actions, rewards, lens_or_batch_size, horizon)
#Build performance evaluation graph (lazy)
assert batch_size>0
self._build(batch_size, horizon, behavioral, per_decision, normalize, truncate_at)
#Evaluate gradients
result = self._get_grad_J(_states, _actions, _rewards, gamma, _mask)[0]
return np.ravel(result)
def eval_grad_var_J(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
behavioral=None, per_decision=False, normalize=False, truncate_at=np.infty):
"""
Gradients of performance stats
Params:
states, actions, rewards as lists, flat wrt time
lens_or_batch_size: list with episode lengths or scalar representing the number of (equally long) episodes
horizon: max task horizon
behavioral: policy used to collect (s, a, r) tuples
per_decision: whether to use Per-Decision IS in place of regular episodic IS
gamma: discount factor
Returns:
gradient of sample variance of episodic performance wrt actor weights
"""
#Prepare data
batch_size, horizon, _states, _actions, _rewards, _mask = self._prepare_data(states, actions, rewards, lens_or_batch_size, horizon)
#Build performance evaluation graph (lazy)
assert batch_size>0
self._build(batch_size, horizon, behavioral, per_decision, normalize, truncate_at)
#Evaluate gradients
result = self._get_grad_var_J(_states, _actions, _rewards, gamma, _mask)[0]
return np.ravel(result)
def eval_bound(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
behavioral=None, per_decision=False, normalize=False,
truncate_at=np.infty, delta=0.2, use_ess=False):
"""
Student-t bound on performance
Params:
states, actions, rewards as lists, flat wrt time
lens_or_batch_size: list with episode lengths or scalar representing the number of (equally long) episodes
horizon: max task horizon
behavioral: policy used to collect (s, a, r) tuples
per_decision: whether to use Per-Decision IS in place of regular episodic IS
gamma: discount factor
delta: 1 - confidence
"""
#Prepare data
batch_size, horizon, _states, _actions, _rewards, _mask = self._prepare_data(states, actions, rewards, lens_or_batch_size, horizon)
#Build performance evaluation graph (lazy)
assert horizon>0 and batch_size>0
self._build(batch_size, horizon, behavioral, per_decision, normalize, truncate_at)
#Evaluate bound
N = self._get_ess(_states, _actions, _rewards, gamma, _mask)[0] if use_ess else batch_size
N = max(N, 2)
bound = self._avg_J - sts.t.ppf(1 - delta, N - 1) / np.sqrt(N) * tf.sqrt(self._var_J)
return np.asscalar(U.function([self.ob, self.ac_in, self.rew,
self.gamma, self.mask],[bound])(
_states,
_actions,
_rewards,
gamma,
_mask)[0])
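    # The tensor assembled above is the one-sided student-t lower confidence bound
    #   J_hat - t_{1-delta, N-1} / sqrt(N) * sqrt(Var_J_hat),
    # with N being either the batch size or the (rounded) effective sample size.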
def eval_grad_bound(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
behavioral=None, per_decision=False, normalize=False,
truncate_at=np.infty, delta=.2, use_ess=False):
"""
Gradient of student-t bound
Params:
states, actions, rewards as lists, flat wrt time
lens_or_batch_size: list with episode lengths or scalar representing the number of (equally long) episodes
horizon: max task horizon
gamma: discount factor
behavioral: policy used to collect (s, a, r) tuples
per_decision: whether to use Per-Decision IS in place of regular episodic IS
normalize: whether to apply self-normalization
truncate_at: upper bound on importance weights (infinite by
default); ignored in case of self-normalization
delta: 1 - confidence
"""
#Prepare data
batch_size, horizon, _states, _actions, _rewards, _mask = self._prepare_data(states, actions, rewards, lens_or_batch_size, horizon)
#Build performance evaluation graph (lazy)
assert horizon>0 and batch_size>0
self._build(batch_size, horizon, behavioral, per_decision, normalize, truncate_at)
#Evaluate bound gradient
N = self._get_ess(_states, _actions, _rewards, gamma, _mask)[0] if use_ess else batch_size
N = max(N, 2)
bound = self._avg_J - sts.t.ppf(1 - delta, N - 1) / np.sqrt(N) * tf.sqrt(self._var_J)
grad_bound = U.flatgrad(bound, self.get_param())
return np.ravel(U.function([self.ob, self.ac_in, self.rew,
self.gamma, self.mask],[grad_bound])(
_states,
_actions,
_rewards,
gamma,
_mask)[0])
def _prepare_data(self, states, actions, rewards, lens_or_batch_size, horizon, do_pad=True, do_concat=True):
assert len(states) > 0
        if actions is not None:
            assert len(actions)==len(states)
if type(lens_or_batch_size) is list:
lens = lens_or_batch_size
no_of_samples = sum(lens)
assert no_of_samples > 0
batch_size = len(lens)
if horizon is None:
horizon = max(lens)
assert all(np.array(lens) <= horizon)
else:
assert type(lens_or_batch_size) is int
batch_size = lens_or_batch_size
assert len(states)%batch_size == 0
if horizon is None:
                horizon = len(states)//batch_size
no_of_samples = horizon * batch_size
lens = [horizon] * batch_size
mask = np.ones(no_of_samples) if do_pad else None
indexes = np.cumsum(lens)
to_resize = [states, actions, rewards, mask]
to_resize = [x for x in to_resize if x is not None]
resized = [batch_size, horizon]
for v in to_resize:
v = np.array(v[:no_of_samples])
if v.ndim == 1:
v = np.expand_dims(v, axis=1)
v = np.split(v, indexes, axis=0)
if do_pad:
padding_shapes = [tuple([horizon - m.shape[0]] + list(m.shape[1:])) for m in v if m.shape[0]>0]
paddings = [np.zeros(shape, dtype=np.float32) for shape in padding_shapes]
v = [np.concatenate((m, pad)) for (m, pad) in zip(v, paddings)]
if do_concat:
v = np.concatenate(v, axis=0)
resized.append(v)
return tuple(resized)
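    # _prepare_data reshapes the flat per-step lists into fixed-length episodes: with
    # do_pad/do_concat enabled it returns (batch_size, horizon, states, actions, rewards, mask),
    # each array of shape (batch_size*horizon, dim), zero-padded past an episode's true
    # length, with mask equal to 1 on real steps and 0 on padding.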
def _build(self, batch_size, horizon, behavioral, per_decision, normalize=False, truncate_at=np.infty):
if [batch_size, horizon, behavioral, per_decision, normalize,
truncate_at]!=self._setting:
#checkpoint = time.time()
self._setting = [batch_size, horizon, behavioral, per_decision,
normalize, truncate_at]
self.mask = tf.placeholder(name="mask", dtype=tf.float32, shape=[batch_size*horizon, 1])
rews_by_episode = tf.split(self.rew, batch_size)
rews_by_episode = tf.stack(rews_by_episode)
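            # Broadcast gamma over the (batch, horizon, 1) reward layout; the exclusive
            # cumprod below turns it into the discount sequence [1, gamma, gamma^2, ...]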
disc = self.gamma + 0*rews_by_episode
disc = tf.cumprod(disc, axis=1, exclusive=True)
disc_rews = rews_by_episode * disc
rets = tf.reduce_sum(disc_rews, axis=1)
if behavioral is None:
#On policy
avg_J, var_J = tf.nn.moments(tf.reduce_sum(disc_rews, axis=1), axes=[0])
grad_avg_J = tf.constant(0)
grad_var_J = tf.constant(0)
avg_iw = tf.constant(1)
var_iw = tf.constant(0)
max_iw = tf.constant(1)
ess = batch_size
else:
#Off policy -> importance weighting :(
log_ratios = self.logprobs - behavioral.pd.logp(self.ac_in)
log_ratios = tf.expand_dims(log_ratios, axis=1)
log_ratios = tf.multiply(log_ratios, self.mask)
log_ratios_by_episode = tf.split(log_ratios, batch_size)
log_ratios_by_episode = tf.stack(log_ratios_by_episode)
if per_decision:
#Per-decision
iw = tf.exp(tf.cumsum(log_ratios_by_episode, axis=1))
if not normalize:
#Per-decision, unnormalized (possibly truncated)
iw = tf.clip_by_value(iw, 0, truncate_at)
weighted_rets = tf.reduce_sum(tf.multiply(disc_rews,iw), axis=1)
avg_J, var_J = tf.nn.moments(weighted_rets, axes=[0])
else:
#Per-decision, self-normalized
iw = batch_size*iw/tf.reduce_sum(iw, axis=0)
avg_J_t = tf.reduce_mean(disc_rews* iw,
axis=0)
avg_J = tf.reduce_sum(avg_J_t)
var_J = 1./batch_size * tf.reduce_sum(disc**2 * tf.reduce_mean(iw**2 *
(rews_by_episode -
avg_J_t)**2,
                                                                                       axis=0)) #To be checked
weighted_rets = tf.reduce_sum(tf.multiply(disc_rews,iw), axis=1)
                    eff_iw = weighted_rets/rets
                    avg_iw, var_iw = tf.nn.moments(eff_iw, axes=[0])
                    max_iw = tf.reduce_max(eff_iw)
                    #Effective sample size from the per-episode effective weights
                    #(assumed definition, added so _get_ess/_get_iw_stats are defined in the per-decision case)
                    ess = tf.round(tf.reduce_sum(eff_iw)**2 / tf.reduce_sum(eff_iw**2))
else:
#Per-trajectory
iw = tf.exp(tf.reduce_sum(log_ratios_by_episode, axis=1))
if not normalize:
#Per trajectory, unnormalized (possibly truncated)
iw = tf.clip_by_value(iw, 0, truncate_at)
weighted_rets = tf.multiply(rets, iw)
avg_J, var_J = tf.nn.moments(weighted_rets, axes=[0])
avg_iw, var_iw = tf.nn.moments(iw, axes=[0])
ess = tf.round(tf.reduce_sum(iw)**2 / tf.reduce_sum(iw**2))
else:
#Per-trajectory, self-normalized
iw = batch_size*iw/tf.reduce_sum(iw, axis=0)
avg_J = tf.reduce_mean(rets*iw, axis=0)
var_J = 1./batch_size * tf.reduce_mean(iw**2 *
(rets - avg_J)**2)
avg_iw = tf.reduce_mean(iw, axis=0)
var_iw = 1./batch_size * tf.reduce_mean((iw - 1)**2)
ess = tf.round(tf.reduce_sum(iw)**2 / tf.reduce_sum(iw**2))
max_iw = tf.reduce_max(iw)
grad_avg_J = U.flatgrad(avg_J, self.get_param())
grad_var_J = U.flatgrad(var_J, self.get_param())
avg_ret, var_ret = tf.nn.moments(tf.reduce_sum(disc_rews, axis=1), axes=[0])
max_ret = tf.reduce_max(tf.reduce_sum(disc_rews, axis=1))
self._avg_J = avg_J
self._var_J = var_J
self._grad_avg_J = grad_avg_J
self._grad_var_J = grad_var_J
self._get_avg_J = U.function([self.ob, self.ac_in, self.rew, self.gamma, self.mask], [avg_J])
self._get_var_J = U.function([self.ob, self.ac_in, self.rew, self.gamma, self.mask], [var_J])
self._get_grad_J = U.function([self.ob, self.ac_in, self.rew, self.gamma, self.mask], [grad_avg_J])
self._get_grad_var_J = U.function([self.ob, self.ac_in, self.rew, self.gamma, self.mask], [grad_var_J])
self._get_all = U.function([self.ob, self.ac_in, self.rew, self.gamma, self.mask], [avg_J, var_J, grad_avg_J, grad_var_J])
self._get_ess = U.function([self.ob, self.ac_in, self.rew,
self.gamma, self.mask], [ess])
self._get_iw_stats = U.function([self.ob, self.ac_in, self.rew,
self.gamma, self.mask], [avg_iw,
var_iw,
max_iw,
ess])
self._get_ret_stats = U.function([self.ob, self.ac_in, self.rew, self.gamma, self.mask], [avg_ret, var_ret, max_ret])
#print('Recompile time:', time.time() - checkpoint)
#Fisher
def eval_fisher(self, states, actions, lens_or_batch_size, horizon=None, behavioral=None):
"""
Fisher information matrix
Params:
states, actions as lists, flat wrt time
lens_or_batch_size: list with episode lengths or scalar representing the number of (equally long) episodes
horizon: max task horizon
behavioral: policy used to collect (s, a, r) tuples
"""
#Prepare data
batch_size, horizon, _states, _actions = self._prepare_data(states,
actions,
None,
lens_or_batch_size,
horizon,
do_pad=False,
do_concat=False)
fisher = self.fisher
with tf.device('/cpu:0'):
if behavioral is not None:
log_ratios = self.logprobs - behavioral.pd.logp(self.ac_in)
iw = tf.exp(tf.reduce_sum(log_ratios))
fisher = tf.multiply(iw, fisher)
fun = U.function([self.ob, self.ac_in], [fisher])
fisher_samples = np.array([fun(s, a)[0] for (s,a) in zip(_states, _actions)]) #one call per EPISODE
return np.mean(fisher_samples, axis=0)
def _prepare_getsetters(self):
with tf.variable_scope('pol') as vs:
self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=vs.name)
self.get_parameter = U.GetFlat(self.var_list)
self.set_parameter = U.SetFromFlat(self.var_list)
# Weight manipulation
def eval_param(self):
""""Policy parameters (numeric,flat)"""
return self.get_parameter()
def get_param(self):
return self.weights
def set_param(self,param):
"""Set policy parameters to (flat) param"""
self.set_parameter(param)
# Used by original implementation
def get_variables(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def get_initial_state(self):
return []
def set_script_test(env, policy, horizon, seed, bounded_policy, trainable_std,
gain_init, max_mean, min_mean, max_std, min_std, std_init):
# Common imports
import sys, re, os, time, logging
from collections import defaultdict
# Framework imports
import gym
import tensorflow as tf
# Self imports: utils
from baselines.common import set_global_seeds
from baselines import logger
import baselines.common.tf_util as U
from baselines.common.rllab_utils import Rllab2GymWrapper, rllab_env_from_name
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
# Import custom envs
import baselines.envs.lqg1d # registered at import as gym env
def get_env_type(env_id):
# First load all envs
_game_envs = defaultdict(set)
for env in gym.envs.registry.all():
env_type = env._entry_point.split(':')[0].split('.')[-1]
_game_envs[env_type].add(env.id)
# Get env type
env_type = None
for g, e in _game_envs.items():
if env_id in e:
env_type = g
break
return env_type
env = 'LQG1D-v0'
# Prepare environment maker
if env.startswith('rllab.'):
# Get env name and class
        env_name = re.match(r'rllab.(\w+)', env).group(1)
env_rllab_class = rllab_env_from_name(env_name)
# Define env maker
def make_env():
env_rllab = env_rllab_class()
_env = Rllab2GymWrapper(env_rllab)
return _env
# Used later
env_type = 'rllab'
else:
# Normal gym, get if Atari or not.
env_type = get_env_type(env)
assert env_type is not None, "Env not recognized."
# Define the correct env maker
if env_type == 'atari':
# Atari, custom env creation
def make_env():
_env = make_atari(env)
return wrap_deepmind(_env)
else:
# Not atari, standard env creation
def make_env():
env_rllab = gym.make(env)
return env_rllab
# Prepare policy maker
if policy == 'linear':
hid_size = num_hid_layers = 0
elif policy == 'nn':
hid_size = [100, 50, 25]
num_hid_layers = 3
def make_policy(name, ob_space, ac_space):
return MlpPolicyBounded(
name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=hid_size, num_hid_layers=num_hid_layers,
gaussian_fixed_var=True, trainable_std=trainable_std,
use_bias=False, use_critic=False,
#hidden_W_init=tf.constant_initializer(1.1),
gain_init=gain_init,
max_mean=max_mean,
min_mean=min_mean,
max_std=max_std,
min_std=min_std,
std_init=std_init)
# Initialize
affinity = len(os.sched_getaffinity(0))
sess = U.make_session(affinity)
sess.__enter__()
set_global_seeds(seed)
gym.logger.setLevel(logging.WARN)
env = make_env()
ob_space = env.observation_space
ac_space = env.action_space
pi = make_policy('pi', ob_space, ac_space)
return pi
if __name__ == '__main__':
pi = set_script_test(
env='LQG1D-v0',
policy='linear',
horizon=20,
seed=1,
bounded_policy=True,
trainable_std=False,
gain_init=-0.61525125,
max_mean=1,
min_mean=-1,
max_std=None,
min_std=0.1,
std_init=0.11)
mu = pi.eval_mean([[1]])
print('mu', mu)
obz, last_out, pdparam = pi.eval_stuff([[-1]])
print('obz', obz)
print('last_out', last_out)
print('pdparam', pdparam[0])
sigma = pi.eval_std()
print('sigma', sigma)
| 44.323871 | 139 | 0.562254 |
4a20b74a81048abd2b2832f147a0673c8f733498 | 33,049 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_authentication_scheme.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_authentication_scheme.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_authentication_scheme.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_authentication_scheme
short_description: Configure Authentication Schemes in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify authentication feature and scheme category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
- Delimited by a slash character if there are more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
authentication_scheme:
description:
- Configure Authentication Schemes.
default: null
type: dict
suboptions:
domain_controller:
description:
- Domain controller setting. Source user.domain-controller.name.
type: str
ems_device_owner:
description:
- Enable/disable SSH public-key authentication with device owner .
type: str
choices:
- enable
- disable
fsso_agent_for_ntlm:
description:
- FSSO agent to use for NTLM authentication. Source user.fsso.name.
type: str
fsso_guest:
description:
- Enable/disable user fsso-guest authentication .
type: str
choices:
- enable
- disable
kerberos_keytab:
description:
- Kerberos keytab setting. Source user.krb-keytab.name.
type: str
method:
description:
- Authentication methods .
type: list
choices:
- ntlm
- basic
- digest
- form
- negotiate
- fsso
- rsso
- ssh-publickey
- cert
- saml
name:
description:
- Authentication scheme name.
required: true
type: str
negotiate_ntlm:
description:
- Enable/disable negotiate authentication for NTLM .
type: str
choices:
- enable
- disable
require_tfa:
description:
- Enable/disable two-factor authentication .
type: str
choices:
- enable
- disable
saml_server:
description:
- SAML configuration. Source user.saml.name.
type: str
saml_timeout:
description:
- SAML authentication timeout in seconds.
type: int
ssh_ca:
description:
- SSH CA name. Source firewall.ssh.local-ca.name.
type: str
user_cert:
description:
- Enable/disable authentication with user certificate .
type: str
choices:
- enable
- disable
user_database:
description:
- Authentication server to contain user information; "local" (default) or "123" (for LDAP).
type: list
suboptions:
name:
description:
- Authentication server name. Source system.datasource.name user.radius.name user.tacacs+.name user.ldap.name user.group.name.
required: true
type: str
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_authentication_scheme
fortios_authentication_scheme:
vdom: root
state: present
authentication_scheme:
fsso_guest: disable
method: basic
name: terr-auth-scheme
negotiate_ntlm: enable
require_tfa: disable
user_database:
- name: local-user-db
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_authentication_scheme_data(json):
option_list = ['domain_controller', 'ems_device_owner', 'fsso_agent_for_ntlm',
'fsso_guest', 'kerberos_keytab', 'method',
'name', 'negotiate_ntlm', 'require_tfa',
'saml_server', 'saml_timeout', 'ssh_ca',
'user_cert', 'user_database']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_single_path(data, path, index):
if not data or index == len(path) or path[index] not in data or not data[path[index]]:
return
if index == len(path) - 1:
data[path[index]] = ' '.join(str(elem) for elem in data[path[index]])
elif isinstance(data[path[index]], list):
for value in data[path[index]]:
flatten_single_path(value, path, index + 1)
else:
flatten_single_path(data[path[index]], path, index + 1)
def flatten_multilists_attributes(data):
multilist_attrs = [
[u'method'],
]
for attr in multilist_attrs:
flatten_single_path(data, attr, 0)
return data
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
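# e.g. underscore_to_hyphen({'fsso_guest': 'disable'}) -> {'fsso-guest': 'disable'}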
def authentication_scheme(data, fos, check_mode=False):
vdom = data['vdom']
state = data['state']
authentication_scheme_data = data['authentication_scheme']
authentication_scheme_data = flatten_multilists_attributes(authentication_scheme_data)
filtered_data = underscore_to_hyphen(filter_authentication_scheme_data(authentication_scheme_data))
# check_mode starts from here
if check_mode:
mkey = fos.get_mkey('authentication', 'scheme', filtered_data, vdom=vdom)
current_data = fos.get('authentication', 'scheme', vdom=vdom, mkey=mkey)
is_existed = current_data and current_data.get('http_status') == 200 \
and isinstance(current_data.get('results'), list) \
and len(current_data['results']) > 0
# 2. if it exists and the state is 'present' then compare current settings with desired
if state == 'present' or state is True:
if mkey is None:
return False, True, filtered_data
# if mkey exists then compare each other
# record exits and they're matched or not
if is_existed:
is_same = is_same_comparison(
serialize(current_data['results'][0]), serialize(filtered_data))
return False, not is_same, filtered_data
# record does not exist
return False, True, filtered_data
if state == 'absent':
if mkey is None:
return False, False, filtered_data
if is_existed:
return False, True, filtered_data
return False, False, filtered_data
return True, False, {'reason: ': 'Must provide state parameter'}
if state == "present" or state is True:
return fos.set('authentication',
'scheme',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('authentication',
'scheme',
mkey=filtered_data['name'],
vdom=vdom)
else:
fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
return 'status' in resp and resp['status'] == 'success' or \
'http_status' in resp and resp['http_status'] == 200 or \
'http_method' in resp and resp['http_method'] == "DELETE" and resp['http_status'] == 404
def fortios_authentication(data, fos, check_mode):
fos.do_member_operation('authentication_scheme')
if data['authentication_scheme']:
resp = authentication_scheme(data, fos, check_mode)
else:
fos._module.fail_json(msg='missing task body: %s' % ('authentication_scheme'))
if check_mode:
return resp
return not is_successful_status(resp), \
is_successful_status(resp) and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
versioned_schema = {
"type": "list",
"children": {
"user_cert": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v7.0.1": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
},
"negotiate_ntlm": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"saml_server": {
"type": "string",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"saml_timeout": {
"type": "integer",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"fsso_guest": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ems_device_owner": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v7.0.0": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": False,
"v7.0.0": True
}
},
"kerberos_keytab": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fsso_agent_for_ntlm": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"domain_controller": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssh_ca": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"require_tfa": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"method": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "ntlm",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "basic",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "digest",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "form",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "negotiate",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fsso",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "rsso",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "ssh-publickey",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "cert",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "saml",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"user_database": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
def main():
module_spec = schema_to_module_spec(versioned_schema)
mkeyname = 'name'
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"enable_log": {"required": False, "type": bool},
"vdom": {"required": False, "type": "str", "default": "root"},
"member_path": {"required": False, "type": "str"},
"member_state": {
"type": "str",
"required": False,
"choices": ["present", "absent"]
},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"authentication_scheme": {
"required": False, "type": "dict", "default": None,
"options": {
}
}
}
for attribute_name in module_spec['options']:
fields["authentication_scheme"]['options'][attribute_name] = module_spec['options'][attribute_name]
if mkeyname and mkeyname == attribute_name:
fields["authentication_scheme"]['options'][attribute_name]['required'] = True
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=True)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
if 'enable_log' in module.params:
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module, mkeyname)
versions_check_result = check_schema_versioning(fos, versioned_schema, "authentication_scheme")
is_error, has_changed, result = fortios_authentication(module.params, fos, module.check_mode)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 32.983034 | 154 | 0.414052 |
4a20b7fffcf477295cf6aacd13446c84740dc405 | 2,393 | py | Python | 4. Convolutional Neural Networks/yad2k/models/keras_darknet19.py | Boonichi/Deep-Learning-Specialization | 4061c4a2b38cadeb12d582cdcb2e7de7b1d3b407 | [
"MIT"
] | null | null | null | 4. Convolutional Neural Networks/yad2k/models/keras_darknet19.py | Boonichi/Deep-Learning-Specialization | 4061c4a2b38cadeb12d582cdcb2e7de7b1d3b407 | [
"MIT"
] | null | null | null | 4. Convolutional Neural Networks/yad2k/models/keras_darknet19.py | Boonichi/Deep-Learning-Specialization | 4061c4a2b38cadeb12d582cdcb2e7de7b1d3b407 | [
"MIT"
] | null | null | null | """Darknet19 Model Defined in Keras."""
import functools
from functools import partial
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from ..utils.utils import compose
# Partial wrapper for Convolution2D with static default argument.
_DarknetConv2D = partial(Conv2D, padding='same')
@functools.wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
"""Wrapper to set Darknet weight regularizer for Convolution2D."""
darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
darknet_conv_kwargs.update(kwargs)
return _DarknetConv2D(*args, **darknet_conv_kwargs)
def DarknetConv2D_BN_Leaky(*args, **kwargs):
"""Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(),
LeakyReLU(alpha=0.1))
def bottleneck_block(outer_filters, bottleneck_filters):
"""Bottleneck block of 3x3, 1x1, 3x3 convolutions."""
return compose(
DarknetConv2D_BN_Leaky(outer_filters, (3, 3)),
DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1)),
DarknetConv2D_BN_Leaky(outer_filters, (3, 3)))
def bottleneck_x2_block(outer_filters, bottleneck_filters):
"""Bottleneck block of 3x3, 1x1, 3x3, 1x1, 3x3 convolutions."""
return compose(
bottleneck_block(outer_filters, bottleneck_filters),
DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1)),
DarknetConv2D_BN_Leaky(outer_filters, (3, 3)))
def darknet_body():
"""Generate first 18 conv layers of Darknet-19."""
return compose(
DarknetConv2D_BN_Leaky(32, (3, 3)),
MaxPooling2D(),
DarknetConv2D_BN_Leaky(64, (3, 3)),
MaxPooling2D(),
bottleneck_block(128, 64),
MaxPooling2D(),
bottleneck_block(256, 128),
MaxPooling2D(),
bottleneck_x2_block(512, 256),
MaxPooling2D(),
bottleneck_x2_block(1024, 512))
def darknet19(inputs):
"""Generate Darknet-19 model for Imagenet classification."""
body = darknet_body()(inputs)
logits = DarknetConv2D(1000, (1, 1), activation='softmax')(body)
return Model(inputs, logits) | 33.704225 | 77 | 0.712077 |
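# Usage sketch (illustrative; assumes a Keras Input tensor of a size the network accepts):
#   from keras.layers import Input
#   model = darknet19(Input(shape=(224, 224, 3)))
#   model.summary()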
4a20b86888ac0635902dcb9c8181891a3ed8a937 | 8,948 | py | Python | tests/synth_tests/test_synth_workload.py | agalup/GPUMemManSurvey | 494e41c2587b69472ad5a17a602ff290b1b1a6ab | [
"MIT"
] | 13 | 2021-02-26T15:15:29.000Z | 2021-09-22T19:06:27.000Z | tests/synth_tests/test_synth_workload.py | agalup/GPUMemManSurvey | 494e41c2587b69472ad5a17a602ff290b1b1a6ab | [
"MIT"
] | null | null | null | tests/synth_tests/test_synth_workload.py | agalup/GPUMemManSurvey | 494e41c2587b69472ad5a17a602ff290b1b1a6ab | [
"MIT"
] | 1 | 2021-09-29T21:24:15.000Z | 2021-09-29T21:24:15.000Z | import sys
sys.path.append('../../scripts')
import os
import shutil
import time
from datetime import datetime
from timedprocess import Command
from Helper import generateResultsFromSynthetic
# from Helper import plotMean
import csv
import argparse
def main():
# Run all files from a directory
print("##############################################################################")
print("Callable as: python test_synth_workload.py")
print("##############################################################################")
# Specify which test configuration to use
testcases = {}
num_allocations = 10000
smallest_allocation_size = 4
largest_allocation_size = 1024
alloc_size = 8
num_iterations = 1
free_memory = 1
generate_results = True
generate_plots = True
clean_temporary_files = True
test_warp_based = False
filetype = "pdf"
time_out_val = 100
if os.name == 'nt': # If on Windows
build_path = os.path.join("build", "Release")
sync_build_path = os.path.join("sync_build", "Release")
else:
build_path = "build/"
sync_build_path = "sync_build/"
parser = argparse.ArgumentParser(description='Test workload for various frameworks')
parser.add_argument('-t', type=str, help='Specify which frameworks to test, separated by +, e.g. o+s+h+c+f+r+x+b ---> c : cuda | s : scatteralloc | h : halloc | o : ouroboros | f : fdgmalloc | r : register-efficient | x : xmalloc')
parser.add_argument('-threadrange', type=str, help='Specify number of threads, given as powers of two, e.g. 0-5 -> results in 1-32')
	parser.add_argument('-range', type=str, help='Specify Allocation Range, e.g. 4-1024')
parser.add_argument('-iter', type=int, help='How many iterations?')
parser.add_argument('-runtest', action='store_true', default=False, help='Run testcases')
parser.add_argument('-genres', action='store_true', default=False, help='Generate results')
parser.add_argument('-genplot', action='store_true', default=False, help='Generate results file and plot')
parser.add_argument('-testwrite', action='store_true', default=False, help='If set tests write performance, not allocation performance')
parser.add_argument('-timeout', type=int, help='Timeout Value in Seconds, process will be killed after as many seconds')
parser.add_argument('-plotscale', type=str, help='log/linear')
parser.add_argument('-filetype', type=str, help='png or pdf')
parser.add_argument('-allocsize', type=int, help='How large is the manageable memory in GiB?')
parser.add_argument('-device', type=int, help='Which device to use', default=0)
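	# Example invocation (illustrative; uses only the flags defined above):
	#   python test_synth_workload.py -t o+c -threadrange 0-10 -range 4-1024 -iter 25 -runtest -genres -allocsize 8 -device 0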
args = parser.parse_args()
executable_extension = ""
if os.name == 'nt': # If on Windows
executable_extension = ".exe"
# Parse approaches
if(args.t):
if any("c" in s for s in args.t):
testcases["CUDA"] = os.path.join(build_path, str("c_synth_test") + executable_extension)
if any("x" in s for s in args.t):
testcases["XMalloc"] = os.path.join(sync_build_path, str("x_synth_test") + executable_extension)
if any("h" in s for s in args.t):
testcases["Halloc"] = os.path.join(sync_build_path, str("h_synth_test") + executable_extension)
if any("s" in s for s in args.t):
testcases["ScatterAlloc"] = os.path.join(sync_build_path, str("s_synth_test") + executable_extension)
if any("o" in s for s in args.t):
testcases["Ouroboros-P-S"] = os.path.join(build_path, str("o_synth_test_p") + executable_extension)
testcases["Ouroboros-P-VA"] = os.path.join(build_path, str("o_synth_test_vap") + executable_extension)
testcases["Ouroboros-P-VL"] = os.path.join(build_path, str("o_synth_test_vlp") + executable_extension)
testcases["Ouroboros-C-S"] = os.path.join(build_path, str("o_synth_test_c") + executable_extension)
testcases["Ouroboros-C-VA"] = os.path.join(build_path, str("o_synth_test_vac") + executable_extension)
testcases["Ouroboros-C-VL"] = os.path.join(build_path, str("o_synth_test_vlc") + executable_extension)
if any("f" in s for s in args.t):
testcases["FDGMalloc"] = os.path.join(sync_build_path, str("f_synth_test") + executable_extension)
if any("r" in s for s in args.t):
# testcases["RegEff-A"] = os.path.join(sync_build_path, str("r_synth_test_a") + executable_extension)
testcases["RegEff-AW"] = os.path.join(sync_build_path, str("r_synth_test_aw") + executable_extension)
testcases["RegEff-C"] = os.path.join(sync_build_path, str("r_synth_test_c") + executable_extension)
testcases["RegEff-CF"] = os.path.join(sync_build_path, str("r_synth_test_cf") + executable_extension)
testcases["RegEff-CM"] = os.path.join(sync_build_path, str("r_synth_test_cm") + executable_extension)
testcases["RegEff-CFM"] = os.path.join(sync_build_path, str("r_synth_test_cfm") + executable_extension)
if any("b" in s for s in args.t):
testcases["Baseline"] = os.path.join(sync_build_path, str("b_synth_test") + executable_extension)
# Parse range
if(args.threadrange):
selected_range = args.threadrange.split('-')
smallest_num_threads = 2 ** int(selected_range[0])
largest_num_threads = 2 ** int(selected_range[1])
# Parse range
if(args.range):
selected_range = args.range.split('-')
smallest_allocation_size = int(selected_range[0])
largest_allocation_size = int(selected_range[1])
# Parse num iterations
if(args.iter):
num_iterations = args.iter
# Run Testcases
run_testcases = args.runtest
# Generate results
generate_results = args.genres
# Generate plots
generate_plots = args.genplot
# Plot Axis scaling
plotscale = args.plotscale
# Timeout (in seconds)
if(args.timeout):
time_out_val = args.timeout
if(args.filetype):
filetype = args.filetype
if(args.allocsize):
alloc_size = args.allocsize
####################################################################################################
####################################################################################################
# Run testcases
####################################################################################################
####################################################################################################
if run_testcases:
testwritestr = "0"
namestr = ""
if args.testwrite:
testwritestr = "1"
namestr = "write_"
for name, executable in testcases.items():
csv_path = "results/synth_" + namestr + name + "_" + str(smallest_num_threads)+ "-" + str(largest_num_threads) + "_" + str(smallest_allocation_size) + "-" + str(largest_allocation_size) + ".csv"
if(os.path.isfile(csv_path)):
print("This file <" + csv_path + "> already exists, do you really want to OVERWRITE?")
inputfromconsole = input()
if not (inputfromconsole == "yes" or inputfromconsole == "y"):
continue
with open(csv_path, "w", newline='') as csv_file:
csv_file.write("NumThreads, mean, std-dev, min, max, median\n")
num_threads = smallest_num_threads
while num_threads <= largest_num_threads:
with open(csv_path, "a", newline='') as csv_file:
csv_file.write(str(num_threads) + ", ")
run_config = str(num_threads) + " " + str(smallest_allocation_size) + " " + str(largest_allocation_size) + " " + str(num_iterations) + " 0 " + csv_path + " " + str(alloc_size) + " " + testwritestr + " " + str(args.device)
executecommand = "{0} {1}".format(executable, run_config)
print("#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#")
print("Running " + name + " with command -> " + executecommand)
print("#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#")
print(executecommand)
_, process_killed = Command(executecommand).run(timeout=time_out_val)
if process_killed :
print("We killed the process!")
with open(csv_path, "a", newline='') as csv_file:
csv_file.write("0,0,-------------------> Ran longer than " + str(time_out_val) + "\n")
else:
print("Success!")
with open(csv_path, "a", newline='') as csv_file:
csv_file.write("\n")
num_threads *= 2
# ####################################################################################################
# ####################################################################################################
# # Generate new Results
# ####################################################################################################
# ####################################################################################################
if generate_results:
if not os.path.exists("results/aggregate"):
os.mkdir("results/aggregate")
if args.testwrite:
generateResultsFromSynthetic(testcases, "results", smallest_num_threads, largest_num_threads, smallest_allocation_size, largest_allocation_size, "Num Threads", "synth_write", 2)
else:
generateResultsFromSynthetic(testcases, "results", smallest_num_threads, largest_num_threads, smallest_allocation_size, largest_allocation_size, "Num Threads", "synth", 1)
print("Done")
if __name__ == "__main__":
main() | 48.367568 | 232 | 0.625726 |
4a20b9436646cb889c258999ca83affbaec971a1 | 775 | py | Python | docs/examples/data_access/django/example/app/views.py | connec/oso | a12d94206807b69beb6fe7a9070b9afcacdfc845 | [
"Apache-2.0"
] | 2,167 | 2020-07-28T15:49:48.000Z | 2022-03-31T06:11:28.000Z | docs/examples/data_access/django/example/app/views.py | connec/oso | a12d94206807b69beb6fe7a9070b9afcacdfc845 | [
"Apache-2.0"
] | 1,060 | 2020-07-25T18:37:07.000Z | 2022-03-30T05:49:44.000Z | docs/examples/data_access/django/example/app/views.py | connec/oso | a12d94206807b69beb6fe7a9070b9afcacdfc845 | [
"Apache-2.0"
] | 118 | 2020-08-05T19:27:14.000Z | 2022-03-31T16:37:39.000Z | from base64 import urlsafe_b64decode
from django.http import HttpResponse
from app.models import Post, User
def user_from_request(request):
try:
username = (
urlsafe_b64decode(request.headers.get("Authorization").split(" ")[1])
.decode("utf-8")
.split(":")[0]
)
return User.objects.get(username=username)
except:
return User(username="guest")
def index(request):
request.user = user_from_request(request)
authorized_posts = Post.objects.authorize(request)
formatted = [
f"{post.pk} - @{post.creator.username} - {post.access_level} - {post.contents}"
for post in authorized_posts
]
return HttpResponse("\n".join(formatted) + "\n", content_type="text/plain")
| 27.678571 | 87 | 0.643871 |
4a20bb31bd413d5ecb81423145b4590b8d0a29e9 | 160 | py | Python | Note-6 A3CNet/Note 6 simple ACNet/Agent_test.py | summerRainn/DeepLearningNotes | 6657694d5e22e73969e47699b4e31a28385d0f19 | [
"MIT"
] | 345 | 2017-08-23T13:48:50.000Z | 2022-03-17T05:43:34.000Z | Note-6 A3CNet/Note 6 simple ACNet/Agent_test.py | summerRainn/DeepLearningNotes | 6657694d5e22e73969e47699b4e31a28385d0f19 | [
"MIT"
] | 8 | 2017-09-30T15:01:23.000Z | 2019-12-18T08:46:08.000Z | Note-6 A3CNet/Note 6 simple ACNet/Agent_test.py | summerRainn/DeepLearningNotes | 6657694d5e22e73969e47699b4e31a28385d0f19 | [
"MIT"
] | 224 | 2017-08-31T01:10:55.000Z | 2022-03-09T06:14:12.000Z | from Agent import Agent
GAME = 'CartPole-v0'
MAX_EPISODES = 100
def main():
A = Agent(GAME)
A.run(MAX_EPISODES)
if __name__ == '__main__':
main() | 14.545455 | 26 | 0.65 |
4a20bbf8e2fd44536cbd6c45b559eb3783b09e47 | 964 | py | Python | testkraut/tests/localtests/check_fsl_workflow/bet_wf.py | hanke/testkraut | 8a4c942e19e0381e26770c58fbd67b85db50c1d8 | [
"MIT"
] | 2 | 2017-10-16T14:56:59.000Z | 2019-07-18T13:49:58.000Z | testkraut/tests/localtests/check_fsl_workflow/bet_wf.py | hanke/testkraut | 8a4c942e19e0381e26770c58fbd67b85db50c1d8 | [
"MIT"
] | null | null | null | testkraut/tests/localtests/check_fsl_workflow/bet_wf.py | hanke/testkraut | 8a4c942e19e0381e26770c58fbd67b85db50c1d8 | [
"MIT"
] | 5 | 2015-03-05T07:11:36.000Z | 2019-11-25T10:46:15.000Z | import os
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.io as nio # Data i/o
import nipype.pipeline.engine as pe # pypeline engine
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
test_workflow = pe.Workflow(name='justbet')
datasource = pe.Node(interface=nio.DataGrabber(outfields=['head']),
name = 'datasource')
datasource.inputs.base_directory = os.path.abspath(os.curdir)
datasource.inputs.template = '%s.nii.gz'
datasource.inputs.template_args = dict(head=[['head']])
bet = pe.Node(interface=fsl.BET(mask=True),
name='bet')
datasink = pe.Node(interface=nio.DataSink(parameterization=False),
name="datasink")
datasink.inputs.base_directory = os.path.abspath(os.curdir)
test_workflow.connect(datasource, 'head', bet, 'in_file')
test_workflow.connect(bet, 'out_file', datasink, '@brain')
test_workflow.connect(bet, 'mask_file', datasink, '@brainmask')
| 37.076923 | 67 | 0.70332 |
4a20bcea7c2d9c858609521074d50dffedfe1c2b | 3,495 | py | Python | experiments/camera_demo.py | lxy5513/Multi-Style-Transfer | 469bb322c21cf1c22143cb154baa5c31f9774113 | [
"MIT"
] | 4 | 2019-07-15T04:33:02.000Z | 2021-04-13T13:34:39.000Z | experiments/camera_demo.py | lxy5513/Multi-Style-Transfer | 469bb322c21cf1c22143cb154baa5c31f9774113 | [
"MIT"
] | 1 | 2020-07-09T10:25:44.000Z | 2020-07-09T10:25:44.000Z | experiments/camera_demo.py | lxy5513/Multi-Style-Transfer | 469bb322c21cf1c22143cb154baa5c31f9774113 | [
"MIT"
] | 1 | 2019-04-09T23:18:35.000Z | 2019-04-09T23:18:35.000Z | import os
import cv2
import numpy as np
import torch
from torch.autograd import Variable
from net import Net
from option import Options
import utils
from utils import StyleLoader
def run_demo(args, mirror=False):
# style_model = Net(ngf=args.ngf)
# style_model.load_state_dict(torch.load(args.model))
model_dict = torch.load(args.model)
model_dict_clone = model_dict.copy() # We can't mutate while iterating
for key, value in model_dict_clone.items():
if key.endswith(('running_mean', 'running_var')):
del model_dict[key]
style_model = Net(ngf=args.ngf)
style_model.load_state_dict(model_dict, False)
style_model.eval()
if args.cuda:
style_loader = StyleLoader(args.style_folder, args.style_size)
style_model.cuda()
else:
style_loader = StyleLoader(args.style_folder, args.style_size, False)
# Define the codec and create VideoWriter object
height = args.demo_size
width = int(4.0/3*args.demo_size)
swidth = int(width/4)
sheight = int(height/4)
if args.record:
fourcc = cv2.VideoWriter_fourcc('F','M','P','4')
out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (2*width, height))
cam = cv2.VideoCapture(0)
cam.set(3, width)
cam.set(4, height)
key = 0
idx = 0
while True:
# read frame
idx += 1
ret_val, img = cam.read()
if mirror:
img = cv2.flip(img, 1)
cimg = img.copy()
img = np.array(img).transpose(2, 0, 1)
# changing style
if idx%10 == 1:
if int(idx/10) == 21:
break
style_v = style_loader.get(int(idx/10))
print('style version ----------------> ', idx)
style_v = Variable(style_v.data)
# print('style change to ', style_v)
style_model.setTarget(style_v)
img=torch.from_numpy(img).unsqueeze(0).float()
if args.cuda:
img=img.cuda()
img = Variable(img)
## handle image
img = style_model(img)
if args.cuda:
simg = style_v.cpu().data[0].numpy()
img = img.cpu().clamp(0, 255).data[0].numpy()
else:
			simg = style_v.data[0].numpy()
img = img.clamp(0, 255).data[0].numpy()
img = img.transpose(1, 2, 0).astype('uint8')
simg = simg.transpose(1, 2, 0).astype('uint8')
# display
simg = cv2.resize(simg,(swidth, sheight), interpolation = cv2.INTER_CUBIC)
cimg[0:sheight,0:swidth,:]=simg
img = np.concatenate((cimg,img),axis=1)
cv2.imshow('MSG Demo', img)
#cv2.imwrite('stylized/%i.jpg'%idx,img)
key = cv2.waitKey(1)
if args.record:
out.write(img)
if key == 27:
break
cam.release()
if args.record:
out.release()
cv2.destroyAllWindows()
def main():
# getting things ready
args = Options().parse()
if args.subcommand is None:
raise ValueError("ERROR: specify the experiment type")
if args.cuda and not torch.cuda.is_available():
raise ValueError("ERROR: cuda is not available, try running on CPU")
# run demo
run_demo(args, mirror=True)
if __name__ == '__main__':
main()
| 31.205357 | 86 | 0.553362 |
4a20bd1a64630d45f4404859b7aee37ca3b6fe17 | 4,567 | py | Python | third_party/cpuinfo/configure.py | gautamkmr/caffe2 | cde7f21d1e34ec714bc08dbfab945a1ad30e92ff | [
"MIT"
] | null | null | null | third_party/cpuinfo/configure.py | gautamkmr/caffe2 | cde7f21d1e34ec714bc08dbfab945a1ad30e92ff | [
"MIT"
] | null | null | null | third_party/cpuinfo/configure.py | gautamkmr/caffe2 | cde7f21d1e34ec714bc08dbfab945a1ad30e92ff | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import confu
parser = confu.standard_parser("cpuinfo configuration script")
parser.add_argument("--log", dest="log_level",
choices=("none", "error", "warning", "info", "debug"), default="error")
parser.add_argument("--mock", dest="mock", action="store_true")
def main(args):
options = parser.parse_args(args)
build = confu.Build.from_options(options)
macros = {
"CPUINFO_LOG_LEVEL": {"none": 0, "error": 1, "warning": 2, "info": 3, "debug": 4}[options.log_level],
"CPUINFO_LOG_TO_STDIO": int(not options.mock),
"CPUINFO_MOCK": int(options.mock),
}
build.export_cpath("include", ["cpuinfo.h"])
with build.options(source_dir="src", macros=macros, extra_include_dirs="src"):
sources = ["init.c", "api.c", "log.c"]
if build.target.is_x86 or build.target.is_x86_64:
sources += [
"x86/init.c", "x86/info.c", "x86/vendor.c", "x86/uarch.c", "x86/name.c",
"x86/topology.c",
"x86/cache/init.c", "x86/cache/descriptor.c", "x86/cache/deterministic.c",
]
if build.target.is_macos:
sources += ["x86/mach/init.c"]
elif build.target.is_linux or build.target.is_android:
sources += [
"x86/linux/init.c",
"x86/linux/cpuinfo.c",
]
sources.append("x86/isa.c" if not build.target.is_nacl else "x86/nacl/isa.c")
if build.target.is_arm or build.target.is_arm64:
sources += ["arm/uarch.c", "arm/cache.c"]
if build.target.is_linux or build.target.is_android:
sources += [
"arm/linux/init.c",
"arm/linux/cpuinfo.c",
"arm/linux/clusters.c",
"arm/linux/midr.c",
"arm/linux/chipset.c",
"arm/linux/hwcap.c",
]
if build.target.is_arm:
sources.append("arm/linux/aarch32-isa.c")
elif build.target.is_arm64:
sources.append("arm/linux/aarch64-isa.c")
if build.target.is_android:
sources += [
"arm/android/gpu.c",
"arm/android/properties.c",
]
if build.target.is_macos:
sources += ["mach/topology.c"]
if build.target.is_linux or build.target.is_android:
sources += [
"linux/current.c",
"linux/cpulist.c",
"linux/smallfile.c",
"linux/multiline.c",
"linux/processors.c",
]
if options.mock:
sources += ["linux/mockfile.c"]
if build.target.is_android:
sources.append("linux/gpu.c")
if options.mock:
sources.append("gpu/gles2-mock.c")
else:
sources.append("gpu/gles2.c")
build.static_library("cpuinfo", map(build.cc, sources))
with build.options(source_dir="tools", deps=build):
build.executable("cpu-info", build.cc("cpu-info.c"))
build.executable("isa-info", build.cc("isa-info.c"))
build.executable("cache-info", build.cc("cache-info.c"))
if build.target.is_x86_64:
with build.options(source_dir="tools", include_dirs=["src", "include"]):
build.executable("cpuid-dump", build.cc("cpuid-dump.c"))
with build.options(source_dir="test", deps=[build, build.deps.googletest]):
build.smoketest("init-test", build.cxx("init.cc"))
if build.target.is_linux:
build.smoketest("get-current-test", build.cxx("get-current.cc"))
if build.target.is_x86_64:
build.smoketest("brand-string-test", build.cxx("name/brand-string.cc"))
if options.mock:
with build.options(source_dir="test", include_dirs="test", macros="CPUINFO_MOCK", deps=[build, build.deps.googletest]):
if build.target.is_arm64 and build.target.is_linux:
build.unittest("scaleway-test", build.cxx("scaleway.cc"))
if not options.mock:
with build.options(source_dir="bench", deps=[build, build.deps.googlebenchmark]):
build.benchmark("init-bench", build.cxx("init.cc"))
if not build.target.is_macos:
build.benchmark("get-current-bench", build.cxx("get-current.cc"))
return build
if __name__ == "__main__":
import sys
main(sys.argv[1:]).generate()
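# Illustrative usage note (assumption, not taken from this repository's docs):
# the script is normally driven from a shell, for example
#   ./configure.py --log debug   # most verbose CPUINFO_LOG_LEVEL
#   ./configure.py --mock        # define CPUINFO_MOCK and build the mock sources
# Both flags only influence the macros dictionary assembled in main() above.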
| 40.776786 | 127 | 0.554631 |
4a20bdf15f6152a01b76ce227aeccc28d7600f54 | 2,998 | py | Python | aim/ql/grammar/atom.py | VkoHov/aim | 92567b48437a8c71b4bde3b034fc0e5c61479cf9 | [
"Apache-2.0"
] | 1 | 2021-07-19T19:21:30.000Z | 2021-07-19T19:21:30.000Z | aim/ql/grammar/atom.py | VkoHov/aim | 92567b48437a8c71b4bde3b034fc0e5c61479cf9 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:17:16.000Z | 2022-02-10T05:49:55.000Z | aim/ql/grammar/atom.py | paulmchen/aim | 53212cdce7a80cb8dadfaf7869a31fbf4ee6ce5b | [
"Apache-2.0"
] | 1 | 2021-01-29T02:10:14.000Z | 2021-01-29T02:10:14.000Z | from pyrser import grammar, meta
from aim.ql.tokens.token import TokenList, Token
class Atom(grammar.Grammar):
entry = "input"
grammar = """
input = [ atom:>_ eof ]
atom =
[
[
number
| list
| path
]:>_
|
[
"True":l #is_literal(_, l, 'Boolean')
| "False":l #is_literal(_, l, 'Boolean')
| "None":l #is_literal(_, l, 'None')
| id:i #is_identifier(_, i)
| string:l #is_literal(_, l, 'String')
]
]
path =
[
#is_path(_)
[
id:i #append_identifier(_, i)
[ '.' id:i #append_identifier(_, i) ]+
]
]
list =
[
[
'[' #is_list(_)
atom:a #append_atom(_, a) [',' atom:a #append_atom(_, a)]*
']'
]
|
[
'(' #is_list(_)
atom:a #append_atom(_, a) ','
')'
]
|
[
'(' #is_list(_)
atom:a #append_atom(_, a)
[',' atom:a #append_atom(_, a)]+
')'
]
]
number =
[
@ignore("null")
[
int frac? exp?
]:l #is_literal(_, l, 'Number')
]
int = [ '-'? [ digit1_9s | digit ] ]
frac = [ '.' digits ]
exp = [ e digits ]
digit = [ '0'..'9' ]
digit1_9 = [ '1'..'9' ]
digits = [ digit+ ]
digit1_9s = [ digit1_9 digits]
e = [ ['e'|'E'] ['+'|'-']? ]
"""
@meta.hook(Atom)
def is_literal(self, ast, lit, ltype):
value = self.value(lit)
ast.node = Token(value, ltype)
return True
@meta.hook(Atom)
def is_identifier(self, ast, identifier):
value = self.value(identifier)
ast.node = Token(value, 'Identifier')
return True
# Compound literals
@meta.hook(Atom)
def is_list(self, ast):
ast.node = TokenList('List')
return True
@meta.hook(Atom)
def is_path(self, ast):
ast.node = TokenList('Path')
return True
@meta.hook(Atom)
def append_atom(self, ast, item):
ast.node.append(item.node)
return True
@meta.hook(Atom)
def append_identifier(self, ast, item):
value = self.value(item)
node = Token(value, 'Identifier')
ast.node.append(node)
return True
if __name__ == '__main__':
parser = Atom()
print(parser.parse('12'))
print(parser.parse('1.2'))
print(parser.parse('asdasd'))
print(parser.parse('asdasd.asd'))
print(parser.parse('"asdasd.asd"'))
print(parser.parse('"True"'))
print(parser.parse('True'))
print(parser.parse('None'))
print(parser.parse('[1, 2]'))
print(parser.parse('(1, 2)'))
print(parser.parse('([1,2, "str"], 2)'))
| 22.541353 | 79 | 0.441961 |
4a20be9548d6bda876577d34fddfeebb9746802b | 4,381 | py | Python | TIDALDL-PY/tidal_dl/lang/czech.py | calliswell/Tidal-Media-Downloader | f2716e014ea8c721761a41a151df8c07394f1993 | [
"Apache-2.0"
] | 2 | 2021-07-10T09:31:48.000Z | 2021-07-11T14:45:52.000Z | TIDALDL-PY/tidal_dl/lang/czech.py | calliswell/Tidal-Media-Downloader | f2716e014ea8c721761a41a151df8c07394f1993 | [
"Apache-2.0"
] | null | null | null | TIDALDL-PY/tidal_dl/lang/czech.py | calliswell/Tidal-Media-Downloader | f2716e014ea8c721761a41a151df8c07394f1993 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : czech.py
@Time : 2020/08/20
@Author : Tomikk
@Version : 1.0
@Contact : [email protected]
@Desc :
'''
class LangCzech(object):
SETTING = "Nastavení"
VALUE = "Hodnota"
SETTING_DOWNLOAD_PATH = "Umístění staženého souboru"
SETTING_ONLY_M4A = "Konvertovat mp4 na m4a"
SETTING_ADD_EXPLICIT_TAG = "Přidat explicitní značku"
SETTING_ADD_HYPHEN = "Používat pomlčky místo mezer"
SETTING_ADD_YEAR = "Přidat rok před jméno složky"
SETTING_USE_TRACK_NUM = "Přidat číslo skladby"
SETTING_AUDIO_QUALITY = "Kvalita hudby"
SETTING_VIDEO_QUALITY = "Kvalita videa"
SETTING_CHECK_EXIST = "Zkontrolovat jestli soubor již existuje"
SETTING_ARTIST_BEFORE_TITLE = "Jméno interpreta před jménem skladby"
SETTING_ALBUMID_BEFORE_FOLDER = "Id před složkou alba"
SETTING_INCLUDE_EP = "Zahrnout single&ep"
SETTING_SAVE_COVERS = "Uložit obal alba"
SETTING_LANGUAGE = "Změna jazyka"
SETTING_USE_PLAYLIST_FOLDER = "Use playlist folder"
SETTING_MULITHREAD_DOWNLOAD = "Multi thread download"
SETTING_ALBUM_FOLDER_FORMAT = "Album folder format"
SETTING_TRACK_FILE_FORMAT = "Track file format"
SETTING_SHOW_PROGRESS = "Show progress"
CHOICE = "Výběr"
FUNCTION = "Funkce"
CHOICE_ENTER = "Enter"
CHOICE_ENTER_URLID = "Vložit 'Url/ID':"
CHOICE_EXIT = "Ukončit"
CHOICE_LOGIN = "Check AccessToken"
CHOICE_SETTINGS = "Nastavení"
CHOICE_SET_ACCESS_TOKEN = "Nastavit přístupový token"
CHOICE_DOWNLOAD_BY_URL = "Stáhnout buď url nebo id"
CHOICE_LOGOUT = "Logout"
PRINT_ERR = "[Error]"
PRINT_INFO = "[Info]"
PRINT_SUCCESS = "[Staženo]"
PRINT_ENTER_CHOICE = "Zvolit volbu:"
PRINT_LATEST_VERSION = "Nejnovější verze:"
#PRINT_USERNAME = "přihlašovací jméno:"
#PRINT_PASSWORD = "heslo"
CHANGE_START_SETTINGS = "Start settings('0'-Zpět,'1'-Ano):"
CHANGE_DOWNLOAD_PATH = "Cesta stažení('0' not modify):"
CHANGE_AUDIO_QUALITY = "Kvalita hudby('0'-Normal,'1'-High,'2'-HiFi,'3'-Master):"
CHANGE_VIDEO_QUALITY = "Kvalita videa('0'-1080,'1'-720,'2'-480,'3'-360):"
CHANGE_ONLYM4A = "Konvertovat mp4 na m4a('0'-Ne,'1'-Ano):"
CHANGE_ADD_EXPLICIT_TAG = "Přidat explicitní značku k souborům('0'-Ne,'1'-Ano):"
CHANGE_ADD_HYPHEN = "V názvech souborů používat místo mezer pomlčky('0'-Ne,'1'-Ano):"
CHANGE_ADD_YEAR = "Přidat rok vydání do názvu složky('0'-Ne,'1'-Ano):"
CHANGE_USE_TRACK_NUM = "Přidat číslo skladby před název skladby('0'-Ne,'1'-Ano):"
CHANGE_CHECK_EXIST = "Zkontrolovat existujicí soubor před stažením('0'-Ne,'1'-Ano):"
CHANGE_ARTIST_BEFORE_TITLE = "Přidat jméno interpreta před názvem skladby('0'-Ne,'1'-Ano):"
CHANGE_INCLUDE_EP = "Při stahování alba interpreta zahrnout singly a EP('0'-Ne,'1'-Ano):"
CHANGE_ALBUMID_BEFORE_FOLDER = "Přidat ID před složku do alba('0'-Ne,'1'-Ano):"
CHANGE_SAVE_COVERS = "Uložit obaly alb('0'-Ne,'1'-Ano):"
CHANGE_LANGUAGE = "Zvolit jazyk"
CHANGE_ALBUM_FOLDER_FORMAT = "Album folder format('0' not modify):"
CHANGE_TRACK_FILE_FORMAT = "Track file format('0' not modify):"
CHANGE_SHOW_PROGRESS = "Show progress('0'-No,'1'-Yes):"
# {} are required in these strings
AUTH_START_LOGIN = "Starting login process..."
AUTH_LOGIN_CODE = "Your login code is {}"
AUTH_NEXT_STEP = "Go to {} within the next {} to complete setup."
AUTH_WAITING = "Waiting for authorization..."
AUTH_TIMEOUT = "Operation timed out."
MSG_VALID_ACCESSTOKEN = "AccessToken good for {}."
MSG_INVAILD_ACCESSTOKEN = "Expired AccessToken. Attempting to refresh it."
MSG_PATH_ERR = "Cesta neexistuje!"
MSG_INPUT_ERR = "Chyba vstupu!"
MODEL_ALBUM_PROPERTY = "ALBUM-PROPERTY"
MODEL_TRACK_PROPERTY = "TRACK-PROPERTY"
MODEL_VIDEO_PROPERTY = "VIDEO-PROPERTY"
MODEL_ARTIST_PROPERTY = "ARTIST-PROPERTY"
MODEL_PLAYLIST_PROPERTY = "PLAYLIST-PROPERTY"
MODEL_TITLE = 'Název skladby'
MODEL_TRACK_NUMBER = 'Číslo skladby'
MODEL_VIDEO_NUMBER = 'Číslo videa'
MODEL_RELEASE_DATE = 'Datum vydání'
MODEL_VERSION = 'Verze'
MODEL_EXPLICIT = 'Explicit'
MODEL_ALBUM = 'Album'
MODEL_ID = 'ID'
MODEL_NAME = 'Jméno'
MODEL_TYPE = 'Typ'
| 42.95098 | 96 | 0.683862 |
4a20c0e4e755b53673a835ed797ebed63bcc898f | 10,400 | py | Python | jsonpickle/ext/numpy.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | jsonpickle/ext/numpy.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | jsonpickle/ext/numpy.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import zlib
import warnings
import numpy as np
import ast
from ..handlers import BaseHandler, register, unregister
from ..compat import unicode
from ..util import b64decode, b64encode
__all__ = ['register_handlers', 'unregister_handlers']
native_byteorder = '<' if sys.byteorder == 'little' else '>'
def get_byteorder(arr):
"""translate equals sign to native order"""
byteorder = arr.dtype.byteorder
return native_byteorder if byteorder == '=' else byteorder
class NumpyBaseHandler(BaseHandler):
def flatten_dtype(self, dtype, data):
if hasattr(dtype, 'tostring'):
data['dtype'] = dtype.tostring()
else:
dtype = unicode(dtype)
prefix = '(numpy.record, '
if dtype.startswith(prefix):
dtype = dtype[len(prefix):-1]
data['dtype'] = dtype
def restore_dtype(self, data):
dtype = data['dtype']
if dtype.startswith(('{', '[')):
dtype = ast.literal_eval(dtype)
return np.dtype(dtype)
class NumpyDTypeHandler(NumpyBaseHandler):
def flatten(self, obj, data):
self.flatten_dtype(obj, data)
return data
def restore(self, data):
return self.restore_dtype(data)
class NumpyGenericHandler(NumpyBaseHandler):
def flatten(self, obj, data):
self.flatten_dtype(obj.dtype.newbyteorder('N'), data)
data['value'] = self.context.flatten(obj.tolist(), reset=False)
return data
def restore(self, data):
value = self.context.restore(data['value'], reset=False)
return self.restore_dtype(data).type(value)
class NumpyNDArrayHandler(NumpyBaseHandler):
"""Stores arrays as text representation, without regard for views
"""
def flatten_flags(self, obj, data):
if obj.flags.writeable is False:
data['writeable'] = False
def restore_flags(self, data, arr):
if not data.get('writeable', True):
arr.flags.writeable = False
def flatten(self, obj, data):
self.flatten_dtype(obj.dtype.newbyteorder('N'), data)
self.flatten_flags(obj, data)
data['values'] = self.context.flatten(obj.tolist(), reset=False)
if 0 in obj.shape:
# add shape information explicitly as it cannot be inferred from an empty list
data['shape'] = obj.shape
return data
def restore(self, data):
values = self.context.restore(data['values'], reset=False)
arr = np.array(
values,
dtype=self.restore_dtype(data),
order=data.get('order', 'C')
)
shape = data.get('shape', None)
if shape is not None:
arr = arr.reshape(shape)
self.restore_flags(data, arr)
return arr
class NumpyNDArrayHandlerBinary(NumpyNDArrayHandler):
"""stores arrays with size greater than 'size_treshold' as (optionally) compressed base64
Notes
-----
This would be easier to implement using np.save/np.load, but that would be less language-agnostic
"""
def __init__(self, size_treshold=16, compression=zlib):
"""
:param size_treshold: nonnegative int or None
valid values for 'size_treshold' are all nonnegative integers and None
if size_treshold is None, values are always stored as nested lists
:param compression: a compression module or None
valid values for 'compression' are {zlib, bz2, None}
            if compression is None, no compression is applied
"""
self.size_treshold = size_treshold
self.compression = compression
def flatten_byteorder(self, obj, data):
byteorder = obj.dtype.byteorder
if byteorder != '|':
data['byteorder'] = get_byteorder(obj)
def restore_byteorder(self, data, arr):
byteorder = data.get('byteorder', None)
if byteorder:
arr.dtype = arr.dtype.newbyteorder(byteorder)
def flatten(self, obj, data):
"""encode numpy to json"""
if self.size_treshold >= obj.size or self.size_treshold is None:
# encode as text
data = super(NumpyNDArrayHandlerBinary, self).flatten(obj, data)
else:
# encode as binary
buffer = obj.tobytes(order='a') # numpy docstring is lacking as of 1.11.2, but this is the option we need
if self.compression:
buffer = self.compression.compress(buffer)
data['values'] = b64encode(buffer)
data['shape'] = obj.shape
self.flatten_dtype(obj.dtype.newbyteorder('N'), data)
self.flatten_byteorder(obj, data)
self.flatten_flags(obj, data)
if not obj.flags.c_contiguous:
data['order'] = 'F'
return data
def restore(self, data):
"""decode numpy from json"""
values = data['values']
if isinstance(values, list):
# decode text representation
arr = super(NumpyNDArrayHandlerBinary, self).restore(data)
else:
# decode binary representation
buffer = b64decode(values)
if self.compression:
buffer = self.compression.decompress(buffer)
arr = np.ndarray(
buffer=buffer,
dtype=self.restore_dtype(data),
shape=data.get('shape'),
order=data.get('order', 'C')
).copy() # make a copy, to force the result to own the data
self.restore_byteorder(data, arr)
self.restore_flags(data, arr)
return arr
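# --- Illustrative sketch (not part of the module) ----------------------------
# How size_treshold plays out in practice, assuming jsonpickle is importable
# and the handlers are registered as in register_handlers() further below:
def _example_binary_vs_text_encoding():
    import jsonpickle
    register_handlers()
    small = jsonpickle.encode(np.zeros(4))     # at/below the threshold: 'values' is a nested list
    large = jsonpickle.encode(np.zeros(1000))  # above it: 'values' is base64 of zlib-compressed bytes
    unregister_handlers()
    return small, large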
class NumpyNDArrayHandlerView(NumpyNDArrayHandlerBinary):
"""Pickles references inside ndarrays, or array-views
Notes
-----
The current implementation has some restrictions.
'base' arrays, or arrays which are viewed by other arrays, must be f-or-c-contiguous.
This is not such a large restriction in practice, because all numpy array creation is c-contiguous by default.
Relaxing this restriction would be nice though; especially if it can be done without bloating the design too much.
Furthermore, ndarrays which are views of array-like objects implementing __array_interface__,
but which are not themselves nd-arrays, are deepcopied with a warning (by default),
as we cannot guarantee whatever custom logic such classes implement is correctly reproduced.
"""
def __init__(self, mode='warn', size_treshold=16, compression=zlib):
"""
:param mode: {'warn', 'raise', 'ignore'}
            How to react when encountering array-like objects whose references we cannot safely serialize
:param size_treshold: nonnegative int or None
valid values for 'size_treshold' are all nonnegative integers and None
if size_treshold is None, values are always stored as nested lists
:param compression: a compression module or None
valid values for 'compression' are {zlib, bz2, None}
            if compression is None, no compression is applied
"""
super(NumpyNDArrayHandlerView, self).__init__(size_treshold, compression)
self.mode = mode
def flatten(self, obj, data):
"""encode numpy to json"""
base = obj.base
if base is None and obj.flags.forc:
# store by value
data = super(NumpyNDArrayHandlerView, self).flatten(obj, data)
# ensure that views on arrays stored as text are interpreted correctly
if not obj.flags.c_contiguous:
data['order'] = 'F'
elif isinstance(base, np.ndarray) and base.flags.forc:
# store by reference
data['base'] = self.context.flatten(base, reset=False)
offset = obj.ctypes.data - base.ctypes.data
if offset:
data['offset'] = offset
if not obj.flags.c_contiguous:
data['strides'] = obj.strides
data['shape'] = obj.shape
self.flatten_dtype(obj.dtype.newbyteorder('N'), data)
self.flatten_flags(obj, data)
if get_byteorder(obj) != '|':
byteorder = 'S' if get_byteorder(obj) != get_byteorder(base) else None
if byteorder:
data['byteorder'] = byteorder
if self.size_treshold >= obj.size:
# not used in restore since base is present, but include values for human-readability
super(NumpyNDArrayHandlerBinary, self).flatten(obj, data)
else:
# store a deepcopy or fail
if self.mode == 'warn':
msg = "ndarray is defined by reference to an object we do not know how to serialize. " \
"A deep copy is serialized instead, breaking memory aliasing."
warnings.warn(msg)
elif self.mode == 'raise':
msg = "ndarray is defined by reference to an object we do not know how to serialize."
raise ValueError(msg)
data = super(NumpyNDArrayHandlerView, self).flatten(obj.copy(), data)
return data
def restore(self, data):
"""decode numpy from json"""
base = data.get('base', None)
if base is None:
# decode array with owndata=True
arr = super(NumpyNDArrayHandlerView, self).restore(data)
else:
# decode array view, which references the data of another array
base = self.context.restore(base, reset=False)
assert base.flags.forc, \
"Current implementation assumes base is C or F contiguous"
arr = np.ndarray(
buffer=base.data,
dtype=self.restore_dtype(data).newbyteorder(data.get('byteorder', '|')),
shape=data.get('shape'),
offset=data.get('offset', 0),
strides=data.get('strides', None)
)
self.restore_flags(data, arr)
return arr
def register_handlers():
register(np.dtype, NumpyDTypeHandler, base=True)
register(np.generic, NumpyGenericHandler, base=True)
register(np.ndarray, NumpyNDArrayHandlerView(), base=True)
def unregister_handlers():
unregister(np.dtype)
unregister(np.generic)
unregister(np.ndarray)
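# --- Illustrative usage sketch (not part of the module) ----------------------
# A typical round trip once the handlers are registered; the exact JSON layout
# depends on the size_treshold/compression configured on the handlers above.
def _example_roundtrip():
    import jsonpickle
    register_handlers()
    original = np.arange(6, dtype=np.float32).reshape(2, 3)
    restored = jsonpickle.decode(jsonpickle.encode(original))
    assert np.array_equal(original, restored)
    unregister_handlers()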
| 36.491228 | 118 | 0.616154 |
4a20c199ad66c7b12449a20d0d8b3d6006aabef4 | 28,659 | py | Python | github_to_sqlite/utils.py | AlvaroJoseLopes/github-to-sqlite | 6f61f94f8b43b3021e79c6b6ef4d5f47f6e8785b | [
"Apache-2.0"
] | null | null | null | github_to_sqlite/utils.py | AlvaroJoseLopes/github-to-sqlite | 6f61f94f8b43b3021e79c6b6ef4d5f47f6e8785b | [
"Apache-2.0"
] | null | null | null | github_to_sqlite/utils.py | AlvaroJoseLopes/github-to-sqlite | 6f61f94f8b43b3021e79c6b6ef4d5f47f6e8785b | [
"Apache-2.0"
] | null | null | null | import base64
import requests
import re
import time
import yaml
FTS_CONFIG = {
# table: columns
"commits": ["message"],
"issue_comments": ["body"],
"issues": ["title", "body"],
"pull_requests": ["title", "body"],
"labels": ["name", "description"],
"licenses": ["name"],
"milestones": ["title", "description"],
"releases": ["name", "body"],
"repos": ["name", "description"],
"users": ["login", "name"],
}
VIEWS = {
# Name: (required_tables, SQL)
"dependent_repos": (
{"repos", "dependents"},
"""select
repos.full_name as repo,
'https://github.com/' || dependent_repos.full_name as dependent,
dependent_repos.created_at as dependent_created,
dependent_repos.updated_at as dependent_updated,
dependent_repos.stargazers_count as dependent_stars,
dependent_repos.watchers_count as dependent_watchers
from
dependents
join repos as dependent_repos on dependents.dependent = dependent_repos.id
join repos on dependents.repo = repos.id
order by
dependent_repos.created_at desc""",
),
"repos_starred": (
{"stars", "repos", "users"},
"""select
stars.starred_at,
starring_user.login as starred_by,
repos.*
from
repos
join stars on repos.id = stars.repo
join users as starring_user on stars.user = starring_user.id
join users on repos.owner = users.id
order by
starred_at desc""",
),
"recent_releases": (
{"repos", "releases"},
"""select
repos.rowid as rowid,
repos.html_url as repo,
releases.html_url as release,
substr(releases.published_at, 0, 11) as date,
releases.body as body_markdown,
releases.published_at,
coalesce(repos.topics, '[]') as topics
from
releases
join repos on repos.id = releases.repo
order by
releases.published_at desc""",
),
}
FOREIGN_KEYS = [
("repos", "license", "licenses", "key"),
]
class GitHubError(Exception):
def __init__(self, message, status_code):
self.message = message
self.status_code = status_code
@classmethod
def from_response(cls, response):
message = response.json()["message"]
if "git repository is empty" in message.lower():
cls = GitHubRepositoryEmpty
return cls(message, response.status_code)
class GitHubRepositoryEmpty(GitHubError):
pass
def save_issues(db, issues, repo):
if "milestones" not in db.table_names():
if "users" not in db.table_names():
# So we can define the foreign key from milestones:
db["users"].create({"id": int}, pk="id")
db["milestones"].create(
{"id": int, "title": str, "description": str, "creator": int, "repo": int},
pk="id",
foreign_keys=(("repo", "repos", "id"), ("creator", "users", "id")),
)
for original in issues:
# Ignore all of the _url fields
issue = {
key: value for key, value in original.items() if not key.endswith("url")
}
# Add repo key
issue["repo"] = repo["id"]
# Pull request can be flattened to just their URL
if issue.get("pull_request"):
issue["pull_request"] = issue["pull_request"]["url"].split(
"https://api.github.com/repos/"
)[1]
# Extract user
issue["user"] = save_user(db, issue["user"])
labels = issue.pop("labels")
# Extract milestone
if issue["milestone"]:
issue["milestone"] = save_milestone(db, issue["milestone"], repo["id"])
# For the moment we ignore the assignees=[] array but we DO turn assignee
# singular into a foreign key reference
issue.pop("assignees", None)
if issue["assignee"]:
issue["assignee"] = save_user(db, issue["assignee"])
# Add a type field to distinguish issues from pulls
issue["type"] = "pull" if issue.get("pull_request") else "issue"
# Insert record
table = db["issues"].insert(
issue,
pk="id",
foreign_keys=[
("user", "users", "id"),
("assignee", "users", "id"),
("milestone", "milestones", "id"),
("repo", "repos", "id"),
],
alter=True,
replace=True,
columns={
"user": int,
"assignee": int,
"milestone": int,
"repo": int,
"title": str,
"body": str,
},
)
# m2m for labels
for label in labels:
table.m2m("labels", label, pk="id")
def save_pull_requests(db, pull_requests, repo):
if "milestones" not in db.table_names():
if "users" not in db.table_names():
# So we can define the foreign key from milestones:
db["users"].create({"id": int}, pk="id")
db["milestones"].create(
{"id": int, "title": str, "description": str, "creator": int, "repo": int},
pk="id",
foreign_keys=(("repo", "repos", "id"), ("creator", "users", "id")),
)
for original in pull_requests:
# Ignore all of the _url fields
pull_request = {
key: value for key, value in original.items() if not key.endswith("url")
}
# Add repo key
pull_request["repo"] = repo["id"]
# Pull request _links can be flattened to just their URL
pull_request["url"] = pull_request["_links"]["html"]["href"]
pull_request.pop("_links")
# Extract user
pull_request["user"] = save_user(db, pull_request["user"])
labels = pull_request.pop("labels")
# Extract merged_by, if it exists
if pull_request.get("merged_by"):
pull_request["merged_by"] = save_user(db, pull_request["merged_by"])
# Head sha
pull_request["head"] = pull_request["head"]["sha"]
pull_request["base"] = pull_request["base"]["sha"]
# Extract milestone
if pull_request["milestone"]:
pull_request["milestone"] = save_milestone(
db, pull_request["milestone"], repo["id"]
)
# For the moment we ignore the assignees=[] array but we DO turn assignee
# singular into a foreign key reference
pull_request.pop("assignees", None)
if original["assignee"]:
pull_request["assignee"] = save_user(db, pull_request["assignee"])
pull_request.pop("active_lock_reason")
# ignore requested_reviewers and requested_teams
pull_request.pop("requested_reviewers", None)
pull_request.pop("requested_teams", None)
# Insert record
table = db["pull_requests"].insert(
pull_request,
pk="id",
foreign_keys=[
("user", "users", "id"),
("merged_by", "users", "id"),
("assignee", "users", "id"),
("milestone", "milestones", "id"),
("repo", "repos", "id"),
],
alter=True,
replace=True,
columns={
"user": int,
"assignee": int,
"milestone": int,
"repo": int,
"title": str,
"body": str,
"merged_by": int,
},
)
# m2m for labels
for label in labels:
table.m2m("labels", label, pk="id")
def save_user(db, user):
# Remove all url fields except avatar_url and html_url
to_save = {
key: value
for key, value in user.items()
if (key in ("avatar_url", "html_url") or not key.endswith("url"))
}
# If this user was nested in repo they will be missing several fields
# so fill in 'name' from 'login' so Datasette foreign keys display
if to_save.get("name") is None:
to_save["name"] = to_save["login"]
return db["users"].upsert(to_save, pk="id", alter=True).last_pk
def save_milestone(db, milestone, repo_id):
milestone = dict(milestone)
milestone["creator"] = save_user(db, milestone["creator"])
milestone["repo"] = repo_id
milestone.pop("labels_url", None)
milestone.pop("url", None)
return (
db["milestones"]
.insert(
milestone,
pk="id",
foreign_keys=[("creator", "users", "id"), ("repo", "repos", "id")],
alter=True,
replace=True,
columns={"creator": int, "repo": int},
)
.last_pk
)
def save_issue_comment(db, comment):
comment = dict(comment)
comment["user"] = save_user(db, comment["user"])
# We set up a 'issue' foreign key, but only if issue is in the DB
comment["issue"] = None
issue_url = comment["issue_url"]
bits = issue_url.split("/")
user_slug, repo_slug, issue_number = bits[-4], bits[-3], bits[-1]
# Is the issue in the DB already?
issue_rows = list(
db["issues"].rows_where(
"number = :number and repo = (select id from repos where full_name = :repo)",
{"repo": "{}/{}".format(user_slug, repo_slug), "number": issue_number},
)
)
if len(issue_rows) == 1:
comment["issue"] = issue_rows[0]["id"]
comment.pop("url", None)
if "url" in comment.get("reactions", {}):
comment["reactions"].pop("url")
last_pk = (
db["issue_comments"]
.insert(
comment, pk="id", foreign_keys=("user", "issue"), alter=True, replace=True
)
.last_pk
)
return last_pk
def fetch_repo(full_name, token=None):
headers = make_headers(token)
# Get topics:
headers["Accept"] = "application/vnd.github.mercy-preview+json"
print(full_name)
owner, slug = full_name.split("/")
url = "https://api.github.com/repos/{}/{}".format(owner, slug)
response = requests.get(url, headers=headers)
response.raise_for_status()
return response.json()
def save_repo(db, repo):
assert isinstance(repo, dict), "Repo should be a dict: {}".format(repr(repo))
# Remove all url fields except html_url
to_save = {
key: value
for key, value in repo.items()
if (key == "html_url") or not key.endswith("url")
}
to_save["owner"] = save_user(db, to_save["owner"])
to_save["license"] = save_license(db, to_save["license"])
if "organization" in to_save:
to_save["organization"] = save_user(db, to_save["organization"])
else:
to_save["organization"] = None
repo_id = (
db["repos"]
.insert(
to_save,
pk="id",
foreign_keys=(("owner", "users", "id"), ("organization", "users", "id")),
alter=True,
replace=True,
columns={
"organization": int,
"topics": str,
"name": str,
"description": str,
},
)
.last_pk
)
return repo_id
def save_license(db, license):
if license is None:
return None
return db["licenses"].insert(license, pk="key", replace=True).last_pk
def fetch_issues(repo, token=None, issue_ids=None):
headers = make_headers(token)
headers["accept"] = "application/vnd.github.v3+json"
if issue_ids:
for issue_id in issue_ids:
url = "https://api.github.com/repos/{}/issues/{}".format(repo, issue_id)
response = requests.get(url, headers=headers)
response.raise_for_status()
yield response.json()
else:
url = "https://api.github.com/repos/{}/issues?state=all&filter=all".format(repo)
for issues in paginate(url, headers):
yield from issues
def fetch_pull_requests(repo, token=None, pull_request_ids=None):
headers = make_headers(token)
headers["accept"] = "application/vnd.github.v3+json"
if pull_request_ids:
for pull_request_id in pull_request_ids:
url = "https://api.github.com/repos/{}/pulls/{}".format(
repo, pull_request_id
)
response = requests.get(url, headers=headers)
response.raise_for_status()
yield response.json()
else:
url = "https://api.github.com/repos/{}/pulls?state=all&filter=all".format(repo)
for pull_requests in paginate(url, headers):
yield from pull_requests
def fetch_issue_comments(repo, token=None, issue=None):
assert "/" in repo
headers = make_headers(token)
# Get reactions:
headers["Accept"] = "application/vnd.github.squirrel-girl-preview"
path = "/repos/{}/issues/comments".format(repo)
if issue is not None:
path = "/repos/{}/issues/{}/comments".format(repo, issue)
url = "https://api.github.com{}".format(path)
for comments in paginate(url, headers):
yield from comments
def fetch_releases(repo, token=None):
headers = make_headers(token)
url = "https://api.github.com/repos/{}/releases".format(repo)
for releases in paginate(url, headers):
yield from releases
def fetch_contributors(repo, token=None):
headers = make_headers(token)
url = "https://api.github.com/repos/{}/contributors".format(repo)
for contributors in paginate(url, headers):
yield from contributors
def fetch_tags(repo, token=None):
headers = make_headers(token)
url = "https://api.github.com/repos/{}/tags".format(repo)
for tags in paginate(url, headers):
yield from tags
def fetch_commits(repo, token=None, stop_when=None):
if stop_when is None:
stop_when = lambda commit: False
headers = make_headers(token)
url = "https://api.github.com/repos/{}/commits".format(repo)
try:
for commits in paginate(url, headers):
for commit in commits:
if stop_when(commit):
return
else:
yield commit
except GitHubRepositoryEmpty:
return
def fetch_all_starred(username=None, token=None):
assert username or token, "Must provide username= or token= or both"
headers = make_headers(token)
headers["Accept"] = "application/vnd.github.v3.star+json"
if username:
url = "https://api.github.com/users/{}/starred".format(username)
else:
url = "https://api.github.com/user/starred"
for stars in paginate(url, headers):
yield from stars
def fetch_stargazers(repo, token=None):
headers = make_headers(token)
headers["Accept"] = "application/vnd.github.v3.star+json"
url = "https://api.github.com/repos/{}/stargazers".format(repo)
for stargazers in paginate(url, headers):
yield from stargazers
def fetch_all_repos(username=None, token=None):
assert username or token, "Must provide username= or token= or both"
headers = make_headers(token)
# Get topics for each repo:
headers["Accept"] = "application/vnd.github.mercy-preview+json"
if username:
url = "https://api.github.com/users/{}/repos".format(username)
else:
url = "https://api.github.com/user/repos"
for repos in paginate(url, headers):
yield from repos
def fetch_user(username=None, token=None):
assert username or token, "Must provide username= or token= or both"
headers = make_headers(token)
if username:
url = "https://api.github.com/users/{}".format(username)
else:
url = "https://api.github.com/user"
return requests.get(url, headers=headers).json()
def paginate(url, headers=None):
while url:
response = requests.get(url, headers=headers)
# For HTTP 204 no-content this yields an empty list
if response.status_code == 204:
return
data = response.json()
if isinstance(data, dict) and data.get("message"):
raise GitHubError.from_response(response)
try:
url = response.links.get("next").get("url")
except AttributeError:
url = None
yield data
def make_headers(token=None):
headers = {}
if token is not None:
headers["Authorization"] = "token {}".format(token)
return headers
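def _example_fetch_repo_names(token=None):
    # Illustrative sketch (not part of this module's public helpers): walk the
    # authenticated user's repositories page by page with paginate() and
    # make_headers() above and collect their full names.
    names = []
    for page in paginate("https://api.github.com/user/repos", make_headers(token)):
        names.extend(repo["full_name"] for repo in page)
    return names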
def save_stars(db, user, stars):
user_id = save_user(db, user)
for star in stars:
starred_at = star["starred_at"]
repo = star["repo"]
repo_id = save_repo(db, repo)
db["stars"].insert(
{"user": user_id, "repo": repo_id, "starred_at": starred_at},
pk=("user", "repo"),
foreign_keys=("user", "repo"),
replace=True,
)
def save_stargazers(db, repo_id, stargazers):
for stargazer in stargazers:
starred_at = stargazer["starred_at"]
user_id = save_user(db, stargazer["user"])
db["stars"].upsert(
{"user": user_id, "repo": repo_id, "starred_at": starred_at},
pk=("user", "repo"),
foreign_keys=("user", "repo"),
)
def save_releases(db, releases, repo_id=None):
foreign_keys = [("author", "users", "id")]
if repo_id:
foreign_keys.append(("repo", "repos", "id"))
for original in releases:
# Ignore all of the _url fields except html_url
release = {
key: value
for key, value in original.items()
if key == "html_url" or not key.endswith("url")
}
assets = release.pop("assets") or []
release["repo"] = repo_id
release["author"] = save_user(db, release["author"])
release_id = (
db["releases"]
.insert(
release, pk="id", foreign_keys=foreign_keys, alter=True, replace=True
)
.last_pk
)
# Handle assets
for asset in assets:
asset["uploader"] = save_user(db, asset["uploader"])
asset["release"] = release_id
db["assets"].upsert_all(
assets,
pk="id",
foreign_keys=[
("uploader", "users", "id"),
("release", "releases", "id"),
],
alter=True,
)
def save_contributors(db, contributors, repo_id):
contributor_rows_to_add = []
for contributor in contributors:
contributions = contributor.pop("contributions")
user_id = save_user(db, contributor)
contributor_rows_to_add.append(
{"repo_id": repo_id, "user_id": user_id, "contributions": contributions}
)
db["contributors"].insert_all(
contributor_rows_to_add,
pk=("repo_id", "user_id"),
foreign_keys=[("repo_id", "repos", "id"), ("user_id", "users", "id")],
replace=True,
)
def save_tags(db, tags, repo_id):
if not db["tags"].exists():
db["tags"].create(
{
"repo": int,
"name": str,
"sha": str,
},
pk=("repo", "name"),
foreign_keys=[("repo", "repos", "id")],
)
db["tags"].insert_all(
(
{
"repo": repo_id,
"name": tag["name"],
"sha": tag["commit"]["sha"],
}
for tag in tags
),
replace=True,
)
def save_commits(db, commits, repo_id=None):
foreign_keys = [
("author", "users", "id"),
("committer", "users", "id"),
("raw_author", "raw_authors", "id"),
("raw_committer", "raw_authors", "id"),
("repo", "repos", "id"),
]
if not db["raw_authors"].exists():
db["raw_authors"].create(
{
"id": str,
"name": str,
"email": str,
},
pk="id",
)
if not db["commits"].exists():
# We explicitly create the table because otherwise we may create it
# with incorrect column types, since author/committer can be null
db["commits"].create(
{
"sha": str,
"message": str,
"author_date": str,
"committer_date": str,
"raw_author": str,
"raw_committer": str,
"repo": int,
"author": int,
"committer": int,
},
pk="sha",
foreign_keys=foreign_keys,
)
for commit in commits:
commit_to_insert = {
"sha": commit["sha"],
"message": commit["commit"]["message"],
"author_date": commit["commit"]["author"]["date"],
"committer_date": commit["commit"]["committer"]["date"],
"raw_author": save_commit_author(db, commit["commit"]["author"]),
"raw_committer": save_commit_author(db, commit["commit"]["committer"]),
}
commit_to_insert["repo"] = repo_id
commit_to_insert["author"] = (
save_user(db, commit["author"]) if commit["author"] else None
)
commit_to_insert["committer"] = (
save_user(db, commit["committer"]) if commit["committer"] else None
)
db["commits"].insert(
commit_to_insert,
alter=True,
replace=True,
)
def save_commit_author(db, raw_author):
name = raw_author.get("name")
email = raw_author.get("email")
return (
db["raw_authors"]
.insert(
{
"name": name,
"email": email,
},
hash_id="id",
replace=True,
)
.last_pk
)
def ensure_foreign_keys(db):
for expected_foreign_key in FOREIGN_KEYS:
table, column, table2, column2 = expected_foreign_key
if (
expected_foreign_key not in db[table].foreign_keys
and
# Ensure all tables and columns exist
db[table].exists()
and db[table2].exists()
and column in db[table].columns_dict
and column2 in db[table2].columns_dict
):
db[table].add_foreign_key(column, table2, column2)
def ensure_db_shape(db):
"Ensure FTS is configured and expected FKS, views and (soon) indexes are present"
# Foreign keys:
ensure_foreign_keys(db)
db.index_foreign_keys()
# FTS:
existing_tables = set(db.table_names())
for table, columns in FTS_CONFIG.items():
if "{}_fts".format(table) in existing_tables:
continue
if table not in existing_tables:
continue
db[table].enable_fts(columns, create_triggers=True)
# Views:
existing_views = set(db.view_names())
existing_tables = set(db.table_names())
for view, (tables, sql) in VIEWS.items():
# Do all of the tables exist?
if not tables.issubset(existing_tables):
continue
db.create_view(view, sql, replace=True)
def scrape_dependents(repo, verbose=False):
# Optional dependency:
from bs4 import BeautifulSoup
url = "https://github.com/{}/network/dependents".format(repo)
while url:
if verbose:
print(url)
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
repos = [
a["href"].lstrip("/")
for a in soup.select("a[data-hovercard-type=repository]")
]
if verbose:
print(repos)
yield from repos
# next page?
try:
next_link = soup.select(".paginate-container")[0].find("a", text="Next")
except IndexError:
break
if next_link is not None:
url = next_link["href"]
time.sleep(1)
else:
url = None
def fetch_emojis(token=None):
headers = make_headers(token)
response = requests.get("https://api.github.com/emojis", headers=headers)
response.raise_for_status()
return [{"name": key, "url": value} for key, value in response.json().items()]
def fetch_image(url):
return requests.get(url).content
def get(url, token=None, accept=None):
headers = make_headers(token)
if accept:
headers["accept"] = accept
if url.startswith("/"):
url = "https://api.github.com{}".format(url)
response = requests.get(url, headers=headers)
response.raise_for_status()
return response
def fetch_readme(token, full_name, html=False):
headers = make_headers(token)
if html:
headers["accept"] = "application/vnd.github.VERSION.html"
url = "https://api.github.com/repos/{}/readme".format(full_name)
response = requests.get(url, headers=headers)
if response.status_code != 200:
return None
if html:
return rewrite_readme_html(response.text)
else:
return base64.b64decode(response.json()["content"]).decode("utf-8")
_href_re = re.compile(r'\shref="#([^"]+)"')
_id_re = re.compile(r'\sid="([^"]+)"')
def rewrite_readme_html(html):
# href="#filtering-tables" => href="#user-content-filtering-tables"
hrefs = set(_href_re.findall(html))
ids = _id_re.findall(html)
for href in hrefs:
if "user-content-{}".format(href) not in ids:
continue
if href.startswith("user-content-"):
continue
# This href should be rewritten to user-content
html = html.replace(
' href="#{}"'.format(href), ' href="#user-content-{}"'.format(href)
)
return html
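def _example_rewrite_readme_html():
    # Illustrative sketch of the rewrite above: a same-page anchor is only
    # rewritten when GitHub's corresponding 'user-content-' id is present.
    html = '<a href="#usage">Usage</a><h2 id="user-content-usage">Usage</h2>'
    return rewrite_readme_html(html)
    # returns '<a href="#user-content-usage">Usage</a><h2 id="user-content-usage">Usage</h2>'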
def fetch_workflows(token, full_name):
headers = make_headers(token)
url = "https://api.github.com/repos/{}/contents/.github/workflows".format(full_name)
response = requests.get(url, headers=headers)
if response.status_code == 404:
return {}
workflows = {}
for item in response.json():
name = item["name"]
content = requests.get(item["download_url"]).text
workflows[name] = content
return workflows
def save_workflow(db, repo_id, filename, content):
workflow = yaml.safe_load(content)
jobs = workflow.pop("jobs", None) or {}
# If there's a `True` key it was probably meant to be "on" - grr YAML
if True in workflow:
workflow["on"] = workflow.pop(True)
# Replace workflow if one exists already
existing = list(
db["workflows"].rows_where("repo = ? and filename = ?", [repo_id, filename])
)
if existing:
# Delete jobs, steps and this record
existing_id = existing[0]["id"]
db["steps"].delete_where(
"job in (select id from jobs where workflow = ?)", [existing_id]
)
db["jobs"].delete_where("workflow = ?", [existing_id])
db["workflows"].delete_where("id = ?", [existing_id])
workflow_id = (
db["workflows"]
.insert(
{
**workflow,
**{
"repo": repo_id,
"filename": filename,
"name": workflow.get("name", filename),
},
},
pk="id",
column_order=["id", "filename", "name"],
alter=True,
foreign_keys=["repo"],
)
.last_pk
)
db["workflows"].create_index(["repo", "filename"], unique=True, if_not_exists=True)
for job_name, job_details in jobs.items():
steps = job_details.pop("steps", None) or []
job_id = (
db["jobs"]
.insert(
{
**{
"workflow": workflow_id,
"name": job_name,
"repo": repo_id,
},
**job_details,
},
pk="id",
alter=True,
foreign_keys=["workflow", "repo"],
)
.last_pk
)
db["steps"].insert_all(
[
{
**{
"seq": i + 1,
"job": job_id,
"repo": repo_id,
},
**step,
}
for i, step in enumerate(steps)
],
alter=True,
pk="id",
foreign_keys=["job", "repo"],
)
def getRepoID(repo_name, db, token):
existing = list(db["repos"].rows_where("full_name = ?", [repo_name]))
dependencie_id = None
if not existing:
dependencie_full = fetch_repo(repo_name, token)
time.sleep(1)
save_repo(db, dependencie_full)
dependencie_id = dependencie_full["id"]
else:
dependencie_id = existing[0]["id"]
return dependencie_id
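def _example_get_repo_id(db, token=None):
    # Illustrative sketch (repo name is arbitrary, network access required):
    # the first call fetches and saves the repository, the second is answered
    # from the local repos table, and both return the same GitHub id.
    first = getRepoID("simonw/sqlite-utils", db, token)
    second = getRepoID("simonw/sqlite-utils", db, token)
    assert first == second
    return first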
| 32.021229 | 89 | 0.559545 |
4a20c455b9e3eab990d31739667b7094d254ce97 | 9,207 | py | Python | d2lbook/library.py | sxjscience/d2l-book | 557dfb40424f21e2c00a61f808275d3bc2b855ab | [
"Apache-2.0"
] | null | null | null | d2lbook/library.py | sxjscience/d2l-book | 557dfb40424f21e2c00a61f808275d3bc2b855ab | [
"Apache-2.0"
] | null | null | null | d2lbook/library.py | sxjscience/d2l-book | 557dfb40424f21e2c00a61f808275d3bc2b855ab | [
"Apache-2.0"
] | null | null | null | """Save codes into library"""
from typing import List
from d2lbook import notebook
import logging
import os
import copy
import re
import pathlib
import ast
import astor
from yapf.yapflib.yapf_api import FormatCode
import isort
def _write_header(f):
f.write('# This file is generated automatically through:\n')
f.write('# d2lbook build lib\n')
f.write('# Don\'t edit it directly\n\n')
def save_file(root_dir: str, nbfile: str):
nbfile = pathlib.Path(nbfile)
pyfile = root_dir / nbfile.with_suffix('.py')
with nbfile.open('r') as f:
nb = notebook.read_markdown(f.read())
saved = []
save_all = False
for cell in nb.cells:
if cell.cell_type == 'code':
src = cell.source.lstrip()
if re.search('# *@save_all', src):
save_all = True
if save_all or re.search('# *@save_cell', src):
saved.append(src)
else:
blk = _save_block(src, '@save')
if blk:
saved.append(blk)
if saved:
with pyfile.open('w') as f:
f.write(
f'# This file is generated from {str(nbfile)} automatically through:\n'
)
f.write('# d2lbook build lib\n')
f.write('# Don\'t edit it directly\n\n')
for blk in saved:
f.write(blk + '\n\n')
logging.info(f'Found {len(saved)} blocks in {str(nbfile)}')
def save_mark(notebooks: List[str], lib_fname: str, save_mark: str):
logging.info('Matching with the pattern: "%s"', save_mark)
with open(lib_fname, 'w') as f:
_write_header(f)
lib_name = os.path.dirname(lib_fname)
lib_name = lib_name.split('/')[-1]
f.write('import sys\n' + lib_name + ' = sys.modules[__name__]\n\n')
for nb in notebooks:
_save_code(nb, f, save_mark=save_mark)
logging.info('Saved into %s', lib_fname)
def save_tab(notebooks: List[str], lib_fname: str, tab: str, default_tab: str):
logging.info(
        f'Matching with the pattern: "#@save", searching for tab {tab}')
with open(lib_fname, 'w') as f:
_write_header(f)
for nb in notebooks:
_save_code(nb, f, tab=tab, default_tab=default_tab)
logging.info('Saved into %s', lib_fname)
def save_version(version: str, version_fn: str):
if version and version_fn:
with open(version_fn, 'r', encoding='UTF-8') as f:
lines = f.read().split('\n')
for i, l in enumerate(lines):
if '__version__' in l:
lines[i] = f'__version__ = "{version}"'
logging.info(f'save {lines[i]} into {version_fn}')
with open(version_fn, 'w') as f:
f.write('\n'.join(lines))
def _save_block(source: str, save_mark: str):
if not save_mark: return ''
lines = source.splitlines()
block = []
for i, l in enumerate(lines):
m = re.search(f'# *{save_mark}', l)
if m:
l = l[:m.span()[0]].rstrip()
if l: block.append(l)
for j in range(i + 1, len(lines)):
l = lines[j]
if not l.startswith(' ') and len(l):
block.append(lines[j])
else:
for k in range(j, len(lines)):
if lines[k].startswith(' ') or not len(lines[k]):
block.append(lines[k])
else:
break
break
return format_code('\n'.join(block))
def _save_code(input_fn, output_fp, save_mark='@save', tab=None,
default_tab=None):
"""get the code blocks (import, class, def) that will be saved"""
with open(input_fn, 'r', encoding='UTF-8') as f:
nb = notebook.read_markdown(f.read())
if tab:
nb = notebook.get_tab_notebook(nb, tab, default_tab)
if not nb:
return
saved = []
for cell in nb.cells:
if cell.cell_type == 'code':
block = _save_block(cell.source, save_mark)
if block: saved.append(block)
if saved:
logging.info('Found %d blocks in %s', len(saved), input_fn)
for block in saved:
code = '# Defined in file: %s\n%s\n\n\n' % (input_fn, block)
output_fp.write(code)
def _parse_mapping_config(config: str):
"""Parse config such as: numpy -> asnumpy, reshape, ...
Return a list of string pairs
"""
mapping = []
for line in config.splitlines():
for term in line.split(','):
term = term.strip()
if not term:
continue
if len(term.split('->')) == 2:
a, b = term.split('->')
mapping.append((a.strip(), b.strip()))
else:
mapping.append((term, term))
return mapping
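# Illustrative sketch of the syntax handled above (hypothetical helper, not part
# of the library): explicit 'a -> b' pairs are split, bare names map to themselves.
def _example_parse_mapping_config():
    return _parse_mapping_config('numpy -> asnumpy, reshape')
    # returns [('numpy', 'asnumpy'), ('reshape', 'reshape')]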
def save_alias(tab_lib):
"""Save alias into the library file"""
alias = ''
if 'alias' in tab_lib:
alias += tab_lib['alias'].strip() + '\n'
if 'lib_name' in tab_lib:
lib_name = tab_lib["lib_name"]
if 'simple_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['simple_alias'])
alias += '\n' + '\n'.join([
f'{a} = {lib_name}.{b}' for a, b in mapping])
if 'fluent_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['fluent_alias'])
alias += '\n' + '\n'.join([
f'{a} = lambda x, *args, **kwargs: x.{b}(*args, **kwargs)'
for a, b in mapping])
if alias:
lib_file = tab_lib['lib_file']
with open(lib_file, 'a') as f:
logging.info(
f'Wrote {len(alias.splitlines())} alias into {lib_file}')
f.write('# Alias defined in config.ini\n')
f.write(alias + '\n\n')
def replace_fluent_alias(source, fluent_mapping):
fluent_mapping = {a: b for a, b in fluent_mapping}
new_src = source
for _ in range(100): # 100 is a (random) big enough number
replaced = False
tree = ast.parse(new_src)
for node in ast.walk(tree):
if (isinstance(node, ast.Call) and
isinstance(node.func, ast.Attribute) and
isinstance(node.func.value, ast.Name) and
node.func.value.id == 'd2l' and
node.func.attr in fluent_mapping):
new_node = ast.Call(
ast.Attribute(value=node.args[0],
attr=fluent_mapping[node.func.attr]),
node.args[1:], node.keywords)
new_src = new_src.replace(
ast.get_source_segment(new_src, node),
astor.to_source(new_node).rstrip())
replaced = True
break
if not replaced:
break
return new_src
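def _example_replace_fluent_alias():
    # Illustrative sketch of the AST rewrite above: with the fluent mapping
    # [('reshape', 'reshape')], a call through the d2l namespace becomes a
    # method call on its first argument, roughly 'y = x.reshape((2, 3))'.
    return replace_fluent_alias('y = d2l.reshape(x, (2, 3))',
                                [('reshape', 'reshape')])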
def replace_alias(nb, tab_lib):
nb = copy.deepcopy(nb)
patterns = []
fluent_mapping = []
if 'reverse_alias' in tab_lib:
patterns += _parse_mapping_config(tab_lib['reverse_alias'])
if 'lib_name' in tab_lib:
lib_name = tab_lib["lib_name"]
if 'simple_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['simple_alias'])
patterns += [(f'd2l.{a}', f'{lib_name}.{b}') for a, b in mapping]
if 'fluent_alias' in tab_lib:
fluent_mapping = _parse_mapping_config(tab_lib['fluent_alias'])
for cell in nb.cells:
if cell.cell_type == 'code':
for p, r in patterns:
cell.source = re.sub(p, r, cell.source)
if fluent_mapping:
for a, _ in fluent_mapping:
if 'd2l.' + a in cell.source:
cell.source = replace_fluent_alias(
cell.source, fluent_mapping)
break
return nb
def format_code(source: str):
if 'import ' in source:
config = isort.settings.Config(no_lines_before=[
isort.settings.FUTURE, isort.settings.STDLIB, isort.settings.
THIRDPARTY, isort.settings.FIRSTPARTY, isort.settings.LOCALFOLDER])
source = isort.code(source, config=config)
# fix the bug that yapf cannot handle jupyter magic
for l in source.splitlines():
if l.startswith('%') or l.startswith('!'):
return source
    # fix the bug that yapf removes the trailing ;
has_tailling_semicolon = source.rstrip().endswith(';')
style = {
'DISABLE_ENDING_COMMA_HEURISTIC': True,
'SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET': False,
'SPLIT_BEFORE_CLOSING_BRACKET': False,
'SPLIT_BEFORE_DICT_SET_GENERATOR': False,
'SPLIT_BEFORE_LOGICAL_OPERATOR': False,
'SPLIT_BEFORE_NAMED_ASSIGNS': False,
'COLUMN_LIMIT': 78,
'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION': 1,}
source = FormatCode(source, style_config=style)[0].strip()
if has_tailling_semicolon: source += ';'
return source
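def _example_format_code():
    # Illustrative sketch: imports are regrouped by isort and the body is
    # re-wrapped by yapf with the 78-column style configured above; a trailing
    # semicolon (used to silence notebook output) is preserved.
    return format_code('import sys\nimport os\nx = [1, 2, 3];')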
def format_code_nb(nb):
for cell in nb.cells:
if cell.cell_type == 'code':
cell.source = format_code(cell.source)
return nb
| 36.828 | 87 | 0.557076 |
4a20c53430bf3e8e0271e436c916b592bf4225a2 | 5,994 | py | Python | uge/api/impl/qrstat_executor.py | guiguem/config-api | 40bff0ed498c571910f76ef08fb5a8b35a686e78 | [
"Apache-2.0"
] | null | null | null | uge/api/impl/qrstat_executor.py | guiguem/config-api | 40bff0ed498c571910f76ef08fb5a8b35a686e78 | [
"Apache-2.0"
] | null | null | null | uge/api/impl/qrstat_executor.py | guiguem/config-api | 40bff0ed498c571910f76ef08fb5a8b35a686e78 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
#___INFO__MARK_BEGIN__
##########################################################################
# Copyright 2016,2017 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###########################################################################
#___INFO__MARK_END__
#
import re
import os
import tempfile
import xmltodict
import json
from uge.utility.uge_subprocess import UgeSubprocess
from uge.log.log_manager import LogManager
from uge.exceptions.ar_exception import AdvanceReservationException
from uge.exceptions.command_failed import CommandFailed
from uge.exceptions.qmaster_unreachable import QmasterUnreachable
from uge.exceptions.authorization_error import AuthorizationError
from uge.exceptions.object_not_found import ObjectNotFound
from uge.exceptions.object_already_exists import ObjectAlreadyExists
class QrstatExecutor(object):
QRSTAT_ERROR_REGEX_LIST = [
(re.compile('.*unable to send message to qmaster.*'),QmasterUnreachable),
(re.compile('.*must be manager.*'),AuthorizationError),
(re.compile('denied.*'),AuthorizationError),
(re.compile('.*does not exist.*'),ObjectNotFound),
(re.compile('.*no.*defined.*'),ObjectNotFound),
(re.compile('.*'),AdvanceReservationException)
]
QRSTAT_SUCCESS_REGEX_LIST = [] # for successful outcome incorrectly classified as failure
QRSTAT_FAILURE_REGEX_LIST = [] # for failure incorrectly classified as successful outcome
def __init__(self, sge_root, sge_cell, sge_qmaster_port, sge_execd_port):
self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
self.env_dict = {
'SGE_ROOT' : sge_root,
'SGE_CELL' : sge_cell,
'SGE_QMASTER_PORT' : str(sge_qmaster_port),
'SGE_EXECD_PORT' : str(sge_execd_port),
'SGE_SINGLE_LINE' : '1',
}
self.uge_version = None
self.__configure()
def __configure(self):
self.logger.trace('Retrieving UGE version')
uge_version = self.get_uge_version()
self.logger.debug('UGE version: %s' % uge_version)
def get_uge_version(self):
if not self.uge_version:
p = self.execute_qrstat('-help')
lines = p.get_stdout().split('\n')
if not len(lines):
raise AdvanceReservationException('Cannot determine UGE version from output: %s' % p.get_stdout())
self.uge_version = lines[0].split()[-1]
return self.uge_version
def execute_qrstat(self, cmd, error_regex_list=[], error_details=None, combine_error_lines=False, success_regex_list=[], failure_regex_list=[]):
try:
command = '. %s/%s/common/settings.sh; qrstat %s' % (self.env_dict['SGE_ROOT'], self.env_dict['SGE_CELL'], cmd)
p = UgeSubprocess(command, env=self.env_dict)
p.run()
# In some cases successful outcome is actually a failure
error = p.get_stderr()
if error:
for (pattern,qrstatExClass) in failure_regex_list+QrstatExecutor.QRSTAT_FAILURE_REGEX_LIST:
if pattern.match(error):
raise qrstatExClass(error, error_details=error_details)
return p
except CommandFailed, ex:
error = str(ex)
if combine_error_lines:
error = error.replace('\n', '; ')
for (pattern,result) in success_regex_list+QrstatExecutor.QRSTAT_SUCCESS_REGEX_LIST:
if pattern.match(error):
self.logger.debug('Ignoring command failed for success pattern, replacing stdout with result: "%s"' % result)
p.stdout_ = result
return p
for (pattern,qrstatExClass) in error_regex_list+QrstatExecutor.QRSTAT_ERROR_REGEX_LIST:
if pattern.match(error):
raise qrstatExClass(error, error_details=error_details)
raise
def get_ar(self, name):
p = self.execute_qrstat('-json -ar ' + name)
lines = p.get_stdout()
aro = json.loads(lines)
return aro
def get_ar_summary(self):
p = self.execute_qrstat('-json')
lines = p.get_stdout()
aro = json.loads(lines)
return aro
def get_ar_list(self):
p = self.execute_qrstat('-json -u "*"')
lines = p.get_stdout()
aro = json.loads(lines)
arl = []
if 'ar_summary' in aro['qrstat']:
for item in aro['qrstat']['ar_summary']:
arl.append(item['id'])
return arl
#############################################################################
# Testing.
if __name__ == '__main__':
from uge.exceptions.command_failed import CommandFailed
executor = QrstatExecutor(sge_root='/Users/aalefeld/univa/trash/UGE86', sge_cell='default',sge_qmaster_port=5560, sge_execd_port=5561)
try:
print 'Version: ', executor.get_uge_version()
p = executor.execute_qrstat('-help')
print p.get_stdout()
print p.get_exit_status()
#p = executor.execute_qrstat("-xml")
p = executor.execute_qrstat("-xml -ar 311")
s = p.get_stdout();
o = xmltodict.parse(s)
print json.dumps(o)
print p.get_exit_status()
except CommandFailed, ex:
print 'Exit Status: ', ex.get_command_exit_status()
print 'Std Error : ', ex.get_command_stderr()
| 41.054795 | 148 | 0.627794 |
4a20c5bc3e19670ae70cdc980c8c2eed32e8c6ba | 1,059 | py | Python | primeFactorList.py | lshdfp726/GetFactors | bb5aa353df63542cf97dfd9da3678d9c8c3cec93 | [
"MIT"
] | null | null | null | primeFactorList.py | lshdfp726/GetFactors | bb5aa353df63542cf97dfd9da3678d9c8c3cec93 | [
"MIT"
] | null | null | null | primeFactorList.py | lshdfp726/GetFactors | bb5aa353df63542cf97dfd9da3678d9c8c3cec93 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding:UTF8 -*-
'prime factor'
import isPrime
import getFactors
primefactorArray = [] # list of the prime factors of a number
def primeFactorList(number):
'get prime factor list'
	if isPrime.prime(number):# if the number is prime, return 1 and the number itself
return [1,number]
else:
array = getFactors.getfactors(number)
array.remove(1)
array.remove(number)
global primefactorArray
		temArray = [x for x in array if isPrime.prime(x)]# the prime factors among the remaining factors
		primefactorArray.extend(temArray)# add them to the prime factor list
		temp = 1 # running product of the primes found
for x in temArray:
temp *=x
		if temp == number:# if temp already equals number, the prime factorization is complete
			return primefactorArray
		else:# otherwise keep factoring the quotient number / temp
			return getProductList(number,temp)
def getProductList(number,temp):
'get product list'
global primefactorArray
x = number / temp
if isPrime.prime(x):
		primefactorArray.append(x) # if x is prime, then number = temp * x (temp is already a product of primes)
	else:
		primeFactorList(x)# recurse to keep factoring x
return primefactorArray
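# Illustrative worked example (added note, assuming getFactors/isPrime behave as
# used above): primeFactorList(60) first collects the distinct primes 2, 3, 5
# (product 30), then getProductList(60, 30) appends 60/30 = 2, so the result is
# [2, 3, 5, 2].  primefactorArray is module-level state, so only the first
# number factored in a run gives a clean answer.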
if __name__ == '__main__':
	promot = 'please input a number: '
number = int(raw_input(promot))
print primeFactorList(number) | 23.021739 | 76 | 0.729934 |
4a20c74416cd35e999514b8963034293a84b53a0 | 7,843 | py | Python | python/setup.py | lorepieri8/he-transformer | 894b2204c9f9b62519207493d13756e12268e12b | [
"Apache-2.0"
] | null | null | null | python/setup.py | lorepieri8/he-transformer | 894b2204c9f9b62519207493d13756e12268e12b | [
"Apache-2.0"
] | null | null | null | python/setup.py | lorepieri8/he-transformer | 894b2204c9f9b62519207493d13756e12268e12b | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import setuptools
import os
# TODO: get from environment
__version__ = '0.0.0.dev0'
PYNGRAPH_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
BOOST_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
def find_he_transformer_dist_dir():
"""Return location of he-transformer library home"""
if os.environ.get('NGRAPH_HE_BUILD_PATH'):
ngraph_he_dist_dir = os.environ.get('NGRAPH_HE_BUILD_PATH')
else:
print('Must set NGRAPH_HE_BUILD_PATH')
sys.exit(1)
found = os.path.exists(os.path.join(ngraph_he_dist_dir, 'include'))
found = found and os.path.exists(os.path.join(ngraph_he_dist_dir, 'lib'))
if not found:
print(
'Cannot find he-transformer library in {} make sure that '
'NGRAPH_HE_BUILD_PATH is set correctly'.format(ngraph_he_dist_dir))
sys.exit(1)
else:
print('he-transformer library found in {}'.format(ngraph_he_dist_dir))
return ngraph_he_dist_dir
def find_pybind_headers_dir():
"""Return location of pybind11 headers."""
if os.environ.get('PYBIND_HEADERS_PATH'):
pybind_headers_dir = os.environ.get('PYBIND_HEADERS_PATH')
else:
pybind_headers_dir = os.path.join(PYNGRAPH_ROOT_DIR, 'pybind11')
found = os.path.exists(
os.path.join(pybind_headers_dir, 'include/pybind11'))
if not found:
print(
'Cannot find pybind11 library in {} make sure that '
'PYBIND_HEADERS_PATH is set correctly'.format(pybind_headers_dir))
sys.exit(1)
else:
print('pybind11 library found in {}'.format(pybind_headers_dir))
return pybind_headers_dir
def find_boost_headers_dir():
"""Return location of boost headers."""
if os.environ.get('BOOST_HEADERS_PATH'):
boost_headers_dir = os.environ.get('BOOST_HEADERS_PATH')
else:
        boost_headers_dir = BOOST_ROOT_DIR
    found = os.path.exists(os.path.join(boost_headers_dir, 'boost/asio'))
    if not found:
        print('Cannot find boost library in {} make sure that '
              'BOOST_HEADERS_PATH is set correctly'.format(boost_headers_dir))
        sys.exit(1)
    else:
        print('boost library found in {}'.format(boost_headers_dir))
    return boost_headers_dir
def find_cxx_compiler():
"""Returns C++ compiler."""
if os.environ.get('CXX_COMPILER'):
print('CXX_COMPILER', os.environ.get('CXX_COMPILER'))
return os.environ.get('CXX_COMPILER')
else:
return 'CXX'
def find_c_compiler():
"""Returns C compiler."""
if os.environ.get('C_COMPILER'):
print('C_COMPILER', os.environ.get('C_COMPILER'))
return os.environ.get('C_COMPILER')
else:
return 'CC'
def find_project_root_dir():
"""Returns PROJECT_ROOT_DIR"""
if os.environ.get("PROJECT_ROOT_DIR"):
return os.environ.get("PROJECT_ROOT_DIR")
else:
print('Cannot find PROJECT_ROOT_DIR')
sys.exit(1)
os.environ["CXX"] = find_cxx_compiler()
os.environ["CC"] = find_c_compiler()
PYBIND11_INCLUDE_DIR = find_pybind_headers_dir() + '/include'
NGRAPH_HE_DIST_DIR = find_he_transformer_dist_dir()
NGRAPH_HE_INCLUDE_DIR = os.path.join(NGRAPH_HE_DIST_DIR, 'include')
NGRAPH_HE_LIB_DIR = os.path.join(NGRAPH_HE_DIST_DIR, 'lib')
BOOST_INCLUDE_DIR = find_boost_headers_dir()
PROJECT_ROOT_DIR = find_project_root_dir()
print('NGRAPH_HE_DIST_DIR', NGRAPH_HE_DIST_DIR)
print('NGRAPH_HE_LIB_DIR ', NGRAPH_HE_LIB_DIR)
print('NGRAPH_HE_INCLUDE_DIR', NGRAPH_HE_INCLUDE_DIR)
print('BOOST_INCLUDE_DIR', BOOST_INCLUDE_DIR)
print('PROJECT_ROOT_DIR', PROJECT_ROOT_DIR)
include_dirs = [
PYNGRAPH_ROOT_DIR, NGRAPH_HE_INCLUDE_DIR, PYBIND11_INCLUDE_DIR,
BOOST_INCLUDE_DIR
]
library_dirs = [NGRAPH_HE_LIB_DIR]
libraries = ['he_seal_backend']
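# Ship the prebuilt he_seal_backend shared libraries from NGRAPH_HE_LIB_DIR alongside the package.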
data_files = [('lib', [(NGRAPH_HE_LIB_DIR + '/' + library)
for library in os.listdir(NGRAPH_HE_LIB_DIR)])]
sources = ['py_he_seal_client/he_seal_client.cpp']
sources = [PYNGRAPH_ROOT_DIR + '/' + source for source in sources]
ext_modules = [
Extension(
'he_seal_client',
sources=sources,
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries,
language='c++'),
]
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
f.write('int main (int argc, char **argv) { return 0; }')
try:
compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True
def cpp_flag(compiler):
    """Return the newest supported -std=c++[1z/14/11] compiler flag.
    The newest available standard is preferred: c++1z, then c++14, then c++11.
"""
if has_flag(compiler, '-std=c++1z'):
return '-std=c++1z'
elif has_flag(compiler, '-std=c++14'):
return '-std=c++14'
elif has_flag(compiler, '-std=c++11'):
return '-std=c++11'
else:
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
def add_platform_specific_link_args(link_args):
"""Add linker flags specific for actual OS."""
if sys.platform.startswith('linux'):
link_args += ['-Wl,-rpath,$ORIGIN/../..']
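        # ELF hardening: non-executable stack and full RELRO (relro + now).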
link_args += ['-z', 'noexecstack']
link_args += ['-z', 'relro']
link_args += ['-z', 'now']
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
def _add_extra_compile_arg(self, flag, compile_args):
"""Return True if successfully added given flag to compiler args."""
if has_flag(self.compiler, flag):
compile_args += [flag]
return True
return False
def build_extensions(self):
"""Build extension providing extra compiler flags."""
# -Wstrict-prototypes is not a valid option for c++
try:
self.compiler.compiler_so.remove('-Wstrict-prototypes')
except (AttributeError, ValueError):
pass
for ext in self.extensions:
ext.extra_compile_args += [cpp_flag(self.compiler)]
if not self._add_extra_compile_arg('-fstack-protector-strong',
ext.extra_compile_args):
self._add_extra_compile_arg('-fstack-protector',
ext.extra_compile_args)
self._add_extra_compile_arg('-fvisibility=hidden',
ext.extra_compile_args)
self._add_extra_compile_arg('-flto', ext.extra_compile_args)
self._add_extra_compile_arg('-fPIC', ext.extra_compile_args)
self._add_extra_compile_arg('-fopenmp', ext.extra_compile_args)
self._add_extra_compile_arg(
'-DPROJECT_ROOT_DIR="' + PROJECT_ROOT_DIR + '"',
ext.extra_compile_args)
add_platform_specific_link_args(ext.extra_link_args)
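            # Compiler hardening: format-string checks and fortified libc calls (requires -O2).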
ext.extra_compile_args += ['-Wformat', '-Wformat-security']
ext.extra_compile_args += ['-O2', '-D_FORTIFY_SOURCE=2']
build_ext.build_extensions(self)
setup(
name='he_seal_client',
version=__version__,
author='Intel Corporation',
url='https://github.com/NervanaSystems/he-transformer',
description='Client for HE-transformer',
long_description='',
ext_modules=ext_modules,
data_files=data_files,
install_requires=['pybind11>=2.2'],
cmdclass={'build_ext': BuildExt},
zip_safe=False)
| 33.660944 | 79 | 0.659569 |
4a20c7c734d163a83bf377946183c9383cc18966 | 350 | py | Python | home/views.py | ishaileshmishra/todo_api_jango | 8a78d018b635c55b40f88b04deada86b873cc883 | [
"MIT"
] | 2 | 2020-06-21T06:44:58.000Z | 2021-09-22T05:38:33.000Z | home/views.py | ishaileshmishra/todo_api_jango | 8a78d018b635c55b40f88b04deada86b873cc883 | [
"MIT"
] | null | null | null | home/views.py | ishaileshmishra/todo_api_jango | 8a78d018b635c55b40f88b04deada86b873cc883 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic import TemplateView
# Create your views here.
# home/views.py
class HomePageView(TemplateView):
def get(self, request, **kwargs):
return render(request, 'index.html', context=None)
class AboutPageView(TemplateView):
template_name = "about.html" | 29.166667 | 58 | 0.751429 |
4a20c8ceea3a4c256f3a6cfda2908b8e2e6055e1 | 463 | py | Python | accounts/views/__init__.py | bbengfort/ledger | f3b961be568d3a373228ee2ccf52afc7fb7ca9a8 | [
"MIT"
] | 1 | 2018-08-15T22:34:33.000Z | 2018-08-15T22:34:33.000Z | accounts/views/__init__.py | bbengfort/ledger | f3b961be568d3a373228ee2ccf52afc7fb7ca9a8 | [
"MIT"
] | 75 | 2018-05-07T21:13:27.000Z | 2021-09-22T17:45:20.000Z | accounts/views/__init__.py | bbengfort/ledger | f3b961be568d3a373228ee2ccf52afc7fb7ca9a8 | [
"MIT"
] | 2 | 2018-08-15T22:34:34.000Z | 2020-07-04T17:27:41.000Z | # accounts.views
# Views and controllers for the account app.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Wed May 02 15:42:02 2018 -0400
#
# ID: __init__.py [] [email protected] $
"""
Views and controllers for the account app.
"""
##########################################################################
## Imports
##########################################################################
from .api import *
from .balance import *
| 24.368421 | 74 | 0.477322 |
4a20c8d21037d3242e6498e01c12ab402706dee0 | 2,188 | py | Python | doc/plugin_samples/vm_balance.py | oVirt/ovirt-scheduler-proxy | 33f0db60549d1fb2efbbd86cdfb29dfa8a8b5d74 | [
"Apache-2.0"
] | 2 | 2017-08-03T16:45:00.000Z | 2022-02-05T14:03:09.000Z | doc/plugin_samples/vm_balance.py | oVirt/ovirt-scheduler-proxy | 33f0db60549d1fb2efbbd86cdfb29dfa8a8b5d74 | [
"Apache-2.0"
] | null | null | null | doc/plugin_samples/vm_balance.py | oVirt/ovirt-scheduler-proxy | 33f0db60549d1fb2efbbd86cdfb29dfa8a8b5d74 | [
"Apache-2.0"
] | 1 | 2017-08-03T16:45:05.000Z | 2017-08-03T16:45:05.000Z | from __future__ import print_function
from ovirtsdk.api import API
import sys
class vm_balance():
    """Moves a VM away from a host that is running too many VMs."""
# What are the values this module will accept, used to present
# the user with options
properties_validation = 'maximum_vm_count=[0-9]*'
def _get_connection(self):
# open a connection to the rest api
connection = None
try:
connection = API(url='http://host:port',
username='user@domain', password='')
except BaseException as ex:
# letting the external proxy know there was an error
print(ex, file=sys.stderr)
return None
return connection
def _get_hosts(self, host_ids, connection):
# get all the hosts with the given ids
engine_hosts = connection.hosts.list(
query=" or ".join(["id=%s" % u for u in host_ids]))
return engine_hosts
def do_balance(self, hosts_ids, args_map):
conn = self._get_connection()
if conn is None:
return
# get our parameters from the map
maximum_vm_count = int(args_map.get('maximum_vm_count', 100))
# get all the hosts with the given ids
engine_hosts = self._get_hosts(hosts_ids, conn)
# iterate over them and decide which to balance from
over_loaded_host = None
white_listed_hosts = []
for engine_host in engine_hosts:
if engine_host:
if engine_host.summary.active < maximum_vm_count:
white_listed_hosts.append(engine_host.id)
continue
if(not over_loaded_host or
over_loaded_host.summary.active
< engine_host.summary.active):
over_loaded_host = engine_host
if not over_loaded_host:
return
selected_vm = None
# just pick the first we find
host_vms = conn.vms.list('host=' + over_loaded_host.name)
if host_vms:
selected_vm = host_vms[0].id
else:
return
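    # Emit the chosen VM and the acceptable destination hosts on stdout for the scheduler proxy to pick up.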
print(selected_vm, white_listed_hosts)
| 31.710145 | 69 | 0.59415 |
4a20c9bccb42937fa4c22449a3ba43b3d3eeb75a | 5,163 | py | Python | src/python/pants/backend/python/subsystems/setuptools_test.py | mpcusack-color/pants | 0ce449f9181c690dbf60b2bc51b0e97f285eb9ae | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/python/subsystems/setuptools_test.py | mpcusack-color/pants | 0ce449f9181c690dbf60b2bc51b0e97f285eb9ae | [
"Apache-2.0"
] | 6 | 2022-01-25T15:49:26.000Z | 2022-02-09T11:21:13.000Z | src/python/pants/backend/python/subsystems/setuptools_test.py | thejcannon/pants | 7c24f42cb78cc462b63698cef736eda7a85c40e0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from pants.backend.python.goals.lockfile import GeneratePythonLockfile
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.subsystems import setuptools
from pants.backend.python.subsystems.setuptools import SetuptoolsLockfileSentinel
from pants.backend.python.target_types import PythonDistribution, PythonSourcesGeneratorTarget
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.testutil.rule_runner import QueryRule, RuleRunner
def test_setup_lockfile_interpreter_constraints() -> None:
rule_runner = RuleRunner(
rules=[
*setuptools.rules(),
QueryRule(GeneratePythonLockfile, [SetuptoolsLockfileSentinel]),
],
target_types=[PythonSourcesGeneratorTarget, PythonDistribution],
objects={"setup_py": PythonArtifact},
)
global_constraint = "==3.9.*"
rule_runner.set_options(
["--setuptools-lockfile=lockfile.txt"],
env={"PANTS_PYTHON_INTERPRETER_CONSTRAINTS": f"['{global_constraint}']"},
)
def assert_ics(build_file: str, expected: list[str]) -> None:
rule_runner.write_files({"project/BUILD": build_file})
lockfile_request = rule_runner.request(
GeneratePythonLockfile, [SetuptoolsLockfileSentinel()]
)
assert lockfile_request.interpreter_constraints == InterpreterConstraints(expected)
# If no dependencies for python_distribution, fall back to global [python] constraints.
assert_ics("python_distribution(provides=setup_py(name='dist'))", [global_constraint])
assert_ics(
dedent(
"""\
python_sources(name="lib")
python_distribution(
name="dist",
dependencies=[":lib"],
provides=setup_py(name="dist"),
)
"""
),
[global_constraint],
)
assert_ics(
dedent(
"""\
python_sources(name="lib", interpreter_constraints=["==2.7.*"])
python_distribution(
name="dist",
dependencies=[":lib"],
provides=setup_py(name="dist"),
)
"""
),
["==2.7.*"],
)
assert_ics(
dedent(
"""\
python_sources(name="lib", interpreter_constraints=["==2.7.*", "==3.5.*"])
python_distribution(
name="dist",
dependencies=[":lib"],
provides=setup_py(name="dist"),
)
"""
),
["==2.7.*", "==3.5.*"],
)
# If no python_distribution targets in repo, fall back to global [python] constraints.
assert_ics("python_sources()", [global_constraint])
# If there are multiple distinct ICs in the repo, we OR them. This is because setup_py.py will
# build each Python distribution independently.
assert_ics(
dedent(
"""\
python_sources(name="lib1", interpreter_constraints=["==2.7.*"])
python_distribution(
name="dist1",
dependencies=[":lib1"],
provides=setup_py(name="dist"),
)
python_sources(name="lib2", interpreter_constraints=["==3.5.*"])
python_distribution(
name="dist2",
dependencies=[":lib2"],
provides=setup_py(name="dist"),
)
"""
),
["==2.7.*", "==3.5.*"],
)
assert_ics(
dedent(
"""\
python_sources(name="lib1", interpreter_constraints=["==2.7.*", "==3.5.*"])
python_distribution(
name="dist1",
dependencies=[":lib1"],
provides=setup_py(name="dist"),
)
python_sources(name="lib2", interpreter_constraints=[">=3.5"])
python_distribution(
name="dist2",
dependencies=[":lib2"],
provides=setup_py(name="dist"),
)
"""
),
["==2.7.*", "==3.5.*", ">=3.5"],
)
assert_ics(
dedent(
"""\
python_sources(name="lib1")
python_distribution(
name="dist1",
dependencies=[":lib1"],
provides=setup_py(name="dist"),
)
python_sources(name="lib2", interpreter_constraints=["==2.7.*"])
python_distribution(
name="dist2",
dependencies=[":lib2"],
provides=setup_py(name="dist"),
)
python_sources(name="lib3", interpreter_constraints=[">=3.6"])
python_distribution(
name="dist3",
dependencies=[":lib3"],
provides=setup_py(name="dist"),
)
"""
),
["==2.7.*", global_constraint, ">=3.6"],
)
| 33.309677 | 98 | 0.550843 |
4a20c9f888612469e5b8f547f25e16a294e55008 | 3,988 | py | Python | aries_cloudagent/vc/vc_ld/models/linked_data_proof.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 247 | 2019-07-02T21:10:21.000Z | 2022-03-30T13:55:33.000Z | aries_cloudagent/vc/vc_ld/models/linked_data_proof.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 1,462 | 2019-07-02T20:57:30.000Z | 2022-03-31T23:13:35.000Z | aries_cloudagent/vc/vc_ld/models/linked_data_proof.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 377 | 2019-06-20T21:01:31.000Z | 2022-03-30T08:27:53.000Z | """LinkedDataProof."""
from marshmallow import INCLUDE, fields, post_dump
from ....messaging.valid import INDY_ISO8601_DATETIME, UUIDFour, Uri
from ....messaging.models.base import BaseModel, BaseModelSchema
from typing import Optional
class LDProof(BaseModel):
"""Linked Data Proof model."""
class Meta:
"""LinkedDataProof metadata."""
schema_class = "LinkedDataProofSchema"
def __init__(
self,
type: Optional[str] = None,
proof_purpose: Optional[str] = None,
verification_method: Optional[str] = None,
created: Optional[str] = None,
domain: Optional[str] = None,
challenge: Optional[str] = None,
jws: Optional[str] = None,
proof_value: Optional[str] = None,
nonce: Optional[str] = None,
**kwargs,
) -> None:
"""Initialize the LDProof instance."""
self.type = type
self.proof_purpose = proof_purpose
self.verification_method = verification_method
self.created = created
self.domain = domain
self.challenge = challenge
self.jws = jws
self.proof_value = proof_value
self.nonce = nonce
self.extra = kwargs
class LinkedDataProofSchema(BaseModelSchema):
"""Linked data proof schema.
Based on https://w3c-ccg.github.io/ld-proofs
"""
class Meta:
"""Accept parameter overload."""
unknown = INCLUDE
model_class = LDProof
type = fields.Str(
required=True,
description=(
"Identifies the digital signature suite"
" that was used to create the signature"
),
example="Ed25519Signature2018",
)
proof_purpose = fields.Str(
data_key="proofPurpose",
required=True,
description="Proof purpose",
example="assertionMethod",
)
verification_method = fields.Str(
data_key="verificationMethod",
required=True,
description="Information used for proof verification",
example=(
"did:key:z6Mkgg342Ycpuk263R9d8Aq6MUaxPn1DDeHyGo38EefXmgDL"
"#z6Mkgg342Ycpuk263R9d8Aq6MUaxPn1DDeHyGo38EefXmgDL"
),
validate=Uri(),
)
created = fields.Str(
required=True,
description=(
"The string value of an ISO8601 combined date"
" and time string generated by the Signature Algorithm"
),
**INDY_ISO8601_DATETIME,
)
domain = fields.Str(
required=False,
description="A string value specifying the restricted domain of the signature.",
example="example.com",
validate=Uri(),
)
challenge = fields.Str(
required=False,
description=(
"Associates a challenge with a proof, for use"
" with a proofPurpose such as authentication"
),
example=UUIDFour.EXAMPLE,
)
jws = fields.Str(
required=False,
description="Associates a Detached Json Web Signature with a proof",
example=(
"eyJhbGciOiAiRWREUc2UsICJjcml0IjogWyJiNjQiXX0..lKJU0Df_ke"
"blRKhZAS9Qq6zybm-HqUXNVZ8vgEPNTAjQ1Ch6YBKY7UBAjg6iBX5qBQ"
),
)
proof_value = fields.Str(
required=False,
data_key="proofValue",
description="The proof value of a proof",
example=(
"sy1AahqbzJQ63n9RtekmwzqZeVj494VppdAVJBnMYrTwft6cLJJGeTSSxCCJ6HKnRtwE7"
"jjDh6sB2z2AAiZY9BBnCD8wUVgwqH3qchGRCuC2RugA4eQ9fUrR4Yuycac3caiaaay"
),
)
nonce = fields.Str(
required=False,
description="The nonce",
example=(
"CF69iO3nfvqRsRBNElE8b4wO39SyJHPM7Gg1nExltW5vSfQA1lvDCR/zXX1To0/4NLo=="
),
)
@post_dump(pass_original=True)
def add_unknown_properties(self, data: dict, original, **kwargs):
"""Add back unknown properties before outputting."""
data.update(original.extra)
return data
| 27.503448 | 88 | 0.621113 |
4a20caa6731635b89943624c01d838076fa82fbc | 5,963 | py | Python | scripts/var_visualization.py | BalazsHoranyi/TimeSeries-D3M-Wrappers | 61d97bf5235fcfa9ffd95f7e0982e04f650e54de | [
"MIT"
] | null | null | null | scripts/var_visualization.py | BalazsHoranyi/TimeSeries-D3M-Wrappers | 61d97bf5235fcfa9ffd95f7e0982e04f650e54de | [
"MIT"
] | null | null | null | scripts/var_visualization.py | BalazsHoranyi/TimeSeries-D3M-Wrappers | 61d97bf5235fcfa9ffd95f7e0982e04f650e54de | [
"MIT"
] | null | null | null | # compare ARIMA and VAR predictions on examples from population spawn dataset
from common_primitives import dataset_to_dataframe as DatasetToDataFrame
from d3m import container
from d3m.primitives.time_series_forecasting.vector_autoregression import VAR
from Sloth.predict import Arima
from d3m.container import DataFrame as d3m_DataFrame
import pandas as pd
import matplotlib.pyplot as plt
import math
import numpy as np
from collections import OrderedDict
from sklearn.metrics import mean_absolute_error as mae
# dataset to dataframe
input_dataset = container.Dataset.load('file:///datasets/seed_datasets_current/LL1_736_population_spawn_simpler/TRAIN/dataset_TRAIN/datasetDoc.json')
hyperparams_class = DatasetToDataFrame.DatasetToDataFramePrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
ds2df_client = DatasetToDataFrame.DatasetToDataFramePrimitive(hyperparams = hyperparams_class.defaults().replace({"dataframe_resource":"learningData"}))
df = d3m_DataFrame(ds2df_client.produce(inputs = input_dataset).value)
original = df.copy()
# apply VAR to predict each species in each sector
n_periods = 25
var_hp = VAR.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
var = VAR(hyperparams = var_hp.defaults().replace({'filter_index_two':1, 'filter_index_one':2, 'n_periods':n_periods, 'interval':25, 'datetime_index_unit':'D'}))
var.set_training_data(inputs = df, outputs = None)
var.fit()
test_dataset = container.Dataset.load('file:///datasets/seed_datasets_current/LL1_736_population_spawn_simpler/TEST/dataset_TEST/datasetDoc.json')
test_df = d3m_DataFrame(ds2df_client.produce(inputs = test_dataset).value)
test_df = test_df.drop(columns = 'count')
var_pred = var.produce(inputs = d3m_DataFrame(ds2df_client.produce(inputs = test_dataset).value)).value
var_pred = var_pred.merge(test_df, on = 'd3mIndex', how='left')
# load targets data
targets = pd.read_csv('file:///datasets/seed_datasets_current/LL1_736_population_spawn_simpler/SCORE/targets.csv')
test_df['d3mIndex'] = test_df['d3mIndex'].astype(int)
targets = targets.merge(test_df,on = 'd3mIndex', how = 'left')
# compare VAR predictions to ARIMA predictions for individual species / sectors
sector = 'S_0002'
species = ['cas9_YABE', 'cas9_MBI']
original = original[original['sector'] == sector]
var_pred = var_pred[var_pred['sector'] == sector]
targets = targets[targets['sector'] == sector]
# instantiate arima primitive
clf = Arima(True)
COLORS = ["#FA5655", "#F79690", "#B9BC2D", "#86B6B2", "#955B99", "#252B7A"]
for specie in species:
x_train = original[original['species'] == specie]['day'].values.astype(int)
train = original[original['species'] == specie]['count'].values.astype(float)
v_pred = var_pred[var_pred['species'] == specie]['count'].values.astype(float)
true = targets[targets['species'] == specie]['count'].values.astype(float)
x_pred = var_pred[var_pred['species'] == specie]['day'].values.astype(int)
clf.fit(train)
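    # ARIMA forecasts n_periods steps ahead; keep only the last step so it lines up with the VAR prediction day.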
a_pred = clf.predict(n_periods)[-1:]
# plot results
plt.clf()
plt.scatter(x_train, train, c = COLORS[3], label = 'training data')
plt.scatter(x_pred, true, c = COLORS[3], label = 'ground truth prediction')
plt.scatter(x_pred, v_pred, c = COLORS[2], label = 'VAR prediction', edgecolor = 'black', linewidth = 1)
plt.scatter(x_pred, a_pred, c = COLORS[0], label = 'ARIMA prediction', edgecolor = 'black', linewidth = 1)
plt.xlabel('Day of the Year')
plt.ylabel('Population')
plt.title(f'VAR vs. ARIMA Comparison for Prediction of Species {specie} in Sector {sector}')
plt.legend()
plt.savefig(f'{specie}.png')
# # compare VAR predictions to ARIMA for each sector
# clf = Arima(True)
# #sectors = ['S_3102', 'S_4102', 'S_5102']
# #species = ['cas9_VBBA', 'cas9_FAB', 'cas9_JAC', 'cas9_CAD', 'cas9_YABE', 'cas9_HNAF', 'cas9_NIAG', 'cas9_MBI']
# COLORS = ["#FA5655", "#F79690", "#B9BC2D", "#86B6B2", "#955B99", "#252B7A"]
# for sector in targets['sector'].unique()[15:30]:
# original_1 = original[original['sector'] == sector]
# var_pred_1 = var_pred[var_pred['sector'] == sector]
# targets_1 = targets[targets['sector'] == sector].sort_values(by='species')
# print(var_pred_1['count'].values)
# print(var_pred_1.head())
# print(targets_1['count'].values)
# print(targets_1.head())
# # arima prediction on each species in sector
# a_pred = []
# print(f'a_pred: {a_pred}')
# print(np.sort(targets_1['species'].unique()))
# for specie in np.sort(targets_1['species'].unique()):
# train = original_1[original_1['species'] == specie]['count'].values.astype(float)
# clf.fit(train)
# a_pred.append(clf.predict(n_periods)[-1:][0])
# ap = mae(targets_1['count'].values, a_pred)
# vp = mae(targets_1['count'].values, var_pred_1['count'].values)
# linewidth = '1' if vp <= ap else '0'
# print(f"arima: {mae(targets_1['count'].values, a_pred)}")
# print(f"var: {mae(targets_1['count'].values, var_pred_1['count'].values)}")
# plt.scatter(sector, mae(targets_1['count'].values, a_pred), c = COLORS[0], label = 'MAE of ARIMA prediction')
# if linewidth == '0':
# plt.scatter(sector, mae(targets_1['count'].values, var_pred_1['count'].values), edgecolor = 'black', linewidth = linewidth, c = COLORS[2], label = 'MAE of VAR prediction')
# else:
# plt.scatter(sector, mae(targets_1['count'].values, var_pred_1['count'].values), edgecolor = 'black', linewidth = linewidth, c = COLORS[2], label = 'VAR prediction better than ARIMA prediction')
# plt.xlabel(f'Sector')
# plt.xticks(rotation = 45)
# plt.ylabel('Mean Absolute Error')
# plt.title(f'VAR vs. ARIMA Prediction on Population Spawn Dataset')
# handles, labels = plt.gca().get_legend_handles_labels()
# by_label = OrderedDict(zip(labels, handles))
# plt.legend(by_label.values(), by_label.keys())
# plt.tight_layout()
# plt.savefig(f'mae_comp_1.png')
| 50.109244 | 203 | 0.714406 |
4a20caf3a41b350b746a569b806b6f9d148809be | 219 | py | Python | mysite/mysite/listDb.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | mysite/mysite/listDb.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | mysite/mysite/listDb.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
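# Point Django at the settings module and initialise it so the ORM can be used from this standalone script.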
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
import django
django.setup()
# your imports, e.g. Django models
from buckets.models import *
all = ImageInfo.objects.all()
| 16.846154 | 59 | 0.753425 |
4a20cb0e1297b40af29ba359338e6430bf62ff25 | 5,992 | py | Python | gym/gym/envs/my_games/Atari/physics.py | tailintalent/mela | 6220f530ff8516d0652ca0849a57ab883b893199 | [
"MIT"
] | 3 | 2019-11-12T08:15:07.000Z | 2020-05-07T16:49:15.000Z | gym/gym/envs/my_games/Atari/physics.py | tailintalent/mela | 6220f530ff8516d0652ca0849a57ab883b893199 | [
"MIT"
] | 1 | 2020-12-07T14:29:57.000Z | 2020-12-07T14:29:57.000Z | gym/gym/envs/my_games/Atari/physics.py | tailintalent/mela | 6220f530ff8516d0652ca0849a57ab883b893199 | [
"MIT"
] | null | null | null | import numpy as np
def get_physics(physics_settings, dt):
physics_list = []
for physics_name, physics_params in physics_settings:
if physics_name == "linear":
physics_class = Linear
elif physics_name == "gravity":
physics_class = Uniform_Gravity
elif physics_name == "Brownian_force":
physics_class = Brownian_Force
elif physics_name == "drag":
physics_class = Drag
elif physics_name == "central_force":
physics_class = Central_Force
elif physics_name == "point_force_paddle":
physics_class = Point_Force_Paddle
elif physics_name == "pairwise_force":
physics_class = Pairwise_Force
else:
raise Exception("physics_name {0} not recognized!".format(physics_name))
physics = physics_class(params = physics_params, dt = dt)
physics_list.append(physics)
return physics_list
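# Example call (hypothetical settings):
#   get_physics([("gravity", {"g": 9.8}), ("drag", {"coeff": 0.1, "power": 2})], dt=0.01)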
class Physics(object):
def __init__(self, dt, params = {}):
self.dt = dt
self.params = params
def is_in_domain(self, ball, **kwargs):
in_domain = True
if "domain" in self.params:
(x_min, x_max), (y_min, y_max) = self.params["domain"]
if (x_min is not None and ball.x < x_min) or \
(x_max is not None and ball.x > x_max) or \
(y_min is not None and ball.y < y_min) or \
(y_max is not None and ball.y > y_max):
in_domain = False
return in_domain
def exert(self, dt, ball_list, **kwargs):
raise NotImplementedError
class Linear(Physics):
def __init__(self, dt, params = {}):
self.dt = dt
self.params = params
def exert(self, ball_dict, **kwargs):
pass
class Uniform_Gravity(Physics):
def __init__(self, dt, params = {}):
assert "g" in params
self.dt = dt
self.params = params
def exert(self, ball_dict, **kwargs):
for ball in ball_dict.values():
if self.is_in_domain(ball):
ball.vy += self.params["g"] * self.dt
class Brownian_Force(Physics):
def __init__(self, dt, params = {}):
assert "force_amp" in params
self.dt = dt
self.params = params
def exert(self, ball_dict, **kwargs):
for ball in ball_dict.values():
if self.is_in_domain(ball):
ball.vx += np.random.randn() * self.params["force_amp"] * self.dt
ball.vy += np.random.randn() * self.params["force_amp"] * self.dt
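# Drag: decelerates each ball along its velocity, with magnitude coeff * speed**power.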
class Drag(Physics):
def __init__(self, dt, params = {}):
assert "coeff" in params and "power" in params
self.dt = dt
self.params = params
def exert(self, ball_dict, **kwargs):
for ball in ball_dict.values():
if self.is_in_domain(ball):
v = np.sqrt(ball.vx ** 2 + ball.vy ** 2)
ball.vx -= self.params["coeff"] * ball.vx / v * v ** self.params["power"] * self.dt
ball.vy -= self.params["coeff"] * ball.vy / v * v ** self.params["power"] * self.dt
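# Central_Force: power-law force that pulls each ball toward the configured center point (for positive coeff).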
class Central_Force(Physics):
def __init__(self, dt, params = {}):
assert "coeff" in params and "power" in params and "center" in params
self.dt = dt
self.params = params
def exert(self, ball_dict, **kwargs):
center_x, center_y = self.params["center"]
force_coeff = self.params["coeff"]
force_power = self.params["power"]
for ball in ball_dict.values():
if self.is_in_domain(ball):
force_x = 0
force_y = 0
r = np.sqrt((ball.x - center_x) ** 2 + (ball.y - center_y) ** 2)
force = force_coeff * r ** (force_power - 1)
force_x += force * (center_x - ball.x)
force_y += force * (center_y - ball.y)
ball.vx += force_x * self.dt
ball.vy += force_y * self.dt
class Point_Force_Paddle(Physics):
def __init__(self, dt, params = {}):
assert "coeff" in params and "power" in params
self.dt = dt
self.params = params
def exert(self, ball_dict, **kwargs):
paddle_dict = kwargs["paddle_dict"]
force_coeff = self.params["coeff"]
force_power = self.params["power"]
for ball in ball_dict.values():
if self.is_in_domain(ball):
force_x = 0
force_y = 0
for paddle in paddle_dict.values():
r = np.sqrt((ball.x - paddle.x) ** 2 + (ball.y - paddle.y) ** 2)
force = force_coeff * r ** (force_power - 1)
force_x += force * (paddle.x - ball.x)
force_y += force * (paddle.y - ball.y)
ball.vx += force_x * self.dt
ball.vy += force_y * self.dt
# Interaction forces:
class Pairwise_Force(Physics):
def __init__(self, dt, params = {}):
assert "coeff" in params and "power" in params
self.dt = dt
self.params = params
def exert(self, ball_dict, **kwargs):
force_coeff = self.params["coeff"]
force_power = self.params["power"]
for i, ball1 in enumerate(ball_dict.values()):
if self.is_in_domain(ball1):
force_x = 0
force_y = 0
for j, ball2 in enumerate(ball_dict.values()):
if j != i:
if self.is_in_domain(ball2):
r_x = ball2.x - ball1.x
r_y = ball2.y - ball1.y
r = np.sqrt(r_x ** 2 + r_y ** 2)
r = np.maximum(r, 1)
force = force_coeff * r ** (force_power - 1)
force_x += force * r_x
force_y += force * r_y
ball1.vx += force_x * self.dt
ball1.vy += force_y * self.dt
| 35.455621 | 99 | 0.535547 |
4a20cd1a211fb78bdf888dd10abb78dc0d383b02 | 1,254 | py | Python | location/factory.py | iandees/loco | 5369decfc1c12a46a1b5011cff52225f90138761 | [
"MIT"
] | 2 | 2018-07-04T23:35:24.000Z | 2019-01-22T01:54:57.000Z | location/factory.py | iandees/loco | 5369decfc1c12a46a1b5011cff52225f90138761 | [
"MIT"
] | 5 | 2018-06-30T21:51:48.000Z | 2018-06-30T22:12:18.000Z | location/factory.py | iandees/loco | 5369decfc1c12a46a1b5011cff52225f90138761 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from flask import Flask, session
from flask_sslify import SSLify
from werkzeug.contrib.fixers import ProxyFix
from location import blueprints, oauth_providers
from location.core import cache, db, env, login_manager, migrate, oauth
def create_app(package_name):
app = Flask(package_name, instance_relative_config=True)
config_name = "location.config.{}".format(env)
app.config.from_object(config_name)
cache.init_app(app)
db.init_app(app)
migrate.init_app(app, db)
sslify = SSLify(app)
oauth.init_app(app)
app.oauth_provider = oauth_providers.get_provider(
app.config.get('OAUTH_PROVIDER'),
app.config.get('OAUTH_CLIENT_ID'),
app.config.get('OAUTH_CLIENT_SECRET'),
app.config.get('ALLOWED_DOMAIN')
)
oauth_app = oauth.remote_app(**app.oauth_provider.settings())
@oauth_app.tokengetter
def get_access_token():
return session.get('access_token')
login_manager.init_app(app)
app.register_blueprint(blueprints.root_bp)
app.register_blueprint(blueprints.auth_bp)
app.register_blueprint(blueprints.locations_bp)
app.register_blueprint(blueprints.teams_bp)
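    # Trust X-Forwarded-* headers from the upstream reverse proxy so the app sees the original scheme and host.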
app.wsgi_app = ProxyFix(app.wsgi_app)
return app
| 27.866667 | 71 | 0.72807 |
4a20cfb50fdcf31f32b5eb01c4c3bec04b14d09c | 553 | py | Python | judge/config.py | Zoctan/WYUOpenJudge | 59c74dc12f22012d6e28f7dffa3cb633c87d4686 | [
"Apache-2.0"
] | null | null | null | judge/config.py | Zoctan/WYUOpenJudge | 59c74dc12f22012d6e28f7dffa3cb633c87d4686 | [
"Apache-2.0"
] | 1 | 2018-05-26T10:39:24.000Z | 2018-05-26T10:39:24.000Z | judge/config.py | Zoctan/WYUOpenJudge | 59c74dc12f22012d6e28f7dffa3cb633c87d4686 | [
"Apache-2.0"
] | 1 | 2018-05-18T10:29:06.000Z | 2018-05-18T10:29:06.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
setting = 'charset=utf8'
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or \
'the quick brown fox jumps over the lazy dog'
SQLALCHEMY_TRACK_MODIFICATIONS = True
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
pass
class TestingConfig(Config):
pass
class ProductionConfig(Config):
pass
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig
}
| 16.757576 | 62 | 0.663653 |
4a20cfcac68e5b7476f45f583c8ee923a0278fb0 | 5,107 | py | Python | Tests/test_closure.py | 0xFireball/exascript2 | f6c69ac41f8fa1e5e9e42a677717e85b9ff9d0c3 | [
"Apache-2.0"
] | null | null | null | Tests/test_closure.py | 0xFireball/exascript2 | f6c69ac41f8fa1e5e9e42a677717e85b9ff9d0c3 | [
"Apache-2.0"
] | null | null | null | Tests/test_closure.py | 0xFireball/exascript2 | f6c69ac41f8fa1e5e9e42a677717e85b9ff9d0c3 | [
"Apache-2.0"
] | 1 | 2019-09-18T05:37:46.000Z | 2019-09-18T05:37:46.000Z | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
x = 123456 # global to possibly mislead the closures
# Simple cases
def f():
x = 1
def g():
return x
return g
AreEqual(f()(), 1)
def f():
x = 2
def g(y):
return x + y
return g
AreEqual(f()(3), 5)
def f(y):
x = 3
def g():
return x + y
return g
AreEqual(f(5)(), 8)
def f(x,y):
def g():
return x**2 + y**2
return g
AreEqual(f(2,3)(), 13)
def f(p):
x = 3
def g(q):
y = 5
def h(r):
z = 7
def i(s):
return x,y,z,p,q,r,s
return i(13)
return h(11)
return g(9)
AreEqual(f(17), (3, 5, 7, 17, 9, 11, 13))
# Find the right binding
def f():
def g():
def h():
return x
x = 13
return h
x = 17
return g
AreEqual(f()()(), 13)
def f():
def g():
def h(x):
return x
x = 13
return h
x = 17
return g
AreEqual(f()()(19), 19)
def f():
def g():
x = p
def h(y):
return x + y
return h
p = 3
three = g()
p = 5
five = g()
p = 7
seven = g()
return three(4), five(4), seven(4)
AreEqual(f(), (7, 9, 11))
# Skip levels
def f():
x = 4
def g():
def h():
return x + 2
return h
return g
AreEqual(f()()(),6)
def f():
x = 5
def g():
def h():
def i():
return x + 3
return i
return h
return g
AreEqual(f()()()(), 8)
def f():
x = 6
def g():
def h():
def i():
return x
return i()
return h()
return g()
AreEqual(f(), 6)
# Recursion
def f():
y = []
def g(x):
y.append(x)
if (x > 0): g(x - 1)
g(10)
return y
AreEqual(f(), [10,9,8,7,6,5,4,3,2,1,0])
# Classes
def f():
x = 23
class c:
y = x
return c()
AreEqual(f().y, 23)
def f():
x = 23
class c:
def m(self):
return x
x = 29
return c().m()
AreEqual(f(), 29)
# Generators
def f():
x = 10
class c:
def m(self):
def n():
return i
for i in range(x):
yield n()
return c()
AreEqual(list(f().m()), [0,1,2,3,4,5,6,7,8,9])
def f(i):
def g(j):
for k in range(i+j):
def h():
return k
yield h()
return g
AreEqual(list(f(3)(5)), [0, 1, 2, 3, 4, 5, 6, 7])
class C:
def __init__(self):
self.setm (lambda: self.m ('lambda and self test'))
def m(self, t):
return t
def setm(self, n):
self.n = n
AreEqual(C().n(), "lambda and self test")
class c:
x = 5
y = x
x = 7
z = x
AreEqual(c.y, 5)
AreEqual(x, 123456)
AreEqual(c.z, c.x)
AreEqual(c.x, 7)
class c:
global x
AreEqual(x, 123456)
def f(self):
return x
AreEqual(c().f(), 123456)
def f():
global x
def g():
def h():
return x
return h
x = 654321
return g
AreEqual(f()()(), 654321)
def f():
x = 10
class c:
x = 5
def m(self):
return x
return c()
AreEqual(f().m(), 10)
def f():
def g():
print a
g()
a = 10
AssertError(NameError, f)
x = 123456
def f():
x = 123
def g():
global x
AreEqual(x, 123456)
def h():
return x
return h()
return g()
AreEqual(f(), 123456)
def f():
x = 7
def g():
global x
def h():
return x
return h()
return g()
AreEqual(f(), 123456)
y = 654321
def f():
[x, y] = 3, 7
def g():
AreEqual(x, 3)
AreEqual(y, 7)
g()
AreEqual(x, 3)
AreEqual(y, 7)
return x, y
AreEqual(f(), (3, 7))
AreEqual(x, 123456)
AreEqual(y, 654321)
def f():
def f1():
[a, b] = [2,3]
AreEqual(a, 2)
AreEqual(b, 3)
f1()
a = 3
AreEqual(a, 3)
del a
f()
x = "global x"
y = "global y"
z = "global z"
def test():
AreEqual(y, "global y")
exec "y = 10"
AreEqual(y, 10)
test()
def test2():
AreEqual(x, "global x")
exec "x = 5"
AreEqual(x, 5)
yield x
AreEqual(test2().next(), 5)
class C:
AreEqual(z, "global z")
exec "z = 7"
AreEqual(z, 7)
| 15.336336 | 96 | 0.447425 |
4a20d043e1808e9cc5923fd54110496eab7e33a6 | 812 | py | Python | demo/demo/urls.py | SureeLee/TestDjango | 70edef664ef9b537cf0aa507277b3efb37cf7d39 | [
"Apache-2.0"
] | null | null | null | demo/demo/urls.py | SureeLee/TestDjango | 70edef664ef9b537cf0aa507277b3efb37cf7d39 | [
"Apache-2.0"
] | null | null | null | demo/demo/urls.py | SureeLee/TestDjango | 70edef664ef9b537cf0aa507277b3efb37cf7d39 | [
"Apache-2.0"
] | null | null | null | """demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('model_one.urls')),
]
| 33.833333 | 79 | 0.697044 |
4a20d10509d5fc8572416ed54640bc42a55913f8 | 191 | py | Python | text_generation/text_generation.py | ivan-jaramillo/text-generation | c6d4b52179cabeeb577549be7fbc5bc421adafe5 | [
"MIT"
] | null | null | null | text_generation/text_generation.py | ivan-jaramillo/text-generation | c6d4b52179cabeeb577549be7fbc5bc421adafe5 | [
"MIT"
] | 4 | 2021-07-11T16:21:01.000Z | 2021-08-20T03:30:20.000Z | text_generation/text_generation.py | ivan-jaramillo/text-generation | c6d4b52179cabeeb577549be7fbc5bc421adafe5 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
import numpy as np
import os
import time
def main():
pass
if __name__ == '__main__':
main()
| 13.642857 | 62 | 0.743455 |
4a20d24d17e68a3914225d77f90c9afb5286d63a | 4,555 | py | Python | test/test_postgres.py | pennsignals/dsdk | fc8d40444fd077b4341e8dfe6d36f5a4b04ca8b5 | [
"MIT"
] | 6 | 2020-03-27T20:56:27.000Z | 2022-01-11T16:10:22.000Z | test/test_postgres.py | pennsignals/dsdk | fc8d40444fd077b4341e8dfe6d36f5a4b04ca8b5 | [
"MIT"
] | 37 | 2019-12-11T21:15:21.000Z | 2021-12-22T14:57:38.000Z | test/test_postgres.py | pennsignals/dsdk | fc8d40444fd077b4341e8dfe6d36f5a4b04ca8b5 | [
"MIT"
] | 5 | 2020-03-31T16:30:48.000Z | 2022-02-10T20:50:34.000Z | # -*- coding: utf-8 -*-
"""Test postgres."""
from contextlib import contextmanager
from os import environ as os_env
from typing import Any, Generator
from pandas import DataFrame, read_sql_query
from dsdk import Asset, Batch, Postgres, configure_logger
from dsdk.model import Batch as ModelBatch
logger = configure_logger(__name__)
class Persistor(Postgres):
"""Persistor."""
def __init__(
self,
env=None,
**kwargs,
):
"""__init__."""
if env is None:
env = os_env
self.attempts = 0
super().__init__(
username=kwargs.get(
"username", env.get("POSTGRES_USERNAME", "postgres")
),
password=kwargs.get(
"password", env.get("POSTGRES_PASSWORD", "postgres")
),
host=kwargs.get("host", env.get("POSTGRES_HOST", "postgres")),
port=kwargs.get("port", int(env.get("POSTGRES_PORT", "5432"))),
database=kwargs.get(
"database", env.get("POSTGRES_DATABASE", "test")
),
sql=Asset.build(
path=kwargs.get(
"sql", env.get("POSTGRES_SQL", "./assets/postgres")
),
ext=".sql",
),
tables=kwargs.get(
"tables",
env.get(
"POSTGRES_TABLES",
",".join(
(
"example.models",
"example.microservices",
"example.runs",
"example.predictions",
)
),
).split(","),
),
)
@contextmanager
def connect(self) -> Generator[Any, None, None]:
"""Connect."""
self.attempts += 1
with super().connect() as con:
yield con
def test_connect():
"""Test connect."""
persistor = Persistor()
with persistor.connect() as con:
logger.info(con.info)
def test_check_ok():
"""Test check OK."""
persistor = Persistor()
with persistor.rollback() as cur:
persistor.check(cur)
def test_check_not_ok():
"""Test check not OK."""
persistor = Persistor(tables=("test.dne",))
try:
with persistor.rollback() as cur:
persistor.check(cur)
except RuntimeError:
return
raise AssertionError("Schema check passed even though table dne.")
def test_cursor():
"""Test cursor."""
persistor = Persistor()
with persistor.rollback() as cur:
cur.execute("""select 1 as n""")
for row in cur.fetchall():
n, *_ = row
assert n == 1
def test_open_run(
data=(
(0, 0.75, True, False, False),
(1, 0.25, True, False, False),
(2, 0.75, False, True, False),
(3, 0.25, False, True, False),
(4, 0.75, False, False, True),
(5, 0.25, False, False, True),
),
in_columns=(
"subject_id",
"greenish",
"is_animal",
"is_vegetable",
"is_mineral",
),
check="""
select
run_id,
subject_id,
score,
greenish,
is_animal,
is_vegetable,
is_mineral
from
predictions
natural join features id
where
run_id = %(run_id)s""",
):
"""Test open_run."""
batch = Batch(
as_of=None,
duration=None,
microservice_version="1.0.0",
time_zone=None,
)
persistor = Persistor()
model_batch = ModelBatch(model_version="1.0.0", parent=batch)
with persistor.open_run(parent=model_batch) as run:
df = DataFrame(data=list(data), columns=in_columns)
df.set_index("subject_id")
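        # Score rule: greenish animals and non-greenish vegetables score high; minerals always score zero.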
df["score"] = ~df["is_mineral"] * (
(df["is_animal"] * df["greenish"])
+ (df["is_vegetable"] * (1.0 - df["greenish"]))
)
run.predictions = df
with persistor.rollback() as cur:
cur.execute(persistor.sql.schema)
df = read_sql_query(
sql=check, con=cur.connection, params={"run_id": run.id}
)
df.set_index("subject_id")
# reorder columns to match run.predictions
df = df[run.predictions.columns]
# logger.error(df.head(10))
# logger.error(run.predictions.head(10))
assert df.equals(run.predictions)
def test_retry_connect():
"""Test retry_connect."""
def test_store_evidence():
"""Test store evidence."""
def test_store_df():
"""Test store_df."""
| 25.589888 | 75 | 0.528211 |
4a20d300a4d9427c1cca5969dd4dd8b1cb1b51a8 | 15,827 | py | Python | sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/_recovery_points_operations.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/_recovery_points_operations.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/_recovery_points_operations.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RecoveryPointsOperations(object):
"""RecoveryPointsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
vault_name, # type: str
resource_group_name, # type: str
fabric_name, # type: str
container_name, # type: str
protected_item_name, # type: str
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RecoveryPointResourceList"]
"""Lists the backup copies for the backed up item.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the backed up item.
:type fabric_name: str
:param container_name: Container name associated with the backed up item.
:type container_name: str
:param protected_item_name: Backed up item whose backup copies are to be fetched.
:type protected_item_name: str
:param filter: OData filter options.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RecoveryPointResourceList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicesbackup.models.RecoveryPointResourceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecoveryPointResourceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str'),
'protectedItemName': self._serialize.url("protected_item_name", protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RecoveryPointResourceList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}/recoveryPoints'} # type: ignore
def get(
self,
vault_name, # type: str
resource_group_name, # type: str
fabric_name, # type: str
container_name, # type: str
protected_item_name, # type: str
recovery_point_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RecoveryPointResource"
"""Provides the information of the backed up data identified using RecoveryPointID. This is an
asynchronous operation.
To know the status of the operation, call the GetProtectedItemOperationResult API.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with backed up item.
:type fabric_name: str
:param container_name: Container name associated with backed up item.
:type container_name: str
:param protected_item_name: Backed up item name whose backup data needs to be fetched.
:type protected_item_name: str
:param recovery_point_id: RecoveryPointID represents the backed up data to be fetched.
:type recovery_point_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RecoveryPointResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.models.RecoveryPointResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecoveryPointResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str'),
'protectedItemName': self._serialize.url("protected_item_name", protected_item_name, 'str'),
'recoveryPointId': self._serialize.url("recovery_point_id", recovery_point_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RecoveryPointResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}/recoveryPoints/{recoveryPointId}'} # type: ignore
def get_access_token(
self,
vault_name, # type: str
resource_group_name, # type: str
fabric_name, # type: str
container_name, # type: str
protected_item_name, # type: str
recovery_point_id, # type: str
parameters, # type: "_models.AADPropertiesResource"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.CrrAccessTokenResource"]
"""Returns the Access token for communication between BMS and Protection service.
Returns the Access token for communication between BMS and Protection service.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the container.
:type fabric_name: str
:param container_name: Name of the container.
:type container_name: str
:param protected_item_name: Name of the Protected Item.
:type protected_item_name: str
:param recovery_point_id: Recovery Point Id.
:type recovery_point_id: str
:param parameters: Get Access Token request.
:type parameters: ~azure.mgmt.recoveryservicesbackup.models.AADPropertiesResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CrrAccessTokenResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.models.CrrAccessTokenResource or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.CrrAccessTokenResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-20"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.get_access_token.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str'),
'protectedItemName': self._serialize.url("protected_item_name", protected_item_name, 'str'),
'recoveryPointId': self._serialize.url("recovery_point_id", recovery_point_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AADPropertiesResource')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.NewErrorResponseAutoGenerated, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CrrAccessTokenResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_access_token.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}/recoveryPoints/{recoveryPointId}/accessToken'} # type: ignore
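    # Hedged usage sketch (not from the source): this mixin method is normally
    # reached through a generated operations client; the ``operations`` name and
    # the example argument values below are illustrative only.
    #
    #   token_resource = operations.get_access_token(
    #       vault_name="myVault",
    #       resource_group_name="myResourceGroup",
    #       fabric_name="Azure",
    #       container_name="<container name>",
    #       protected_item_name="<protected item name>",
    #       recovery_point_id="<recovery point id>",
    #       parameters=aad_properties_resource,  # a _models.AADPropertiesResource
    #   )
    #   # Returns a CrrAccessTokenResource, or None when the service answers 400.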
| 51.553746 | 325 | 0.672774 |
4a20d3a97b20991dd5a0c4a6bfeb3b6d4a4df73e | 1,209 | py | Python | api/utils/transfer.py | AutoCoinDCF/NEW_API | f4abc48fff907a0785372b941afcd67e62eec825 | [
"Apache-2.0"
] | null | null | null | api/utils/transfer.py | AutoCoinDCF/NEW_API | f4abc48fff907a0785372b941afcd67e62eec825 | [
"Apache-2.0"
] | null | null | null | api/utils/transfer.py | AutoCoinDCF/NEW_API | f4abc48fff907a0785372b941afcd67e62eec825 | [
"Apache-2.0"
] | null | null | null | import pexpect
class Transfer(object):
def __init__(self, ip, user, passwd,):
self.ip = ip
self.user = user
self.passwd = passwd
def file_transfer(self, dst_path, filename, to_local=False):
print("远程传输开始...")
passwd_key = '.*assword.*'
if not to_local:
cmdline = 'scp %s %s@%s:%s' % (filename, self.user, self.ip, dst_path)
else:
cmdline = 'scp %s@%s:%s %s' % (self.user, self.ip, filename, dst_path)
try:
child = pexpect.spawn(cmdline)
child.sendline('yes')
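            # Note (descriptive comment, not from the source): 'yes' is sent
            # unconditionally, which assumes scp may first ask to confirm an
            # unknown host key before prompting for the password. A more
            # defensive sketch would expect either prompt first, e.g.:
            #   i = child.expect(['(?i)yes/no', passwd_key])
            #   if i == 0:
            #       child.sendline('yes')
            #       child.expect(passwd_key, timeout=None)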
#child.expect_exact(passwd_key, timeout=None)
child.expect(passwd_key, timeout=None)
child.sendline(self.passwd)
child.expect(pexpect.EOF, timeout=None)
print("Transfer Work Finish!")
except:
raise
if __name__ == "__main__":
    trigger = Transfer('10.60.1.142', 'sqlgraph', 'sqlgraph')
    # trigger.file_transfer('/home/sqlgraph/ssd/search_script', '/home/ssd/git_lab_102/v_0610/QBInterface/set_relation.csv')
    trigger.file_transfer('/home/ssd/git_lab_102/v_0610/QBInterface', '/home/sqlgraph/ssd/search_script/create_eed_graph2_v2.sh', True)
| 36.636364 | 134 | 0.612076 |
4a20d3aeb5be6a283c3d65224facd995093f95b0 | 23,754 | py | Python | plgx-esp-ui/polylogyx/blueprints/v1/hosts.py | dhoomakethu/plgx-esp | b466b52a5e16a0d12a61e505e48add83bee5bad4 | ["MIT"] | 20 | 2019-12-09T13:55:13.000Z | 2022-01-10T09:10:42.000Z | plgx-esp-ui/polylogyx/blueprints/v1/hosts.py | dhoomakethu/plgx-esp | b466b52a5e16a0d12a61e505e48add83bee5bad4 | ["MIT"] | 13 | 2019-12-03T13:27:27.000Z | 2021-12-03T05:22:49.000Z | plgx-esp-ui/polylogyx/blueprints/v1/hosts.py | dhoomakethu/plgx-esp | b466b52a5e16a0d12a61e505e48add83bee5bad4 | ["MIT"] | 16 | 2019-11-15T11:45:06.000Z | 2022-01-07T08:07:11.000Z |
from flask_restplus import Namespace, Resource, inputs
from polylogyx.blueprints.v1.utils import *
from polylogyx.utils import require_api_key, assemble_configuration, assemble_additional_configuration
from polylogyx.dao.v1 import hosts_dao as dao, tags_dao
from polylogyx.wrappers.v1 import host_wrappers as wrapper, parent_wrappers as parentwrapper
ns = Namespace('hosts', description='nodes related operations')
@require_api_key
@ns.route('', endpoint='hosts_list')
class HostsList(Resource):
"""List all Nodes Filtered"""
parser = requestparse(['status', 'platform', 'searchterm', 'start', 'limit', 'enabled', 'alerts_count'], [bool, str, str, int, int, inputs.boolean, inputs.boolean],
['status(true/false)', 'platform(windows/linux/darwin)', 'searchterm', 'start', 'limit', 'enabled(true/false)', 'alerts_count(true/false)'], [False, False, False, False, False, False, False], [None, ["windows", "linux", "darwin"], None, None, None, None, None], [None, None, "", None, None, True, True])
@ns.expect(parser)
def post(self):
args = self.parser.parse_args()
query_set = dao.get_hosts_paginated(args['status'], args['platform'], args['searchterm'], args['enabled'], args['alerts_count']).offset(args['start']).limit(args['limit']).all()
total_count = dao.get_hosts_total_count(args['status'], args['platform'], args['enabled'])
if query_set:
results = []
for node_alert_count_pair in query_set:
if args['alerts_count']:
node_dict = node_alert_count_pair[0].get_dict()
node_dict['alerts_count'] = node_alert_count_pair[1]
else:
node_dict = node_alert_count_pair.get_dict()
results.append(node_dict)
data = {'results': results, 'count': dao.get_hosts_paginated(args['status'], args['platform'], args['searchterm'], args['enabled'], args['alerts_count']).count(), 'total_count':total_count}
else:
data = {'results': [], 'count': 0, 'total_count': total_count}
status = "success"
message = "Successfully fetched the hosts details"
return marshal(respcls(message, status, data), parentwrapper.common_response_wrapper, skip_none=True)
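# Hedged request sketch (illustrative, not part of the source): the listing
# endpoint above accepts a body matching the parser definition, roughly
#
#   POST /hosts
#   {"status": true, "platform": "windows", "searchterm": "",
#    "start": 0, "limit": 10, "enabled": true, "alerts_count": true}
#
# and responds with a wrapper containing "results", "count" and "total_count".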
@require_api_key
@ns.route('/export')
@ns.doc(params = {})
class NodesCSV(Resource):
'''Returns a csv file object with nodes info as data'''
def get(self):
from sqlalchemy import desc, and_
record_query = Node.query.filter(and_(Node.state!=Node.REMOVED, Node.state!=Node.DELETED)).order_by(desc(Node.id)).all()
results = []
for value in record_query:
res = {}
data = value.to_dict()
res['Host_Identifier'] = value.display_name
if value.os_info:
res['os'] = value.os_info['name']
else:
res['os'] = value.platform
res['last_ip'] = data["last_ip"]
res['tags'] = [tag.to_dict() for tag in value.tags]
res['id'] = data['id']
res['health'] = value.node_is_active()
res['platform'] = data["platform"]
results.append(res)
headers = []
if results:
firstRecord = results[0]
for key in firstRecord.keys():
headers.append(key)
bio = BytesIO()
writer = csv.writer(bio)
writer.writerow(headers)
for data in results:
row = []
row.extend([data.get(column, '') for column in headers])
writer.writerow(row)
bio.seek(0)
file_data = send_file(
bio,
mimetype='text/csv',
as_attachment=True,
attachment_filename='nodes.csv'
)
return file_data
@require_api_key
@ns.route('/<string:host_identifier>', endpoint='node_details')
@ns.route('/<int:node_id>', endpoint='node_details_by_id')
class NodeDetailsList(Resource):
"""List a Node Details"""
def get(self, host_identifier=None, node_id=None):
data = None
if node_id:
queryset = dao.getNodeById(node_id)
elif host_identifier:
queryset = dao.get_node_by_host_identifier(host_identifier)
else: queryset = None
db.session.commit()
if not queryset:
message = "There is no host exists with this host identifier or node id given!"
status = "failure"
else:
data = marshal(queryset, wrapper.nodewrapper)
if not data: data={}
message = "Node details are fetched successfully"
status = "success"
return marshal(respcls(message, status, data), parentwrapper.common_response_wrapper, skip_none=True)
@require_api_key
@ns.route('/<string:host_identifier>/alerts/distribution', endpoint='host_alerts_count_for_host_identifier')
@ns.route('/<int:node_id>/alerts/distribution', endpoint='host_alerts_count_for_node_id')
class HostAlertsDistribution(Resource):
"""List a Node Details"""
def get(self, host_identifier=None, node_id=None):
if node_id:
node = dao.getNodeById(node_id)
elif host_identifier:
node = dao.get_node_by_host_identifier(host_identifier)
else:
node = None
if not node:
data = None
message = "There is no host exists with this host identifier or node id given!"
status = "failure"
else:
data = {}
data['sources'] = dao.host_alerts_distribution_by_source(node)
data['rules'] = [{"name": rule_count_pair[0], "count": rule_count_pair[1]} for rule_count_pair in dao.host_alerts_distribution_by_rule(node)]
message = "Alerts distribution details are fetched for the host"
status = "success"
return marshal(respcls(message, status, data), parentwrapper.common_response_wrapper, skip_none=True)
@require_api_key
@ns.route('/count', endpoint='nodes_related_count')
class NodeCountList(Resource):
"""Lists all Nodes Filtered count"""
def get(self):
data = dao.get_hosts_filtered_status_platform_count()
return marshal(respcls("Successfully fetched the nodes status count", 'success', data),
parentwrapper.common_response_wrapper, skip_none=True)
@require_api_key
@ns.route('/status_logs', endpoint='node_status_logs')
class HostStatusLogs(Resource):
"""Host Status Logs"""
parser = requestparse(['host_identifier', 'node_id', 'start', 'limit', 'searchterm'], [str, int, int, int, str], ["host identifier of the node", "id of the node", 'start', 'limit', 'searchterm'], [False, False, False, False, False], [None, None, None, None, None], [None, None, None, None, ''])
@ns.expect(parser)
def post(self):
args = self.parser.parse_args()
data = None
status = "failure"
if args['node_id'] is not None or args['host_identifier'] is not None:
if args['host_identifier'] is not None:
qs = dao.get_node_by_host_identifier(args['host_identifier'])
else:
node_id = args['node_id']
qs = dao.getNodeById(node_id)
if qs:
data = {'results': marshal(dao.get_status_logs_of_a_node(qs, args['searchterm']).offset(args['start']).limit(args['limit']).all(), wrapper.node_status_log_wrapper), 'count':dao.get_status_logs_of_a_node(qs, args['searchterm']).count(), 'total_count':dao.get_status_logs_total_count(qs)}
message = "Successfully fetched the host's status logs"
status = "success"
else:
message = "Host identifier or node id passed is not correct!"
else:
message = "Please pass one of node id or host identifier!"
return marshal(respcls(message, status, data), parentwrapper.common_response_wrapper, skip_none=True)
@require_api_key
@ns.route('/additional_config', endpoint='node_additional_config')
class HostAdditionalConfig(Resource):
"""Additional Config of a Node"""
parser = requestparse(['host_identifier', 'node_id'], [str, int], ["host identifier of the node", "id of the node"], [False, False])
@ns.expect(parser)
def post(self):
args = self.parser.parse_args()
config = None
status = "failure"
if args['node_id'] is not None or args['host_identifier'] is not None:
if args['host_identifier'] is not None:
node = dao.get_node_by_host_identifier(args['host_identifier'])
else:
node_id = args['node_id']
node = dao.getNodeById(node_id)
if node:
config = assemble_additional_configuration(node)
status = "success"
message = "Successfully fetched additional config of the node for the host identifier passed"
else:
message = "Host identifier or node id passed is not correct!"
else:
message = "Atleast one of host identifier or node id should be given!"
return marshal(respcls(message, status, config), parentwrapper.common_response_wrapper, skip_none=True)
@require_api_key
@ns.route('/config', endpoint='node_full_config')
class HostFullConfig(Resource):
"""Full Config of a Node"""
parser = requestparse(['host_identifier', 'node_id'], [str, int], ["host identifier of the node", "id of the node"], [False, False])
@ns.expect(parser)
def post(self):
args = self.parser.parse_args()
config = None
status = "failure"
if args['node_id'] is not None or args['host_identifier'] is not None:
if args['host_identifier'] is not None:
node = dao.get_node_by_host_identifier(args['host_identifier'])
else:
node_id = args['node_id']
node = dao.getNodeById(node_id)
if node:
config = assemble_configuration(node)
status = "success"
message = "Successfully fetched full config of the node for the host identifier passed"
else:
message = "Host identifier or node id passed is not correct!"
else:
message = "Atleast one of host identifier or node id should be given!"
return marshal(respcls(message, status, config), parentwrapper.common_response_wrapper, skip_none=True)
@require_api_key
@ns.route('/recent_activity/count', endpoint='node_recent_activity_count')
@ns.doc(params={'host_identifier': 'Host identifier of the Node', 'node_id': 'id of the Node'})
class RecentActivityCount(Resource):
"""Recent Activity count of a Node"""
parser = requestparse(['host_identifier', 'node_id'], [str, int], ["host identifier of the node", "id of the node"], [False, False])
@ns.expect(parser)
def post(self):
args = self.parser.parse_args()
status = "failure"
data = None
if args['node_id'] is not None or args['host_identifier'] is not None:
if args['host_identifier'] is not None:
node = dao.get_node_by_host_identifier(args['host_identifier'])
if node: node_id = node.id
else: node_id = None
else:
node_id = args['node_id']
if not node_id: message = "Please pass correct host identifier or node id to get the results"
else:
data = [{'name': query[0], 'count': query[1]} for query in dao.get_result_log_count(node_id)]
status = "success"
message = "Successfully fetched the count of schedule query results count of host identifier passed"
else:
message = "Atleast one of host identifier or node id should be given!"
return marshal(respcls(message, status, data), parentwrapper.common_response_wrapper, skip_none=True)
@require_api_key
@ns.route('/recent_activity', endpoint='node_recent_activity_results')
@ns.doc(params={'host_identifier': 'Host identifier of the Node', 'query_name': 'query name', 'start': 'start count',
'limit': 'end count'})
class RecentActivityResults(Resource):
"""Recent Activity results of a query of a Node"""
parser = requestparse(['host_identifier', 'node_id', 'query_name', 'start', 'limit', 'searchterm'], [str, int, str, int, int, str],
["host identifier of the node", "node_id", "query", "start count", "end count", "searchterm"],
[False, False, True, False, False, False], [None, None, None, None, None, None], [None, None, None, 0, 10, ""])
@ns.expect(parser)
def post(self):
args = self.parser.parse_args()
status = "failure"
data = {}
if args['node_id'] is not None or args['host_identifier'] is not None:
if args['host_identifier'] is not None:
node = dao.get_node_by_host_identifier(args['host_identifier'])
if node:
node_id = node.id
else:
node_id = None
else:
node_id = args['node_id']
if not node_id:
message = "Please pass correct host identifier or node id to get the results"
else:
qs = dao.get_result_log_of_a_query(node_id, args['query_name'], args['start'],
args['limit'], args['searchterm'])
data = {'count': qs[0], 'total_count': qs[2], 'results': [
{'timestamp': list_ele[1].strftime('%m/%d/%Y %H/%M/%S'), 'action': list_ele[2],
'columns': list_ele[3]} for list_ele in qs[1]]}
status = "success"
message = "Successfully fetched the count of schedule query results count of host identifier passed"
else:
message = "Atleast one of host identifier or node id should be given!"
return marshal(respcls(message, status, data), parentwrapper.common_response_wrapper, skip_none=True)
# Modify Tag section
@require_api_key
@ns.route('/<string:host_identifier>/tags', endpoint='node_tags')
@ns.route('/<int:node_id>/tags', endpoint='node_tags_by_node_id')
class ListTagsOfNode(Resource):
"""Resource for tags of a host"""
parser = requestparse(['tag'], [str],
["tag to add/remove for the node"], [True])
@ns.doc(params={'host_identifier': 'Host identifier of the Node', 'node_id': 'id of the Node'})
def get(self, host_identifier=None, node_id=None):
"""Lists tags of a node by its host_identifier"""
status = 'failure'
if host_identifier: node = dao.get_node_by_host_identifier(host_identifier)
elif node_id: node = dao.getNodeById(node_id)
else: node = None
if not node:
message = "Host id or node id passed it not correct"
data = None
else:
data = [tag.value for tag in node.tags]
status = "success"
message = "Successfully fetched the tags of host"
return marshal(respcls(message, status, data), parentwrapper.common_response_wrapper, skip_none=True)
@ns.doc(params={'host_identifier': 'Host identifier of the Node', 'node_id': 'id of the Node', 'tag':"tag to add to host"})
@ns.expect(parser)
def post(self, host_identifier=None, node_id=None):
"""Creates tags of a node by its host_identifier"""
args = self.parser.parse_args()
status = 'failure'
if host_identifier: node = dao.get_node_by_host_identifier(host_identifier)
elif node_id: node = dao.getNodeById(node_id)
else: node = None
if node:
tag = args['tag'].strip()
if not tag:
message = "Tag provided is invalid!"
else:
tag = tags_dao.create_tag_obj(tag)
node.tags.append(tag)
node.save()
status="success"
message = "Successfully created tags to host"
else:
message = "Host id or node id passed it not correct"
return marshal(respcls(message, status), parentwrapper.common_response_wrapper, skip_none=True)
@ns.doc(params={'host_identifier': 'Host identifier of the Node', 'node_id': 'id of the Node', 'tag': 'tag to remove'})
@ns.expect(parser)
def delete(self, host_identifier=None, node_id=None):
"""Remove tags of a node by its host_identifier"""
args = self.parser.parse_args()
status = 'failure'
if host_identifier:
node = dao.get_node_by_host_identifier(host_identifier)
elif node_id:
node = dao.getNodeById(node_id)
else:
node = None
if node:
tag = args['tag'].strip()
tag = tags_dao.get_tag_by_value(tag)
if tag:
if dao.is_tag_of_node(node, tag):
node.tags.remove(tag)
node.save()
message = "Successfully removed tags from host"
status = "success"
else:
message = "Tag provided is not in host's tag list, Please check tag once again"
else:
message = "Tag provided doesnot exists"
else:
message = "Host id or node id passed it not correct"
return marshal(respcls(message, status), parentwrapper.common_response_wrapper, skip_none=True)
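# Hedged usage sketch (illustrative, not part of the source): with this
# namespace mounted under /hosts, the tag endpoints above behave roughly as
#
#   GET    /hosts/<host_identifier>/tags                       -> list tags of a host
#   POST   /hosts/<host_identifier>/tags   {"tag": "critical"}  -> attach a tag
#   DELETE /hosts/<host_identifier>/tags   {"tag": "critical"}  -> detach a tag
#
# The same routes also exist keyed by numeric node id (/hosts/<node_id>/tags).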
@require_api_key
@ns.route('/search/export', endpoint="nodes_search_export")
@ns.doc(params={})
class ExportNodeSearchQueryCSV(Resource):
'''Export node search query to csv'''
parser = requestparse(['conditions', 'host_identifier', 'query_name', 'node_id'], [dict, str, str, int],
["conditions to search for", 'host_identifier of the node', 'name of the schedule query', 'id of the node'], [False, False, True, False])
@ns.expect(parser)
def post(self):
args = self.parser.parse_args()
host_identifier = args['host_identifier']
conditions = args['conditions']
query_name = args['query_name']
node_id = args['node_id']
if node_id or host_identifier:
if host_identifier:
node_id = get_node_id_by_host_id(host_identifier)
if not node_id:
return marshal(respcls("Host identifier given is invalid!", "failure"),
parentwrapper.common_response_wrapper, skip_none=True)
else:
return marshal(respcls("Atleast one of host identifier or node id is required!", "failure"), parentwrapper.common_response_wrapper, skip_none=True)
if conditions:
try:
search_rules = SearchParser()
root = search_rules.parse_group(conditions)
filter = root.run('', [], 'result_log')
except Exception as e:
message = str(e)
return marshal(respcls(message, "failure"), parentwrapper.common_response_wrapper, skip_none=True)
try:
results = dao.node_result_log_search_results(filter, node_id, query_name)
except:
message = "Unable to find data for the payload given"
return marshal(respcls(message, "failure"), parentwrapper.common_response_wrapper, skip_none=True)
else:
results = dao.node_result_log_results(node_id, query_name)
if results:
results = [r for r, in results]
headers = []
if not len(results) == 0:
firstRecord = results[0]
for key in firstRecord.keys():
headers.append(key)
bio = BytesIO()
writer = csv.writer(bio)
writer.writerow(headers)
for data in results:
row = []
row.extend([data.get(column, '') for column in headers])
writer.writerow(row)
bio.seek(0)
response = send_file(
bio,
mimetype='text/csv',
as_attachment=True,
attachment_filename=query_name+'_'+str(node_id)+str(dt.datetime.now())+'.csv'
)
return response
else:
message = "There are no matching results for the payload given"
return marshal(respcls(message, "failure"), parentwrapper.common_response_wrapper, skip_none=True)
@ns.route('/<string:host_identifier>/delete', endpoint='node_removed')
@ns.route('/<int:node_id>/delete', endpoint='node_removed_by_id')
class NodeRemoved(Resource):
@ns.doc(params={'node_id': "id of the host", 'host_identifier': "host identifier of the host"})
def delete(self, host_identifier=None, node_id=None):
node = None
message = "Node is not present with this node id or host identifier"
status = "failure"
if host_identifier:
node = dao.get_node_by_host_identifier(host_identifier)
if node_id:
node = dao.getNodeById(node_id)
if node:
current_app.logger.info("Host {} is requested for permanent deletion".format(node.host_identifier))
dao.delete_host(node)
message = "Successfully deleted the host"
status = "Success"
return marshal(respcls(message, status), parentwrapper.common_response_wrapper, skip_none=True)
return marshal(respcls(message, status), parentwrapper.common_response_wrapper, skip_none=True)
@ns.doc(params={'node_id': "id of the host", 'host_identifier': "host identifier of the host"})
def put(self, host_identifier=None, node_id=None):
node = None
message = "Node is not present with this node id or host identifier"
status = "failure"
if host_identifier:
node = dao.get_node_by_host_identifier(host_identifier)
if node_id:
node = dao.getNodeById(node_id)
if node:
current_app.logger.info("Host {} is requested to be disabled for all his activities from agent".format(node.host_identifier))
dao.soft_remove_host(node)
message = "Successfully removed the host"
status = "Success"
return marshal(respcls(message, status), parentwrapper.common_response_wrapper, skip_none=True)
return marshal(respcls(message, status), parentwrapper.common_response_wrapper, skip_none=True)
@ns.route('/<string:host_identifier>/enable', endpoint='node_enabled')
@ns.route('/<int:node_id>/enable', endpoint='node_enabled_by_id')
class NodeEnabled(Resource):
@ns.doc(params={'node_id': "id of the host", 'host_identifier': "host identifier of the host"})
def put(self, host_identifier=None, node_id=None):
node = None
message = "Node is not present with this node id or host identifier"
status = "failure"
if host_identifier:
node = dao.get_disable_node_by_host_identifier(host_identifier)
if node_id:
node = dao.getDisableNodeById(node_id)
if node:
current_app.logger.info("Host {} is requested to be enabled again".format(node.host_identifier))
dao.enable_host(node)
message = "Successfully enabled the host"
status = "Success"
return marshal(respcls(message, status), parentwrapper.common_response_wrapper, skip_none=True)
return marshal(respcls(message, status), parentwrapper.common_response_wrapper, skip_none=True)
| 44.903592 | 329 | 0.615896 |
4a20d54b6022592ce577936c8a87736324e0056c | 3,742 | py | Python | axiom/tags.py | hawkowl/axiom | 8636e29aa224d05388a0f58106dc29f895b82209 | ["MIT"] | 23 | 2015-01-08T07:29:32.000Z | 2021-11-03T11:27:48.000Z | axiom/tags.py | hawkowl/axiom | 8636e29aa224d05388a0f58106dc29f895b82209 | ["MIT"] | 109 | 2015-01-08T05:54:56.000Z | 2022-03-02T04:06:40.000Z | axiom/tags.py | hawkowl/axiom | 8636e29aa224d05388a0f58106dc29f895b82209 | ["MIT"] | 16 | 2015-01-08T07:08:43.000Z | 2021-07-28T02:53:17.000Z |
from epsilon.extime import Time
from axiom.item import Item
from axiom.attributes import text, reference, integer, AND, timestamp
class Tag(Item):
typeName = 'tag'
schemaVersion = 1
name = text(doc="""
The short string which is being applied as a tag to an Item.
""")
created = timestamp(doc="""
When this tag was applied to the Item to which it applies.
""")
object = reference(doc="""
The Item to which this tag applies.
""")
catalog = reference(doc="""
The L{Catalog} item in which this tag was created.
""")
tagger = reference(doc="""
An optional reference to the Item which is responsible for this tag's
existence.
""")
class _TagName(Item):
"""
Helper class to make Catalog.tagNames very fast. One of these is created
for each distinct tag name that is created. _TagName Items are never
deleted from the database.
"""
typeName = 'tagname'
name = text(doc="""
The short string which uniquely represents this tag.
""", indexed=True)
catalog = reference(doc="""
The L{Catalog} item in which this tag exists.
""")
class Catalog(Item):
typeName = 'tag_catalog'
schemaVersion = 2
tagCount = integer(default=0)
def tag(self, obj, tagName, tagger=None):
"""
"""
# check to see if that tag exists. Put the object attribute first,
# since each object should only have a handful of tags and the object
# reference is indexed. As long as this is the case, it doesn't matter
# whether the name or catalog attributes are indexed because selecting
# from a small set of results is fast even without an index.
if self.store.findFirst(Tag,
AND(Tag.object == obj,
Tag.name == tagName,
Tag.catalog == self)):
return
# if the tag doesn't exist, maybe we need to create a new tagname object
self.store.findOrCreate(_TagName, name=tagName, catalog=self)
# Increment only if we are creating a new tag
self.tagCount += 1
Tag(store=self.store, object=obj,
name=tagName, catalog=self,
created=Time(), tagger=tagger)
def tagNames(self):
"""
Return an iterator of unicode strings - the unique tag names which have
been applied objects in this catalog.
"""
return self.store.query(_TagName, _TagName.catalog == self).getColumn("name")
def tagsOf(self, obj):
"""
Return an iterator of unicode strings - the tag names which apply to
the given object.
"""
return self.store.query(
Tag,
AND(Tag.catalog == self,
Tag.object == obj)).getColumn("name")
def objectsIn(self, tagName):
return self.store.query(
Tag,
AND(Tag.catalog == self,
Tag.name == tagName)).getColumn("object")
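# Hedged usage sketch (illustrative, not part of the source); assumes an open
# axiom Store and some Item instance ``item`` already stored in it:
#
#   from axiom.store import Store
#   s = Store()
#   catalog = Catalog(store=s)
#   catalog.tag(item, u'urgent')
#   list(catalog.tagsOf(item))          # -> [u'urgent']
#   list(catalog.objectsIn(u'urgent'))  # -> [item]
#   list(catalog.tagNames())            # -> [u'urgent']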
def upgradeCatalog1to2(oldCatalog):
"""
Create _TagName instances which version 2 of Catalog automatically creates
for use in determining the tagNames result, but which version 1 of Catalog
did not create.
"""
newCatalog = oldCatalog.upgradeVersion('tag_catalog', 1, 2,
tagCount=oldCatalog.tagCount)
tags = newCatalog.store.query(Tag, Tag.catalog == newCatalog)
tagNames = tags.getColumn("name").distinct()
for t in tagNames:
_TagName(store=newCatalog.store, catalog=newCatalog, name=t)
return newCatalog
from axiom.upgrade import registerUpgrader
registerUpgrader(upgradeCatalog1to2, 'tag_catalog', 1, 2)
| 29.698413 | 85 | 0.614645 |
4a20d5a70974506d0bd75e0638db9a5df1642bf8 | 18,608 | py | Python | ava_nmt/build_model.py | SwordYork/sequencing | bcbc2006bf17315411ac3d629f7014f790b70418 | ["MIT"] | 45 | 2017-08-06T15:02:12.000Z | 2021-01-24T19:12:13.000Z | ava_nmt/build_model.py | SwordYork/sequencing | bcbc2006bf17315411ac3d629f7014f790b70418 | ["MIT"] | null | null | null | ava_nmt/build_model.py | SwordYork/sequencing | bcbc2006bf17315411ac3d629f7014f790b70418 | ["MIT"] | 14 | 2017-08-07T04:56:55.000Z | 2019-01-07T09:43:24.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Sword York
# GitHub: https://github.com/SwordYork/sequencing
# No rights reserved.
#
import json
import numpy
import sequencing as sq
import tensorflow as tf
from sequencing import TIME_MAJOR, MODE, LinearOp
from sequencing.utils.metrics import Delta_BLEU
from sequencing.utils.misc import get_rnn_cell, EncoderDecoderBridge
def cross_entropy_sequence_loss(logits, targets, sequence_length):
with tf.name_scope('cross_entropy_sequence_loss'):
total_length = tf.to_float(tf.reduce_sum(sequence_length))
batch_size = tf.to_float(tf.shape(sequence_length)[0])
entropy_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=targets)
# Mask out the losses we don't care about
loss_mask = tf.sequence_mask(
tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
loss_mask = tf.transpose(tf.to_float(loss_mask), [1, 0])
losses = entropy_losses * loss_mask
# losses.shape: T * B
# sequence_length: B
total_loss_avg = tf.reduce_sum(losses) / batch_size
return total_loss_avg
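# Descriptive note (not from the source): logits are expected time-major
# [T, B, V] and targets [T, B]; positions past each sequence_length are masked
# out, and the masked sum is divided by the batch size, so the value is an
# average per sequence rather than per token.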
def rl_sequence_loss(logits, predict_ids, sequence_length,
baseline_states, reward, start_rl_step):
# reward: T * B
with tf.name_scope('rl_sequence_loss'):
max_ml_step = tf.to_int32(tf.maximum(tf.reduce_max(start_rl_step), 0))
min_ml_step = tf.to_int32(tf.maximum(tf.reduce_min(start_rl_step), 0))
# entropy loss:
# before start_rl_step is ml entropy
# after start_rl_step should be rl entropy
entropy_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=predict_ids)
# ML loss
ml_entropy_losses = tf.slice(entropy_losses, [0, 0], [max_ml_step, -1])
# Mask out the losses we don't care about
ml_loss_mask = tf.sequence_mask(
tf.to_int32(start_rl_step), max_ml_step)
ml_loss_mask = tf.transpose(tf.to_float(ml_loss_mask), [1, 0])
ml_loss = tf.reduce_sum(ml_entropy_losses * ml_loss_mask) / \
tf.maximum(tf.reduce_sum(ml_loss_mask), 1)
# RL
rl_entropy_losses = tf.slice(entropy_losses, [min_ml_step, 0], [-1, -1])
# Mask out the losses we don't care about
rl_loss_mask = (
tf.to_float(tf.sequence_mask(
tf.to_int32(sequence_length - min_ml_step),
tf.to_int32(tf.shape(predict_ids)[0] - min_ml_step)))
- tf.to_float(tf.sequence_mask(
tf.to_int32(start_rl_step - min_ml_step),
tf.to_int32(tf.shape(predict_ids)[0] - min_ml_step))))
rl_loss_mask = tf.transpose(tf.to_float(rl_loss_mask), [1, 0])
baseline_states = tf.slice(baseline_states, [min_ml_step, 0, 0],
[-1, -1, -1])
reward = tf.slice(reward, [min_ml_step, 0], [-1, -1])
# prevent from dividing by zero
rl_total = tf.maximum(tf.reduce_sum(rl_loss_mask), 1)
with tf.variable_scope('baseline'):
reward_predicted_m = tf.contrib.layers.fully_connected(
baseline_states, baseline_states.get_shape().as_list()[-1],
activation_fn=tf.nn.relu, scope='middle')
# note, there is no negative reward, so we could use relu
reward_predicted = tf.contrib.layers.fully_connected(
reward_predicted_m, 1, activation_fn=None)
reward_predicted = tf.squeeze(reward_predicted, axis=[2])
reward_losses = tf.pow(reward_predicted - reward, 2)
reward_loss_rmse = tf.sqrt(
tf.reduce_sum(reward_losses * rl_loss_mask) / rl_total + 1e-12)
reward_entropy_losses = (reward - tf.stop_gradient(reward_predicted)) \
* rl_entropy_losses * rl_loss_mask
reward_entropy_loss = tf.reduce_sum(reward_entropy_losses) / rl_total
predict_reward = tf.cond(tf.greater(tf.shape(reward_predicted)[0], 0),
lambda: tf.reduce_mean(
tf.slice(reward_predicted, [0, 0],
[1, -1])),
lambda: tf.to_float(0))
# Calculate the average log perplexity in each batch
total_loss_avg = ml_loss + reward_entropy_loss + reward_loss_rmse
# the first reward predict is total reward
return total_loss_avg, \
ml_loss, \
reward_loss_rmse, \
predict_reward
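# Descriptive note (not from the source): steps before start_rl_step are
# trained with plain cross-entropy (ML); steps after it use a REINFORCE-style
# term weighted by (reward - baseline), where the baseline is a small MLP on
# the (stop-gradient) decoder states trained with an RMSE loss to the reward.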
def _py_func(predict_target_ids, ground_truth_ids, eos_id):
n = 4 # 4-gram
delta = True # delta future reward
batch_size = predict_target_ids.shape[1]
length = numpy.zeros(batch_size, dtype=numpy.int32)
reward = numpy.zeros_like(predict_target_ids, dtype=numpy.float32)
for i in range(batch_size):
p_id = predict_target_ids[:, i].tolist()
p_len = p_id.index(eos_id) + 1 if eos_id in p_id else len(p_id)
length[i] = p_len
p_id = p_id[:p_len]
t_id = ground_truth_ids[:, i].tolist()
t_len = t_id.index(eos_id) + 1 if eos_id in t_id else len(t_id)
t_id = t_id[:t_len]
bleu_scores = Delta_BLEU(p_id, t_id, n)
reward_i = bleu_scores[:, n - 1].copy()
if delta:
reward_i[1:] = reward_i[1:] - reward_i[:-1]
reward[:p_len, i] = reward_i[::-1].cumsum()[::-1]
else:
reward[:p_len, i] = reward_i[-1]
return reward, length
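# Hedged toy example (not from the source) of the delta-reward shaping above:
# if the 4-gram BLEU prefix scores of a 3-token prediction are [0.2, 0.5, 0.6],
# the per-step deltas are [0.2, 0.3, 0.1] and the reversed cumulative sum gives
# per-step future rewards [0.6, 0.4, 0.1], which is what gets written into
# `reward` up to the predicted length.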
def build_attention_model(params, src_vocab, trg_vocab,
source_placeholders, target_placeholders,
beam_size=1, mode=MODE.TRAIN,
burn_in_step=100000, increment_step=10000,
teacher_rate=1.0, max_step=100):
"""
Build a model.
:param params: dict.
{encoder: {rnn_cell: {},
...},
decoder: {rnn_cell: {},
...}}
for example:
{'encoder': {'rnn_cell': {'state_size': 512,
'cell_name': 'BasicLSTMCell',
'num_layers': 2,
'input_keep_prob': 1.0,
'output_keep_prob': 1.0},
'attention_key_size': attention_size},
'decoder': {'rnn_cell': {'cell_name': 'BasicLSTMCell',
'state_size': 512,
'num_layers': 1,
'input_keep_prob': 1.0,
'output_keep_prob': 1.0},
'trg_vocab_size': trg_vocab_size}}
:param src_vocab: Vocab of source symbols.
:param trg_vocab: Vocab of target symbols.
    :param source_placeholders: dict of source placeholders
        ('src', 'src_len', 'src_sample_matrix', 'src_word_len').
    :param target_placeholders: dict of target placeholders ('trg', 'trg_len').
:param beam_size: used in beam inference
:param mode:
:return:
"""
if mode != MODE.TRAIN:
params = sq.disable_dropout(params)
tf.logging.info(json.dumps(params, indent=4))
decoder_params = params['decoder']
# parameters
source_ids = source_placeholders['src']
source_seq_length = source_placeholders['src_len']
source_sample_matrix = source_placeholders['src_sample_matrix']
source_word_seq_length = source_placeholders['src_word_len']
target_ids = target_placeholders['trg']
target_seq_length = target_placeholders['trg_len']
# Because source encoder is different to the target feedback,
# we construct source_embedding_table manually
source_char_embedding_table = sq.LookUpOp(src_vocab.vocab_size,
src_vocab.embedding_dim,
name='source')
source_char_embedded = source_char_embedding_table(source_ids)
# encode char to word
char_encoder = sq.StackRNNEncoder(params['char_encoder'],
params['attention_key_size']['char'],
name='char_rnn',
mode=mode)
# char_encoder_outputs: T_c B F
char_encoded_representation = char_encoder.encode(source_char_embedded, source_seq_length)
char_encoder_outputs = char_encoded_representation.outputs
#dynamical_batch_size = tf.shape(char_encoder_outputs)[1]
#space_indices = tf.where(tf.equal(tf.transpose(source_ids), src_vocab.space_id))
##space_indices = tf.transpose(tf.gather_nd(tf.transpose(space_indices), [[1], [0]]))
#space_indices = tf.concat(tf.split(space_indices, 2, axis=1)[::-1], axis=1)
#space_indices = tf.transpose(tf.reshape(space_indices, [dynamical_batch_size, -1, 2]),
# [1, 0, 2])
## T_w * B * F
#source_embedded = tf.gather_nd(char_encoder_outputs, space_indices)
# must be time major
char_encoder_outputs = tf.transpose(char_encoder_outputs, perm=(1, 0, 2))
sampled_word_embedded = tf.matmul(source_sample_matrix, char_encoder_outputs)
source_embedded = tf.transpose(sampled_word_embedded, perm=(1, 0, 2))
char_attention_keys = char_encoded_representation.attention_keys
char_attention_values = char_encoded_representation.attention_values
char_attention_length = char_encoded_representation.attention_length
encoder = sq.StackBidirectionalRNNEncoder(params['encoder'],
params['attention_key_size']['word'],
name='stack_rnn',
mode=mode)
encoded_representation = encoder.encode(source_embedded, source_word_seq_length)
attention_keys = encoded_representation.attention_keys
attention_values = encoded_representation.attention_values
attention_length = encoded_representation.attention_length
encoder_final_states_bw = encoded_representation.final_state[-1][-1].h
# feedback
if mode == MODE.RL:
tf.logging.info('BUILDING RL TRAIN FEEDBACK......')
dynamical_batch_size = tf.shape(attention_keys)[1]
feedback = sq.RLTrainingFeedBack(target_ids, target_seq_length,
trg_vocab, dynamical_batch_size,
burn_in_step=burn_in_step,
increment_step=increment_step,
max_step=max_step)
elif mode == MODE.TRAIN:
tf.logging.info('BUILDING TRAIN FEEDBACK WITH {} TEACHER_RATE'
'......'.format(teacher_rate))
feedback = sq.TrainingFeedBack(target_ids, target_seq_length,
trg_vocab, teacher_rate,
max_step=max_step)
elif mode == MODE.EVAL:
tf.logging.info('BUILDING EVAL FEEDBACK ......')
feedback = sq.TrainingFeedBack(target_ids, target_seq_length,
trg_vocab, 0.,
max_step=max_step)
else:
tf.logging.info('BUILDING INFER FEEDBACK WITH BEAM_SIZE {}'
'......'.format(beam_size))
infer_key_size = attention_keys.get_shape().as_list()[-1]
infer_value_size = attention_values.get_shape().as_list()[-1]
infer_states_bw_shape = encoder_final_states_bw.get_shape().as_list()[-1]
infer_char_key_size = char_attention_keys.get_shape().as_list()[-1]
infer_char_value_size = char_attention_values.get_shape().as_list()[-1]
encoder_final_states_bw = tf.reshape(tf.tile(encoder_final_states_bw, [1, beam_size]),
[-1, infer_states_bw_shape])
# expand beam
if TIME_MAJOR:
# batch size should be dynamical
dynamical_batch_size = tf.shape(attention_keys)[1]
final_key_shape = [-1, dynamical_batch_size * beam_size,
infer_key_size]
final_value_shape = [-1, dynamical_batch_size * beam_size,
infer_value_size]
attention_keys = tf.reshape(
(tf.tile(attention_keys, [1, 1, beam_size])), final_key_shape)
attention_values = tf.reshape(
(tf.tile(attention_values, [1, 1, beam_size])),
final_value_shape)
final_char_key_shape = [-1, dynamical_batch_size * beam_size,
infer_char_key_size]
final_char_value_shape = [-1, dynamical_batch_size * beam_size,
infer_char_value_size]
char_attention_keys = tf.reshape(
(tf.tile(char_attention_keys, [1, 1, beam_size])), final_char_key_shape)
char_attention_values = tf.reshape(
(tf.tile(char_attention_values, [1, 1, beam_size])),
final_char_value_shape)
else:
dynamical_batch_size = tf.shape(attention_keys)[0]
final_key_shape = [dynamical_batch_size * beam_size, -1,
infer_key_size]
final_value_shape = [dynamical_batch_size * beam_size, -1,
infer_value_size]
final_char_key_shape = [dynamical_batch_size * beam_size, -1,
infer_char_key_size]
final_char_value_shape = [dynamical_batch_size * beam_size, -1,
infer_char_value_size]
attention_keys = tf.reshape(
(tf.tile(attention_keys, [1, beam_size, 1])), final_key_shape)
attention_values = tf.reshape(
(tf.tile(attention_values, [1, beam_size, 1])),
final_value_shape)
char_attention_keys = tf.reshape(
(tf.tile(char_attention_keys, [1, beam_size, 1])), final_char_key_shape)
char_attention_values = tf.reshape(
(tf.tile(char_attention_values, [1, beam_size, 1])),
final_char_value_shape)
attention_length = tf.reshape(
tf.transpose(tf.tile([attention_length], [beam_size, 1])), [-1])
char_attention_length = tf.reshape(
tf.transpose(tf.tile([char_attention_length], [beam_size, 1])), [-1])
feedback = sq.BeamFeedBack(trg_vocab, beam_size, dynamical_batch_size,
max_step=max_step)
encoder_decoder_bridge = EncoderDecoderBridge(encoder_final_states_bw.get_shape().as_list()[-1],
decoder_params['rnn_cell'])
decoder_state_size = decoder_params['rnn_cell']['state_size']
# attention
attention = sq.AvAttention(decoder_state_size,
attention_keys, attention_values, attention_length,
char_attention_keys, char_attention_values, char_attention_length)
context_size = attention.context_size
with tf.variable_scope('logits_func'):
attention_mix = LinearOp(
context_size + feedback.embedding_dim + decoder_state_size,
decoder_state_size , name='attention_mix')
attention_mix_middle = LinearOp(
decoder_state_size, decoder_state_size // 2,
name='attention_mix_middle')
logits_trans = LinearOp(decoder_state_size // 2, feedback.vocab_size,
name='logits_trans')
logits_func = lambda _softmax: logits_trans(
tf.nn.relu(attention_mix_middle(
tf.nn.relu(attention_mix(_softmax)))))
# decoder
decoder = sq.AttentionRNNDecoder(decoder_params, attention,
feedback,
logits_func=logits_func,
init_state=encoder_decoder_bridge(encoder_final_states_bw),
mode=mode)
decoder_output, decoder_final_state = sq.dynamic_decode(decoder,
swap_memory=True,
scope='decoder')
# not training
if mode == MODE.EVAL or mode == MODE.INFER:
return decoder_output, decoder_final_state
# bos is added in feedback
# so target_ids is predict_ids
if not TIME_MAJOR:
ground_truth_ids = tf.transpose(target_ids, [1, 0])
else:
ground_truth_ids = target_ids
# construct the loss
if mode == MODE.RL:
# Creates a variable to hold the global_step.
global_step_tensor = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='global_step')[0]
rl_time_steps = tf.floordiv(tf.maximum(global_step_tensor -
burn_in_step, 0),
increment_step)
start_rl_step = target_seq_length - rl_time_steps
baseline_states = tf.stop_gradient(decoder_output.baseline_states)
predict_ids = tf.stop_gradient(decoder_output.predicted_ids)
# TODO: bug in tensorflow
ground_or_predict_ids = tf.cond(tf.greater(rl_time_steps, 0),
lambda: predict_ids,
lambda: ground_truth_ids)
reward, sequence_length = tf.py_func(
func=_py_func,
inp=[ground_or_predict_ids, ground_truth_ids, trg_vocab.eos_id],
Tout=[tf.float32, tf.int32],
name='reward')
sequence_length.set_shape((None,))
total_loss_avg, entropy_loss_avg, reward_loss_rmse, reward_predicted \
= rl_sequence_loss(
logits=decoder_output.logits,
predict_ids=predict_ids,
sequence_length=sequence_length,
baseline_states=baseline_states,
start_rl_step=start_rl_step,
reward=reward)
return decoder_output, total_loss_avg, entropy_loss_avg, \
reward_loss_rmse, reward_predicted
else:
total_loss_avg = cross_entropy_sequence_loss(
logits=decoder_output.logits,
targets=ground_truth_ids,
sequence_length=target_seq_length)
return decoder_output, total_loss_avg, total_loss_avg, \
tf.to_float(0.), tf.to_float(0.)
| 44.623501 | 100 | 0.594153 |
4a20d75f6b8429fbcaf8846ba18df6aba06ef0e1 | 1,449 | py | Python | WeatherScreens/ImageScreen.py | ondrejkoren/WeatherFrame | 2f9da22e5fa162e9ec7af218f353c16eb1250572 | ["MIT"] | 2 | 2020-10-26T19:18:11.000Z | 2020-10-27T17:27:40.000Z | WeatherScreens/ImageScreen.py | ondrejkoren/WeatherFrame | 2f9da22e5fa162e9ec7af218f353c16eb1250572 | ["MIT"] | null | null | null | WeatherScreens/ImageScreen.py | ondrejkoren/WeatherFrame | 2f9da22e5fa162e9ec7af218f353c16eb1250572 | ["MIT"] | 1 | 2020-12-18T03:42:42.000Z | 2020-12-18T03:42:42.000Z |
from PIL import Image, ImageDraw, ImageFont
from suntime import Sun, SunTimeException
from dateutil import tz
from datetime import datetime, date, timedelta
import pyowm
import math
from random import randrange
from . import constants
import dateutil.parser
from . import ScreenBase
class ImageScreen(ScreenBase.ScreenBase):
def __init__(self,
path: str,
resolution: tuple = (constants.DISPLAY_WIDTH, constants.DISPLAY_HEIGHT)):
ScreenBase.ScreenBase.__init__(self)
self.path = path
self.resolution = resolution
def render(self):
image_file = Image.open(self.path) # open colour image
width, height = image_file.size # Get dimensions
scale = width/self.resolution[0]
print("scale: ", scale)
image_file = image_file.resize((int(width/scale), int(height/scale)), Image.ANTIALIAS)
width, height = image_file.size # Get dimensions
new_x, new_y = 800, 480
if width < height:
new_x, new_y = new_y, new_x
left = (width - new_x) / 2
top = (height - new_y) / 2
right = (width + new_x) / 2
bottom = (height + new_y) / 2
# Crop the center of the image
image_file = image_file.crop((left, top, right, bottom))
self.image = image_file.convert('1') # convert image to black and white
draw = ImageDraw.Draw(self.image)
return self.image
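# Hedged usage sketch (illustrative, not part of the source; the path below is
# hypothetical):
#
#   screen = ImageScreen('photos/example.jpg', resolution=(800, 480))
#   frame = screen.render()   # 1-bit PIL image, centre-cropped to 800x480
#                             # (or 480x800 for portrait inputs)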
| 34.5 | 94 | 0.643892 |
4a20d8c8785576d9bad952a75a13645b72f2ff75 | 104,386 | py | Python | trainer.py | qianrusun1015/Disentangled-Person-Image-Generation | e4703860bb1b351050ce50f339985ff0811f1d64 | ["MIT"] | 165 | 2018-03-28T10:50:36.000Z | 2022-01-09T10:56:58.000Z | trainer.py | qianrusun1015/Disentangled-Person-Image-Generation | e4703860bb1b351050ce50f339985ff0811f1d64 | ["MIT"] | 13 | 2018-04-11T08:04:44.000Z | 2020-11-20T12:50:33.000Z | trainer.py | saswat0/Disentangled-Person-Image-Generation | e4703860bb1b351050ce50f339985ff0811f1d64 | ["MIT"] | 38 | 2018-05-16T08:50:11.000Z | 2021-05-14T02:56:52.000Z |
from __future__ import print_function
# from trainer_Base import *
import os, pdb
import StringIO
import scipy.misc
import numpy as np
import glob
from itertools import chain
from collections import deque
import pickle, shutil
from tqdm import tqdm
from tqdm import trange
from skimage.measure import compare_ssim as ssim
from skimage.color import rgb2gray
from PIL import Image
from tensorflow.python.ops import control_flow_ops, sparse_ops
import models
from utils import *
import tflib as lib
from wgan_gp import *
from datasets import market1501, deepfashion, dataset_utils
##############################################################################################
######################### Market1501 with FgBgPose BodyROIVis ################################
class DPIG_Encoder_GAN_BodyROI(object):
def __init__(self, config):
self._common_init(config)
self.keypoint_num = 18
self.D_arch = config.D_arch
self.part_num = 37 ## Also change *7 --> *37 in datasets/market1501.py
if 'market' in config.dataset.lower():
if config.is_train:
self.dataset_obj = market1501.get_split('train', config.data_path)
else:
self.dataset_obj = market1501.get_split('test', config.data_path)
self.x, self.x_target, self.pose, self.pose_target, self.pose_rcv, self.pose_rcv_target, self.mask_r4, self.mask_r4_target, self.mask_r6, self.mask_r6_target, \
self.part_bbox, self.part_bbox_target, self.part_vis, self.part_vis_target = self._load_batch_pair_pose(self.dataset_obj)
def _common_init(self, config):
self.config = config
self.data_loader = None
self.dataset = config.dataset
self.beta1 = config.beta1
self.beta2 = config.beta2
self.optimizer = config.optimizer
self.batch_size = config.batch_size
self.step = tf.Variable(config.start_step, name='step', trainable=False)
self.g_lr = tf.Variable(config.g_lr, dtype=tf.float32, name='g_lr')
self.d_lr = tf.Variable(config.d_lr, dtype=tf.float32, name='d_lr')
self.g_lr_update = tf.assign(self.g_lr, self.g_lr * 0.5, name='g_lr_update')
self.d_lr_update = tf.assign(self.d_lr, self.d_lr * 0.5, name='d_lr_update')
self.gamma = config.gamma
self.lambda_k = config.lambda_k
self.z_num = config.z_num
self.conv_hidden_num = config.conv_hidden_num
self.img_H, self.img_W = config.img_H, config.img_W
self.model_dir = config.model_dir
self.load_path = config.load_path
self.use_gpu = config.use_gpu
self.data_format = config.data_format
_, height, width, self.channel = self._get_conv_shape()
self.repeat_num = int(np.log2(height)) - 2
self.data_path = config.data_path
self.pretrained_path = config.pretrained_path
self.pretrained_appSample_path = config.pretrained_appSample_path
self.pretrained_poseAE_path = config.pretrained_poseAE_path
self.pretrained_poseSample_path = config.pretrained_poseSample_path
self.ckpt_path = config.ckpt_path
self.z_emb_dir = config.z_emb_dir
self.start_step = config.start_step
self.log_step = config.log_step
self.max_step = config.max_step
# self.save_model_secs = config.save_model_secs
self.lr_update_step = config.lr_update_step
self.is_train = config.is_train
self.sample_app = config.sample_app
self.sample_fg = config.sample_fg
self.sample_bg = config.sample_bg
self.sample_pose = config.sample_pose
self.one_app_per_batch = config.one_app_per_batch
self.interpolate_fg = config.interpolate_fg
self.interpolate_fg_up = config.interpolate_fg_up
self.interpolate_fg_down = config.interpolate_fg_down
self.interpolate_bg = config.interpolate_bg
self.interpolate_pose = config.interpolate_pose
self.inverse_fg = config.inverse_fg
self.inverse_bg = config.inverse_bg
self.inverse_pose = config.inverse_pose
self.config = config
if self.is_train:
self.num_threads = 4
self.capacityCoff = 2
else: # during testing to keep the order of the input data
self.num_threads = 1
self.capacityCoff = 1
def _get_conv_shape(self):
shape = [self.batch_size, self.img_H, self.img_W, 3]
return shape
def _getOptimizer(self, wgan_gp, gen_cost, disc_cost, G_var, D_var):
clip_disc_weights = None
if wgan_gp.MODE == 'wgan':
gen_train_op = tf.train.RMSPropOptimizer(learning_rate=self.g_lr).minimize(gen_cost,
var_list=G_var, colocate_gradients_with_ops=True)
disc_train_op = tf.train.RMSPropOptimizer(learning_rate=self.d_lr).minimize(disc_cost,
var_list=D_var, colocate_gradients_with_ops=True)
clip_ops = []
for var in lib.params_with_name('Discriminator'):
clip_bounds = [-.01, .01]
clip_ops.append(tf.assign(var, tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])))
clip_disc_weights = tf.group(*clip_ops)
elif wgan_gp.MODE == 'wgan-gp':
gen_train_op = tf.train.AdamOptimizer(learning_rate=self.g_lr, beta1=0.5, beta2=0.9).minimize(gen_cost,
var_list=G_var, colocate_gradients_with_ops=True)
disc_train_op = tf.train.AdamOptimizer(learning_rate=self.d_lr, beta1=0.5, beta2=0.9).minimize(disc_cost,
var_list=D_var, colocate_gradients_with_ops=True)
elif wgan_gp.MODE == 'dcgan':
gen_train_op = tf.train.AdamOptimizer(learning_rate=self.g_lr, beta1=0.5).minimize(gen_cost,
var_list=G_var, colocate_gradients_with_ops=True)
disc_train_op = tf.train.AdamOptimizer(learning_rate=self.d_lr, beta1=0.5).minimize(disc_cost,
var_list=D_var, colocate_gradients_with_ops=True)
elif wgan_gp.MODE == 'lsgan':
gen_train_op = tf.train.RMSPropOptimizer(learning_rate=self.g_lr).minimize(gen_cost,
var_list=G_var, colocate_gradients_with_ops=True)
disc_train_op = tf.train.RMSPropOptimizer(learning_rate=self.d_lr).minimize(disc_cost,
var_list=D_var, colocate_gradients_with_ops=True)
else:
raise Exception()
return gen_train_op, disc_train_op, clip_disc_weights
def _getDiscriminator(self, wgan_gp, arch='DCGAN'):
if 'DCGAN'==arch:
return wgan_gp.DCGANDiscriminator
elif 'FCDis'==arch:
return wgan_gp.FCDiscriminator
if arch.startswith('DCGANRegion'):
return wgan_gp.DCGANDiscriminatoRegion
raise Exception('You must choose an architecture!')
# def _getDiscriminator(self, wgan_gp, arch='DCGAN'):
# if 'Patch70x70'==arch:
# return wgan_gp.PatchDiscriminator_70x70
# elif 'Patch46x46'==arch:
# return wgan_gp.PatchDiscriminator_46x46
# elif 'Patch28x28'==arch:
# return wgan_gp.PatchDiscriminator_28x28
# elif 'Patch16x16'==arch:
# return wgan_gp.PatchDiscriminator_16x16
# elif 'Patch13x13'==arch:
# return wgan_gp.PatchDiscriminator_13x13
# elif 'DCGAN'==arch:
# # Baseline (G: DCGAN, D: DCGAN)
# return wgan_gp.DCGANDiscriminator
# elif 'FCDis'==arch:
# return wgan_gp.FCDiscriminator
# raise Exception('You must choose an architecture!')
def init_net(self):
self.build_model()
if self.pretrained_path is not None:
var1 = tf.get_collection(tf.GraphKeys.VARIABLES, scope='Encoder')
var2 = tf.get_collection(tf.GraphKeys.VARIABLES, scope='ID_AE')
self.saverPart = tf.train.Saver(var1+var2, max_to_keep=20)
if self.pretrained_poseAE_path is not None:
var = tf.get_collection(tf.GraphKeys.VARIABLES, scope='PoseAE')
self.saverPoseAEPart = tf.train.Saver(var, max_to_keep=20)
self.saver = tf.train.Saver(max_to_keep=20)
self.summary_writer = tf.summary.FileWriter(self.model_dir)
sv = tf.train.Supervisor(logdir=self.model_dir,
is_chief=True,
saver=None,
summary_op=None,
summary_writer=self.summary_writer,
global_step=self.step,
save_model_secs=0,
ready_for_local_init_op=None)
gpu_options = tf.GPUOptions(allow_growth=True)
sess_config = tf.ConfigProto(allow_soft_placement=True,
gpu_options=gpu_options)
self.sess = sv.prepare_or_wait_for_session(config=sess_config)
if self.pretrained_path is not None:
self.saverPart.restore(self.sess, self.pretrained_path)
print('restored from pretrained_path:', self.pretrained_path)
if self.pretrained_poseAE_path is not None:
self.saverPoseAEPart.restore(self.sess, self.pretrained_poseAE_path)
print('restored from pretrained_poseAE_path:', self.pretrained_poseAE_path)
if self.ckpt_path is not None:
self.saver.restore(self.sess, self.ckpt_path)
print('restored from ckpt_path:', self.ckpt_path)
self.test_dir_name = 'test_result'
def _gan_loss(self, wgan_gp, Discriminator, disc_real, disc_fake, real_data=None, fake_data=None, arch=None):
if wgan_gp.MODE == 'wgan':
gen_cost = -tf.reduce_mean(disc_fake)
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
elif wgan_gp.MODE == 'wgan-gp':
gen_cost = -tf.reduce_mean(disc_fake)
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
alpha = tf.random_uniform(
shape=[wgan_gp.BATCH_SIZE/len(wgan_gp.DEVICES),1,1,1],
minval=0.,
maxval=1.
)
differences = fake_data - real_data
interpolates = real_data + (tf.squeeze(alpha,[2,3])*differences)
gradients = tf.gradients(Discriminator(interpolates), [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes-1.)**2)
disc_cost += wgan_gp.LAMBDA*gradient_penalty
elif wgan_gp.MODE == 'dcgan':
gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,
labels=tf.ones_like(disc_fake)))
disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,
labels=tf.zeros_like(disc_fake)))
disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real,
labels=tf.ones_like(disc_real)))
disc_cost /= 2.
elif wgan_gp.MODE == 'lsgan':
gen_cost = tf.reduce_mean((disc_fake - 1)**2)
disc_cost = (tf.reduce_mean((disc_real - 1)**2) + tf.reduce_mean((disc_fake - 0)**2))/2.
else:
raise Exception()
return gen_cost, disc_cost
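    # Descriptive reference for the branches above (not from the source):
    #   'dcgan':   G: BCE(D(fake), 1);  D: (BCE(D(fake), 0) + BCE(D(real), 1)) / 2
    #   'lsgan':   G: E[(D(fake) - 1)^2];  D: (E[(D(real) - 1)^2] + E[D(fake)^2]) / 2
    #   'wgan':    G: -E[D(fake)];  D: E[D(fake)] - E[D(real)]  (weights clipped elsewhere)
    #   'wgan-gp': as 'wgan' plus LAMBDA * E[(||grad D(x_interp)|| - 1)^2] on the critic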
def _define_input(self):
self.is_train_tensor = tf.Variable(self.is_train, name='phase')
self.Generator_fn = models.GeneratorCNN_ID_UAEAfterResidual
self.wgan_gp = WGAN_GP(DATA_DIR='', MODE='dcgan', DIM=64, BATCH_SIZE=self.batch_size, ITERS=200000,
LAMBDA=10, G_OUTPUT_DIM=self.img_H*self.img_W*3)
self.Discriminator_fn = self._getDiscriminator(self.wgan_gp, arch=self.D_arch)
def build_model(self):
self._define_input()
with tf.variable_scope("Encoder") as vs:
pb_list = tf.split(self.part_bbox, self.part_num, axis=1)
pv_list = tf.split(self.part_vis, self.part_num, axis=1)
## Part 1-3 (totally 3)
# self.embs, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROI(self.x, self.part_bbox, len(indices), 64,
# self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
# Part 1-7 (totally 7)
indices = range(7)
select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
self.embs, _, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROI(self.x, select_part_bbox, len(indices), 32,
self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
# self.embs, _, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROI2(self.x, select_part_bbox, len(indices), 32,
# self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=0.9, reuse=False)
## Part 1,4-8 (totally 6)
# indices = [1] + range(4,9)
# select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
# self.embs, _, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROI(self.x, select_part_bbox, len(indices), 32,
# self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
## Part 1,8-16 (totally 10)
# indices = [0] + range(7,16)
# select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
# select_part_vis = tf.cast(tf.concat([pv_list[i] for i in indices], axis=1), tf.float32)
# self.embs, _, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROIVis(self.x, select_part_bbox, select_part_vis, len(indices), 32,
# self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
self.embs_rep = tf.tile(tf.expand_dims(self.embs,-1), [1, 1, self.img_H*self.img_W])
self.embs_rep = tf.reshape(self.embs_rep, [self.batch_size, -1, self.img_H, self.img_W])
self.embs_rep = nchw_to_nhwc(self.embs_rep)
with tf.variable_scope("ID_AE") as vs:
G, _, self.G_var = self.Generator_fn(
self.embs_rep, self.pose,
self.channel, self.z_num, self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
self.G_var += self.Encoder_var
self.G = denorm_img(G, self.data_format)
pair = tf.concat([self.x, G], 0)
self.D_z = self.Discriminator_fn(tf.transpose( pair, [0,3,1,2] ), input_dim=3)
self.D_var = lib.params_with_name('Discriminator.')
D_z_pos, D_z_neg = tf.split(self.D_z, 2)
self.g_loss, self.d_loss = self._gan_loss(self.wgan_gp, self.Discriminator_fn, D_z_pos, D_z_neg, arch=self.D_arch)
self.PoseMaskLoss = tf.reduce_mean(tf.abs(G - self.x) * (self.mask_r6))
self.L1Loss = tf.reduce_mean(tf.abs(G - self.x))
self.g_loss_only = self.g_loss
self._define_loss_optim()
self.summary_op = tf.summary.merge([
tf.summary.image("G", self.G),
tf.summary.scalar("loss/PoseMaskLoss", self.PoseMaskLoss),
tf.summary.scalar("loss/L1Loss", self.L1Loss),
tf.summary.scalar("loss/g_loss", self.g_loss),
tf.summary.scalar("loss/g_loss_only", self.g_loss_only),
tf.summary.scalar("loss/d_loss", self.d_loss),
tf.summary.scalar("misc/d_lr", self.d_lr),
tf.summary.scalar("misc/g_lr", self.g_lr),
])
def _define_loss_optim(self):
self.g_loss += self.L1Loss * 20
self.g_optim, self.d_optim, self.clip_disc_weights = self._getOptimizer(self.wgan_gp,
self.g_loss, self.d_loss, self.G_var, self.D_var)
def train(self):
x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, mask_fixed, mask_target_fixed, part_bbox_fixed, \
part_bbox_target_fixed, part_vis_fixed, part_vis_target_fixed = self.get_image_from_loader()
save_image(x_fixed, '{}/x_fixed.png'.format(self.model_dir))
save_image(x_target_fixed, '{}/x_target_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_target_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_target_fixed.png'.format(self.model_dir))
save_image(mask_fixed, '{}/mask_fixed.png'.format(self.model_dir))
save_image(mask_target_fixed, '{}/mask_target_fixed.png'.format(self.model_dir))
for step in trange(self.start_step, self.max_step):
if step>0:
self.sess.run([self.g_optim])
# Train critic
if (self.wgan_gp.MODE == 'dcgan') or (self.wgan_gp.MODE == 'lsgan'):
disc_ITERS = 1
else:
disc_ITERS = self.wgan_gp.CRITIC_ITERS
for i in xrange(disc_ITERS):
self.sess.run(self.d_optim)
if self.wgan_gp.MODE == 'wgan':
self.sess.run(self.clip_disc_weights)
if 0==step or step % self.log_step == self.log_step-1:
fetch_dict = {
"summary": self.summary_op
}
result = self.sess.run(fetch_dict)
self.summary_writer.add_summary(result['summary'], step)
self.summary_writer.flush()
if 0==step or step % (self.log_step * 3) == (self.log_step * 3)-1:
x = process_image(x_fixed, 127.5, 127.5)
x_target = process_image(x_target_fixed, 127.5, 127.5)
self.generate(x, x_target, pose_fixed, part_bbox_fixed, part_vis_fixed, self.model_dir, idx=step)
if step % self.lr_update_step == self.lr_update_step - 1:
self.sess.run([self.g_lr_update, self.d_lr_update])
if step % (self.log_step * 30) == (self.log_step * 30)-1:
self.saver.save(self.sess, os.path.join(self.model_dir, 'model.ckpt'), global_step=step)
def test(self):
test_result_dir = os.path.join(self.model_dir, 'test_result')
test_result_dir_x = os.path.join(test_result_dir, 'x')
test_result_dir_x_target = os.path.join(test_result_dir, 'x_target')
test_result_dir_G = os.path.join(test_result_dir, 'G')
test_result_dir_pose = os.path.join(test_result_dir, 'pose')
test_result_dir_pose_target = os.path.join(test_result_dir, 'pose_target')
test_result_dir_mask = os.path.join(test_result_dir, 'mask')
test_result_dir_mask_target = os.path.join(test_result_dir, 'mask_target')
if not os.path.exists(test_result_dir):
os.makedirs(test_result_dir)
if not os.path.exists(test_result_dir_x):
os.makedirs(test_result_dir_x)
if not os.path.exists(test_result_dir_x_target):
os.makedirs(test_result_dir_x_target)
if not os.path.exists(test_result_dir_G):
os.makedirs(test_result_dir_G)
if not os.path.exists(test_result_dir_pose):
os.makedirs(test_result_dir_pose)
if not os.path.exists(test_result_dir_pose_target):
os.makedirs(test_result_dir_pose_target)
if not os.path.exists(test_result_dir_mask):
os.makedirs(test_result_dir_mask)
if not os.path.exists(test_result_dir_mask_target):
os.makedirs(test_result_dir_mask_target)
for i in xrange(100):
x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, mask_fixed, mask_target_fixed, part_bbox_fixed, \
part_bbox_target_fixed, part_vis_fixed, part_vis_target_fixed = self.get_image_from_loader()
x = process_image(x_fixed, 127.5, 127.5)
x_target = process_image(x_target_fixed, 127.5, 127.5)
if 0==i:
x_fake = self.generate(x, x_target, pose_fixed, part_bbox_fixed, part_vis_fixed, test_result_dir, idx=self.start_step, save=True)
else:
x_fake = self.generate(x, x_target, pose_fixed, part_bbox_fixed, part_vis_fixed, test_result_dir, idx=self.start_step, save=False)
p = (np.amax(pose_fixed, axis=-1, keepdims=False)+1.0)*127.5
pt = (np.amax(pose_target_fixed, axis=-1, keepdims=False)+1.0)*127.5
for j in xrange(self.batch_size):
idx = i*self.batch_size+j
im = Image.fromarray(x_fixed[j,:].astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_x, idx))
im = Image.fromarray(x_target_fixed[j,:].astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_x_target, idx))
im = Image.fromarray(x_fake[j,:].astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_G, idx))
im = Image.fromarray(p[j,:].astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_pose, idx))
im = Image.fromarray(pt[j,:].astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_pose_target, idx))
im = Image.fromarray(mask_fixed[j,:].squeeze().astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_mask, idx))
im = Image.fromarray(mask_target_fixed[j,:].squeeze().astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_mask_target, idx))
if 0==i:
save_image(x_fixed, '{}/x_fixed.png'.format(test_result_dir))
save_image(x_target_fixed, '{}/x_target_fixed.png'.format(test_result_dir))
save_image(mask_fixed, '{}/mask_fixed.png'.format(test_result_dir))
save_image(mask_target_fixed, '{}/mask_target_fixed.png'.format(test_result_dir))
save_image((np.amax(pose_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_fixed.png'.format(test_result_dir))
save_image((np.amax(pose_target_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_target_fixed.png'.format(test_result_dir))
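# test_one_by_one: generates one image pair at a time from raw images plus pickled keypoint
# peak/subset annotations, rebuilding the sparse pose heatmaps and pose masks on the fly
# (radius-4 'Solid' mode) rather than reading them from the dataset pipeline above.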
def test_one_by_one(self, img_dir, pair_path, all_peaks_path, subsets_path,
pair_num=500, shuffle=True, random_int=0, result_dir_name='test_demo'):
test_result_dir = os.path.join(self.model_dir, result_dir_name)
test_result_dir_x = os.path.join(test_result_dir, 'x')
test_result_dir_x_target = os.path.join(test_result_dir, 'x_target')
test_result_dir_G = os.path.join(test_result_dir, 'G')
test_result_dir_pose = os.path.join(test_result_dir, 'pose')
test_result_dir_pose_target = os.path.join(test_result_dir, 'pose_target')
test_result_dir_mask = os.path.join(test_result_dir, 'mask')
test_result_dir_mask_target = os.path.join(test_result_dir, 'mask_target')
if not os.path.exists(test_result_dir):
os.makedirs(test_result_dir)
if not os.path.exists(test_result_dir_x):
os.makedirs(test_result_dir_x)
if not os.path.exists(test_result_dir_x_target):
os.makedirs(test_result_dir_x_target)
if not os.path.exists(test_result_dir_G):
os.makedirs(test_result_dir_G)
if not os.path.exists(test_result_dir_pose):
os.makedirs(test_result_dir_pose)
if not os.path.exists(test_result_dir_pose_target):
os.makedirs(test_result_dir_pose_target)
if not os.path.exists(test_result_dir_mask):
os.makedirs(test_result_dir_mask)
if not os.path.exists(test_result_dir_mask_target):
os.makedirs(test_result_dir_mask_target)
pairs = pickle.load(open(pair_path, 'rb'))
all_peaks_dic = pickle.load(open(all_peaks_path, 'rb'))
subsets_dic = pickle.load(open(subsets_path, 'rb'))

if shuffle:
np.random.seed(0)
idx_all = np.random.permutation(len(pairs))
else:
idx_all = np.array(range(len(pairs)))
# idx_list = idx_all[:test_pair_num]
height, width, _ = scipy.misc.imread(os.path.join(img_dir, pairs[0][0])).shape
cnt = -1
for i in trange(len(idx_all)):
if cnt>= pair_num-1:
break
idx = idx_all[i]
if (pairs[idx][0] in all_peaks_dic) and (pairs[idx][1] in all_peaks_dic):
cnt += 1
## Pose 0
peaks_0 = _get_valid_peaks(all_peaks_dic[pairs[idx][0]], subsets_dic[pairs[idx][0]])
indices_r4_0, values_r4_0, shape = _getSparsePose(peaks_0, height, width, self.keypoint_num, radius=4, mode='Solid')
pose_dense_0 = _sparse2dense(indices_r4_0, values_r4_0, shape)
pose_mask_r4_0 = _getPoseMask(peaks_0, height, width, radius=4, mode='Solid')
## Pose 1
peaks_1 = _get_valid_peaks(all_peaks_dic[pairs[idx][1]], subsets_dic[pairs[idx][1]])
indices_r4_1, values_r4_1, shape = _getSparsePose(peaks_1, height, width, self.keypoint_num, radius=4, mode='Solid')
pose_dense_1 = _sparse2dense(indices_r4_1, values_r4_1, shape)
pose_mask_r4_1 = _getPoseMask(peaks_1, height, width, radius=4, mode='Solid')
## Generate image
x = scipy.misc.imread(os.path.join(img_dir, pairs[idx][0]))
x = process_image(x, 127.5, 127.5)
x_target = scipy.misc.imread(os.path.join(img_dir, pairs[idx][1]))
x_target = process_image(x_target, 127.5, 127.5)
x_batch = np.expand_dims(x,axis=0)
pose_batch = np.expand_dims(pose_dense_1*2-1,axis=0)
G = self.sess.run(self.G, {self.x: x_batch, self.pose_target: pose_batch})
## Save
shutil.copy(os.path.join(img_dir, pairs[idx][0]), os.path.join(test_result_dir_x, 'pair%05d-%s'%(cnt, pairs[idx][0])))
shutil.copy(os.path.join(img_dir, pairs[idx][1]), os.path.join(test_result_dir_x_target, 'pair%05d-%s'%(cnt, pairs[idx][1])))
im = Image.fromarray(G.squeeze().astype(np.uint8))
im.save('%s/pair%05d-%s-%s.jpg'%(test_result_dir_G, cnt, pairs[idx][0], pairs[idx][1]))
im = np.amax(pose_dense_0, axis=-1, keepdims=False)*255
im = Image.fromarray(im.astype(np.uint8))
im.save('%s/pair%05d-%s.jpg'%(test_result_dir_pose, cnt, pairs[idx][0]))
im = np.amax(pose_dense_1, axis=-1, keepdims=False)*255
im = Image.fromarray(im.astype(np.uint8))
im.save('%s/pair%05d-%s.jpg'%(test_result_dir_pose_target, cnt, pairs[idx][1]))
im = pose_mask_r4_0*255
im = Image.fromarray(im.astype(np.uint8))
im.save('%s/pair%05d-%s.jpg'%(test_result_dir_mask, cnt, pairs[idx][0]))
im = pose_mask_r4_1*255
im = Image.fromarray(im.astype(np.uint8))
im.save('%s/pair%05d-%s.jpg'%(test_result_dir_mask_target, cnt, pairs[idx][1]))
else:
continue
def generate(self, x_fixed, x_target_fixed, pose_fixed, part_bbox_fixed, part_vis_fixed, root_path=None, path=None, idx=None, save=True):
G = self.sess.run(self.G, {self.x: x_fixed, self.pose: pose_fixed, self.part_bbox: part_bbox_fixed, self.part_vis: part_vis_fixed})
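# Mean grayscale SSIM between each generated sample and its input is computed purely as a
# logging metric; it only appears in the filename of the saved sample grid.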
ssim_G_x_list = []
for i in xrange(G.shape[0]):
G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
x_gray = rgb2gray(((x_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
ssim_G_x_mean = np.mean(ssim_G_x_list)
if path is None and save:
path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx,ssim_G_x_mean))
save_image(G, path)
print("[*] Samples saved: {}".format(path))
return G
def get_image_from_loader(self):
x, x_target, pose, pose_target, mask, mask_target, part_bbox, part_bbox_target, part_vis, part_vis_target = self.sess.run([self.x, self.x_target, self.pose, self.pose_target,
self.mask_r6, self.mask_r6_target, self.part_bbox, self.part_bbox_target, self.part_vis, self.part_vis_target])
x = unprocess_image(x, 127.5, 127.5)
x_target = unprocess_image(x_target, 127.5, 127.5)
mask = mask*255
mask_target = mask_target*255
return x, x_target, pose, pose_target, mask, mask_target, part_bbox, part_bbox_target, part_vis, part_vis_target
def _load_batch_pair_pose(self, dataset):
data_provider = slim.dataset_data_provider.DatasetDataProvider(dataset, common_queue_capacity=32, common_queue_min=8)
image_raw_0, image_raw_1, label, pose_rcv_0, pose_rcv_1, mask_r4_0, mask_r4_1, mask_r6_0, mask_r6_1, part_bbox_0, part_bbox_1, part_vis_0, part_vis_1 = data_provider.get([
'image_raw_0', 'image_raw_1', 'label', 'pose_peaks_0_rcv', 'pose_peaks_1_rcv', 'pose_mask_r4_0', 'pose_mask_r4_1', 'pose_mask_r6_0', 'pose_mask_r6_1',
'part_bbox_0', 'part_bbox_1', 'part_vis_0', 'part_vis_1'])
image_raw_0 = tf.reshape(image_raw_0, [128, 64, 3])
image_raw_1 = tf.reshape(image_raw_1, [128, 64, 3])
mask_r4_0 = tf.cast(tf.reshape(mask_r4_0, [128, 64, 1]), tf.float32)
mask_r4_1 = tf.cast(tf.reshape(mask_r4_1, [128, 64, 1]), tf.float32)
mask_r6_0 = tf.cast(tf.reshape(mask_r6_0, [128, 64, 1]), tf.float32)
mask_r6_1 = tf.cast(tf.reshape(mask_r6_1, [128, 64, 1]), tf.float32)
part_bbox_0 = tf.reshape(part_bbox_0, [self.part_num, 4])
part_bbox_1 = tf.reshape(part_bbox_1, [self.part_num, 4])
images_0, images_1, poses_rcv_0, poses_rcv_1, masks_r4_0, masks_r4_1, masks_r6_0, masks_r6_1, part_bboxs_0, part_bboxs_1, part_viss_0, part_viss_1 = tf.train.batch([image_raw_0,
image_raw_1, pose_rcv_0, pose_rcv_1, mask_r4_0, mask_r4_1, mask_r6_0, mask_r6_1, part_bbox_0, part_bbox_1, part_vis_0, part_vis_1],
batch_size=self.batch_size, num_threads=self.num_threads, capacity=self.capacityCoff * self.batch_size)
images_0 = process_image(tf.to_float(images_0), 127.5, 127.5)
images_1 = process_image(tf.to_float(images_1), 127.5, 127.5)
poses_0 = tf.cast(coord2channel_simple_rcv(poses_rcv_0, keypoint_num=18, is_normalized=False, img_H=128, img_W=64), tf.float32)
poses_1 = tf.cast(coord2channel_simple_rcv(poses_rcv_1, keypoint_num=18, is_normalized=False, img_H=128, img_W=64), tf.float32)
poses_0 = tf_poseInflate(poses_0, keypoint_num=18, radius=4, img_H=self.img_H, img_W=self.img_W)
poses_1 = tf_poseInflate(poses_1, keypoint_num=18, radius=4, img_H=self.img_H, img_W=self.img_W)
return images_0, images_1, poses_0, poses_1, poses_rcv_0, poses_rcv_1, masks_r4_0, masks_r4_1, masks_r6_0, masks_r6_1, part_bboxs_0, part_bboxs_1, part_viss_0, part_viss_1
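# Variant that appears to split the appearance code into foreground and background parts:
# the encoder additionally takes the r6 pose mask and uses a two-branch Fg/Bg feature
# encoder, while the GAN/L1 losses and the decoder match the parent class.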
class DPIG_Encoder_GAN_BodyROI_FgBg(DPIG_Encoder_GAN_BodyROI):
def build_model(self):
self._define_input()
with tf.variable_scope("Encoder") as vs:
pb_list = tf.split(self.part_bbox, self.part_num, axis=1)
pv_list = tf.split(self.part_vis, self.part_num, axis=1)
## Part 1,8-16 (totally 10)
# indices = [1] + range(8,17)
## Part 1-7 (totally 7)
indices = range(0,7)
select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
select_part_vis = tf.cast(tf.concat([pv_list[i] for i in indices], axis=1), tf.float32)
# self.embs, _, self.Encoder_var = models.GeneratorCNN_ID_Encoder_BodyROIVis_FgBgFeaOneBranch(self.x, self.mask_r6, select_part_bbox, select_part_vis, len(indices), 32,
# self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
self.embs, _, _, self.Encoder_var = models.GeneratorCNN_ID_Encoder_BodyROIVis_FgBgFeaTwoBranch(self.x, self.mask_r6, select_part_bbox, select_part_vis, len(indices), 32,
self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
# self.embs, _, self.Encoder_var = models.GeneratorCNN_ID_Encoder_BodyROIVis_FgBgImgOneBranch(self.x, self.mask_r6, select_part_bbox, select_part_vis, len(indices), 32,
# self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
# self.embs, _, self.Encoder_var = models.GeneratorCNN_ID_Encoder_BodyROIVis_FgBgImgTwoBranch(self.x, self.mask_r6, select_part_bbox, select_part_vis, len(indices), 32,
# self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
self.embs_rep = tf.tile(tf.expand_dims(self.embs,-1), [1, 1, self.img_H*self.img_W])
self.embs_rep = tf.reshape(self.embs_rep, [self.batch_size, -1, self.img_H, self.img_W])
self.embs_rep = nchw_to_nhwc(self.embs_rep)
with tf.variable_scope("ID_AE") as vs:
G, _, self.G_var = self.Generator_fn(
self.embs_rep, self.pose,
self.channel, self.z_num, self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
self.G_var += self.Encoder_var
self.G = denorm_img(G, self.data_format)
D_z_pos = self.Discriminator_fn(tf.transpose( self.x, [0,3,1,2] ), input_dim=3)
D_z_neg = self.Discriminator_fn(tf.transpose( G, [0,3,1,2] ), input_dim=3)
self.D_var = lib.params_with_name('Discriminator.')
self.g_loss, self.d_loss = self._gan_loss(self.wgan_gp, self.Discriminator_fn, D_z_pos, D_z_neg, arch=self.D_arch)
self.PoseMaskLoss = tf.reduce_mean(tf.abs(G - self.x) * (self.mask_r6))
self.L1Loss = tf.reduce_mean(tf.abs(G - self.x))
self.g_loss_only = self.g_loss
self._define_loss_optim()
self.summary_op = tf.summary.merge([
tf.summary.image("G", self.G),
tf.summary.scalar("loss/PoseMaskLoss", self.PoseMaskLoss),
tf.summary.scalar("loss/L1Loss", self.L1Loss),
tf.summary.scalar("loss/g_loss", self.g_loss),
tf.summary.scalar("loss/g_loss_only", self.g_loss_only),
tf.summary.scalar("loss/d_loss", self.d_loss),
tf.summary.scalar("misc/d_lr", self.d_lr),
tf.summary.scalar("misc/g_lr", self.g_lr),
])
def _define_loss_optim(self):
self.g_loss += self.L1Loss * 20
self.g_optim, self.d_optim, self.clip_disc_weights = self._getOptimizer(self.wgan_gp,
self.g_loss, self.d_loss, self.G_var, self.D_var)
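# Pose subnet: a fully-connected autoencoder over the (row, column, visibility) keypoint
# representation, trained with a plain L2 reconstruction loss; the adversarial losses of
# the base class are not used here.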
class DPIG_PoseRCV_AE_BodyROI(DPIG_Encoder_GAN_BodyROI):
def _define_input(self):
self.is_train_tensor = tf.Variable(self.is_train, name='phase')
self.Generator_encoder_fn = models.GeneratorCNN_ID_Encoder
self.Generator_fn = models.GeneratorCNN_ID_UAEAfterResidual
self.wgan_gp = WGAN_GP(DATA_DIR='', MODE='dcgan', DIM=64, BATCH_SIZE=self.batch_size, ITERS=200000,
LAMBDA=10, G_OUTPUT_DIM=self.keypoint_num*3)
self.Discriminator_fn = self._getDiscriminator(self.wgan_gp, arch='FCDis')
def build_model(self):
self._define_input()
with tf.variable_scope("PoseAE") as vs:
## Norm to [-1, 1]
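# Row/column coordinates are scaled by the image height/width into [-1, 1]; the
# visibility flag is kept as-is.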
pose_rcv_norm = tf.reshape(self.pose_rcv, [self.batch_size, self.keypoint_num, 3])
R = tf.cast(tf.slice(pose_rcv_norm, [0,0,0], [-1,-1,1]), tf.float32)/float(self.img_H)*2.0 - 1
C = tf.cast(tf.slice(pose_rcv_norm, [0,0,1], [-1,-1,1]), tf.float32)/float(self.img_W)*2.0 - 1
V = tf.cast(tf.slice(pose_rcv_norm, [0,0,2], [-1,-1,1]), tf.float32)
pose_rcv_norm = tf.concat([R,C,V], axis=-1)
self.pose_embs, self.G_var_encoder = models.PoseEncoderFCRes(tf.reshape(pose_rcv_norm, [self.batch_size,-1]),
z_num=32, repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU, reuse=False)
if self.sample_pose: ## Sampling new poses during testing
self.pose_embs = tf.random_normal(tf.shape(self.pose_embs), mean=0.0, stddev=0.2, dtype=tf.float32)
G_pose_coord, G_pose_visible, self.G_var_decoder = models.PoseDecoderFCRes(self.pose_embs, self.keypoint_num,
repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU, reuse=False)
self.G_var_pose = self.G_var_encoder + self.G_var_decoder
G_pose_rcv = tf.concat([tf.reshape(G_pose_coord, [self.batch_size,self.keypoint_num,2]), tf.expand_dims(G_pose_visible,-1)], axis=-1)
G_pose = coord2channel_simple_rcv(G_pose_rcv, self.keypoint_num, is_normalized=True, img_H=self.img_H, img_W=self.img_W)
self.G_pose = denorm_img(tf.tile(tf.reduce_max(G_pose, axis=-1, keep_dims=True), [1,1,1,3]), self.data_format)
self.G_pose_rcv = G_pose_rcv
self.reconstruct_loss = tf.reduce_mean(tf.square(pose_rcv_norm - G_pose_rcv))
self._define_loss_optim()
self.summary_op = tf.summary.merge([
tf.summary.scalar("loss/reconstruct_loss", self.reconstruct_loss),
])
def _define_loss_optim(self):
self.g_optim = tf.train.AdamOptimizer(learning_rate=self.g_lr, beta1=0.5).minimize(self.reconstruct_loss * 20,
var_list=self.G_var_pose, colocate_gradients_with_ops=True)
def train(self):
x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, pose_rcv_fixed, mask_fixed, mask_target_fixed, \
part_bbox_fixed, part_bbox_target_fixed = self.get_image_from_loader()
save_image(x_fixed, '{}/x_fixed.png'.format(self.model_dir))
save_image(x_target_fixed, '{}/x_target_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_target_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_target_fixed.png'.format(self.model_dir))
save_image(mask_fixed, '{}/mask_fixed.png'.format(self.model_dir))
save_image(mask_target_fixed, '{}/mask_target_fixed.png'.format(self.model_dir))
for step in trange(self.start_step, self.max_step):
if step>0:
self.sess.run([self.g_optim])
fetch_dict = {}
if step % self.log_step == self.log_step-1:
fetch_dict.update({
"summary": self.summary_op
})
result = self.sess.run(fetch_dict)
if step % self.log_step == self.log_step-1:
self.summary_writer.add_summary(result['summary'], step)
self.summary_writer.flush()
if step % self.lr_update_step == self.lr_update_step - 1:
self.sess.run([self.g_lr_update, self.d_lr_update])
if step % (self.log_step * 30) == (self.log_step * 30)-1:
self.saver.save(self.sess, os.path.join(self.model_dir, 'model.ckpt'), global_step=step)
def get_image_from_loader(self):
x, x_target, pose, pose_target, pose_rcv, mask, mask_target, part_bbox, part_bbox_target = self.sess.run([self.x, self.x_target, self.pose, self.pose_target,
self.pose_rcv, self.mask_r6, self.mask_r6_target, self.part_bbox, self.part_bbox_target])
x = unprocess_image(x, 127.5, 127.5)
x_target = unprocess_image(x_target, 127.5, 127.5)
mask = mask*255
mask_target = mask_target*255
return x, x_target, pose, pose_target, pose_rcv, mask, mask_target, part_bbox, part_bbox_target
################### Subnet of Appearance/Pose Sampling #################
class DPIG_Encoder_subSampleAppNetFgBg_GAN_BodyROI(DPIG_Encoder_GAN_BodyROI):
def _define_input(self):
self.is_train_tensor = tf.Variable(self.is_train, name='phase')
self.Generator_encoder_fn = models.GeneratorCNN_ID_Encoder
self.Generator_fn = models.GeneratorCNN_ID_UAEAfterResidual
self.wgan_gp_fg = WGAN_GP(DATA_DIR='', MODE='wgan', DIM=64, BATCH_SIZE=self.batch_size, ITERS=200000,
LAMBDA=10)
self.Discriminator_fg_fn = self._getDiscriminator(self.wgan_gp_fg, arch='FCDis')
self.wgan_gp_bg = WGAN_GP(DATA_DIR='', MODE='wgan', DIM=64, BATCH_SIZE=self.batch_size, ITERS=200000,
LAMBDA=10)
self.Discriminator_bg_fn = self._getDiscriminator(self.wgan_gp_bg, arch='FCDis')
def build_model(self):
self._define_input()
with tf.variable_scope("Encoder") as vs:
pb_list = tf.split(self.part_bbox, self.part_num, axis=1)
pv_list = tf.split(self.part_vis, self.part_num, axis=1)
## Part 1,8-16 (totally 10)
# indices = [0] + range(7,16)
## Part 1-7 (totally 7)
indices = range(0,7)
select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
select_part_vis = tf.cast(tf.concat([pv_list[i] for i in indices], axis=1), tf.float32)
self.embs, _, _, self.Encoder_var = models.GeneratorCNN_ID_Encoder_BodyROIVis_FgBgFeaTwoBranch(self.x, self.mask_r6, select_part_bbox, select_part_vis, len(indices), 32,
self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
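# The first len(indices)*32 dimensions of the embedding are treated as the foreground
# (body-part) code, the remaining dimensions as the background code.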
self.fg_embs = tf.slice(self.embs, [0,0], [-1,len(indices)*32])
self.bg_embs = tf.slice(self.embs, [0,len(indices)*32], [-1,-1])
self.embs_rep = tf.tile(tf.expand_dims(self.embs,-1), [1, 1, self.img_H*self.img_W])
self.embs_rep = tf.reshape(self.embs_rep, [self.batch_size, -1, self.img_H, self.img_W])
self.embs_rep = nchw_to_nhwc(self.embs_rep)
with tf.variable_scope("ID_AE") as vs:
G, _, self.G_var = self.Generator_fn(
self.embs_rep, self.pose,
self.channel, self.z_num, self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
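# Two separate FC mapping networks (presumably driven by Gaussian noise inside
# GaussianFCRes) produce synthetic foreground and background appearance codes that the
# critics below try to tell apart from the encoder's codes.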
with tf.variable_scope("Gaussian_FC_Fg") as vs:
embs_shape = self.fg_embs.get_shape().as_list()
self.app_embs_fg, self.G_var_app_embs_fg = models.GaussianFCRes(embs_shape, embs_shape[-1], repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU)
with tf.variable_scope("Gaussian_FC_Bg") as vs:
embs_shape = self.bg_embs.get_shape().as_list()
self.app_embs_bg, self.G_var_app_embs_bg = models.GaussianFCRes(embs_shape, embs_shape[-1], repeat_num=4, hidden_num=256, data_format=self.data_format, activation_fn=LeakyReLU)
## Adversarial for Gaussian Fg
# encode_pair_fg = tf.concat([self.fg_embs, self.app_embs_fg], 0)
# D_z_embs_fg = self.Discriminator_fg_fn(encode_pair_fg, input_dim=encode_pair_fg.get_shape().as_list()[-1], FC_DIM=512, n_layers=3, reuse=False, name='Fg_FCDis_')
# D_z_pos_embs_fg, D_z_neg_embs_fg = tf.split(D_z_embs_fg, 2)
D_z_pos_embs_fg = self.Discriminator_fg_fn(self.fg_embs, input_dim=self.fg_embs.get_shape().as_list()[-1], FC_DIM=512, n_layers=3, reuse=False, name='Fg_FCDis_')
D_z_neg_embs_fg = self.Discriminator_fg_fn(self.app_embs_fg, input_dim=self.app_embs_fg.get_shape().as_list()[-1], FC_DIM=512, n_layers=3, reuse=False, name='Fg_FCDis_')
self.D_var_embs_fg = lib.params_with_name('Fg_FCDis_Discriminator.')
self.g_loss_embs_fg, self.d_loss_embs_fg = self._gan_loss(self.wgan_gp_fg, self.Discriminator_fg_fn,
D_z_pos_embs_fg, D_z_neg_embs_fg, self.fg_embs, self.app_embs_fg)
## Adversarial for Gaussian Bg
# encode_pair_bg = tf.concat([self.bg_embs, self.app_embs_bg], 0)
# D_z_embs_bg = self.Discriminator_bg_fn(encode_pair_bg, input_dim=encode_pair_bg.get_shape().as_list()[-1], FC_DIM=512, n_layers=3, reuse=False, name='Bg_FCDis_')
# D_z_pos_embs_bg, D_z_neg_embs_bg = tf.split(D_z_embs_bg, 2)
D_z_pos_embs_bg = self.Discriminator_bg_fn(self.bg_embs, input_dim=self.bg_embs.get_shape().as_list()[-1], FC_DIM=512, n_layers=3, reuse=False, name='Bg_FCDis_')
D_z_neg_embs_bg = self.Discriminator_bg_fn(self.app_embs_bg, input_dim=self.app_embs_bg.get_shape().as_list()[-1], FC_DIM=512, n_layers=3, reuse=False, name='Bg_FCDis_')
self.D_var_embs_bg = lib.params_with_name('Bg_FCDis_Discriminator.')
self.g_loss_embs_bg, self.d_loss_embs_bg = self._gan_loss(self.wgan_gp_bg, self.Discriminator_bg_fn,
D_z_pos_embs_bg, D_z_neg_embs_bg, self.bg_embs, self.app_embs_bg)
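# The sampled codes are recombined so that the first half of the batch keeps the foreground
# code fixed while the background varies, and the second half does the opposite, apparently
# to visualize Fg/Bg disentanglement in the generated grid.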
assert (self.batch_size > 0) and (self.batch_size % 2 == 0), 'batch_size must be even and greater than 0'
self.app_embs_fixFg = tf.tile(tf.slice(self.app_embs_fg, [0,0], [1,-1]), [self.batch_size/2,1])
self.app_embs_varyFg = tf.slice(self.app_embs_fg, [self.batch_size/2,0], [self.batch_size/2,-1])
self.app_embs_fixBg = tf.tile(tf.slice(self.app_embs_bg, [0,0], [1,-1]), [self.batch_size/2,1])
self.app_embs_varyBg = tf.slice(self.app_embs_bg, [self.batch_size/2,0], [self.batch_size/2,-1])
self.app_embs = tf.concat([tf.concat([self.app_embs_fixFg,self.app_embs_varyFg],axis=0), tf.concat([self.app_embs_varyBg,self.app_embs_fixBg],axis=0)], axis=-1)
self.embs_app_rep = tf.tile(tf.expand_dims(self.app_embs,-1), [1, 1, self.img_H*self.img_W])
self.embs_app_rep = tf.reshape(self.embs_app_rep, [self.batch_size, -1, self.img_H, self.img_W])
self.embs_app_rep = nchw_to_nhwc(self.embs_app_rep)
with tf.variable_scope("ID_AE") as vs:
# pdb.set_trace()
G_gaussian_app, _, _ = self.Generator_fn(
self.embs_app_rep, self.pose,
self.channel, self.z_num, self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=True)
self.G = denorm_img(G_gaussian_app, self.data_format)
self._define_loss_optim()
self.summary_op = tf.summary.merge([
tf.summary.image("G", self.G),
tf.summary.scalar("loss/g_loss_embs_fg", self.g_loss_embs_fg),
tf.summary.scalar("loss/d_loss_embs_fg", self.d_loss_embs_fg),
tf.summary.scalar("loss/g_loss_embs_bg", self.g_loss_embs_bg),
tf.summary.scalar("loss/d_loss_embs_bg", self.d_loss_embs_bg),
tf.summary.scalar("misc/d_lr", self.d_lr),
tf.summary.scalar("misc/g_lr", self.g_lr),
])
def _define_loss_optim(self):
self.g_optim_embs_fg, self.d_optim_embs_fg, self.clip_disc_weights_embs_fg = self._getOptimizer(self.wgan_gp_fg,
self.g_loss_embs_fg, self.d_loss_embs_fg, self.G_var_app_embs_fg, self.D_var_embs_fg)
self.g_optim_embs_bg, self.d_optim_embs_bg, self.clip_disc_weights_embs_bg = self._getOptimizer(self.wgan_gp_bg,
self.g_loss_embs_bg, self.d_loss_embs_bg, self.G_var_app_embs_bg, self.D_var_embs_bg)
def train(self):
x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, mask_fixed, mask_target_fixed, part_bbox_fixed, part_bbox_target_fixed, part_vis_fixed, part_vis_target_fixed = self.get_image_from_loader()
save_image(x_fixed, '{}/x_fixed.png'.format(self.model_dir))
save_image(x_target_fixed, '{}/x_target_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_target_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_target_fixed.png'.format(self.model_dir))
save_image(mask_fixed, '{}/mask_fixed.png'.format(self.model_dir))
save_image(mask_target_fixed, '{}/mask_target_fixed.png'.format(self.model_dir))
for step in trange(self.start_step, self.max_step):
## Fg
if step>0:
self.sess.run([self.g_optim_embs_fg])
# Train critic
if (self.wgan_gp_fg.MODE == 'dcgan') or (self.wgan_gp_fg.MODE == 'lsgan'):
disc_ITERS = 1
else:
disc_ITERS = self.wgan_gp_fg.CRITIC_ITERS
for i in xrange(disc_ITERS):
self.sess.run(self.d_optim_embs_fg)
if self.wgan_gp_fg.MODE == 'wgan':
self.sess.run(self.clip_disc_weights_embs_fg)
## Bg
if step>0:
self.sess.run([self.g_optim_embs_bg])
# Train critic
if (self.wgan_gp_bg.MODE == 'dcgan') or (self.wgan_gp_bg.MODE == 'lsgan'):
disc_ITERS = 1
else:
disc_ITERS = self.wgan_gp_bg.CRITIC_ITERS
for i in xrange(disc_ITERS):
self.sess.run(self.d_optim_embs_bg)
if self.wgan_gp_bg.MODE == 'wgan':
self.sess.run(self.clip_disc_weights_embs_bg)
fetch_dict = {}
if step % self.log_step == self.log_step-1:
fetch_dict.update({
"summary": self.summary_op
})
result = self.sess.run(fetch_dict)
if step % self.log_step == self.log_step-1:
self.summary_writer.add_summary(result['summary'], step)
self.summary_writer.flush()
if 0==step or step % (self.log_step * 1.5) == (self.log_step * 1.5)-1 or step % (self.log_step * 5) == (self.log_step * 5)-1:
x = process_image(x_fixed, 127.5, 127.5)
x_target = process_image(x_target_fixed, 127.5, 127.5)
self.generate(x, x_target, pose_fixed, part_bbox_fixed, part_vis_fixed, self.model_dir, idx=step)
if step % self.lr_update_step == self.lr_update_step - 1:
self.sess.run([self.g_lr_update, self.d_lr_update])
if step % (self.log_step * 5) == (self.log_step * 5)-1:
self.saver.save(self.sess, os.path.join(self.model_dir, 'model.ckpt'), global_step=step)
class DPIG_subnetSamplePoseRCV_GAN_BodyROI(DPIG_PoseRCV_AE_BodyROI):
def _define_input(self):
self.is_train_tensor = tf.Variable(self.is_train, name='phase')
self.Generator_encoder_fn = models.GeneratorCNN_ID_Encoder
self.Generator_fn = models.GeneratorCNN_ID_UAEAfterResidual
self.wgan_gp_encoder = WGAN_GP(DATA_DIR='', MODE='wgan', DIM=64, BATCH_SIZE=self.batch_size, ITERS=200000,
LAMBDA=10, G_OUTPUT_DIM=self.keypoint_num*3)
self.Discriminator_encoder_fn = self._getDiscriminator(self.wgan_gp_encoder, arch='FCDis')
def build_model(self):
self._define_input()
with tf.variable_scope("PoseAE") as vs:
## Norm to [-1, 1]
pose_rcv_norm = tf.reshape(self.pose_rcv, [self.batch_size, self.keypoint_num, 3])
R = tf.cast(tf.slice(pose_rcv_norm, [0,0,0], [-1,-1,1]), tf.float32)/float(self.img_H)*2.0 - 1
C = tf.cast(tf.slice(pose_rcv_norm, [0,0,1], [-1,-1,1]), tf.float32)/float(self.img_W)*2.0 - 1
V = tf.cast(tf.slice(pose_rcv_norm, [0,0,2], [-1,-1,1]), tf.float32)
pose_rcv_norm = tf.concat([R,C,V], axis=-1)
self.pose_embs, self.G_var_encoder = models.PoseEncoderFCRes(tf.reshape(pose_rcv_norm, [self.batch_size,-1]),
z_num=32, repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU, reuse=False)
embs_shape = self.pose_embs.get_shape().as_list()
# with tf.variable_scope("Gaussian_FC") as vs:
with tf.variable_scope("PoseGaussian") as vs:
self.G_pose_embs, self.G_var_embs = models.GaussianFCRes(embs_shape, embs_shape[-1], repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU)
with tf.variable_scope("PoseAE") as vs:
G_pose_coord, G_pose_visible, self.G_var_decoder = models.PoseDecoderFCRes(self.G_pose_embs, self.keypoint_num,
repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU, reuse=False)
G_pose_rcv = tf.concat([tf.reshape(G_pose_coord, [self.batch_size,self.keypoint_num,2]), tf.expand_dims(G_pose_visible,-1)], axis=-1)
G_pose = coord2channel_simple_rcv(G_pose_rcv, self.keypoint_num, is_normalized=True, img_H=self.img_H, img_W=self.img_W)
self.G_pose = denorm_img(tf.tile(tf.reduce_max(G_pose, axis=-1, keep_dims=True), [1,1,1,3]), self.data_format)
self.G_pose_rcv = G_pose_rcv
## Adversarial for pose_embs
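# An FC critic is trained to distinguish encoder pose embeddings from the Gaussian-mapped
# samples, pushing the mapping network towards the embedding distribution, presumably so
# that sampled embeddings decode to plausible poses at test time.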
self.pose_embs = tf.reshape(self.pose_embs, [self.batch_size,-1])
self.G_pose_embs = tf.reshape(self.G_pose_embs, [self.batch_size,-1])
encode_pair = tf.concat([self.pose_embs, self.G_pose_embs], 0)
self.D_z_embs = self.Discriminator_encoder_fn(encode_pair, input_dim=encode_pair.get_shape().as_list()[-1], FC_DIM=512, n_layers=3, reuse=False, name='Pose_emb_')
self.D_var_embs = lib.params_with_name('Pose_emb_Discriminator.')
D_z_pos, D_z_neg = tf.split(self.D_z_embs, 2)
self.g_loss_embs, self.d_loss_embs = self._gan_loss(self.wgan_gp_encoder, self.Discriminator_encoder_fn,
D_z_pos, D_z_neg, self.pose_embs, self.G_pose_embs)
## Use the pose to generate person with pretrained generator
with tf.variable_scope("Encoder") as vs:
pb_list = tf.split(self.part_bbox, self.part_num, axis=1)
pv_list = tf.split(self.part_vis, self.part_num, axis=1)
## Part 1-7 (totally 7)
indices = range(7)
## Part 1,8-16 (totally 10)
# indices = [1] + range(8,17)
# indices = [0] + range(7,16)
select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
select_part_vis = tf.cast(tf.concat([pv_list[i] for i in indices], axis=1), tf.float32)
# pdb.set_trace()
self.embs, _, _, _ = models.GeneratorCNN_ID_Encoder_BodyROIVis_FgBgFeaTwoBranch(self.x, self.mask_r6, select_part_bbox, select_part_vis, len(indices), 32,
self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
# self.embs, _, _ = GeneratorCNN_ID_Encoder_BodyROIVis(self.x, select_part_bbox, select_part_vis, len(indices), 32,
# self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
# self.embs, _, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROI(self.x, self.part_bbox, 7, 32,
# self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
self.embs_rep = tf.tile(tf.expand_dims(self.embs,-1), [1, 1, self.img_H*self.img_W])
self.embs_rep = tf.reshape(self.embs_rep, [self.batch_size, -1, self.img_H, self.img_W])
self.embs_rep = nchw_to_nhwc(self.embs_rep)
## Use py code to get G_pose_inflated, so the op is out of the graph
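# The sampled pose keypoints are inflated to per-joint heatmap channels with numpy
# (py_poseInflate, see generate() below) and fed back in through this placeholder.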
self.G_pose_inflated = tf.placeholder(tf.float32, shape=G_pose.get_shape())
with tf.variable_scope("ID_AE") as vs:
G, _, _ = self.Generator_fn(
self.embs_rep, self.G_pose_inflated,
self.channel, self.z_num, self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
self.G = denorm_img(G, self.data_format)
self._define_loss_optim()
self.summary_op = tf.summary.merge([
tf.summary.image("G_pose", self.G_pose),
tf.summary.scalar("loss/g_loss_embs", self.g_loss_embs),
tf.summary.scalar("loss/d_loss_embs", self.d_loss_embs),
tf.summary.scalar("misc/d_lr", self.d_lr),
tf.summary.scalar("misc/g_lr", self.g_lr),
tf.summary.histogram("distribution/pose_emb", self.pose_embs),
tf.summary.histogram("distribution/G_pose_embs", self.G_pose_embs),
tf.summary.histogram("distribution/app_emb", self.embs),
])
def _define_loss_optim(self):
self.g_optim_embs, self.d_optim_embs, self.clip_disc_weights_embs = self._getOptimizer(self.wgan_gp_encoder,
self.g_loss_embs, self.d_loss_embs, self.G_var_embs, self.D_var_embs)
def train(self):
x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, pose_rcv_fixed, mask_fixed, mask_target_fixed, \
part_bbox_fixed, part_bbox_target_fixed = self.get_image_from_loader()
save_image(x_fixed, '{}/x_fixed.png'.format(self.model_dir))
save_image(x_target_fixed, '{}/x_target_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_target_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_target_fixed.png'.format(self.model_dir))
save_image(mask_fixed, '{}/mask_fixed.png'.format(self.model_dir))
save_image(mask_target_fixed, '{}/mask_target_fixed.png'.format(self.model_dir))
for step in trange(self.start_step, self.max_step):
# Use GAN for Pose Embedding
if step>0:
self.sess.run([self.g_optim_embs])
# Train critic
if (self.wgan_gp_encoder.MODE == 'dcgan') or (self.wgan_gp_encoder.MODE == 'lsgan'):
disc_ITERS = 1
else:
disc_ITERS = self.wgan_gp_encoder.CRITIC_ITERS
for i in xrange(disc_ITERS):
self.sess.run([self.d_optim_embs])
if self.wgan_gp_encoder.MODE == 'wgan':
self.sess.run(self.clip_disc_weights_embs)
fetch_dict = {}
if step % self.log_step == self.log_step-1:
fetch_dict.update({
"summary": self.summary_op
})
result = self.sess.run(fetch_dict)
if step % self.log_step == self.log_step-1:
self.summary_writer.add_summary(result['summary'], step)
self.summary_writer.flush()
if 0==step or 10==step or 200==step or step % (self.log_step * 3) == (self.log_step * 3)-1:
x = process_image(x_fixed, 127.5, 127.5)
x_target = process_image(x_target_fixed, 127.5, 127.5)
self.generate(x, x_target, pose_fixed, part_bbox_fixed, self.model_dir, idx=step)
if step % self.lr_update_step == self.lr_update_step - 1:
self.sess.run([self.g_lr_update, self.d_lr_update])
if step % (self.log_step * 30) == (self.log_step * 30)-1:
self.saver.save(self.sess, os.path.join(self.model_dir, 'model.ckpt'), global_step=step)
def generate(self, x_fixed, x_target_fixed, pose_fixed, part_bbox_fixed, root_path=None, path=None, idx=None, save=True):
G_pose_rcv, G_pose = self.sess.run([self.G_pose_rcv, self.G_pose])
G_pose_inflated = py_poseInflate(G_pose_rcv, is_normalized=True, radius=4, img_H=128, img_W=64)
G = self.sess.run(self.G, {self.x: x_fixed, self.G_pose_inflated: G_pose_inflated, self.part_bbox: part_bbox_fixed})
G_pose_inflated_img = np.tile(np.amax((G_pose_inflated+1)*127.5, axis=-1, keepdims=True), [1,1,1,3])
ssim_G_x_list = []
for i in xrange(G.shape[0]):
G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
x_gray = rgb2gray(((x_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
ssim_G_x_mean = np.mean(ssim_G_x_list)
if path is None and save:
path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx,ssim_G_x_mean))
save_image(G, path)
print("[*] Samples saved: {}".format(path))
path = os.path.join(root_path, '{}_G_pose.png'.format(idx))
save_image(G_pose, path)
print("[*] Samples saved: {}".format(path))
path = os.path.join(root_path, '{}_G_pose_inflated.png'.format(idx))
save_image(G_pose_inflated_img, path)
print("[*] Samples saved: {}".format(path))
return G
#################################################################################################
####################################### DF train models #########################################
######################### DeepFashion with AppPose BodyROI ################################
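# 256x256 DeepFashion variant: 18 keypoints, 37 part boxes, a configurable discriminator
# architecture (config.D_arch), a one-level-deeper ROI encoder (repeat_num+1) and a
# one-level-shallower decoder (repeat_num-1) compared with the 128x64 classes above.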
class DPIG_Encoder_GAN_BodyROI_256(DPIG_Encoder_GAN_BodyROI):
def __init__(self, config):
self._common_init(config)
self.keypoint_num = 18
self.part_num = 37
self.D_arch = config.D_arch
if 'deepfashion' in config.dataset.lower() or 'df' in config.dataset.lower():
if config.is_train:
self.dataset_obj = deepfashion.get_split('train', config.data_path)
else:
self.dataset_obj = deepfashion.get_split('test', config.data_path)
self.x, self.x_target, self.pose, self.pose_target, self.pose_rcv, self.pose_rcv_target, self.mask, self.mask_target, \
self.part_bbox, self.part_bbox_target, self.part_vis, self.part_vis_target = self._load_batch_pair_pose(self.dataset_obj)
def _define_input(self):
self.is_train_tensor = tf.Variable(self.is_train, name='phase')
self.Generator_fn = models.GeneratorCNN_ID_UAEAfterResidual
self.wgan_gp = WGAN_GP(DATA_DIR='', MODE='dcgan', DIM=64, BATCH_SIZE=self.batch_size, ITERS=200000,
LAMBDA=10, G_OUTPUT_DIM=self.img_H*self.img_W*3)
self.Discriminator_fn = self._getDiscriminator(self.wgan_gp, arch=self.D_arch)
def build_model(self):
self._define_input()
with tf.variable_scope("Encoder") as vs:
pb_list = tf.split(self.part_bbox, self.part_num, axis=1)
pv_list = tf.split(self.part_vis, self.part_num, axis=1)
## Part 1-7 (totally 7)
indices = range(7)
select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
select_part_vis = tf.cast(tf.concat([pv_list[i] for i in indices], axis=1), tf.float32)
self.embs, _, self.Encoder_var = models.GeneratorCNN_ID_Encoder_BodyROIVis(self.x,select_part_bbox, select_part_vis, len(indices), 32,
self.repeat_num+1, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, roi_size=64, reuse=False)
## Part 1,4-8 (totally 6)
# # indices = [1] + range(4,9)
# indices = [0] + range(3,8)
## Part 1,8-16 (totally 10)
# indices = [0] + range(7,16)
self.embs_rep = tf.tile(tf.expand_dims(self.embs,-1), [1, 1, self.img_H*self.img_W])
self.embs_rep = tf.reshape(self.embs_rep, [self.batch_size, -1, self.img_H, self.img_W])
self.embs_rep = nchw_to_nhwc(self.embs_rep)
with tf.variable_scope("ID_AE") as vs:
G, _, self.G_var = self.Generator_fn(
self.embs_rep, self.pose,
self.channel, self.z_num, self.repeat_num-1, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
self.G_var += self.Encoder_var
self.G = denorm_img(G, self.data_format)
pair = tf.concat([self.x, G], 0)
self.D_z = self.Discriminator_fn(tf.transpose( pair, [0,3,1,2] ), input_dim=3)
self.D_var = lib.params_with_name('Discriminator.')
D_z_pos, D_z_neg = tf.split(self.D_z, 2)
self.g_loss, self.d_loss = self._gan_loss(self.wgan_gp, self.Discriminator_fn, D_z_pos, D_z_neg, arch=self.D_arch)
self.PoseMaskLoss = tf.reduce_mean(tf.abs(G - self.x) * (self.mask))
self.L1Loss = tf.reduce_mean(tf.abs(G - self.x))
self.g_loss_only = self.g_loss
self._define_loss_optim()
self.summary_op = tf.summary.merge([
tf.summary.image("G", self.G),
tf.summary.scalar("loss/PoseMaskLoss", self.PoseMaskLoss),
tf.summary.scalar("loss/L1Loss", self.L1Loss),
tf.summary.scalar("loss/g_loss", self.g_loss),
tf.summary.scalar("loss/g_loss_only", self.g_loss_only),
tf.summary.scalar("loss/d_loss", self.d_loss),
tf.summary.scalar("misc/d_lr", self.d_lr),
tf.summary.scalar("misc/g_lr", self.g_lr),
])
def _define_loss_optim(self):
self.g_loss += self.L1Loss * 20
self.g_optim, self.d_optim, self.clip_disc_weights = self._getOptimizer(self.wgan_gp,
self.g_loss, self.d_loss, self.G_var, self.D_var)
def train(self):
x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, pose_rcv_fixed, pose_rcv_target_fixed, mask_fixed, mask_target_fixed, \
part_bbox_fixed, part_bbox_target_fixed, part_vis_fixed, part_vis_target_fixed = self.get_image_from_loader()
save_image(x_fixed, '{}/x_fixed.png'.format(self.model_dir))
save_image(x_target_fixed, '{}/x_target_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_target_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_target_fixed.png'.format(self.model_dir))
save_image(mask_fixed, '{}/mask_fixed.png'.format(self.model_dir))
save_image(mask_target_fixed, '{}/mask_target_fixed.png'.format(self.model_dir))
for step in trange(self.start_step, self.max_step):
if step>0:
self.sess.run([self.g_optim])
# Train critic
if (self.wgan_gp.MODE == 'dcgan') or (self.wgan_gp.MODE == 'lsgan'):
disc_ITERS = 1
else:
disc_ITERS = self.wgan_gp.CRITIC_ITERS
for i in xrange(disc_ITERS):
self.sess.run(self.d_optim)
if self.wgan_gp.MODE == 'wgan':
self.sess.run(self.clip_disc_weights)
fetch_dict = {}
if step % self.log_step == self.log_step-1:
fetch_dict.update({
"summary": self.summary_op
})
# "k_t": self.k_t,
result = self.sess.run(fetch_dict)
if step % self.log_step == self.log_step-1:
self.summary_writer.add_summary(result['summary'], step)
self.summary_writer.flush()
if 0==step or 10==step or step % (self.log_step * 3) == (self.log_step * 3)-1:
x = process_image(x_fixed, 127.5, 127.5)
x_target = process_image(x_target_fixed, 127.5, 127.5)
self.generate(x, x_target, pose_fixed, part_bbox_fixed, part_vis_fixed, self.model_dir, idx=step)
if step % self.lr_update_step == self.lr_update_step - 1:
self.sess.run([self.g_lr_update, self.d_lr_update])
if step % (self.log_step * 30) == (self.log_step * 30)-1:
self.saver.save(self.sess, os.path.join(self.model_dir, 'model.ckpt'), global_step=step)
def test(self):
test_result_dir = os.path.join(self.model_dir, self.test_dir_name)
test_result_dir_x = os.path.join(test_result_dir, 'x')
test_result_dir_x_target = os.path.join(test_result_dir, 'x_target')
test_result_dir_G = os.path.join(test_result_dir, 'G')
test_result_dir_pose = os.path.join(test_result_dir, 'pose')
test_result_dir_pose_target = os.path.join(test_result_dir, 'pose_target')
test_result_dir_mask = os.path.join(test_result_dir, 'mask')
test_result_dir_mask_target = os.path.join(test_result_dir, 'mask_target')
if not os.path.exists(test_result_dir):
os.makedirs(test_result_dir)
if not os.path.exists(test_result_dir_x):
os.makedirs(test_result_dir_x)
if not os.path.exists(test_result_dir_x_target):
os.makedirs(test_result_dir_x_target)
if not os.path.exists(test_result_dir_G):
os.makedirs(test_result_dir_G)
if not os.path.exists(test_result_dir_pose):
os.makedirs(test_result_dir_pose)
if not os.path.exists(test_result_dir_pose_target):
os.makedirs(test_result_dir_pose_target)
if not os.path.exists(test_result_dir_mask):
os.makedirs(test_result_dir_mask)
if not os.path.exists(test_result_dir_mask_target):
os.makedirs(test_result_dir_mask_target)
for i in xrange(100): ## for test Samples
# for i in xrange(800): ## for IS score
x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, pose_rcv_fixed, pose_rcv_target_fixed, mask_fixed, mask_target_fixed, \
part_bbox_fixed, part_bbox_target_fixed, part_vis_fixed, part_vis_target_fixed = self.get_image_from_loader()
x = process_image(x_fixed, 127.5, 127.5)
x_target = process_image(x_target_fixed, 127.5, 127.5)
if 0==i:
x_fake = self.generate(x, x_target, pose_fixed, part_bbox_fixed, part_vis_fixed, test_result_dir, idx=i, save=True)
else:
x_fake = self.generate(x, x_target, pose_fixed, part_bbox_fixed, part_vis_fixed, test_result_dir, idx=i, save=False)
p = (np.amax(pose_fixed, axis=-1, keepdims=False)+1.0)*127.5
pt = (np.amax(pose_target_fixed, axis=-1, keepdims=False)+1.0)*127.5
for j in xrange(self.batch_size):
idx = i*self.batch_size+j
im = Image.fromarray(x_fixed[j,:].astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_x, idx))
im = Image.fromarray(x_target_fixed[j,:].astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_x_target, idx))
im = Image.fromarray(x_fake[j,:].astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_G, idx))
im = Image.fromarray(p[j,:].astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_pose, idx))
im = Image.fromarray(pt[j,:].astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_pose_target, idx))
# pdb.set_trace()
im = Image.fromarray(mask_fixed[j,:].squeeze().astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_mask, idx))
im = Image.fromarray(mask_target_fixed[j,:].squeeze().astype(np.uint8))
im.save('%s/%05d.png'%(test_result_dir_mask_target, idx))
# pdb.set_trace()
if 0==i:
save_image(x_fixed, '{}/x_fixed.png'.format(test_result_dir))
save_image(x_target_fixed, '{}/x_target_fixed.png'.format(test_result_dir))
save_image(mask_fixed, '{}/mask_fixed.png'.format(test_result_dir))
save_image(mask_target_fixed, '{}/mask_target_fixed.png'.format(test_result_dir))
save_image((np.amax(pose_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_fixed.png'.format(test_result_dir))
save_image((np.amax(pose_target_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_target_fixed.png'.format(test_result_dir))
def generate(self, x_fixed, x_target_fixed, pose_fixed, part_bbox_fixed, part_vis_fixed, root_path=None, path=None, idx=None, save=True):
G = self.sess.run(self.G, {self.x: x_fixed, self.pose: pose_fixed, self.part_bbox: part_bbox_fixed, self.part_vis: part_vis_fixed})
ssim_G_x_list = []
for i in xrange(G.shape[0]):
G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
x_gray = rgb2gray(((x_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
ssim_G_x_mean = np.mean(ssim_G_x_list)
if path is None and save:
path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx,ssim_G_x_mean))
save_image(G, path)
print("[*] Samples saved: {}".format(path))
return G
# def generate(self, x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, part_bbox_fixed, root_path=None, path=None, idx=None, save=True):
# G = self.sess.run(self.G, {self.x: x_fixed, self.pose: pose_fixed, self.pose_target: pose_target_fixed, self.part_bbox: part_bbox_fixed})
# ssim_G_x_list = []
# for i in xrange(G.shape[0]):
# G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
# x_gray = rgb2gray(((x_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
# ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
# ssim_G_x_mean = np.mean(ssim_G_x_list)
# if path is None and save:
# path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx,ssim_G_x_mean))
# save_image(G, path)
# print("[*] Samples saved: {}".format(path))
# return G
def get_image_from_loader(self):
x, x_target, pose, pose_target, pose_rcv, pose_rcv_target, mask, mask_target, part_bbox, part_bbox_target, part_vis, part_vis_target = self.sess.run([self.x, self.x_target, self.pose, self.pose_target,
self.pose_rcv, self.pose_rcv_target, self.mask, self.mask_target, self.part_bbox, self.part_bbox_target, self.part_vis, self.part_vis_target])
x = unprocess_image(x, 127.5, 127.5)
x_target = unprocess_image(x_target, 127.5, 127.5)
mask = mask*255
mask_target = mask_target*255
return x, x_target, pose, pose_target, pose_rcv, pose_rcv_target, mask, mask_target, part_bbox, part_bbox_target, part_vis, part_vis_target
def _load_batch_pair_pose(self, dataset):
data_provider = slim.dataset_data_provider.DatasetDataProvider(dataset, common_queue_capacity=32, common_queue_min=8)
image_raw_0, image_raw_1, label, pose_rcv_0, pose_rcv_1, mask_0, mask_1, part_bbox_0, part_bbox_1, part_vis_0, part_vis_1 = data_provider.get([
'image_raw_0', 'image_raw_1', 'label', 'pose_peaks_0_rcv', 'pose_peaks_1_rcv', 'pose_mask_r4_0', 'pose_mask_r4_1',
'part_bbox_0', 'part_bbox_1', 'part_vis_0', 'part_vis_1'])
image_raw_0 = tf.reshape(image_raw_0, [256, 256, 3])
image_raw_1 = tf.reshape(image_raw_1, [256, 256, 3])
mask_0 = tf.cast(tf.reshape(mask_0, [256, 256, 1]), tf.float32)
mask_1 = tf.cast(tf.reshape(mask_1, [256, 256, 1]), tf.float32)
part_bbox_0 = tf.reshape(part_bbox_0, [self.part_num, 4])
part_bbox_1 = tf.reshape(part_bbox_1, [self.part_num, 4])
images_0, images_1, poses_rcv_0, poses_rcv_1, masks_0, masks_1, part_bboxs_0, part_bboxs_1, part_viss_0, part_viss_1 = tf.train.batch([image_raw_0,
image_raw_1, pose_rcv_0, pose_rcv_1, mask_0, mask_1, part_bbox_0, part_bbox_1, part_vis_0, part_vis_1],
batch_size=self.batch_size, num_threads=self.num_threads, capacity=self.capacityCoff * self.batch_size)
images_0 = process_image(tf.to_float(images_0), 127.5, 127.5)
images_1 = process_image(tf.to_float(images_1), 127.5, 127.5)
poses_0 = tf.cast(coord2channel_simple_rcv(poses_rcv_0, keypoint_num=18, is_normalized=False, img_H=256, img_W=256), tf.float32)
poses_1 = tf.cast(coord2channel_simple_rcv(poses_rcv_1, keypoint_num=18, is_normalized=False, img_H=256, img_W=256), tf.float32)
poses_0 = tf_poseInflate(poses_0, keypoint_num=18, radius=4, img_H=self.img_H, img_W=self.img_W)
poses_1 = tf_poseInflate(poses_1, keypoint_num=18, radius=4, img_H=self.img_H, img_W=self.img_W)
return images_0, images_1, poses_0, poses_1, poses_rcv_0, poses_rcv_1, masks_0, masks_1, part_bboxs_0, part_bboxs_1, part_viss_0, part_viss_1
class DPIG_Encoder_subSampleAppNet_GAN_BodyROI_256(DPIG_Encoder_GAN_BodyROI_256):
def init_net(self):
self.build_model()
if self.pretrained_path is not None:
var1 = tf.get_collection(tf.GraphKeys.VARIABLES, scope='Encoder')
var2 = tf.get_collection(tf.GraphKeys.VARIABLES, scope='ID_AE')
self.saverPart = tf.train.Saver(var1+var2, max_to_keep=20)
self.saver = tf.train.Saver(max_to_keep=20)
self.summary_writer = tf.summary.FileWriter(self.model_dir)
sv = tf.train.Supervisor(logdir=self.model_dir,
is_chief=True,
saver=None,
summary_op=None,
summary_writer=self.summary_writer,
global_step=self.step,
save_model_secs=0,
ready_for_local_init_op=None)
gpu_options = tf.GPUOptions(allow_growth=False)
sess_config = tf.ConfigProto(allow_soft_placement=True,
gpu_options=gpu_options)
self.sess = sv.prepare_or_wait_for_session(config=sess_config)
if self.pretrained_path is not None:
self.saverPart.restore(self.sess, self.pretrained_path)
print('restored from pretrained_path:', self.pretrained_path)
elif self.ckpt_path is not None:
self.saver.restore(self.sess, self.ckpt_path)
print('restored from ckpt_path:', self.ckpt_path)
def _define_input(self):
self.is_train_tensor = tf.Variable(self.is_train, name='phase')
self.Generator_encoder_fn = models.GeneratorCNN_ID_Encoder
self.Generator_fn = models.GeneratorCNN_ID_UAEAfterResidual
self.wgan_gp_encoder = WGAN_GP(DATA_DIR='', MODE='wgan', DIM=64, BATCH_SIZE=self.batch_size, ITERS=200000,
LAMBDA=10, G_OUTPUT_DIM=7*32)
self.Discriminator_encoder_fn = self._getDiscriminator(self.wgan_gp_encoder, arch='FCDis')
def build_model(self):
self._define_input()
with tf.variable_scope("Encoder") as vs:
pb_list = tf.split(self.part_bbox, self.part_num, axis=1)
pv_list = tf.split(self.part_vis, self.part_num, axis=1)
## Part 1-7 (totally 7)
indices = range(7)
select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
self.embs, _, self.Encoder_var = models.GeneratorCNN_ID_Encoder_BodyROI(self.x, select_part_bbox, len(indices), 32,
self.repeat_num+1, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
self.embs_rep = tf.tile(tf.expand_dims(self.embs,-1), [1, 1, self.img_H*self.img_W])
self.embs_rep = tf.reshape(self.embs_rep, [self.batch_size, -1, self.img_H, self.img_W])
self.embs_rep = nchw_to_nhwc(self.embs_rep)
with tf.variable_scope("ID_AE") as vs:
G, _, self.G_var = self.Generator_fn(
self.embs_rep, self.pose,
self.channel, self.z_num, self.repeat_num-1, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
embs_shape = self.embs.get_shape().as_list()
with tf.variable_scope("Gaussian_FC") as vs:
self.app_embs, self.G_var_app_embs = models.GaussianFCRes(embs_shape, embs_shape[-1], repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU)
## Adversarial for Gaussian
encode_pair = tf.concat([self.embs, self.app_embs], 0)
self.D_z_embs = self.Discriminator_encoder_fn(encode_pair, input_dim=encode_pair.get_shape().as_list()[-1], FC_DIM=512, n_layers=3, reuse=False, name='FCDis_')
self.D_var_embs = lib.params_with_name('FCDis_Discriminator.')
D_z_pos_embs, D_z_neg_embs = tf.split(self.D_z_embs, 2)
self.g_loss_embs, self.d_loss_embs = self._gan_loss(self.wgan_gp_encoder, self.Discriminator_encoder_fn,
D_z_pos_embs, D_z_neg_embs, self.embs, self.app_embs)
self.embs_app_rep = tf.tile(tf.expand_dims(self.app_embs,-1), [1, 1, self.img_H*self.img_W])
self.embs_app_rep = tf.reshape(self.embs_app_rep, [self.batch_size, -1, self.img_H, self.img_W])
self.embs_app_rep = nchw_to_nhwc(self.embs_app_rep)
with tf.variable_scope("ID_AE") as vs:
G_gaussian_app, _, _ = self.Generator_fn(
self.embs_app_rep, self.pose,
self.channel, self.z_num, self.repeat_num-1, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=True)
self.G = denorm_img(G_gaussian_app, self.data_format)
self._define_loss_optim()
self.summary_op = tf.summary.merge([
tf.summary.image("G", self.G),
tf.summary.scalar("loss/g_loss_embs", self.g_loss_embs),
tf.summary.scalar("loss/d_loss_embs", self.d_loss_embs),
tf.summary.scalar("misc/d_lr", self.d_lr),
tf.summary.scalar("misc/g_lr", self.g_lr),
])
def _define_loss_optim(self):
self.g_optim_embs, self.d_optim_embs, self.clip_disc_weights_embs = self._getOptimizer(self.wgan_gp_encoder,
self.g_loss_embs, self.d_loss_embs, self.G_var_app_embs, self.D_var_embs)
def train(self):
x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, pose_rcv_fixed, pose_rcv_target_fixed, mask_fixed, mask_target_fixed, part_bbox_fixed, part_bbox_target_fixed, part_vis_fixed, part_vis_target_fixed = self.get_image_from_loader()
save_image(x_fixed, '{}/x_fixed.png'.format(self.model_dir))
save_image(x_target_fixed, '{}/x_target_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_target_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_target_fixed.png'.format(self.model_dir))
save_image(mask_fixed, '{}/mask_fixed.png'.format(self.model_dir))
save_image(mask_target_fixed, '{}/mask_target_fixed.png'.format(self.model_dir))
for step in trange(self.start_step, self.max_step):
if step>0:
self.sess.run([self.g_optim_embs])
# Train critic
if (self.wgan_gp_encoder.MODE == 'dcgan') or (self.wgan_gp_encoder.MODE == 'lsgan'):
disc_ITERS = 1
else:
disc_ITERS = self.wgan_gp_encoder.CRITIC_ITERS
for i in xrange(disc_ITERS):
self.sess.run(self.d_optim_embs)
if self.wgan_gp_encoder.MODE == 'wgan':
self.sess.run(self.clip_disc_weights_embs)
fetch_dict = {}
if step % self.log_step == self.log_step-1:
fetch_dict.update({
"summary": self.summary_op
})
result = self.sess.run(fetch_dict)
if step % self.log_step == self.log_step-1:
self.summary_writer.add_summary(result['summary'], step)
self.summary_writer.flush()
if 0==step or step % (self.log_step * 3) == (self.log_step * 3)-1:
x = process_image(x_fixed, 127.5, 127.5)
x_target = process_image(x_target_fixed, 127.5, 127.5)
self.generate(x, x_target, pose_fixed, part_bbox_fixed, part_vis_fixed, self.model_dir, idx=step)
if step % self.lr_update_step == self.lr_update_step - 1:
self.sess.run([self.g_lr_update, self.d_lr_update])
if step % (self.log_step * 30) == (self.log_step * 30)-1:
self.saver.save(self.sess, os.path.join(self.model_dir, 'model.ckpt'), global_step=step)
class DPIG_PoseRCV_AE_BodyROI_256(DPIG_Encoder_GAN_BodyROI_256):
def init_net(self):
self.build_model()
if self.pretrained_path is not None:
var1 = tf.get_collection(tf.GraphKeys.VARIABLES, scope='Encoder')
var2 = tf.get_collection(tf.GraphKeys.VARIABLES, scope='ID_AE')
self.saverPart = tf.train.Saver(var1+var2, max_to_keep=20)
self.saver = tf.train.Saver(max_to_keep=20)
self.summary_writer = tf.summary.FileWriter(self.model_dir)
sv = tf.train.Supervisor(logdir=self.model_dir,
is_chief=True,
saver=None,
summary_op=None,
summary_writer=self.summary_writer,
global_step=self.step,
save_model_secs=0,
ready_for_local_init_op=None)
gpu_options = tf.GPUOptions(allow_growth=True)
sess_config = tf.ConfigProto(allow_soft_placement=True,
gpu_options=gpu_options)
self.sess = sv.prepare_or_wait_for_session(config=sess_config)
if self.pretrained_path is not None:
self.saverPart.restore(self.sess, self.pretrained_path)
print('restored from pretrained_path:', self.pretrained_path)
elif self.ckpt_path is not None:
self.saver.restore(self.sess, self.ckpt_path)
print('restored from ckpt_path:', self.ckpt_path)
def _define_input(self):
self.is_train_tensor = tf.Variable(self.is_train, name='phase')
self.Generator_encoder_fn = models.GeneratorCNN_ID_Encoder
self.Generator_fn = models.GeneratorCNN_ID_UAEAfterResidual
self.wgan_gp = WGAN_GP(DATA_DIR='', MODE='dcgan', DIM=64, BATCH_SIZE=self.batch_size, ITERS=200000,
LAMBDA=10, G_OUTPUT_DIM=self.keypoint_num*3)
self.Discriminator_fn = self._getDiscriminator(self.wgan_gp, arch='FCDis')
def build_model(self):
self._define_input()
with tf.variable_scope("PoseAE") as vs:
## Norm to [-1, 1]
pose_rcv_norm = tf.reshape(self.pose_rcv, [self.batch_size, self.keypoint_num, 3])
R = tf.cast(tf.slice(pose_rcv_norm, [0,0,0], [-1,-1,1]), tf.float32)/float(self.img_H)*2.0 - 1
C = tf.cast(tf.slice(pose_rcv_norm, [0,0,1], [-1,-1,1]), tf.float32)/float(self.img_W)*2.0 - 1
V = tf.cast(tf.slice(pose_rcv_norm, [0,0,2], [-1,-1,1]), tf.float32)
pose_rcv_norm = tf.concat([R,C,V], axis=-1)
self.pose_embs, self.G_var_encoder = models.PoseEncoderFCRes(tf.reshape(pose_rcv_norm, [self.batch_size,-1]),
z_num=32, repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU, reuse=False)
if self.sample_pose: ## Sampling new poses during testing
self.pose_embs = tf.random_normal(tf.shape(self.pose_embs), mean=0.0, stddev=0.2, dtype=tf.float32)
G_pose_coord, G_pose_visible, self.G_var_decoder = models.PoseDecoderFCRes(self.pose_embs, self.keypoint_num,
repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU, reuse=False)
self.G_var_pose = self.G_var_encoder + self.G_var_decoder
G_pose_rcv = tf.concat([tf.reshape(G_pose_coord, [self.batch_size,self.keypoint_num,2]), tf.expand_dims(G_pose_visible,-1)], axis=-1)
self.G_pose_rcv = G_pose_rcv
self.reconstruct_loss = tf.reduce_mean(tf.square(pose_rcv_norm - G_pose_rcv))
self._define_loss_optim()
self.summary_op = tf.summary.merge([
tf.summary.scalar("loss/reconstruct_loss", self.reconstruct_loss),
])
def _define_loss_optim(self):
self.g_optim = tf.train.AdamOptimizer(learning_rate=self.g_lr, beta1=0.5).minimize(self.reconstruct_loss * 20,
var_list=self.G_var_pose, colocate_gradients_with_ops=True)
def train(self):
x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, pose_rcv_fixed, pose_rcv_target_fixed, mask_fixed, mask_target_fixed, \
part_bbox_fixed, part_bbox_target_fixed, part_vis_fixed, part_vis_target_fixed = self.get_image_from_loader()
save_image(x_fixed, '{}/x_fixed.png'.format(self.model_dir))
save_image(x_target_fixed, '{}/x_target_fixed.png'.format(self.model_dir))
# save_image((np.amax(pose_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_fixed.png'.format(self.model_dir))
# save_image((np.amax(pose_target_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_target_fixed.png'.format(self.model_dir))
save_image(mask_fixed, '{}/mask_fixed.png'.format(self.model_dir))
save_image(mask_target_fixed, '{}/mask_target_fixed.png'.format(self.model_dir))
for step in trange(self.start_step, self.max_step):
if step>0:
self.sess.run([self.g_optim])
if step % self.log_step == self.log_step-1:
fetch_dict = {
"summary": self.summary_op,
"reconstruct_loss": self.reconstruct_loss
}
result = self.sess.run(fetch_dict)
self.summary_writer.add_summary(result['summary'], step)
self.summary_writer.flush()
print('reconstruct_loss:%f'%result['reconstruct_loss'])
if step % self.lr_update_step == self.lr_update_step - 1:
self.sess.run([self.g_lr_update, self.d_lr_update])
if step % (self.log_step * 30) == (self.log_step * 30)-1:
self.saver.save(self.sess, os.path.join(self.model_dir, 'model.ckpt'), global_step=step)
class DPIG_subnetSamplePoseRCV_GAN_BodyROI_256(DPIG_PoseRCV_AE_BodyROI_256):
def init_net(self):
self.build_model()
if self.pretrained_path is not None:
var1 = tf.get_collection(tf.GraphKeys.VARIABLES, scope='Encoder')
var2 = tf.get_collection(tf.GraphKeys.VARIABLES, scope='ID_AE')
self.saverPart = tf.train.Saver(var1+var2, max_to_keep=20)
if self.pretrained_poseAE_path is not None:
var = tf.get_collection(tf.GraphKeys.VARIABLES, scope='PoseAE')
self.saverPoseAEPart = tf.train.Saver(var, max_to_keep=20)
self.saver = tf.train.Saver(max_to_keep=20)
self.summary_writer = tf.summary.FileWriter(self.model_dir)
sv = tf.train.Supervisor(logdir=self.model_dir,
is_chief=True,
saver=None,
summary_op=None,
summary_writer=self.summary_writer,
global_step=self.step,
save_model_secs=0,
ready_for_local_init_op=None)
gpu_options = tf.GPUOptions(allow_growth=True)
sess_config = tf.ConfigProto(allow_soft_placement=True,
gpu_options=gpu_options)
self.sess = sv.prepare_or_wait_for_session(config=sess_config)
if self.pretrained_path is not None:
self.saverPart.restore(self.sess, self.pretrained_path)
print('restored from pretrained_path:', self.pretrained_path)
if self.pretrained_poseAE_path is not None:
self.saverPoseAEPart.restore(self.sess, self.pretrained_poseAE_path)
print('restored from pretrained_poseAE_path:', self.pretrained_poseAE_path)
if self.ckpt_path is not None:
self.saver.restore(self.sess, self.ckpt_path)
print('restored from ckpt_path:', self.ckpt_path)
def _define_input(self):
self.is_train_tensor = tf.Variable(self.is_train, name='phase')
self.Generator_encoder_fn = models.GeneratorCNN_ID_Encoder
self.Generator_fn = models.GeneratorCNN_ID_UAEAfterResidual
self.wgan_gp_encoder = WGAN_GP(DATA_DIR='', MODE='wgan', DIM=64, BATCH_SIZE=self.batch_size, ITERS=200000,
LAMBDA=10, G_OUTPUT_DIM=self.keypoint_num*3)
self.Discriminator_encoder_fn = self._getDiscriminator(self.wgan_gp_encoder, arch='FCDis')
def build_model(self):
self._define_input()
with tf.variable_scope("PoseAE") as vs:
## Norm to [-1, 1]
pose_rcv_norm = tf.reshape(self.pose_rcv, [self.batch_size, self.keypoint_num, 3])
R = tf.cast(tf.slice(pose_rcv_norm, [0,0,0], [-1,-1,1]), tf.float32)/float(self.img_H)*2.0 - 1
C = tf.cast(tf.slice(pose_rcv_norm, [0,0,1], [-1,-1,1]), tf.float32)/float(self.img_W)*2.0 - 1
V = tf.cast(tf.slice(pose_rcv_norm, [0,0,2], [-1,-1,1]), tf.float32)
pose_rcv_norm = tf.concat([R,C,V], axis=-1)
self.pose_embs, self.G_var_encoder = models.PoseEncoderFCRes(tf.reshape(pose_rcv_norm, [self.batch_size,-1]),
z_num=32, repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU, reuse=False)
embs_shape = self.pose_embs.get_shape().as_list()
# with tf.variable_scope("Gaussian_FC") as vs:
with tf.variable_scope("PoseGaussian") as vs:
self.G_pose_embs, self.G_var_embs = models.GaussianFCRes(embs_shape, embs_shape[-1], repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU)
# self.G_pose_embs, self.G_var_embs = models.GaussianFCRes(embs_shape, embs_shape[-1], repeat_num=6, hidden_num=1024, data_format=self.data_format, activation_fn=LeakyReLU)
with tf.variable_scope("PoseAE") as vs:
G_pose_coord, G_pose_visible, self.G_var_decoder = models.PoseDecoderFCRes(self.G_pose_embs, self.keypoint_num,
repeat_num=4, hidden_num=512, data_format=self.data_format, activation_fn=LeakyReLU, reuse=False)
G_pose_rcv = tf.concat([tf.reshape(G_pose_coord, [self.batch_size,self.keypoint_num,2]), tf.expand_dims(G_pose_visible,-1)], axis=-1)
G_pose = coord2channel_simple_rcv(G_pose_rcv, self.keypoint_num, is_normalized=True, img_H=self.img_H, img_W=self.img_W)
self.G_pose = denorm_img(tf.tile(tf.reduce_max(G_pose, axis=-1, keep_dims=True), [1,1,1,3]), self.data_format)
self.G_pose_rcv = G_pose_rcv
## Adversarial for pose_embs
self.pose_embs = tf.reshape(self.pose_embs, [self.batch_size,-1])
self.G_pose_embs = tf.reshape(self.G_pose_embs, [self.batch_size,-1])
encode_pair = tf.concat([self.pose_embs, self.G_pose_embs], 0)
self.D_z_embs = self.Discriminator_encoder_fn(encode_pair, input_dim=encode_pair.get_shape().as_list()[-1], FC_DIM=512, n_layers=3, reuse=False, name='Pose_emb_')
self.D_var_embs = lib.params_with_name('Pose_emb_Discriminator.')
D_z_pos, D_z_neg = tf.split(self.D_z_embs, 2)
self.g_loss_embs, self.d_loss_embs = self._gan_loss(self.wgan_gp_encoder, self.Discriminator_encoder_fn,
D_z_pos, D_z_neg, self.pose_embs, self.G_pose_embs)
# ## Use the pose to generate person with pretrained generator
# with tf.variable_scope("Encoder") as vs:
# pb_list = tf.split(self.part_bbox, self.part_num, axis=1)
# pv_list = tf.split(self.part_vis, self.part_num, axis=1)
# ## Part 1,1-7 (totally 7)
# indices = range(7)
# ## Part 1,8-16 (totally 10)
# # indices = [1] + range(8,17)
# # indices = [0] + range(7,16)
# select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
# select_part_vis = tf.cast(tf.concat([pv_list[i] for i in indices], axis=1), tf.float32)
# # pdb.set_trace()
# self.embs, _, _, _ = models.GeneratorCNN_ID_Encoder_BodyROIVis_FgBgFeaTwoBranch(self.x, self.mask_r6, select_part_bbox, select_part_vis, len(indices), 32,
# self.repeat_num-1, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
# # self.embs, _, _ = models.GeneratorCNN_ID_Encoder_BodyROIVis(self.x, select_part_bbox, select_part_vis, len(indices), 32,
# # self.repeat_num-1, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
# # self.embs, _, self.Encoder_var = models.GeneratorCNN_ID_Encoder_BodyROI(self.x, self.part_bbox, 7, 32,
# # self.repeat_num-1, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
# self.embs_rep = tf.tile(tf.expand_dims(self.embs,-1), [1, 1, self.img_H*self.img_W])
# self.embs_rep = tf.reshape(self.embs_rep, [self.batch_size, -1, self.img_H, self.img_W])
# self.embs_rep = nchw_to_nhwc(self.embs_rep)
# ## Use py code to get G_pose_inflated, so the op is out of the graph
# self.G_pose_inflated = tf.placeholder(tf.float32, shape=G_pose.get_shape())
# with tf.variable_scope("ID_AE") as vs:
# G, _, _ = self.Generator_fn(
# self.embs_rep, self.G_pose_inflated,
# self.channel, self.z_num, self.repeat_num-1, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
# self.G = denorm_img(G, self.data_format)
self._define_loss_optim()
self.summary_op = tf.summary.merge([
tf.summary.image("G_pose", self.G_pose),
tf.summary.scalar("loss/g_loss_embs", self.g_loss_embs),
tf.summary.scalar("loss/d_loss_embs", self.d_loss_embs),
tf.summary.scalar("misc/d_lr", self.d_lr),
tf.summary.scalar("misc/g_lr", self.g_lr),
# tf.summary.histogram("distribution/pose_emb", self.pose_embs),
# tf.summary.histogram("distribution/G_pose_embs", self.G_pose_embs),
# tf.summary.histogram("distribution/app_emb", self.embs),
])
def _define_loss_optim(self):
self.g_optim_embs, self.d_optim_embs, self.clip_disc_weights_embs = self._getOptimizer(self.wgan_gp_encoder,
self.g_loss_embs, self.d_loss_embs, self.G_var_embs, self.D_var_embs)
def train(self):
x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, pose_rcv_fixed, pose_rcv_target_fixed, mask_fixed, mask_target_fixed, \
part_bbox_fixed, part_bbox_target_fixed, part_vis_fixed, part_vis_target_fixed = self.get_image_from_loader()
save_image(x_fixed, '{}/x_fixed.png'.format(self.model_dir))
save_image(x_target_fixed, '{}/x_target_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_fixed.png'.format(self.model_dir))
save_image((np.amax(pose_target_fixed, axis=-1, keepdims=True)+1.0)*127.5, '{}/pose_target_fixed.png'.format(self.model_dir))
save_image(mask_fixed, '{}/mask_fixed.png'.format(self.model_dir))
save_image(mask_target_fixed, '{}/mask_target_fixed.png'.format(self.model_dir))
for step in trange(self.start_step, self.max_step):
# Use GAN for Pose Embedding
if step>0:
self.sess.run([self.g_optim_embs])
# Train critic
if (self.wgan_gp_encoder.MODE == 'dcgan') or (self.wgan_gp_encoder.MODE == 'lsgan'):
disc_ITERS = 1
else:
disc_ITERS = self.wgan_gp_encoder.CRITIC_ITERS
for i in xrange(disc_ITERS):
self.sess.run([self.d_optim_embs])
if self.wgan_gp_encoder.MODE == 'wgan':
self.sess.run(self.clip_disc_weights_embs)
fetch_dict = {}
if step % self.log_step == self.log_step-1:
fetch_dict.update({
"summary": self.summary_op
})
result = self.sess.run(fetch_dict)
if step % self.log_step == self.log_step-1:
self.summary_writer.add_summary(result['summary'], step)
self.summary_writer.flush()
if 0==step or 10==step or 200==step or step % (self.log_step * 3) == (self.log_step * 3)-1:
x = process_image(x_fixed, 127.5, 127.5)
x_target = process_image(x_target_fixed, 127.5, 127.5)
self.generate(x, x_target, pose_fixed, part_bbox_fixed, self.model_dir, idx=step)
if step % self.lr_update_step == self.lr_update_step - 1:
self.sess.run([self.g_lr_update, self.d_lr_update])
if step % (self.log_step * 30) == (self.log_step * 30)-1:
self.saver.save(self.sess, os.path.join(self.model_dir, 'model.ckpt'), global_step=step)
def generate(self, x_fixed, x_target_fixed, pose_fixed, part_bbox_fixed, root_path=None, path=None, idx=None, save=True):
G_pose_rcv, G_pose = self.sess.run([self.G_pose_rcv, self.G_pose])
G_pose_inflated = py_poseInflate(G_pose_rcv, is_normalized=True, radius=4, img_H=256, img_W=256)
# G = self.sess.run(self.G, {self.x: x_fixed, self.G_pose_inflated: G_pose_inflated, self.part_bbox: part_bbox_fixed})
G_pose_inflated_img = np.tile(np.amax((G_pose_inflated+1)*127.5, axis=-1, keepdims=True), [1,1,1,3])
# ssim_G_x_list = []
# for i in xrange(G_pose.shape[0]):
# G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
# x_gray = rgb2gray(((x_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
# ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
# ssim_G_x_mean = np.mean(ssim_G_x_list)
if path is None and save:
# path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx,ssim_G_x_mean))
# save_image(G, path)
# print("[*] Samples saved: {}".format(path))
path = os.path.join(root_path, '{}_G_pose.png'.format(idx))
save_image(G_pose, path)
print("[*] Samples saved: {}".format(path))
path = os.path.join(root_path, '{}_G_pose_inflated.png'.format(idx))
save_image(G_pose_inflated_img, path)
print("[*] Samples saved: {}".format(path))
return G_pose
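# --- Illustrative sketch (not part of the original file): the generator/critic
# --- alternation schedule used by the train() loops above, isolated from TensorFlow.
# --- The 'mode' and 'critic_iters' arguments mirror wgan_gp.MODE / wgan_gp.CRITIC_ITERS.
def _example_update_schedule(mode, critic_iters, clip_weights=False):
    # DCGAN/LSGAN take a single discriminator step per generator step; WGAN variants
    # run several critic steps, and plain WGAN additionally clips the critic weights.
    n_disc = 1 if mode in ('dcgan', 'lsgan') else critic_iters
    steps = ['g_optim'] + ['d_optim'] * n_disc
    if mode == 'wgan' and clip_weights:
        steps.append('clip_disc_weights')
    return steps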
| 59.751574 | 210 | 0.632776 |
4a20d93affcc8ad2ed570ef54764661ad5943e47 | 16,763 | py | Python | original/fast_jtnn/chemutils.py | sehandev/JT-VAE | 8e60eb560034bbc23d9989938d36b08a739edbb6 | ["MIT"] | 4 | 2021-03-31T08:15:54.000Z | 2022-02-04T03:42:52.000Z | original/fast_jtnn/chemutils.py | sehandev/JT-VAE | 8e60eb560034bbc23d9989938d36b08a739edbb6 | ["MIT"] | 1 | 2021-03-31T08:30:23.000Z | 2021-03-31T08:30:23.000Z | original/fast_jtnn/chemutils.py | sehandev/JT-VAE | 8e60eb560034bbc23d9989938d36b08a739edbb6 | ["MIT"] | null | null | null |
import rdkit
import rdkit.Chem as Chem
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from collections import defaultdict
from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers, StereoEnumerationOptions
from .vocab import Vocab
MST_MAX_WEIGHT = 100
MAX_NCAND = 2000
def set_atommap(mol, num=0):
for atom in mol.GetAtoms():
atom.SetAtomMapNum(num)
def get_mol(smiles):
mol = Chem.MolFromSmiles(smiles)
if mol is None:
return None
Chem.Kekulize(mol)
return mol
def get_smiles(mol):
return Chem.MolToSmiles(mol, kekuleSmiles=True)
def decode_stereo(smiles2D):
mol = Chem.MolFromSmiles(smiles2D)
dec_isomers = list(EnumerateStereoisomers(mol))
dec_isomers = [Chem.MolFromSmiles(Chem.MolToSmiles(mol, isomericSmiles=True)) for mol in dec_isomers]
smiles3D = [Chem.MolToSmiles(mol, isomericSmiles=True) for mol in dec_isomers]
chiralN = [atom.GetIdx() for atom in dec_isomers[0].GetAtoms() if int(atom.GetChiralTag()) > 0 and atom.GetSymbol() == "N"]
if len(chiralN) > 0:
for mol in dec_isomers:
for idx in chiralN:
mol.GetAtomWithIdx(idx).SetChiralTag(Chem.rdchem.ChiralType.CHI_UNSPECIFIED)
smiles3D.append(Chem.MolToSmiles(mol, isomericSmiles=True))
return smiles3D
def sanitize(mol):
try:
smiles = get_smiles(mol)
mol = get_mol(smiles)
except Exception as e:
return None
return mol
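# --- Illustrative sketch (not part of the original file): how the helpers above fit
# --- together. get_mol() parses a SMILES string and kekulizes it, get_smiles() writes the
# --- kekulized form back out, and sanitize() re-parses a partially built molecule to make
# --- sure it is chemically valid. The aspirin SMILES below is an arbitrary example.
def _example_kekule_roundtrip(smiles="CC(=O)Oc1ccccc1C(=O)O"):
    mol = get_mol(smiles)          # None if the SMILES cannot be parsed
    if mol is None:
        return None
    return get_smiles(mol)         # kekulized SMILES string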
def copy_atom(atom):
new_atom = Chem.Atom(atom.GetSymbol())
new_atom.SetFormalCharge(atom.GetFormalCharge())
new_atom.SetAtomMapNum(atom.GetAtomMapNum())
return new_atom
def copy_edit_mol(mol):
new_mol = Chem.RWMol(Chem.MolFromSmiles(''))
for atom in mol.GetAtoms():
new_atom = copy_atom(atom)
new_mol.AddAtom(new_atom)
for bond in mol.GetBonds():
a1 = bond.GetBeginAtom().GetIdx()
a2 = bond.GetEndAtom().GetIdx()
bt = bond.GetBondType()
new_mol.AddBond(a1, a2, bt)
return new_mol
def get_clique_mol(mol, atoms):
smiles = Chem.MolFragmentToSmiles(mol, atoms, kekuleSmiles=True)
new_mol = Chem.MolFromSmiles(smiles, sanitize=False)
new_mol = copy_edit_mol(new_mol).GetMol()
new_mol = sanitize(new_mol) #We assume this is not None
return new_mol
def tree_decomp(mol):
n_atoms = mol.GetNumAtoms()
if n_atoms == 1: #special case
return [[0]], []
cliques = []
for bond in mol.GetBonds():
a1 = bond.GetBeginAtom().GetIdx()
a2 = bond.GetEndAtom().GetIdx()
if not bond.IsInRing():
cliques.append([a1,a2])
ssr = [list(x) for x in Chem.GetSymmSSSR(mol)]
cliques.extend(ssr)
nei_list = [[] for i in range(n_atoms)]
for i in range(len(cliques)):
for atom in cliques[i]:
nei_list[atom].append(i)
#Merge Rings with intersection > 2 atoms
for i in range(len(cliques)):
if len(cliques[i]) <= 2: continue
for atom in cliques[i]:
for j in nei_list[atom]:
if i >= j or len(cliques[j]) <= 2: continue
inter = set(cliques[i]) & set(cliques[j])
if len(inter) > 2:
cliques[i].extend(cliques[j])
cliques[i] = list(set(cliques[i]))
cliques[j] = []
cliques = [c for c in cliques if len(c) > 0]
nei_list = [[] for i in range(n_atoms)]
for i in range(len(cliques)):
for atom in cliques[i]:
nei_list[atom].append(i)
#Build edges and add singleton cliques
edges = defaultdict(int)
for atom in range(n_atoms):
if len(nei_list[atom]) <= 1:
continue
cnei = nei_list[atom]
bonds = [c for c in cnei if len(cliques[c]) == 2]
rings = [c for c in cnei if len(cliques[c]) > 4]
if len(bonds) > 2 or (len(bonds) == 2 and len(cnei) > 2): #In general, if len(cnei) >= 3, a singleton should be added, but 1 bond + 2 ring is currently not dealt with.
cliques.append([atom])
c2 = len(cliques) - 1
for c1 in cnei:
edges[(c1,c2)] = 1
elif len(rings) > 2: #Multiple (n>2) complex rings
cliques.append([atom])
c2 = len(cliques) - 1
for c1 in cnei:
edges[(c1,c2)] = MST_MAX_WEIGHT - 1
else:
for i in range(len(cnei)):
for j in range(i + 1, len(cnei)):
c1,c2 = cnei[i],cnei[j]
inter = set(cliques[c1]) & set(cliques[c2])
if edges[(c1,c2)] < len(inter):
edges[(c1,c2)] = len(inter) #cnei[i] < cnei[j] by construction
edges = [u + (MST_MAX_WEIGHT-v,) for u,v in edges.items()]
if len(edges) == 0:
return cliques, edges
#Compute Maximum Spanning Tree
row,col,data = list(zip(*edges))
n_clique = len(cliques)
clique_graph = csr_matrix( (data,(row,col)), shape=(n_clique,n_clique) )
junc_tree = minimum_spanning_tree(clique_graph)
row,col = junc_tree.nonzero()
edges = [(row[i],col[i]) for i in range(len(row))]
return (cliques, edges)
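# --- Illustrative sketch (not part of the original file): tree_decomp() is the junction
# --- tree decomposition step. It returns the clique list (non-ring bonds, ring systems,
# --- and singleton atoms added at branching points) together with the edges of the
# --- maximum spanning tree that connects overlapping cliques.
def _example_tree_decomp(smiles="O=C(C)Oc1ccccc1C(=O)O"):
    mol = get_mol(smiles)
    cliques, edges = tree_decomp(mol)
    # Each clique is a list of atom indices; each edge is a (clique_i, clique_j) pair.
    return len(cliques), len(edges)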
def atom_equal(a1, a2):
return a1.GetSymbol() == a2.GetSymbol() and a1.GetFormalCharge() == a2.GetFormalCharge()
#Bond type not considered because all aromatic (so SINGLE matches DOUBLE)
def ring_bond_equal(b1, b2, reverse=False):
b1 = (b1.GetBeginAtom(), b1.GetEndAtom())
if reverse:
b2 = (b2.GetEndAtom(), b2.GetBeginAtom())
else:
b2 = (b2.GetBeginAtom(), b2.GetEndAtom())
return atom_equal(b1[0], b2[0]) and atom_equal(b1[1], b2[1])
def attach_mols(ctr_mol, neighbors, prev_nodes, nei_amap):
prev_nids = [node.nid for node in prev_nodes]
for nei_node in prev_nodes + neighbors:
nei_id,nei_mol = nei_node.nid,nei_node.mol
amap = nei_amap[nei_id]
for atom in nei_mol.GetAtoms():
if atom.GetIdx() not in amap:
new_atom = copy_atom(atom)
amap[atom.GetIdx()] = ctr_mol.AddAtom(new_atom)
if nei_mol.GetNumBonds() == 0:
nei_atom = nei_mol.GetAtomWithIdx(0)
ctr_atom = ctr_mol.GetAtomWithIdx(amap[0])
ctr_atom.SetAtomMapNum(nei_atom.GetAtomMapNum())
else:
for bond in nei_mol.GetBonds():
a1 = amap[bond.GetBeginAtom().GetIdx()]
a2 = amap[bond.GetEndAtom().GetIdx()]
if ctr_mol.GetBondBetweenAtoms(a1, a2) is None:
ctr_mol.AddBond(a1, a2, bond.GetBondType())
elif nei_id in prev_nids: #father node overrides
ctr_mol.RemoveBond(a1, a2)
ctr_mol.AddBond(a1, a2, bond.GetBondType())
return ctr_mol
def local_attach(ctr_mol, neighbors, prev_nodes, amap_list):
ctr_mol = copy_edit_mol(ctr_mol)
nei_amap = {nei.nid:{} for nei in prev_nodes + neighbors}
for nei_id,ctr_atom,nei_atom in amap_list:
nei_amap[nei_id][nei_atom] = ctr_atom
ctr_mol = attach_mols(ctr_mol, neighbors, prev_nodes, nei_amap)
return ctr_mol.GetMol()
#This version records idx mapping between ctr_mol and nei_mol
def enum_attach(ctr_mol, nei_node, amap, singletons):
nei_mol,nei_idx = nei_node.mol,nei_node.nid
att_confs = []
black_list = [atom_idx for nei_id,atom_idx,_ in amap if nei_id in singletons]
ctr_atoms = [atom for atom in ctr_mol.GetAtoms() if atom.GetIdx() not in black_list]
ctr_bonds = [bond for bond in ctr_mol.GetBonds()]
if nei_mol.GetNumBonds() == 0: #neighbor singleton
nei_atom = nei_mol.GetAtomWithIdx(0)
used_list = [atom_idx for _,atom_idx,_ in amap]
for atom in ctr_atoms:
if atom_equal(atom, nei_atom) and atom.GetIdx() not in used_list:
new_amap = amap + [(nei_idx, atom.GetIdx(), 0)]
att_confs.append( new_amap )
elif nei_mol.GetNumBonds() == 1: #neighbor is a bond
bond = nei_mol.GetBondWithIdx(0)
bond_val = int(bond.GetBondTypeAsDouble())
b1,b2 = bond.GetBeginAtom(), bond.GetEndAtom()
for atom in ctr_atoms:
#Optimize if atom is carbon (other atoms may change valence)
if atom.GetAtomicNum() == 6 and atom.GetTotalNumHs() < bond_val:
continue
if atom_equal(atom, b1):
new_amap = amap + [(nei_idx, atom.GetIdx(), b1.GetIdx())]
att_confs.append( new_amap )
elif atom_equal(atom, b2):
new_amap = amap + [(nei_idx, atom.GetIdx(), b2.GetIdx())]
att_confs.append( new_amap )
else:
#intersection is an atom
for a1 in ctr_atoms:
for a2 in nei_mol.GetAtoms():
if atom_equal(a1, a2):
#Optimize if atom is carbon (other atoms may change valence)
if a1.GetAtomicNum() == 6 and a1.GetTotalNumHs() + a2.GetTotalNumHs() < 4:
continue
new_amap = amap + [(nei_idx, a1.GetIdx(), a2.GetIdx())]
att_confs.append( new_amap )
        #intersection is a bond
if ctr_mol.GetNumBonds() > 1:
for b1 in ctr_bonds:
for b2 in nei_mol.GetBonds():
if ring_bond_equal(b1, b2):
new_amap = amap + [(nei_idx, b1.GetBeginAtom().GetIdx(), b2.GetBeginAtom().GetIdx()), (nei_idx, b1.GetEndAtom().GetIdx(), b2.GetEndAtom().GetIdx())]
att_confs.append( new_amap )
if ring_bond_equal(b1, b2, reverse=True):
new_amap = amap + [(nei_idx, b1.GetBeginAtom().GetIdx(), b2.GetEndAtom().GetIdx()), (nei_idx, b1.GetEndAtom().GetIdx(), b2.GetBeginAtom().GetIdx())]
att_confs.append( new_amap )
return att_confs
#Try rings first: Speed-Up
def enum_assemble(node, neighbors, prev_nodes=[], prev_amap=[]):
all_attach_confs = []
singletons = [nei_node.nid for nei_node in neighbors + prev_nodes if nei_node.mol.GetNumAtoms() == 1]
def search(cur_amap, depth):
if len(all_attach_confs) > MAX_NCAND:
return
if depth == len(neighbors):
all_attach_confs.append(cur_amap)
return
nei_node = neighbors[depth]
cand_amap = enum_attach(node.mol, nei_node, cur_amap, singletons)
cand_smiles = set()
candidates = []
for amap in cand_amap:
cand_mol = local_attach(node.mol, neighbors[:depth+1], prev_nodes, amap)
cand_mol = sanitize(cand_mol)
if cand_mol is None:
continue
smiles = get_smiles(cand_mol)
if smiles in cand_smiles:
continue
cand_smiles.add(smiles)
candidates.append(amap)
if len(candidates) == 0:
return
for new_amap in candidates:
search(new_amap, depth + 1)
search(prev_amap, 0)
cand_smiles = set()
candidates = []
aroma_score = []
for amap in all_attach_confs:
cand_mol = local_attach(node.mol, neighbors, prev_nodes, amap)
cand_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cand_mol))
smiles = Chem.MolToSmiles(cand_mol)
if smiles in cand_smiles or check_singleton(cand_mol, node, neighbors) == False:
continue
cand_smiles.add(smiles)
candidates.append( (smiles,amap) )
aroma_score.append( check_aroma(cand_mol, node, neighbors) )
return candidates, aroma_score
def check_singleton(cand_mol, ctr_node, nei_nodes):
rings = [node for node in nei_nodes + [ctr_node] if node.mol.GetNumAtoms() > 2]
singletons = [node for node in nei_nodes + [ctr_node] if node.mol.GetNumAtoms() == 1]
if len(singletons) > 0 or len(rings) == 0: return True
n_leaf2_atoms = 0
for atom in cand_mol.GetAtoms():
nei_leaf_atoms = [a for a in atom.GetNeighbors() if not a.IsInRing()] #a.GetDegree() == 1]
if len(nei_leaf_atoms) > 1:
n_leaf2_atoms += 1
return n_leaf2_atoms == 0
def check_aroma(cand_mol, ctr_node, nei_nodes):
rings = [node for node in nei_nodes + [ctr_node] if node.mol.GetNumAtoms() >= 3]
if len(rings) < 2: return 0 #Only multi-ring system needs to be checked
get_nid = lambda x: 0 if x.is_leaf else x.nid
benzynes = [get_nid(node) for node in nei_nodes + [ctr_node] if node.smiles in Vocab.benzynes]
penzynes = [get_nid(node) for node in nei_nodes + [ctr_node] if node.smiles in Vocab.penzynes]
if len(benzynes) + len(penzynes) == 0:
return 0 #No specific aromatic rings
n_aroma_atoms = 0
for atom in cand_mol.GetAtoms():
if atom.GetAtomMapNum() in benzynes+penzynes and atom.GetIsAromatic():
n_aroma_atoms += 1
if n_aroma_atoms >= len(benzynes) * 4 + len(penzynes) * 3:
return 1000
else:
return -0.001
#Only used for debugging purpose
def dfs_assemble(cur_mol, global_amap, fa_amap, cur_node, fa_node):
fa_nid = fa_node.nid if fa_node is not None else -1
prev_nodes = [fa_node] if fa_node is not None else []
children = [nei for nei in cur_node.neighbors if nei.nid != fa_nid]
neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]
neighbors = sorted(neighbors, key=lambda x:x.mol.GetNumAtoms(), reverse=True)
singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]
neighbors = singletons + neighbors
cur_amap = [(fa_nid,a2,a1) for nid,a1,a2 in fa_amap if nid == cur_node.nid]
cands = enum_assemble(cur_node, neighbors, prev_nodes, cur_amap)
cand_smiles,cand_amap = list(zip(*cands))
label_idx = cand_smiles.index(cur_node.label)
label_amap = cand_amap[label_idx]
for nei_id,ctr_atom,nei_atom in label_amap:
if nei_id == fa_nid:
continue
global_amap[nei_id][nei_atom] = global_amap[cur_node.nid][ctr_atom]
cur_mol = attach_mols(cur_mol, children, [], global_amap) #father is already attached
for nei_node in children:
if not nei_node.is_leaf:
dfs_assemble(cur_mol, global_amap, label_amap, nei_node, cur_node)
if __name__ == "__main__":
import sys
from .mol_tree import MolTree
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
smiles = ["O=C1[C@@H]2C=C[C@@H](C=CC2)C1(c1ccccc1)c1ccccc1","O=C([O-])CC[C@@]12CCCC[C@]1(O)OC(=O)CC2", "ON=C1C[C@H]2CC3(C[C@@H](C1)c1ccccc12)OCCO3", "C[C@H]1CC(=O)[C@H]2[C@@]3(O)C(=O)c4cccc(O)c4[C@@H]4O[C@@]43[C@@H](O)C[C@]2(O)C1", 'Cc1cc(NC(=O)CSc2nnc3c4ccccc4n(C)c3n2)ccc1Br', 'CC(C)(C)c1ccc(C(=O)N[C@H]2CCN3CCCc4cccc2c43)cc1', "O=c1c2ccc3c(=O)n(-c4nccs4)c(=O)c4ccc(c(=O)n1-c1nccs1)c2c34", "O=C(N1CCc2c(F)ccc(F)c2C1)C1(O)Cc2ccccc2C1"]
def tree_test():
for s in sys.stdin:
s = s.split()[0]
tree = MolTree(s)
print('-------------------------------------------')
print(s)
for node in tree.nodes:
print(node.smiles, [x.smiles for x in node.neighbors])
def decode_test():
wrong = 0
for tot,s in enumerate(sys.stdin):
s = s.split()[0]
tree = MolTree(s)
tree.recover()
cur_mol = copy_edit_mol(tree.nodes[0].mol)
global_amap = [{}] + [{} for node in tree.nodes]
global_amap[1] = {atom.GetIdx():atom.GetIdx() for atom in cur_mol.GetAtoms()}
dfs_assemble(cur_mol, global_amap, [], tree.nodes[0], None)
cur_mol = cur_mol.GetMol()
cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
set_atommap(cur_mol)
dec_smiles = Chem.MolToSmiles(cur_mol)
gold_smiles = Chem.MolToSmiles(Chem.MolFromSmiles(s))
if gold_smiles != dec_smiles:
print(gold_smiles, dec_smiles)
wrong += 1
print(wrong, tot + 1)
def enum_test():
for s in sys.stdin:
s = s.split()[0]
tree = MolTree(s)
tree.recover()
tree.assemble()
for node in tree.nodes:
if node.label not in node.cands:
print(tree.smiles)
print(node.smiles, [x.smiles for x in node.neighbors])
print(node.label, len(node.cands))
def count():
cnt,n = 0,0
for s in sys.stdin:
s = s.split()[0]
tree = MolTree(s)
tree.recover()
tree.assemble()
for node in tree.nodes:
cnt += len(node.cands)
n += len(tree.nodes)
#print cnt * 1.0 / n
count()
| 38.983721 | 440 | 0.602756 |
4a20dac80ece721a7004f3f28f0b0aa53c85edf9 | 4,899 | py | Python | client_code/Switch/__init__.py | jshaffstall/anvil-extras | 521eec32a98cc038fea27379ff6c0ab024f31c7f | ["MIT"] | null | null | null | client_code/Switch/__init__.py | jshaffstall/anvil-extras | 521eec32a98cc038fea27379ff6c0ab024f31c7f | ["MIT"] | null | null | null | client_code/Switch/__init__.py | jshaffstall/anvil-extras | 521eec32a98cc038fea27379ff6c0ab024f31c7f | ["MIT"] | null | null | null |
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# This software is published at https://github.com/anvilistas/anvil-extras
from anvil import CheckBox, app
from anvil.js import get_dom_node as _get_dom_node
from anvil.js.window import document as _document
from ..utils._component_helpers import _get_rgb, _html_injector
__version__ = "2.0.1"
primary = app.theme_colors.get("Primary 500", "#2196F3")
css = """
.switch,
.switch * {
-webkit-tap-highlight-color: transparent;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
.switch label {
cursor: pointer;
}
.switch label input[type=checkbox] {
opacity: 0;
width: 0;
height: 0;
}
.switch label input[type=checkbox]:checked+.lever {
background-color: rgba(var(--color), .5);
}
.switch label input[type=checkbox]:checked+.lever:after,
.switch label input[type=checkbox]:checked+.lever:before {
left: 18px;
}
.switch label input[type=checkbox]:checked+.lever:after {
background-color: rgb(var(--color));
}
.switch label .lever {
content: "";
display: inline-block;
position: relative;
width: 36px;
height: 14px;
background-color: rgba(0,0,0,0.38);
border-radius: 15px;
margin-right: 10px;
-webkit-transition: background 0.3s ease;
transition: background 0.3s ease;
vertical-align: middle;
margin: 0 16px;
}
.switch label .lever:after,
.switch label .lever:before {
content: "";
position: absolute;
display: inline-block;
width: 20px;
height: 20px;
border-radius: 50%;
left: 0;
top: -3px;
-webkit-transition: left 0.3s ease, background 0.3s ease, -webkit-box-shadow 0.1s ease, -webkit-transform 0.1s ease;
transition: left 0.3s ease, background 0.3s ease, -webkit-box-shadow 0.1s ease, -webkit-transform 0.1s ease;
transition: left 0.3s ease, background 0.3s ease, box-shadow 0.1s ease, transform 0.1s ease;
transition: left 0.3s ease, background 0.3s ease, box-shadow 0.1s ease, transform 0.1s ease, -webkit-box-shadow 0.1s ease, -webkit-transform 0.1s ease;
}
.switch label .lever:before {
background-color: rgb(var(--color), 0.15);
}
.switch label .lever:after {
background-color: #F1F1F1;
-webkit-box-shadow: 0 3px 1px -2px rgba(0,0,0,0.2),0px 2px 2px 0 rgba(0,0,0,0.14),0px 1px 5px 0 rgba(0,0,0,0.12);
box-shadow: 0 3px 1px -2px rgba(0,0,0,0.2),0px 2px 2px 0 rgba(0,0,0,0.14),0px 1px 5px 0 rgba(0,0,0,0.12);
}
input[type=checkbox]:checked:not(:disabled) ~ .lever:active::before,
input[type=checkbox]:checked:not(:disabled).tabbed:focus ~ .lever::before {
-webkit-transform: scale(2.4);
transform: scale(2.4);
background-color: rgb(var(--color), 0.15);
}
input[type=checkbox]:not(:disabled) ~ .lever:active:before,
input[type=checkbox]:not(:disabled).tabbed:focus ~ .lever::before {
-webkit-transform: scale(2.4);
transform: scale(2.4);
background-color: rgba(0,0,0,0.08);
}
.switch input[type=checkbox][disabled]+.lever {
cursor: default;
background-color: rgba(0,0,0,0.12);
}
.switch label input[type=checkbox][disabled]+.lever:after,
.switch label input[type=checkbox][disabled]:checked+.lever:after {
background-color: #949494;
}
"""
_html_injector.css(css)
class Switch(CheckBox):
def __init__(self, checked_color=primary, text_pre="", text_post="", **properties):
dom_node = self._dom_node = _get_dom_node(self)
dom_node.querySelector(".checkbox").classList.add("switch")
span = dom_node.querySelector("span")
span.classList.add("lever")
span.removeAttribute("style")
input = dom_node.querySelector("input")
input.removeAttribute("style")
input.style.marginTop = 0
label = dom_node.querySelector("label")
label.style.padding = "7px 0"
self._textnode_pre = _document.createTextNode(text_pre)
self._textnode_post = _document.createTextNode(text_post)
label.prepend(self._textnode_pre)
label.append(self._textnode_post)
self.checked_color = checked_color or primary
@property
def checked_color(self):
return self._checked_color
@checked_color.setter
def checked_color(self, value):
self._checked_color = value
self._dom_node.style.setProperty("--color", _get_rgb(value))
@property
def text_pre(self):
return self._textnode_pre.textContent
@text_pre.setter
def text_pre(self, value):
self._textnode_pre.textContent = value
@property
def text_post(self):
return self._textnode_post.textContent
@text_post.setter
def text_post(self, value):
self._textnode_post.textContent = value
text = text_post # override the CheckBox property
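# --- Illustrative usage sketch (not part of the original module). The container argument
# --- is any Anvil container (e.g. a form or ColumnPanel); the colour and labels below are
# --- arbitrary examples.
def _example_add_switch(container):
    switch = Switch(checked_color="#4CAF50", text_pre="Off", text_post="On")
    container.add_component(switch)  # standard Anvil container API
    return switch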
| 30.61875 | 155 | 0.683609 |
4a20dafa46ccc66cd904c280fe20e96de1554d62 | 13,872 | py | Python | samples/openapi3/client/petstore/python/petstore_api/model/dog.py | deleonio/openapi-generator | 6eaba7d0886ff4d683d826ae8324270bacbd3233 | ["Apache-2.0"] | 3 | 2021-09-19T20:02:36.000Z | 2021-12-15T13:11:39.000Z | samples/openapi3/client/petstore/python/petstore_api/model/dog.py | deleonio/openapi-generator | 6eaba7d0886ff4d683d826ae8324270bacbd3233 | ["Apache-2.0"] | 29 | 2021-04-07T07:38:57.000Z | 2022-03-30T12:10:22.000Z | samples/openapi3/client/petstore/python/petstore_api/model/dog.py | deleonio/openapi-generator | 6eaba7d0886ff4d683d826ae8324270bacbd3233 | ["Apache-2.0"] | 1 | 2021-08-11T10:03:42.000Z | 2021-08-11T10:03:42.000Z |
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from petstore_api.exceptions import ApiAttributeError
def lazy_import():
from petstore_api.model.animal import Animal
from petstore_api.model.dog_all_of import DogAllOf
globals()['Animal'] = Animal
globals()['DogAllOf'] = DogAllOf
class Dog(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_name': (str,), # noqa: E501
'breed': (str,), # noqa: E501
'color': (str,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_name': val}
attribute_map = {
'class_name': 'className', # noqa: E501
'breed': 'breed', # noqa: E501
'color': 'color', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Dog - a model defined in OpenAPI
Keyword Args:
class_name (str):
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
breed (str): [optional] # noqa: E501
color (str): [optional] if omitted the server will use the default value of "red" # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Dog - a model defined in OpenAPI
Keyword Args:
class_name (str):
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
breed (str): [optional] # noqa: E501
color (str): [optional] if omitted the server will use the default value of "red" # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
Animal,
DogAllOf,
],
'oneOf': [
],
}
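# --- Illustrative usage sketch (not part of the generated module). Per the docstrings
# --- above, class_name is the only required keyword; breed and color are optional
# --- (color defaults server-side to "red"). The values below are arbitrary examples.
def _example_build_dog():
    return Dog(class_name="Dog", breed="bulldog", color="white")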
| 42.552147 | 174 | 0.578431 |
4a20db3424f66aad6eedebb00800e7a1c7b53d46 | 14,707 | py | Python | scam/consensus/multiprocess_validation.py | grayfallstown/scam-blockchain | 2183020cc74bbd1a63dda6eb0d0e73c2a3429594 | ["Apache-2.0"] | 12 | 2021-08-04T14:35:02.000Z | 2022-02-09T04:31:44.000Z | scam/consensus/multiprocess_validation.py | grayfallstown/scam-blockchain | 2183020cc74bbd1a63dda6eb0d0e73c2a3429594 | ["Apache-2.0"] | 8 | 2021-08-04T20:58:10.000Z | 2021-09-11T17:08:28.000Z | scam/consensus/multiprocess_validation.py | grayfallstown/scam-blockchain | 2183020cc74bbd1a63dda6eb0d0e73c2a3429594 | ["Apache-2.0"] | 4 | 2021-07-28T09:50:55.000Z | 2022-03-15T08:43:53.000Z |
import asyncio
import logging
import traceback
from concurrent.futures.process import ProcessPoolExecutor
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Tuple, Union, Callable
from scam.consensus.block_header_validation import validate_finished_header_block
from scam.consensus.block_record import BlockRecord
from scam.consensus.blockchain_interface import BlockchainInterface
from scam.consensus.constants import ConsensusConstants
from scam.consensus.cost_calculator import NPCResult
from scam.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from scam.consensus.full_block_to_block_record import block_to_block_record
from scam.consensus.get_block_challenge import get_block_challenge
from scam.consensus.pot_iterations import calculate_iterations_quality, is_overflow_block
from scam.full_node.mempool_check_conditions import get_name_puzzle_conditions
from scam.types.blockchain_format.coin import Coin
from scam.types.blockchain_format.sized_bytes import bytes32
from scam.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from scam.types.full_block import FullBlock
from scam.types.generator_types import BlockGenerator
from scam.types.header_block import HeaderBlock
from scam.util.block_cache import BlockCache
from scam.util.errors import Err
from scam.util.generator_tools import get_block_header, tx_removals_and_additions
from scam.util.ints import uint16, uint64, uint32
from scam.util.streamable import Streamable, dataclass_from_dict, streamable
log = logging.getLogger(__name__)
@dataclass(frozen=True)
@streamable
class PreValidationResult(Streamable):
error: Optional[uint16]
required_iters: Optional[uint64] # Iff error is None
npc_result: Optional[NPCResult] # Iff error is None and block is a transaction block
def batch_pre_validate_blocks(
constants_dict: Dict,
blocks_pickled: Dict[bytes, bytes],
full_blocks_pickled: Optional[List[bytes]],
header_blocks_pickled: Optional[List[bytes]],
prev_transaction_generators: List[Optional[bytes]],
npc_results: Dict[uint32, bytes],
check_filter: bool,
expected_difficulty: List[uint64],
expected_sub_slot_iters: List[uint64],
) -> List[bytes]:
blocks = {}
for k, v in blocks_pickled.items():
blocks[k] = BlockRecord.from_bytes(v)
results: List[PreValidationResult] = []
constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
if full_blocks_pickled is not None and header_blocks_pickled is not None:
assert ValueError("Only one should be passed here")
if full_blocks_pickled is not None:
for i in range(len(full_blocks_pickled)):
try:
block: FullBlock = FullBlock.from_bytes(full_blocks_pickled[i])
tx_additions: List[Coin] = []
removals: List[bytes32] = []
npc_result: Optional[NPCResult] = None
if block.height in npc_results:
npc_result = NPCResult.from_bytes(npc_results[block.height])
assert npc_result is not None
if npc_result.npc_list is not None:
removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
removals, tx_additions = [], []
if block.transactions_generator is not None and npc_result is None:
prev_generator_bytes = prev_transaction_generators[i]
assert prev_generator_bytes is not None
assert block.transactions_info is not None
block_generator: BlockGenerator = BlockGenerator.from_bytes(prev_generator_bytes)
assert block_generator.program == block.transactions_generator
npc_result = get_name_puzzle_conditions(
block_generator,
min(constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
cost_per_byte=constants.COST_PER_BYTE,
safe_mode=True,
)
removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
header_block = get_block_header(block, tx_additions, removals)
required_iters, error = validate_finished_header_block(
constants,
BlockCache(blocks),
header_block,
check_filter,
expected_difficulty[i],
expected_sub_slot_iters[i],
)
error_int: Optional[uint16] = None
if error is not None:
error_int = uint16(error.code.value)
results.append(PreValidationResult(error_int, required_iters, npc_result))
except Exception:
error_stack = traceback.format_exc()
log.error(f"Exception: {error_stack}")
results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None))
elif header_blocks_pickled is not None:
for i in range(len(header_blocks_pickled)):
try:
header_block = HeaderBlock.from_bytes(header_blocks_pickled[i])
required_iters, error = validate_finished_header_block(
constants,
BlockCache(blocks),
header_block,
check_filter,
expected_difficulty[i],
expected_sub_slot_iters[i],
)
error_int = None
if error is not None:
error_int = uint16(error.code.value)
results.append(PreValidationResult(error_int, required_iters, None))
except Exception:
error_stack = traceback.format_exc()
log.error(f"Exception: {error_stack}")
results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None))
return [bytes(r) for r in results]
async def pre_validate_blocks_multiprocessing(
constants: ConsensusConstants,
constants_json: Dict,
block_records: BlockchainInterface,
blocks: Sequence[Union[FullBlock, HeaderBlock]],
pool: ProcessPoolExecutor,
check_filter: bool,
npc_results: Dict[uint32, NPCResult],
get_block_generator: Optional[Callable],
batch_size: int,
wp_summaries: Optional[List[SubEpochSummary]] = None,
) -> Optional[List[PreValidationResult]]:
"""
This method must be called under the blockchain lock
    If all the full blocks pass pre-validation (only the headers are validated), returns the list of required iters.
    If any validation issue occurs, returns None.
Args:
check_filter:
constants_json:
pool:
constants:
block_records:
blocks: list of full blocks to validate (must be connected to current chain)
npc_results
get_block_generator
"""
prev_b: Optional[BlockRecord] = None
# Collects all the recent blocks (up to the previous sub-epoch)
recent_blocks: Dict[bytes32, BlockRecord] = {}
recent_blocks_compressed: Dict[bytes32, BlockRecord] = {}
num_sub_slots_found = 0
num_blocks_seen = 0
if blocks[0].height > 0:
if not block_records.contains_block(blocks[0].prev_header_hash):
return [PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None)]
curr = block_records.block_record(blocks[0].prev_header_hash)
num_sub_slots_to_look_for = 3 if curr.overflow else 2
while (
curr.sub_epoch_summary_included is None
or num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS
or num_sub_slots_found < num_sub_slots_to_look_for
) and curr.height > 0:
if num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS or num_sub_slots_found < num_sub_slots_to_look_for:
recent_blocks_compressed[curr.header_hash] = curr
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
num_sub_slots_found += len(curr.finished_challenge_slot_hashes)
recent_blocks[curr.header_hash] = curr
if curr.is_transaction_block:
num_blocks_seen += 1
curr = block_records.block_record(curr.prev_hash)
recent_blocks[curr.header_hash] = curr
recent_blocks_compressed[curr.header_hash] = curr
block_record_was_present = []
for block in blocks:
block_record_was_present.append(block_records.contains_block(block.header_hash))
diff_ssis: List[Tuple[uint64, uint64]] = []
for block in blocks:
if block.height != 0:
assert block_records.contains_block(block.prev_header_hash)
if prev_b is None:
prev_b = block_records.block_record(block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
constants, len(block.finished_sub_slots) > 0, prev_b, block_records
)
overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
challenge = get_block_challenge(constants, block, BlockCache(recent_blocks), prev_b is None, overflow, False)
if block.reward_chain_block.challenge_chain_sp_vdf is None:
cc_sp_hash: bytes32 = challenge
else:
cc_sp_hash = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
q_str: Optional[bytes32] = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants, challenge, cc_sp_hash
)
if q_str is None:
for i, block_i in enumerate(blocks):
if not block_record_was_present[i] and block_records.contains_block(block_i.header_hash):
block_records.remove_block_record(block_i.header_hash)
return None
required_iters: uint64 = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
block.reward_chain_block.proof_of_space.size,
difficulty,
cc_sp_hash,
)
block_rec = block_to_block_record(
constants,
block_records,
required_iters,
block,
None,
)
if block_rec.sub_epoch_summary_included is not None and wp_summaries is not None:
idx = int(block.height / constants.SUB_EPOCH_BLOCKS) - 1
next_ses = wp_summaries[idx]
if not block_rec.sub_epoch_summary_included.get_hash() == next_ses.get_hash():
log.error("sub_epoch_summary does not match wp sub_epoch_summary list")
return None
# Makes sure to not override the valid blocks already in block_records
if not block_records.contains_block(block_rec.header_hash):
block_records.add_block_record(block_rec) # Temporarily add block to dict
recent_blocks[block_rec.header_hash] = block_rec
recent_blocks_compressed[block_rec.header_hash] = block_rec
else:
recent_blocks[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
recent_blocks_compressed[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
prev_b = block_rec
diff_ssis.append((difficulty, sub_slot_iters))
block_dict: Dict[bytes32, Union[FullBlock, HeaderBlock]] = {}
for i, block in enumerate(blocks):
block_dict[block.header_hash] = block
if not block_record_was_present[i]:
block_records.remove_block_record(block.header_hash)
recent_sb_compressed_pickled = {bytes(k): bytes(v) for k, v in recent_blocks_compressed.items()}
npc_results_pickled = {}
for k, v in npc_results.items():
npc_results_pickled[k] = bytes(v)
futures = []
# Pool of workers to validate blocks concurrently
for i in range(0, len(blocks), batch_size):
end_i = min(i + batch_size, len(blocks))
blocks_to_validate = blocks[i:end_i]
if any([len(block.finished_sub_slots) > 0 for block in blocks_to_validate]):
final_pickled = {bytes(k): bytes(v) for k, v in recent_blocks.items()}
else:
final_pickled = recent_sb_compressed_pickled
b_pickled: Optional[List[bytes]] = None
hb_pickled: Optional[List[bytes]] = None
previous_generators: List[Optional[bytes]] = []
for block in blocks_to_validate:
# We ONLY add blocks which are in the past, based on header hashes (which are validated later) to the
# prev blocks dict. This is important since these blocks are assumed to be valid and are used as previous
# generator references
prev_blocks_dict: Dict[uint32, Union[FullBlock, HeaderBlock]] = {}
curr_b: Union[FullBlock, HeaderBlock] = block
while curr_b.prev_header_hash in block_dict:
curr_b = block_dict[curr_b.prev_header_hash]
prev_blocks_dict[curr_b.header_hash] = curr_b
if isinstance(block, FullBlock):
assert get_block_generator is not None
if b_pickled is None:
b_pickled = []
b_pickled.append(bytes(block))
try:
block_generator: Optional[BlockGenerator] = await get_block_generator(block, prev_blocks_dict)
except ValueError:
return None
if block_generator is not None:
previous_generators.append(bytes(block_generator))
else:
previous_generators.append(None)
else:
if hb_pickled is None:
hb_pickled = []
hb_pickled.append(bytes(block))
futures.append(
asyncio.get_running_loop().run_in_executor(
pool,
batch_pre_validate_blocks,
constants_json,
final_pickled,
b_pickled,
hb_pickled,
previous_generators,
npc_results_pickled,
check_filter,
[diff_ssis[j][0] for j in range(i, end_i)],
[diff_ssis[j][1] for j in range(i, end_i)],
)
)
# Collect all results into one flat list
return [
PreValidationResult.from_bytes(result)
for batch_result in (await asyncio.gather(*futures))
for result in batch_result
]
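# --- Illustrative sketch (not part of the original module): the batching/dispatch pattern
# --- used above, reduced to its core. do_batch and items are hypothetical stand-ins for
# --- batch_pre_validate_blocks and the block list; each batch runs in a worker process and
# --- the per-batch results are flattened back into a single list.
async def _example_dispatch(pool, do_batch, items, batch_size):
    loop = asyncio.get_running_loop()
    futures = [
        loop.run_in_executor(pool, do_batch, items[i:i + batch_size])
        for i in range(0, len(items), batch_size)
    ]
    batches = await asyncio.gather(*futures)
    return [result for batch in batches for result in batch]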
| 46.103448 | 117 | 0.659346 |
4a20db9677114435c75ffa5bb36e4ff4e39ccc8b | 33,553 | py | Python | python/ccxt/bithumb.py | nagabrahmam-mantha/ccxt | b7ec0c03b935a63064b8b73226b39f1099933969 | ["MIT"] | 3 | 2020-03-04T06:38:18.000Z | 2022-01-23T13:40:12.000Z | python/ccxt/bithumb.py | nagabrahmam-mantha/ccxt | b7ec0c03b935a63064b8b73226b39f1099933969 | ["MIT"] | null | null | null | python/ccxt/bithumb.py | nagabrahmam-mantha/ccxt | b7ec0c03b935a63064b8b73226b39f1099933969 | ["MIT"] | 1 | 2022-03-01T07:02:42.000Z | 2022-03-01T07:02:42.000Z |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bithumb(Exchange):
def describe(self):
return self.deep_extend(super(bithumb, self).describe(), {
'id': 'bithumb',
'name': 'Bithumb',
'countries': ['KR'], # South Korea
'rateLimit': 500,
'has': {
'cancelOrder': True,
'CORS': True,
'createMarketOrder': True,
'createOrder': True,
'fetchBalance': True,
'fetchIndexOHLCV': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'withdraw': True,
},
'hostname': 'bithumb.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/30597177-ea800172-9d5e-11e7-804c-b9d4fa9b56b0.jpg',
'api': {
'public': 'https://api.{hostname}/public',
'private': 'https://api.{hostname}',
},
'www': 'https://www.bithumb.com',
'doc': 'https://apidocs.bithumb.com',
'fees': 'https://en.bithumb.com/customer_support/info_fee',
},
'api': {
'public': {
'get': [
'ticker/{currency}',
'ticker/all',
'ticker/ALL_BTC',
'ticker/ALL_KRW',
'orderbook/{currency}',
'orderbook/all',
'transaction_history/{currency}',
'transaction_history/all',
'candlestick/{currency}/{interval}',
],
},
'private': {
'post': [
'info/account',
'info/balance',
'info/wallet_address',
'info/ticker',
'info/orders',
'info/user_transactions',
'info/order_detail',
'trade/place',
'trade/cancel',
'trade/btc_withdrawal',
'trade/krw_deposit',
'trade/krw_withdrawal',
'trade/market_buy',
'trade/market_sell',
],
},
},
'fees': {
'trading': {
'maker': self.parse_number('0.0025'),
'taker': self.parse_number('0.0025'),
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'exceptions': {
'Bad Request(SSL)': BadRequest,
'Bad Request(Bad Method)': BadRequest,
'Bad Request.(Auth Data)': AuthenticationError, # {"status": "5100", "message": "Bad Request.(Auth Data)"}
'Not Member': AuthenticationError,
'Invalid Apikey': AuthenticationError, # {"status":"5300","message":"Invalid Apikey"}
'Method Not Allowed.(Access IP)': PermissionDenied,
'Method Not Allowed.(BTC Adress)': InvalidAddress,
'Method Not Allowed.(Access)': PermissionDenied,
'Database Fail': ExchangeNotAvailable,
'Invalid Parameter': BadRequest,
'5600': ExchangeError,
'Unknown Error': ExchangeError,
'After May 23th, recent_transactions is no longer, hence users will not be able to connect to recent_transactions': ExchangeError, # {"status":"5100","message":"After May 23th, recent_transactions is no longer, hence users will not be able to connect to recent_transactions"}
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'10m': '10m',
'30m': '30m',
'1h': '1h',
'6h': '6h',
'12h': '12h',
'1d': '24h',
},
'options': {
'quoteCurrencies': {
'BTC': {
'limits': {
'cost': {
'min': 0.0002,
'max': 100,
},
},
},
'KRW': {
'limits': {
'cost': {
'min': 500,
'max': 5000000000,
},
},
},
},
},
'commonCurrencies': {
'FTC': 'FTC2',
'MIR': 'MIR COIN',
'SOC': 'Soda Coin',
},
})
def amount_to_precision(self, symbol, amount):
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def fetch_markets(self, params={}):
result = []
quoteCurrencies = self.safe_value(self.options, 'quoteCurrencies', {})
quotes = list(quoteCurrencies.keys())
for i in range(0, len(quotes)):
quote = quotes[i]
extension = self.safe_value(quoteCurrencies, quote, {})
method = 'publicGetTickerALL' + quote
response = getattr(self, method)(params)
data = self.safe_value(response, 'data')
currencyIds = list(data.keys())
for j in range(0, len(currencyIds)):
currencyId = currencyIds[j]
if currencyId == 'date':
continue
market = data[currencyId]
base = self.safe_currency_code(currencyId)
symbol = currencyId + '/' + quote
active = True
if isinstance(market, list):
numElements = len(market)
if numElements == 0:
active = False
entry = self.deep_extend({
'id': currencyId,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
'type': 'spot',
'spot': True,
'active': active,
'precision': {
'amount': 4,
'price': 4,
},
'limits': {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {}, # set via options
},
'baseId': None,
'quoteId': None,
}, extension)
result.append(entry)
return result
def fetch_balance(self, params={}):
self.load_markets()
request = {
'currency': 'ALL',
}
response = self.privatePostInfoBalance(self.extend(request, params))
result = {'info': response}
balances = self.safe_value(response, 'data')
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
account = self.account()
currency = self.currency(code)
lowerCurrencyId = self.safe_string_lower(currency, 'id')
account['total'] = self.safe_string(balances, 'total_' + lowerCurrencyId)
account['used'] = self.safe_string(balances, 'in_use_' + lowerCurrencyId)
account['free'] = self.safe_string(balances, 'available_' + lowerCurrencyId)
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currency': market['base'] + '_' + market['quote'],
}
if limit is not None:
request['count'] = limit # default 30, max 30
response = self.publicGetOrderbookCurrency(self.extend(request, params))
#
# {
# "status":"0000",
# "data":{
# "timestamp":"1587621553942",
# "payment_currency":"KRW",
# "order_currency":"BTC",
# "bids":[
# {"price":"8652000","quantity":"0.0043"},
# {"price":"8651000","quantity":"0.0049"},
# {"price":"8650000","quantity":"8.4791"},
# ],
# "asks":[
# {"price":"8654000","quantity":"0.119"},
# {"price":"8655000","quantity":"0.254"},
# {"price":"8658000","quantity":"0.119"},
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
timestamp = self.safe_integer(data, 'timestamp')
return self.parse_order_book(data, symbol, timestamp, 'bids', 'asks', 'price', 'quantity')
def parse_ticker(self, ticker, market=None):
#
# fetchTicker, fetchTickers
#
# {
# "opening_price":"227100",
# "closing_price":"228400",
# "min_price":"222300",
# "max_price":"230000",
# "units_traded":"82618.56075337",
# "acc_trade_value":"18767376138.6031",
# "prev_closing_price":"227100",
# "units_traded_24H":"151871.13484676",
# "acc_trade_value_24H":"34247610416.8974",
# "fluctate_24H":"8700",
# "fluctate_rate_24H":"3.96",
# "date":"1587710327264", # fetchTickers inject self
# }
#
timestamp = self.safe_integer(ticker, 'date')
symbol = self.safe_symbol(None, market)
open = self.safe_number(ticker, 'opening_price')
close = self.safe_number(ticker, 'closing_price')
baseVolume = self.safe_number(ticker, 'units_traded_24H')
quoteVolume = self.safe_number(ticker, 'acc_trade_value_24H')
vwap = self.vwap(baseVolume, quoteVolume)
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'max_price'),
'low': self.safe_number(ticker, 'min_price'),
'bid': self.safe_number(ticker, 'buy_price'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'sell_price'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTickerAll(params)
#
# {
# "status":"0000",
# "data":{
# "BTC":{
# "opening_price":"9045000",
# "closing_price":"9132000",
# "min_price":"8938000",
# "max_price":"9168000",
# "units_traded":"4619.79967497",
# "acc_trade_value":"42021363832.5187",
# "prev_closing_price":"9041000",
# "units_traded_24H":"8793.5045804",
# "acc_trade_value_24H":"78933458515.4962",
# "fluctate_24H":"530000",
# "fluctate_rate_24H":"6.16"
# },
# "date":"1587710878669"
# }
# }
#
result = {}
data = self.safe_value(response, 'data', {})
timestamp = self.safe_integer(data, 'date')
tickers = self.omit(data, 'date')
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
symbol = id
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
isArray = isinstance(ticker, list)
if not isArray:
ticker['date'] = timestamp
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currency': market['base'],
}
response = self.publicGetTickerCurrency(self.extend(request, params))
#
# {
# "status":"0000",
# "data":{
# "opening_price":"227100",
# "closing_price":"228400",
# "min_price":"222300",
# "max_price":"230000",
# "units_traded":"82618.56075337",
# "acc_trade_value":"18767376138.6031",
# "prev_closing_price":"227100",
# "units_traded_24H":"151871.13484676",
# "acc_trade_value_24H":"34247610416.8974",
# "fluctate_24H":"8700",
# "fluctate_rate_24H":"3.96",
# "date":"1587710327264"
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_ticker(data, market)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
        #     1576823400000,  # timestamp (기준 시간)
        #     '8284000',      # open (시가)
        #     '8286000',      # close (종가)
        #     '8289000',      # high (고가)
        #     '8276000',      # low (저가)
        #     '15.41503692'   # volume (거래량)
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 5),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currency': market['base'],
'interval': self.timeframes[timeframe],
}
response = self.publicGetCandlestickCurrencyInterval(self.extend(request, params))
#
# {
# 'status': '0000',
# 'data': {
# [
        #             1576823400000,  # timestamp (기준 시간)
        #             '8284000',      # open (시가)
        #             '8286000',      # close (종가)
        #             '8289000',      # high (고가)
        #             '8276000',      # low (저가)
        #             '15.41503692'   # volume (거래량)
# ],
# [
        #             1576824000000,  # timestamp (기준 시간)
        #             '8284000',      # open (시가)
        #             '8281000',      # close (종가)
        #             '8289000',      # high (고가)
        #             '8275000',      # low (저가)
        #             '6.19584467'    # volume (거래량)
# ],
# }
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "transaction_date":"2020-04-23 22:21:46",
# "type":"ask",
# "units_traded":"0.0125",
# "price":"8667000",
# "total":"108337"
# }
#
# fetchOrder(private)
#
# {
# "transaction_date": "1572497603902030",
# "price": "8601000",
# "units": "0.005",
# "fee_currency": "KRW",
# "fee": "107.51",
# "total": "43005"
# }
#
# a workaround for their bug in date format, hours are not 0-padded
timestamp = None
transactionDatetime = self.safe_string(trade, 'transaction_date')
if transactionDatetime is not None:
parts = transactionDatetime.split(' ')
numParts = len(parts)
if numParts > 1:
transactionDate = parts[0]
transactionTime = parts[1]
if len(transactionTime) < 8:
transactionTime = '0' + transactionTime
timestamp = self.parse8601(transactionDate + ' ' + transactionTime)
else:
timestamp = self.safe_integer_product(trade, 'transaction_date', 0.001)
if timestamp is not None:
timestamp -= 9 * 3600000 # they report UTC + 9 hours, server in Korean timezone
type = None
side = self.safe_string(trade, 'type')
side = 'sell' if (side == 'ask') else 'buy'
id = self.safe_string(trade, 'cont_no')
symbol = None
if market is not None:
symbol = market['symbol']
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string_2(trade, 'units_traded', 'units')
costString = self.safe_string(trade, 'total')
fee = None
feeCostString = self.safe_string(trade, 'fee')
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'fee_currency')
feeCurrencyCode = self.common_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': None,
'type': type,
'side': side,
'takerOrMaker': None,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currency': market['base'],
}
        if limit is not None:
request['count'] = limit # default 20, max 100
response = self.publicGetTransactionHistoryCurrency(self.extend(request, params))
#
# {
# "status":"0000",
# "data":[
# {
# "transaction_date":"2020-04-23 22:21:46",
# "type":"ask",
# "units_traded":"0.0125",
# "price":"8667000",
# "total":"108337"
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'order_currency': market['id'],
'payment_currency': market['quote'],
'units': amount,
}
method = 'privatePostTradePlace'
if type == 'limit':
request['price'] = price
request['type'] = 'bid' if (side == 'buy') else 'ask'
else:
method = 'privatePostTradeMarket' + self.capitalize(side)
response = getattr(self, method)(self.extend(request, params))
id = self.safe_string(response, 'order_id')
if id is None:
raise InvalidOrder(self.id + ' createOrder() did not return an order id')
return {
'info': response,
'symbol': symbol,
'type': type,
'side': side,
'id': id,
}
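    # A hedged usage sketch for create_order() through ccxt's unified API; the
    # credentials and amounts below are placeholders and nothing is sent here.
    #
    #     exchange = bithumb({'apiKey': '...', 'secret': '...'})
    #     # limit orders go to privatePostTradePlace with type 'bid' (buy) or 'ask' (sell)
    #     exchange.create_order('BTC/KRW', 'limit', 'buy', 0.001, 50000000)
    #     # market orders go to privatePostTradeMarketBuy / privatePostTradeMarketSell
    #     exchange.create_order('BTC/KRW', 'market', 'sell', 0.001)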
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'count': 1,
'order_currency': market['base'],
'payment_currency': market['quote'],
}
response = self.privatePostInfoOrderDetail(self.extend(request, params))
#
# {
# "status": "0000",
# "data": {
# order_date: '1603161798539254',
# type: 'ask',
# order_status: 'Cancel',
# order_currency: 'BTC',
# payment_currency: 'KRW',
# watch_price: '0',
# order_price: '13344000',
# order_qty: '0.0125',
# cancel_date: '1603161803809993',
# cancel_type: '사용자취소',
# contract: [
# {
# transaction_date: '1603161799976383',
# price: '13344000',
# units: '0.0015',
# fee_currency: 'KRW',
# fee: '0',
# total: '20016'
# }
# ],
# }
# }
#
data = self.safe_value(response, 'data')
return self.parse_order(self.extend(data, {'order_id': id}), market)
def parse_order_status(self, status):
statuses = {
'Pending': 'open',
'Completed': 'closed',
'Cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
#
# fetchOrder
#
# {
# "transaction_date": "1572497603668315",
# "type": "bid",
# "order_status": "Completed",
# "order_currency": "BTC",
# "payment_currency": "KRW",
# "order_price": "8601000",
# "order_qty": "0.007",
# "cancel_date": "",
# "cancel_type": "",
# "contract": [
# {
# "transaction_date": "1572497603902030",
# "price": "8601000",
# "units": "0.005",
# "fee_currency": "KRW",
# "fee": "107.51",
# "total": "43005"
# },
# ]
# }
#
# {
# order_date: '1603161798539254',
# type: 'ask',
# order_status: 'Cancel',
# order_currency: 'BTC',
# payment_currency: 'KRW',
# watch_price: '0',
# order_price: '13344000',
# order_qty: '0.0125',
# cancel_date: '1603161803809993',
# cancel_type: '사용자취소',
# contract: [
# {
# transaction_date: '1603161799976383',
# price: '13344000',
# units: '0.0015',
# fee_currency: 'KRW',
# fee: '0',
# total: '20016'
# }
# ],
# }
#
# fetchOpenOrders
#
# {
# "order_currency": "BTC",
# "payment_currency": "KRW",
# "order_id": "C0101000007408440032",
# "order_date": "1571728739360570",
# "type": "bid",
# "units": "5.0",
# "units_remaining": "5.0",
# "price": "501000",
# }
#
timestamp = self.safe_integer_product(order, 'order_date', 0.001)
sideProperty = self.safe_value_2(order, 'type', 'side')
side = 'buy' if (sideProperty == 'bid') else 'sell'
status = self.parse_order_status(self.safe_string(order, 'order_status'))
price = self.safe_number_2(order, 'order_price', 'price')
type = 'limit'
if price == 0:
price = None
type = 'market'
amount = self.safe_number_2(order, 'order_qty', 'units')
remaining = self.safe_number(order, 'units_remaining')
if remaining is None:
if status == 'closed':
remaining = 0
elif status != 'canceled':
remaining = amount
symbol = None
baseId = self.safe_string(order, 'order_currency')
quoteId = self.safe_string(order, 'payment_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
if (base is not None) and (quote is not None):
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
id = self.safe_string(order, 'order_id')
rawTrades = self.safe_value(order, 'contract', [])
trades = self.parse_trades(rawTrades, market, None, None, {
'side': side,
'symbol': symbol,
'order': id,
})
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'cost': None,
'average': None,
'filled': None,
'remaining': remaining,
'status': status,
'fee': None,
'trades': trades,
})
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 100
request = {
'count': limit,
'order_currency': market['base'],
'payment_currency': market['quote'],
}
if since is not None:
request['after'] = since
response = self.privatePostInfoOrders(self.extend(request, params))
#
# {
# "status": "0000",
# "data": [
# {
# "order_currency": "BTC",
# "payment_currency": "KRW",
# "order_id": "C0101000007408440032",
# "order_date": "1571728739360570",
# "type": "bid",
# "units": "5.0",
# "units_remaining": "5.0",
# "price": "501000",
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, since, limit)
def cancel_order(self, id, symbol=None, params={}):
side_in_params = ('side' in params)
if not side_in_params:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a `side` parameter(sell or buy)')
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a `symbol` argument')
market = self.market(symbol)
side = 'bid' if (params['side'] == 'buy') else 'ask'
params = self.omit(params, ['side', 'currency'])
# https://github.com/ccxt/ccxt/issues/6771
request = {
'order_id': id,
'type': side,
'order_currency': market['base'],
'payment_currency': market['quote'],
}
return self.privatePostTradeCancel(self.extend(request, params))
def cancel_unified_order(self, order, params={}):
request = {
'side': order['side'],
}
return self.cancel_order(order['id'], order['symbol'], self.extend(request, params))
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'units': amount,
'address': address,
'currency': currency['id'],
}
        if code == 'XRP' or code == 'XMR' or code == 'EOS' or code == 'STEEM':
destination = self.safe_string(params, 'destination')
if (tag is None) and (destination is None):
raise ArgumentsRequired(self.id + ' ' + code + ' withdraw() requires a tag argument or an extra destination param')
elif tag is not None:
request['destination'] = tag
response = self.privatePostTradeBtcWithdrawal(self.extend(request, params))
return {
'info': response,
'id': None,
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
endpoint = '/' + self.implode_params(path, params)
url = self.implode_hostname(self.urls['api'][api]) + endpoint
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
body = self.urlencode(self.extend({
'endpoint': endpoint,
}, query))
nonce = str(self.nonce())
auth = endpoint + "\0" + body + "\0" + nonce # eslint-disable-line quotes
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512)
signature64 = self.decode(self.string_to_base64(signature))
headers = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
'Api-Key': self.apiKey,
'Api-Sign': signature64,
'Api-Nonce': nonce,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'status' in response:
#
# {"status":"5100","message":"After May 23th, recent_transactions is no longer, hence users will not be able to connect to recent_transactions"}
#
status = self.safe_string(response, 'status')
message = self.safe_string(response, 'message')
if status is not None:
if status == '0000':
return # no error
elif message == '거래 진행중인 내역이 존재하지 않습니다':
# https://github.com/ccxt/ccxt/issues/9017
return # no error
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions, status, feedback)
self.throw_exactly_matched_exception(self.exceptions, message, feedback)
raise ExchangeError(feedback)
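# A hedged, minimal usage sketch for this class via ccxt's public endpoints
# (no credentials required for these calls); returned values vary with the market.
#
#     import ccxt
#
#     exchange = ccxt.bithumb()
#     markets = exchange.load_markets()                 # built from ticker/ALL_BTC and ticker/ALL_KRW
#     ticker = exchange.fetch_ticker('BTC/KRW')         # publicGetTickerCurrency
#     candles = exchange.fetch_ohlcv('BTC/KRW', '1h')   # publicGetCandlestickCurrencyInterval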
| 39.106061 | 292 | 0.458588 |
4a20dba65542f6d6e552a94371287171f2fc6491 | 745 | py | Python | valleydeight/object_types.py | benkrikler/valedictory | 36968f4da5bb5a6c4e85bd7f02fdc32db08e9579 | ["MIT"] | 1 | 2019-02-22T17:12:48.000Z | 2019-02-22T17:12:48.000Z | valleydeight/object_types.py | benkrikler/valedictory | 36968f4da5bb5a6c4e85bd7f02fdc32db08e9579 | ["MIT"] | 1 | 2019-02-22T16:40:36.000Z | 2019-02-22T16:40:36.000Z | valleydeight/object_types.py | benkrikler/valleydeight | 36968f4da5bb5a6c4e85bd7f02fdc32db08e9579 | ["MIT"] | null | null | null
from .base import BaseValidator
__all__ = ["Object"]
class Object(BaseValidator):
def __init__(self, class_type, expand_dicts=True, expand_lists=True, args=None, **kwargs):
super(BaseValidator, self).__init__(**kwargs)
self.class_type = class_type
self.expand_dicts = expand_dicts
self.expand_lists = expand_lists
self.args = args
def __call__(self, instance):
if self.args:
instance = self.args(instance)
if self.expand_dicts and isinstance(instance, dict):
return self.class_type(**instance)
if self.expand_lists and isinstance(instance, (tuple, list)):
return self.class_type(*instance)
return self.class_type(instance)
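# A hedged usage sketch; Point is an assumed example target class, not part of
# this package.
#
#     from collections import namedtuple
#
#     Point = namedtuple("Point", ["x", "y"])
#     validate = Object(Point)
#     validate({"x": 1, "y": 2})   # dict expanded to keyword args -> Point(x=1, y=2)
#     validate((3, 4))             # tuple/list expanded to positional args -> Point(x=3, y=4)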
| 31.041667 | 94 | 0.66443 |
4a20dbf8b3e9200f09a683b836d62147c6b1a800 | 430 | py | Python | industrial_benchmark_python/test_gym_wrapper.py | siemens/industrialbenchmark | a38da654c87c16436e2382f68da3bf264e5523d0 | [
"Apache-2.0"
] | 74 | 2017-06-27T11:56:59.000Z | 2022-03-24T04:18:22.000Z | industrial_benchmark_python/test_gym_wrapper.py | siemens/industrialbenchmark | a38da654c87c16436e2382f68da3bf264e5523d0 | [
"Apache-2.0"
] | 6 | 2017-11-02T14:53:28.000Z | 2021-06-18T13:58:46.000Z | industrial_benchmark_python/test_gym_wrapper.py | siemens/industrialbenchmark | a38da654c87c16436e2382f68da3bf264e5523d0 | [
"Apache-2.0"
] | 23 | 2017-02-19T00:06:27.000Z | 2022-02-25T12:39:25.000Z | from industrial_benchmark_python.IBGym import IBGym
import numpy as np
DISCOUNT = 0.97
env = IBGym(70)
env.reset()
returns = []
for _ in range(100):
acc_return = 0.
for i in range(100):
state, reward, done, info = env.step(env.action_space.sample())
acc_return += reward * DISCOUNT**i
returns.append(acc_return / 100.)
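# The inner loop above accumulates a discounted return; with the per-step
# rewards collected in an array it could equivalently be computed in one shot
# (illustrative only -- 'rewards' is not a variable in this script):
#
#     rewards = np.asarray(rewards)                                      # shape (100,)
#     acc_return = np.sum(rewards * DISCOUNT ** np.arange(len(rewards)))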
print("random actions achieved return", np.mean(returns), "+-", np.std(returns)) | 26.875 | 80 | 0.67907 |
4a20dc19f34d16c2ebafb74b85bd1694815f05ca | 47,884 | py | Python | src/cogent3/core/moltype.py | StephenRogers1/cogent3 | 1116a0ab14d9c29a560297205546714e2db1896c | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/core/moltype.py | StephenRogers1/cogent3 | 1116a0ab14d9c29a560297205546714e2db1896c | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/core/moltype.py | StephenRogers1/cogent3 | 1116a0ab14d9c29a560297205546714e2db1896c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
moltype.py
MolType provides services for resolving ambiguities, or providing the
correct ambiguity for recoding. It also maintains the mappings between
different kinds of alphabets, sequences and alignments.
One issue with MolTypes is that they need to know about Sequence, Alphabet,
and other objects, but, at the same time, those objects need to know about
the MolType. It is thus essential that the connection between these other
types and the MolType can be made after the objects are created.
"""
__author__ = "Peter Maxwell, Gavin Huttley and Rob Knight"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Peter Maxwell", "Gavin Huttley", "Rob Knight", "Daniel McDonald"]
__license__ = "BSD-3"
__version__ = "2020.12.21a"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Production"
import json
import re
from collections import defaultdict
from random import choice
from string import ascii_letters as letters
import numpy
from numpy import (
arange,
array,
asarray,
newaxis,
ravel,
remainder,
take,
transpose,
uint8,
uint16,
uint32,
zeros,
)
from cogent3.core.alignment import (
Alignment,
ArrayAlignment,
SequenceCollection,
)
from cogent3.core.alphabet import (
Alphabet,
AlphabetError,
CharAlphabet,
Enumeration,
_make_complement_array,
)
from cogent3.core.genetic_code import get_code
from cogent3.core.sequence import (
ABSequence,
ArrayDnaCodonSequence,
ArrayDnaSequence,
ArrayProteinSequence,
ArrayProteinWithStopSequence,
ArrayRnaCodonSequence,
ArrayRnaSequence,
ArraySequence,
ByteSequence,
DnaSequence,
NucleicAcidSequence,
ProteinSequence,
ProteinWithStopSequence,
RnaSequence,
)
from cogent3.core.sequence import Sequence as DefaultSequence
from cogent3.data.molecular_weight import DnaMW, ProteinMW, RnaMW
from cogent3.util.misc import (
FunctionWrapper,
add_lowercase,
get_object_provenance,
iterable,
)
from cogent3.util.transform import KeepChars, first_index_in_set
Float = numpy.core.numerictypes.sctype2char(float)
Int = numpy.core.numerictypes.sctype2char(int)
maketrans = str.maketrans
translate = str.translate
IUPAC_gap = "-"
IUPAC_missing = "?"
IUPAC_DNA_chars = ["T", "C", "A", "G"]
IUPAC_DNA_ambiguities = {
"N": ("A", "C", "T", "G"),
"R": ("A", "G"),
"Y": ("C", "T"),
"W": ("A", "T"),
"S": ("C", "G"),
"K": ("T", "G"),
"M": ("C", "A"),
"B": ("C", "T", "G"),
"D": ("A", "T", "G"),
"H": ("A", "C", "T"),
"V": ("A", "C", "G"),
}
IUPAC_DNA_ambiguities_complements = {
"A": "T",
"C": "G",
"G": "C",
"T": "A",
"-": "-",
"M": "K",
"K": "M",
"N": "N",
"R": "Y",
"Y": "R",
"W": "W",
"S": "S",
"X": "X", # not technically an IUPAC ambiguity, but used by repeatmasker
"V": "B",
"B": "V",
"H": "D",
"D": "H",
}
IUPAC_DNA_complements = {"A": "T", "C": "G", "G": "C", "T": "A", "-": "-"}
IUPAC_DNA_complements = {"A": "T", "C": "G", "G": "C", "T": "A", "-": "-"}
# note change in standard order from DNA
IUPAC_RNA_chars = ["U", "C", "A", "G"]
IUPAC_RNA_ambiguities = {
"N": ("A", "C", "U", "G"),
"R": ("A", "G"),
"Y": ("C", "U"),
"W": ("A", "U"),
"S": ("C", "G"),
"K": ("U", "G"),
"M": ("C", "A"),
"B": ("C", "U", "G"),
"D": ("A", "U", "G"),
"H": ("A", "C", "U"),
"V": ("A", "C", "G"),
}
IUPAC_RNA_ambiguities_complements = {
"A": "U",
"C": "G",
"G": "C",
"U": "A",
"-": "-",
"M": "K",
"K": "M",
"N": "N",
"R": "Y",
"Y": "R",
"W": "W",
"S": "S",
"X": "X", # not technically an IUPAC ambiguity, but used by repeatmasker
"V": "B",
"B": "V",
"H": "D",
"D": "H",
}
IUPAC_RNA_complements = {"A": "U", "C": "G", "G": "C", "U": "A", "-": "-"}
# Standard RNA pairing: GU pairs count as 'weak' pairs
RnaStandardPairs = {
("A", "U"): True, # True vs False for 'always' vs 'sometimes' pairing
("C", "G"): True,
("G", "C"): True,
("U", "A"): True,
("G", "U"): False,
("U", "G"): False,
}
# Watson-Crick RNA pairing only: GU pairs don't count as pairs
RnaWCPairs = {("A", "U"): True, ("C", "G"): True, ("G", "C"): True, ("U", "A"): True}
# RNA pairing with GU counted as standard pairs
RnaGUPairs = {
("A", "U"): True,
("C", "G"): True,
("G", "C"): True,
("U", "A"): True,
("G", "U"): True,
("U", "G"): True,
}
# RNA pairing with GU, AA, GA, CA and UU mismatches allowed as weak pairs
RnaExtendedPairs = {
("A", "U"): True,
("C", "G"): True,
("G", "C"): True,
("U", "A"): True,
("G", "U"): False,
("U", "G"): False,
("A", "A"): False,
("G", "A"): False,
("A", "G"): False,
("C", "A"): False,
("A", "C"): False,
("U", "U"): False,
}
# Standard DNA pairing: only Watson-Crick pairs count as pairs
DnaStandardPairs = {
("A", "T"): True,
("C", "G"): True,
("G", "C"): True,
("T", "A"): True,
}
# protein letters & ambiguity codes
IUPAC_PROTEIN_code_aa = {
"A": "Alanine",
"C": "Cysteine",
"D": "Aspartic Acid",
"E": "Glutamic Acid",
"F": "Phenylalanine",
"G": "Glycine",
"H": "Histidine",
"I": "Isoleucine",
"K": "Lysine",
"L": "Leucine",
"M": "Methionine",
"N": "Asparagine",
"P": "Proline",
"Q": "Glutamine",
"R": "Arginine",
"S": "Serine",
"T": "Threonine",
"V": "Valine",
"W": "Tryptophan",
"Y": "Tyrosine",
"*": "STOP",
}
IUPAC_PROTEIN_chars = [
"A",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"K",
"L",
"M",
"N",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"Y",
]
PROTEIN_WITH_STOP_chars = [
"A",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"K",
"L",
"M",
"N",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"Y",
"*",
]
IUPAC_PROTEIN_ambiguities = {"B": ["N", "D"], "X": IUPAC_PROTEIN_chars, "Z": ["Q", "E"]}
PROTEIN_WITH_STOP_ambiguities = {
"B": ["N", "D"],
"X": PROTEIN_WITH_STOP_chars,
"Z": ["Q", "E"],
}
class FoundMatch(Exception):
"""Raised when a match is found in a deep loop to skip many levels"""
pass
def make_matches(monomers=None, gaps=None, degenerates=None):
"""Makes a dict of symbol pairs (i,j) -> strictness.
Strictness is True if i and j always match and False if they sometimes
match (e.g. A always matches A, but W sometimes matches R).
"""
result = {}
# allow defaults to be left blank without problems
monomers = monomers or {}
gaps = gaps or {}
degenerates = degenerates or {}
# all monomers always match themselves and no other monomers
for i in monomers:
result[(i, i)] = True
# all gaps always match all other gaps
for i in gaps:
for j in gaps:
result[(i, j)] = True
# monomers sometimes match degenerates that contain them
for i in monomers:
for j in degenerates:
if i in degenerates[j]:
result[(i, j)] = False
result[(j, i)] = False
# degenerates sometimes match degenerates that contain at least one of
# the same monomers
for i in degenerates:
for j in degenerates:
try:
for i_symbol in degenerates[i]:
if i_symbol in degenerates[j]:
result[(i, j)] = False
raise FoundMatch
except FoundMatch:
pass # flow control: break out of doubly nested loop
return result
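# A hedged sketch of make_matches() on a toy two-letter alphabet; 'R' is the
# usual IUPAC purine code (A or G).
#
#     >>> m = make_matches(monomers="AG", gaps="-", degenerates={"R": "AG"})
#     >>> m[("A", "A")]      # identical monomers always match
#     True
#     >>> m[("A", "R")]      # a monomer only sometimes matches a degenerate
#     False
#     >>> ("A", "-") in m    # monomers never match gaps
#     False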
def make_pairs(pairs=None, monomers=None, gaps=None, degenerates=None):
"""Makes a dict of symbol pairs (i,j) -> strictness.
Expands pairs into all possible pairs using degen symbols.
Strictness is True if i and j always pair, and False if they 'weakly' pair
(e.g. GU pairs or if it is possible that they pair).
If you want to make GU pairs count as 'always matching', pass in pairs
that have (G,U) and (U, G) mapped to True rather than False.
"""
result = {}
# allow defaults to be left blank without problems
pairs = pairs or {}
monomers = monomers or {}
gaps = gaps or {}
degenerates = degenerates or {}
# add in the original pairs: should be complete monomer pairs
result.update(pairs)
# all gaps 'weakly' pair with each other
for i in gaps:
for j in gaps:
result[(i, j)] = False
# monomers sometimes pair with degenerates if the monomer's complement
# is in the degenerate symbol
for i in monomers:
for j in degenerates:
found = False
try:
for curr_j in degenerates[j]:
# check if (i,curr_j) and/or (curr_j,i) is a valid pair:
# not mutually required if pairs are not all commutative!
if (i, curr_j) in pairs:
result[(i, j)] = False
found = True
if (curr_j, i) in pairs:
result[(j, i)] = False
found = True
if found:
raise FoundMatch
except FoundMatch:
pass # flow control: break out of nested loop
# degenerates sometimes pair with each other if the first degenerate
# contains the complement of one of the bases in the second degenerate
for i in degenerates:
for j in degenerates:
try:
for curr_i in degenerates[i]:
for curr_j in degenerates[j]:
if (curr_i, curr_j) in pairs:
result[(i, j)] = False
raise FoundMatch
except FoundMatch:
pass # just using for flow control
# don't forget the return value!
return result
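# A hedged sketch of make_pairs() expanding the strict DNA pairs with the
# degenerate symbols defined above.
#
#     >>> p = make_pairs(DnaStandardPairs, IUPAC_DNA_chars, {"-"}, IUPAC_DNA_ambiguities)
#     >>> p[("A", "T")]      # Watson-Crick pairs stay strict
#     True
#     >>> p[("A", "N")]      # A pairs with N only if N resolves to T
#     False
#     >>> p[("-", "-")]      # gaps pair only weakly with gaps
#     False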
# RnaPairingRules is a dict of {name:(base_pairs,degen_pairs)} where base_pairs
# is a dict with the non-degenerate pairing rules and degen_pairs is a dict with
# both the degenerate and non-degenerate pairing rules.
# NOTE: uses make_pairs to augment the initial dict after construction.
RnaPairingRules = {
"Standard": RnaStandardPairs,
"WC": RnaWCPairs,
"GU": RnaGUPairs,
"Extended": RnaExtendedPairs,
}
for k, v in list(RnaPairingRules.items()):
RnaPairingRules[k] = (v, make_pairs(v))
class CoreObjectGroup(object):
"""Container relating gapped, ungapped, degen, and non-degen objects."""
_types = ["base", "degen", "gap", "degen_gap"]
def __init__(self, base, degen=None, gapped=None, degen_gapped=None):
"""Returns new CoreObjectGroup. Only base is required"""
self.base = base
self.degen = degen
self.gapped = gapped
self.degen_gapped = degen_gapped
self._items = [base, degen, gapped, degen_gapped]
self._set_relationships()
def _set_relationships(self):
"""Sets relationships between the different "flavors"."""
self.base.gapped = self.gapped
self.base.ungapped = self.base
self.base.degen = self.degen
self.base.non_degen = self.base
statements = [
"self.degen.gapped = self.degen_gapped",
"self.degen.ungapped = self.degen",
"self.degen.degen = self.degen",
"self.degen.non_degen = self.base",
"self.gapped.gapped = self.gapped",
"self.gapped.ungapped = self.base",
"self.gapped.degen = self.degen_gapped",
"self.gapped.non_degen = self.gapped",
"self.degen_gapped.gapped = self.degen_gapped",
"self.degen_gapped.ungapped = self.degen",
"self.degen_gapped.degen = self.degen_gapped",
"self.degen_gapped.non_degen = self.gapped",
]
for s in statements:
try:
exec(s)
except AttributeError:
pass
def __getitem__(self, i):
"""Allows container to be indexed into, by type of object (e.g. gap)."""
return self.__dict__[i]
def which_type(self, a):
"""Returns the type of an alphabet in self, or None if not present."""
        return self._types[self._items.index(a)]
class AlphabetGroup(CoreObjectGroup):
"""Container relating gapped, ungapped, degen, and non-degen alphabets."""
def __init__(
self,
chars,
degens,
gap=IUPAC_gap,
missing=IUPAC_missing,
moltype=None,
constructor=None,
):
"""Returns new AlphabetGroup."""
if constructor is None:
if max(list(map(len, chars))) == 1:
constructor = CharAlphabet
chars = "".join(chars)
degens = "".join(degens)
else:
constructor = Alphabet # assume multi-char
self.base = constructor(chars, moltype=moltype)
self.degen = constructor(chars + degens, moltype=moltype)
self.gapped = constructor(chars + gap, gap, moltype=moltype)
self.degen_gapped = constructor(
chars + gap + degens + missing, gap, moltype=moltype
)
self._items = [self.base, self.degen, self.gapped, self.degen_gapped]
self._set_relationships()
# set complements if MolType was specified
if moltype is not None:
comps = moltype.complements
for i in self._items:
i._complement_array = _make_complement_array(i, comps)
# colours for HTML representation
def _expand_colors(base, colors):
base = base.copy()
base.update({ch: clr for chars, clr in colors.items() for ch in chars})
return base
class _DefaultValue:
def __init__(self, value):
self.value = value
def __call__(self):
return self.value
_gray = _DefaultValue("gray")
_base_colors = defaultdict(_gray)
NT_COLORS = _expand_colors(
_base_colors, {"A": "#FF0102", "C": "black", "G": "green", "T": "blue", "U": "blue"}
)
AA_COLORS = _expand_colors(
_base_colors,
{
"GAVLI": "#009999",
"FYW": "#ff6600",
"CM": "orange",
"ST": "#009900",
"KRH": "#FF0102",
"DE": "blue",
"NQ": "#993300",
"P": "#cc0099",
},
)
class MolType(object):
"""MolType: Handles operations that depend on the sequence type (e.g. DNA).
The MolType knows how to connect alphabets, sequences, alignments, and so
forth, and how to disambiguate ambiguous symbols and perform base
pairing (where appropriate).
WARNING: Objects passed to a MolType become associated with that MolType,
i.e. if you pass ProteinSequence to a new MolType you make up, all
ProteinSequences will now be associated with the new MolType. This may
not be what you expect. Use preserve_existing_moltypes=True if you
don't want to reset the moltype.
"""
def __init__(
self,
motifset,
gap=IUPAC_gap,
missing=IUPAC_missing,
gaps=None,
seq_constructor=None,
ambiguities=None,
label=None,
complements=None,
pairs=None,
mw_calculator=None,
add_lower=False,
preserve_existing_moltypes=False,
make_alphabet_group=False,
array_seq_constructor=None,
colors=None,
):
"""Returns a new MolType object. Note that the parameters are in flux.
Parameters
----------
motifset
Alphabet or sequence of items in the default
alphabet. Does not include degenerates.
gap
default gap symbol
missing
symbol for missing data
gaps
any other symbols that should be treated as gaps (doesn't have
to include gap or missing; they will be silently added)
seq_constructor
Class for constructing sequences.
ambiguities
dict of char:tuple, doesn't include gaps (these are
            hard-coded as - and ?, and added later).
label
text label, don't know what this is used for. Unnecessary?
complements
dict of symbol:symbol showing how the non-degenerate
single characters complement each other. Used for constructing
on the fly the complement table, incl. support for must_pair and
can_pair.
pairs
dict in which keys are pairs of symbols that can pair
with each other, values are True (must pair) or False (might
pair). Currently, the meaning of GU pairs as 'weak' is conflated
with the meaning of degenerate symbol pairs (which might pair
with each other but don't necessarily, depending on how the
symbol is resolved). This should be refactored.
mw_calculator
f(seq) -> molecular weight.
add_lower
if True (default: False) adds the lowercase versions of
everything into the alphabet. Slated for deletion.
preserve_existing_moltypes
if True (default: False), does not
set the MolType of the things added in **kwargs to self.
make_alphabet_group
if True, makes an AlphabetGroup relating
the various alphabets to one another.
array_seq_constructor
sequence type for array sequence
colors
dict mapping moltype characters to colors for display
Note on "degenerates" versus "ambiguities": self.degenerates contains
_only_ mappings for degenerate symbols, whereas self.ambiguities
contains mappings for both degenerate and non-degenerate symbols.
Sometimes you want one, sometimes the other, so both are provided.
"""
self._serialisable = {k: v for k, v in locals().items() if k != "self"}
self.gap = gap
self.missing = missing
self.gaps = frozenset([gap, missing])
if gaps:
self.gaps = self.gaps.union(frozenset(gaps))
self.label = label
# set the sequence constructor
if seq_constructor is None:
seq_constructor = "".join # safe default string constructor
elif not preserve_existing_moltypes:
seq_constructor.moltype = self
self._make_seq = seq_constructor
# set the ambiguities
ambigs = {self.missing: tuple(motifset) + (self.gap,), self.gap: (self.gap,)}
if ambiguities:
ambigs.update(ambiguities)
for c in motifset:
ambigs[c] = (c,)
self.ambiguities = ambigs
# set complements -- must set before we make the alphabet group
self.complements = complements or {}
if make_alphabet_group: # note: must use _original_ ambiguities here
self.alphabets = AlphabetGroup(motifset, ambiguities, moltype=self)
self.alphabet = self.alphabets.base
else:
if isinstance(motifset, Enumeration):
self.alphabet = motifset
elif max(len(motif) for motif in motifset) == 1:
self.alphabet = CharAlphabet(motifset, moltype=self)
else:
self.alphabet = Alphabet(motifset, moltype=self)
# set the other properties
self.degenerates = ambiguities and ambiguities.copy() or {}
self.degenerates[self.missing] = "".join(motifset) + self.gap
self.matches = make_matches(motifset, self.gaps, self.degenerates)
self.pairs = pairs and pairs.copy() or {}
self.pairs.update(make_pairs(pairs, motifset, self.gaps, self.degenerates))
self.mw_calculator = mw_calculator
# add lowercase characters, if we're doing that
if add_lower:
self._add_lowercase()
# cache various other data that make the calculations faster
self._make_all()
self._make_comp_table()
# a gap can be a true gap char or a degenerate character, typically '?'
# we therefore want to ensure consistent treatment across the definition
# of characters as either gap or degenerate
self.gap_string = "".join(self.gaps)
strict_gap = "".join(set(self.gap_string) - set(self.degenerates))
self.strip_degenerate = FunctionWrapper(
KeepChars(strict_gap + "".join(self.alphabet))
)
self.strip_bad = FunctionWrapper(KeepChars("".join(self.All)))
to_keep = set(self.alphabet) ^ set(self.degenerates) - set(self.gaps)
self.strip_bad_and_gaps = FunctionWrapper(KeepChars("".join(to_keep)))
# make inverse degenerates from degenerates
# ensure that lowercase versions also exist if appropriate
inv_degens = {}
for key, val in list(self.degenerates.items()):
inv_degens[frozenset(val)] = key.upper()
if add_lower:
inv_degens[frozenset("".join(val).lower())] = key.lower()
for m in self.alphabet:
inv_degens[frozenset(m)] = m
if add_lower:
inv_degens[frozenset("".join(m).lower())] = m.lower()
for m in self.gaps:
inv_degens[frozenset(m)] = m
self.inverse_degenerates = inv_degens
# set array type for modeling alphabets
try:
self.array_type = self.alphabet.array_type
except AttributeError:
self.array_type = None
# set modeling sequence
self._make_array_seq = array_seq_constructor
self._colors = colors or defaultdict(_DefaultValue("black"))
def __repr__(self):
"""String representation of MolType.
WARNING: This doesn't allow you to reconstruct the object in its present
incarnation.
"""
return "MolType(%s)" % (self.alphabet,)
def __getnewargs_ex__(self, *args, **kw):
data = self.to_rich_dict(for_pickle=True)
return (), data
def to_rich_dict(self, for_pickle=False):
data = self._serialisable.copy()
if not for_pickle: # we rely on reconstruction from label
data = dict(type=get_object_provenance(self), moltype=self.label)
data["version"] = __version__
return data
def to_json(self):
"""returns result of json formatted string"""
data = self.to_rich_dict(for_pickle=False)
return json.dumps(data)
def to_regex(self, seq):
"""returns a regex pattern with ambiguities expanded to a character set"""
if not self.is_valid(seq):
raise ValueError(f"'{seq}' is invalid for this moltype")
degen_indices = self.get_degenerate_positions(sequence=seq, include_gap=False)
seq = list(seq) # seq can now be modified
for index in degen_indices:
expanded = self.ambiguities[seq[index]]
seq[index] = f"[{''.join(expanded)}]"
return "".join(seq)
def gettype(self):
"""Returns type, e.g. 'dna', 'rna', 'protein'. Delete?"""
return self.label
def make_seq(self, seq, name=None, **kwargs):
"""Returns sequence of correct type."""
return self._make_seq(seq, name, **kwargs)
def make_array_seq(self, seq, name=None, **kwargs):
"""
creates an array sequence
Parameters
----------
seq
characters or array
name : str
kwargs
keyword arguments for the ArraySequence constructor.
Returns
-------
ArraySequence
"""
alphabet = kwargs.pop("alphabet", None)
if alphabet is None and hasattr(self, "alphabets"):
alphabet = self.alphabets.degen_gapped
elif alphabet is None:
alphabet = self.alphabet
return self._make_array_seq(seq, alphabet=alphabet, name=name, **kwargs)
def verify_sequence(self, seq, gaps_allowed=True, wildcards_allowed=True):
"""Checks whether sequence is valid on the default alphabet.
Has special-case handling for gaps and wild-cards. This mechanism is
probably useful to have in parallel with the validation routines that
check specifically whether the sequence has gaps, degenerate symbols,
etc., or that explicitly take an alphabet as input.
"""
alpha = frozenset(self.ambiguities)
if gaps_allowed:
alpha = alpha.union(self.gaps)
if wildcards_allowed:
alpha = alpha.union(self.missing)
try:
nonalpha = re.compile("[^%s]" % re.escape("".join(alpha)))
badchar = nonalpha.search(seq)
if badchar:
motif = badchar.group()
raise AlphabetError(motif)
except TypeError: # not alphabetic sequence: try slow method
for motif in seq:
if motif not in alpha:
raise AlphabetError(motif)
def is_ambiguity(self, querymotif):
"""Return True if querymotif is an amibiguity character in alphabet.
Parameters
----------
querymotif
the motif being queried.
"""
return len(self.ambiguities[querymotif]) > 1
def _what_ambiguity(self, motifs):
"""The code that represents all of 'motifs', and minimal others.
Does this duplicate DegenerateFromSequence directly?
"""
most_specific = len(self.alphabet) + 1
result = self.missing
for (code, motifs2) in list(self.ambiguities.items()):
for c in motifs:
if c not in motifs2:
break
else:
if len(motifs2) < most_specific:
most_specific = len(motifs2)
result = code
return result
def what_ambiguity(self, motifs):
"""The code that represents all of 'motifs', and minimal others.
Does this duplicate DegenerateFromSequence directly?
"""
if not hasattr(self, "_reverse_ambiguities"):
self._reverse_ambiguities = {}
motifs = frozenset(motifs)
if motifs not in self._reverse_ambiguities:
self._reverse_ambiguities[motifs] = self._what_ambiguity(motifs)
return self._reverse_ambiguities[motifs]
def _add_lowercase(self):
"""Adds lowercase versions of keys and vals to each internal dict."""
for name in [
"alphabet",
"degenerates",
"gaps",
"complements",
"pairs",
"matches",
]:
curr = getattr(self, name)
# temp hack to get around re-ordering
if isinstance(curr, Alphabet):
curr = tuple(curr)
new = add_lowercase(curr)
setattr(self, name, new)
def _make_all(self):
"""Sets self.All, which contains all the symbols self knows about.
Note that the value of items in self.All will be the string containing
the possibly degenerate set of symbols that the items expand to.
"""
all = {}
for i in self.alphabet:
curr = str(i)
all[i] = i
for key, val in list(self.degenerates.items()):
all[key] = val
for i in self.gaps:
all[i] = i
self.All = all
def _make_comp_table(self):
"""Sets self.ComplementTable, which maps items onto their complements.
Note: self.ComplementTable is only set if self.complements exists.
"""
if self.complements:
self.ComplementTable = maketrans(
"".join(list(self.complements.keys())),
"".join(list(self.complements.values())),
)
def complement(self, item):
"""Returns complement of item, using data from self.complements.
Always tries to return same type as item: if item looks like a dict,
will return list of keys.
"""
if not self.complements:
raise TypeError(
"Tried to complement sequence using alphabet without complements."
)
try:
return item.translate(self.ComplementTable)
except (AttributeError, TypeError):
item = iterable(item)
get = self.complements.get
return item.__class__([get(i, i) for i in item])
def rc(self, item):
"""Returns reverse complement of item w/ data from self.complements.
Always returns same type as input.
"""
comp = list(self.complement(item))
comp.reverse()
if isinstance(item, str):
return item.__class__("".join(comp))
else:
return item.__class__(comp)
def strand_symmetric_motifs(self, motif_length=1):
"""returns ordered pairs of strand complementary motifs"""
if not self.pairs:
raise TypeError("moltype must be DNA or RNA")
motif_set = self.alphabet.get_word_alphabet(word_length=motif_length)
motif_pairs = []
for m in motif_set:
pair = tuple(sorted([m, self.complement(m)]))
motif_pairs.append(pair)
motif_pairs = set(motif_pairs)
return motif_pairs
def __contains__(self, item):
"""A MolType contains every character it knows about."""
return item in self.All
def __iter__(self):
"""A MolType iterates only over the characters in its Alphabet.."""
return iter(self.alphabet)
def is_gap(self, char):
"""Returns True if char is a gap."""
return char in self.gaps
def is_gapped(self, sequence):
"""Returns True if sequence contains gaps."""
return self.first_gap(sequence) is not None
def is_degenerate(self, sequence):
"""Returns True if sequence contains degenerate characters."""
return self.first_degenerate(sequence) is not None
def is_valid(self, sequence):
"""Returns True if sequence contains no items that are not in self."""
try:
return self.first_invalid(sequence) is None
except:
return False
def is_strict(self, sequence):
"""Returns True if sequence contains only items in self.alphabet."""
try:
return (len(sequence) == 0) or (self.first_non_strict(sequence) is None)
except:
return False
def valid_on_alphabet(self, sequence, alphabet=None):
"""Returns True if sequence contains only items in alphabet.
alphabet can actually be anything that implements __contains__.
Defaults to self.alphabet if not supplied.
"""
if alphabet is None:
alphabet = self.alphabet
return first_index_in_set(sequence, alphabet) is not None
def first_not_in_alphabet(self, sequence, alphabet=None):
"""Returns index of first item not in alphabet, or None.
Defaults to self.alphabet if alphabet not supplied.
"""
if alphabet is None:
alphabet = self.alphabet
return first_index_in_set(sequence, alphabet)
def first_gap(self, sequence):
"""Returns the index of the first gap in the sequence, or None."""
gap = self.gaps
for i, s in enumerate(sequence):
if s in gap:
return i
return None
def first_degenerate(self, sequence):
"""Returns the index of first degenerate symbol in sequence, or None."""
degen = self.degenerates
for i, s in enumerate(sequence):
if s in degen:
return i
return None
def first_invalid(self, sequence):
"""Returns the index of first invalid symbol in sequence, or None."""
all = self.All
for i, s in enumerate(sequence):
if s not in all:
return i
return None
def first_non_strict(self, sequence):
"""Returns the index of first non-strict symbol in sequence, or None."""
monomers = self.alphabet
for i, s in enumerate(sequence):
if s not in monomers:
return i
return None
def disambiguate(self, sequence, method="strip"):
"""Returns a non-degenerate sequence from a degenerate one.
method can be 'strip' (deletes any characters not in monomers or gaps)
or 'random'(assigns the possibilities at random, using equal
frequencies).
"""
if method == "strip":
try:
return sequence.__class__(self.strip_degenerate(sequence))
except:
ambi = self.degenerates
def not_ambiguous(x):
return x not in ambi
return sequence.__class__(list(filter(not_ambiguous, sequence)))
elif method == "random":
degen = self.degenerates
result = []
for i in sequence:
if i in degen:
result.append(choice(degen[i]))
else:
result.append(i)
if isinstance(sequence, str):
return sequence.__class__("".join(result))
else:
return sequence.__class__(result)
else:
raise NotImplementedError("Got unknown method %s" % method)
def degap(self, sequence):
"""Deletes all gap characters from sequence."""
try:
trans = dict([(i, None) for i in map(ord, self.gaps)])
return sequence.__class__(sequence.translate(trans))
except AttributeError:
gap = self.gaps
def not_gap(x):
return x not in gap
return sequence.__class__(list(filter(not_gap, sequence)))
def gap_indices(self, sequence):
"""Returns list of indices of all gaps in the sequence, or []."""
gaps = self.gaps
return [i for i, s in enumerate(sequence) if s in gaps]
def gap_vector(self, sequence):
"""Returns list of bool indicating gap or non-gap in sequence."""
return list(map(self.is_gap, sequence))
def gap_maps(self, sequence):
"""Returns tuple containing dicts mapping between gapped and ungapped.
First element is a dict such that d[ungapped_coord] = gapped_coord.
Second element is a dict such that d[gapped_coord] = ungapped_coord.
Note that the dicts will be invalid if the sequence changes after the
dicts are made.
The gaps themselves are not in the dictionary, so use d.get() or test
'if pos in d' to avoid KeyErrors if looking up all elements in a gapped
sequence.
"""
ungapped = {}
gapped = {}
num_gaps = 0
for i, is_gap in enumerate(self.gap_vector(sequence)):
if is_gap:
num_gaps += 1
else:
ungapped[i] = i - num_gaps
gapped[i - num_gaps] = i
return gapped, ungapped
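    # A hedged sketch of gap_maps(); DNA is the MolType instance defined near
    # the bottom of this module.
    #
    #     >>> gapped, ungapped = DNA.gap_maps("AC--GT")
    #     >>> gapped      # ungapped coordinate -> gapped coordinate
    #     {0: 0, 1: 1, 2: 4, 3: 5}
    #     >>> ungapped    # gapped coordinate -> ungapped coordinate
    #     {0: 0, 1: 1, 4: 2, 5: 3}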
def count_gaps(self, sequence):
"""Counts the gaps in the specified sequence."""
gaps = self.gaps
gap_count = sum(1 for s in sequence if s in gaps)
return gap_count
def get_degenerate_positions(self, sequence, include_gap=True):
"""returns indices matching degenerate characters"""
degen = list(self.degenerates)
if include_gap:
degen.append(self.gap)
return [i for i, c in enumerate(sequence) if c in degen]
def count_degenerate(self, sequence):
"""Counts the degenerate bases in the specified sequence."""
degen = self.degenerates
degen_count = 0
for s in sequence:
if s in degen:
degen_count += 1
return degen_count
def possibilities(self, sequence):
"""Counts number of possible sequences matching the sequence.
Uses self.degenerates to decide how many possibilites there are at
each position in the sequence.
"""
degen = self.degenerates
count = 1
for s in sequence:
if s in degen:
count *= len(degen[s])
return count
def mw(self, sequence, method="random", delta=None):
"""Returns the molecular weight of the sequence.
If the sequence is ambiguous, uses method (random or strip) to
disambiguate the sequence.
if delta is present, uses it instead of the standard weight adjustment.
"""
if not sequence:
return 0
try:
return self.mw_calculator(sequence, delta)
except KeyError: # assume sequence was ambiguous
return self.mw_calculator(self.disambiguate(sequence, method), delta)
def can_match(self, first, second):
"""Returns True if every pos in 1st could match same pos in 2nd.
Truncates at length of shorter sequence.
gaps are only allowed to match other gaps.
"""
m = self.matches
for pair in zip(first, second):
if pair not in m:
return False
return True
def can_mismatch(self, first, second):
"""Returns True if any position in 1st could cause a mismatch with 2nd.
Truncates at length of shorter sequence.
gaps are always counted as matches.
"""
m = self.matches
if not first or not second:
return False
for pair in zip(first, second):
if not m.get(pair, None):
return True
return False
def must_match(self, first, second):
"""Returns True if all positions in 1st must match positions in second."""
return not self.can_mismatch(first, second)
def can_pair(self, first, second):
"""Returns True if first and second could pair.
Pairing occurs in reverse order, i.e. last position of second with
first position of first, etc.
Truncates at length of shorter sequence.
gaps are only allowed to pair with other gaps, and are counted as 'weak'
(same category as GU and degenerate pairs).
NOTE: second must be able to be reverse
"""
p = self.pairs
sec = list(second)
sec.reverse()
for pair in zip(first, sec):
if pair not in p:
return False
return True
def can_mispair(self, first, second):
"""Returns True if any position in 1st could mispair with 2nd.
Pairing occurs in reverse order, i.e. last position of second with
first position of first, etc.
Truncates at length of shorter sequence.
gaps are always counted as possible mispairs, as are weak pairs like GU.
"""
p = self.pairs
if not first or not second:
return False
sec = list(second)
sec.reverse()
for pair in zip(first, sec):
if not p.get(pair, None):
return True
return False
def must_pair(self, first, second):
"""Returns True if all positions in 1st must pair with second.
Pairing occurs in reverse order, i.e. last position of second with
first position of first, etc.
"""
return not self.can_mispair(first, second)
def degenerate_from_seq(self, sequence):
"""Returns least degenerate symbol corresponding to chars in sequence.
First tries to look up in self.inverse_degenerates. Then disambiguates
and tries to look up in self.inverse_degenerates. Then tries converting
the case (tries uppercase before lowercase). Raises TypeError if
conversion fails.
"""
symbols = frozenset(sequence)
# check if symbols are already known
inv_degens = self.inverse_degenerates
result = inv_degens.get(symbols, None)
if result:
return result
# then, try converting the symbols
degens = self.All
converted = set()
for sym in symbols:
for char in degens[sym]:
converted.add(char)
symbols = frozenset(converted)
result = inv_degens.get(symbols, None)
if result:
return result
# then, try converting case
symbols = frozenset([s.upper() for s in symbols])
result = inv_degens.get(symbols, None)
if result:
return result
symbols = frozenset([s.lower() for s in symbols])
result = inv_degens.get(symbols, None)
if result:
return result
# finally, try to find the minimal subset containing the symbols
symbols = frozenset([s.upper() for s in symbols])
lengths = {}
for i in inv_degens:
if symbols.issubset(i):
lengths[len(i)] = i
if lengths: # found at least some matches
shortest = min(lengths)
return inv_degens[lengths[shortest]]
# if we got here, nothing worked
raise TypeError("Cannot find degenerate char for symbols: %s" % symbols)
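# Illustrative sketch, assuming the IUPAC tables used by the DNA MolType below
# (R is the purine A/G ambiguity, N matches any base):
# >>> DNA.degenerate_from_seq("AG")
# 'R'
# >>> DNA.degenerate_from_seq("ACGT")
# 'N'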
def get_css_style(self, colors=None, font_size=12, font_family="Lucida Console"):
"""returns string of CSS classes and {character: <CSS class name>, ...}
Parameters
----------
colors
a {character: color} mapping; defaults to self._colors when omitted
font_size
font size in points
font_family
name of a monospace font
"""
colors = colors or self._colors
# !important required to stop some browsers over-riding the style sheet ...!!
template = (
'.%s_%s{font-family: "%s",monospace !important; '
"font-size: %dpt !important; color: %s; }"
)
label = self.label or ""
styles = _style_defaults[label].copy()
styles.update(
{c: "_".join([c, label]) for c in list(self.alphabet) + ["terminal_ambig"]}
)
css = [
template % (char, label, font_family, font_size, colors[char])
for char in list(styles) + ["ambig"]
]
return css, styles
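# Illustrative use of the CSS helper above: css is a list of rule strings and
# styles maps each character to its label-qualified CSS class name:
# >>> css, styles = DNA.get_css_style(font_size=10)
# >>> styles["A"]
# 'A_dna'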
ASCII = MolType(
# A default type for text read from a file etc. when we don't
# want to prematurely assume DNA or Protein.
seq_constructor=DefaultSequence,
motifset=letters,
ambiguities={},
label="text",
array_seq_constructor=ArraySequence,
)
DNA = MolType(
seq_constructor=DnaSequence,
motifset=IUPAC_DNA_chars,
ambiguities=IUPAC_DNA_ambiguities,
label="dna",
mw_calculator=DnaMW,
complements=IUPAC_DNA_ambiguities_complements,
pairs=DnaStandardPairs,
make_alphabet_group=True,
array_seq_constructor=ArrayDnaSequence,
colors=NT_COLORS,
)
RNA = MolType(
seq_constructor=RnaSequence,
motifset=IUPAC_RNA_chars,
ambiguities=IUPAC_RNA_ambiguities,
label="rna",
mw_calculator=RnaMW,
complements=IUPAC_RNA_ambiguities_complements,
pairs=RnaStandardPairs,
make_alphabet_group=True,
array_seq_constructor=ArrayRnaSequence,
colors=NT_COLORS,
)
PROTEIN = MolType(
seq_constructor=ProteinSequence,
motifset=IUPAC_PROTEIN_chars,
ambiguities=IUPAC_PROTEIN_ambiguities,
mw_calculator=ProteinMW,
make_alphabet_group=True,
array_seq_constructor=ArrayProteinSequence,
label="protein",
colors=AA_COLORS,
)
PROTEIN_WITH_STOP = MolType(
seq_constructor=ProteinWithStopSequence,
motifset=PROTEIN_WITH_STOP_chars,
ambiguities=PROTEIN_WITH_STOP_ambiguities,
mw_calculator=ProteinMW,
make_alphabet_group=True,
array_seq_constructor=ArrayProteinWithStopSequence,
label="protein_with_stop",
colors=AA_COLORS,
)
BYTES = MolType(
# A default type for arbitrary chars read from a file etc. when we don't
# want to prematurely assume _anything_ about the data.
seq_constructor=ByteSequence,
motifset=list(map(chr, list(range(256)))),
ambiguities={},
array_seq_constructor=ArraySequence,
label="bytes",
)
# the None value catches cases where a moltype has no label attribute
_style_defaults = {
getattr(mt, "label", ""): defaultdict(
_DefaultValue("ambig_%s" % getattr(mt, "label", ""))
)
for mt in (ASCII, BYTES, DNA, RNA, PROTEIN, PROTEIN_WITH_STOP, None)
}
# following is a two-state MolType useful for testing
AB = MolType(
seq_constructor=ABSequence,
motifset="ab",
ambiguities={},
array_seq_constructor=ArraySequence,
label="ab",
)
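# Illustrative sketch of defining another minimal MolType with the same
# constructor pattern used above; the "xyz" states and label are hypothetical:
# >>> XYZ = MolType(
# ...     seq_constructor=DefaultSequence,
# ...     motifset="xyz",
# ...     ambiguities={},
# ...     array_seq_constructor=ArraySequence,
# ...     label="xyz",
# ... )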
class _CodonAlphabet(Alphabet):
"""Codon alphabets are DNA TupleAlphabets with a genetic code attribute and some codon-specific methods"""
def _with(self, motifs):
a = Alphabet._with(self, motifs)
a.__class__ = type(self)
a._gc = self._gc
return a
def is_sense_codon(self, codon):
return not self._gc.is_stop(codon)
def is_stop_codon(self, codon):
return self._gc.is_stop(codon)
def get_genetic_code(self):
return self._gc
def CodonAlphabet(gc=1, include_stop_codons=False):
if isinstance(gc, (int, str)):
gc = get_code(gc)
if include_stop_codons:
motifset = list(gc.codons)
else:
motifset = list(gc.sense_codons)
motifset = [codon.upper().replace("U", "T") for codon in motifset]
a = _CodonAlphabet(motifset, moltype=DNA)
a._gc = gc
return a
def _method_codon_alphabet(ignore, *args, **kwargs):
"""If CodonAlphabet is set as a property, it gets self as extra 1st arg."""
return CodonAlphabet(*args, **kwargs)
STANDARD_CODON = CodonAlphabet()
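# Illustrative sketch, assuming the standard genetic code (code 1) returned by
# get_code(): STANDARD_CODON then holds the 61 sense codons as DNA triples.
# >>> len(STANDARD_CODON)
# 61
# >>> STANDARD_CODON.is_stop_codon("TAA")
# True
# >>> CodonAlphabet(include_stop_codons=True)   # all 64 codons, stops included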
# Modify NucleicAcidSequence to avoid circular import
NucleicAcidSequence.codon_alphabet = _method_codon_alphabet
NucleicAcidSequence.protein = PROTEIN
ArrayRnaSequence.moltype = RNA
ArrayRnaSequence.alphabet = RNA.alphabets.degen_gapped
ArrayDnaSequence.moltype = DNA
ArrayDnaSequence.alphabet = DNA.alphabets.degen_gapped
ArrayProteinSequence.moltype = PROTEIN
ArrayProteinSequence.alphabet = PROTEIN.alphabets.degen_gapped
ArrayProteinWithStopSequence.moltype = PROTEIN_WITH_STOP
ArrayProteinWithStopSequence.alphabet = PROTEIN_WITH_STOP.alphabets.degen_gapped
ArraySequence.alphabet = BYTES.alphabet
ArrayAlignment.alphabet = BYTES.alphabet
ArrayAlignment.moltype = BYTES
ArrayDnaCodonSequence.alphabet = DNA.alphabets.base.Triples
ArrayRnaCodonSequence.alphabet = RNA.alphabets.base.Triples
# Modify Alignment to avoid circular import
Alignment.moltype = ASCII
SequenceCollection.moltype = BYTES
def _make_moltype_dict():
env = globals()
moltypes = {}
for key in env:
obj = env[key]
if not isinstance(obj, MolType):
continue
if obj.label is not None:
moltypes[obj.label] = obj
return moltypes
moltypes = _make_moltype_dict()
def get_moltype(name):
"""returns the moltype with the matching name attribute"""
if isinstance(name, MolType):
return name
name = name.lower()
if name not in moltypes:
raise ValueError('unknown moltype "%s"' % name)
return moltypes[name]
def available_moltypes():
"""returns Table listing available moltypes"""
from cogent3.util.table import Table
rows = []
for n, m in moltypes.items():
v = str(m)
num = len(list(m))
if num > 10:
v = f"{v[:39]}..."
rows.append([n, num, v])
header = ["Abbreviation", "Number of states", "Moltype"]
title = "Specify a moltype by the Abbreviation (case insensitive)."
result = Table(header=header, data=rows, title=title, index_name="Abbreviation")
result = result.sorted(columns=["Number of states", "Abbreviation"])
result.format_column("Abbreviation", repr)
return result
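# Minimal usage sketch of the registry helpers defined above (illustrative only):
if __name__ == "__main__":
    dna = get_moltype("DNA")      # label lookup is case insensitive
    assert dna is DNA
    print(available_moltypes())   # Table summarising the registered moltypes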
| 31.711258 | 110 | 0.599574 |
4a20dc1fbde09375778b625c05d4cdb99acc7a13 | 476 | py | Python | apps/users/migrations/0004_employee_non_employee.py | iamjdcollins/districtwebsite | 89e2aea47ca3d221665bc23586a4374421be5800 | [
"MIT"
] | null | null | null | apps/users/migrations/0004_employee_non_employee.py | iamjdcollins/districtwebsite | 89e2aea47ca3d221665bc23586a4374421be5800 | [
"MIT"
] | null | null | null | apps/users/migrations/0004_employee_non_employee.py | iamjdcollins/districtwebsite | 89e2aea47ca3d221665bc23586a4374421be5800 | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-04 16:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_employee_in_directory'),
]
operations = [
migrations.AddField(
model_name='employee',
name='non_employee',
field=models.BooleanField(db_index=True, default=False),
),
]
| 22.666667 | 68 | 0.630252 |
4a20dccddccf13012c67541905786914710c119f | 3,673 | py | Python | djangoappengine/main/main.py | elreydetoda/feedsanitizer | 81a7cfc3f3d1312cd8c236b3e8cc9e84b0d72932 | [
"MIT"
] | 2 | 2020-03-13T15:07:55.000Z | 2020-03-14T14:45:42.000Z | djangoappengine/main/main.py | elreydetoda/feedsanitizer | 81a7cfc3f3d1312cd8c236b3e8cc9e84b0d72932 | [
"MIT"
] | null | null | null | djangoappengine/main/main.py | elreydetoda/feedsanitizer | 81a7cfc3f3d1312cd8c236b3e8cc9e84b0d72932 | [
"MIT"
] | 1 | 2020-10-16T04:11:58.000Z | 2020-10-16T04:11:58.000Z | import os
import sys
# Add parent folder to sys.path, so we can import boot.
# App Engine causes main.py to be reloaded if an exception gets raised
# on the first request of a main.py instance, so don't add project_dir multiple
# times.
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
if project_dir not in sys.path or sys.path.index(project_dir) > 0:
while project_dir in sys.path:
sys.path.remove(project_dir)
sys.path.insert(0, project_dir)
for path in sys.path[:]:
if path != project_dir and os.path.isdir(os.path.join(path, 'django')):
sys.path.remove(path)
break
# Remove the standard version of Django.
if 'django' in sys.modules and sys.modules['django'].VERSION < (1, 2):
for k in [k for k in sys.modules
if k.startswith('django.') or k == 'django']:
del sys.modules[k]
from djangoappengine.boot import setup_env, setup_logging, env_ext
setup_env()
from django.core.handlers.wsgi import WSGIHandler
from google.appengine.ext.webapp.util import run_wsgi_app
from django.conf import settings
def log_traceback(*args, **kwargs):
import logging
logging.exception('Exception in request:')
from django.core import signals
signals.got_request_exception.connect(log_traceback)
def real_main():
# Reset path and environment variables
global path_backup
try:
sys.path = path_backup[:]
except:
path_backup = sys.path[:]
os.environ.update(env_ext)
setup_logging()
# Create a Django application for WSGI
application = WSGIHandler()
# Add the staticfiles handler if necessary
if settings.DEBUG and 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
from django.contrib.staticfiles.handlers import StaticFilesHandler
application = StaticFilesHandler(application)
# Run the WSGI CGI handler with that application.
run_wsgi_app(application)
def profile_main(func):
import logging, cProfile, pstats, random, StringIO
only_forced_profile = getattr(settings, 'ONLY_FORCED_PROFILE', False)
profile_percentage = getattr(settings, 'PROFILE_PERCENTAGE', None)
if (only_forced_profile and
'profile=forced' not in os.environ.get('QUERY_STRING')) or \
(not only_forced_profile and profile_percentage and
float(profile_percentage) / 100.0 <= random.random()):
return func()
prof = cProfile.Profile()
prof = prof.runctx('func()', globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
sort_by = getattr(settings, 'SORT_PROFILE_RESULTS_BY', 'time')
if not isinstance(sort_by, (list, tuple)):
sort_by = (sort_by,)
stats.sort_stats(*sort_by)
restrictions = []
profile_pattern = getattr(settings, 'PROFILE_PATTERN', None)
if profile_pattern:
restrictions.append(profile_pattern)
max_results = getattr(settings, 'MAX_PROFILE_RESULTS', 80)
if max_results and max_results != 'all':
restrictions.append(max_results)
stats.print_stats(*restrictions)
extra_output = getattr(settings, 'EXTRA_PROFILE_OUTPUT', None) or ()
if not isinstance(sort_by, (list, tuple)):
extra_output = (extra_output,)
if 'callees' in extra_output:
stats.print_callees()
if 'callers' in extra_output:
stats.print_callers()
logging.info('Profile data:\n%s', stream.getvalue())
def make_profileable(func):
if getattr(settings, 'ENABLE_PROFILER', False):
return lambda: profile_main(func)
return func
main = make_profileable(real_main)
if __name__ == '__main__':
main()
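# Illustrative Django settings sketch for the profiling hooks read above via
# getattr(); the values shown are examples only:
#
# ENABLE_PROFILER = True              # wrap real_main in profile_main
# ONLY_FORCED_PROFILE = False         # or require ?profile=forced in the query string
# PROFILE_PERCENTAGE = 25             # sample roughly a quarter of requests
# SORT_PROFILE_RESULTS_BY = 'cumulative'
# MAX_PROFILE_RESULTS = 80
# PROFILE_PATTERN = None
# EXTRA_PROFILE_OUTPUT = ('callees', 'callers')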
| 34.980952 | 90 | 0.702968 |
4a20dcdf5e0de8acee4af7b43c390fc92b57384e | 2,981 | py | Python | generate.py | igor0/gpt-neox | 3ad61952c290669d3741c01f767d41fdee5215c5 | [
"Apache-2.0"
] | 1,871 | 2020-12-22T14:44:29.000Z | 2022-03-31T14:21:40.000Z | generate.py | igor0/gpt-neox | 3ad61952c290669d3741c01f767d41fdee5215c5 | [
"Apache-2.0"
] | 300 | 2020-12-23T17:51:43.000Z | 2022-03-30T17:34:42.000Z | generate.py | igor0/gpt-neox | 3ad61952c290669d3741c01f767d41fdee5215c5 | [
"Apache-2.0"
] | 235 | 2020-12-23T19:45:19.000Z | 2022-03-31T20:33:47.000Z |
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2021 Josh Levy-Kramer <[email protected]>. All rights reserved.
# This file is based on code by the authors denoted below and has been modified from its original version.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from megatron.utils import print_rank_0, setup_for_inference_or_eval
from megatron.text_generation_utils import generate_samples_input_from_file, generate_samples_from_prompt, generate_samples_unconditional, generate_samples_interactive
def main():
"""
Generate text/sample model
"""
model, neox_args = setup_for_inference_or_eval()
if neox_args.text_gen_type == 'unconditional':
print_rank_0('Generating samples unconditionally')
assert neox_args.sample_output_file is not None
generate_samples_unconditional(
neox_args=neox_args,
model=model,
number_of_samples=neox_args.num_samples,
output_file=neox_args.sample_output_file,
maximum_tokens = neox_args.maximum_tokens,
recompute = neox_args.recompute,
temperature = neox_args.temperature,
top_k = neox_args.top_k,
top_p = neox_args.top_p
)
elif neox_args.text_gen_type == 'input-file':
print_rank_0(f'Generating samples from input file {neox_args.sample_input_file}')
assert neox_args.sample_input_file is not None
generate_samples_input_from_file(
neox_args=neox_args,
model=model,
input_file=neox_args.sample_input_file,
output_file=neox_args.sample_output_file,
maximum_tokens = neox_args.maximum_tokens,
recompute = neox_args.recompute,
temperature = neox_args.temperature,
top_k = neox_args.top_k,
top_p = neox_args.top_p
)
elif neox_args.text_gen_type == 'interactive':
generate_samples_interactive(
neox_args=neox_args,
model=model,
recompute = neox_args.recompute,
temperature = neox_args.temperature,
maximum_tokens = neox_args.maximum_tokens,
top_k = neox_args.top_k,
top_p = neox_args.top_p
)
else:
raise ValueError(f"`text-gen-type` either not specified or not recognised: {neox_args.text_gen_type}")
if __name__ == "__main__":
main()
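# Illustrative summary of the three generation modes dispatched in main() above,
# using only the neox_args fields referenced there:
# text_gen_type = 'unconditional' -> requires sample_output_file; writes num_samples generations
# text_gen_type = 'input-file'    -> requires sample_input_file; optionally sample_output_file
# text_gen_type = 'interactive'   -> reads prompts interactively; no files required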
| 39.746667 | 167 | 0.691714 |
4a20dd2d5f49d77a0b4d479ff84edd0bc875ad0a | 3,305 | py | Python | adafruit_register/i2c_bit.py | jepler/Adafruit_CircuitPython_Register | 9f86b5179936bcb81d9765de2fe25c140b42036f | [
"MIT"
] | 1 | 2020-09-27T20:08:57.000Z | 2020-09-27T20:08:57.000Z | adafruit_register/i2c_bit.py | jepler/Adafruit_CircuitPython_Register | 9f86b5179936bcb81d9765de2fe25c140b42036f | [
"MIT"
] | null | null | null | adafruit_register/i2c_bit.py | jepler/Adafruit_CircuitPython_Register | 9f86b5179936bcb81d9765de2fe25c140b42036f | [
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2016 Scott Shawcroft for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# pylint: disable=too-few-public-methods
"""
`adafruit_register.i2c_bit`
====================================================
Single bit registers
* Author(s): Scott Shawcroft
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Register.git"
class RWBit:
"""
Single bit register that is readable and writeable.
Values are `bool`
:param int register_address: The register address to read the bit from
:param type bit: The bit index within the byte at ``register_address``
:param int register_width: The number of bytes in the register. Defaults to 1.
:param bool lsb_first: Is the first byte we read from I2C the LSB? Defaults to true
"""
def __init__(self, register_address, bit, register_width=1, lsb_first=True):
self.bit_mask = 1 << (bit % 8) # the bitmask *within* the byte!
self.buffer = bytearray(1 + register_width)
self.buffer[0] = register_address
if lsb_first:
self.byte = bit // 8 + 1 # the byte number within the buffer
else:
self.byte = register_width - (bit // 8) # the byte number within the buffer
def __get__(self, obj, objtype=None):
with obj.i2c_device as i2c:
i2c.write_then_readinto(self.buffer, self.buffer, out_end=1, in_start=1)
return bool(self.buffer[self.byte] & self.bit_mask)
def __set__(self, obj, value):
with obj.i2c_device as i2c:
i2c.write_then_readinto(self.buffer, self.buffer, out_end=1, in_start=1)
if value:
self.buffer[self.byte] |= self.bit_mask
else:
self.buffer[self.byte] &= ~self.bit_mask
i2c.write(self.buffer)
class ROBit(RWBit):
"""Single bit register that is read only. Subclass of `RWBit`.
Values are `bool`
:param int register_address: The register address to read the bit from
:param type bit: The bit index within the byte at ``register_address``
:param int register_width: The number of bytes in the register. Defaults to 1.
"""
def __set__(self, obj, value):
raise AttributeError()
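# Illustrative driver sketch using the descriptors above; the register layout,
# device address and the I2CDevice import are assumptions for the example only.
#
# from adafruit_bus_device.i2c_device import I2CDevice
#
# class ExampleDevice:
#     enable = RWBit(0x02, 3)   # read/write bit 3 of the byte at register 0x02
#     ready = ROBit(0x00, 0)    # read-only bit 0 of the byte at register 0x00
#
#     def __init__(self, i2c, address=0x40):
#         self.i2c_device = I2CDevice(i2c, address)   # attribute name expected by RWBit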
| 38.430233 | 88 | 0.688956 |
4a20dd8089114cba15741c74746eee58c62c53e9 | 3,677 | py | Python | pyocd/target/target_LPC824M201JHI33.py | aabadie/pyOCD | 5c05b0fd2d04c9de5400c3d026d13f8822d3ce3d | [
"Apache-2.0"
] | null | null | null | pyocd/target/target_LPC824M201JHI33.py | aabadie/pyOCD | 5c05b0fd2d04c9de5400c3d026d13f8822d3ce3d | [
"Apache-2.0"
] | null | null | null | pyocd/target/target_LPC824M201JHI33.py | aabadie/pyOCD | 5c05b0fd2d04c9de5400c3d026d13f8822d3ce3d | [
"Apache-2.0"
] | null | null | null |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..flash.flash import Flash
from ..core.coresight_target import (SVDFile, CoreSightTarget)
from ..core.memory_map import (FlashRegion, RamRegion, MemoryMap)
FLASH_ALGO = {
'load_address' : 0x10000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x47700a80, 0x21004842, 0x22016301, 0x63416342, 0x6b416342, 0xd0fc07c9, 0x493e6382, 0x70082002,
0x47702000, 0x47702000, 0x4c3bb5f8, 0x25002032, 0x261f444c, 0x493960a6, 0x60206065, 0x4f384449,
0x91004620, 0x696047b8, 0xd10b2800, 0x203460a6, 0x60206065, 0x60e04833, 0x99004620, 0x696047b8,
0xd0002800, 0xbdf82001, 0x4d2bb5f8, 0x444d0a84, 0x492a606c, 0x60ac2032, 0x60284449, 0x460f4e28,
0x47b04628, 0x28006968, 0x606cd10b, 0x60ac2034, 0x48246028, 0x463960e8, 0x47b04628, 0x28006968,
0x2001d000, 0xb5f8bdf8, 0x00054614, 0x6861d10e, 0x68e26820, 0x68a11840, 0x18401889, 0x18406921,
0x18406961, 0x184069a1, 0x61e04240, 0x0aa84e12, 0x2132444e, 0x60316070, 0x60b04910, 0x4f104449,
0x91004630, 0x697047b8, 0xd10e2800, 0x20336075, 0x603060b4, 0x02402001, 0x480a60f0, 0x46306130,
0x47b89900, 0x28006970, 0x2001d000, 0x0000bdf8, 0x40048040, 0x40048000, 0x00000004, 0x00000018,
0x1fff1ff1, 0x00002ee0, 0x00000000,
],
'pc_init' : 0x10000025,
'pc_erase_sector' : 0x10000089,
'pc_program_page' : 0x100000C7,
'pc_eraseAll' : 0x10000049,
# Double buffering is not supported since sector size differs from page size
'static_base' : 0x10000000 + 0x00000020 + 0x00000128,
'begin_data' : 0x10000000 + 0x00000800, # Analyzer uses a max of 128 B data (32 pages * 4 bytes / page)
'begin_stack' : 0x10000800,
'min_program_length' : 1024,
'analyzer_supported' : True,
'analyzer_address' : 0x10001000 # Analyzer 0x10001000..0x10000600
}
class Flash_lpc824(Flash):
def __init__(self, target):
super(Flash_lpc824, self).__init__(target, FLASH_ALGO)
# TODO - temporary until flash algo is rebuilt with 1K page program size
def program_page(self, flashPtr, bytes):
write_size = 512
for i in range(0, 2):
data = bytes[i * write_size : (i + 1) * write_size]
Flash.program_page(self, flashPtr + i * write_size, data)
class LPC824(CoreSightTarget):
memoryMap = MemoryMap(
FlashRegion( start=0, length=0x8000, blocksize=0x400, is_boot_memory=True,
flash_class=Flash_lpc824),
RamRegion( start=0x10000000, length=0x2000)
)
def __init__(self, link):
super(LPC824, self).__init__(link, self.memoryMap)
def reset_stop_on_reset(self, software_reset=None, map_to_user=True):
super(LPC824, self).reset_stop_on_reset(software_reset)
# Remap to use flash and set SP and SP accordingly
if map_to_user:
self.write_memory(0x40048000, 0x2, 32)
sp = self.read_memory(0x0)
pc = self.read_memory(0x4)
self.write_core_register('sp', sp)
self.write_core_register('pc', pc)
| 43.77381 | 107 | 0.717433 |
4a20dde54f36c6d053aeb419765bca818ed2beab | 4,654 | py | Python | modules/ImgTrans.py | hanzhi713/WeChat-CLI-Tool | c2ae8f20bf4bdab4dec2abfe92e8cec94e377ad2 | [
"MIT"
] | 7 | 2017-11-30T00:19:03.000Z | 2019-05-17T01:51:06.000Z | modules/ImgTrans.py | hanzhi713/WeChat-CLI-Tool | c2ae8f20bf4bdab4dec2abfe92e8cec94e377ad2 | [
"MIT"
] | null | null | null | modules/ImgTrans.py | hanzhi713/WeChat-CLI-Tool | c2ae8f20bf4bdab4dec2abfe92e8cec94e377ad2 | [
"MIT"
] | null | null | null |
from .__templates__ import Interactive
from .__config__ import multi_process, terminal_QR
import numpy as np
import time
import itchat
import io
from cmath import *
from PIL import Image
if multi_process:
from multiprocessing import Process
else:
from .__stoppable__ import Process
class ImgTrans(Interactive):
alias = "imgtf"
__author__ = "Hanzhi Zhou"
title = "Image Transformation"
description = "\n".join(["Perform arbitrary image transformation by complex mapping"])
parameters = "[function] [kernel size]"
example = "\n".join(["Example: /imgtf c:c**1.2 5\n",
"This will perform a complex mapping f(c)=c^1.2 on the image you sent then smooth it with convolution kernel of size 5*5"])
# convert the sparse matrix dictionary (mapping (x, y) to (b, g, r)) to a numpy three dimensional array
@staticmethod
def toMatrix(newDict):
global const
arrs = newDict.keys()
xRange = max(arrs, key=lambda x: x[0])[0] - min(arrs, key=lambda x: x[0])[0]
yRange = max(arrs, key=lambda x: x[1])[1] - min(arrs, key=lambda x: x[1])[1]
shiftX = xRange // 2
shiftY = yRange // 2
imgArr = np.zeros((yRange, xRange, 3), np.uint8)
for x in range(xRange):
for y in range(yRange):
imgArr[y, x, :] = np.array(newDict.get((x - shiftX, y - shiftY), [255, 255, 255]), np.uint8)
return imgArr
# interpolate the pixels with a matrix of size (size*size)
@staticmethod
def avPixels(newImg, m, n, bgr, size, c):
a = round(m)
b = round(n)
for i in range(-c, size - c):
for j in range(-c, size - c):
(x, y) = (a + i, b + j)
if newImg.get((x, y)) is None:
newImg[(x, y)] = bgr
@staticmethod
def transform(x, y, orgX, orgY, f):
c = complex(x - orgX, y - orgY)
return f(c)
@classmethod
def parse_args(cls, from_user, args):
assert len(args) >= 2, "Two parameters are required: [function] and [kernel size]"
f = eval("lambda " + args[0])
assert type(f(complex(0, 0))) == complex, "Illegal Complex Function!"
assert args[1].isdigit(), "A positive integer is required for specifying the kernel size"
return args[0], int(args[1])
def __init__(self, from_user, args):
super(self.__class__, self).__init__(from_user, args)
self.f, self.kernel = args
self.process = None
self.send_separator(from_user)
itchat.send_msg("Please send an image", from_user)
def msg_handler(self, msg):
if msg['Text'] == '/q':
if self.process is not None:
self.process.terminate()
self.finished = True
itchat.send_msg('Command interrupted', msg['FromUserName'])
self.send_separator(msg['FromUserName'])
return True
else:
itchat.send_msg("If you want to switch command, please type /q to quit current session first", msg['FromUserName'])
return False
def file_handler(self, file):
if not self.finished:
itchat.send_msg("Image Received.\nProcessing...", file['FromUserName'])
file_b = io.BytesIO(file['Text']())
self.process = Process(target=self.exec_task,
args=(file.fileName.split('.')[1], file_b, file['FromUserName'], self.f,))
self.process.start()
else:
itchat.send_msg("Processing...\nPlease be patient...", file['FromUserName'])
def exec_task(self, pic_type, file_b, from_user, f):
if multi_process:
itchat.auto_login(hotReload=True, enableCmdQR=terminal_QR)
func = eval("lambda " + f)
t = time.clock()
img = np.asarray(Image.open(file_b))
height, width = img.shape[0:2]
orgX, orgY = (width // 2, height // 2)
c = self.kernel // 2
newImg = {}
for x in range(width):
for y in range(height):
xy = ImgTrans.transform(x, y, orgX, orgY, func)
ImgTrans.avPixels(newImg, xy.real, xy.imag, img[y, x, :], self.kernel, c)
imgArr = ImgTrans.toMatrix(newImg)
buf = io.BytesIO()
Image.fromarray(imgArr).save(buf, format=pic_type, quality=75, compression_level=5)
buf.seek(0)
itchat.send_image(None, from_user, None, buf)
itchat.send_msg("Time spent = {}s".format(round(time.clock() - t, 2)), from_user)
self.send_separator(from_user)
self.finished = True
file_b.close()
buf.close()
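# Worked sketch of the mapping above: a pixel is treated as a complex offset from
# the image centre, so for f(c) = 2*c a pixel 10 right and 5 below centre lands
# 20 right and 10 below (the kernel interpolation then fills the gaps):
# >>> ImgTrans.transform(110, 105, 100, 100, lambda c: 2 * c)
# (20+10j)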
| 39.109244 | 148 | 0.587667 |
4a20de5d8952b578d0e768ba31a61df95bbfd5fc | 15,833 | py | Python | .aux/venv/lib/python3.9/site-packages/bandit/core/manager.py | sonntagsgesicht/regtest | 160ef1089f797fbade48160efb0e1a386adbada7 | [
"Apache-2.0"
] | null | null | null | .aux/venv/lib/python3.9/site-packages/bandit/core/manager.py | sonntagsgesicht/regtest | 160ef1089f797fbade48160efb0e1a386adbada7 | [
"Apache-2.0"
] | 1 | 2020-11-26T17:32:49.000Z | 2020-11-26T17:32:49.000Z | .aux/venv/lib/python3.9/site-packages/bandit/core/manager.py | sonntagsgesicht/regtest | 160ef1089f797fbade48160efb0e1a386adbada7 | [
"Apache-2.0"
] | 1 | 2021-02-22T13:55:32.000Z | 2021-02-22T13:55:32.000Z |
# -*- coding:utf-8 -*-
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# SPDX-License-Identifier: Apache-2.0
import collections
import fnmatch
import json
import logging
import os
import sys
import tokenize
import traceback
import six
from bandit.core import constants as b_constants
from bandit.core import extension_loader
from bandit.core import issue
from bandit.core import meta_ast as b_meta_ast
from bandit.core import metrics
from bandit.core import node_visitor as b_node_visitor
from bandit.core import test_set as b_test_set
LOG = logging.getLogger(__name__)
class BanditManager(object):
scope = []
def __init__(self, config, agg_type, debug=False, verbose=False,
quiet=False, profile=None, ignore_nosec=False):
'''Get logger, config, AST handler, and result store ready
:param config: config options object
:type config: bandit.core.BanditConfig
:param agg_type: aggregation type
:param debug: Whether to show debug messages or not
:param verbose: Whether to show verbose output
:param quiet: Whether to only show output in the case of an error
:param profile_name: Optional name of profile to use (from cmd line)
:param ignore_nosec: Whether to ignore #nosec or not
:return:
'''
self.debug = debug
self.verbose = verbose
self.quiet = quiet
if not profile:
profile = {}
self.ignore_nosec = ignore_nosec
self.b_conf = config
self.files_list = []
self.excluded_files = []
self.b_ma = b_meta_ast.BanditMetaAst()
self.skipped = []
self.results = []
self.baseline = []
self.agg_type = agg_type
self.metrics = metrics.Metrics()
self.b_ts = b_test_set.BanditTestSet(config, profile)
# set the increment of after how many files to show progress
self.progress = b_constants.progress_increment
self.scores = []
def get_skipped(self):
ret = []
# "skip" is a tuple of name and reason, decode just the name
for skip in self.skipped:
if isinstance(skip[0], bytes):
ret.append((skip[0].decode('utf-8'), skip[1]))
else:
ret.append(skip)
return ret
def get_issue_list(self,
sev_level=b_constants.LOW,
conf_level=b_constants.LOW):
return self.filter_results(sev_level, conf_level)
def populate_baseline(self, data):
'''Populate a baseline set of issues from a JSON report
This will populate a list of baseline issues discovered from a previous
run of bandit. Later this baseline can be used to filter out the result
set, see filter_results.
'''
items = []
try:
jdata = json.loads(data)
items = [issue.issue_from_dict(j) for j in jdata["results"]]
except Exception as e:
LOG.warning("Failed to load baseline data: %s", e)
self.baseline = items
def filter_results(self, sev_filter, conf_filter):
'''Returns a list of results filtered by the baseline
This works by checking the number of results returned from each file we
process. If the number of results is different to the number reported
for the same file in the baseline, then we return all results for the
file. We can't reliably return just the new results, as line numbers
will likely have changed.
:param sev_filter: severity level filter to apply
:param conf_filter: confidence level filter to apply
'''
results = [i for i in self.results if
i.filter(sev_filter, conf_filter)]
if not self.baseline:
return results
unmatched = _compare_baseline_results(self.baseline, results)
# if it's a baseline we'll return a dictionary of issues and a list of
# candidate issues
return _find_candidate_matches(unmatched, results)
def results_count(self, sev_filter=b_constants.LOW,
conf_filter=b_constants.LOW):
'''Return the count of results
:param sev_filter: Severity level to filter lower
:param conf_filter: Confidence level to filter
:return: Number of results in the set
'''
return len(self.get_issue_list(sev_filter, conf_filter))
def output_results(self, lines, sev_level, conf_level, output_file,
output_format, template=None):
'''Outputs results from the result store
:param lines: How many surrounding lines to show per result
:param sev_level: Which severity levels to show (LOW, MEDIUM, HIGH)
:param conf_level: Which confidence levels to show (LOW, MEDIUM, HIGH)
:param output_file: File to store results
:param output_format: output format plugin name
:param template: Output template with non-terminal tags <N>
(default: {abspath}:{line}:
{test_id}[bandit]: {severity}: {msg})
:return: -
'''
try:
formatters_mgr = extension_loader.MANAGER.formatters_mgr
if output_format not in formatters_mgr:
output_format = 'screen' if sys.stdout.isatty() else 'txt'
formatter = formatters_mgr[output_format]
report_func = formatter.plugin
if output_format == 'custom':
report_func(self, fileobj=output_file, sev_level=sev_level,
conf_level=conf_level, template=template)
else:
report_func(self, fileobj=output_file, sev_level=sev_level,
conf_level=conf_level, lines=lines)
except Exception as e:
raise RuntimeError("Unable to output report using '%s' formatter: "
"%s" % (output_format, str(e)))
def discover_files(self, targets, recursive=False, excluded_paths=''):
'''Add tests directly and from a directory to the test set
:param targets: The command line list of files and directories
:param recursive: True/False - whether to add all files from dirs
:return:
'''
# We'll maintain a list of files which are added, and ones which have
# been explicitly excluded
files_list = set()
excluded_files = set()
excluded_path_globs = self.b_conf.get_option('exclude_dirs') or []
included_globs = self.b_conf.get_option('include') or ['*.py']
# if there are command line provided exclusions add them to the list
if excluded_paths:
for path in excluded_paths.split(','):
if os.path.isdir(path):
path = os.path.join(path, '*')
excluded_path_globs.append(path)
# build list of files we will analyze
for fname in targets:
# if this is a directory and recursive is set, find all files
if os.path.isdir(fname):
if recursive:
new_files, newly_excluded = _get_files_from_dir(
fname,
included_globs=included_globs,
excluded_path_strings=excluded_path_globs
)
files_list.update(new_files)
excluded_files.update(newly_excluded)
else:
LOG.warning("Skipping directory (%s), use -r flag to "
"scan contents", fname)
else:
# if the user explicitly mentions a file on command line,
# we'll scan it, regardless of whether it's in the included
# file types list
if _is_file_included(fname, included_globs,
excluded_path_globs,
enforce_glob=False):
files_list.add(fname)
else:
excluded_files.add(fname)
self.files_list = sorted(files_list)
self.excluded_files = sorted(excluded_files)
def run_tests(self):
'''Runs through all files in the scope
:return: -
'''
self._show_progress("%s [" % len(self.files_list))
# if we have problems with a file, we'll remove it from the files_list
# and add it to the skipped list instead
new_files_list = list(self.files_list)
for count, fname in enumerate(self.files_list):
LOG.debug("working on file : %s", fname)
if len(self.files_list) > self.progress:
# is it time to update the progress indicator?
if count % self.progress == 0:
self._show_progress("%s.. " % count, flush=True)
try:
if fname == '-':
sys.stdin = os.fdopen(sys.stdin.fileno(), 'rb', 0)
self._parse_file('<stdin>', sys.stdin, new_files_list)
else:
with open(fname, 'rb') as fdata:
self._parse_file(fname, fdata, new_files_list)
except IOError as e:
self.skipped.append((fname, e.strerror))
new_files_list.remove(fname)
self._show_progress("]\n", flush=True)
# reflect any files which may have been skipped
self.files_list = new_files_list
# do final aggregation of metrics
self.metrics.aggregate()
def _show_progress(self, message, flush=False):
'''Show progress on stderr
Write progress message to stderr, if number of files warrants it and
log level is high enough.
:param message: The message to write to stderr
:param flush: Whether to flush stderr after writing the message
:return:
'''
if len(self.files_list) > self.progress and \
LOG.getEffectiveLevel() <= logging.INFO:
sys.stderr.write(message)
if flush:
sys.stderr.flush()
def _parse_file(self, fname, fdata, new_files_list):
try:
# parse the current file
data = fdata.read()
lines = data.splitlines()
self.metrics.begin(fname)
self.metrics.count_locs(lines)
if self.ignore_nosec:
nosec_lines = set()
else:
try:
fdata.seek(0)
if six.PY2:
tokens = tokenize.generate_tokens(fdata.readline)
else:
tokens = tokenize.tokenize(fdata.readline)
nosec_lines = set(
lineno for toktype, tokval, (lineno, _), _, _ in tokens
if toktype == tokenize.COMMENT and
('#nosec' in tokval or '# nosec' in tokval))
except tokenize.TokenError:
nosec_lines = set()
score = self._execute_ast_visitor(fname, data, nosec_lines)
self.scores.append(score)
self.metrics.count_issues([score, ])
except KeyboardInterrupt:
sys.exit(2)
except SyntaxError:
self.skipped.append((fname,
"syntax error while parsing AST from file"))
new_files_list.remove(fname)
except Exception as e:
LOG.error("Exception occurred when executing tests against "
"%s. Run \"bandit --debug %s\" to see the full "
"traceback.", fname, fname)
self.skipped.append((fname, 'exception while scanning file'))
new_files_list.remove(fname)
LOG.debug(" Exception string: %s", e)
LOG.debug(" Exception traceback: %s", traceback.format_exc())
def _execute_ast_visitor(self, fname, data, nosec_lines):
'''Execute AST parse on each file
:param fname: The name of the file being parsed
:param data: Original file contents
:param lines: The lines of code to process
:return: The accumulated test score
'''
score = []
res = b_node_visitor.BanditNodeVisitor(fname, self.b_ma,
self.b_ts, self.debug,
nosec_lines, self.metrics)
score = res.process(data)
self.results.extend(res.tester.results)
return score
def _get_files_from_dir(files_dir, included_globs=None,
excluded_path_strings=None):
if not included_globs:
included_globs = ['*.py']
if not excluded_path_strings:
excluded_path_strings = []
files_list = set()
excluded_files = set()
for root, _, files in os.walk(files_dir):
for filename in files:
path = os.path.join(root, filename)
if _is_file_included(path, included_globs, excluded_path_strings):
files_list.add(path)
else:
excluded_files.add(path)
return files_list, excluded_files
def _is_file_included(path, included_globs, excluded_path_strings,
enforce_glob=True):
'''Determine if a file should be included based on filename
This utility function determines if a file should be included based
on the file name, a list of parsed extensions, excluded paths, and a flag
specifying whether extensions should be enforced.
:param path: Full path of file to check
:param parsed_extensions: List of parsed extensions
:param excluded_paths: List of paths (globbing supported) from which we
should not include files
:param enforce_glob: Can set to false to bypass extension check
:return: Boolean indicating whether a file should be included
'''
return_value = False
# if this is matches a glob of files we look at, and it isn't in an
# excluded path
if _matches_glob_list(path, included_globs) or not enforce_glob:
if (not _matches_glob_list(path, excluded_path_strings) and
not any(x in path for x in excluded_path_strings)):
return_value = True
return return_value
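# Illustrative behaviour of the include/exclude check above (globs via fnmatch,
# plus plain substring exclusion):
# >>> _is_file_included("pkg/app.py", ["*.py"], ["*/tests/*"])
# True
# >>> _is_file_included("pkg/tests/test_app.py", ["*.py"], ["*/tests/*"])
# False
# >>> _is_file_included("setup.cfg", ["*.py"], [], enforce_glob=False)
# True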
def _matches_glob_list(filename, glob_list):
for glob in glob_list:
if fnmatch.fnmatch(filename, glob):
return True
return False
def _compare_baseline_results(baseline, results):
"""Compare a baseline list of issues to list of results
This function compares a baseline set of issues to a current set of issues
to find results that weren't present in the baseline.
:param baseline: Baseline list of issues
:param results: Current list of issues
:return: List of unmatched issues
"""
return [a for a in results if a not in baseline]
def _find_candidate_matches(unmatched_issues, results_list):
"""Returns a dictionary with issue candidates
For example, let's say we find a new command injection issue in a file
which used to have two. Bandit can't tell which of the command injection
issues in the file are new, so it will show all three. The user should
be able to pick out the new one.
:param unmatched_issues: List of issues that weren't present before
:param results_list: Master list of current Bandit findings
:return: A dictionary with a list of candidates for each issue
"""
issue_candidates = collections.OrderedDict()
for unmatched in unmatched_issues:
issue_candidates[unmatched] = ([i for i in results_list if
unmatched == i])
return issue_candidates
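# Illustrative baseline workflow tying the pieces above together (sketch only;
# config must be a BanditConfig and "file" is just an example aggregation type):
# >>> mgr = BanditManager(config, "file")
# >>> mgr.discover_files(["project/"], recursive=True)
# >>> mgr.run_tests()
# >>> mgr.populate_baseline(open("baseline.json").read())
# >>> mgr.filter_results(b_constants.LOW, b_constants.LOW)
# {<new issue>: [<candidate matches in the current results>], ...}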
| 37.968825 | 79 | 0.604434 |
4a20e01b8630e0fd4453470d9f4910ddea4a2456 | 8,782 | py | Python | src/sagemaker_containers/_transformer.py | uditbhatia/sagemaker-containers | 3c499c8a4e00c7ff7486a4632c9330b5ea2313d3 | [
"Apache-2.0"
] | null | null | null | src/sagemaker_containers/_transformer.py | uditbhatia/sagemaker-containers | 3c499c8a4e00c7ff7486a4632c9330b5ea2313d3 | [
"Apache-2.0"
] | null | null | null | src/sagemaker_containers/_transformer.py | uditbhatia/sagemaker-containers | 3c499c8a4e00c7ff7486a4632c9330b5ea2313d3 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import json
import textwrap
import traceback
from six.moves import http_client
from sagemaker_containers import _encoders, _env, _errors, _functions, _worker
def default_model_fn(model_dir):
"""Function responsible to load the model.
For more information about model loading https://github.com/aws/sagemaker-python-sdk#model-loading.
Args:
model_dir (str): The directory where model files are stored.
Returns:
(obj) the loaded model.
"""
raise NotImplementedError(textwrap.dedent("""
Please provide a model_fn implementation.
See documentation for model_fn at https://github.com/aws/sagemaker-python-sdk
"""))
def default_input_fn(input_data, content_type):
"""Takes request data and de-serializes the data into an object for prediction.
When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server,
the model server receives two pieces of information:
- The request Content-Type, for example "application/json"
- The request data, which is at most 5 MB (5 * 1024 * 1024 bytes) in size.
The input_fn is responsible to take the request data and pre-process it before prediction.
Args:
input_data (obj): the request data.
content_type (str): the request Content-Type.
Returns:
(obj): data ready for prediction.
"""
return _encoders.decode(input_data, content_type)
def default_predict_fn(data, model):
"""Function responsible for model predictions.
Args:
model (obj): model loaded by model_fn
data: de-serializes data returned by input_fn
Returns:
(obj): data ready for prediction.
"""
raise NotImplementedError(textwrap.dedent("""
Please provide a predict_fn implementation.
See documentation for predict_fn at https://github.com/aws/sagemaker-python-sdk
"""))
def default_output_fn(prediction, accept):
"""Function responsible to serialize the prediction for the response.
Args:
prediction (obj): prediction returned by predict_fn .
accept (str): accept content-type expected by the client.
Returns:
(worker.Response): a Flask response object with the following args:
* Args:
response: the serialized data to return
accept: the content-type that the data was transformed to.
"""
return _worker.Response(_encoders.encode(prediction, accept), accept)
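# Illustrative user-module sketch implementing the serving contract documented
# above; joblib and the model file name are placeholders for the example:
#
# def model_fn(model_dir):
#     return joblib.load(os.path.join(model_dir, "model.joblib"))
#
# def input_fn(input_data, content_type):
#     return _encoders.decode(input_data, content_type)
#
# def predict_fn(data, model):
#     return model.predict(data)
#
# def output_fn(prediction, accept):
#     return _worker.Response(_encoders.encode(prediction, accept), accept)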
class Transformer(object):
"""The Transformer is a proxy between the worker and the framework transformation functions.
It implements the default framework functions for serving.
Examples:
>>>import os
>>>from sagemaker_containers import _env, _modules, _transformer
>>>import Keras
>>>ServingEnv = _env.ServingEnv()
>>>
>>>def predict_fn(data, model):
>>> return model.predict(data)
>>>
>>>def model_fn(model_dir):
>>> return Keras.models.load_model(os.path.join(model_dir, 'minimlmodel'))
>>>
>>>transformer = _transformer.Transformer(predict_fn=predict_fn, model_fn=model_fn)
>>>
>>>mod = _modules.download_and_import(ServingEnv.module_dir, ServingEnv.module_name)
>>>transformer.load_user_fns(mod)
"""
def __init__(self, model_fn=None, input_fn=None, predict_fn=None, output_fn=None,
transform_fn=None, error_class=_errors.ClientError):
"""Default constructor. Wraps the any non default framework function in an error class to isolate
framework from user errors.
Args:
model_fn (fn): Function responsible to load the model.
input_fn (fn): Takes request data and de-serializes the data into an object for prediction.
predict_fn (fn): Function responsible for model predictions.
output_fn (fn): Function responsible to serialize the prediction for the response.
transform_fn (fn): Function responsible for taking input data and returning a prediction
as a serialized response. This function takes the place of ``input_fn``,
``predict_fn``, and ``output_fn``.
error_class (Exception): Error class used to separate framework and user errors.
"""
self._model = None
self._model_fn = _functions.error_wrapper(model_fn, error_class) if model_fn else default_model_fn
if transform_fn and (input_fn or predict_fn or output_fn):
raise ValueError('Cannot use transform_fn implementation with input_fn, predict_fn, and/or output_fn')
if transform_fn is not None:
self._transform_fn = _functions.error_wrapper(transform_fn, error_class)
else:
self._transform_fn = self._default_transform_fn
self._input_fn = _functions.error_wrapper(input_fn, error_class) if input_fn else default_input_fn
self._predict_fn = _functions.error_wrapper(predict_fn, error_class) if predict_fn else default_predict_fn
self._output_fn = _functions.error_wrapper(output_fn, error_class) if output_fn else default_output_fn
self._error_class = error_class
def initialize(self): # type: () -> None
"""Execute any initialization necessary to start making predictions with the Transformer.
The default implementation is used to load the model.
This function is called by sagemaker_containers.beta.framework.worker.Worker,
before starting the Flask application.
The gunicorn server forks multiple workers, executing multiple Flask applications in parallel.
This function will be called once per each worker.
It does not have return type or arguments.
"""
self._model = self._model_fn(_env.model_dir)
def transform(self): # type: () -> _worker.Response
"""Take a request with input data, deserialize it, make a prediction, and return a
serialized response.
Returns:
sagemaker_containers.beta.framework.worker.Response: a Flask response object with
the following args:
* response: the serialized data to return
* accept: the content type that the data was serialized into
"""
request = _worker.Request()
result = self._transform_fn(self._model, request.content, request.content_type, request.accept)
if isinstance(result, tuple):
# transforms tuple in Response for backwards compatibility
return _worker.Response(response=result[0], accept=result[1])
return result
def _default_transform_fn(self, model, content, content_type, accept):
"""Make predictions against the model and return a serialized response.
This serves as the default implementation of transform_fn, used when the user has not
implemented one themselves.
Args:
model (obj): model loaded by model_fn.
content: request content.
content_type (str): the request Content-Type.
accept (str): accept content-type expected by the client.
Returns:
sagemaker_containers.beta.framework.worker.Response or tuple:
the serialized response data and its content type, either as a Response object or
a tuple of the form (response_data, content_type)
"""
try:
data = self._input_fn(content, content_type)
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE)
prediction = self._predict_fn(data, model)
try:
result = self._output_fn(prediction, accept)
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.NOT_ACCEPTABLE)
return result
def _error_response(self, error, status_code):
body = json.dumps({'error': error.__class__.__name__,
'error-message': str(error),
'stack-trace': traceback.format_exc()})
return _worker.Response(response=body, status=status_code)
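# Illustrative alternative: a single transform_fn may replace input_fn,
# predict_fn and output_fn (the constructor above rejects mixing both styles):
#
# def transform_fn(model, content, content_type, accept):
#     data = _encoders.decode(content, content_type)
#     prediction = model.predict(data)
#     return _worker.Response(_encoders.encode(prediction, accept), accept)
#
# transformer = Transformer(model_fn=model_fn, transform_fn=transform_fn)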
| 40.846512 | 114 | 0.686062 |
4a20e066c34b6d78fb1764d2feb7ec119a5b3dc3 | 13,471 | py | Python | sdk/python/pulumi_openstack/vpnaas/ip_sec_policy.py | ederst/pulumi-openstack | e11af3f9ade3bc0de8b3feb9db5a9e86ad5ba989 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_openstack/vpnaas/ip_sec_policy.py | ederst/pulumi-openstack | e11af3f9ade3bc0de8b3feb9db5a9e86ad5ba989 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-03-11T09:01:32.000Z | 2021-03-11T09:01:32.000Z | sdk/python/pulumi_openstack/vpnaas/ip_sec_policy.py | ederst/pulumi-openstack | e11af3f9ade3bc0de8b3feb9db5a9e86ad5ba989 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['IpSecPolicy']
class IpSecPolicy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auth_algorithm: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
encapsulation_mode: Optional[pulumi.Input[str]] = None,
encryption_algorithm: Optional[pulumi.Input[str]] = None,
lifetimes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpSecPolicyLifetimeArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
pfs: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
transform_protocol: Optional[pulumi.Input[str]] = None,
value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a V2 Neutron IPSec policy resource within OpenStack.
## Example Usage
```python
import pulumi
import pulumi_openstack as openstack
policy1 = openstack.vpnaas.IpSecPolicy("policy1")
```
## Import
Policies can be imported using the `id`, e.g.
```sh
$ pulumi import openstack:vpnaas/ipSecPolicy:IpSecPolicy policy_1 832cb7f3-59fe-40cf-8f64-8350ffc03272
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] auth_algorithm: The authentication hash algorithm. Valid values are sha1, sha256, sha384, sha512.
Default is sha1. Changing this updates the algorithm of the existing policy.
:param pulumi.Input[str] description: The human-readable description for the policy.
Changing this updates the description of the existing policy.
:param pulumi.Input[str] encapsulation_mode: The encapsulation mode. Valid values are tunnel and transport. Default is tunnel.
Changing this updates the existing policy.
:param pulumi.Input[str] encryption_algorithm: The encryption algorithm. Valid values are 3des, aes-128, aes-192 and so on.
The default value is aes-128. Changing this updates the existing policy.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpSecPolicyLifetimeArgs']]]] lifetimes: The lifetime of the security association. Consists of Unit and Value.
:param pulumi.Input[str] name: The name of the policy. Changing this updates the name of
the existing policy.
:param pulumi.Input[str] pfs: The perfect forward secrecy mode. Valid values are Group2, Group5 and Group14. Default is Group5.
Changing this updates the existing policy.
:param pulumi.Input[str] region: The region in which to obtain the V2 Networking client.
A Networking client is needed to create an IPSec policy. If omitted, the
`region` argument of the provider is used. Changing this creates a new
policy.
:param pulumi.Input[str] tenant_id: The owner of the policy. Required if admin wants to
create a policy for another project. Changing this creates a new policy.
:param pulumi.Input[str] transform_protocol: The transform protocol. Valid values are ESP, AH and AH-ESP.
Changing this updates the existing policy. Default is ESP.
:param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional options.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['auth_algorithm'] = auth_algorithm
__props__['description'] = description
__props__['encapsulation_mode'] = encapsulation_mode
__props__['encryption_algorithm'] = encryption_algorithm
__props__['lifetimes'] = lifetimes
__props__['name'] = name
__props__['pfs'] = pfs
__props__['region'] = region
__props__['tenant_id'] = tenant_id
__props__['transform_protocol'] = transform_protocol
__props__['value_specs'] = value_specs
super(IpSecPolicy, __self__).__init__(
'openstack:vpnaas/ipSecPolicy:IpSecPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
auth_algorithm: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
encapsulation_mode: Optional[pulumi.Input[str]] = None,
encryption_algorithm: Optional[pulumi.Input[str]] = None,
lifetimes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpSecPolicyLifetimeArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
pfs: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
transform_protocol: Optional[pulumi.Input[str]] = None,
value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None) -> 'IpSecPolicy':
"""
Get an existing IpSecPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] auth_algorithm: The authentication hash algorithm. Valid values are sha1, sha256, sha384, sha512.
Default is sha1. Changing this updates the algorithm of the existing policy.
:param pulumi.Input[str] description: The human-readable description for the policy.
Changing this updates the description of the existing policy.
:param pulumi.Input[str] encapsulation_mode: The encapsulation mode. Valid values are tunnel and transport. Default is tunnel.
Changing this updates the existing policy.
:param pulumi.Input[str] encryption_algorithm: The encryption algorithm. Valid values are 3des, aes-128, aes-192 and so on.
The default value is aes-128. Changing this updates the existing policy.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpSecPolicyLifetimeArgs']]]] lifetimes: The lifetime of the security association. Consists of Unit and Value.
:param pulumi.Input[str] name: The name of the policy. Changing this updates the name of
the existing policy.
:param pulumi.Input[str] pfs: The perfect forward secrecy mode. Valid values are Group2, Group5 and Group14. Default is Group5.
Changing this updates the existing policy.
:param pulumi.Input[str] region: The region in which to obtain the V2 Networking client.
A Networking client is needed to create an IPSec policy. If omitted, the
`region` argument of the provider is used. Changing this creates a new
policy.
:param pulumi.Input[str] tenant_id: The owner of the policy. Required if admin wants to
create a policy for another project. Changing this creates a new policy.
:param pulumi.Input[str] transform_protocol: The transform protocol. Valid values are ESP, AH and AH-ESP.
Changing this updates the existing policy. Default is ESP.
:param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional options.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["auth_algorithm"] = auth_algorithm
__props__["description"] = description
__props__["encapsulation_mode"] = encapsulation_mode
__props__["encryption_algorithm"] = encryption_algorithm
__props__["lifetimes"] = lifetimes
__props__["name"] = name
__props__["pfs"] = pfs
__props__["region"] = region
__props__["tenant_id"] = tenant_id
__props__["transform_protocol"] = transform_protocol
__props__["value_specs"] = value_specs
return IpSecPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authAlgorithm")
def auth_algorithm(self) -> pulumi.Output[str]:
"""
The authentication hash algorithm. Valid values are sha1, sha256, sha384, sha512.
Default is sha1. Changing this updates the algorithm of the existing policy.
"""
return pulumi.get(self, "auth_algorithm")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The human-readable description for the policy.
Changing this updates the description of the existing policy.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="encapsulationMode")
def encapsulation_mode(self) -> pulumi.Output[str]:
"""
The encapsulation mode. Valid values are tunnel and transport. Default is tunnel.
Changing this updates the existing policy.
"""
return pulumi.get(self, "encapsulation_mode")
@property
@pulumi.getter(name="encryptionAlgorithm")
def encryption_algorithm(self) -> pulumi.Output[str]:
"""
The encryption algorithm. Valid values are 3des, aes-128, aes-192 and so on.
The default value is aes-128. Changing this updates the existing policy.
"""
return pulumi.get(self, "encryption_algorithm")
@property
@pulumi.getter
def lifetimes(self) -> pulumi.Output[Sequence['outputs.IpSecPolicyLifetime']]:
"""
The lifetime of the security association. Consists of Unit and Value.
"""
return pulumi.get(self, "lifetimes")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the policy. Changing this updates the name of
the existing policy.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def pfs(self) -> pulumi.Output[str]:
"""
The perfect forward secrecy mode. Valid values are Group2, Group5 and Group14. Default is Group5.
Changing this updates the existing policy.
"""
return pulumi.get(self, "pfs")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The region in which to obtain the V2 Networking client.
A Networking client is needed to create an IPSec policy. If omitted, the
`region` argument of the provider is used. Changing this creates a new
policy.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[str]:
"""
The owner of the policy. Required if admin wants to
create a policy for another project. Changing this creates a new policy.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter(name="transformProtocol")
def transform_protocol(self) -> pulumi.Output[str]:
"""
The transform protocol. Valid values are ESP, AH and AH-ESP.
Changing this updates the existing policy. Default is ESP.
"""
return pulumi.get(self, "transform_protocol")
@property
@pulumi.getter(name="valueSpecs")
def value_specs(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
Map of additional options.
"""
return pulumi.get(self, "value_specs")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
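# Hedged usage sketch for the ``get`` method above. The module path, resource
# name and ID below are illustrative assumptions, not taken from this file:
#
#   import pulumi
#   import pulumi_openstack as openstack
#
#   existing = openstack.vpnaas.IpSecPolicy.get(
#       "imported-ipsec-policy",
#       id="<provider-assigned-policy-id>")
#   pulumi.export("ipsec_pfs", existing.pfs)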
| 48.110714 | 177 | 0.654443 |
4a20e07d4b1b10db8796f9d543041b4116ad4930 | 339 | py | Python | auctions/migrations/0010_auto_20201216_2221.py | juannajul/CS50Ecommerce | d3e8b07b4f3266f99075d408c42019426d6b7f07 | [
"MIT"
] | null | null | null | auctions/migrations/0010_auto_20201216_2221.py | juannajul/CS50Ecommerce | d3e8b07b4f3266f99075d408c42019426d6b7f07 | [
"MIT"
] | null | null | null | auctions/migrations/0010_auto_20201216_2221.py | juannajul/CS50Ecommerce | d3e8b07b4f3266f99075d408c42019426d6b7f07 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-12-17 02:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('auctions', '0009_auction_winner'),
]
operations = [
migrations.AlterModelOptions(
name='bid',
options={'ordering': ('bid',)},
),
]
| 18.833333 | 47 | 0.578171 |
4a20e091d257e92a398bc6fbddf4de28b5dc4a6c | 2,026 | py | Python | config/urls.py | zhukovvlad/beerhunt-project | e841f4946c08275e9d189605ffe9026d6657d63f | [
"MIT"
] | null | null | null | config/urls.py | zhukovvlad/beerhunt-project | e841f4946c08275e9d189605ffe9026d6657d63f | [
"MIT"
] | null | null | null | config/urls.py | zhukovvlad/beerhunt-project | e841f4946c08275e9d189605ffe9026d6657d63f | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
urlpatterns = [
# path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path("", include("beerhunter.my_syte.urls", namespace="site")),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("beerhunter.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
path(
'beers/',
include('beerhunter.beers.urls', namespace='beers')
),
path(
'breweries/',
include('beerhunter.breweries.urls', namespace='breweries')
),
path(
'hops/',
include('beerhunter.hops.urls', namespace='hops')
),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 33.213115 | 86 | 0.630306 |
4a20e0e608e7b3a2c1730d310fd90880d5354170 | 2,286 | py | Python | emmet-core/emmet/core/symmetry.py | JaGeo/emmet | db01498d1136fc499961277f0b0edce3b9ddf386 | [
"BSD-3-Clause-LBNL"
] | null | null | null | emmet-core/emmet/core/symmetry.py | JaGeo/emmet | db01498d1136fc499961277f0b0edce3b9ddf386 | [
"BSD-3-Clause-LBNL"
] | null | null | null | emmet-core/emmet/core/symmetry.py | JaGeo/emmet | db01498d1136fc499961277f0b0edce3b9ddf386 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from typing import Any, Dict
from pydantic import BaseModel, Field
from pymatgen.core import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer, spglib
from emmet.core.settings import EmmetSettings
from emmet.core.utils import ValueEnum
SETTINGS = EmmetSettings()
class CrystalSystem(ValueEnum):
"""
The crystal system of the lattice
"""
tri = "Triclinic"
mono = "Monoclinic"
ortho = "Orthorhombic"
tet = "Tetragonal"
trig = "Trigonal"
hex_ = "Hexagonal"
cubic = "Cubic"
class SymmetryData(BaseModel):
"""
Defines a symmetry data set for materials documents
"""
crystal_system: CrystalSystem = Field(
None, title="Crystal System", description="The crystal system for this lattice"
)
symbol: str = Field(
None,
title="Space Group Symbol",
description="The spacegroup symbol for the lattice",
)
number: int = Field(
None,
title="Space Group Number",
description="The spacegroup number for the lattice",
)
point_group: str = Field(
None, title="Point Group Symbol", description="The point group for the lattice"
)
symprec: float = Field(
None,
title="Symmetry Finding Precision",
description="The precision given to spglib to determine the symmetry of this lattice",
)
version: str = Field(None, title="SPGLib version")
@classmethod
def from_structure(cls, structure: Structure) -> "SymmetryData":
symprec = SETTINGS.SYMPREC
sg = SpacegroupAnalyzer(structure, symprec=symprec)
symmetry: Dict[str, Any] = {"symprec": symprec}
if not sg.get_symmetry_dataset():
sg = SpacegroupAnalyzer(structure, 1e-3, 1)
symmetry["symprec"] = 1e-3
symmetry.update(
{
"source": "spglib",
"symbol": sg.get_space_group_symbol(),
"number": sg.get_space_group_number(),
"point_group": sg.get_point_group_symbol(),
"crystal_system": CrystalSystem(sg.get_crystal_system().title()),
"hall": sg.get_hall(),
"version": spglib.__version__,
}
)
return SymmetryData(**symmetry)
| 27.878049 | 94 | 0.62336 |
4a20e0f159e2ab0f0e6790966bba8cc48fcc8d1a | 1,139 | py | Python | xyz12/pf-auth/pf_auth/model/operator.py | problemfighter/pf-flask-react | ac38e3fa0fac0765eea7ec3c03937f93c55a9815 | [
"Apache-2.0"
] | 3 | 2021-11-22T07:57:23.000Z | 2022-01-15T09:08:50.000Z | xyz12/pf-auth/pf_auth/model/operator.py | problemfighter/pf-flask-react | ac38e3fa0fac0765eea7ec3c03937f93c55a9815 | [
"Apache-2.0"
] | null | null | null | xyz12/pf-auth/pf_auth/model/operator.py | problemfighter/pf-flask-react | ac38e3fa0fac0765eea7ec3c03937f93c55a9815 | [
"Apache-2.0"
] | null | null | null | from pf_auth.common.password_util import get_password_hash, validate_password
from pf_sqlalchemy.db.orm import Base, database
class Operator(Base):
firstName = database.Column("first_name", database.String(100))
lastName = database.Column("last_name", database.String(100))
name = database.Column("name", database.String(100))
email = database.Column("email", database.String(100), unique=True, index=True)
username = database.Column("username", database.String(100), unique=True, index=True)
password_hash = database.Column("password_hash", database.String(150), nullable=False, index=True)
isVerified = database.Column("is_verified", database.Boolean, default=True)
token = database.Column("token", database.String(200))
tokens = database.relationship('OperatorToken', backref='operator', lazy=True)
@property
def password(self):
return self.password_hash
@password.setter
def password(self, password):
self.password_hash = get_password_hash(password)
def verify_password(self, password) -> bool:
return validate_password(password, self.password_hash)
| 43.807692 | 102 | 0.737489 |
4a20e2977d57238d22e875d2bb102955f3714cee | 5,226 | py | Python | workflow/scripts/convert_refseq_to_prokka_gff.py | microbial-pangenomes-lab/2021_ecoli_pathogenicity | f25925c21679e2f89692ae3cfa512060c8bc04bf | [
"MIT"
] | 1 | 2022-01-14T07:21:08.000Z | 2022-01-14T07:21:08.000Z | workflow/scripts/convert_refseq_to_prokka_gff.py | microbial-pangenomes-lab/2021_ecoli_pathogenicity | f25925c21679e2f89692ae3cfa512060c8bc04bf | [
"MIT"
] | null | null | null | workflow/scripts/convert_refseq_to_prokka_gff.py | microbial-pangenomes-lab/2021_ecoli_pathogenicity | f25925c21679e2f89692ae3cfa512060c8bc04bf | [
"MIT"
] | 1 | 2022-03-10T12:02:34.000Z | 2022-03-10T12:02:34.000Z | import sys, os
import argparse
import gffutils as gff
from io import StringIO
from Bio import SeqIO
def clean_gff_string(gff_string):
splitlines = gff_string.splitlines()
lines_to_delete = []
for index in range(len(splitlines)):
if '##sequence-region' in splitlines[index]:
lines_to_delete.append(index)
for index in sorted(lines_to_delete, reverse=True):
del splitlines[index]
cleaned_gff = "\n".join(splitlines)
return cleaned_gff
def convert(gfffile, outputfile, fastafile, is_ignore_overlapping):
#Split file and parse
with open(gfffile, 'r') as infile:
lines = infile.read().replace(',','')
if fastafile is None:
split = lines.split('##FASTA')
if len(split) != 2:
print("Problem reading GFF3 file: ", gfffile)
raise RuntimeError("Error reading GFF3 input!")
else:
with open(fastafile, 'r') as infile:
fasta_lines = infile.read()
split = [lines, fasta_lines]
with StringIO(split[1]) as temp_fasta:
sequences = list(SeqIO.parse(temp_fasta, 'fasta'))
for seq in sequences:
seq.description = ""
parsed_gff = gff.create_db(clean_gff_string(split[0]),
dbfn=":memory:",
force=True,
keep_order=False,
merge_strategy="create_unique",
sort_attribute_values=True,
from_string=True)
with open(outputfile, 'w') as outfile:
# write gff part
outfile.write("##gff-version 3\n")
for seq in sequences:
outfile.write(
" ".join(["##sequence-region", seq.id, "1",
str(len(seq.seq))]) + "\n")
prev_chrom = ""
prev_end = -1
ids = set()
seen = set()
seq_order = []
for entry in parsed_gff.all_features(featuretype=(),
order_by=('seqid', 'start')):
entry.chrom = entry.chrom.split()[0]
# skip non CDS
if "CDS" not in entry.featuretype: continue
# skip overlapping CDS if option is set
if entry.chrom == prev_chrom and entry.start < prev_end and is_ignore_overlapping:
continue
            # skip CDS that don't appear to be complete or that have a premature stop codon
premature_stop = False
for sequence_index in range(len(sequences)):
scaffold_id = sequences[sequence_index].id
if scaffold_id == entry.seqid:
gene_sequence = sequences[sequence_index].seq[(
entry.start - 1):entry.stop]
if (len(gene_sequence) % 3 > 0) or (len(gene_sequence) <
34):
premature_stop = True
break
if entry.strand == "-":
gene_sequence = gene_sequence.reverse_complement()
if "*" in str(gene_sequence.translate())[:-1]:
premature_stop = True
break
if premature_stop: continue
c = 1
while entry.attributes['locus_tag'][0] in ids:
entry.attributes['locus_tag'][0] += "." + str(c)
c += 1
ids.add(entry.attributes['locus_tag'][0])
entry.attributes['ID'][0] = entry.attributes['locus_tag'][0]
prev_chrom = entry.chrom
prev_end = entry.end
if entry.chrom not in seen:
seq_order.append(entry.chrom)
seen.add(entry.chrom)
print(entry, file=outfile)
# write fasta part
outfile.write("##FASTA\n")
sequences = [
seq for x in seq_order for seq in sequences if seq.id == x
]
if len(sequences) != len(seen):
raise RuntimeError("Mismatch between fasta and GFF!")
SeqIO.write(sequences, outfile, "fasta")
return
def main():
parser = argparse.ArgumentParser(
description='Converts refseq GFF3 to prokka format.')
parser.add_argument('-g',
'--gff',
dest='gff',
type=str,
required=True,
help='input gff file name')
parser.add_argument(
'-f',
'--fasta',
dest='fasta',
type=str,
default=None,
help='input fasta file name (if separate from the GFF)')
parser.add_argument('-o',
'--out',
dest='out',
type=str,
required=True,
help='output file name')
parser.add_argument(
'--is_ignore_overlapping',
action="store_true",
help="set to ignore CDS that overlap (that's common in bacteria)")
args = parser.parse_args()
convert(args.gff, args.out, args.fasta, args.is_ignore_overlapping)
return
if __name__ == '__main__':
main()
| 33.935065 | 94 | 0.516265 |
4a20e37679cc40d6a48571a8c5a66c9fa8852e99 | 1,396 | py | Python | database/__init__.py | c3wien/Poke-That-Gmeind | 4481e81d74cff311e7e64b0c8b5643b9d6bbe31e | [
"MIT"
] | null | null | null | database/__init__.py | c3wien/Poke-That-Gmeind | 4481e81d74cff311e7e64b0c8b5643b9d6bbe31e | [
"MIT"
] | null | null | null | database/__init__.py | c3wien/Poke-That-Gmeind | 4481e81d74cff311e7e64b0c8b5643b9d6bbe31e | [
"MIT"
] | null | null | null | import atexit
from tempfile import gettempdir
from os.path import join
from sqlalchemy import create_engine, exc, event, select
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from config import DB_USER, DB_PASSWORD, DB_HOST, DB_NAME
if all([DB_USER, DB_PASSWORD, DB_HOST, DB_NAME]):
db_path = "mysql+pymysql://{}:{}@{}/{}?charset=utf8".format(DB_USER, DB_PASSWORD, DB_HOST, DB_NAME)
else:
db_path = "sqlite:///" + join(gettempdir(), "luftfilterbegehren.db")
engine = create_engine(db_path, convert_unicode=True, pool_recycle=3600)
atexit.register(lambda engine: engine.dispose(), engine)
db_session = scoped_session(sessionmaker(bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
from . import models
Base.metadata.create_all(bind=engine)
@event.listens_for(engine, "engine_connect")
def ping_connection(connection, branch):
if branch:
return
save_should_close_with_result = connection.should_close_with_result
connection.should_close_with_result = False
try:
connection.scalar(select([1]))
except exc.DBAPIError as err:
if err.connection_invalidated:
connection.scalar(select([1]))
else:
raise
finally:
connection.should_close_with_result = save_should_close_with_result
| 31.022222 | 103 | 0.743553 |
4a20e37c26924dcbe502727f6968c166da881f0c | 339 | py | Python | lib/west_tools/tool_tests/__init__.py | poharrison/westpa | 8618ab598f9bb38a7bc1479932f5332b137dfcbc | [
"MIT"
] | 140 | 2015-01-07T23:30:36.000Z | 2022-03-28T17:15:30.000Z | lib/west_tools/tool_tests/__init__.py | burntyellow/westpa | 9dc62478fcef0001b9c038cd56a40b6be1b9d64a | [
"MIT"
] | 157 | 2015-01-03T03:38:36.000Z | 2022-03-31T14:12:16.000Z | lib/west_tools/tool_tests/__init__.py | burntyellow/westpa | 9dc62478fcef0001b9c038cd56a40b6be1b9d64a | [
"MIT"
] | 56 | 2015-01-02T21:21:40.000Z | 2022-03-03T16:27:54.000Z | from westpa import rc, h5io
data_manager = rc.get_data_manager()
##Store west.h5 file in RAM for testing
west_file_name = 'west.h5'
west_file = h5io.WESTPAH5File(west_file_name, driver='core', backing_store=False)
data_manager.we_h5file = west_file
data_manager.we_h5file_version = int(west_file['/'].attrs['west_file_format_version'])
| 30.818182 | 86 | 0.79351 |
4a20e3c2add43eb8ca6cbb03a40e8b2a755c5f9e | 232 | py | Python | sonosrestapi/music_serve_account.py | hallo02/sonos-rest-api-wrapper | 6d2dca7dcf4b778c5cda57703df5f1c8eacb58e9 | [
"MIT"
] | null | null | null | sonosrestapi/music_serve_account.py | hallo02/sonos-rest-api-wrapper | 6d2dca7dcf4b778c5cda57703df5f1c8eacb58e9 | [
"MIT"
] | null | null | null | sonosrestapi/music_serve_account.py | hallo02/sonos-rest-api-wrapper | 6d2dca7dcf4b778c5cda57703df5f1c8eacb58e9 | [
"MIT"
] | 1 | 2019-10-13T09:45:35.000Z | 2019-10-13T09:45:35.000Z | class Music_serive_account:
def __init__(self, id , user_id_hash_code, is_guest, service):
self.id = id
self.user_id_hash_code = user_id_hash_code
        self.is_guest = is_guest
self.service = service
| 29 | 66 | 0.681034 |
4a20e40281c5dc041885023b89fbe038568a2273 | 334 | py | Python | InfoExtractor/doc-extractor/__init__.py | flysky2008/hub_dlcp | fcc7ad722a79f325048009d3b5700f6fe1492de7 | [
"Apache-2.0"
] | null | null | null | InfoExtractor/doc-extractor/__init__.py | flysky2008/hub_dlcp | fcc7ad722a79f325048009d3b5700f6fe1492de7 | [
"Apache-2.0"
] | null | null | null | InfoExtractor/doc-extractor/__init__.py | flysky2008/hub_dlcp | fcc7ad722a79f325048009d3b5700f6fe1492de7 | [
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here
from ner_tf_BiLSTM import data
from ner_tf_BiLSTM import eval
from ner_tf_BiLSTM import main
from ner_tf_BiLSTM import model
from ner_tf_BiLSTM import utils
__all__ = { data, eval, main, model, utils}
| 22.266667 | 43 | 0.832335 |
4a20e415ed06aed8433c062f57a173761e2ef5d8 | 3,563 | py | Python | backtrader/backtrader/indicators/lrsi.py | harshabakku/live-back-testing-trader | 1fd69c7598dc15bea740f160eed886f396bcba2c | [
"MIT"
] | 1 | 2021-07-14T22:04:08.000Z | 2021-07-14T22:04:08.000Z | backtrader/backtrader/indicators/lrsi.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | null | null | null | backtrader/backtrader/indicators/lrsi.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | 3 | 2021-03-07T16:29:40.000Z | 2022-03-17T21:42:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import PeriodN
__all__ = ['LaguerreRSI', 'LRSI', 'LaguerreFilter', 'LAGF']
class LaguerreRSI(PeriodN):
'''
    Defined by John F. Ehlers in `Cybernetic Analysis for Stocks and Futures`,
2004, published by Wiley. `ISBN: 978-0-471-46307-8`
    The Laguerre RSI tries to implement a better RSI by providing a sort of
*Time Warp without Time Travel* using a Laguerre filter. This provides for
faster reactions to price changes
``gamma`` is meant to have values between ``0.2`` and ``0.8``, with the
best balance found theoretically at the default of ``0.5``
'''
alias = ('LRSI',)
lines = ('lrsi',)
params = (
('gamma', 0.5),
('period', 6),
)
plotinfo = dict(
plotymargin=0.15,
plotyticks=[0.0, 0.2, 0.5, 0.8, 1.0]
)
l0, l1, l2, l3 = 0.0, 0.0, 0.0, 0.0
def next(self):
l0_1 = self.l0 # cache previous intermediate values
l1_1 = self.l1
l2_1 = self.l2
g = self.p.gamma # avoid more lookups
self.l0 = l0 = (1.0 - g) * self.data + g * l0_1
self.l1 = l1 = -g * l0 + l0_1 + g * l1_1
self.l2 = l2 = -g * l1 + l1_1 + g * l2_1
self.l3 = l3 = -g * l2 + l2_1 + g * self.l3
cu = 0.0
cd = 0.0
if l0 >= l1:
cu = l0 - l1
else:
cd = l1 - l0
if l1 >= l2:
cu += l1 - l2
else:
cd += l2 - l1
if l2 >= l3:
cu += l2 - l3
else:
cd += l3 - l2
den = cu + cd
self.lines.lrsi[0] = 1.0 if not den else cu / den
class LaguerreFilter(PeriodN):
'''
    Defined by John F. Ehlers in `Cybernetic Analysis for Stocks and Futures`,
2004, published by Wiley. `ISBN: 978-0-471-46307-8`
``gamma`` is meant to have values between ``0.2`` and ``0.8``, with the
best balance found theoretically at the default of ``0.5``
'''
alias = ('LAGF',)
lines = ('lfilter',)
params = (('gamma', 0.5),)
plotinfo = dict(subplot=False)
l0, l1, l2, l3 = 0.0, 0.0, 0.0, 0.0
def next(self):
l0_1 = self.l0 # cache previous intermediate values
l1_1 = self.l1
l2_1 = self.l2
g = self.p.gamma # avoid more lookups
self.l0 = l0 = (1.0 - g) * self.data + g * l0_1
self.l1 = l1 = -g * l0 + l0_1 + g * l1_1
self.l2 = l2 = -g * l1 + l1_1 + g * l2_1
self.l3 = l3 = -g * l2 + l2_1 + g * self.l3
self.lines.lfilter[0] = (l0 + (2 * l1) + (2 * l2) + l3) / 6
| 31.254386 | 79 | 0.546169 |
4a20e48b38248954f6d7fc46dd09cfa5711c4795 | 4,722 | py | Python | loveshare/settings.py | reduceweight/loveshare | 570250272b3b5b36e682ebf4b6deb5f83f350386 | [
"MIT"
] | 1 | 2018-04-11T05:41:43.000Z | 2018-04-11T05:41:43.000Z | loveshare/settings.py | reduceweight/loveshare | 570250272b3b5b36e682ebf4b6deb5f83f350386 | [
"MIT"
] | 8 | 2018-07-15T13:45:20.000Z | 2022-03-11T23:17:36.000Z | loveshare/settings.py | reduceweight/loveshare | 570250272b3b5b36e682ebf4b6deb5f83f350386 | [
"MIT"
] | null | null | null | """
Django settings for loveshare project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os, sys, datetime
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,os.path.join(BASE_DIR,"apps"))
root = environ.Path(BASE_DIR)
public_root = root.path('public/')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
## Environment variables
# reading .env file
# environ.Env.read_env()
env = environ.Env(
# set casting, default value
DEBUG=(bool, True)
)
APP_ENV = env('APP_ENV', default='docker')
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY', default='%cb0n^gkm9x5&c1rpwb-gxx7yhim_$#2apv7z5kj8b(w^hgfoa')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'core.apps.CoreConfig',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'loveshare.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'loveshare.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='sqlite:///db.sqlite3')
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
MEDIA_ROOT = env('APP_MEDIA_ROOT', default=public_root('media'))
MEDIA_URL = '/media/'
STATIC_ROOT = env('APP_STATIC_ROOT', default=public_root('static'))
STATIC_URL = '/static/'
# REST_FRAMEWORK
# http://www.django-rest-framework.org/
REST_FRAMEWORK = {
'DATETIME_FORMAT': '%Y-%m-%d %H:%M:%S',
    # Pagination
'DEFAULT_PAGINATION_CLASS': 'core.pagination.PageNumberPagination',
'PAGE_SIZE': 20,
    # Filtering
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.backends.DjangoFilterBackend',),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle'
),
'DEFAULT_THROTTLE_RATES': {
'anon': '100/min',
'user': '1000/min'
},
    # Authentication
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'core.permissions.IsAdminOrIsOwner',
)
}
JWT_AUTH = {
'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=5),
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=5),
}
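# Hedged example environment values (placeholders only; django-environ reads
# these from the process environment or a .env file if read_env() is enabled):
#   DEBUG=False
#   SECRET_KEY=replace-me-in-production
#   DATABASE_URL=mysql://loveshare:password@db:3306/loveshare
#   APP_ENV=docker
#   APP_MEDIA_ROOT=/srv/loveshare/media
#   APP_STATIC_ROOT=/srv/loveshare/static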
| 30.464516 | 95 | 0.702457 |
4a20e4c07861531a6a40f48209f5deefdef7b43f | 3,221 | py | Python | fast_image_classification/train.py | Franciutte/FastImageClassification | c82601665ff09e2dd5690a3445f07690181d6899 | [
"MIT"
] | 10 | 2020-04-18T13:21:44.000Z | 2022-03-26T12:00:18.000Z | fast_image_classification/train.py | Franciutte/FastImageClassification | c82601665ff09e2dd5690a3445f07690181d6899 | [
"MIT"
] | 5 | 2020-10-12T09:04:14.000Z | 2022-02-10T01:14:49.000Z | fast_image_classification/train.py | Franciutte/FastImageClassification | c82601665ff09e2dd5690a3445f07690181d6899 | [
"MIT"
] | 8 | 2020-04-06T08:30:05.000Z | 2022-03-26T12:00:31.000Z | import pandas as pd
import yaml
import argparse
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from fast_image_classification.models import get_model_classification
from fast_image_classification.training_utilities import (
dataframe_to_list_samples,
batch_generator,
)
def train_from_csv(csv_path, data_config_path, training_config_path):
df = pd.read_csv(csv_path)
train, val = train_test_split(df, test_size=0.2, random_state=1337)
with open(data_config_path, "r") as f:
data_config = yaml.load(f, yaml.SafeLoader)
with open(training_config_path, "r") as f:
training_config = yaml.load(f, yaml.SafeLoader)
train_samples = dataframe_to_list_samples(
train,
binary_targets=data_config["targets"],
base_path=data_config["images_base_path"],
image_name_col=data_config["image_name_col"],
)
val_samples = dataframe_to_list_samples(
val,
binary_targets=data_config["targets"],
base_path=data_config["images_base_path"],
image_name_col=data_config["image_name_col"],
)
model = get_model_classification(
input_shape=tuple(data_config["input_shape"]),
n_classes=len(data_config["targets"]),
)
train_gen = batch_generator(
train_samples,
resize_size=data_config["resize_shape"],
augment=training_config["use_augmentation"],
)
val_gen = batch_generator(val_samples, resize_size=data_config["resize_shape"])
checkpoint = ModelCheckpoint(
training_config["model_path"],
monitor="val_loss",
verbose=1,
save_best_only=True,
mode="min",
)
reduce = ReduceLROnPlateau(monitor="val_loss", mode="min", patience=10, min_lr=1e-7)
early = EarlyStopping(monitor="val_loss", mode="min", patience=30)
model.fit_generator(
train_gen,
steps_per_epoch=len(train_samples) // training_config["batch_size"],
validation_data=val_gen,
validation_steps=len(val_samples) // training_config["batch_size"],
epochs=training_config["epochs"],
callbacks=[checkpoint, reduce, early],
)
if __name__ == "__main__":
"""
python train.py --csv_path "../example/data.csv" \
--data_config_path "../example/data_config.yaml" \
--training_config_path "../example/training_config.yaml"
"""
parser = argparse.ArgumentParser()
parser.add_argument("--csv_path", help="csv_path", default="../example/data.csv")
parser.add_argument(
"--data_config_path",
help="data_config_path",
default="../example/data_config.yaml",
)
parser.add_argument(
"--training_config_path",
help="training_config_path",
default="../example/training_config.yaml",
)
args = parser.parse_args()
csv_path = args.csv_path
data_config_path = args.data_config_path
training_config_path = args.training_config_path
train_from_csv(
csv_path=csv_path,
data_config_path=data_config_path,
training_config_path=training_config_path,
)
| 33.206186 | 88 | 0.687054 |
4a20e4e67bc9757b8d7437c4af6d11088de4a875 | 2,535 | py | Python | cogdl/tasks/multiplex_node_classification.py | kwyoke/cogdl | df919b4fc7db40f8b035665edbcc7ed59f9d448e | [
"MIT"
] | 1 | 2021-10-19T13:32:23.000Z | 2021-10-19T13:32:23.000Z | cogdl/tasks/multiplex_node_classification.py | LONG-9621/cogdl | 5e1d70240a3bced319b0f0e04af79acb72f65bed | [
"MIT"
] | null | null | null | cogdl/tasks/multiplex_node_classification.py | LONG-9621/cogdl | 5e1d70240a3bced319b0f0e04af79acb72f65bed | [
"MIT"
] | null | null | null | import copy
import os
import random
import warnings
from collections import defaultdict
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
from scipy import sparse as sp
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle as skshuffle
from tqdm import tqdm
from cogdl import options
from cogdl.data import Dataset
from cogdl.datasets import build_dataset
from cogdl.models import build_model, register_model
from . import BaseTask, register_task
warnings.filterwarnings("ignore")
@register_task("multiplex_node_classification")
class MultiplexNodeClassification(BaseTask):
"""Node classification task."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument("--hidden-size", type=int, default=128)
# fmt: on
def __init__(self, args, dataset=None, model=None):
super(MultiplexNodeClassification, self).__init__(args)
dataset = build_dataset(args) if dataset is None else dataset
self.data = dataset[0]
self.label_matrix = self.data.y
self.num_nodes, self.num_classes = dataset.num_nodes, dataset.num_classes
self.hidden_size = args.hidden_size
self.model = build_model(args) if model is None else model
self.args = args
self.device = torch.device('cpu' if args.cpu else 'cuda')
self.model = self.model.to(self.device)
def train(self):
G = nx.DiGraph()
G.add_edges_from(self.data.edge_index.t().tolist())
if self.args.model != "gcc":
embeddings = self.model.train(G, self.data.pos.tolist())
else:
embeddings = self.model.train(self.data)
embeddings = np.hstack((embeddings, self.data.x.numpy()))
# Select nodes which have label as training data
train_index = torch.cat((self.data.train_node, self.data.valid_node)).numpy()
test_index = self.data.test_node.numpy()
y = self.data.y.numpy()
X_train, y_train = embeddings[train_index], y[train_index]
X_test, y_test = embeddings[test_index], y[test_index]
clf = LogisticRegression()
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
test_f1 = f1_score(y_test, preds, average="micro")
return dict(f1=test_f1)
| 33.8 | 85 | 0.687179 |
4a20e55b58c0666dfd5791291a482f767bdc0a5f | 1,049 | py | Python | budget.py | kyaradawbin/accountant | 190b34da29d26a98e9e001e6c6d7dfd4b47b5fc9 | [
"MIT"
] | 1 | 2019-03-01T03:38:12.000Z | 2019-03-01T03:38:12.000Z | budget.py | kyaradawbin/accountant | 190b34da29d26a98e9e001e6c6d7dfd4b47b5fc9 | [
"MIT"
] | null | null | null | budget.py | kyaradawbin/accountant | 190b34da29d26a98e9e001e6c6d7dfd4b47b5fc9 | [
"MIT"
] | null | null | null | def budget():
salary = float(input("Please input your salary: "))
monthly_income = round(salary/12, 2)
print("Your monthly income is:", monthly_income)
print()
housing = round(monthly_income * .25, 2)
transport = round(monthly_income * .15, 2)
food = round(monthly_income * .12, 2)
savings = round(monthly_income * .10, 2)
utilities = round(monthly_income * .10, 2)
charity = round(monthly_income * .05, 2)
entertainment = round(monthly_income * .05, 2)
medical = round(monthly_income * .05, 2)
gift = round(monthly_income * .05, 2)
print("Housing:", housing)
print("Transportation:", transport)
print("Food:", food)
print("Savings:", savings)
print("Utilities:", utilities)
print("Charity:", charity)
print("Entertainment:", entertainment)
print("Medical", medical)
print("Holidays/Gifts:", gift)
print()
total = housing + transport + food + savings + utilities + charity + entertainment + medical + gift
print("Total spent:", total)
budget()
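# Worked example (illustrative): a salary of 60000 gives a monthly income of
# 5000.0, allocated as Housing 1250.0, Transportation 750.0, Food 600.0,
# Savings 500.0, Utilities 500.0, Charity 250.0, Entertainment 250.0,
# Medical 250.0 and Holidays/Gifts 250.0, for a total spent of 4600.0
# (92% of monthly income).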
| 31.787879 | 103 | 0.64633 |
4a20e5c76b6ad66c5944653030d1e864b28372c8 | 27,620 | py | Python | tests/httpx/models/test_responses.py | newvicx/httpx_extensions | 4a3d6e6789e9f328f49a3b27def20b0416cdb748 | [
"MIT"
] | null | null | null | tests/httpx/models/test_responses.py | newvicx/httpx_extensions | 4a3d6e6789e9f328f49a3b27def20b0416cdb748 | [
"MIT"
] | null | null | null | tests/httpx/models/test_responses.py | newvicx/httpx_extensions | 4a3d6e6789e9f328f49a3b27def20b0416cdb748 | [
"MIT"
] | null | null | null | import json
import pickle
import pytest
import httpx
from httpx._compat import brotli
import httpx_extensions
class StreamingBody:
def __iter__(self):
yield b"Hello, "
yield b"world!"
def streaming_body():
yield b"Hello, "
yield b"world!"
async def async_streaming_body():
yield b"Hello, "
yield b"world!"
def test_response():
response = httpx_extensions.ResponseMixin(
200,
content=b"Hello, world!",
request=httpx.Request("GET", "https://example.org"),
)
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "Hello, world!"
assert response.request.method == "GET"
assert response.request.url == "https://example.org"
assert not response.is_error
def test_response_content():
response = httpx_extensions.ResponseMixin(200, content="Hello, world!")
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "Hello, world!"
assert response.headers == {"Content-Length": "13"}
def test_response_text():
response = httpx_extensions.ResponseMixin(200, text="Hello, world!")
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "Hello, world!"
assert response.headers == {
"Content-Length": "13",
"Content-Type": "text/plain; charset=utf-8",
}
def test_response_html():
response = httpx_extensions.ResponseMixin(200, html="<html><body>Hello, world!</html></body>")
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "<html><body>Hello, world!</html></body>"
assert response.headers == {
"Content-Length": "39",
"Content-Type": "text/html; charset=utf-8",
}
def test_response_json():
response = httpx_extensions.ResponseMixin(200, json={"hello": "world"})
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.json() == {"hello": "world"}
assert response.headers == {
"Content-Length": "18",
"Content-Type": "application/json",
}
def test_raise_for_status():
request = httpx.Request("GET", "https://example.org")
# 2xx status codes are not an error.
response = httpx_extensions.ResponseMixin(200, request=request)
response.raise_for_status()
# 1xx status codes are informational responses.
response = httpx_extensions.ResponseMixin(101, request=request)
assert response.is_informational
with pytest.raises(httpx.HTTPStatusError) as exc_info:
response.raise_for_status()
assert str(exc_info.value) == (
"Informational response '101 Switching Protocols' for url 'https://example.org'\n"
"For more information check: https://httpstatuses.com/101"
)
# 3xx status codes are redirections.
headers = {"location": "https://other.org"}
response = httpx_extensions.ResponseMixin(303, headers=headers, request=request)
assert response.is_redirect
with pytest.raises(httpx.HTTPStatusError) as exc_info:
response.raise_for_status()
assert str(exc_info.value) == (
"Redirect response '303 See Other' for url 'https://example.org'\n"
"Redirect location: 'https://other.org'\n"
"For more information check: https://httpstatuses.com/303"
)
# 4xx status codes are a client error.
response = httpx_extensions.ResponseMixin(403, request=request)
assert response.is_client_error
assert response.is_error
with pytest.raises(httpx.HTTPStatusError) as exc_info:
response.raise_for_status()
assert str(exc_info.value) == (
"Client error '403 Forbidden' for url 'https://example.org'\n"
"For more information check: https://httpstatuses.com/403"
)
# 5xx status codes are a server error.
response = httpx_extensions.ResponseMixin(500, request=request)
assert response.is_server_error
assert response.is_error
with pytest.raises(httpx.HTTPStatusError) as exc_info:
response.raise_for_status()
assert str(exc_info.value) == (
"Server error '500 Internal Server Error' for url 'https://example.org'\n"
"For more information check: https://httpstatuses.com/500"
)
# Calling .raise_for_status without setting a request instance is
# not valid. Should raise a runtime error.
response = httpx_extensions.ResponseMixin(200)
with pytest.raises(RuntimeError):
response.raise_for_status()
def test_response_repr():
response = httpx_extensions.ResponseMixin(
200,
content=b"Hello, world!",
)
assert repr(response) == "<Response [200 OK]>"
def test_response_content_type_encoding():
"""
Use the charset encoding in the Content-Type header if possible.
"""
headers = {"Content-Type": "text-plain; charset=latin-1"}
content = "Latin 1: ÿ".encode("latin-1")
response = httpx_extensions.ResponseMixin(
200,
content=content,
headers=headers,
)
assert response.text == "Latin 1: ÿ"
assert response.encoding == "latin-1"
def test_response_autodetect_encoding():
"""
Autodetect encoding if there is no Content-Type header.
"""
content = "おはようございます。".encode("utf-8")
response = httpx_extensions.ResponseMixin(
200,
content=content,
)
assert response.text == "おはようございます。"
assert response.encoding is None
def test_response_fallback_to_autodetect():
"""
Fallback to autodetection if we get an invalid charset in the Content-Type header.
"""
headers = {"Content-Type": "text-plain; charset=invalid-codec-name"}
content = "おはようございます。".encode("utf-8")
response = httpx_extensions.ResponseMixin(
200,
content=content,
headers=headers,
)
assert response.text == "おはようございます。"
assert response.encoding is None
def test_response_no_charset_with_ascii_content():
"""
A response with ascii encoded content should decode correctly,
even with no charset specified.
"""
content = b"Hello, world!"
headers = {"Content-Type": "text/plain"}
response = httpx_extensions.ResponseMixin(
200,
content=content,
headers=headers,
)
assert response.status_code == 200
assert response.encoding is None
assert response.text == "Hello, world!"
def test_response_no_charset_with_utf8_content():
"""
A response with UTF-8 encoded content should decode correctly,
even with no charset specified.
"""
content = "Unicode Snowman: ☃".encode("utf-8")
headers = {"Content-Type": "text/plain"}
response = httpx_extensions.ResponseMixin(
200,
content=content,
headers=headers,
)
assert response.text == "Unicode Snowman: ☃"
assert response.encoding is None
# Does not pass with httpx.Response either
# def test_response_no_charset_with_iso_8859_1_content():
# """
# A response with ISO 8859-1 encoded content should decode correctly,
# even with no charset specified.
# """
# content = "Accented: Österreich abcdefghijklmnopqrstuzwxyz".encode("iso-8859-1")
# headers = {"Content-Type": "text/plain"}
# response = httpx_extensions.ResponseMixin(
# 200,
# content=content,
# headers=headers,
# )
# assert response.text == "Accented: Österreich abcdefghijklmnopqrstuzwxyz"
# assert response.charset_encoding is None
# assert response.apparent_encoding is not None
def test_response_no_charset_with_cp_1252_content():
"""
A response with Windows 1252 encoded content should decode correctly,
even with no charset specified.
"""
content = "Euro Currency: € abcdefghijklmnopqrstuzwxyz".encode("cp1252")
headers = {"Content-Type": "text/plain"}
response = httpx_extensions.ResponseMixin(
200,
content=content,
headers=headers,
)
assert response.text == "Euro Currency: € abcdefghijklmnopqrstuzwxyz"
assert response.charset_encoding is None
assert response.apparent_encoding is not None
def test_response_non_text_encoding():
"""
Default to apparent encoding for non-text content-type headers.
"""
headers = {"Content-Type": "image/png"}
response = httpx_extensions.ResponseMixin(
200,
content=b"xyz",
headers=headers,
)
assert response.text == "xyz"
assert response.encoding is None
def test_response_set_explicit_encoding():
headers = {
"Content-Type": "text-plain; charset=utf-8"
} # Deliberately incorrect charset
response = httpx_extensions.ResponseMixin(
200,
content="Latin 1: ÿ".encode("latin-1"),
headers=headers,
)
response.encoding = "latin-1"
assert response.text == "Latin 1: ÿ"
assert response.encoding == "latin-1"
def test_response_force_encoding():
response = httpx_extensions.ResponseMixin(
200,
content="Snowman: ☃".encode("utf-8"),
)
response.encoding = "iso-8859-1"
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "Snowman: â\x98\x83"
assert response.encoding == "iso-8859-1"
def test_read():
response = httpx_extensions.ResponseMixin(
200,
content=b"Hello, world!",
)
assert response.status_code == 200
assert response.text == "Hello, world!"
assert response.encoding is None
assert response.is_closed
content = response.read()
assert content == b"Hello, world!"
assert response.content == b"Hello, world!"
assert response.is_closed
def test_empty_read():
response = httpx_extensions.ResponseMixin(200)
assert response.status_code == 200
assert response.text == ""
assert response.encoding is None
assert response.is_closed
content = response.read()
assert content == b""
assert response.content == b""
assert response.is_closed
@pytest.mark.asyncio
async def test_aread():
response = httpx_extensions.ResponseMixin(
200,
content=b"Hello, world!",
)
assert response.status_code == 200
assert response.text == "Hello, world!"
assert response.encoding is None
assert response.is_closed
content = await response.aread()
assert content == b"Hello, world!"
assert response.content == b"Hello, world!"
assert response.is_closed
@pytest.mark.asyncio
async def test_empty_aread():
response = httpx_extensions.ResponseMixin(200)
assert response.status_code == 200
assert response.text == ""
assert response.encoding is None
assert response.is_closed
content = await response.aread()
assert content == b""
assert response.content == b""
assert response.is_closed
def test_iter_raw():
response = httpx_extensions.ResponseMixin(
200,
content=streaming_body(),
)
raw = b""
for part in response.iter_raw():
raw += part
assert raw == b"Hello, world!"
def test_iter_raw_with_chunksize():
response = httpx_extensions.ResponseMixin(200, content=streaming_body())
parts = [part for part in response.iter_raw(chunk_size=5)]
assert parts == [b"Hello", b", wor", b"ld!"]
response = httpx_extensions.ResponseMixin(200, content=streaming_body())
parts = [part for part in response.iter_raw(chunk_size=13)]
assert parts == [b"Hello, world!"]
response = httpx_extensions.ResponseMixin(200, content=streaming_body())
parts = [part for part in response.iter_raw(chunk_size=20)]
assert parts == [b"Hello, world!"]
def test_iter_raw_on_iterable():
response = httpx_extensions.ResponseMixin(
200,
content=StreamingBody(),
)
raw = b""
for part in response.iter_raw():
raw += part
assert raw == b"Hello, world!"
def test_iter_raw_on_async():
response = httpx_extensions.ResponseMixin(
200,
content=async_streaming_body(),
)
with pytest.raises(RuntimeError):
[part for part in response.iter_raw()]
def test_close_on_async():
response = httpx_extensions.ResponseMixin(
200,
content=async_streaming_body(),
)
with pytest.raises(RuntimeError):
response.close()
def test_iter_raw_increments_updates_counter():
response = httpx_extensions.ResponseMixin(200, content=streaming_body())
num_downloaded = response.num_bytes_downloaded
for part in response.iter_raw():
assert len(part) == (response.num_bytes_downloaded - num_downloaded)
num_downloaded = response.num_bytes_downloaded
@pytest.mark.asyncio
async def test_aiter_raw():
response = httpx_extensions.ResponseMixin(200, content=async_streaming_body())
raw = b""
async for part in response.aiter_raw():
raw += part
assert raw == b"Hello, world!"
@pytest.mark.asyncio
async def test_aiter_raw_with_chunksize():
response = httpx_extensions.ResponseMixin(200, content=async_streaming_body())
parts = [part async for part in response.aiter_raw(chunk_size=5)]
assert parts == [b"Hello", b", wor", b"ld!"]
response = httpx_extensions.ResponseMixin(200, content=async_streaming_body())
parts = [part async for part in response.aiter_raw(chunk_size=13)]
assert parts == [b"Hello, world!"]
response = httpx_extensions.ResponseMixin(200, content=async_streaming_body())
parts = [part async for part in response.aiter_raw(chunk_size=20)]
assert parts == [b"Hello, world!"]
@pytest.mark.asyncio
async def test_aiter_raw_on_sync():
response = httpx_extensions.ResponseMixin(
200,
content=streaming_body(),
)
with pytest.raises(RuntimeError):
[part async for part in response.aiter_raw()]
@pytest.mark.asyncio
async def test_aclose_on_sync():
response = httpx_extensions.ResponseMixin(
200,
content=streaming_body(),
)
with pytest.raises(RuntimeError):
await response.aclose()
@pytest.mark.asyncio
async def test_aiter_raw_increments_updates_counter():
response = httpx_extensions.ResponseMixin(200, content=async_streaming_body())
num_downloaded = response.num_bytes_downloaded
async for part in response.aiter_raw():
assert len(part) == (response.num_bytes_downloaded - num_downloaded)
num_downloaded = response.num_bytes_downloaded
def test_iter_bytes():
response = httpx_extensions.ResponseMixin(200, content=b"Hello, world!")
content = b""
for part in response.iter_bytes():
content += part
assert content == b"Hello, world!"
def test_iter_bytes_with_chunk_size():
response = httpx_extensions.ResponseMixin(200, content=streaming_body())
parts = [part for part in response.iter_bytes(chunk_size=5)]
assert parts == [b"Hello", b", wor", b"ld!"]
response = httpx_extensions.ResponseMixin(200, content=streaming_body())
parts = [part for part in response.iter_bytes(chunk_size=13)]
assert parts == [b"Hello, world!"]
response = httpx_extensions.ResponseMixin(200, content=streaming_body())
parts = [part for part in response.iter_bytes(chunk_size=20)]
assert parts == [b"Hello, world!"]
def test_iter_bytes_with_empty_response():
response = httpx_extensions.ResponseMixin(200, content=b"")
parts = [part for part in response.iter_bytes()]
assert parts == []
@pytest.mark.asyncio
async def test_aiter_bytes():
response = httpx_extensions.ResponseMixin(
200,
content=b"Hello, world!",
)
content = b""
async for part in response.aiter_bytes():
content += part
assert content == b"Hello, world!"
@pytest.mark.asyncio
async def test_aiter_bytes_with_chunk_size():
response = httpx_extensions.ResponseMixin(200, content=async_streaming_body())
parts = [part async for part in response.aiter_bytes(chunk_size=5)]
assert parts == [b"Hello", b", wor", b"ld!"]
response = httpx_extensions.ResponseMixin(200, content=async_streaming_body())
parts = [part async for part in response.aiter_bytes(chunk_size=13)]
assert parts == [b"Hello, world!"]
response = httpx_extensions.ResponseMixin(200, content=async_streaming_body())
parts = [part async for part in response.aiter_bytes(chunk_size=20)]
assert parts == [b"Hello, world!"]
def test_iter_text():
response = httpx_extensions.ResponseMixin(
200,
content=b"Hello, world!",
)
content = ""
for part in response.iter_text():
content += part
assert content == "Hello, world!"
def test_iter_text_with_chunk_size():
response = httpx_extensions.ResponseMixin(200, content=b"Hello, world!")
parts = [part for part in response.iter_text(chunk_size=5)]
assert parts == ["Hello", ", wor", "ld!"]
response = httpx_extensions.ResponseMixin(200, content=b"Hello, world!")
parts = [part for part in response.iter_text(chunk_size=13)]
assert parts == ["Hello, world!"]
response = httpx_extensions.ResponseMixin(200, content=b"Hello, world!")
parts = [part for part in response.iter_text(chunk_size=20)]
assert parts == ["Hello, world!"]
@pytest.mark.asyncio
async def test_aiter_text():
response = httpx_extensions.ResponseMixin(
200,
content=b"Hello, world!",
)
content = ""
async for part in response.aiter_text():
content += part
assert content == "Hello, world!"
@pytest.mark.asyncio
async def test_aiter_text_with_chunk_size():
response = httpx_extensions.ResponseMixin(200, content=b"Hello, world!")
parts = [part async for part in response.aiter_text(chunk_size=5)]
assert parts == ["Hello", ", wor", "ld!"]
response = httpx_extensions.ResponseMixin(200, content=b"Hello, world!")
parts = [part async for part in response.aiter_text(chunk_size=13)]
assert parts == ["Hello, world!"]
response = httpx_extensions.ResponseMixin(200, content=b"Hello, world!")
parts = [part async for part in response.aiter_text(chunk_size=20)]
assert parts == ["Hello, world!"]
def test_iter_lines():
response = httpx_extensions.ResponseMixin(
200,
content=b"Hello,\nworld!",
)
content = [line for line in response.iter_lines()]
assert content == ["Hello,\n", "world!"]
@pytest.mark.asyncio
async def test_aiter_lines():
response = httpx_extensions.ResponseMixin(
200,
content=b"Hello,\nworld!",
)
content = []
async for line in response.aiter_lines():
content.append(line)
assert content == ["Hello,\n", "world!"]
def test_sync_streaming_response():
response = httpx_extensions.ResponseMixin(
200,
content=streaming_body(),
)
assert response.status_code == 200
assert not response.is_closed
content = response.read()
assert content == b"Hello, world!"
assert response.content == b"Hello, world!"
assert response.is_closed
@pytest.mark.asyncio
async def test_async_streaming_response():
response = httpx_extensions.ResponseMixin(
200,
content=async_streaming_body(),
)
assert response.status_code == 200
assert not response.is_closed
content = await response.aread()
assert content == b"Hello, world!"
assert response.content == b"Hello, world!"
assert response.is_closed
def test_cannot_read_after_stream_consumed():
response = httpx_extensions.ResponseMixin(
200,
content=streaming_body(),
)
content = b""
for part in response.iter_bytes():
content += part
with pytest.raises(httpx.StreamConsumed):
response.read()
@pytest.mark.asyncio
async def test_cannot_aread_after_stream_consumed():
response = httpx_extensions.ResponseMixin(
200,
content=async_streaming_body(),
)
content = b""
async for part in response.aiter_bytes():
content += part
with pytest.raises(httpx.StreamConsumed):
await response.aread()
def test_cannot_read_after_response_closed():
response = httpx_extensions.ResponseMixin(
200,
content=streaming_body(),
)
response.close()
with pytest.raises(httpx.StreamClosed):
response.read()
@pytest.mark.asyncio
async def test_cannot_aread_after_response_closed():
response = httpx_extensions.ResponseMixin(
200,
content=async_streaming_body(),
)
await response.aclose()
with pytest.raises(httpx.StreamClosed):
await response.aread()
@pytest.mark.asyncio
async def test_elapsed_not_available_until_closed():
response = httpx_extensions.ResponseMixin(
200,
content=async_streaming_body(),
)
with pytest.raises(RuntimeError):
response.elapsed
def test_unknown_status_code():
response = httpx_extensions.ResponseMixin(
600,
)
assert response.status_code == 600
assert response.reason_phrase == ""
assert response.text == ""
def test_json_with_specified_encoding():
data = {"greeting": "hello", "recipient": "world"}
content = json.dumps(data).encode("utf-16")
headers = {"Content-Type": "application/json, charset=utf-16"}
response = httpx_extensions.ResponseMixin(
200,
content=content,
headers=headers,
)
assert response.json() == data
def test_json_with_options():
data = {"greeting": "hello", "recipient": "world", "amount": 1}
content = json.dumps(data).encode("utf-16")
headers = {"Content-Type": "application/json, charset=utf-16"}
response = httpx_extensions.ResponseMixin(
200,
content=content,
headers=headers,
)
assert response.json(parse_int=str)["amount"] == "1"
@pytest.mark.parametrize(
"encoding",
[
"utf-8",
"utf-8-sig",
"utf-16",
"utf-16-be",
"utf-16-le",
"utf-32",
"utf-32-be",
"utf-32-le",
],
)
def test_json_without_specified_charset(encoding):
data = {"greeting": "hello", "recipient": "world"}
content = json.dumps(data).encode(encoding)
headers = {"Content-Type": "application/json"}
response = httpx_extensions.ResponseMixin(
200,
content=content,
headers=headers,
)
assert response.json() == data
@pytest.mark.parametrize(
"encoding",
[
"utf-8",
"utf-8-sig",
"utf-16",
"utf-16-be",
"utf-16-le",
"utf-32",
"utf-32-be",
"utf-32-le",
],
)
def test_json_with_specified_charset(encoding):
data = {"greeting": "hello", "recipient": "world"}
content = json.dumps(data).encode(encoding)
headers = {"Content-Type": f"application/json; charset={encoding}"}
response = httpx_extensions.ResponseMixin(
200,
content=content,
headers=headers,
)
assert response.json() == data
@pytest.mark.parametrize(
"headers, expected",
[
(
{"Link": "<https://example.com>; rel='preload'"},
{"preload": {"rel": "preload", "url": "https://example.com"}},
),
(
{"Link": '</hub>; rel="hub", </resource>; rel="self"'},
{
"hub": {"url": "/hub", "rel": "hub"},
"self": {"url": "/resource", "rel": "self"},
},
),
],
)
def test_link_headers(headers, expected):
response = httpx_extensions.ResponseMixin(
200,
content=None,
headers=headers,
)
assert response.links == expected
@pytest.mark.parametrize("header_value", (b"deflate", b"gzip", b"br"))
def test_decode_error_with_request(header_value):
headers = [(b"Content-Encoding", header_value)]
body = b"test 123"
compressed_body = brotli.compress(body)[3:]
with pytest.raises(httpx.DecodingError):
httpx_extensions.ResponseMixin(
200,
headers=headers,
content=compressed_body,
)
with pytest.raises(httpx.DecodingError):
httpx_extensions.ResponseMixin(
200,
headers=headers,
content=compressed_body,
request=httpx.Request("GET", "https://www.example.org/"),
)
@pytest.mark.parametrize("header_value", (b"deflate", b"gzip", b"br"))
def test_value_error_without_request(header_value):
headers = [(b"Content-Encoding", header_value)]
body = b"test 123"
compressed_body = brotli.compress(body)[3:]
with pytest.raises(httpx.DecodingError):
httpx_extensions.ResponseMixin(200, headers=headers, content=compressed_body)
def test_response_with_unset_request():
response = httpx_extensions.ResponseMixin(200, content=b"Hello, world!")
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "Hello, world!"
assert not response.is_error
def test_set_request_after_init():
response = httpx_extensions.ResponseMixin(200, content=b"Hello, world!")
response.request = httpx.Request("GET", "https://www.example.org")
assert response.request.method == "GET"
assert response.request.url == "https://www.example.org"
def test_cannot_access_unset_request():
response = httpx_extensions.ResponseMixin(200, content=b"Hello, world!")
with pytest.raises(RuntimeError):
response.request
def test_generator_with_transfer_encoding_header():
def content():
yield b"test 123" # pragma: nocover
response = httpx_extensions.ResponseMixin(200, content=content())
assert response.headers == {"Transfer-Encoding": "chunked"}
def test_generator_with_content_length_header():
def content():
yield b"test 123" # pragma: nocover
headers = {"Content-Length": "8"}
response = httpx_extensions.ResponseMixin(200, content=content(), headers=headers)
assert response.headers == {"Content-Length": "8"}
def test_response_picklable():
response = httpx_extensions.ResponseMixin(
200,
content=b"Hello, world!",
request=httpx.Request("GET", "https://example.org"),
)
pickle_response = pickle.loads(pickle.dumps(response))
assert pickle_response.is_closed is True
assert pickle_response.is_stream_consumed is True
assert pickle_response.next_request is None
assert pickle_response.stream is not None
assert pickle_response.content == b"Hello, world!"
assert pickle_response.status_code == 200
assert pickle_response.request.url == response.request.url
assert pickle_response.extensions == {}
assert pickle_response.history == []
@pytest.mark.asyncio
async def test_response_async_streaming_picklable():
response = httpx_extensions.ResponseMixin(200, content=async_streaming_body())
pickle_response = pickle.loads(pickle.dumps(response))
with pytest.raises(httpx.ResponseNotRead):
pickle_response.content
with pytest.raises(httpx.StreamClosed):
await pickle_response.aread()
assert pickle_response.is_stream_consumed is False
assert pickle_response.num_bytes_downloaded == 0
assert pickle_response.headers == {"Transfer-Encoding": "chunked"}
response = httpx_extensions.ResponseMixin(200, content=async_streaming_body())
await response.aread()
pickle_response = pickle.loads(pickle.dumps(response))
assert pickle_response.is_stream_consumed is True
assert pickle_response.content == b"Hello, world!"
    assert pickle_response.num_bytes_downloaded == 13
| 29.196617 | 98 | 0.672556 |
4a20e6cc9c684b8d96a5e98318c1c11b41576a6a | 591 | py | Python | lib/third_party/cloud_ml_engine_sdk/dataflow/_error_filter.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | null | null | null | lib/third_party/cloud_ml_engine_sdk/dataflow/_error_filter.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | null | null | null | lib/third_party/cloud_ml_engine_sdk/dataflow/_error_filter.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | 1 | 2020-07-24T20:13:29.000Z | 2020-07-24T20:13:29.000Z | """Utilities for cleaning dataflow errors to be user friendly."""
TENSORFLOW_OP_MATCHER = "\n\nCaused by op"
def filter_tensorflow_error(error_string):
"""Removes information from a tensorflow error to hide Dataflow details.
TF appends the operation details if they exist, but the stacktrace
is not useful to the user, so we remove it if present.
Args:
error_string: PredictionError error detail, error caught during Session.run
Returns:
    error_string with only the base error message instead of the full traceback.
"""
return error_string.split(TENSORFLOW_OP_MATCHER)[0]
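# Illustrative usage of filter_tensorflow_error (a sketch added for clarity; the
# error text below is hypothetical and not produced by this module):
if __name__ == "__main__":
    _sample_error = (
        "Shape mismatch in MatMul"
        "\n\nCaused by op 'MatMul', defined at:\n  File \"model.py\", line 10"
    )
    # Everything from the "Caused by op" marker onward is stripped.
    print(filter_tensorflow_error(_sample_error))  # prints: Shape mismatch in MatMul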
| 31.105263 | 79 | 0.769882 |
4a20e7c7324697d656d802a939b4183facf0a343 | 10,549 | py | Python | airflow/utils/log/es_task_handler.py | suensummit/airflow | 37a342d0e96a91ce2d34085e225a4e86f54c4e21 | [
"Apache-2.0"
] | 1 | 2021-11-04T20:11:58.000Z | 2021-11-04T20:11:58.000Z | airflow/utils/log/es_task_handler.py | suensummit/airflow | 37a342d0e96a91ce2d34085e225a4e86f54c4e21 | [
"Apache-2.0"
] | 3 | 2020-07-07T20:39:24.000Z | 2021-09-29T17:34:46.000Z | airflow/utils/log/es_task_handler.py | suensummit/airflow | 37a342d0e96a91ce2d34085e225a4e86f54c4e21 | [
"Apache-2.0"
] | 1 | 2020-11-04T03:17:51.000Z | 2020-11-04T03:17:51.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
# Using `from elasticsearch import *` would break elasticsearch mocking used in unit test.
import elasticsearch
import pendulum
from elasticsearch_dsl import Search
from airflow.configuration import conf
from airflow.utils import timezone
from airflow.utils.helpers import parse_template_string
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.json_formatter import JSONFormatter
from airflow.utils.log.logging_mixin import LoggingMixin
class ElasticsearchTaskHandler(FileTaskHandler, LoggingMixin):
"""
    ElasticsearchTaskHandler is a Python log handler that
    reads logs from Elasticsearch. Note that logs are not directly
    indexed into Elasticsearch. Instead, the handler flushes logs
    into local files. Additional software setup is required
    to index the logs into Elasticsearch, such as using
    Filebeat and Logstash.
    To efficiently query and sort Elasticsearch results, we assume each
    log message has a field `log_id` consisting of the task instance's primary keys:
    `log_id = {dag_id}-{task_id}-{execution_date}-{try_number}`
    Log messages with a specific log_id are sorted based on `offset`,
    a unique integer that indicates each message's order.
    Timestamps alone are unreliable here because multiple log messages
    might share the same timestamp.
"""
PAGE = 0
MAX_LINE_PER_PAGE = 1000
def __init__(self, base_log_folder, filename_template,
log_id_template, end_of_log_mark,
write_stdout, json_format, json_fields,
host='localhost:9200',
es_kwargs=conf.getsection("elasticsearch_configs") or {}):
"""
:param base_log_folder: base folder to store logs locally
:param log_id_template: log id template
:param host: Elasticsearch host name
"""
super().__init__(
base_log_folder, filename_template)
self.closed = False
self.log_id_template, self.log_id_jinja_template = \
parse_template_string(log_id_template)
self.client = elasticsearch.Elasticsearch([host], **es_kwargs)
self.mark_end_on_close = True
self.end_of_log_mark = end_of_log_mark
self.write_stdout = write_stdout
self.json_format = json_format
self.json_fields = [label.strip() for label in json_fields.split(",")]
self.handler = None
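    # Illustrative construction (a sketch added for clarity; every value below is
    # hypothetical and would normally come from airflow.cfg):
    #   handler = ElasticsearchTaskHandler(
    #       base_log_folder="/tmp/airflow/logs",
    #       filename_template="{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log",
    #       log_id_template="{dag_id}-{task_id}-{execution_date}-{try_number}",
    #       end_of_log_mark="end_of_log",
    #       write_stdout=False,
    #       json_format=False,
    #       json_fields="asctime,filename,lineno,levelname,message",
    #       host="localhost:9200",
    #   )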
def _render_log_id(self, ti, try_number):
if self.log_id_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return self.log_id_jinja_template.render(**jinja_context)
if self.json_format:
execution_date = self._clean_execution_date(ti.execution_date)
else:
execution_date = ti.execution_date.isoformat()
return self.log_id_template.format(dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=execution_date,
try_number=try_number)
@staticmethod
def _clean_execution_date(execution_date):
"""
Clean up an execution date so that it is safe to query in elasticsearch
by removing reserved characters.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_reserved_characters
:param execution_date: execution date of the dag run.
"""
return execution_date.strftime("%Y_%m_%dT%H_%M_%S_%f")
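    # Example for _clean_execution_date above (illustrative input only):
    #   pendulum.datetime(2019, 1, 1, 12, 30) -> "2019_01_01T12_30_00_000000"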
def _read(self, ti, try_number, metadata=None):
"""
Endpoint for streaming log.
:param ti: task instance object
:param try_number: try_number of the task instance
        :param metadata: log metadata,
                         can be used for streaming log reading and auto-tailing.
        :return: the log message string and the updated metadata dict.
"""
if not metadata:
metadata = {'offset': 0}
if 'offset' not in metadata:
metadata['offset'] = 0
offset = metadata['offset']
log_id = self._render_log_id(ti, try_number)
logs = self.es_read(log_id, offset, metadata)
next_offset = offset if not logs else logs[-1].offset
# Ensure a string here. Large offset numbers will get JSON.parsed incorrectly
# on the client. Sending as a string prevents this issue.
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
metadata['offset'] = str(next_offset)
# end_of_log_mark may contain characters like '\n' which is needed to
# have the log uploaded but will not be stored in elasticsearch.
metadata['end_of_log'] = False if not logs \
else logs[-1].message == self.end_of_log_mark.strip()
cur_ts = pendulum.now()
# Assume end of log after not receiving new log for 5 min,
# as executor heartbeat is 1 min and there might be some
# delay before Elasticsearch makes the log available.
if 'last_log_timestamp' in metadata:
last_log_ts = timezone.parse(metadata['last_log_timestamp'])
if cur_ts.diff(last_log_ts).in_minutes() >= 5 or 'max_offset' in metadata \
and offset >= metadata['max_offset']:
metadata['end_of_log'] = True
if offset != next_offset or 'last_log_timestamp' not in metadata:
metadata['last_log_timestamp'] = str(cur_ts)
# If we hit the end of the log, remove the actual end_of_log message
# to prevent it from showing in the UI.
i = len(logs) if not metadata['end_of_log'] else len(logs) - 1
message = '\n'.join([log.message for log in logs[0:i]])
return message, metadata
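    # Illustrative return value of _read (added for clarity; values are hypothetical):
    #   ("first line\nsecond line",
    #    {"offset": "7", "end_of_log": False,
    #     "last_log_timestamp": "2019-01-01T00:05:00+00:00"})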
def es_read(self, log_id, offset, metadata):
"""
        Returns the logs matching log_id in Elasticsearch, sorted by offset.
        Returns an empty list if no log is found or there was an error.
:param log_id: the log_id of the log to read.
:type log_id: str
:param offset: the offset start to read log from.
:type offset: str
        :param metadata: log metadata, used for streaming log download.
:type metadata: dict
"""
# Offset is the unique key for sorting logs given log_id.
s = Search(using=self.client) \
.query('match_phrase', log_id=log_id) \
.sort('offset')
s = s.filter('range', offset={'gt': int(offset)})
max_log_line = s.count()
if 'download_logs' in metadata and metadata['download_logs'] and 'max_offset' not in metadata:
try:
metadata['max_offset'] = s[max_log_line - 1].execute()[-1].offset if max_log_line > 0 else 0
except Exception:
self.log.exception('Could not get current log size with log_id: {}'.format(log_id))
logs = []
if max_log_line != 0:
try:
logs = s[self.MAX_LINE_PER_PAGE * self.PAGE:self.MAX_LINE_PER_PAGE] \
.execute()
except Exception as e:
self.log.exception('Could not read log with log_id: %s, error: %s', log_id, str(e))
return logs
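    # The elasticsearch_dsl chain above is roughly equivalent to this raw query
    # body (a sketch added for clarity, not emitted verbatim by the client):
    #   {"query": {"bool": {"must": [{"match_phrase": {"log_id": log_id}}],
    #                       "filter": [{"range": {"offset": {"gt": offset}}}]}},
    #    "sort": ["offset"]}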
def set_context(self, ti):
"""
Provide task_instance context to airflow task handler.
:param ti: task instance object
"""
super().set_context(ti)
self.mark_end_on_close = not ti.raw
if self.write_stdout:
self.handler = logging.StreamHandler(stream=sys.__stdout__)
self.handler.setLevel(self.level)
if self.json_format and not ti.raw:
self.handler.setFormatter(
JSONFormatter(self.formatter._fmt, json_fields=self.json_fields, extras={
'dag_id': str(ti.dag_id),
'task_id': str(ti.task_id),
'execution_date': self._clean_execution_date(ti.execution_date),
'try_number': str(ti.try_number)}))
else:
self.handler.setFormatter(self.formatter)
else:
super().set_context(ti)
def emit(self, record):
if self.write_stdout:
self.formatter.format(record)
if self.handler is not None:
self.handler.emit(record)
else:
super().emit(record)
def flush(self):
if self.handler is not None:
self.handler.flush()
def close(self):
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
if not self.mark_end_on_close:
self.closed = True
return
# Case which context of the handler was not set.
if self.handler is None:
self.closed = True
return
# Reopen the file stream, because FileHandler.close() would be called
# first in logging.shutdown() and the stream in it would be set to None.
if self.handler.stream is None or self.handler.stream.closed:
self.handler.stream = self.handler._open()
# Mark the end of file using end of log mark,
# so we know where to stop while auto-tailing.
self.handler.stream.write(self.end_of_log_mark)
if self.write_stdout:
self.handler.close()
sys.stdout = sys.__stdout__
super().close()
self.closed = True
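    # Illustrative read loop (added for clarity; `handler` and `ti` are assumed to be
    # a configured ElasticsearchTaskHandler and a TaskInstance, respectively):
    #   message, metadata = handler._read(ti, try_number=1)
    #   while not metadata["end_of_log"]:
    #       message, metadata = handler._read(ti, try_number=1, metadata=metadata)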
| 39.657895 | 128 | 0.640345 |