max_stars_repo_path (stringlengths 3-269)|max_stars_repo_name (stringlengths 4-119)|max_stars_count (int64 0-191k)|id (stringlengths 1-7)|content (stringlengths 6-1.05M)|score (float64 0.23-5.13)|int_score (int64 0-5)|
---|---|---|---|---|---|---|
protector/tests/query_test/test_query.py | adobe/opentsdb-protector | 9 | 12794951 | # Copyright 2019 Adobe
# All Rights Reserved.
#
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it. If you have received this file from a source other than Adobe,
# then your use, modification, or distribution of it requires the prior
# written permission of Adobe.
#
import unittest
import json
from protector.query.query import OpenTSDBQuery, OpenTSDBResponse
import time
class TestQuery(unittest.TestCase):
def setUp(self):
self.response1 = "[]"
self.response2 = """
[
{
"metric": "this.metric",
"tags": {
"env": "prod",
"recipientDomain": "gmail.com",
"channel": "email"
},
"aggregateTags": [
"hostname"
],
"dps": {
"1623619500": 0,
"1623619560": 0,
"1623619620": 0
}
},
{
"metric": "this.metric",
"tags": {
"env": "prod",
"recipientDomain": "gmail.com",
"channel": "email"
},
"aggregateTags": [
"hostname"
],
"dps": {
"1623619500": 0,
"1623619560": 0,
"1623619620": 0
}
},
{
"statsSummary": {
"avgAggregationTime": 0.806912,
"avgHBaseTime": 3.874463,
"avgQueryScanTime": 5.436076,
"avgScannerTime": 3.888163,
"avgScannerUidToStringTime": 0,
"avgSerializationTime": 0.808312,
"dpsPostFilter": 145,
"dpsPreFilter": 145,
"emittedDPs": 1440,
"maxAggregationTime": 0.806912,
"maxHBaseTime": 5.170471,
"maxQueryScanTime": 5.436076,
"maxScannerUidToStringTime": 0,
"maxSerializationTime": 0.808312,
"maxUidToStringTime": 0.0255,
"processingPreWriteTime": 8.480518,
"queryIdx_00": {
"aggregationTime": 0.806912,
"avgHBaseTime": 3.874463,
"avgScannerTime": 3.888163,
"avgScannerUidToStringTime": 0,
"dpsPostFilter": 145,
"dpsPreFilter": 145,
"emittedDPs": 1440,
"groupByTime": 0,
"maxHBaseTime": 5.170471,
"maxScannerUidToStringTime": 0,
"queryIndex": 0,
"queryScanTime": 5.436076,
"rowsPostFilter": 129,
"rowsPreFilter": 129,
"saltScannerMergeTime": 0.163702,
"serializationTime": 0.808312,
"successfulScan": 20,
"uidPairsResolved": 0,
"uidToStringTime": 0.0255
},
"rowsPostFilter": 129,
"rowsPreFilter": 129,
"successfulScan": 20,
"uidPairsResolved": 0
}
}
]
"""
self.response2_ret = [
{
"metric": "this.metric",
"tags": {
"env": "prod",
"recipientDomain": "gmail.com",
"channel": "email"
},
"aggregateTags": [
"hostname"
],
"dps": {
"1623619500": 0,
"1623619560": 0,
"1623619620": 0
}
},
{
"metric": "this.metric",
"tags": {
"env": "prod",
"recipientDomain": "gmail.com",
"channel": "email"
},
"aggregateTags": [
"hostname"
],
"dps": {
"1623619500": 0,
"1623619560": 0,
"1623619620": 0
}
}
]
self.stats2 = {
"avgAggregationTime": 0.806912,
"avgHBaseTime": 3.874463,
"avgQueryScanTime": 5.436076,
"avgScannerTime": 3.888163,
"avgScannerUidToStringTime": 0,
"avgSerializationTime": 0.808312,
"dpsPostFilter": 145,
"dpsPreFilter": 145,
"emittedDPs": 1440,
"maxAggregationTime": 0.806912,
"maxHBaseTime": 5.170471,
"maxQueryScanTime": 5.436076,
"maxScannerUidToStringTime": 0,
"maxSerializationTime": 0.808312,
"maxUidToStringTime": 0.0255,
"processingPreWriteTime": 8.480518,
"rowsPostFilter": 129,
"rowsPreFilter": 129,
"successfulScan": 20,
"uidPairsResolved": 0
}
self.response3 = """
[
{
"metric": "this.metric",
"tags": {
"env": "prod",
"recipientDomain": "gmail.com",
"channel": "email"
},
"aggregateTags": [
"hostname"
],
"dps": {
"1623619500": 0,
"1623619560": 0,
"1623619620": 0
}
},
{
"metric": "this.metric",
"tags": {
"env": "prod",
"recipientDomain": "gmail.com",
"channel": "email"
},
"aggregateTags": [
"hostname"
],
"dps": {
"1623619500": 0,
"1623619560": 0,
"1623619620": 0
}
}
]
"""
def test_ok_empty_response(self):
r = OpenTSDBResponse(self.response1)
self.assertTrue(not r.get_stats())
def test_ok_normal_response(self):
r = OpenTSDBResponse(self.response2)
# expected response with summary stripped
p = json.dumps(self.response2_ret, sort_keys=True)
# test that response summary is correctly stripped
self.assertEqual(p, r.to_json(True))
# test that stats are properly collected
self.assertDictEqual(self.stats2, r.get_stats())
def test_missing_stats_response(self):
r = OpenTSDBResponse(self.response3)
# no error is raised, just logged
self.assertTrue(not r.get_stats()) | 2.046875 | 2 |
openmdao.lib/src/openmdao/lib/differentiators/fd_helper.py | swryan/OpenMDAO-Framework | 0 | 12794952 | <gh_stars>0
""" Object that can take a subsection of a model and perform finite difference
on it."""
#import cPickle
#import StringIO
from copy import deepcopy
# pylint: disable-msg=E0611,F0401
from openmdao.lib.casehandlers.api import ListCaseRecorder
from openmdao.lib.drivers.distributioncasedriver import \
DistributionCaseDriver, FiniteDifferenceGenerator
class FDhelper(object):
''' An object that takes a subsection of a model and performs a finite
difference. The cases are run with a point distribution generator. Thus,
we can take advantage of multiprocessing if it is available.
'''
def __init__(self, model, comps, wrt, outs, stepsize=1.0e-6, order=1,
form='CENTRAL'):
''' Takes a model and a list of component names in that model. The
model is deepcopied to create a copy. All but the needed comps are
removed from the model.
model: Assembly
Parent assembly of the components we want to finite difference.
comps: list( string )
List of component names that we want to finite difference as a
group.
wrt: list( string )
List of variable paths to use as finite difference inputs.
outs: list( string )
List of variable paths to return as outputs.
stepsize: float
Default stepsize to use.
order: int
Finite Difference order. Only first order is supported right now.
form: string
Choose from 'CENTRAL', 'FORWARD', and 'BACKWARD'. Default is central
differencing.
'''
# Copy model. We need to null out the reference to the parent before
# we copy.
save_parent = model.parent
model.parent = None
try:
self.model = deepcopy(model)
finally:
model.parent = save_parent
# Get rid of the comps we don't need
for item in self.model.list_containers():
if item not in comps + ['driver']:
self.model.remove(item)
# Remove all connections to the assembly boundary
bdy_inputs = self.model.list_inputs()
bdy_outputs = self.model.list_outputs()
for conn in self.model.list_connections():
if conn[0] in bdy_inputs or conn[1] in bdy_outputs:
self.model.disconnect(conn[0], conn[1])
# Distribution driver to drive the finite difference calculation
self.model.add('driver', DistributionCaseDriver())
gen = FiniteDifferenceGenerator(self.model.driver)
self.model.driver.distribution_generator = gen
self.model.driver.workflow.add(comps)
for item in wrt:
self.model.driver.add_parameter(item, low=-1e99, high=1e99,
fd_step=stepsize)
self.model.driver.case_outputs = outs
self.model.driver.ffd_order = 1
gen.num_parameters = len(wrt)
gen.form = form
gen.order = order
# Save a reference to the original model so that we can increment the
# execution counter as needed.
self.copy_source = model
# All execution counts should be reset to zero.
for comp in self.model.driver.workflow.__iter__():
comp.exec_count = 0
comp.derivative_exec_count = 0
def run(self, input_dict, output_dict):
""" Performs finite difference of our submodel with respect to wrt.
Variables are initialized with init_vals.
input_dict: dict( string : value )
Dictionary of baseline values for input parameters.
output_dict: dict( string : value )
Dictionary of baseline values for desired outputs.
"""
# Set all initial values
for varname, value in input_dict.iteritems():
self.model.set(varname, value)
self.model.driver.recorders = [ListCaseRecorder()]
if self.model.driver.distribution_generator.form != 'CENTRAL':
self.model.driver.distribution_generator.skip_baseline = True
# Calculate finite differences.
# FFAD mode is supported.
self.model.driver.calc_derivatives(first=True)
self.model.run()
# Return all needed derivatives
cases = self.model.driver.recorders[0].cases
icase = 0
derivs = {}
for out in self.model.driver.case_outputs:
derivs[out] = {}
for wrt, val in self.model.driver.get_parameters().iteritems():
if self.model.driver.distribution_generator.form == 'CENTRAL':
delx = cases[icase][wrt] - cases[icase+1][wrt]
for out in self.model.driver.case_outputs:
derivs[out][wrt] = \
(cases[icase][out] - cases[icase+1][out])/delx
icase += 2
else:
delx = cases[icase][wrt] - input_dict[wrt]
for out in self.model.driver.case_outputs:
derivs[out][wrt] = \
(cases[icase][out] - output_dict[out])/delx
icase += 1
# Add the execution count from the copies to the originals.
for comp in self.model.driver.workflow.__iter__():
source_comp = self.copy_source.get(comp.name)
source_comp.exec_count += comp.exec_count
comp.exec_count = 0
source_comp.derivative_exec_count += comp.derivative_exec_count
comp.derivative_exec_count = 0
return derivs
def list_wrt(self):
""" Returns a list of variable paths that we are differencing with
respect to.
"""
return self.model.driver.get_parameters().keys()
def list_outs(self):
""" Returns a list of variable paths that we are differencing.
"""
return self.model.driver.case_outputs
| 2.421875 | 2 |
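The FDhelper constructor docstring above already lists its arguments; the sketch below shows one plausible way to drive it. The assembly `top`, the component `comp1`, and the variable names are hypothetical, and the sketch assumes the classic OpenMDAO-Framework environment this module belongs to.

```python
# Hypothetical sketch: finite-differencing one component of an existing
# OpenMDAO-classic assembly `top`. All names here are illustrative only.
from openmdao.lib.differentiators.fd_helper import FDhelper

fd = FDhelper(top,                      # parent Assembly (assumed to exist)
              comps=['comp1'],          # components to difference as a group
              wrt=['comp1.x'],          # inputs to perturb
              outs=['comp1.y'],         # outputs to record
              stepsize=1.0e-6,
              form='FORWARD')

# Baseline values for the perturbed inputs and the recorded outputs.
inputs = {'comp1.x': 3.0}
outputs = {'comp1.y': 9.0}

derivs = fd.run(inputs, outputs)
print(derivs['comp1.y']['comp1.x'])     # d(comp1.y)/d(comp1.x)
```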
generators/app/templates/__main__.py | sbanwart/generator-morepath | 2 | 12794953 | import morepath
from .app import App
def run():
print('Running app...')
morepath.autoscan()
App.commit()
morepath.run(App())
if __name__ == '__main__':
run()
| 1.5625 | 2 |
google/appengine/datastore/datastore_stub_util.py | semk/hypergae | 1 | 12794954 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility functions shared between the file and sqlite datastore stubs."""
import md5
from google.appengine.api import datastore_types
from google.appengine.api.datastore_errors import BadRequestError
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.runtime import apiproxy_errors
def ValidateQuery(query, filters, orders, max_query_components):
"""Validate a datastore query with normalized filters, orders.
Raises an ApplicationError when any of the following conditions are violated:
- transactional queries have an ancestor
- queries that are not too large
(sum of filters, orders, ancestor <= max_query_components)
- ancestor (if any) app and namespace match query app and namespace
- kindless queries only filter on __key__ and only sort on __key__ ascending
- multiple inequality (<, <=, >, >=) filters all applied to the same property
- filters on __key__ compare to a reference in the same app and namespace as
the query
- if an inequality filter on prop X is used, the first order (if any) must
be on X
Args:
query: query to validate
filters: normalized (by datastore_index.Normalize) filters from query
orders: normalized (by datastore_index.Normalize) orders from query
max_query_components: limit on query complexity
"""
def BadRequest(message):
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST, message)
key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY
unapplied_log_timestamp_us_name = (
datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY)
if query.has_transaction():
if not query.has_ancestor():
BadRequest('Only ancestor queries are allowed inside transactions.')
num_components = len(filters) + len(orders)
if query.has_ancestor():
num_components += 1
if num_components > max_query_components:
BadRequest('query is too large. may not have more than %s filters'
' + sort orders ancestor total' % max_query_components)
if query.has_ancestor():
ancestor = query.ancestor()
if query.app() != ancestor.app():
BadRequest('query app is %s but ancestor app is %s' %
(query.app(), ancestor.app()))
if query.name_space() != ancestor.name_space():
BadRequest('query namespace is %s but ancestor namespace is %s' %
(query.name_space(), ancestor.name_space()))
ineq_prop_name = None
for filter in filters:
if filter.property_size() != 1:
BadRequest('Filter has %d properties, expected 1' %
filter.property_size())
prop = filter.property(0)
prop_name = prop.name().decode('utf-8')
if prop_name == key_prop_name:
if not prop.value().has_referencevalue():
BadRequest('%s filter value must be a Key' % key_prop_name)
ref_val = prop.value().referencevalue()
if ref_val.app() != query.app():
BadRequest('%s filter app is %s but query app is %s' %
(key_prop_name, ref_val.app(), query.app()))
if ref_val.name_space() != query.name_space():
BadRequest('%s filter namespace is %s but query namespace is %s' %
(key_prop_name, ref_val.name_space(), query.name_space()))
if (filter.op() in datastore_index.INEQUALITY_OPERATORS and
prop_name != unapplied_log_timestamp_us_name):
if ineq_prop_name is None:
ineq_prop_name = prop_name
elif ineq_prop_name != prop_name:
BadRequest(('Only one inequality filter per query is supported. '
'Encountered both %s and %s') % (ineq_prop_name, prop_name))
if ineq_prop_name is not None and orders:
first_order_prop = orders[0].property().decode('utf-8')
if first_order_prop != ineq_prop_name:
BadRequest('The first sort property must be the same as the property '
'to which the inequality filter is applied. In your query '
'the first sort property is %s but the inequality filter '
'is on %s' % (first_order_prop, ineq_prop_name))
if not query.has_kind():
for filter in filters:
prop_name = filter.property(0).name().decode('utf-8')
if (prop_name != key_prop_name and
prop_name != unapplied_log_timestamp_us_name):
BadRequest('kind is required for non-__key__ filters')
for order in orders:
prop_name = order.property().decode('utf-8')
if not (prop_name == key_prop_name and
order.direction() is datastore_pb.Query_Order.ASCENDING):
BadRequest('kind is required for all orders except __key__ ascending')
def ParseKeyFilteredQuery(filters, orders):
"""Parse queries which only allow filters and ascending-orders on __key__.
Raises exceptions for illegal queries.
Args:
filters: the normalized filters of a query.
orders: the normalized orders of a query.
Returns:
The key range (start, start_inclusive, end, end_inclusive) requested
in the query.
"""
remaining_filters = []
start_key = None
start_inclusive = False
end_key = None
end_inclusive = False
key_prop = datastore_types._KEY_SPECIAL_PROPERTY
for f in filters:
op = f.op()
if not (f.property_size() == 1 and
f.property(0).name() == key_prop and
not (op == datastore_pb.Query_Filter.IN or
op == datastore_pb.Query_Filter.EXISTS)):
remaining_filters.append(f)
continue
val = f.property(0).value()
if not val.has_referencevalue():
raise BadRequestError('__key__ kind must be compared to a key')
limit = datastore_types.FromReferenceProperty(val)
if op == datastore_pb.Query_Filter.LESS_THAN:
if end_key is None or limit <= end_key:
end_key = limit
end_inclusive = False
elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or
op == datastore_pb.Query_Filter.EQUAL):
if end_key is None or limit < end_key:
end_key = limit
end_inclusive = True
if op == datastore_pb.Query_Filter.GREATER_THAN:
if start_key is None or limit >= start_key:
start_key = limit
start_inclusive = False
elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or
op == datastore_pb.Query_Filter.EQUAL):
if start_key is None or limit > start_key:
start_key = limit
start_inclusive = True
remaining_orders = []
for o in orders:
if not (o.direction() == datastore_pb.Query_Order.ASCENDING and
o.property() == datastore_types._KEY_SPECIAL_PROPERTY):
remaining_orders.append(o)
else:
break
if remaining_filters:
raise BadRequestError(
'Only comparison filters on ' + key_prop + ' supported')
if remaining_orders:
raise BadRequestError('Only ascending order on ' + key_prop + ' supported')
return (start_key, start_inclusive, end_key, end_inclusive)
def ParseKindQuery(query, filters, orders):
"""Parse __kind__ (schema) queries.
Raises exceptions for illegal queries.
Args:
query: A Query PB.
filters: the normalized filters from query.
orders: the normalized orders from query.
Returns:
The kind range (start, start_inclusive, end, end_inclusive) requested
in the query.
"""
if query.has_ancestor():
raise BadRequestError('ancestor queries not allowed')
start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery(
filters, orders)
return (_KindKeyToString(start_kind), start_inclusive,
_KindKeyToString(end_kind), end_inclusive)
def _KindKeyToString(key):
"""Extract kind name from __kind__ key.
Raises an ApplicationError if the key is not of the form '__kind__'/name.
Args:
key: a key for a __kind__ instance, or a false value.
Returns:
kind specified by key, or key if key is a false value.
"""
if not key:
return key
key_path = key.to_path()
if (len(key_path) == 2 and key_path[0] == '__kind__' and
isinstance(key_path[1], basestring)):
return key_path[1]
raise BadRequestError('invalid Key for __kind__ table')
def ParseNamespaceQuery(query, filters, orders):
"""Parse __namespace__ queries.
Raises exceptions for illegal queries.
Args:
query: A Query PB.
filters: the normalized filters from query.
orders: the normalized orders from query.
Returns:
The kind range (start, start_inclusive, end, end_inclusive) requested
in the query.
"""
if query.has_ancestor():
raise BadRequestError('ancestor queries not allowed')
start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery(
filters, orders)
return (_NamespaceKeyToString(start_kind), start_inclusive,
_NamespaceKeyToString(end_kind), end_inclusive)
def _NamespaceKeyToString(key):
"""Extract namespace name from __namespace__ key.
Raises an ApplicationError if the key is not of the form '__namespace__'/name
or '__namespace__'/_EMPTY_NAMESPACE_ID.
Args:
key: a key for a __namespace__ instance, or a false value.
Returns:
namespace specified by key, or key if key is a false value.
"""
if not key:
return key
key_path = key.to_path()
if len(key_path) == 2 and key_path[0] == '__namespace__':
if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID:
return ''
if isinstance(key_path[1], basestring):
return key_path[1]
raise BadRequestError('invalid Key for __namespace__ table')
def SynthesizeUserId(email):
"""Return a synthetic user ID from an email address.
Note that this is not the same user ID found in the production system.
Args:
email: An email address.
Returns:
A string userid derived from the email address.
"""
user_id_digest = md5.new(email.lower()).digest()
user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
return user_id
def FillUsersInQuery(filters):
"""Fill in a synthetic user ID for all user properties in a set of filters.
Args:
filters: The normalized filters from query.
"""
for filter in filters:
for property in filter.property_list():
FillUser(property)
def FillUser(property):
"""Fill in a synthetic user ID for a user properties.
Args:
property: A Property which may have a user value.
"""
if property.value().has_uservalue():
uid = SynthesizeUserId(property.value().uservalue().email())
if uid:
property.mutable_value().mutable_uservalue().set_obfuscated_gaiaid(uid)
| 1.859375 | 2 |
cart/migrations/0004_auto_20220119_1206.py | rahulbiswas24680/Django-Ecommerce | 0 | 12794955 | <filename>cart/migrations/0004_auto_20220119_1206.py
# Generated by Django 3.2.9 on 2022-01-19 12:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shop', '0003_product_favourite_products'),
('cart', '0003_auto_20220119_0620'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='products',
),
migrations.AddField(
model_name='order',
name='product',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='shop.product'),
),
]
| 1.328125 | 1 |
moto/datasync/__init__.py | symroe/moto | 0 | 12794956 | from ..core.models import base_decorator
from .models import datasync_backends
datasync_backend = datasync_backends["us-east-1"]
mock_datasync = base_decorator(datasync_backends)
| 1.65625 | 2 |
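The module above only registers the DataSync backends and builds the `mock_datasync` decorator; a minimal, hedged sketch of how such a moto decorator is typically applied in a test follows (the test body is illustrative, and only the operations moto actually implements would work):

```python
# Illustrative use of the mock_datasync decorator defined above.
import boto3
from moto import mock_datasync

@mock_datasync
def test_datasync_client():
    client = boto3.client("datasync", region_name="us-east-1")
    # Calls on `client` are now served by moto's in-memory DataSync backend.
    ...
```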
ui/ui.py | zinuzian-portfolio/ImageLab2019-ContourFinder | 4 | 12794957 | <filename>ui/ui.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'basic.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1331, 732)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setMinimumSize(QtCore.QSize(800, 400))
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.mainLayout.setContentsMargins(50, 50, 50, 50)
self.mainLayout.setObjectName("mainLayout")
self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget)
self.imageWidget.setMinimumSize(QtCore.QSize(500, 300))
self.imageWidget.setObjectName("imageWidget")
self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget)
self.imageLayout.setContentsMargins(20, 20, 20, 20)
self.imageLayout.setObjectName("imageLayout")
self.imageLabel = DrawableQLabel(self.imageWidget)
self.imageLabel.setObjectName("imageLabel")
self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter)
self.mainLayout.addWidget(self.imageWidget)
self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget)
self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215))
self.controlPanelWidget.setObjectName("controlPanelWidget")
self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget)
self.controlPanelLayout.setContentsMargins(20, 20, 20, 20)
self.controlPanelLayout.setObjectName("controlPanelLayout")
self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget)
self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0))
self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215))
self.openFileBtn.setObjectName("openFileBtn")
self.controlPanelLayout.addWidget(self.openFileBtn)
self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget)
self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0))
self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215))
self.choosePointBtn.setObjectName("choosePointBtn")
self.controlPanelLayout.addWidget(self.choosePointBtn)
self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget)
self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0))
self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215))
self.findContourBtn.setObjectName("findContourBtn")
self.controlPanelLayout.addWidget(self.findContourBtn)
self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget)
self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0))
self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215))
self.contourOptionWidget.setObjectName("contourOptionWidget")
self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget)
self.contourOptionLayout.setContentsMargins(5, 5, 5, 5)
self.contourOptionLayout.setObjectName("contourOptionLayout")
self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget)
self.thicknessLabel.setObjectName("thicknessLabel")
self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.thicknessLabel)
self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget)
self.thicknessComboBox.setObjectName("thicknessComboBox")
self.thicknessComboBox.addItem("")
self.thicknessComboBox.addItem("")
self.thicknessComboBox.addItem("")
self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox)
self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget)
self.colorLabel.setObjectName("colorLabel")
self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel)
self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget)
self.colorBtn.setText("")
self.colorBtn.setObjectName("colorBtn")
self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn)
self.label_2 = QtWidgets.QLabel(self.contourOptionWidget)
self.label_2.setObjectName("label_2")
self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget)
self.valueLine.setObjectName("valueLine")
self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine)
self.controlPanelLayout.addWidget(self.contourOptionWidget)
self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget)
self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0))
self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215))
self.deleteContourBtn.setObjectName("deleteContourBtn")
self.controlPanelLayout.addWidget(self.deleteContourBtn)
self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget)
self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0))
self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215))
self.saveContourBtn.setObjectName("saveContourBtn")
self.controlPanelLayout.addWidget(self.saveContourBtn)
self.mainLayout.addWidget(self.controlPanelWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuEdit = QtWidgets.QMenu(self.menubar)
self.menuEdit.setObjectName("menuEdit")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.actionOpen = QtWidgets.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionOpen_2 = QtWidgets.QAction(MainWindow)
self.actionOpen_2.setObjectName("actionOpen_2")
self.actionSave = QtWidgets.QAction(MainWindow)
self.actionSave.setObjectName("actionSave")
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.actionOpen_2)
self.menuFile.addAction(self.actionSave)
self.menuFile.addAction(self.actionQuit)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuEdit.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.imageLabel.setText(_translate("MainWindow", "Image will appear here"))
self.openFileBtn.setText(_translate("MainWindow", "Open New Image"))
self.choosePointBtn.setText(_translate("MainWindow", "Choose a Point"))
self.findContourBtn.setText(_translate("MainWindow", "Find Contour"))
self.thicknessLabel.setText(_translate("MainWindow", "Thickness:"))
self.thicknessComboBox.setItemText(0, _translate("MainWindow", "1"))
self.thicknessComboBox.setItemText(1, _translate("MainWindow", "2"))
self.thicknessComboBox.setItemText(2, _translate("MainWindow", "3"))
self.colorLabel.setText(_translate("MainWindow", "Color:"))
self.label_2.setText(_translate("MainWindow", "Value (0.0~1.0):"))
self.valueLine.setText(_translate("MainWindow", "0.5"))
self.deleteContourBtn.setText(_translate("MainWindow", "Delete Contour"))
self.saveContourBtn.setText(_translate("MainWindow", "Save Contour"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.actionOpen.setText(_translate("MainWindow", "New"))
self.actionOpen_2.setText(_translate("MainWindow", "Open"))
self.actionSave.setText(_translate("MainWindow", "Save"))
self.actionQuit.setText(_translate("MainWindow", "Quit"))
from Drawable import DrawableQLabel
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 2 | 2 |
alerta/webhooks/stackdriver.py | sauber/alerta | 0 | 12794958 | import json
import logging
from datetime import datetime
from typing import Any, Dict
from flask import current_app, g, jsonify, request
from flask_cors import cross_origin
from alerta.auth.decorators import permission
from alerta.exceptions import ApiError, RejectException
from alerta.models.alert import Alert
from alerta.models.enums import Scope
from alerta.utils.api import add_remote_ip, assign_customer, process_alert
from alerta.utils.audit import write_audit_trail
from . import webhooks
LOG = logging.getLogger(__name__)
JSON = Dict[str, Any]
def parse_stackdriver(notification: JSON) -> Alert:
incident = notification['incident']
state = incident['state']
# 'documentation' is an optional field that you can use to customize
# your alert sending a json
if 'documentation' in incident:
try:
content = json.loads(incident['documentation']['content'])
incident.update(content)
except Exception as e:
LOG.warning("Invalid documentation content: '{}'".format(incident['documentation']))
service = []
status = None
create_time = None # type: ignore
severity = incident.get('severity', 'critical')
if incident['policy_name']:
service.append(incident['policy_name'])
if state == 'open':
create_time = datetime.utcfromtimestamp(incident['started_at'])
elif state == 'acknowledged':
status = 'ack'
elif state == 'closed':
severity = 'ok'
create_time = datetime.utcfromtimestamp(incident['ended_at'])
else:
severity = 'indeterminate'
return Alert(
resource=incident['resource_name'],
event=incident['condition_name'],
environment=incident.get('environment', 'Production'),
severity=severity,
status=status,
service=service,
group=incident.get('group', 'Cloud'),
text=incident['summary'],
attributes={
'incidentId': incident['incident_id'],
'resourceId': incident['resource_id'],
'moreInfo': '<a href="%s" target="_blank">Stackdriver Console</a>' % incident['url']
},
customer=incident.get('customer'),
origin=incident.get('origin', 'Stackdriver'),
event_type='stackdriverAlert',
create_time=create_time,
raw_data=notification
)
@webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST'])
@cross_origin()
@permission(Scope.write_webhooks)
def stackdriver():
try:
incomingAlert = parse_stackdriver(request.get_json(force=True))
except ValueError as e:
raise ApiError(str(e), 400)
incomingAlert.customer = assign_customer(wanted=incomingAlert.customer)
add_remote_ip(request, incomingAlert)
try:
alert = process_alert(incomingAlert)
except RejectException as e:
raise ApiError(str(e), 403)
except Exception as e:
raise ApiError(str(e), 500)
text = 'stackdriver alert received via webhook'
write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text, user=g.user,
customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request)
if alert:
return jsonify(status='ok', id=alert.id, alert=alert.serialize), 201
else:
raise ApiError('insert or update of StackDriver notification failed', 500)
| 2.203125 | 2 |
blog/models.py | dkirel/HappyLens | 0 | 12794959 | <gh_stars>0
import datetime
import random
import string
import bcrypt
import os
from django.db import models
from django.conf import settings
from django.contrib import admin
from django import forms
from djangotoolbox.fields import EmbeddedModelField, ListField
SOURCES = (('du', 'Direct Upload'),
('tu', 'Tumblr Post'),
('tw', 'Tweet'),
('ig', 'Instagram'),
('fb', 'Facebook'))
def get_image_path(self, other_field):
return 'uploads/' + self.project.replace(' ', '_').lower() + '/' + os.path.basename(self.image_name)
class Profile(models.Model):
name = models.CharField(blank=False, max_length=100)
photo = models.ImageField(blank=False, max_length=50)
personal_info = models.CharField(blank=False, max_length=1000)
def __unicode__(self):
return self.name
class ProfileForm(forms.ModelForm):
personal_info = forms.CharField(required=True, widget=forms.Textarea, max_length=1000)
class Meta:
model = Profile
class ProfileAdmin(admin.ModelAdmin):
form = ProfileForm
class Project(models.Model):
name = models.CharField(blank=False, max_length=100)
description = models.CharField(blank=True, max_length=500)
def __unicode__(self):
return self.name
class ProjectForm(forms.ModelForm):
description = forms.CharField(required=False, widget=forms.Textarea, max_length=1000)
class Meta:
model = Project
class ProjectAdmin(admin.ModelAdmin):
form = ProjectForm
class Photo(models.Model):
image_name = models.CharField(blank=False, max_length=300)
project = models.CharField(blank=False, choices=((p.name, p.name) for p in Project.objects.all()), max_length=100)
file = models.ImageField(blank=False, upload_to=get_image_path)
thumbnail_path = models.CharField(max_length=500, editable=False)
project_cover = models.BooleanField()
def __unicode__(self):
return self.project + '/' + self.image_name
def to_json(self):
return {
'image_name': self.image_name,
'image_path': self.image_path,
'thumbnail_path': self.thumbnail_path,
'width': self.width,
'height': self.height
}
"""
class Post(models.Model):
slug = models.CharField(blank=False, max_length=200)
title = models.CharField(blank=False, max_length=200)
project = models.CharField(blank=False, max_length=200)
timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now)
photos = EmbeddedModelField('Photo', blank=False)
tags = ListField(models.CharField())
source = models.CharField(blank=False, choices=SOURCES, max_length=50)
meta = {
'indexes': [
{'fields': ['slug'], 'unique': True, 'types': False}
]
}
def to_json(self):
d = {'title': self.title,
'slug': self.slug,
'date': self.date.strftime('%Y-%m-%d'),
'source': self.source,
'photos': [p.to_json() for p in self.photos]}
if hasattr(self, 'tags'):
d['tags'] = self.tags
return d
"""
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Photo)
| 2.125 | 2 |
ibm_watson/version.py | mmmryd/python-sdk | 0 | 12794960 | __version__ = '5.2.3'
| 1.0625 | 1 |
takeyourmeds/utils/decorators.py | takeyourmeds/takeyourmeds-web | 11 | 12794961 | from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
logout_required = user_passes_test(
lambda x: not x.is_authenticated(),
settings.LOGIN_REDIRECT_URL,
)
superuser_required = user_passes_test(lambda u: (u.is_authenticated() and u.is_superuser))
| 2 | 2 |
tests/test_logic/test_tree/test_functions.py | cdhiraj40/wemake-python-styleguide | 1,931 | 12794962 | import pytest
from wemake_python_styleguide.logic.tree import functions
@pytest.mark.parametrize(('function_call', 'function_name'), [
# Simple builtin functions
('print("Hello world!")', 'print'),
('int("10")', 'int'),
('bool(1)', 'bool'),
('open("/tmp/file.txt", "r")', 'open'),
('str(10)', 'str'),
# Functions in modules
('datetime.timedelta(days=1)', 'datetime.timedelta'),
('cmath.sqrt(100)', 'cmath.sqrt'),
# Functions in (made up) objects
('dt.strftime("%H:%M")', 'dt.strftime'),
('obj.funct()', 'obj.funct'),
])
def test_given_function_called_no_split(
parse_ast_tree, function_call: str, function_name: str,
) -> None:
"""Test given_function_called without splitting the modules."""
tree = parse_ast_tree(function_call)
node = tree.body[0].value
called_function = functions.given_function_called(node, [function_name])
assert called_function == function_name
@pytest.mark.parametrize(('function_call', 'function_name'), [
# Simple builtin functions
('print("Hello world!")', 'print'),
('int("10")', 'int'),
('bool(1)', 'bool'),
('open("/tmp/file.txt", "r")', 'open'),
('str(10)', 'str'),
# Functions in modules
('datetime.timedelta(days=1)', 'timedelta'),
('cmath.sqrt(100)', 'sqrt'),
# Functions in (made up) objects
('dt.strftime("%H:%M")', 'strftime'),
('obj.funct()', 'funct'),
])
def test_given_function_called_with_split(
parse_ast_tree, function_call: str, function_name: str,
) -> None:
"""Test given_function_called splitting the modules."""
tree = parse_ast_tree(function_call)
node = tree.body[0].value
called_function = functions.given_function_called(
node,
[function_name],
split_modules=True,
)
assert called_function == function_name
| 2.65625 | 3 |
python/niveau2/1-NombresAVirgulesEtAutresOutils/2.py | ThomasProg/France-IOI | 2 | 12794963 | nbLieues = float(input())
print(nbLieues / 0.707)
| 2.359375 | 2 |
microsoft_problems/problem_9.py | loftwah/Daily-Coding-Problem | 129 | 12794964 | """This problem was asked by Microsoft.
Using a read7() method that returns 7 characters from a file, implement readN(n) which reads n characters.
For example, given a file with the content “Hello world”, three calls to read7() return “Hello w”, “orld” and then “”.
""" | 2.96875 | 3 |
src/arrays/word-ladder-2.py | vighnesh153/ds-algo | 0 | 12794965 | <reponame>vighnesh153/ds-algo
from collections import defaultdict
def generate_graph(word_list):
graph = defaultdict(set)
for word in word_list:
for i in range(len(word)):
new_word = word[:i] + '*' + word[i + 1:]
graph[new_word].add(word)
graph[word].add(new_word)
return graph
def get_neighbours(graph, word):
neighbours = set()
for star_word in graph[word]:
for neighbour in graph[star_word]:
neighbours.add(neighbour)
neighbours.remove(word)
return neighbours
def solve(begin_word, end_word, word_list):
graph = generate_graph([begin_word, *word_list])
if end_word not in graph:
return []
result = []
queue = [(begin_word, [begin_word])]
visited = set()
while len(queue) > 0:
next_level = []
for word, path in queue:
visited.add(word)
for neighbour in get_neighbours(graph, word):
if neighbour in visited:
continue
if neighbour == end_word:
result.append(path + [end_word])
continue
next_level.append((neighbour, path + [neighbour]))
if len(result) > 0:
break
queue = next_level
return result
| 3.546875 | 4 |
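The file defines `solve` but never invokes it; an illustrative call (not in the original) with the classic word-ladder example shows the expected output shape:

```python
# Example usage: all shortest transformation sequences from "hit" to "cog".
paths = solve("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"])
print(paths)
# [['hit', 'hot', 'dot', 'dog', 'cog'], ['hit', 'hot', 'lot', 'log', 'cog']]
# (the relative order of the two paths may differ)
```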
weld-python/weld/grizzly/core/indexes/base.py | tustvold/weld | 2,912 | 12794966 | <gh_stars>1000+
from abc import ABC
class Index(ABC):
"""
Base class for an index in Grizzly.
"""
pass
| 1.765625 | 2 |
vacca/main.py | tango-controls/VACCA | 2 | 12794967 | <gh_stars>1-10
#!/usr/bin/env python
"""
Vacca runner; this file emulates this call:
>taurusgui vacca
Config file (or Property name) is obtained from shell args, then env,
then properties in this order.
If empty, a DEFAULT profile is created pointing to default.py
MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE
"""
import sys,os,re,time,imp,traceback
args = sys.argv[1:]
files = []
__doc__ = """
The file vacca/main.py is the vaccagui launcher
It creates the taurusgui environment and sets all environment variables.
vaccagui usage
--------------
Launching vacca, loading configuration from a target.py:
> vaccagui [target.py or $CONFIG] [$OPTION=...]
$CONFIG will load values from VACCA.$CONFIG property in the database
$OPTION=... can be used to override values declared properties or target.py
Environment variables (optional, will be initialized in vacca/main.py):
VACCA_CONFIG : if set, equivalent to passing target.py as argument
VACCA_DIR : directory to resources needed by target.py (target.py folder by default)
VACCA_PATH : path to vacca module (initialized by imp.find_module())
If not set, default values are those set as VACCA properties in Tango DB.
Reset of QSettings files:
> vaccagui --reset #The last saved perspective will be removed
> vaccagui --clean #All the .ini files will be removed
Other options:
> vaccagui --helps #Prints this text
> vaccagui --list #Prints available configurations
"""
if args and args[0].strip('- ') == 'help':
print(__doc__)
sys.exit(0)
def remove_last_config(filename):
print('vacca.remove_last_config(%s)'%filename)
lines = open(filename).readlines()
sections = dict((l.strip(),i) for i,l in enumerate(lines)
if re.match('[\[][a-zA-Z]*[\]]',l))
sections['[End]'] = len(lines)
begin = sections['[General]']
end = min(i for i in sections.values() if i>begin)
fo = open(filename,'w')
fo.writelines(lines[:begin]+lines[end:])
fo.close()
folder = os.getenv('HOME')+'/.config/VACCA/'
options = [a for a in args if a.startswith('-')]
values = [a for a in args if '=' in a]
if '--clean' in args:
print('Removing last vacca configs (%s/*.ini)'%folder)
os.remove(folder+'*.ini')
if len(args)==1: sys.exit(0)
elif '--reset' in args:
inits = [a for a in os.walk(folder).next()[2] if a.endswith('.ini')]
print('Removing last vacca configs (%s)'%inits)
[remove_last_config(folder+filename) for filename in inits]
if len(args)==1: sys.exit(0)
elif '--help' in args or '-h' in args or '-?' in args:
print(__doc__)
sys.exit(0)
elif '--list' in args:
import vacca.utils as vu
configs = vu.get_config_properties()
print('\nVaccaConfigs:')
print('\n\t'+'\n\t'.join(configs)+'\n')
sys.exit(0)
if '--panel' not in args:
files = [a for a in args if a not in options+values]
files = files or [os.getenv('VACCA_CONFIG')]
###############################################################################
print('-'*80)
print("In vacca.main(%s) ..."%args)
###############################################################################
# Delayed imports
import taurus
from taurus.core.util import argparse
from taurus.qt.qtgui.application import TaurusApplication
from taurus.qt.qtgui.taurusgui import TaurusGui
######################################################################
# Set tangoFormatter as default formatter
try:
from taurus.core.tango.util import tangoFormatter
from taurus.qt.qtgui.base import TaurusBaseComponent
TaurusBaseComponent.FORMAT = tangoFormatter
except:
pass
######################################################################
import vacca.utils as vu
import vacca
configs = vu.get_config_properties() or vu.create_config_properties()
if not files or not files[0]: files = [configs.keys()[0]]
dirname = os.getenv('VACCA_DIR') or ''
config = os.getenv('VACCA_CONFIG') or ''
if files[0] in configs:
print('Loading %s'%files[0])
data = vu.get_config_properties(files[0])
config = config or data.get('VACCA_CONFIG',files[0])
dirname = dirname or data.get('VACCA_DIR',dirname)
else:
config = config or files[0]
if os.path.isfile(config):
config = os.path.abspath(config)
elif config:
try:
import imp
print('Loading %s as python module'%config)
config = imp.find_module(config.replace('.','/'))[1]
dirname = os.path.dirname(config)
except:
pass
dirname = dirname or os.path.dirname(config) or \
vu.get_vacca_property('VACCA_DIR',extract=1) or ''
vu.VACCA_DIR = os.environ['VACCA_DIR'] = dirname
vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] = config
print('Vacca Environment variables (vacca.main):')
print('\n'.join(map(str,(t for t in os.environ.items() if 'VACCA' in t[0]))))
### MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE
print '-'*80
if '--panel' in options:
import vacca.panel
ret = vacca.panel.main(args[:1]+args[-1:])
else:
confname = 'vaccagui'
app = TaurusApplication()
gui = TaurusGui(None, confname=confname)
gui.show()
ret = app.exec_()
taurus.info('Finished execution of TaurusGui')
sys.exit(ret)
| 2.828125 | 3 |
src/signal_processing.py | ninashp/Soundproof | 0 | 12794968 | <gh_stars>0
"""
This file contains the code for signal processing module of application.
"""
import numpy as np
from configuration import get_config
import scipy
import librosa
config = get_config() # get arguments from parser
# constants for output of identify_call_type
FIRST_SPEAKER_CUSTOMER = 0
FIRST_SPEAKER_REPRESENTATIVE = 1
def print_call_type(call_type):
""" Used to print call type
Input: call type int
"""
if call_type == FIRST_SPEAKER_CUSTOMER:
print("First speaker is customer")
else:
print("First speaker is representative")
def identify_call_type(call_file):
""" Identify who speaks first in the call according to phone tone.
If a call starts with a dial tone customer speaks first, else representative.
Input: path to a call
Output: FIRST_SPEAKER_CUSTOMER if customer speaks first,
FIRST_SPEAKER_REPRESENTATIVE if representative speaks first
"""
# from the first frame of sound measure 1.5 sec and look for 400Hz tone
nof_frames = librosa.core.time_to_samples(1.5, sr=config.sr)
call_audio, _ = librosa.core.load(call_file, config.sr)
intervals = librosa.effects.split(call_audio, top_db = 20)
tone_fft = scipy.fft(call_audio[intervals[0][0]:intervals[0][0]+nof_frames])
tone_fft_mag = np.absolute(tone_fft) # spectral magnitude
f = np.linspace(0, config.sr, nof_frames) # frequency variable
if (round(f[np.argmax(tone_fft_mag)]) == 400 and max(tone_fft_mag)>config.dialing_tone_thresh):
# dialing tone detected! this means the representative is calling the customer, so the customer speaks first
return FIRST_SPEAKER_CUSTOMER
else:
# this means the customer is calling the call center, so the representative speaks first
return FIRST_SPEAKER_REPRESENTATIVE
def extract_utterances_from_a_call(call_file):
""" Get a file, output a numpy array with frames exreacted from the call of voice.
The frames are utterances of minimal length, split by 20DB limit
Input: audio file path
Output: list of numpy arrays, each of them representing a single speech utterance
list of numpy array representing the timestamp start and end of each utterance in the call
"""
# extract audio
call_audio, _ = librosa.core.load(call_file, config.sr)
# split the audio to voice and no-voice according to amplitude
intervals = librosa.effects.split(call_audio, top_db = 20)
# lower bound of utterance length - below that discard
utter_min_len = (config.tisv_frame * config.hop + config.window) * config.sr
utterances_list = []
utterances_timestamps = []
for interval in intervals:
# Check that utterance length is sufficient
if (interval[1]-interval[0]) > utter_min_len:
utterances_list.append(call_audio[interval[0]:interval[1]])
utterances_timestamps.append(np.array([librosa.core.samples_to_time(interval[0], sr=config.sr),
librosa.core.samples_to_time(interval[1], sr=config.sr)]))
return utterances_list, utterances_timestamps
def extract_spectrograms_from_utterances(utterances_list):
""" Get a list of utterances and extract spectrograms binned in mel-binning for each frame
Input: list of numpy arrays, each of them representing a single speech utterance
Output: list of numpy arrays, each of them representing a spectrogram of a single speech utterance
"""
spectrograms_list = []
# iterate on all utterances, extract spectrogram from each
for utterance in utterances_list:
spect = librosa.core.stft(y=utterance, n_fft = config.nfft,
win_length=int(config.window * config.sr), hop_length=int(config.hop * config.sr))
spect = np.abs(spect) ** 2
mel_basis = librosa.filters.mel(sr=config.sr, n_fft=config.nfft, n_mels=config.mel_nof)
# log mel spectrogram of utterances
spect_bins = np.log10(np.dot(mel_basis, spect) + 1e-6)
spectrograms_list.append(np.transpose(spect_bins))
return spectrograms_list
def split_segment_to_frames(seg):
""" Given an audio segment, split it into frames according to size config.tisv_frame
Input: seg - audio segment
Output: list of frames
"""
# Extract the spectrogram
spect = np.transpose(extract_spectrograms_from_utterances([seg])[0])
#Get config.tisv_frame STFT windows with 50% overlap
STFT_frames = []
for j in range(0, spect.shape[1], int(.12/config.hop)):
if j + config.tisv_frame < spect.shape[1]:
STFT_frames.append(np.transpose(spect[:,j:j+config.tisv_frame]))
else:
break
return STFT_frames
| 2.984375 | 3 |
videoretrieval/base/base_dataset.py | googleinterns/via-content-understanding | 1 | 12794969 | <reponame>googleinterns/via-content-understanding
"""Defines a base class for datasets.
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC as AbstractClass
from abc import abstractmethod
import pathlib
import tensorflow as tf
class BaseVideoDataset(AbstractClass):
"""Base class for video datasets."""
@property
@abstractmethod
def dataset_name(self):
"""Gets the name of the dataset."""
pass
@property
@abstractmethod
def dataset_downloaded(self):
"""A boolean describing if the dataset is downloaded."""
pass
@property
def data(self):
"""Gets a tf.data object representing the dataset"""
pass
@abstractmethod
def download_dataset(self):
"""Downloads the dataset."""
pass
@abstractmethod
def download_and_cache_precomputed_features(self):
"""Downloads and caches precomputed features for the given dataset."""
pass
@property
@abstractmethod
def video_captions():
"""Returns a dict of that maps from video id to a list of captions."""
pass
@property
@abstractmethod
def captions_per_video(self):
"""Number of captions per video."""
pass
@property
@abstractmethod
def train_valid_test_ids(self):
"""Returns a tuple of sets providing ids for the dataset splits.
Returns: a tuple of sets, where the first set contains the ids for the
train data, the second for the validation data, and the third for the
test data."""
pass
def build_generator(self, data):
"""Builds a generator that yields each element from data."""
for example in data:
yield example
def build_id_caption_pair_generator_dataset(self, data):
"""Builds a tf.data Dataset out of id caption pairs in data."""
generator = lambda: self.build_generator(data)
return tf.data.Dataset.from_generator(generator, (tf.string, tf.string))
@property
def id_caption_pair_datasets(self):
"""Get id caption pair datasets for each split in this dataset.
Returns: a tuple of three tuples. Each tuple has two elements, the first
is a tf.data.Dataset of video id caption pairs, and the second element
is the name of the split as a string. In the retured tuple, the first
element is the data for the train split, followed by the valid and test
sets. The three splits returned are for "train", "valid", and "test"
splits. The returned data is structured as follows: (
(tf.data.Dataset instance, "train"),
(tf.data.Dataset instance, "valid"),
(tf.data.Dataset instance, "test"))
"""
train_ids, valid_ids, test_ids = self.train_valid_test_ids
train_data = []
valid_data = []
test_data = []
for video_id, caption in self.video_captions:
if video_id in train_ids:
train_data.append((video_id, caption))
elif video_id in valid_ids:
valid_data.append((video_id, caption))
elif video_id in test_ids:
test_data.append((video_id, caption))
else:
print(f"Orphan pair: id: {video_id}, caption: {hash(caption)}")
self.num_of_train_examples = len(train_data)
self.num_of_valid_examples = len(valid_data)
self.num_of_test_examples = len(test_data)
train_dataset = self.build_id_caption_pair_generator_dataset(train_data)
valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data)
test_dataset = self.build_id_caption_pair_generator_dataset(test_data)
return (
(train_dataset, "train"),
(valid_dataset, "valid"),
(test_dataset, "test")
)
def num_of_examples_by_split(self, split_name):
"""Gets the number of examples in the given split in this dataset.
Args:
split_name: the name of the dataset split, as a string (case
insensitive). The split name can be "train", "valid", or "test".
Returns: an integer that represents the number of examples in the given
split.
Raises: ValueError if the split name is not "train", "valid", or "test".
"""
if "num_of_train_examples" not in dir(self):
# Accessing the property self.id_caption_pair_datasets counts the
# number of examples in each split
_ = self.id_caption_pair_datasets
split_name = split_name.lower()
if split_name == "train":
return self.num_of_train_examples
elif split_name == "valid":
return self.num_of_valid_examples
elif split_name == "test":
return self.num_of_test_examples
else:
raise ValueError("Illegal split name")
| 2.390625 | 2 |
vispupu/colors.py | sscihz/vispupu | 0 | 12794970 | <filename>vispupu/colors.py<gh_stars>0
color_styles = {
"Turquoise": [
"#1abc9c",
"#e8f8f5",
"#d1f2eb",
'#a3e4d7',
'#76d7c4',
'#48c9b0',
'#1abc9c',
'#17a589',
'#148f77',
'#117864',
'#0e6251'],
"Green Sea":[
'#16a085',
'#e8f6f3',
'#d0ece7',
'#a2d9ce',
'#73c6b6',
'#45b39d',
'#16a085',
'#138d75',
'#117a65',
'#0e6655',
'#0b5345'
],
"Emerald":[
'#2ecc71',
'#eafaf1',
"#d5f5e3",
"#abebc6",
"#82e0aa",
"#58d68d",
"#2ecc71",
"#28b463",
"#239b56",
"#1d8348",
"#186a3b"
],
'Nephritis':[
'#27ae60',
'#e9f7ef',
'#d4efdf',
'#a9dfbf',
'#7dcea0',
'#52be80',
'#27ae60',
'#229954',
'#1e8449',
'#196f3d',
'#145a32',
],
'Peter River':[
'#3498db',
'#ebf5fb',
'#d6eaf8',
'#aed6f1',
'#85c1e9',
'#5dade2',
'#3498db',
'#2e86c1',
'#2874a6',
'#21618c',
'#1b4f72',
],
'Belize Hole':[
'#2980b9',
'#eaf2f8',
'#d4e6f1',
'#a9cce3',
'#7fb3d5',
'#5499c7',
'#2980b9',
'#2471a3',
'#1f618d',
'#1a5276',
'#154360',
],
'Amethyst':[
'#9b59b6',
'#f5eef8',
'#ebdef0',
'#d7bde2',
'#c39bd3',
'#af7ac5',
'#9b59b6',
'#884ea0',
'#76448a',
'#633974',
'#512e5f',
],
'Wisteria' :[
'#8e44ad',
'#f4ecf7',
'#e8daef',
'#d2b4de',
'#bb8fce',
'#a569bd',
'#8e44ad',
'#7d3c98',
'#6c3483',
'#5b2c6f',
'#4a235a',
],
'Wet Asphalt':[
'#34495e',
'#ebedef',
'#d6dbdf',
'#aeb6bf',
'#85929e',
'#5d6d7e',
'#34495e',
'#2e4053',
'#283747',
'#212f3c',
'#1b2631',
],
'Midnight Blue':[
'#2c3e50',
'#eaecee',
'#d5d8dc',
'#abb2b9',
'#808b96',
'#566573',
'#2c3e50',
'#273746',
'#212f3d',
'#1c2833',
'#17202a',
],
'Sunflower':[
'#f1c40f',
'#fef9e7',
'#fcf3cf',
'#f9e79f',
'#f7dc6f',
'#f4d03f',
'#f1c40f',
'#d4ac0d',
'#b7950b',
'#9a7d0a',
'#7d6608',
],
'Orange':[
'#f39c12',
'#fef5e7',
'#fdebd0',
'#fad7a0',
'#f8c471',
'#f5b041',
'#f39c12',
'#d68910',
'#b9770e',
'#9c640c',
'#7e5109',
],
"Carrot":[
'#e67e22',
'#fdf2e9',
'#fae5d3',
'#f5cba7',
'#f0b27a',
'#eb984e',
'#e67e22',
'#ca6f1e',
'#af601a',
'#935116',
'#784212',
],
'Pumpkin':[
'#d35400',
'#fbeee6',
'#f6ddcc',
'#edbb99',
'#e59866',
'#dc7633',
'#d35400',
'#ba4a00',
'#a04000',
'#873600',
'#6e2c00',
],
'Alizarin':[
'#e74c3c',
'#fdedec',
'#fadbd8',
'#f5b7b1',
'#f1948a',
'#ec7063',
'#e74c3c',
'#cb4335',
'#b03a2e',
'#943126',
'#78281f',
],
'Pomegranate':[
'#c0392b',
'#f9ebea',
'#f2d7d5',
'#e6b0aa',
'#d98880',
'#cd6155',
'#c0392b',
'#a93226',
'#922b21',
'#7b241c',
'#641e16',
],
'Clouds':[
'#ecf0f1',
'#fdfefe',
'#fbfcfc',
'#f7f9f9',
'#f4f6f7',
'#f0f3f4',
'#ecf0f1',
'#d0d3d4',
'#b3b6b7',
'#979a9a',
'#7b7d7d',
],
"Silver":[
'#bdc3c7',
'#f8f9f9',
'#f2f3f4',
'#e5e7e9',
'#d7dbdd',
'#cacfd2',
'#bdc3c7',
'#a6acaf',
'#909497',
'#797d7f',
'#626567',
],
"Concrete":[
'#95a5a6',
'#f4f6f6',
'#eaeded',
'#d5dbdb',
'#bfc9ca',
'#aab7b8',
'#95a5a6',
'#839192',
'#717d7e',
'#5f6a6a',
'#4d5656'
],
"Asbestos" :[
'#f2f4f4',
'#e5e8e8',
'#ccd1d1',
'#b2babb',
'#99a3a4',
'#7f8c8d',
'#707b7c',
'#616a6b',
'#515a5a',
'#424949'
]
} | 1.59375 | 2 |
apps/sshchan/views.py | zhoumjane/devops_backend | 53 | 12794971 | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render, HttpResponse
from utils.tools.tools import unique
from devops_backend.settings import TMP_DIR
from rest_framework import viewsets, filters, mixins, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from servers.models import Server
import os
# def index(request):
# return render(request, 'index.html')
class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin):
permission_classes = (IsAuthenticated, )
def list(self, request, *args, **kwargs):
remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR')
try:
ip_addr = self.request.query_params['ip']
except:
return Response({"permission": False}, status=status.HTTP_403_FORBIDDEN)
permission_str = 'servers.login_' + ip_addr
if not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)):
return Response({"permission": False}, status=status.HTTP_403_FORBIDDEN)
try:
Server.objects.filter(ip=ip_addr)
except Exception as e:
return Response({"permission": False}, status=status.HTTP_400_BAD_REQUEST)
try:
port = self.request.query_params['port']
except Exception as e:
port = '22'
try:
user = self.request.query_params['user']
except Exception as e:
user = 'root'
content = {
'host': ip_addr,
'port': port,
'user': user,
'current_user': self.request.user,
'remote_addr': remote_addr
}
return render(request, 'index.html', content)
def upload_ssh_key(request):
if request.method == 'POST':
pkey = request.FILES.get('pkey')
ssh_key = pkey.read().decode('utf-8')
while True:
filename = unique()
ssh_key_path = os.path.join(TMP_DIR, filename)
if not os.path.isfile(ssh_key_path):
with open(ssh_key_path, 'w+') as f:
f.write(ssh_key)
break
else:
continue
return HttpResponse(filename) | 2.015625 | 2 |
aib/custom/formdefn_funcs.py | FrankMillman/AccInABox | 3 | 12794972 | from collections import OrderedDict as OD
from lxml import etree
# parser = etree.XMLParser(remove_blank_text=True)
import db.objects
import db.api
db_session = db.api.start_db_session() # need independent connection for reading
import os.path
import __main__
schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas')
xsd_parser = etree.XMLParser(
schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')),
attribute_defaults=True, remove_comments=True, remove_blank_text=True)
from common import AibError
from common import log, debug
async def init_xml(caller, xml):
# called from setup_form after form_name if form does not exist
form_defn = caller.data_objects['form']
form_xml = etree.Element('form')
form_xml.set('name', await form_defn.getval('form_name'))
etree.SubElement(form_xml, 'db_objects')
etree.SubElement(form_xml, 'mem_objects')
etree.SubElement(form_xml, 'input_params')
etree.SubElement(form_xml, 'output_params')
frame = etree.SubElement(form_xml, 'frame')
etree.SubElement(frame, 'toolbar')
etree.SubElement(frame, 'body')
etree.SubElement(frame, 'button_row')
etree.SubElement(frame, 'frame_methods')
await form_defn.setval('form_xml', form_xml)
await load_form_xml(caller, xml)
#-----------------------------------------------------------------------------
# form_funcs
#-----------------------------------------------------------------------------
async def load_form_xml(caller, xml):
# called from setup_form 'on_start_frame'
form_defn = caller.data_objects['form']
form_vars = caller.data_objects['form_vars']
frame_vars = caller.data_objects['frame_vars']
inline_vars = caller.data_objects['inline_vars']
await inline_vars.delete_all()
form_xml = await form_defn.getval('form_xml')
if form_xml is None:
await form_vars.init()
await frame_vars.init()
return
init_vals={}
init_vals['dbobj_xml'] = form_xml.find('db_objects')
init_vals['memobj_xml'] = form_xml.find('mem_objects')
init_vals['inputs_xml'] = form_xml.find('input_params')
init_vals['outputs_xml'] = form_xml.find('output_params')
init_vals['before_start_form'] = await form_vars.get_val_from_xml(
'before_start_form', form_xml.get('before_start_form'))
init_vals['after_start_form'] = await form_vars.get_val_from_xml(
'after_start_form', form_xml.get('after_start_form'))
init_vals['on_close_form'] = await form_vars.get_val_from_xml(
'on_close_form', form_xml.get('on_close_form'))
await form_vars.init(init_vals=init_vals)
obj_names = caller.data_objects['obj_names']
await obj_names.delete_all()
col_names = caller.data_objects['col_names']
await col_names.delete_all()
dbobj_xml = await form_vars.getval('dbobj_xml')
for dbobj_elem in dbobj_xml.iter('db_obj'):
"""
async with db_session.get_connection() as db_mem_conn:
conn = db_mem_conn.db
sql = (
"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'"
.format(caller.company, dbobj_elem.get('table_name'))
)
cur = await conn.exec_sql(sql)
table_id, descr = await cur.__anext__()
await obj_names.init(init_vals={
'name': dbobj_elem.get('name'), 'descr': descr})
await obj_names.save()
sql = (
"SELECT col_name, short_descr FROM {}.db_columns "
"WHERE table_id = {} "
"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') "
"ORDER BY col_type, seq"
.format(caller.company, table_id)
)
async for col_name, descr in await conn.exec_sql(sql):
await col_names.init(init_vals={ #'obj_id': obj_row_id,
'name': col_name, 'descr': descr})
await col_names.save()
"""
# """
obj_name = dbobj_elem.get('name')
table_name = dbobj_elem.get('table_name')
db_table = await db.objects.get_db_table(
form_defn.context, caller.company, table_name)
await obj_names.init(init_vals={
'name': obj_name, 'descr': db_table.short_descr})
await obj_names.save()
for seq, col_defn in enumerate(db_table.col_list):
await col_names.init(init_vals={'name': col_defn.col_name,
'descr': col_defn.short_descr, 'seq': seq})
await col_names.save()
# """
memobj_xml = await form_vars.getval('memobj_xml')
for memobj in memobj_xml.iter('mem_obj'):
await obj_names.init(init_vals={
'name': memobj.get('name'), 'descr': memobj.get('descr')})
await obj_names.save()
obj_row_id = await obj_names.getval('row_id')
for seq, memcol in enumerate(memobj.iter('mem_col')):
await col_names.init(init_vals={'name': memcol.get('col_name'),
'descr': memcol.get('short_descr'), 'seq': seq})
await col_names.save()
frame_xml = form_xml.find('frame')
init_vals={}
init_vals['toolbar_xml'] = frame_xml.find('toolbar')
init_vals['body_xml'] = frame_xml.find('body')
init_vals['buttonrow_xml'] = frame_xml.find('button_row')
init_vals['methods_xml'] = frame_xml.find('frame_methods')
init_vals['main_object'] = frame_xml.get('main_object')
init_vals['obj_descr'] = frame_xml.get('obj_descr')
await frame_vars.init(init_vals=init_vals)
for inline_xml in form_xml.iterchildren('inline_form'): # do not descend
init_vals = {
'name': inline_xml.get('name'),
'title': inline_xml.get('title'),
'frame_xml': inline_xml.find('frame'),
}
await inline_vars.init(init_vals=init_vals)
await inline_vars.save()
async def dump_form_xml(caller, xml):
# called from setup_form 'before_save'
form_defn = caller.data_objects['form']
form_vars = caller.data_objects['form_vars']
frame_vars = caller.data_objects['frame_vars']
form_xml = etree.Element('form')
form_xml.set('name', await form_defn.getval('form_name'))
form_xml.set('title', await form_defn.getval('title'))
await set_if_not_none(form_xml, form_vars, 'before_start_form')
await set_if_not_none(form_xml, form_vars, 'after_start_form')
await set_if_not_none(form_xml, form_vars, 'on_close_form')
form_xml.append(await form_vars.getval('dbobj_xml'))
form_xml.append(await form_vars.getval('memobj_xml'))
form_xml.append(await form_vars.getval('inputs_xml'))
form_xml.append(await form_vars.getval('outputs_xml'))
frame_xml = etree.SubElement(form_xml, 'frame')
await set_if_not_none(frame_xml, frame_vars, 'main_object')
await set_if_not_none(frame_xml, frame_vars, 'obj_descr')
frame_xml.append(await frame_vars.getval('toolbar_xml'))
frame_xml.append(await frame_vars.getval('body_xml'))
frame_xml.append(await frame_vars.getval('buttonrow_xml'))
frame_xml.append(await frame_vars.getval('methods_xml'))
inline_vars = caller.data_objects['inline_vars']
all_inline = inline_vars.select_many(where=[], order=[])
async for _ in all_inline:
inline_xml = etree.SubElement(form_xml, 'inline_form')
inline_xml.set('name', await inline_vars.getval('name'))
inline_xml.set('title', await inline_vars.getval('title'))
inline_xml.append(await inline_vars.getval('frame_xml'))
# inline_params = await form_vars.getval('inline_xml')
# for name, frame_xml in inline_params:
# inline_xml = etree.SubElement(form_xml, 'inline_form')
# inline_xml.set('name', name)
# inline_xml.append(frame_xml)
# validate result using schema
try:
etree.fromstring(etree.tostring(form_xml), parser=xsd_parser)
except (etree.XMLSyntaxError, ValueError, TypeError) as e:
raise AibError(head='XmlError', body=e.args[0])
# update form_definition with new form_xml
await form_defn.setval('form_xml', form_xml)
"""
# the next bit is a trick
# we want to 'save' form_vars, to trigger on_clean()
# however, inline_xml is a 'list' which includes etree Elements
# this cannot be serialised to JSON, so the save fails
# the trick is as follows -
# save all values in init_vals
# call form_vars.restore(), which triggers on_clean()
# call form_vars.init() with init_vals, which puts back the values
init_vals = {}
for col_defn in form_vars.db_table.col_list[1:]: # exclude 'row_id'
col_name = col_defn.col_name
init_vals[col_name] = await form_vars.getval(col_name)
await form_vars.restore()
await form_vars.init(init_vals=init_vals, display=False)
form_vars.init_vals = {}
"""
#-----------------------------------------------------------------------------
# db_obj
#-----------------------------------------------------------------------------
dbobj_cols = ('name', 'table_name', 'parent', 'fkey', 'cursor', 'is_formview_obj')
async def load_db_obj(caller, xml):
# called from setup_form_dbobj 'on_start_frame'
form_vars = caller.data_objects['form_vars']
dbobj_xml = await form_vars.getval('dbobj_xml')
dbobj = caller.data_objects['dbobj']
await dbobj.delete_all()
for seq, obj_xml in enumerate(dbobj_xml):
# init_vals = {col: await dbobj.get_val_from_xml(col, obj_xml.get(col))
# for col in dbobj_cols}
init_vals = {}
for col in dbobj_cols:
init_vals[col] = await dbobj.get_val_from_xml(col, obj_xml.get(col))
init_vals['seq'] = seq
await dbobj.init(display=False, init_vals=init_vals)
await dbobj.save()
await dbobj.init()
async def dump_db_obj(caller, xml):
# called from setup_form_dbobj 'do_save'
form_vars = caller.data_objects['form_vars']
dbobj_xml = await form_vars.getval('dbobj_xml')
orig_dbobj = set((dbobj.get('name') for dbobj in dbobj_xml))
obj_names = caller.data_objects['obj_names']
col_names = caller.data_objects['col_names']
dbobj = caller.data_objects['dbobj']
dbobjs_xml = etree.Element('db_objects')
all_dbobj = dbobj.select_many(where=[], order=[('seq', False)])
async for _ in all_dbobj:
dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj')
for col in dbobj_cols:
await set_if_not_none(dbobj_xml, dbobj, col)
obj_name = await dbobj.getval('name')
if obj_name in orig_dbobj:
orig_dbobj.remove(obj_name)
else:
"""
async with db_session.get_connection() as db_mem_conn:
conn = db_mem_conn.db
sql = (
"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'"
.format(caller.company, await dbobj.getval('table_name'))
)
cur = await conn.exec_sql(sql)
table_id, descr = await cur.__anext__()
await obj_names.init(init_vals={
'name': obj_name, 'descr': descr})
await obj_names.save()
sql = (
"SELECT col_name, short_descr FROM {}.db_columns "
"WHERE table_id = {} "
"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') "
"ORDER BY col_type, seq"
.format(caller.company, table_id)
)
async for col_name, descr in await conn.exec_sql(sql):
await col_names.init(init_vals={ #'obj_id': obj_row_id,
'name': col_name, 'descr': descr})
await col_names.save()
"""
# """
table_name = await dbobj.getval('table_name')
db_table = await db.objects.get_db_table(
form_vars.context, caller.company, table_name)
await obj_names.init(init_vals={
'name': obj_name, 'descr': db_table.short_descr})
await obj_names.save()
for col_defn in db_table.col_list:
await col_names.init(init_vals={
'name': col_defn.col_name, 'descr': col_defn.short_descr})
await col_names.save()
# """
for deleted_obj in orig_dbobj: # anything left has been deleted
await obj_names.init(init_vals={'name': deleted_obj})
await obj_names.delete()
await form_vars.setval('dbobj_xml', dbobjs_xml)
#-----------------------------------------------------------------------------
# mem_obj
#-----------------------------------------------------------------------------
memobj_cols = ('name', 'descr', 'parent', 'sequence', 'sub_types', 'tree_params',
'actions', 'clone_from')
memcol_cols = ('col_name', 'col_type', 'data_type', 'short_descr', 'long_descr',
'col_head', 'key_field', 'data_source', 'condition', 'allow_null', 'allow_amend', 'max_len',
'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks', 'fkey', 'choices', 'sql')
async def load_mem_obj(caller, xml):
# called from setup_form_memobj 'on_start_frame'
form_vars = caller.data_objects['form_vars']
memobj_xml = await form_vars.getval('memobj_xml')
memobj = caller.data_objects['memobj']
memcol = caller.data_objects['memcol']
await memcol.delete_all()
await memobj.delete_all()
for seq, obj_xml in enumerate(memobj_xml):
# init_vals = {col: memobj.get_val_from_xml(col, obj_xml.get(col))
# for col in memobj_cols}
init_vals = {}
for col in memobj_cols:
init_vals[col] = await memobj.get_val_from_xml(col, obj_xml.get(col))
init_vals['seq'] = seq
await memobj.init(display=False, init_vals=init_vals)
await memobj.save()
#set up memcols for this memobj
for seq, memcol_xml in enumerate(obj_xml.iter('mem_col')):
# init_vals = {col: memcol.get_val_from_xml(col, memcol_xml.get(col))
# for col in memcol_cols}
init_vals = {}
for col in memcol_cols:
init_vals[col] = await memcol.get_val_from_xml(col, memcol_xml.get(col))
init_vals['seq'] = seq
await memcol.init(display=False, init_vals=init_vals)
await memcol.save()
    await memobj.init()
async def dump_mem_obj(caller, xml):
# called from setup_form_memobj 'before_save'
form_vars = caller.data_objects['form_vars']
memobj_xml = await form_vars.getval('memobj_xml')
orig_memobj = set((memobj.get('name') for memobj in memobj_xml))
obj_names = caller.data_objects['obj_names']
col_names = caller.data_objects['col_names']
memobj = caller.data_objects['memobj']
memcol = caller.data_objects['memcol']
memobjs_xml = etree.Element('mem_objects')
all_memobj = memobj.select_many(where=[], order=[('seq', False)])
async for _ in all_memobj:
memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj')
for col in memobj_cols:
await set_if_not_none(memobj_xml, memobj, col)
all_memcol = memcol.select_many(where=[], order=[('seq', False)])
async for _ in all_memcol:
memcol_xml = etree.SubElement(memobj_xml, 'mem_col')
for col in memcol_cols:
await set_if_not_none(memcol_xml, memcol, col)
obj_name = await memobj.getval('name')
if obj_name in orig_memobj:
await obj_names.init(init_vals={'name': obj_name})
orig_memobj.remove(obj_name)
else:
await obj_names.init(init_vals={
'name': obj_name,
'descr': await memobj.getval('descr'),
# 'seq': await memobj.getval('seq'), # seq is db_obj then mem_obj, so n/a
})
await obj_names.save()
all_cols = memcol.select_many(where=[], order=[])
async for _ in all_cols:
await col_names.init(init_vals={
'name': await memcol.getval('col_name'),
'descr': await memcol.getval('short_descr'),
'seq': await memcol.getval('seq')})
await col_names.save()
for deleted_obj in orig_memobj: # anything left has been deleted
await obj_names.init(init_vals={'name': deleted_obj})
await obj_names.delete()
await form_vars.setval('memobj_xml', memobjs_xml)
#-----------------------------------------------------------------------------
# io_parms
#-----------------------------------------------------------------------------
input_cols = ('name', 'type', 'target', 'required')
output_cols = ('name', 'type', 'source')
async def load_ioparms(caller, xml):
# called from setup_form_ioparams 'on_start_frame'
form_vars = caller.data_objects['form_vars']
inputs_xml = await form_vars.getval('inputs_xml')
outputs_xml = await form_vars.getval('outputs_xml')
inputs = caller.data_objects['inputs']
await inputs.delete_all()
for seq, input_xml in enumerate(inputs_xml):
# init_vals = {col: inputs.get_val_from_xml(col, input_xml.get(col))
# for col in input_cols}
init_vals = {}
for col in input_cols:
init_vals[col] = await inputs.get_val_from_xml(col, input_xml.get(col))
init_vals['seq'] = seq
await inputs.init(display=False, init_vals=init_vals)
await inputs.save()
outputs = caller.data_objects['outputs']
await outputs.delete_all()
for seq, output_xml in enumerate(outputs_xml):
# init_vals = {col: outputs.get_val_from_xml(col, output_xml.get(col))
# for col in output_cols}
init_vals = {}
for col in output_cols:
init_vals[col] = await outputs.get_val_from_xml(col, output_xml.get(col))
init_vals['seq'] = seq
await outputs.init(display=False, init_vals=init_vals)
await outputs.save()
async def dump_ioparms(caller, xml):
# called from setup_form_ioparams 'do_save'
form_vars = caller.data_objects['form_vars']
inputs_xml = etree.Element('input_params')
inputs = caller.data_objects['inputs']
all_inputs = inputs.select_many(where=[], order=[('seq', False)])
async for _ in all_inputs:
input_xml = etree.SubElement(inputs_xml, 'input_param')
for col in input_cols:
await set_if_not_none(input_xml, inputs, col)
await form_vars.setval('inputs_xml', inputs_xml)
outputs_xml = etree.Element('output_params')
outputs = caller.data_objects['outputs']
all_outputs = outputs.select_many(where=[], order=[('seq', False)])
async for _ in all_outputs:
output_xml = etree.SubElement(outputs_xml, 'output_param')
for col in output_cols:
await set_if_not_none(output_xml, outputs, col)
await form_vars.setval('outputs_xml', outputs_xml)
#-----------------------------------------------------------------------------
# inline forms
#-----------------------------------------------------------------------------
async def load_inline(caller, xml):
# called from setup_form_inline grid_frame 'on_start_frame'
inline_vars = caller.data_objects['inline_vars']
frame_vars = caller.data_objects['frame_vars']
if inline_vars.exists:
frame_xml = await inline_vars.getval('frame_xml')
init_vals={}
init_vals['toolbar_xml'] = frame_xml.find('toolbar')
init_vals['body_xml'] = frame_xml.find('body')
init_vals['buttonrow_xml'] = frame_xml.find('button_row')
init_vals['methods_xml'] = frame_xml.find('frame_methods')
init_vals['main_object'] = frame_xml.get('main_object')
init_vals['obj_descr'] = frame_xml.get('obj_descr')
else:
frame_xml = etree.Element('frame')
init_vals={}
init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar')
init_vals['body_xml'] = etree.SubElement(frame_xml, 'body')
init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row')
init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods')
await frame_vars.init(init_vals=init_vals)
async def dump_inline(caller, xml):
# called from setup_form_inlline grid_frame 'before_save'
inline_vars = caller.data_objects['inline_vars']
frame_vars = caller.data_objects['frame_vars']
frame_xml = etree.Element('frame')
await set_if_not_none(frame_xml, frame_vars, 'main_object')
await set_if_not_none(frame_xml, frame_vars, 'obj_descr')
frame_xml.append(await frame_vars.getval('toolbar_xml'))
frame_xml.append(await frame_vars.getval('body_xml'))
frame_xml.append(await frame_vars.getval('buttonrow_xml'))
frame_xml.append(await frame_vars.getval('methods_xml'))
await inline_vars.setval('frame_xml', frame_xml)
#-----------------------------------------------------------------------------
# toolbar
#-----------------------------------------------------------------------------
tool_cols = ('type', 'label', 'tip', 'lng', 'name', 'obj_name', 'col_name', 'shortcut', 'action')
async def before_start_toolbar(caller, xml):
# called from setup_form_toolbar 'before_start_form'
# parent = caller.parent
# while True:
# if 'obj_names' in parent.data_objects:
# caller.data_objects['obj_names'] = parent.data_objects['obj_names']
# caller.data_objects['col_names'] = parent.data_objects['col_names']
# break
# parent = parent.parent
pass
async def load_toolbar(caller, xml):
# called from setup_form_frame.toolbar 'on_start_frame'
form_vars = caller.data_objects['form_vars']
toolbar_xml = await form_vars.getval('toolbar_xml')
if toolbar_xml is None:
toolbar_xml = etree.Element('toolbar')
await form_vars.setval('toolbar_xml', toolbar_xml)
await form_vars.setval('tb_template',
await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template')))
await form_vars.setval('tb_title',
await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title')))
await form_vars.save()
tool = caller.data_objects['tool']
await tool.delete_all()
for seq, tool_xml in enumerate(toolbar_xml):
# init_vals = {col: tool.get_val_from_xml(col, tool_xml.get(col))
# for col in tool_cols}
init_vals = {}
for col in tool_cols:
init_vals[col] = await tool.get_val_from_xml(col, tool_xml.get(col))
init_vals['seq'] = seq
await tool.init(display=False, init_vals=init_vals)
await tool.save()
async def dump_toolbar(caller, xml):
# called from setup_form_frame.toolbar 'before_save'
form_vars = caller.data_objects['form_vars']
tool = caller.data_objects['tool']
toolbar_xml = etree.Element('toolbar')
await set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template')
await set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title')
all_tools = tool.select_many(where=[], order=[('seq', False)])
async for _ in all_tools:
tool_xml = etree.SubElement(toolbar_xml, 'tool')
for col in tool_cols:
await set_if_not_none(tool_xml, tool, col)
await form_vars.setval('toolbar_xml', toolbar_xml)
#-----------------------------------------------------------------------------
# buttonrow
#-----------------------------------------------------------------------------
button_cols = ('btn_id', 'btn_label', 'lng', 'btn_default',
'btn_enabled', 'btn_validate', 'action', 'validation', 'help_msg')
async def load_buttonrow(caller, xml):
# called from setup_form_buttonrow 'on_start_frame'
form_vars = caller.data_objects['form_vars']
buttonrow_xml = await form_vars.getval('buttonrow_xml')
if buttonrow_xml is None:
buttonrow_xml = etree.Element('button_row')
await form_vars.setval('buttonrow_xml', buttonrow_xml)
await form_vars.setval('btn_template',
await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template')))
await form_vars.save()
button = caller.data_objects['button']
await button.delete_all()
for seq, button_xml in enumerate(buttonrow_xml):
# init_vals = {col: button.get_val_from_xml(col, button_xml.get(col))
# for col in button_cols}
init_vals = {}
for col in button_cols:
init_vals[col] = await button.get_val_from_xml(col, button_xml.get(col))
init_vals['seq'] = seq
await button.init(display=False, init_vals=init_vals)
await button.save()
async def dump_buttonrow(caller, xml):
# called from setup_form_buttonrow 'before_save'
form_vars = caller.data_objects['form_vars']
button = caller.data_objects['button']
buttonrow_xml = etree.Element('button_row')
await set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template')
all_buttons = button.select_many(where=[], order=[('seq', False)])
async for _ in all_buttons:
button_xml = etree.SubElement(buttonrow_xml, 'button')
for col in button_cols:
await set_if_not_none(button_xml, button, col)
await form_vars.setval('buttonrow_xml', buttonrow_xml)
#-----------------------------------------------------------------------------
# methods
#-----------------------------------------------------------------------------
method_cols = ('name', 'obj_name', 'action')
async def load_methods(caller, xml):
# called from setup_form_methods 'on_start_frame'
form_vars = caller.data_objects['form_vars']
method = caller.data_objects['method']
await method.delete_all()
methods_xml = await form_vars.getval('methods_xml')
if methods_xml is None:
methods_xml = etree.Element('frame_methods')
await form_vars.setval('methods_xml', methods_xml)
await form_vars.setval('method_template',
await form_vars.get_val_from_xml('method_template', methods_xml.get('template')))
await form_vars.save()
for seq, method_xml in enumerate(methods_xml):
# init_vals = {col: method.get_val_from_xml(col, method_xml.get(col))
# for col in method_cols}
init_vals = {}
for col in method_cols:
init_vals[col] = await method.get_val_from_xml(col, method_xml.get(col))
init_vals['seq'] = seq
await method.init(display=False, init_vals=init_vals)
await method.save()
async def dump_methods(caller, xml):
# called from setup_form_methods 'before_save'
form_vars = caller.data_objects['form_vars']
method = caller.data_objects['method']
methods_xml = etree.Element('frame_methods')
await set_if_not_none(methods_xml, form_vars, 'method_template', 'template')
all_methods = method.select_many(where=[], order=[('seq', False)])
async for _ in all_methods:
method_xml = etree.SubElement(methods_xml, 'method')
for col in method_cols:
await set_if_not_none(method_xml, method, col)
await form_vars.setval('methods_xml', methods_xml)
#-----------------------------------------------------------------------------
# body
#-----------------------------------------------------------------------------
body_cols = ('main_object', 'obj_descr', 'rowspan', 'colspan', 'value', 'obj_name', 'col_name',
'lng', 'height', 'pwd', 'readonly', 'choice', 'lookup', 'radio', 'before',
'form_dflt', 'validation', 'after', 'btn_id', 'btn_label', 'btn_enabled', 'btn_validate',
'action', 'help_msg', 'nb_label', 'subtype_obj', 'subtype_col', 'data_object', 'growable',
'num_grid_rows', 'cursor_name', 'form_name', 'auto_start', 'auto_startrow',
'toolbar', 'combo_type', 'group_name', 'member_name', 'pyfunc', 'prev', 'align', 'src', 'op', 'tgt')
async def before_start_body(caller, xml):
# called from setup_form_body 'before_start_form'
# parent = caller.parent
# while True:
# if 'obj_names' in parent.data_objects:
# caller.data_objects['obj_names'] = parent.data_objects['obj_names']
# caller.data_objects['col_names'] = parent.data_objects['col_names']
# break
# parent = parent.parent
pass
async def load_body(caller, xml):
# called from setup_form_body 'on_start_frame'
form_vars = caller.data_objects['form_vars']
body = caller.data_objects['body']
"""
obj_names = caller.data_objects['obj_names']
col_names = caller.data_objects['col_names']
all_obj = obj_names.select_many(where=[], order=[])
async for _ in all_obj:
print(obj_names)
all_col = col_names.select_many(where=[], order=[])
async for _ in all_col:
print(col_names)
print()
"""
"""
obj_name_fld = await body.getfld('obj_name')
obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj
await obj_names.delete_all()
col_name_fld = await body.getfld('col_name')
col_names = col_name_fld.foreign_key['tgt_field'].db_obj
await col_names.delete_all()
dbobj_xml = await form_vars.getval('dbobj_xml')
for dbobj in dbobj_xml.iter('db_obj'):
async with db_session.get_connection() as db_mem_conn:
conn = db_mem_conn.db
sql = (
"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'"
.format(caller.company, dbobj.get('table_name'))
)
cur = await conn.exec_sql(sql)
table_id, descr = await cur.__anext__()
await obj_names.init(init_vals={
'name': dbobj.get('name'), 'descr': descr})
await obj_names.save()
sql = (
"SELECT col_name, short_descr FROM {}.db_columns "
"WHERE table_id = {} AND col_type != 'virt' "
"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') "
.format(caller.company, table_id)
)
async for col_name, descr in await conn.exec_sql(sql):
await col_names.init(init_vals={ #'obj_id': obj_row_id,
'name': col_name, 'descr': descr})
await col_names.save()
memobj_xml = await form_vars.getval('memobj_xml')
for memobj in memobj_xml.iter('mem_obj'):
await obj_names.init(init_vals={
'name': memobj.get('name'), 'descr': memobj.get('descr')})
await obj_names.save()
obj_row_id = await obj_names.getval('row_id')
for memcol in memobj.iter('mem_col'):
await col_names.init(init_vals={'obj_id': obj_row_id,
'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')})
await col_names.save()
"""
body_xml = await form_vars.getval('body_xml')
if body_xml is None:
body_xml = etree.Element('body')
await form_vars.setval('body_xml', body_xml)
await body.delete_all(from_upd_on_save=True) # a trick to prevent running 'on_clean'
for seq, elem_xml in enumerate(body_xml):
init_vals = {}
init_vals['elem'] = elem_xml
init_vals['type'] = elem_xml.tag
init_vals['seq'] = seq
for fld in body.sub_types['type'][elem_xml.tag]:
val = await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name))
if val is not None:
init_vals[fld.col_name] = val
await body.init(display=False, init_vals=init_vals)
await body.save(from_upd_on_save=True) # a trick to prevent running 'on_clean'
# could make an alias, without gui link (cleaner?)
async def dump_body(caller, xml):
# called from setup_form_body 'before_save'
body = caller.data_objects['body']
body_xml = etree.Element('body')
all_body = body.select_many(where=[], order=[('seq', False)])
async for _ in all_body:
elem_xml = etree.SubElement(body_xml, await body.getval('type'))
for col in body_cols:
await set_if_not_none(elem_xml, body, col)
elem_xml[:] = (await body.getval('elem'))[:]
form_vars = caller.data_objects['form_vars']
await form_vars.setval('body_xml', body_xml)
#-----------------------------------------------------------------------------
# body_elem
#-----------------------------------------------------------------------------
async def load_body_elem(caller, xml):
# called from setup_form_body.grid_frame 'on_start_frame'
body = caller.data_objects['body']
# N.B. do not use this to store attributes - use sub_type columns instead
# only use it to store sub_elements
# P.S. it is ok to store copies of attributes in separate mem_objects,
# get the values from 'body' on loading, and replace the values
# in 'body' on dumping
elem_type = await body.getval('type')
elem_xml = await body.getval('elem')
if elem_type == 'grid':
grid_vars = caller.data_objects['grid_vars']
init_vals={}
init_vals['toolbar_xml'] = elem_xml.find('toolbar')
init_vals['columns_xml'] = elem_xml.find('cur_columns')
init_vals['filter_xml'] = elem_xml.find('cur_filter')
init_vals['sequence_xml'] = elem_xml.find('cur_sequence')
init_vals['methods_xml'] = elem_xml.find('grid_methods')
await grid_vars.init(init_vals=init_vals)
elif elem_type == 'grid_frame':
gridframe_vars = caller.data_objects['gridframe_vars']
init_vals={}
init_vals['main_object'] = await body.getval('main_object')
init_vals['obj_descr'] = await body.getval('obj_descr')
init_vals['toolbar_xml'] = elem_xml.find('toolbar')
init_vals['body_xml'] = elem_xml.find('body')
init_vals['buttonrow_xml'] = elem_xml.find('button_row')
init_vals['methods_xml'] = elem_xml.find('frame_methods')
await gridframe_vars.init(init_vals=init_vals)
elif elem_type == 'tree_frame':
treeframe_vars = caller.data_objects['treeframe_vars']
init_vals={}
init_vals['main_object'] = await body.getval('main_object')
init_vals['obj_descr'] = await body.getval('obj_descr')
init_vals['combo_type'] = await body.getval('combo_type')
init_vals['toolbar_xml'] = elem_xml.find('toolbar')
init_vals['body_xml'] = elem_xml.find('body')
init_vals['buttonrow_xml'] = elem_xml.find('button_row')
init_vals['methods_xml'] = elem_xml.find('frame_methods')
await treeframe_vars.init(init_vals=init_vals)
elif elem_type == 'subtype_frame':
subtype_vars = caller.data_objects['subtype_vars']
await subtype_vars.init(init_vals={
'subtype_obj': await body.getval('subtype_obj'),
'subtype_col': await body.getval('subtype_col'),
'lng': await body.getval('lng'),
})
subtypes = caller.data_objects['subtypes']
await subtypes.delete_all()
for subtype in elem_xml.iter('subtype_body'):
await subtypes.init(init_vals={
'subtype_id': subtype.get('subtype_id'),
'body_xml': subtype,
})
await subtypes.save()
async def dump_body_elem(caller, xml):
# called from setup_form_body.grid_frame 'before_save'
body = caller.data_objects['body']
elem_type = await body.getval('type')
elem_xml = await body.getval('elem')
if elem_type == 'grid':
grid_vars = caller.data_objects['grid_vars']
# await body.setval('data_object', await grid_vars.getval('data_object'))
# await body.setval('obj_descr', await grid_vars.getval('obj_descr'))
# await body.setval('growable', await grid_vars.getval('growable'))
# await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows'))
# await body.setval('cursor_name', await grid_vars.getval('cursor_name'))
# await body.setval('form_name', await grid_vars.getval('form_name'))
# await body.setval('auto_start', await grid_vars.getval('auto_start'))
# await body.setval('auto_startrow', await grid_vars.getval('auto_startrow'))
if elem_xml is None:
elem_xml = etree.Element(elem_type)
etree.SubElement(elem_xml, 'toolbar')
etree.SubElement(elem_xml, 'cur_columns')
etree.SubElement(elem_xml, 'cur_filter')
etree.SubElement(elem_xml, 'cur_sequence')
etree.SubElement(elem_xml, 'grid_methods')
elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:]
elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:]
elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:]
elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:]
elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:]
elif elem_type == 'grid_frame':
gridframe_vars = caller.data_objects['gridframe_vars']
# await body.setval('main_object', await gridframe_vars.getval('main_object'))
# await body.setval('obj_descr', await gridframe_vars.getval('obj_descr'))
if elem_xml is None:
elem_xml = etree.Element(elem_type)
etree.SubElement(elem_xml, 'toolbar')
etree.SubElement(elem_xml, 'body')
etree.SubElement(elem_xml, 'button_row')
etree.SubElement(elem_xml, 'frame_methods')
elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:]
elem_xml.find('body')[:] = (await gridframe_vars.getval('body_xml'))[:]
elem_xml.find('button_row')[:] = (await gridframe_vars.getval('buttonrow_xml'))[:]
elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:]
elif elem_type == 'tree_frame':
treeframe_vars = caller.data_objects['treeframe_vars']
await body.setval('main_object', await treeframe_vars.getval('main_object'))
await body.setval('obj_descr', await treeframe_vars.getval('obj_descr'))
await body.setval('combo_type', await treeframe_vars.getval('combo_type'))
if elem_xml is None:
elem_xml = etree.Element(elem_type)
etree.SubElement(elem_xml, 'toolbar')
etree.SubElement(elem_xml, 'body')
etree.SubElement(elem_xml, 'button_row')
etree.SubElement(elem_xml, 'frame_methods')
elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:]
elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:]
elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:]
elem_xml.find('frame_methods')[:] = (await treeframe_vars.getval('methods_xml'))[:]
elif elem_type == 'subtype_frame':
subtype_vars = caller.data_objects['subtype_vars']
await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj'))
await body.setval('subtype_col', await subtype_vars.getval('subtype_col'))
await body.setval('lng', await subtype_vars.getval('lng'))
if elem_xml is None:
elem_xml = etree.Element(elem_type)
subtypes = caller.data_objects['subtypes']
subtypes_xml = etree.Element('subtypes_temp')
all_subtypes = subtypes.select_many(where=[], order=[('subtype_id', False)])
async for _ in all_subtypes:
subtype_xml = etree.SubElement(subtypes_xml, 'subtype_body')
await set_if_not_none(subtype_xml, subtypes, 'subtype_id')
subtype_xml[:] = (await subtypes.getval('body_xml'))[:]
elem_xml[:] = subtypes_xml[:]
elif elem_xml is None:
elem_xml = etree.Element(elem_type)
await body.setval('elem', elem_xml)
async def set_if_not_none(elem_xml, db_obj, col_name, attr_name=None):
# create attribute on xml element, but only if not None or default
xml_val = await db_obj.get_val_for_xml(col_name) # returns None if None or equal to default
if xml_val is not None:
if attr_name is None: # if not specified, use col_name
attr_name = col_name
elem_xml.set(attr_name, xml_val)
| 2.265625 | 2 |
src/Plotting/linear_plots.py | goeckslab/MarkerIntensityPredictor | 3 | 12794973 | <filename>src/Plotting/linear_plots.py<gh_stars>1-10
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
def __create_intensity_heatmap_plot(self):
fig, ax = plt.subplots(figsize=(30, 30), dpi=300) # Sample figsize in inches
sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers)
ax.set_title("Marker intensities")
fig = ax.get_figure()
fig.tight_layout()
fig.savefig(Path(f"results/lr/marker_heatmap.png"), bbox_inches='tight')
plt.close()
def __create_r2_accuracy_plot(self):
"""
Creates a bar plot showing the accuracy of the model for each marker
:return:
"""
ax = sns.catplot(
data=self.prediction_scores, kind="bar",
x="Score", y="Marker", hue="Model",
ci="sd", palette="dark", alpha=.6, height=6
)
ax.despine(left=True)
ax.set_axis_labels("R2 Score", "Marker")
ax.set(xlim=(0, 1))
if self.test_file is None:
# ax.fig.suptitle("Single file")
plt.title("Single File", y=1.02)
ax.legend.set_title("Model")
ax.savefig(Path(f"results/lr/{self.train_file_name}_score_predictions.png"))
elif self.train_file is None:
plt.title("Multi Files", y=1.02)
ax.legend.set_title("Model")
ax.savefig(Path(f"results/lr/{self.test_file_name}_multi_score_predictions.png"))
else:
plt.title("Train Test File", y=1.02)
ax.legend.set_title("Model")
ax.savefig(Path(f"results/lr/{self.train_file_name}_{self.test_file_name}_score_predictions.png"))
plt.close()
| 2.890625 | 3 |
g8/src/uber/wall_follower.py | YaBoyWonder/Racecar | 1 | 12794974 | <reponame>YaBoyWonder/Racecar<gh_stars>1-10
#!/usr/bin/env python
"""
YaBoyWonder LICENSE: Apache-2.0
"""
import os
import rospy
from enum import Enum
from ackermann_msgs.msg import AckermannDriveStamped
from sensor_msgs.msg import LaserScan
from controllers import PIDController
from cv_bridge import CvBridge  # needed for the CvBridge instance created in __init__
class WallFollowerNode:
def __init__(self):
        rospy.init_node('wall_follower')  # node name assumed; the original STATE_MACHINE_NODE_NAME constant was never defined
self.laser_subscriber = rospy.Subscriber('/scan', LaserScan,
self.laser_callback, queue_size=10)
self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped,
queue_size=10)
self.steering_controller = PIDController(kP=1.2, kD=0)
self.drive_msg = AckermannDriveStamped()
self.cv_bridge = CvBridge()
self.goal_distance = 0.8
self.drive_speed = 1.2
def laser_callback(self, msg):
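        # Comments added for clarity: the tracked wall distance is the minimum range in
        # the central 20% of the scan; the car slows down when the range at the middle
        # of the scan drops below 2 m.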
cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))])
        front_dist = msg.ranges[len(msg.ranges) // 2]  # index the scan (integer index), not call it
steering_output = self.steering_controller.output(cur_dist, self.goal_distance)
if front_dist < 2.0:
self.drive_speed = 0.7
else:
self.drive_speed = 1.2
        self.drive_msg.drive.steering_angle = steering_output
self.drive_msg.drive.speed = self.drive_speed
print("Distance from wall: %s" % cur_dist)
print("Error: %s" % (cur_dist - self.goal_distance))
print("State: %s" % self.state)
self.drive_publisher.publish(self.drive_msg)
if __name__ == '__main__':
s = WallFollowerNode()
rospy.spin()
| 2.515625 | 3 |
amcp_pylib/core/client.py | dolejska-daniel/amcp-pylib | 5 | 12794975 | from .command import Command
from .connection import Connection
from .client_base import ClientBase
from amcp_pylib.response import ResponseBase, ResponseFactory
class Client(ClientBase):
""" Simple connection client class. """
def connect(self, host: str = "127.0.0.1", port: int = 5250):
if not self.connection:
self.connection = Connection(host, port)
def send(self, command: Command) -> ResponseBase:
return self.send_raw(bytes(command))
def send_raw(self, data: bytes) -> ResponseBase:
self.connection.send(data)
return self.process_response()
def process_response(self) -> ResponseBase:
data = self.connection.receive()
return ResponseFactory.create_from_bytes(data)
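# Illustrative usage sketch (added; not part of the original module). The host, port and
# the raw AMCP command bytes below are assumptions for demonstration only.
if __name__ == "__main__":
    client = Client()
    client.connect("127.0.0.1", 5250)
    response = client.send_raw(b"VERSION\r\n")
    print(response)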
| 2.78125 | 3 |
CausalImpactExplainer/reporting.py | SpikeLab-CL/CausalImpactExplainer | 7 | 12794976 | <filename>CausalImpactExplainer/reporting.py
############
#
# Helper functions to report results of experiments
#
#
############
from os import error
import pandas as pd
from typing import Dict, List, Union, Optional, Tuple
import numpy as np
from .utils import ExperimentOutput
import causalimpact
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot]
def center_weights(values, scales, variances):
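    # Added note: averages each quantity over the posterior draws, subtracts the mean
    # local scales from the mean weights, and divides by the square root of the mean
    # scale variances.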
return (values.numpy().mean(axis=0) - scales.numpy().mean(axis=0)) / np.sqrt(
variances.numpy().mean(axis=0) # type:ignore
)
def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact):
return center_weights(
causal_impact_object.model_samples["SparseLinearRegression/_weights_noncentered"], # type: ignore
causal_impact_object.model_samples["SparseLinearRegression/_local_scales_noncentered"], # type: ignore
causal_impact_object.model_samples[
"SparseLinearRegression/_local_scale_variances"
], # type: ignore
)
def get_dictionary_of_standardized_coefficients(
causal_impact_object: Union[causalimpact.CausalImpact],
) -> dict: # type: ignore
"""
Returns a dictionary with the name of the columns and the standardized coefficients
"""
try:
standardized_coefficients = get_standardized_coefficients(causal_impact_object)
return {
col_name: coeff
for col_name, coeff in zip(
causal_impact_object.pre_data.columns[1::], standardized_coefficients
)
}
except Exception as e:
print(e)
def plot_distribution_of_results(
experiment_results: Dict[str, ExperimentOutput],
max_p_value_cut: float = 1.0,
min_abs_effect_size_cut: Optional[float] = None,
figsize=(12, 6),
) -> Fig_and_Axes:
"""
Plots distribution of absolute and relative effects
Filters by a max_p_value and a minimum absolute effect size
TODO: filter also relative effects by minimum absolute effect size
"""
experiment_outputs = experiment_results.values()
absolute_effects = [
output.results_summary.loc["abs_effect", "cumulative"]
for output in experiment_outputs
if output.trained_model.p_value < max_p_value_cut
]
relative_effects = [
output.results_summary.loc["rel_effect", "cumulative"]
for output in experiment_outputs
if output.trained_model.p_value < max_p_value_cut
]
# filter by min_abs_effect_size
if min_abs_effect_size_cut is not None:
        absolute_effects = [e for e in absolute_effects if abs(e) >= min_abs_effect_size_cut]  # absolute_effects is a plain list, so filter with a comprehension
fig, axes = plt.subplots(1, 2, figsize=figsize)
sns.distplot(absolute_effects, ax=axes[0])
axes[0].set_title("Distribution of absolute effects")
sns.distplot(relative_effects, ax=axes[1])
axes[1].set_title("Distribution of relative effects")
return fig, axes
def summary_of_results(
results_df: pd.DataFrame,
max_p_value_cut: float,
min_abs_effect_size_cut: float,
percentiles=[0.1, 0.25, 0.5, 0.75, 0.9],
) -> pd.DataFrame:
"""
Returns a table with the sum, median, average and percentiles
of the results
"""
return pd.DataFrame({"x": [1], "y": [3]})
def get_most_important_control_time_series(
significance_cutoff: float, max_number_of_columns: int
) -> Dict[str, List[str]]:
"""
Per experiment id, gets a list of the most important variables
significance_cutoff is the minimum value to select the most important
variables
"""
return {"id1": ["var2", "var3"], "id2": ["var3", "var8"]}
def plot_most_important_control_time_series(experiment_dict):
"""
Plots the most important control time series
"""
pass
def get_diagnostic_values_per_experiment():
"""
Gets diagnostic values per experiment, such as R^2 fit
in the training period
"""
pass
| 2.65625 | 3 |
18/test.py | AlecRosenbaum/adventofcode2017 | 0 | 12794977 | <reponame>AlecRosenbaum/adventofcode2017<gh_stars>0
import unittest
from solution import solution_part_one, solution_part_two
class TestPartOne(unittest.TestCase):
TEST_INPUT = """
set a 1
add a 2
mul a a
mod a 5
snd a
set a 0
rcv a
jgz a -1
set a 1
jgz a -2
"""
def test_one(self):
self.assertEqual(solution_part_one(self.TEST_INPUT), 4)
# class TestPartTwo(unittest.TestCase):
# def test_one(self):
# self.assertEqual(solution_part_two(3), 1222153)
if __name__ == "__main__":
unittest.main()
| 3.171875 | 3 |
tests/debug/debug_transformer_transform.py | v-smwang/HanLP | 2 | 12794978 | <reponame>v-smwang/HanLP
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-01-11 18:37
from hanlp.datasets.ner.msra import MSRA_NER_TRAIN
from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform
transform = TransformerTransform(max_seq_length=128)
for x, y in transform.file_to_inputs(MSRA_NER_TRAIN):
assert len(x) == len(y)
if not len(x) or len(x) > 126:
print(x)
| 2.25 | 2 |
Dataset/Leetcode/train/5/479.py | kkcookies99/UAST | 0 | 12794979 | <filename>Dataset/Leetcode/train/5/479.py
class Solution:
def XXX(self, s: str) -> str:
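        # Note added for clarity: grow a window around each index, testing odd- and
        # even-length candidates against the best palindrome found so far.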
if s==s[::-1]:return s
maxlen,middle=1,0
i=1
len_s=len(s)
rev=s[0]
if(s[0]==s[1]):
rev=s[0:2]
maxlen=2
while i<len_s:
if(maxlen%2!=0):
odd=s[i-maxlen//2-1:i+maxlen//2+2]
even=s[i-maxlen//2:i+maxlen//2+2]
else:
odd=s[i-maxlen//2:i+maxlen//2+1]
even=s[i-maxlen//2:i+maxlen//2+2]
if (odd==odd[::-1]) and len(odd)>maxlen:
rev=odd
maxlen=len(odd)
continue
if(even==even[::-1] and len(even)>maxlen):
maxlen=len(even)
rev=even
continue
i+=1
return rev
| 2.953125 | 3 |
full-problems/floorInSortedArray.py | vikas-t/DS-Algo | 0 | 12794980 | <reponame>vikas-t/DS-Algo
#!/usr/bin/python
#https://practice.geeksforgeeks.org/problems/floor-in-a-sorted-array/0
def sol(arr, n, k):
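    """Return the index of the largest element <= k in the sorted arr, or -1 if none.
    Docstring added for clarity; e.g. sol([1, 2, 8, 10, 10, 12, 19], 7, 5) returns 1."""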
f = None
for i in range(n):
if arr[i] <= k:
f = i
    if f is None:  # 'not f' would wrongly reject a floor found at index 0
return -1
return f | 3.234375 | 3 |
util/import struct.py | aadiljamal/project | 2 | 12794981 | <reponame>aadiljamal/project
import struct
from struct import unpack
def unpack_drawing(file_handle):
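    # Comment added for clarity. Record layout, as read below: 8-byte key id ('Q'),
    # 2-byte country code ('2s'), 1-byte recognized flag ('b'), 4-byte timestamp ('I'),
    # 2-byte stroke count ('H'), then for each stroke a 2-byte point count followed by
    # the x and y coordinate byte arrays.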
key_id, = unpack('Q', file_handle.read(8))
country_code, = unpack('2s', file_handle.read(2))
recognized, = unpack('b', file_handle.read(1))
timestamp, = unpack('I', file_handle.read(4))
n_strokes, = unpack('H', file_handle.read(2))
image = []
for i in range(n_strokes):
n_points, = unpack('H', file_handle.read(2))
fmt = str(n_points) + 'B'
x = unpack(fmt, file_handle.read(n_points))
y = unpack(fmt, file_handle.read(n_points))
image.append((x, y))
return {
'key_id': key_id,
'country_code': country_code,
'recognized': recognized,
'timestamp': timestamp,
'image': image
}
def unpack_drawings(filename):
with open(filename, 'rb') as f:
while True:
try:
yield unpack_drawing(f)
except struct.error:
break
for drawing in unpack_drawings('test_image2.jpeg'):
# do something with the drawing
print(drawing['image']) | 2.859375 | 3 |
downloader/RateLimiter.py | pixolution/PixolutionImageDownloader | 9 | 12794982 | <reponame>pixolution/PixolutionImageDownloader<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
import functools
import time
from downloader.ThreadSafe import SingletonMixin
from downloader.ThreadSafe import synchronized
"""
A rate limiter to ensure that only a given number of calls are made
per given time interval
"""
class RateLimiter(SingletonMixin):
"""
Constructor with 50 actions per second as default. Call setup to reconfigure
"""
def __init__(self):
self.rate=50.0
self.per=1.0
self.allowance = self.rate
self.last_check = self.now()
"""
Set up the rate limiter with the given number of actions in the given
interval in seconds.
You need to call this method first to configure the RateLimiter
"""
def setup(self, number_actions, interval):
if number_actions > 0.0 and number_actions < 1.0:
raise Exception("number_actions needs to be greater or equal 1.0")
self.rate=float(number_actions)
self.per=float(interval)
self.allowance = self.rate
self.last_check = self.now()
if self.rate < 0:
print("set up RateLimiter: disabled (no rate limiting)")
else:
print("set up RateLimiter: ",self.rate," actions per ",self.per," seconds")
def now(self):
return time.time()
"""
Call this method before you call your action that should respect the rate
limit. In case the rate limit is exceeded this method blocks until the given
    number of actions per interval is fulfilled again.
    This method is thread safe. For the algorithm used, see:
https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm
This is a token bucket algorithm without queue. The bucket is allowance.
The bucket size is rate. The allowance += … line is an optimization of adding
a token every rate per seconds.
"""
@synchronized
def acquire(self):
# return immediately if rate limit is disabled
if self.rate<0:
return
# else process the acquire request, and block until token is available
current = self.now()
time_passed = current - self.last_check
self.last_check = current
self.allowance += time_passed * (self.rate / self.per)
if self.allowance > self.rate:
self.allowance = self.rate
self.allowance -= 1.0
if self.allowance < 1.0:
# wait until next bucket is available
time.sleep( (1-self.allowance) * (self.per/self.rate))
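# Minimal usage sketch (added for illustration; not part of the original module). How the
# shared instance is obtained depends on SingletonMixin, which is not shown here, so plain
# construction is assumed, and the 5-actions-per-second figure is arbitrary.
if __name__ == "__main__":
    limiter = RateLimiter()
    limiter.setup(5, 1.0)
    for i in range(10):
        limiter.acquire()  # blocks once the 5-per-second budget is used up
        print("action", i, "at", time.time())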
| 3.171875 | 3 |
lebanese_channels/channel_ids.py | ChadiEM/Lebanese-Channels | 10 | 12794983 | from lebanese_channels.channel import Channel
# noinspection PyUnresolvedReferences
from lebanese_channels.services import *
CHANNEL_LIST = []
for cls in Channel.__subclasses__():
CHANNEL_LIST.append(cls())
CHANNEL_LIST = sorted(CHANNEL_LIST, key=lambda x: x.get_name())
| 2.140625 | 2 |
tests/pyconverter-test/cases/array_generics2.py | jaydeetay/pxt | 977 | 12794984 | <gh_stars>100-1000
obstacles: List[List[number]] = []
obstacles.removeAt(0).removeAt(0) | 1.351563 | 1 |
test/test_log_utils.py | knkgun/federalist-garden-build | 5 | 12794985 | <gh_stars>1-10
import logging
from unittest.mock import patch
from log_utils.get_logger import (
LogFilter, Formatter, get_logger, init_logging,
set_log_attrs, DEFAULT_LOG_LEVEL)
from log_utils.db_handler import DBHandler
class TestLogFilter():
def test_it_filters_message_with_default_mask(self):
priv_values = ['foobar']
msg = 'hellofoobar'
filter = LogFilter(priv_values)
record = logging.makeLogRecord({'msg': msg})
result = filter.filter(record)
assert(result is True)
assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}')
def test_it_filters_message_with_custom_mask(self):
priv_values = ['foobar']
mask = 'TheNumber42'
msg = 'hellofoobar'
filter = LogFilter(priv_values, mask)
record = logging.makeLogRecord({'msg': msg})
result = filter.filter(record)
assert(result is True)
assert(record.getMessage() == f'hello{mask}')
def test_it_does_not_log_empty_messages(self):
priv_values = []
msg = ''
filter = LogFilter(priv_values)
record = logging.makeLogRecord({'msg': msg})
result = filter.filter(record)
assert(result is False)
def test_it_replaces_message_invalid_access_key(self):
priv_values = []
msg = f'hello{LogFilter.INVALID_ACCESS_KEY}'
filter = LogFilter(priv_values)
record = logging.makeLogRecord({'msg': msg})
result = filter.filter(record)
assert(result is True)
assert(record.getMessage() == (
'Whoops, our S3 keys were rotated during your '
'build and became out of date. This was not a '
'problem with your site build, but if you restart '
'the failed build it should work on the next try. '
'Sorry for the inconvenience!'
))
class TestFormatter():
@patch('logging.Formatter.format')
def test_it_populates_empty_strings_if_key_is_missing(self, mock_format):
keys = ['foobar']
formatter = Formatter(keys)
record = logging.makeLogRecord({})
formatter.format(record)
assert(record.foobar == '')
mock_format.assert_called_once_with(record)
@patch('logging.Formatter.format')
def test_it_ignores_key_if_present(self, mock_format):
keys = ['foobar']
formatter = Formatter(keys)
record = logging.makeLogRecord({'foobar': 'Hello!'})
formatter.format(record)
assert(record.foobar == 'Hello!')
mock_format.assert_called_once_with(record)
class TestGetLogger():
def test_it_returns_a_logger_with_an_adapter_with_extras(self):
name = 'foobar'
attrs = {'foo': 'bar'}
set_log_attrs(attrs)
adapter = get_logger(name)
assert(type(adapter) == logging.LoggerAdapter)
assert(adapter.logger.name == name)
assert(adapter.extra == attrs)
@patch('psycopg2.connect')
@patch('logging.basicConfig')
class TestInitLogging():
def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _):
init_logging([], {'buildid': 1234}, 'foo')
_, kwargs = mock_basic_config.call_args
assert(kwargs['level'] == DEFAULT_LOG_LEVEL)
assert(len(kwargs['handlers']) == 2)
assert(type(kwargs['handlers'][0]) == logging.StreamHandler)
assert(type(kwargs['handlers'][1]) == DBHandler)
| 2.515625 | 3 |
src/world_entity/player.py | rahul38888/opworld | 0 | 12794986 | from ursina.prefabs.first_person_controller import FirstPersonController
class Player(FirstPersonController):
def __init__(self, position, init_health):
super(Player, self).__init__()
self.position = position
self.health = init_health
self.last_max_jump_pos = 0
| 2.375 | 2 |
DAEGC/model.py | devvrit/DAEGC | 0 | 12794987 | import torch
import torch.nn as nn
import torch.nn.functional as F
from layer import GATLayer
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class GAT(nn.Module):
def __init__(self, num_features, hidden_size, embedding_size, alpha):
super(GAT, self).__init__()
self.hidden_size = hidden_size
self.embedding_size = embedding_size
self.alpha = alpha
self.conv1 = GATLayer(num_features, hidden_size, alpha)
self.conv2 = GATLayer(hidden_size, embedding_size, alpha)
def forward(self, x, adj, M):
h = self.conv1(x, adj, M)
h = self.conv2(h, adj, M)
z = F.normalize(h, p=2, dim=1)
A_pred = self.dot_product_decode(z)
return A_pred, z
def dot_product_decode(self, Z):
A_pred = torch.sigmoid(torch.matmul(Z, Z.t()))
return A_pred
class pseudo_gat(nn.Module):
    def __init__(self, num_features, hidden_size, num_points):
        # num_points (the number of graph nodes) is added as a parameter here so the
        # identity embedding can be sized; the original line referenced undefined
        # names (num_points, hidden_dims).
        super(pseudo_gat, self).__init__()
        self.w1 = nn.Linear(num_features, hidden_size)
        self.iden = nn.Parameter(data=torch.randn((num_points, hidden_size), dtype=torch.float).to(device), requires_grad=True)
    def forward(self, x, adj, M):
        z = self.w1(x) + self.iden
A_pred = torch.sigmoid(torch.matmul(z, z.t()))
return A_pred, z
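# Illustrative shape-check sketch (added; not from the original repository). The sizes are
# arbitrary and a dense (n, n) adjacency / attention matrix is assumed, since GATLayer's
# exact input expectations are not shown here.
if __name__ == "__main__":
    n, f = 8, 16
    model = GAT(num_features=f, hidden_size=32, embedding_size=4, alpha=0.2).to(device)
    x = torch.randn(n, f, device=device)
    adj = torch.eye(n, device=device)
    M = torch.eye(n, device=device)
    A_pred, z = model(x, adj, M)
    print(A_pred.shape, z.shape)  # expected: (n, n) and (n, embedding_size)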
| 2.53125 | 3 |
frontend/migrations/0003_page_js.py | MetLee/hackergame | 48 | 12794988 | # Generated by Django 2.1.12 on 2019-10-14 14:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('frontend', '0002_auto_20191010_0025'),
]
operations = [
migrations.AddField(
model_name='page',
name='js',
field=models.TextField(blank=True, help_text='会被放入 <code>script</code> 的 JS'),
),
]
| 1.4375 | 1 |
src/common/trainer.py | ilyailyash/Torch-Voice-activity-detection | 1 | 12794989 | import time
from functools import partial
from pathlib import Path
import toml
import torch
import colorful
import numpy as np
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
from torch.cuda.amp import GradScaler
from sklearn.metrics import DetCurveDisplay
import src.util.metrics as metrics
from src.util import visualization
from src.util.acoustic_utils import stft, istft
from src.util.utils import prepare_empty_dir, ExecutionTime
plt.switch_backend('agg')
class BaseTrainer:
def __init__(self,
dist,
rank,
config,
resume: bool,
model,
loss_function,
optimizer,
scheduler):
self.color_tool = colorful
self.color_tool.use_style("solarized")
self.model = model
self.optimizer = optimizer
self.scheduler = scheduler
self.loss_function = loss_function
# DistributedDataParallel (DDP)
self.rank = rank
self.dist = dist
# Automatic mixed precision (AMP)
self.use_amp = config["meta"]["use_amp"]
self.scaler = GradScaler(enabled=self.use_amp)
# Acoustics
self.acoustic_config = config["acoustic"]
# Supported STFT
n_fft = self.acoustic_config["n_fft"]
hop_length = self.acoustic_config["hop_length"]
win_length = self.acoustic_config["win_length"]
center = self.acoustic_config["center"]
self.torch_stft = partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length,
device=self.rank, center=center)
self.istft = partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length,
device=self.rank, center=center)
# Trainer.train in config
self.train_config = config["trainer"]["train"]
self.epochs = self.train_config["epochs"]
self.save_checkpoint_interval = self.train_config["save_checkpoint_interval"]
self.clip_grad_norm_value = self.train_config["clip_grad_norm_value"]
assert self.save_checkpoint_interval >= 1
# Trainer.validation in config
self.validation_config = config["trainer"]["validation"]
self.validation_interval = self.validation_config["validation_interval"]
self.save_max_metric_score = self.validation_config["save_max_metric_score"]
assert self.validation_interval >= 1
# Trainer.visualization in config
self.visualization_config = config["trainer"]["visualization"]
# In the 'train.py' file, if the 'resume' item is True, we will update the following args:
self.start_epoch = 1
self.best_score = -np.inf if self.save_max_metric_score else np.inf
self.save_dir = Path(config["meta"]["save_dir"]).expanduser().absolute() / config["meta"]["experiment_name"]
self.checkpoints_dir = self.save_dir / "checkpoints"
self.logs_dir = self.save_dir / "logs"
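        # Comment added: decision thresholds tracked during validation, namely the equal
        # error rate point plus what appear to be the 1% FPR and 1% FNR operating points.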
self.thresholds = {'eer': 0,
'fpr_1': 0,
'fnr_1': 0}
if resume:
self._resume_checkpoint()
if config["meta"]["preloaded_model_path"]:
self._preload_model(Path(config["preloaded_model_path"]))
if self.rank == 0:
prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume)
self.writer = visualization.writer(self.logs_dir.as_posix())
self.writer.add_text(
tag="Configuration",
text_string=f"<pre> \n{toml.dumps(config)} \n</pre>",
global_step=1
)
print(self.color_tool.cyan("The configurations are as follows: "))
print(self.color_tool.cyan("=" * 40))
print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except "\n"
print(self.color_tool.cyan("=" * 40))
with open((self.save_dir / f"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml").as_posix(), "w") as handle:
toml.dump(config, handle)
self._print_networks([self.model])
def _preload_model(self, model_path):
"""
Preload model parameters (in "*.tar" format) at the start of the experiment.
Args:
model_path (Path): The file path of the *.tar file
"""
model_path = model_path.expanduser().absolute()
assert model_path.exists(), f"The file {model_path.as_posix()} is not exist. please check path."
map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank}
model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location)
self.model.load_state_dict(model_checkpoint["model"], strict=False)
if self.rank == 0:
print(f"Model preloaded successfully from {model_path.as_posix()}.")
def _resume_checkpoint(self):
"""
Resume experiment from the latest checkpoint.
"""
latest_model_path = self.checkpoints_dir.expanduser().absolute() / "latest_model.tar"
assert latest_model_path.exists(), f"{latest_model_path} does not exist, can not load latest checkpoint."
self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work
map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank}
checkpoint = torch.load(latest_model_path.as_posix(), map_location=map_location)
self.start_epoch = checkpoint["epoch"] + 1
self.best_score = checkpoint["best_score"]
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.scheduler.load_state_dict(checkpoint["scheduler"])
self.scaler.load_state_dict(checkpoint["scaler"])
self.model.load_state_dict(checkpoint["model"])
self.thresholds = checkpoint["thresholds"]
if self.rank == 0:
print(f"Model checkpoint loaded. Training will begin at {self.start_epoch} epoch.")
def _save_checkpoint(self, epoch, is_best_epoch=False):
"""
Save checkpoint to "<save_dir>/<config name>/checkpoints" directory, which consists of:
- the epoch number
- the best metric score in history
- the optimizer parameters
- the model parameters
Args:
is_best_epoch (bool): If the model achieves the best metric score in the current epoch (is_best_epoch=True),
the model checkpoint will be saved as "<save_dir>/checkpoints/best_model.tar".
"""
print(f"\t Saving {epoch} epoch model checkpoint...")
state_dict = {
"epoch": epoch,
"best_score": self.best_score,
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"scaler": self.scaler.state_dict(),
"model": self.model.state_dict(),
"thresholds": self.thresholds
}
# "latest_model.tar"
# Contains all checkpoint information, including the optimizer parameters, the model parameters, etc.
# New checkpoint will overwrite the older one.
torch.save(state_dict, (self.checkpoints_dir / "latest_model.tar").as_posix())
# "model_{epoch_number}.tar"
# Contains all checkpoint information, like "latest_model.tar". However, newer checkpoints will not overwrite older ones.
torch.save(state_dict, (self.checkpoints_dir / f"model_{str(epoch).zfill(4)}.tar").as_posix())
# If the model gets the best metric score (is_best_epoch=True) in the current epoch,
# the model checkpoint will be saved as "best_model.tar".
# The newer best-scored checkpoint will overwrite the older one.
if is_best_epoch:
print(self.color_tool.red(f"\t Found a best score in the {epoch} epoch, saving..."))
torch.save(state_dict, (self.checkpoints_dir / "best_model.tar").as_posix())
def _is_best_epoch(self, score, save_max_metric_score=True):
"""
Check if the current model got the best metric score
"""
if save_max_metric_score and score >= self.best_score:
self.best_score = score
return True
elif not save_max_metric_score and score <= self.best_score:
self.best_score = score
return True
else:
return False
@staticmethod
def _print_networks(models: list):
print(f"This project contains {len(models)} models, the number of the parameters is: ")
params_of_all_networks = 0
for idx, model in enumerate(models, start=1):
params_of_network = 0
for param in model.parameters():
params_of_network += param.numel()
print(f"\tNetwork {idx}: {params_of_network / 1e6} million.")
params_of_all_networks += params_of_network
print(f"The amount of parameters in the project is {params_of_all_networks / 1e6} million.")
def _set_models_to_train_mode(self):
self.model.train()
def _set_models_to_eval_mode(self):
self.model.eval()
@staticmethod
def get_thresholds(labels, scores):
eer_t, eer, fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels, scores)
return eer_t, fpr_1_t, fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr
def metrics_visualization(self, labels, predicted, metrics_list, epoch):
"""
Get metrics on validation dataset by paralleling.
"""
assert "ROC_AUC" in metrics_list
# Check if the metric is registered in "util.metrics" file.
for i in metrics_list:
assert i in metrics.REGISTERED_METRICS.keys(), f"{i} is not registered, please check 'util.metrics' file."
fpr, tpr, thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1))
roc_auc_mean = 0
for metric_name in metrics_list:
mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr)
# Add the mean value of the metric to tensorboard
self.writer.add_scalar(f"Validation/{metric_name}", mean_score, epoch)
if metric_name == "ROC_AUC":
roc_auc_mean = mean_score
fig, axes = plt.subplots(1, 1, figsize=(6, 6))
display = DetCurveDisplay(fpr=fpr, fnr=1 - tpr, estimator_name=f'ROC_AUC = {roc_auc_mean}')
display.plot(axes)
self.writer.add_figure(f"DetCurve", fig, epoch)
eer_t, fpr_1_t, fnr_1_t, _, _, _ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1))
f1, _, _, precision, recall = metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1))
self.writer.add_scalar(f"Validation/F1", f1, epoch)
self.writer.add_scalar(f"Validation/Precision", precision, epoch)
self.writer.add_scalar(f"Validation/recall", recall, epoch)
self.thresholds = {'eer': eer_t,
'fpr_1': fpr_1_t,
'fnr_1': fnr_1_t}
return roc_auc_mean
def train(self):
for epoch in range(self.start_epoch, self.epochs + 1):
if self.rank == 0:
print(self.color_tool.yellow(f"{'=' * 15} {epoch} epoch {'=' * 15}"))
print("[0 seconds] Begin training...")
timer = ExecutionTime()
self._set_models_to_train_mode()
self._train_epoch(epoch)
# Only the first GPU (process) is used for validation.
if self.rank == 0:
if epoch % self.validation_interval == 0:
print(f"[{timer.duration()} seconds] Training has finished, validation is in progress...")
if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0):
self._save_checkpoint(epoch)
self._set_models_to_eval_mode()
metric_score = self._validation_epoch(epoch)
if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0):
self._save_checkpoint(epoch)
if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score):
self._save_checkpoint(epoch, is_best_epoch=True)
print(f"[{timer.duration()} seconds] This epoch has finished.")
def _train_epoch(self, epoch):
raise NotImplementedError
def _validation_epoch(self, epoch):
raise NotImplementedError
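# A minimal sketch of a concrete trainer built on BaseTrainer, assuming the
# subclass owns `train_dataloader` / `valid_dataloader` attributes and a binary
# VAD target; those names and the loss/score handling are illustrative only.
class _ExampleTrainer(BaseTrainer):
    def _train_epoch(self, epoch):
        for noisy, target in self.train_dataloader:  # assumed attribute
            self.optimizer.zero_grad()
            with torch.cuda.amp.autocast(enabled=self.use_amp):
                prediction = self.model(noisy.to(self.rank))
                loss = self.loss_function(prediction, target.to(self.rank))
            self.scaler.scale(loss).backward()
            self.scaler.unscale_(self.optimizer)
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_grad_norm_value)
            self.scaler.step(self.optimizer)
            self.scaler.update()

    def _validation_epoch(self, epoch):
        labels, scores = [], []
        with torch.no_grad():
            for noisy, target in self.valid_dataloader:  # assumed attribute
                scores.append(self.model(noisy.to(self.rank)).cpu())
                labels.append(target)
        # metrics_visualization returns the ROC AUC consumed by _is_best_epoch
        return self.metrics_visualization(torch.cat(labels), torch.cat(scores), ["ROC_AUC"], epoch)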
| 1.953125 | 2 |
python-worm/star-pie.py | AHongKong/Python-Practice | 0 | 12794990 | <gh_stars>0
from pyecharts import ThemeRiver
from pyecharts import Pie
rate = []
with open('quan.txt', mode='r',encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
arr = line.split(',')
if len(arr) == 5:
rate.append(arr[3].replace('\n',''))
print(rate.count('5')+rate.count('4.5'))
print(rate.count('4')+rate.count('3.5'))
print(rate.count('3')+rate.count('2.5'))
print(rate.count('2')+rate.count('1.5'))
print(rate.count('1')+rate.count('0.5'))
attr = ['五星','四星','三星','二星','一星']
v1 = [3324,1788,1293,553,1653]
pie = Pie('饼图-星级图示例',
title_pos='center',
width=900
)
pie.add("7-17", attr, v1,
center=[75,50],
is_random=True,
radius=[30,75],
rosetype='area',
is_legend_show=False,
is_label_show=True
)
pie.render()
| 2.421875 | 2 |
server/moback.py | dhilipsiva/moback | 2 | 12794991 | import api
app = api.app
# URLs
## API URLs
app.add_url_rule('/', view_func=api.index)
app.add_url_rule(
'/login',
view_func=api.login, methods=['POST', ])
app.add_url_rule(
'/register',
view_func=api.register, methods=['POST', ])
app.add_url_rule(
'/forgot_password',
view_func=api.forgot_password, methods=['GET', 'POST', ])
app.add_url_rule(
'/reset_page',
view_func=api.reset_page, methods=['GET', 'POST', ])
API_VER_1 = '/api/v1/'
app.add_url_rule(
API_VER_1 + 'profile/<person_id>',
view_func=api.profile, methods=['GET', ])
app.add_url_rule(
API_VER_1 + 'score',
view_func=api.score, methods=['GET', 'POST', ])
app.add_url_rule(
API_VER_1 + 'login_with_fb',
view_func=api.login_with_fb, methods=['POST', ])
app.add_url_rule(
API_VER_1 + 'leaderboard',
view_func=api.leaderboard, methods=['GET', ])
app.add_url_rule(
API_VER_1 + 'user_scores',
view_func=api.user_scores, methods=['GET', ])
if __name__ == '__main__':
app.run(
port=8888,
debug=True,
host='0.0.0.0')
| 1.96875 | 2 |
awardsapp/migrations/0002_auto_20210407_1707.py | RYAN2540/awwwards | 0 | 12794992 | <gh_stars>0
# Generated by Django 3.1.7 on 2021-04-07 14:07
import awardsapp.models
import cloudinary.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('awardsapp', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'ordering': ['-post_date']},
),
migrations.RenameField(
model_name='profile',
old_name='portfolio_link',
new_name='link',
),
migrations.AlterField(
model_name='profile',
name='location',
field=models.CharField(max_length=40),
),
migrations.AlterField(
model_name='profile',
name='profile_pic',
field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='project',
name='screenshot',
field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'),
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post_date', models.DateTimeField(auto_now_add=True)),
('design', awardsapp.models.IntegerRangeField()),
('usability', awardsapp.models.IntegerRangeField()),
('content', awardsapp.models.IntegerRangeField()),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')),
('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awardsapp.profile')),
],
options={
'ordering': ['-post_date'],
},
),
]
| 1.796875 | 2 |
mtgmonte/mtgutils.py | Erotemic/mtgmonte | 0 | 12794993 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import itertools
import six
import operator
from functools import reduce  # reduce is used below and is not a builtin on Python 3
print, rrr, profile = ut.inject2(__name__, '[mtgutils]')
# Then check for color considerations
def can_cast(spell_sequence, mana_combos):
"""
Returns if a spell sequence is castable given the current mana sources
Args:
spell_sequence (list):
mana_combos (list):
Returns:
bool: valid
CommandLine:
python -m mtgmonte.mtgutils --test-can_cast
python -m mtgmonte.mtgutils --exec-can_cast:0
python -m mtgmonte.mtgutils --exec-can_cast:1
Setup:
>>> # ENABLE_DOCTEST
>>> from mtgmonte.mtgutils import * # NOQA
>>> from mtgmonte import mtgobjs
>>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau']))
>>> land_list = mtgobjs.load_cards(['Mountain', 'Island', 'Flooded Strand', 'Shivan Reef'])
>>> mana_combos = possible_mana_combinations(land_list, deck)
Example0:
>>> # ENABLE_DOCTEST
>>> spell_sequence = mtgobjs.load_cards(['White Knight', 'Delver of Secrets'])
>>> valid = can_cast(spell_sequence, mana_combos)
>>> result = ('valid = %s' % (str(valid),))
>>> print(result)
valid = False
Example1:
>>> # ENABLE_DOCTEST
>>> spell_sequence = mtgobjs.load_cards(['Lightning Angel'])
>>> valid = can_cast(spell_sequence, mana_combos)
>>> result = ('valid = %s' % (str(valid),))
>>> print(result)
valid = True
"""
mana_costs = [s.mana_cost2 for s in spell_sequence]
combined_cost = sum(mana_costs)
valid = False
for mana_combo in mana_combos:
# print('mana_combo = %r' % (mana_combo,))
combo2 = reduce(operator.add, mana_combo)
# TODO: phyrexian / hybrid
if combined_cost.satisfied_by(combo2):
valid = True
break
return valid
<EMAIL>
def possible_mana_combinations(land_list, deck=None):
"""
CommandLine:
python -m mtgmonte.mtgutils --test-possible_mana_combinations
Example:
>>> # ENABLE_DOCTEST
>>> from mtgmonte.mtgutils import * # NOQA
>>> from mtgmonte import mtgobjs
>>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island']))
>>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand', 'Flooded Strand', 'Shivan Reef'])
>>> card = land_list[-1]
>>> mana_combos = possible_mana_combinations(land_list, deck)
>>> result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True))
>>> print(result)
({CC}, {U}, {G}, {U}, {C}),
({CC}, {U}, {G}, {B}, {C}),
({CC}, {U}, {U}, {U}, {C}),
({CC}, {U}, {U}, {B}, {C}),
({CC}, {U}, {G}, {U}, {R}),
({CC}, {U}, {G}, {B}, {R}),
({CC}, {U}, {U}, {U}, {R}),
({CC}, {U}, {U}, {B}, {R}),
"""
from mtgmonte import mtgobjs
avail_mana = [land.mana_potential2(deck=deck, recurse=False)
for land in land_list]
avail_mana = filter(len, avail_mana)
mana_combos1 = list(ut.iprod(*avail_mana))
# Encode the idea that two fetches cant fetch the same land
non_class1 = [
[c for c in co if not isinstance(c, six.string_types)]
for co in mana_combos1
]
flags = [len(co) == 0 or len(set(co)) == len(co) for co in non_class1]
mana_combos2 = ut.compress(mana_combos1, flags)
mana_combos3 = [[[c] if isinstance(c, mtgobjs.ManaSet) else
c.mana_potential2(deck=deck)
for c in co] for co in mana_combos2]
unflat_combos3 = [list(ut.iprod(*co)) for co in mana_combos3]
mana_combos4 = ut.flatten(unflat_combos3)
#mana_combos4 = [reduce(operator.add, m) for m in mana_combos4]
#z = reduce(operator.add, m)
#import utool
#utool.embed()
# avail_mana = [land.mana_potential(deck=deck) for land in land_list]
# avail_mana = filter(len, avail_mana)
# mana_combos4 = list(ut.iprod(*avail_mana))
combo_ids = [tuple(sorted(x)) for x in mana_combos4]
flags = ut.flag_unique_items(combo_ids)
mana_combos = ut.compress(mana_combos4, flags)
#mana_combos = list(map(tuple, [''.join(c) for c in mana_combos]))
return mana_combos
def get_max_avail_cmc(land_list, deck=None):
"""
CommandLine:
python -m mtgmonte.mtgutils --test-get_max_avail_cmc
Example:
>>> # ENABLE_DOCTEST
>>> from mtgmonte.mtgutils import * # NOQA
>>> from mtgmonte import mtgobjs
>>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island']))
>>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island', 'Flooded Strand', 'Flooded Strand'])
>>> card = land_list[-1]
>>> max_avail_cmc = get_max_avail_cmc(land_list, deck)
>>> result = (ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True))
>>> print(result)
6
"""
avail_mana = [land.mana_potential2(deck=deck, recurse=True) for land in land_list]
avail_mana = filter(len, avail_mana)
maxgen_list = [max(map(len, mana)) for mana in avail_mana]
max_avail_cmc = sum(maxgen_list)
return max_avail_cmc
def get_cmc_feasible_sequences(spell_list, max_avail_cmc):
# Get spells castable on their own
flags = [spell.cmc <= max_avail_cmc for spell in spell_list]
feasible_spells = ut.compress(spell_list, flags)
cmc_feasible_sequences = []
for num in range(1, len(feasible_spells) + 1):
spell_combos = list(itertools.combinations(feasible_spells, num))
for combo in spell_combos:
total = sum([spell.cmc for spell in combo])
if total <= max_avail_cmc:
cmc_feasible_sequences.append(combo)
return cmc_feasible_sequences
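def _example_castable_sequences(spell_list, land_list, deck=None):
    """
    Minimal illustrative helper showing how the utilities above compose; it is
    a sketch only and is not called elsewhere in the module. It assumes the
    same card objects the doctests use (mtgobjs.load_cards / mtgobjs.Deck).
    """
    mana_combos = possible_mana_combinations(land_list, deck)
    max_avail_cmc = get_max_avail_cmc(land_list, deck)
    feasible = get_cmc_feasible_sequences(spell_list, max_avail_cmc)
    return [combo for combo in feasible if can_cast(combo, mana_combos)]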
#def hacky_knapsack_solns():
# # first determine which spells are castable without color consideration
# # make knapsack items
# total_avail_mana = len(land_list)
# flags = [spell.cmc < total_avail_mana for spell in spell_list]
# feasible_spells = ut.compress(spell_list, flags)
# items = [(1, spell.cmc, idx) for idx, spell in enumerate(feasible_spells)]
# total_val, subset = ut.knapsack(items, total_avail_mana)
# spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset, 2))
# # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task
# # TODO:
# # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix
# #items = [1,1,3,4,5]
# items = [2, 3, 4, 3, 3, 5, 4, 1, 1, 3]
# knapsack = []
# limit = 7
# #<EMAIL>
# def print_solutions(current_item, knapsack, current_sum):
# #if all items have been processed print the solution and return:
# if current_item == len(items):
# print(knapsack)
# return
# #don't take the current item and go check others
# print_solutions(current_item + 1, list(knapsack), current_sum)
# #take the current item if the value doesn't exceed the limit
# if (current_sum + items[current_item] <= limit):
# knapsack.append(items[current_item])
# current_sum += items[current_item]
# #current item taken go check others
# print_solutions(current_item + 1, knapsack, current_sum )
#print_solutions(0, knapsack, 0)
if __name__ == '__main__':
r"""
CommandLine:
python -m mtgmonte.mtgutils
python -m mtgmonte.mtgutils --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| 2.5625 | 3 |
torchlit/utils/__init__.py | himanshu-dutta/torchlit | 1 | 12794994 | <reponame>himanshu-dutta/torchlit
def to_device(data, device):
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True) | 2.765625 | 3 |
src/data/reconstruction_datasets.py | martius-lab/beta-nll | 0 | 12794995 | import numpy as np
import torch
from torchvision import datasets, transforms
DEFAULT_DATA_DIR = "/is/rg/al/Projects/prob-models/data/"
class ReconstructionDataset(torch.utils.data.Dataset):
def __init__(
self, name, split="train", flatten=True, train_split=0.8, data_dir=None
):
assert split in ("train", "val", "test")
if data_dir is None:
data_dir = DEFAULT_DATA_DIR
load_train = split == "train" or split == "val"
if name == "mnist":
dataset = datasets.MNIST(
data_dir,
train=load_train,
download=True,
transform=transforms.ToTensor(),
)
elif name == "fashion-mnist":
dataset = datasets.FashionMNIST(
data_dir,
train=load_train,
download=True,
transform=transforms.ToTensor(),
)
else:
raise ValueError("Unknown dataset name {name}")
self.images = torch.stack([x[0] for x in dataset], axis=0)
if split == "train" or split == "val":
train_samples = int(train_split * len(self.images))
rng = np.random.RandomState(45)
idxs = rng.permutation(len(self.images))
if split == "train":
train_idxs = idxs[:train_samples]
self.images = self.images[train_idxs]
else:
val_idxs = idxs[train_samples:]
self.images = self.images[val_idxs]
self._shape = self.images.shape[1:]
if flatten:
self.images = self.images.reshape(len(self.images), -1)
example = self[0]
if flatten:
self.input_dim = example[0].shape[0]
self.target_dim = example[1].shape[0]
else:
self.input_dim = example[0]
self.target_dim = example[1]
@property
def shape(self):
return self._shape
def to_tensors(self):
return self.images, self.images
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
img = self.images[idx]
return img, img
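# Minimal usage sketch, kept under a __main__ guard so importing the module has
# no side effects. The dataset name, local data_dir and batch size are
# illustrative assumptions.
if __name__ == "__main__":
    train_set = ReconstructionDataset("mnist", split="train", flatten=True, data_dir="./data")
    loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
    inputs, targets = next(iter(loader))
    # For reconstruction, inputs and targets are the same flattened images.
    print(inputs.shape, targets.shape, train_set.input_dim, train_set.target_dim)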
| 2.796875 | 3 |
getfutures.py | pushkarlanke/stockcode | 0 | 12794996 |
# coding: utf-8
# In[1]:
import pandas as pd
import nsepy as ns
from datetime import date
# In[2]:
stocks = pd.read_csv("stocklist.csv")
# In[3]:
stocks
# In[4]:
expiry = [date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29),
date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28)]
# In[5]:
futureframe = []
for dt in expiry :
expiry_dt = dt
for idx,name in stocks.iterrows():
try :
Symbol = name['SYMBOL']
df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31),
futures= True,
expiry_date = expiry_dt
)
df['turnover_cr'] = df['Turnover']/1000000000000
futureframe.append(df)
except:
print ("error at symbol", name['SYMBOL'])
# In[6]:
futures = pd.concat(futureframe)
futures.to_csv('futuresdata.csv')
# In[ ]:
# In[7]:
# In[ ]:
| 2.890625 | 3 |
OKR_techsup_ga.py | omnisci/pymapd-examples | 5 | 12794997 | <reponame>omnisci/pymapd-examples
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 15:48:38 2018
@author: ericgrant
"""
import argparse
from apiclient.discovery import build
import httplib2
from oauth2client import client
from oauth2client import file
from oauth2client import tools
import pandas as pd
from pandas.io.json import json_normalize
from omnisci_utils import get_credentials
from omnisci_utils import wake_and_connect_to_mapd
from omnisci_utils import drop_table_mapd
from omnisci_utils import disconnect_mapd
from parsing_utils import format_date_cols
from parsing_utils import format_int_col
from parsing_utils import format_str_col
from parsing_utils import format_flt_col
file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/'
file_geocodes = file_path + 'AdWords API Location Criteria 2018-09-04.csv'
# parameters for OmniSci Cloud
mapdhost = 'use2-api.mapd.cloud'
mapdport = 443
mapdprotocol = 'https'
mapddbname = 'mapd'
mapduser = 'mapd'
omnisci_keyfile = file_path + 'omnisci_keys.json'
wait_interval = 25
# parameters for Google API
SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']
DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest')
VIEW_ID = '93521025'
CLIENT_SECRETS_PATH = file_path + 'client_secrets.json' # path to client_secrets.json file.
start_date = '2017-04-01'
tables_and_files = [
#blog post views
('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'}, "%Y%m%d",
'CREATE TABLE IF NOT EXISTS techsup_ga_blogvisits (blog_title TEXT ENCODING DICT(8), blog_url TEXT ENCODING DICT(8), referral_path TEXT ENCODING DICT(8), c1_timestamp TIMESTAMP, unique_pageviews FLOAT, time_on_page FLOAT, source TEXT ENCODING DICT(8), city_name TEXT ENCODING DICT(8), city_canonical_name TEXT ENCODING DICT(8), country_code TEXT ENCODING DICT(8));',
{
'reportRequests': [
{
'viewId': VIEW_ID,
'dateRanges': [
{'startDate': start_date, 'endDate': 'today'}
],
'pageSize': 10000,
'metrics': [
{'expression': 'ga:uniquePageviews'},
{'expression': 'ga:timeOnPage'}
],
'dimensions': [
{'name': 'ga:pageTitle'},
{'name': 'ga:pagePath'},
{'name': 'ga:referralPath'},
{'name': 'ga:date'},
{'name': 'ga:cityID'}
],
'dimensionFilterClauses': [
{'filters': [
{'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL', 'expressions': ['blog']}
]}
]}
]}
)
]
# GOOGLE ANALYTICS FUNCTIONS
def initialize_analyticsreporting():
"""Initializes the analyticsreporting service object.
Returns:
analytics: an authorized analyticsreporting service object.
"""
# Parse command-line arguments.
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
flags = parser.parse_args([])
# Set up a Flow object to be used if we need to authenticate.
flow = client.flow_from_clientsecrets(
CLIENT_SECRETS_PATH, scope=SCOPES,
message=tools.message_if_missing(CLIENT_SECRETS_PATH))
# Prepare credentials, and authorize HTTP object with them.
# If the credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# credentials will get written back to a file.
storage = file.Storage('analyticsreporting.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(flow, storage, flags)
http = credentials.authorize(http=httplib2.Http())
# Build the service object.
analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
return analytics
def get_report(analytics, bodycontent):
# Use the Analytics Service Object to query the Analytics Reporting API V4.
return analytics.reports().batchGet(
body=bodycontent).execute()
def print_response(response):
"""Parses and prints the Analytics Reporting API V4 response"""
for report in response.get('reports', []):
columnHeader = report.get('columnHeader', {})
dimensionHeaders = columnHeader.get('dimensions', [])
metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
rows = report.get('data', {}).get('rows', [])
for row in rows:
dimensions = row.get('dimensions', [])
dateRangeValues = row.get('metrics', [])
for header, dimension in zip(dimensionHeaders, dimensions):
print (header + ': ' + dimension)
for i, values in enumerate(dateRangeValues):
print ('Date range (' + str(i) + ')')
for metricHeader, value in zip(metricHeaders, values.get('values')):
print (metricHeader.get('name') + ': ' + value)
def format_data(response):
reports = response['reports'][0]
columnHeader = reports['columnHeader']['dimensions']
metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries']
columns = columnHeader
for metric in metricHeader:
columns.append(metric['name'])
data = json_normalize(reports['data']['rows'])
data_dimensions = pd.DataFrame(data['dimensions'].tolist())
data_metrics = pd.DataFrame(data['metrics'].tolist())
data_metrics = data_metrics.applymap(lambda x: x['values'])
data_metrics = pd.DataFrame(data_metrics[0].tolist())
result = pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True)
result.columns = ["blog_title", "blog_url", "referral_path", "c1_timestamp", "geo_city_code", "unique_pageviews", "time_on_page"] # set the column names
return (result)
def output_to_csv(df, fn):
df.to_csv(fn, index=False)
# OMNISCI FUNCTIONS
def source(url):
if 'tag' in url:
source = 'tag'
elif 'author' in url:
source = 'author'
elif url == 'www.mapd.com/blog':
source = 'mapd blog landing page'
elif url == 'www.mapd.com/blog/':
source = 'mapd blog landing page'
elif url == 'www.omnisci.com/blog':
source = 'omnisci blog landing page'
elif url == 'www.omnisci.com/blog/':
source = 'omnisci blog landing page'
elif 'community.mapd.com' in url:
source = 'mapd community forum'
elif 'community.omnisci.com' in url:
source = 'omnisci community forum'
else:
source = 'other / direct'
return (source)
def parse_source(df):
df['source'] = df['blog_url'].apply(source)
return df
def parse_city(df):
dfcity = pd.read_csv(file_geocodes)
dfcity.columns = ['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type', 'city_status'] # set the column names
format_str_col(dfcity, {'geo_city_code'})
df = pd.merge(df, dfcity, on=['geo_city_code'], how='left')
return df
def parse_geo_data(df):
df = parse_source(df)
df = parse_city(df)
return df
# Load CSV to dataframe
def parse_data(csvfile, dtcols, intcols, floatcols, strcols, renamings, tfrmt):
df = pd.read_csv(csvfile)
df.reset_index(drop=True, inplace=True)
format_date_cols(df, dtcols, tfrmt) #force the column containing datetime values to be recast from strings to timestamps
format_int_col(df, intcols)
format_str_col(df, strcols)
format_flt_col(df, floatcols)
df = parse_geo_data(df)
df = df.drop('geo_city_code', 1)
df = df.drop('city_parent_code', 1)
df = df.drop('city_target_type', 1)
df = df.drop('city_status', 1)
return df
def wake_up_omnisci():
# get OmniSci credentials
dfcreds = pd.DataFrame()
dfcreds = get_credentials(omnisci_keyfile)
# connect to OmniSci, allowing time for the instance to wake
connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname)
return connection
# MAIN
def main():
# connect to omnisci
connection = wake_up_omnisci()
if connection != "RETRY":
# loop through tables and reports
for os_table, csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format, creationstring, reportbody in tables_and_files:
# connect to Google Analytics
analytics = initialize_analyticsreporting()
response = get_report(analytics, reportbody)
# format the data into the columnar tables OmniSci wants
df = format_data(response)
# save the dataframe to a file
output_to_csv(df, csv_file)
# create the new dataframe from the file contents
df = parse_data(csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format)
print ('loading dataframe into table ' + os_table)
drop_table_mapd(connection, os_table) #drop the old table
connection.execute(creationstring)
connection.load_table(os_table, df) #load the new table into OmniSci
# disconnect from OmniSci
disconnect_mapd(connection)
else:
print('could not wake OmniSci; exiting')
if __name__ == '__main__':
main() | 2.34375 | 2 |
tests/conftest.py | coma64/nextcloud-notes-api | 3 | 12794998 | from typing import Iterator
from _pytest.fixtures import FixtureRequest
from pytest import fixture
from nextcloud_notes_api import Note
def _example_note() -> Note:
return Note(
title='Spam',
content='Bacon',
category='Todo',
favorite=True,
id=1337,
# https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a
modified=100_000,
)
@fixture
def example_note() -> Note:
"""
Returns:
Note: Note with all attributes set
"""
return _example_note()
@fixture
def example_note_gen(request: FixtureRequest) -> Iterator[Note]:
"""
Args:
request (FixtureRequest): `request.param` is the length of the generator
Yields:
Note: Example note, see `example_note()`
"""
return (_example_note() for _ in range(request.param))
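# Minimal sketch of how a test module would consume these fixtures; the test
# names and assertions are illustrative only. `example_note_gen` is consumed
# via indirect parametrization, so the generator length arrives in
# `request.param`:
#
#   import pytest
#
#   def test_note_title(example_note):
#       assert example_note.title == 'Spam'
#
#   @pytest.mark.parametrize('example_note_gen', [3], indirect=True)
#   def test_note_gen_length(example_note_gen):
#       assert len(list(example_note_gen)) == 3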
| 2.265625 | 2 |
lookerapi/apis/__init__.py | jcarah/python_sdk | 0 | 12794999 | <reponame>jcarah/python_sdk
from __future__ import absolute_import
# import apis into api package
from .api_auth_api import ApiAuthApi
from .auth_api import AuthApi
from .color_collection_api import ColorCollectionApi
from .config_api import ConfigApi
from .connection_api import ConnectionApi
from .content_api import ContentApi
from .dashboard_api import DashboardApi
from .data_action_api import DataActionApi
from .datagroup_api import DatagroupApi
from .folder_api import FolderApi
from .group_api import GroupApi
from .homepage_api import HomepageApi
from .integration_api import IntegrationApi
from .look_api import LookApi
from .lookml_model_api import LookmlModelApi
from .project_api import ProjectApi
from .query_api import QueryApi
from .render_task_api import RenderTaskApi
from .role_api import RoleApi
from .scheduled_plan_api import ScheduledPlanApi
from .session_api import SessionApi
from .space_api import SpaceApi
from .theme_api import ThemeApi
from .user_api import UserApi
from .user_attribute_api import UserAttributeApi
from .workspace_api import WorkspaceApi
| 1.09375 | 1 |
static/dht22Measure.py | organicopium/pigrow | 0 | 12795000 | import sqlite3
from sqlite3 import Error
import time
import datetime
db = "/home/pi/projects/pigrow/db.sqlite3"
def create_table(conn):
create_table_query = """ CREATE TABLE IF NOT EXISTS dht_data (
id integer PRIMARY KEY,
humidity real ,
temperature real,
ts text
);"""
try:
c = conn.cursor()
c.execute(create_table_query)
except Error as e:
print(e)
def create_connection(db):
conn = None
try:
conn = sqlite3.connect(db)
print(sqlite3.version)
except Error as e:
print(e)
return conn
def insertMeasure(conn, measure):
insert_query = ''' INSERT INTO dht_data(humidity, temperature, ts)
VALUES(?, ?, ?) '''
cur = conn.cursor()
cur.execute(insert_query, measure)
conn.commit()
def work():
import Adafruit_DHT
sensor = Adafruit_DHT.DHT22
sensor_pin = 18
while True:
conn = create_connection(db)
create_table(conn)
humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_pin)
ts = datetime.datetime.now().timestamp()
measure = (humidity, temperature, ts)
insertMeasure(conn, measure)
print("inserted {}".format(measure))
conn.close()
time.sleep(20)
print("Database connection does not exist")
if __name__ == '__main__':
work()
| 3.59375 | 4 |
Users/serializers.py | gordiig/Un_RSOI_Curs_Auth | 0 | 12795001 | <filename>Users/serializers.py
from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from Users.utils import is_moderator
class UserSerializer(serializers.ModelSerializer):
"""
Serializer for the user list representation
"""
is_moderator = serializers.SerializerMethodField()
class Meta:
model = User
fields = [
'id',
'username',
'email',
'is_superuser',
'is_moderator',
]
def get_is_moderator(self, instance: User):
return is_moderator(instance)
class RegisterSerializer(serializers.ModelSerializer):
"""
Serializer for user registration
"""
username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())])
email = serializers.EmailField(required=False, max_length=256, allow_blank=True)
password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True)
class Meta:
model = User
fields = [
'username',
'email',
'password',
]
def create(self, validated_data):
new = User.objects.create(username=validated_data['username'], email=validated_data.get('email', ''))
new.set_password(validated_data['password'])
new.save()
return new
class ChangePasswordSerializer(serializers.ModelSerializer):
"""
Serializer for the password change form
"""
password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True)
password_confirm = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256,
write_only=True)
old_password = serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True)
class Meta:
model = User
fields = [
'password',
'password_confirm',
'old_password',
]
def update(self, instance: User, validated_data):
if not instance.check_password(validated_data['old_password']):
raise serializers.ValidationError('Текущий пароль введен неверно')
if validated_data['password'] != validated_data['password_confirm']:
raise serializers.ValidationError('Пароли не сходятся')
instance.set_password(validated_data['password'])
instance.save()
return instance
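# Minimal sketch of how these serializers are typically driven from a view; the
# payloads below are illustrative only.
#
#   serializer = RegisterSerializer(data={'username': 'alice',
#                                         'email': 'alice@example.com',
#                                         'password': 'a-strong-password'})
#   if serializer.is_valid(raise_exception=True):
#       user = serializer.save()      # calls create() above
#
#   serializer = ChangePasswordSerializer(instance=user, data=change_payload)
#   if serializer.is_valid(raise_exception=True):
#       serializer.save()             # calls update() above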
| 2.359375 | 2 |
VideoMix/makeNumberedVideos.py | ctralie/SlidingWindowVideoTDA | 6 | 12795002 | import sys
sys.path.append("../")
from VideoTools import *
import subprocess
import os
import scipy.misc
MAXHEIGHT = 160
MINWIDTH = 120
def saveVideoID(I, IDims, fileprefix, ID, FrameRate = 30, NumberFrames = 30):
N = I.shape[0]
print(I.shape)
if I.shape[0] > FrameRate*5:
I = I[0:FrameRate*5, :]
N = I.shape[0]
frame = np.array([])
print("IDims = ", IDims)
for i in range(N):
frame = np.reshape(I[i, :], IDims)
frame[frame < 0] = 0
frame[frame > 1] = 1
if IDims[0] > MAXHEIGHT:
fac1 = MAXHEIGHT/float(IDims[0])
fac2 = MINWIDTH/float(IDims[1])
fac = max(fac1, fac2)
if i == 0:
print("Resizing by %g"%fac)
frame = scipy.misc.imresize(frame, fac)
mpimage.imsave("%s%i.png"%(TEMP_STR, i+1), frame)
PS = 60
if frame.shape[1] > MINWIDTH*1.5:
PS = int(30.0*frame.shape[1]/MINWIDTH)
for i in range(NumberFrames):
command = ["convert", "%s%i.png"%(TEMP_STR, N), "-fill", "red", "-pointsize", "%i"%PS, "-draw", 'text 20,60 %s%.3i%s'%("'", ID, "'"), "%s%i.png"%(TEMP_STR, N+i+1)]
print(command)
subprocess.call(command)
print(N + i + 1)
#Convert to video using avconv
for t in ["avi", "webm", "ogg"]:
filename = "%s.%s"%(fileprefix, t)
#Overwrite by default
if os.path.exists(filename):
os.remove(filename)
command = [AVCONV_BIN,
'-r', "%i"%FrameRate,
'-i', TEMP_STR + '%d.png',
'-r', "%i"%FrameRate,
'-b', '30000k',
filename]
subprocess.call(command)
#Clean up
for i in range(N+NumberFrames):
os.remove("%s%i.png"%(TEMP_STR, i+1))
np.random.seed(100)
IDs = np.random.permutation(999)
i = 0
Videos = ["OrigVideos/%s"%v for v in os.listdir("OrigVideos")]
for V in Videos:
print("Saving %s..."%V)
(I, IDims) = loadVideo(V)
saveVideoID(I, IDims, "NumberedVideos/%i"%i, IDs[i])
i = i + 1
| 2.5 | 2 |
fileshare/site_admin/views.py | sqz269/fileshare-flask | 4 | 12795003 | from flask import Blueprint, render_template
site_admin = Blueprint("admin_site", __name__, url_prefix="/admin",template_folder="template", static_folder="static", static_url_path="/admin/static")
@site_admin.route("/")
def admin():
return render_template("index.html")
| 2.1875 | 2 |
diofant/tests/printing/test_mathematica.py | rajkk1/diofant | 57 | 12795004 | <filename>diofant/tests/printing/test_mathematica.py
"""Mathematica code printing tests."""
import pytest
from diofant import (QQ, Catalan, Derivative, Dummy, E, Eq, EulerGamma,
Function, Gt, Heaviside, Integer, Integral, Lambda, Le,
Limit, Matrix, Max, Min, Ne, Or, Piecewise, Poly,
Rational, RootOf, RootSum, SparseMatrix, Sum, Tuple, acos,
acosh, acot, acoth, asin, asinh, atan, atanh, binomial,
conjugate, cos, cosh, cot, coth, csch, erfc, exp,
factorial, factorial2, false, fibonacci, gamma, hyper, im,
log, loggamma, mathematica_code, meijerg, oo, pi,
polygamma, polylog, re, rf, sech, sign, sin, sinh,
symbols, tan, tanh, true, zeta)
from diofant.abc import x, y, z
__all__ = ()
f = Function('f')
def test_Integer():
assert mathematica_code(Integer(67)) == '67'
assert mathematica_code(Integer(-1)) == '-1'
def test_Rational():
assert mathematica_code(Rational(3, 7)) == '3/7'
assert mathematica_code(Rational(18, 9)) == '2'
assert mathematica_code(Rational(3, -7)) == '-3/7'
assert mathematica_code(Rational(-3, -7)) == '3/7'
assert mathematica_code(x + Rational(3, 7)) == 'x + 3/7'
assert mathematica_code(Rational(3, 7)*x) == '(3/7)*x'
def test_symbols():
assert mathematica_code(x) == 'x'
d = Dummy('d')
assert mathematica_code(d) == f'd{d.dummy_index}'
def test_Function():
assert mathematica_code(f(x, y, z)) == 'f[x, y, z]'
assert mathematica_code(sin(x) ** cos(x)) == 'Sin[x]^Cos[x]'
assert mathematica_code(sign(x)) == 'Sign[x]'
assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]'
assert (mathematica_code(meijerg(((1, 1), (3, 4)), ((1,), ()), x)) ==
'MeijerG[{{1, 1}, {3, 4}}, {{1}, {}}, x]')
assert (mathematica_code(hyper((1, 2, 3), (3, 4), x)) ==
'HypergeometricPFQ[{1, 2, 3}, {3, 4}, x]')
assert mathematica_code(Min(x, y)) == 'Min[x, y]'
assert mathematica_code(Max(x, y)) == 'Max[x, y]'
assert mathematica_code(Max(x, 2)) == 'Max[2, x]' # issue sympy/sympy#15344
assert mathematica_code(binomial(x, y)) == 'Binomial[x, y]'
assert mathematica_code(log(x)) == 'Log[x]'
assert mathematica_code(tan(x)) == 'Tan[x]'
assert mathematica_code(cot(x)) == 'Cot[x]'
assert mathematica_code(asin(x)) == 'ArcSin[x]'
assert mathematica_code(acos(x)) == 'ArcCos[x]'
assert mathematica_code(atan(x)) == 'ArcTan[x]'
assert mathematica_code(acot(x)) == 'ArcCot[x]'
assert mathematica_code(sinh(x)) == 'Sinh[x]'
assert mathematica_code(cosh(x)) == 'Cosh[x]'
assert mathematica_code(tanh(x)) == 'Tanh[x]'
assert mathematica_code(coth(x)) == 'Coth[x]'
assert mathematica_code(asinh(x)) == 'ArcSinh[x]'
assert mathematica_code(acosh(x)) == 'ArcCosh[x]'
assert mathematica_code(atanh(x)) == 'ArcTanh[x]'
assert mathematica_code(acoth(x)) == 'ArcCoth[x]'
assert mathematica_code(sech(x)) == 'Sech[x]'
assert mathematica_code(csch(x)) == 'Csch[x]'
assert mathematica_code(erfc(x)) == 'Erfc[x]'
assert mathematica_code(conjugate(x)) == 'Conjugate[x]'
assert mathematica_code(re(x)) == 'Re[x]'
assert mathematica_code(im(x)) == 'Im[x]'
assert mathematica_code(polygamma(x, y)) == 'PolyGamma[x, y]'
assert mathematica_code(factorial(x)) == 'Factorial[x]'
assert mathematica_code(factorial2(x)) == 'Factorial2[x]'
assert mathematica_code(rf(x, y)) == 'Pochhammer[x, y]'
assert mathematica_code(gamma(x)) == 'Gamma[x]'
assert mathematica_code(zeta(x)) == 'Zeta[x]'
assert mathematica_code(Heaviside(x)) == 'UnitStep[x]'
assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]'
assert mathematica_code(polylog(x, y)) == 'PolyLog[x, y]'
assert mathematica_code(loggamma(x)) == 'LogGamma[x]'
class MyFunc1(Function):
@classmethod
def eval(cls, x):
pass
class MyFunc2(Function):
@classmethod
def eval(cls, x, y):
pass
pytest.raises(ValueError,
lambda: mathematica_code(MyFunc1(x),
user_functions={'MyFunc1':
['Myfunc1']}))
assert mathematica_code(MyFunc1(x),
user_functions={'MyFunc1':
'Myfunc1'}) == 'Myfunc1[x]'
assert mathematica_code(MyFunc2(x, y),
user_functions={'MyFunc2':
[(lambda *x: False,
'Myfunc2')]}) == 'MyFunc2[x, y]'
def test_Lambda():
f1 = Lambda(x, x**2)
assert mathematica_code(f1) == 'Function[{x}, x^2]'
f2 = Lambda((x, y), x + 2*y)
assert mathematica_code(f2) == 'Function[{x, y}, x + 2*y]'
def test_Derivative():
assert mathematica_code(Derivative(f(x), x, x)) == 'Hold[D[f[x], x, x]]'
assert mathematica_code(Derivative(sin(x), x)) == 'Hold[D[Sin[x], x]]'
assert mathematica_code(Derivative(x, x)) == 'Hold[D[x, x]]'
assert mathematica_code(Derivative(sin(x)*y**4, (x, 2))) == 'Hold[D[y^4*Sin[x], x, x]]'
assert mathematica_code(Derivative(sin(x)*y**4, x, y, x)) == 'Hold[D[y^4*Sin[x], x, y, x]]'
assert mathematica_code(Derivative(sin(x)*y**4, x, (y, 3), x)) == 'Hold[D[y^4*Sin[x], x, y, y, y, x]]'
def test_Pow():
assert mathematica_code(x**3) == 'x^3'
assert mathematica_code(x**(y**3)) == 'x^(y^3)'
assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \
'(3.5*f[x])^(-x + y^x)/(x^2 + y)'
assert mathematica_code(x**-1.0) == 'x^(-1.0)'
assert mathematica_code(x**Rational(2, 3)) == 'x^(2/3)'
def test_Mul():
A, B, C, D = symbols('A B C D', commutative=False)
assert mathematica_code(x*y*z) == 'x*y*z'
assert mathematica_code(x*y*A) == 'x*y*A'
assert mathematica_code(x*y*A*B) == 'x*y*A**B'
assert mathematica_code(x*y*A*B*C) == 'x*y*A**B**C'
assert mathematica_code(x*A*B*(C + D)*A*y) == 'x*y*A**B**(C + D)**A'
def test_constants():
assert mathematica_code(pi) == 'Pi'
assert mathematica_code(oo) == 'Infinity'
assert mathematica_code(-oo) == '-Infinity'
assert mathematica_code(EulerGamma) == 'EulerGamma'
assert mathematica_code(Catalan) == 'Catalan'
assert mathematica_code(E) == 'E'
def test_containers():
assert mathematica_code([1, 2, 3, [4, 5, [6, 7]], 8, [9, 10], 11]) == \
'{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}'
assert mathematica_code((1, 2, (3, 4))) == '{1, 2, {3, 4}}'
assert mathematica_code([1]) == '{1}'
assert mathematica_code((1,)) == '{1}'
assert mathematica_code(Tuple(*[1, 2, 3])) == '{1, 2, 3}'
def test_Integral():
assert mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]], x]]'
assert mathematica_code(Integral(exp(-x**2 - y**2),
(x, -oo, oo),
(y, -oo, oo))) == \
'Hold[Integrate[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \
'{y, -Infinity, Infinity}]]'
def test_Sum():
assert mathematica_code(Sum(sin(x), (x, 0, 10))) == 'Hold[Sum[Sin[x], {x, 0, 10}]]'
assert mathematica_code(Sum(exp(-x**2 - y**2),
(x, -oo, oo),
(y, -oo, oo))) == \
'Hold[Sum[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \
'{y, -Infinity, Infinity}]]'
def test_Matrix():
assert mathematica_code(Matrix()) == '{}'
m = Matrix([[1, 2], [3, 4444]])
assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}'
m = SparseMatrix(m)
assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}'
def test_Relational():
assert mathematica_code(Eq(x, y)) == 'x == y'
assert mathematica_code(Ne(x, y/(1 + y**2))) == 'x != (y/(y^2 + 1))'
assert mathematica_code(Le(0, x**2)) == '0 <= x^2'
assert mathematica_code(Gt(pi, 3, evaluate=False)) == 'Pi > 3'
def test_Booleans():
assert mathematica_code(true) == 'True'
assert mathematica_code(false) == 'False'
def test_Piecewise():
g = Piecewise((0, Or(x <= -1, x >= 1)), (1 - x, x > 0), (1 + x, True))
assert (mathematica_code(g) ==
'Piecewise[{{0, x >= 1 || x <= -1}, '
'{-x + 1, x > 0}, {x + 1, True}}]')
def test_RootOf():
p = Poly(x**3 + y*x + 1, x)
assert mathematica_code(RootOf(p, 0)) == 'Root[#^3 + #*y + 1 &, 1]'
def test_RootSum():
r = RootSum(x**3 + x + 3, Lambda(y, log(y*z)))
assert mathematica_code(r) == ('RootSum[Function[{x}, x^3 + x + 3], '
'Function[{y}, Log[y*z]]]')
def test_AlgebraicElement():
r = RootOf(x**7 + 3*x - 1, 3)
K = QQ.algebraic_field(r)
a = K([1, 0, 3, 2, 1])
assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7 + 3*# - 1 &, 4],'
' {1, 0, 3, 2, 1}]')
def test_Limit():
e = Limit(sin(x)/x, x, 0)
assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> -1]]'
e = Limit(sin(x)/x, x, 0, '-')
assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> 1]]'
e = Limit(sin(x)/x, x, 0, 'real')
assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> Reals]]'
| 2.390625 | 2 |
wait.py | Zadigo/zacoby | 1 | 12795005 | import time
from threading import Timer
from typing import Callable
from pydispatch import dispatcher
from zacoby import global_logger
from zacoby.exceptions import ElementDoesNotExist, MethodError
# from zacoby.pipeline import Pipeline
from zacoby.settings import settings
from zacoby.signals import signal
class DriverMixin:
def __init__(self, driver: Callable, timeout: int):
self.driver = driver
self.timeout = timeout
# signal.send(dispatcher.Any, self, timeout=timeout)
class Wait(DriverMixin):
def __init__(self, name: str, driver: Callable, timeout: int=10):
super().__init__(driver, timeout)
self.name = name
self.exceptions = []
self.results = []
def _start_polling(self, func, **kwargs):
# result = None
# results = []
end_time = sum([time.time(), self.timeout])
global_logger.info(f'Waiting for element [{self.name}] - ({self.timeout}s)...')
while True:
try:
result = func(driver=self.driver, **kwargs)
except Exception:
raise
else:
# return result
self.results.append(result)
time.sleep(self.timeout)
if time.time() > end_time:
break
# raise TimeoutError()
def until(self, func: Callable, **kwargs):
self._start_polling(func, **kwargs)
return self
def until_not(self, func: Callable, **kwargs):
self._start_polling(func, **kwargs)
return self
def chains(self, *funcs: Callable, method='until'):
authorized_methods = ['until', 'until_not']
if method not in authorized_methods:
raise MethodError()
for func in funcs:
pass
def logical_map(self, methods: dict):
container = []
for key, method in methods.items():
container.append(method())
return self
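# Minimal usage sketch (illustrative only): poll a condition until the timeout
# given to Wait elapses. The condition callable and its keyword arguments are
# assumptions; Wait only requires that the callable accepts `driver=`.
#
#   def element_present(driver, selector=None):
#       return driver.find_element(selector)   # hypothetical driver call
#
#   wait = Wait('login button', driver, timeout=5)
#   wait.until(element_present, selector='#login')
#   print(wait.results)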
class Pause(DriverMixin):
def _start_pause(self, callback = None):
result = []
if callback is not None:
if not callable(callback):
raise TypeError('Callback should be a callable')
timer = Timer(self.timeout, function=callback, kwargs={'driver': self.driver, 'result': result})
else:
timer = Timer(self.timeout, function=lambda: True)
timer.start()
global_logger.info(f'Entering sleep mode ({self.timeout}s)')
timer.join()
if not timer.is_alive():
timer.cancel()
return result if result else None
| 2.125 | 2 |
reference/gtf2fasta.py | joshsbloom/eQTL_BYxRM | 3 | 12795006 | <reponame>joshsbloom/eQTL_BYxRM
#!/usr/local/bin/python
from optparse import OptionParser
from BCBio import GFF
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.Alphabet import generic_dna
from Bio.SeqRecord import SeqRecord
import subprocess
import os
import sys
def lookupSequences(files):
gtf_file = open(files['gtf_file'])
records = []
for rec in GFF.parse(gtf_file):
chrom = rec.id
for feature in rec.features:
if feature.sub_features == []:
seq = lookup_sequence(files, feature, chrom)
id = feature.qualifiers['transcript_id'][0]
strand = feature.strand
else:
seq = Seq("", generic_dna)
id = feature.id
for subf in feature.sub_features:
seq = seq + lookup_sequence(files, subf, chrom)
strand = subf.strand
if strand is -1:
seq = seq.reverse_complement()
records.append(SeqRecord(seq, id=id))
SeqIO.write(records, sys.stdout, "fasta")
def lookup_sequence(files, feature, chrom):
"""
use samtools to look up the sequence
"""
args = [files['samtools'], "faidx", files['seq_file'], str(chrom) +
":" + str(int(str(feature.location.start))+1) + "-" +
str(feature.location.end)]
child = subprocess.Popen(args, stdout=subprocess.PIPE)
seq = ""
for line in child.stdout:
if line.strip()[0] == ">":
continue
seq = seq + line.strip()
seq = Seq(seq, generic_dna)
return seq
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def main():
usage = "usage: gtf2fasta seq_file gtf_file"
parser = OptionParser()
(options, args) = parser.parse_args()
samtools = which("samtools")
if samtools is None:
print "samtools must executable, add it to your path or " \
"download it from http://samtools.sourceforge.net/"
exit(-1)
files = {}
files['samtools'] = samtools
if len(args) != 2:
print usage
exit(-1)
files['seq_file'] = args[0]
files['gtf_file'] = args[1]
if not os.path.exists(files['seq_file']):
print "seq_file does not exist"
print usage
exit(-1)
if not os.path.exists(files['gtf_file']):
print "gtf_file does not exist"
print usage
exit(-1)
lookupSequences(files)
if __name__ == "__main__":
main()
| 2.390625 | 2 |
rqrunner.py | angstwad/arlo-sms | 0 | 12795007 | # -*- coding: utf-8 -*-
import sys
from redis import Redis
from rq import Queue, Connection, Worker
from mailhook.config import config
# Preload libraries
import twilio
# Provide queue names to listen to as arguments to this script,
# similar to rqworker
redis_conn = Redis(config.REDIS_HOST)
with Connection(redis_conn):
qs = map(Queue, sys.argv[1:]) or [Queue()]
w = Worker(qs)
w.work()
| 1.976563 | 2 |
libs/RVNpyIPFS/_IPFSrpcClient.py | sLiinuX/wxRaven | 11 | 12795008 | '''
Created on 5 janv. 2022
@author: slinux
'''
import sys
# The answer is that the module xmlrpc is part of python3
import xmlrpc.client
import os
import logging
class IPFS_RPC_Client(object):
#Put your server IP here
_ip='0.0.0.0'
_port=1234
_url = ""
_client = None
def __init__(self, ip="127.0.0.1", port=9000, useHTTPS=False):
self._ip = ip
self._port = port
self.logger = logging.getLogger('wxRaven')
self.url = 'http://{}:{}'.format(ip, port)
if useHTTPS:
self.url = 'https://{}'.format(ip)
if ip.__contains__('http'):
self.url = '{}'.format(ip)
self.logger.info(f'Creating a new IPFS RPC Client at {self.url}')
self._client = xmlrpc.client.ServerProxy(self.url)
def sendFile(self, filename):
curDir = os.path.dirname(os.path.realpath(__file__))
#filename = sys.argv[1]
#fpn = curDir + '/' + filename
fpn = filename
localadd, remotefnae = os.path.split(filename)
self.logger.info(' filename -> ({})'.format(filename))
self.logger.info(' fpn -> ({})'.format(remotefnae))
if not os.path.exists(fpn):
self.logger.info('Missing file -> ({})'.format(fpn))
#sys.exit(1)
_resultUpload = None
with open(fpn, "rb") as handle:
binary_data = xmlrpc.client.Binary(handle.read())
_resultUpload = self._client.server_receive_file(binary_data, remotefnae)
self.logger.info(f'_resultUpload = {_resultUpload}')
return _resultUpload
def sendJSON(self, JSON):
self.logger.info(f'JSON = {JSON}')
_resultUpload = self._client.server_receive_json(JSON)
#self.logger.info(f'_resultUpload = {_resultUpload}')
return _resultUpload
#.add_json(self.compile_message(message))
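if __name__ == '__main__':
    # Minimal sketch, guarded so importing the module stays side-effect free.
    # The address and payload are illustrative; the client only needs an
    # XML-RPC server that exposes server_receive_file / server_receive_json.
    client = IPFS_RPC_Client(ip='127.0.0.1', port=9000)
    print(client.sendJSON({'hello': 'world'}))
    # print(client.sendFile('/tmp/example.txt'))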
| 2.640625 | 3 |
codedigger/codeforces/views.py | prayutsu/Backend | 0 | 12795009 | <gh_stars>0
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import generics, mixins, permissions
from .models import user, country, organization, contest
from .serializers import UserSerializer, CountrySerializer, OrganizationSerializer, ContestSerializer
from user.serializers import GuruSerializer
from problem.serializers import ProbSerializer
import json, requests
from django.http import JsonResponse
from user.models import Profile
from django.db.models import Q
from django.template.loader import render_to_string
from user.permissions import *
def data(URL):
return requests.get(URL).json()
class MentorAPIView(
mixins.CreateModelMixin,
generics.ListAPIView,
):
permission_classes = [AuthenticatedActivated]
serializer_class = GuruSerializer
def get(self, request):
return JsonResponse({
'status':
'OK',
'result':
Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1]
})
def put(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid(raise_exception=True):
serializer.add(
validated_data=request.data,
instance=Profile.objects.get(owner=self.request.user))
return Response({'status': 'OK', 'result': 'Added Successfully'})
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.delete(
instance=Profile.objects.get(owner=self.request.user),
data=request.data)
return Response({'status': 'OK', 'result': 'Deleted Successfully'})
from .cron import codeforces_update_problems
def testing(request):
codeforces_update_problems()
return JsonResponse({'status': 'OK'})
| 2.015625 | 2 |
vininfo/brands.py | ghilesmeddour/vininfo | 60 | 12795010 | <filename>vininfo/brands.py
from .common import Brand
from .details import *
class Lada(Brand):
extractor = AvtoVazDetails
class Nissan(Brand):
extractor = NissanDetails
class Opel(Brand):
extractor = OpelDetails
class Renault(Brand):
extractor = RenaultDetails
| 1.796875 | 2 |
puzzles/aoc.py | oogles/aoc_base | 0 | 12795011 | import datetime
import inspect
import os
import sys
class Puzzle:
# The delimiter to use to separate the input data into a list for subsequent
# processing. E.g. '\n', ',', etc. Delimited items can be processed prior to
# being added to the input list by overriding _process_input_item().
# Set to None to read the data in whole. In this case, data can be processed
# by overriding _process_input_data().
input_delimiter = '\n'
def __init__(self, sample=False, verbosity=2):
self.sample = sample
self.verbosity = verbosity
def get_input_file_name(self):
path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__)))
filename = 'sample' if self.sample else 'input'
return os.path.join(path, filename)
def process_input_item(self, input_line):
return input_line
def process_input_data(self, input_data):
return input_data
def get_input(self):
input_file = self.get_input_file_name()
delimiter = self.input_delimiter
process_item = self.process_input_item
with open(input_file, 'r') as f:
if delimiter == '\n':
# Trim whitespace from and process each line in the input file,
# skipping any blank lines
input_data = []
for line in f.readlines():
line = line.strip()
if line:
input_data.append(process_item(line))
else:
raw_input = f.read().strip() # trim whitespace (e.g. newlines)
if delimiter:
# Trim whitespace from and process each item in the raw
# input data after applying the configured delimiter
input_data = [process_item(item.strip()) for item in raw_input.split(delimiter)]
else:
# Process the raw input data directly
input_data = self.process_input_data(raw_input)
return input_data
def _do_solve(self, solvers):
v = self.verbosity
max_v = v > 1
line_endings = '\n' if max_v else ''
# Get input
if max_v:
sample = '**SAMPLE** ' if self.sample else ''
print('=' * 50, f'\n\nProcessing {sample}', end='')
print('Input... ', end=line_endings)
start = datetime.datetime.now()
try:
input_data = self.get_input()
except FileNotFoundError:
print(f'No input data file found (looked in {self.get_input_file_name()}).')
return
t = (datetime.datetime.now() - start).total_seconds()
if self.input_delimiter == '\n':
input_desc = f'has {len(input_data)} lines'
elif self.input_delimiter:
input_desc = f'has {len(input_data)} items'
else:
size = sys.getsizeof(input_data)
input_desc = f'is {size} bytes'
if max_v:
print('Input ', end='')
print(f'{input_desc} ({type(input_data)}) [{t}s]')
# Run solvers
for part, solver in solvers:
if self.input_delimiter:
# Copy the data so each part is free to manipulate it without
# affecting subsequent parts
part_input_data = input_data[:]
else:
part_input_data = input_data
if max_v:
print('\nSolving ', end='')
print('Part {}... '.format(part), end=line_endings)
start = datetime.datetime.now()
solution = solver(part_input_data)
t = (datetime.datetime.now() - start).total_seconds()
if max_v:
print('Solution: ', end='')
print('{} [{}s]'.format(solution, t))
if max_v:
print('\n', '=' * 50, sep='')
def _part1(self, input_data):
raise NotImplementedError()
def _part2(self, input_data):
raise NotImplementedError()
def solve_part1(self):
self._do_solve([(1, self._part1)])
def solve_part2(self):
self._do_solve([(2, self._part2)])
def solve(self):
self._do_solve([(1, self._part1), (2, self._part2)])
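# A minimal sketch of a concrete puzzle; the comma-delimited integer input and
# the part implementations are illustrative only.
class _ExamplePuzzle(Puzzle):
    input_delimiter = ','

    def process_input_item(self, input_line):
        return int(input_line)

    def _part1(self, input_data):
        return sum(input_data)

    def _part2(self, input_data):
        return max(input_data) - min(input_data)


if __name__ == '__main__':
    _ExamplePuzzle(sample=True).solve()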
| 3.5625 | 4 |
main.py | PravunathSingh/Realtime-Tweets | 0 | 12795012 | # create a credentials.py file with the following keys
from credentials import ckey, csecret, atoken, asecret
from tweepy import Stream, OAuthHandler
from tweepy.streaming import StreamListener
class listener(StreamListener):
def on_data(self, data):
print(data)
return True
def on_error(self, status):
print(status)
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=['python'])
| 2.53125 | 3 |
apps/sushi/migrations/0038_jsonfield.py | techlib/czechelib-stats | 1 | 12795013 | <reponame>techlib/czechelib-stats
# Generated by Django 3.1.3 on 2020-11-20 13:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sushi', '0037_broken_credentials'),
]
operations = [
migrations.AlterField(
model_name='sushicredentials',
name='extra_params',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='sushifetchattempt',
name='processing_info',
field=models.JSONField(default=dict, help_text='Internal info'),
),
]
| 1.632813 | 2 |
l2_attack.py | jcperdomo/universal_noise | 0 | 12795014 | ## l2_attack.py -- attack a network optimizing for l_2 distance
##
## Copyright (C) 2016, <NAME> <<EMAIL>>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
## Modified by <NAME> 2017
import tensorflow as tf
import numpy as np
BINARY_SEARCH_STEPS = 9 # number of times to adjust the constant with binary search
MAX_ITERATIONS = 10000 # number of iterations to perform gradient descent
ABORT_EARLY = True # if we stop improving, abort gradient descent early
LEARNING_RATE = 1e-2 # larger values converge faster to less accurate results, default 1e-2
TARGETED = False # should we target one specific class? or just be wrong?
CONFIDENCE = 0 # how strong the adversarial example should be
INITIAL_CONST = 1e-3 # the initial constant c to pick as a first guess
class CarliniL2:
def __init__(self, sess, models, batch_size=1, confidence = CONFIDENCE,
targeted = TARGETED, learning_rate = LEARNING_RATE,
binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS,
abort_early = ABORT_EARLY,
initial_const = INITIAL_CONST,
boxmin = -0.5, boxmax = 0.5):
"""
The L_2 optimized attack.
This attack is the most efficient and should be used as the primary
attack to evaluate potential defenses.
Returns adversarial examples for the supplied model.
confidence: Confidence of adversarial examples: higher produces examples
that are farther away, but more strongly classified as adversarial.
batch_size: Number of attacks to run simultaneously.
        targeted: True if we should perform a targeted attack, False otherwise.
learning_rate: The learning rate for the attack algorithm. Smaller values
produce better results but are slower to converge.
binary_search_steps: The number of times we perform binary search to
find the optimal tradeoff-constant between distance and confidence.
max_iterations: The maximum number of iterations. Larger values are more
accurate; setting too small will require a large learning rate and will
produce poor results.
abort_early: If true, allows early aborts if gradient descent gets stuck.
initial_const: The initial tradeoff-constant to use to tune the relative
importance of distance and confidence. If binary_search_steps is large,
the initial constant is not important.
boxmin: Minimum pixel value (default -0.5).
boxmax: Maximum pixel value (default 0.5).
"""
image_size, num_channels, num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels
self.sess = sess
self.TARGETED = targeted
self.LEARNING_RATE = learning_rate
self.MAX_ITERATIONS = max_iterations
self.BINARY_SEARCH_STEPS = binary_search_steps
self.ABORT_EARLY = abort_early
self.CONFIDENCE = confidence
self.initial_const = initial_const
self.batch_size = batch_size
self.num_models = len(models)
self.num_labels = num_labels
shape = (batch_size,image_size,image_size,num_channels)
# the variable we're going to optimize over
modifier = tf.Variable(np.zeros(shape,dtype=np.float32))
# these are variables to be more efficient in sending data to tf
self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)
self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32)
self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32)
self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32)
# and here's what we use to assign them
self.assign_timg = tf.placeholder(tf.float32, shape)
self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels))
self.assign_const = tf.placeholder(tf.float32, [batch_size])
self.assign_weights = tf.placeholder(tf.float32, [self.num_models])
# the resulting image, tanh'd to keep bounded from boxmin to boxmax
self.boxmul = (boxmax - boxmin) / 2.
self.boxplus = (boxmin + boxmax) / 2.
self.newimg = tf.tanh(modifier + self.timg) * self.boxmul + self.boxplus
# prediction BEFORE-SOFTMAX of the model
self.outputs = [model.predict(self.newimg) for model in models]
# distance to the input data
self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3])
# compute the probability of the label class versus the maximum other
reals = []
others = []
for i in xrange(self.num_models):
real = tf.reduce_sum((self.tlab) * self.outputs[i], 1)
other = tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000), 1)
reals.append(real)
others.append(other)
self.reals, self.others = reals, others
loss1list = []
if self.TARGETED:
            # if targeted, optimize for making the other class most likely
for i in xrange(self.num_models):
loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE)))
else:
# if untargeted, optimize for making this class least likely.
for i in xrange(self.num_models):
loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE)))
self.loss1list = loss1list # TODO: remove
# sum up the losses
self.loss2 = tf.reduce_sum(self.l2dist)
self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list))
self.loss = self.loss1 + self.loss2
self.reals = reals
self.others = others
# Setup the adam optimizer and keep track of variables we're creating
start_vars = set(x.name for x in tf.global_variables())
optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)
self.train = optimizer.minimize(self.loss, var_list=[modifier])
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if x.name not in start_vars]
# these are the variables to initialize when we run
self.setup = []
self.setup.append(self.timg.assign(self.assign_timg))
self.setup.append(self.tlab.assign(self.assign_tlab))
self.setup.append(self.const.assign(self.assign_const))
self.setup.append(self.weights.assign(self.assign_weights))
self.init = tf.variables_initializer(var_list=[modifier]+new_vars)
def attack(self, imgs, targets, weights):
"""
Perform the L_2 attack on the given images for the given targets.
If self.targeted is true, then the targets represents the target labels.
If self.targeted is false, then targets are the original class labels.
"""
r = []
# print('go up to',len(imgs))
for i in range(0,len(imgs),self.batch_size):
# print('tick',i)
r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights))
return np.array(r)
def attack_batch(self, imgs, labs, weights):
"""
Run the attack on a batch of images and labels.
"""
def compareLoss(x, y):
"""
x is an np array of shape num_models x num_classes
y is the true label or target label of the class
returns a number in [0,1] indicating the expected loss of the learner
"""
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
for v in x: # update the target scores for each individual prediction
if self.TARGETED:
v[y] -= self.CONFIDENCE
else:
v[y] += self.CONFIDENCE
x = np.argmax(x, 1) # these are the predictions of each hypothesis
if self.TARGETED:
return np.dot(x == y, weights)
else:
return np.dot(x != y, weights)
batch_size = self.batch_size
# convert to tanh-space
imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
CONST = np.ones(batch_size)*self.initial_const
upper_bound = np.ones(batch_size)*1e10
# the best l2, score, and image attack
o_bestl2 = [1e10]*batch_size
o_bestscore = [-1]*batch_size
o_bestattack = [np.zeros(imgs[0].shape)]*batch_size
for outer_step in range(self.BINARY_SEARCH_STEPS):
# completely reset adam's internal state.
self.sess.run(self.init)
batch = imgs[:batch_size]
batchlab = labs[:batch_size]
bestl2 = [1e10]*batch_size
bestscore = [0.0]*batch_size
# set the variables so that we don't have to send them over again
self.sess.run(self.setup, {self.assign_timg: batch,
self.assign_tlab: batchlab,
self.assign_const: CONST,
self.assign_weights: weights})
# print "Outer Step ", outer_step, "Current C ", CONST, lower_bound, upper_bound
prev = 1e10 # used to be e6
for iteration in range(self.MAX_ITERATIONS):
# perform the attack
_, l, l2s, scores, nimg = self.sess.run([self.train, self.loss,
self.l2dist, self.outputs,
self.newimg])
scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels)
# if iteration % 200 == 0:
# print(iteration, self.sess.run((self.loss, self.loss1, self.loss2)))
# check if we should abort search if we're getting nowhere. (check every 10%)
if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10) == 0:
if l > prev*.9999:
break
prev = l
for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)):
currLoss = compareLoss(sc, np.argmax(batchlab[e])) # expected loss of the learner
if currLoss > bestscore[e]: # we've found a clear improvement for this value of c
bestl2[e] = l2
bestscore[e] = currLoss
if currLoss == bestscore[e] and l2 < bestl2[e]:
bestl2[e] = l2
if currLoss > o_bestscore[e]:
o_bestl2[e] = l2
o_bestscore[e] = currLoss
o_bestattack[e] = ii
if currLoss == o_bestscore[e] and l2 < o_bestl2[e]:
o_bestl2[e] = l2
o_bestattack[e] = ii
# finished trying out the adam optimizer for a particular c, now need to decide on the next value
# adjust the constant as needed
for e in range(batch_size):
if bestscore[e] == 1.0:
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e])/2
else:
lower_bound[e] = max(lower_bound[e],CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e])/2
else:
CONST[e] *= 100
# return the best solution found
return o_bestattack
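# --- Editor's illustrative sketch (not part of the original file) ---
# Rough shape of a call; the model objects, session and data are assumed to
# exist and to expose image_size/num_channels/num_labels plus a predict():
#
#     with tf.Session() as sess:
#         attack = CarliniL2(sess, [model_a, model_b], batch_size=16,
#                            max_iterations=1000, targeted=False)
#         adv_images = attack.attack(images, one_hot_labels, weights=[0.5, 0.5])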
| 2.953125 | 3 |
inspecta/__init__.py | grimen/python-inspecta | 0 | 12795015 |
# =========================================
# IMPORTS
# --------------------------------------
import rootpath
rootpath.append()
# =========================================
# EXPORTS
# --------------------------------------
from inspecta.inspector import *
| 1.554688 | 2 |
lab01/app/views.py | cesar-limachi/TECSUP-DAE-2021-2-B-CESAR-LIMACHI | 0 | 12795016 | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def index(request):
return HttpResponse("Desde la vista App")
def sumar(request, numero1, numero2):
sum = numero1 + numero2
return HttpResponse("La suma de %s + %s = %s" % (numero1, numero2, sum))
### !! %s = means it will be a string !! %f = means it will be a float
def restar(request, numero1, numero2):
res = numero1 - numero2
return HttpResponse("La resta de %s - %s = %s" % (numero1, numero2, res))
### !! %s = means it will be a string !! %f = means it will be a float
def multiplicar(request, numero1, numero2):
mul = numero1 * numero2
return HttpResponse("La multiplicacion de %s * %s = %s" % (numero1, numero2, mul))
### !! %s = means it will be a string !! %f = means it will be a float
def dividir(request, numero1, numero2):
div = numero1 / numero2
return HttpResponse("La division de %s / %s = %f" % (numero1, numero2, div))
### !! %s = means it will be a string !! %f = means it will be a float
| 2.796875 | 3 |
examples/simplequery.py | kenichi884/tomotoio | 1 | 12795017 | <filename>examples/simplequery.py<gh_stars>1-10
import logging as log
from time import sleep
from utils import createCubes, releaseCubes
# Identify each cube by the color and the sound signal,
# and report the battery level on the console.
cubes = createCubes(initialReport=True)
try:
for k in range(10):
for i, c in enumerate(cubes):
log.info("Cube #%d, Iteration #%d" % (i + 1, k + 1))
log.info(c.getConfigProtocolVersion())
log.info(c.toioID.get())
log.info(c.motion.get())
log.info(c.button.get())
log.info(c.battery.get())
sleep(0.5)
finally:
# Disconnect
releaseCubes(cubes)
| 2.890625 | 3 |
students/K33401/Tikhonova_Elena/Lr1/task_4/server.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | 4 | 12795018 | <filename>students/K33401/Tikhonova_Elena/Lr1/task_4/server.py
import socket
import threading
def get_connect(socket, number):
print("I am thread number", number)
clientsocket, address = socket.accept()
clientsocket.send(b'Please enter your name: ')
name = clientsocket.recv(1024)
name = name.decode("utf-8")
messages[name] = []
print(name, 'is connected')
while True:
data = clientsocket.recv(1024)
if data == b'q':
print(name, 'left the chat')
break
else:
if data != b'.':
message = name + ':' + data.decode("utf-8")
print(message)
for key in messages:
if key != name:
messages[key].append(message.encode("utf-8"))
while True:
if bool(messages[name]):
clientsocket.send(messages[name].pop(0)+b'\n')
else:
break
if __name__ == "__main__":
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('localhost', 8080))
s.listen(10)
messages = {}
messages['all'] = []
threads = []
for i in range(3):
x = threading.Thread(target=get_connect, args=(s,i+1,))
threads.append(x)
x.start()
for i in range(3):
threads[i].join()
#print(messages['all'])
| 3.40625 | 3 |
Python studying/Codes of examples/A2.1-temp.py | BoyangSheng/Skill-studing | 1 | 12795019 | a = 1
b = 2
temp = a
a = b
b = temp
print(a,b) | 3.265625 | 3 |
apps/cars/migrations/0001_initial.py | Marpop/demo-car-app | 0 | 12795020 | import django.core.validators
import django.db.models.deletion
import django.utils.timezone
from django.db import migrations, models
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Car",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
("maker", models.CharField(max_length=50)),
("model", models.CharField(max_length=50)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="Rate",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
(
"rate",
models.PositiveSmallIntegerField(
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(5),
]
),
),
(
"car",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="rates",
to="cars.car",
),
),
],
options={
"abstract": False,
},
),
]
| 1.921875 | 2 |
src/loaders/loader_json.py | rkulyn/telegram-dutch-taxbot | 2 | 12795021 | import os
import json
from .exceptions import DataLoadException
class JsonDataLoader:
"""
Load base calculation data
from provided JSON file.
"""
def __init__(self, path):
self._path = path
def load(self):
if not os.path.exists(self._path):
raise DataLoadException("Data file does not exist.")
with open(self._path, "r") as f:
data = json.load(f)
return data
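# Minimal usage sketch (editor addition; "base_data.json" is an assumed file
# name, not something this package ships with):
if __name__ == "__main__":
    loader = JsonDataLoader(path="base_data.json")
    try:
        print(loader.load())
    except DataLoadException as exc:
        print("Could not load data:", exc)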
| 3.4375 | 3 |
ippon/cup_fight/views.py | morynicz/ippon_back | 0 | 12795022 | from rest_framework import viewsets, permissions
import ippon.cup_fight.permissions as cfp
import ippon.cup_fight.serializers as cfs
import ippon.models.cup_fight as cfm
class CupFightViewSet(viewsets.ModelViewSet):
queryset = cfm.CupFight.objects.all()
serializer_class = cfs.CupFightSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
cfp.IsCupFightOwnerOrReadOnly)
| 1.8125 | 2 |
src/wai/common/adams/imaging/locateobjects/constants.py | waikato-datamining/wai-common | 0 | 12795023 | """
Module for constants relating to locating objects.
"""
# The key for the X location
KEY_X: str = "x"
# The key for the Y location
KEY_Y: str = "y"
# The key for the width
KEY_WIDTH: str = "width"
# The key for the height
KEY_HEIGHT: str = "height"
# The key for the location
KEY_LOCATION: str = "location"
# The key for the Xs of the polygon in the meta-data (comma-separated list)
KEY_POLY_X: str = "poly_x"
# The key for the Ys of the polygon in the meta-data (comma-separated list)
KEY_POLY_Y: str = "poly_y"
# The key for the overall count
KEY_COUNT: str = "count"
# The key for the index of a group
KEY_INDEX: str = "index"
| 2.671875 | 3 |
tests/test_model_field_list.py | havron/wtforms-alchemy | 161 | 12795024 | <gh_stars>100-1000
import sqlalchemy as sa
from wtforms.fields import FormField
from wtforms_components import PassiveHiddenField
from tests import FormRelationsTestCase, MultiDict
from wtforms_alchemy import ModelFieldList, ModelForm
class ModelFieldListTestCase(FormRelationsTestCase):
def create_models(self):
class Event(self.base):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255), nullable=False)
class Location(self.base):
__tablename__ = 'location'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255), nullable=True)
event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id))
event = sa.orm.relationship(Event, backref='locations')
self.Event = Event
self.Location = Location
def save(self, event=None, data=None):
if not data:
data = {
'name': u'Some event',
'locations-0-name': u'Some location',
'locations-0-description': u'Some description'
}
if not event:
event = self.Event()
self.session.add(event)
form = self.EventForm(MultiDict(data))
else:
form = self.EventForm(MultiDict(data), obj=event)
form.validate()
form.populate_obj(event)
self.session.commit()
return event
class TestReplaceStrategy(ModelFieldListTestCase):
def create_forms(self):
class LocationForm(ModelForm):
class Meta:
model = self.Location
class EventForm(ModelForm):
class Meta:
model = self.Event
locations = ModelFieldList(FormField(LocationForm))
self.LocationForm = LocationForm
self.EventForm = EventForm
def test_assigment_and_deletion(self):
self.save()
event = self.session.query(self.Event).first()
assert event.locations[0].name == u'Some location'
data = {
'name': u'Some event'
}
form = self.EventForm(MultiDict(data))
form.validate()
form.populate_obj(event)
self.session.commit()
event = self.session.query(self.Event).first()
assert event.locations == []
class TestUpdateStrategy(ModelFieldListTestCase):
def create_models(self):
class Event(self.base):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255), nullable=False)
class Location(self.base):
__tablename__ = 'location'
TYPES = (u'', u'football field', u'restaurant')
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255), nullable=True)
description = sa.Column(sa.Unicode(255), default=u'')
type = sa.Column(
sa.Unicode(255),
info={'choices': zip(TYPES, TYPES)},
default=u''
)
event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id))
event = sa.orm.relationship(Event, backref='locations')
def __repr__(self):
return 'Location(id=%r, name=%r)' % (self.id, self.name)
self.Event = Event
self.Location = Location
def create_forms(self):
class LocationForm(ModelForm):
class Meta:
model = self.Location
only = ['name', 'description', 'type']
id = PassiveHiddenField()
class EventForm(ModelForm):
class Meta:
model = self.Event
locations = ModelFieldList(
FormField(LocationForm),
population_strategy='update'
)
self.LocationForm = LocationForm
self.EventForm = EventForm
def test_with_none_as_formdata_for_existing_objects(self):
event = self.save()
form = self.EventForm(MultiDict(), obj=event)
assert form.locations[0].data['id']
def test_single_entry_update(self):
event = self.save()
location_id = event.locations[0].id
data = {
'name': u'Some event',
'locations-0-id': location_id,
'locations-0-name': u'Some other location'
}
self.save(event, data)
assert len(event.locations) == 1
assert event.locations[0].id == location_id
assert event.locations[0].name == u'Some other location'
def test_creates_new_objects_for_entries_with_unknown_identifiers(self):
event = self.save()
location_id = event.locations[0].id
data = {
'name': u'Some event',
'locations-0-id': 12,
'locations-0-name': u'Some other location'
}
self.save(event, data)
assert event.locations
assert event.locations[0].id != location_id
def test_replace_entry(self):
data = {
'name': u'Some event',
'locations-0-name': u'Some location',
'locations-0-description': u'Some description',
'locations-0-type': u'restaurant'
}
event = self.save(data=data)
location_id = event.locations[0].id
self.session.commit()
data = {
'name': u'Some event',
'locations-0-name': u'Some other location',
}
self.save(event, data)
location = event.locations[0]
assert location.id != location_id
assert location.name == u'Some other location'
assert location.description == u''
assert location.type == u''
assert len(event.locations) == 1
def test_replace_and_update(self):
data = {
'name': u'Some event',
'locations-0-name': u'Location 1',
'locations-0-description': u'Location 1 description',
'locations-1-name': u'Location 2',
'locations-1-description': u'Location 2 description',
}
event = self.save(data=data)
self.session.commit()
data = {
'name': u'Some event',
'locations-0-id': event.locations[1].id,
'locations-0-name': u'Location 2 updated',
'locations-0-description': u'Location 2 description updated',
'locations-1-name': u'Location 3',
}
self.save(event, data)
self.session.commit()
location = event.locations[0]
location2 = event.locations[1]
assert location.name == u'Location 2 updated'
assert location.description == u'Location 2 description updated'
assert len(event.locations) == 2
assert location2.name == u'Location 3'
assert location2.description == u''
def test_multiple_entries(self):
event = self.save()
location_id = event.locations[0].id
data = {
'name': u'Some event',
'locations-0-name': u'Some location',
'locations-1-id': str(location_id), # test coercing works
'locations-1-name': u'Some other location',
'locations-2-name': u'Third location',
'locations-3-id': 123,
'locations-3-name': u'Fourth location'
}
self.save(event, data)
assert len(event.locations) == 4
assert event.locations[0].id == location_id
assert event.locations[0].name == u'Some other location'
assert event.locations[1].name == u'Some location'
assert event.locations[2].name == u'Third location'
assert event.locations[3].name == u'Fourth location'
def test_delete_all_field_list_entries(self):
event = self.save()
data = {
'name': u'Some event'
}
self.save(event, data)
assert not event.locations
def test_update_and_remove(self):
location = self.Location(
name=u'Location #2'
)
event = self.Event(
name=u'Some event',
locations=[
self.Location(
name=u'Location #1'
),
location
]
)
self.session.add(event)
self.session.commit()
data = {
'locations-0-id': location.id,
'locations-0-name': u'Location',
}
self.save(event, data)
self.session.refresh(event)
assert len(event.locations) == 1
assert event.locations[0] == location
| 2.328125 | 2 |
jd/api/__init__.py | fengjinqi/linjuanbang | 5 | 12795025 | from jd.api.rest import *
from jd.api.base import FileItem | 1.070313 | 1 |
eslearn/utils/test.py | dongmengshi/easylearn | 0 | 12795026 | <reponame>dongmengshi/easylearn<filename>eslearn/utils/test.py
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 28 15:07:49 2018
@author: lenovo
"""
from lc_svc_oneVsRest import oneVsRest
import numpy as np
import pandas as pd
from lc_read_write_Mat import read_mat
import sys
sys.path.append(r'D:\myCodes\LC_MVPA\Python\MVPA_Python\utils')
sys.path.append(r'D:\myCodes\LC_MVPA\Python\MVPA_Python\classfication')
# X
fileName = r'J:\分类测试_20180828\Ne-L_VS_Ne-R_n=709'
dataset_name = 'coef'
dataset_struct, dataset = read_mat(fileName, dataset_name)
X = dataset
X = pd.DataFrame(X)
# y
s = pd.read_excel(r'J:\分类测试_20180828\机器学习-ID.xlsx')
dgns = s['诊断'].values
# combine labels and features into a single frame
xandy = pd.concat([pd.DataFrame(dgns), X], axis=1)
# drop rows containing NaN values
xandy = xandy.dropna()
# split back into features X and labels y
X = xandy.iloc[:, 1:].values
y = xandy.iloc[:, 0].values
X = np.reshape(X, [len(X), X.shape[1]])
y = [int(d) for d in y]
# predict and test
# NOTE: `prd` (the predicted labels) is never defined in this script; it is
# presumably produced by the imported oneVsRest classifier before this point.
comp = prd - y
Acc = np.sum(comp == 0) / len(comp)
| 2.203125 | 2 |
datamgt/urls.py | timmo-d/MyTrader | 0 | 12795027 | <filename>datamgt/urls.py
from django.urls import path
from . import views
app_name = 'datamgt'
urlpatterns = [
#path('', views.DataMgtPageView.as_view(), name='index'),
path('', views.create, name='index'),
]
| 1.875 | 2 |
src/semsel/exceptions.py | modist-io/semsel | 0 | 12795028 | # -*- encoding: utf-8 -*-
# Copyright (c) 2019 <NAME> <<EMAIL>>
# ISC License <https://choosealicense.com/licenses/isc>
"""Contains custom exceptions and errors."""
from typing import Optional
class SemselException(Exception):
"""Module-wide exception namespace."""
def __init__(self, message: str):
"""Initialize the exception instance.
:param str message: The user-intended exception message
"""
super().__init__(message)
self.message = message
class ParseFailure(SemselException):
"""Raised during BNF-grammar parsing / transformation failures."""
pass
class InvalidExpression(SemselException):
"""Raised when evaluation of an expression is shown to have conflicts."""
pass
| 2.390625 | 2 |
call_sibling.py | sahlinet/tumbo-demoapp | 0 | 12795029 | def func(self):
return self.siblings.sibling_A(self)
| 1.679688 | 2 |
Chapter 11/Traversing_through_a_linked_list_4_2_1_method1.py | bpbpublications/Advance-Core-Python-Programming | 0 | 12795030 | <gh_stars>0
class Node:
def __init__(self,data = None):
self.data = data
self.reference = None
#EXECUTION
objNode1 = Node(1)
objNode2 = Node(2)
objNode3 = Node(3)
objNode4 = Node(4)
objNode1.reference = objNode2
objNode2.reference = objNode3
objNode3.reference = objNode4
objNode4.reference = None
presentNode = objNode1
while presentNode:
print("DATA VALUE = ",presentNode.data)
presentNode = presentNode.reference
| 3.015625 | 3 |
mtc/core/evaluator.py | MIC-DKFZ/n2c2-challenge-2019 | 1 | 12795031 | """
.. module:: evaluator
:synopsis: Holding all evaluator classes!
.. moduleauthor:: <NAME>
"""
from typing import List, Union, Dict
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from sklearn.metrics import classification_report
from mtc.core.sentence import Sentence
def Evaluator(name, *args, **kwargs):
"""
All evaluator classes should be called via this method
"""
for cls in EvaluatorBaseClass.__subclasses__():
if cls.__name__ == name:
return cls(*args, **kwargs)
    raise ValueError('No evaluator named %s' % name)
class EvaluatorBaseClass(ABC):
"""
Any evaluator class must inherit from this class
"""
@property
def key_name(self):
"""Name must be unique!"""
return self.__class__.__name__
def evaluate(self, *args, **kwargs) -> List[Sentence]:
"""Add embeddings to all words in a list of sentences. If embeddings are already added, updates only if embeddings
are non-static."""
# if only one sentence is passed, convert to list of sentence
self._evaluate_internal(*args, **kwargs)
@abstractmethod
def _evaluate_internal(self, sentences: List[Sentence]) -> List[Sentence]:
"""Private method for adding embeddings to all words in a list of sentences."""
pass
@abstractmethod
def get_params(self) -> Dict:
pass
class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass):
def __init__(self):
super().__init__()
self.results = dict()
@property
def key_name(self):
"""Name must be unique!"""
return f"{self.__class__.__name__}"
def _evaluate_internal(self, y_eval, y_eval_predicted, *args, **kwargs):
# y_train = np.take(exp_data['y'], exp_data['idx_train'], axis=0)
# y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0)
# y_test = np.take(exp_data['y'], exp_data['idx_dev'], axis=0)
# y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0)
self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]]
# self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]]
# print('on training set with pcc: %f' % self.results['pearson'][0])
print('PCC: %f' % self.results['pearson'][0])
def get_params(self) -> Dict:
params = dict()
params['name'] = self.key_name
params['append'] = True
params.update(self.results)
return params
class PredictionAccuracyBySentence(EvaluatorBaseClass):
def __init__(self):
super().__init__()
self.results = None
self.diff_dict = {}
@property
def key_name(self):
"""Name must be unique!"""
return f"{self.__class__.__name__}"
def _evaluate_internal(self, y_eval, y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval):
self.diff_dict = {
'diff': list(abs(y_eval-y_eval_predicted)),
'sen_idx': test_index,
'gold_standard': y_eval,
'pred': y_eval_predicted,
'raw_sentences_a': rsa_a_eval,
'raw_sentences_b': rsa_b_eval
}
def get_params(self) -> Dict:
params = dict()
params['name'] = self.key_name
params['append'] = False
params['diff_dict'] = self.diff_dict
return params
if __name__ == '__main__':
from mtc.core.preprocessor import Preprocessor
# from mtc.core.sentence import Sentence
# from sklearn import linear_model, ensemble
#
# preprocessor = Preprocessor('DefaultPreprocessor')
#
# sentence_a = [
# Sentence('Hallo du, wie geht es dir?', preprocessor, {'ground_truth': 3}),
# Sentence('Mein Name ist Tina.', preprocessor, {'ground_truth':2})
# ]
# sentence_b = [
# Sentence('Hi du, wie geht\'s?', preprocessor),
# Sentence('Mein Name ist Paul', preprocessor),
# ]
#
# clf = linear_model.Lasso(alpha=0.1)
# classifier = Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}])
# evaluator = Evaluator('PCCE')
#
# classifier.fit(sentence_a, sentence_b)
# classifier.predict(sentence_a, sentence_b)
# evaluator.evaluate(sentence_a[0])
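    # Hedged minimal example (editor addition): exercise the factory with the
    # Pearson evaluator defined above on made-up gold/predicted scores.
    pcc = Evaluator('PearsonCorrelationCoefficientEvaluator')
    pcc.evaluate([3.0, 2.5, 4.0, 1.0], [2.8, 2.6, 3.9, 1.2])
    print(pcc.get_params())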
| 2.828125 | 3 |
backend/urls.py | opustm/mvp-backend | 0 | 12795032 | from django.contrib import admin
from django.urls import path, include
from .views import documentation
from .router import router
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [
path('tokenAuth/', obtain_jwt_token),
path('', documentation, name='documentation'),
path('', include('main.urls')),
path('', include(router.urls)),
path('admin/', admin.site.urls),
]
| 1.664063 | 2 |
template/tree_traversal.py | zh-plus/CodeJam | 3 | 12795033 | from queue import Queue
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
@staticmethod
def from_array(array):
# bfs construct binary tree
root = TreeNode(array[0])
q = Queue()
q.put(root)
i = 1
        # use "<" bounds checks so the loop terminates cleanly (and never
        # indexes past the end) when the array has an even number of elements
        while not q.empty() and i < len(array):
            node = q.get()
            node.left = TreeNode(array[i])
            q.put(node.left)
            if i + 1 < len(array):
                node.right = TreeNode(array[i + 1])
                q.put(node.right)
            i += 2
return root
def ldr(root: TreeNode):
stack = []
node = root
result = []
while node or stack:
# go to the most left node
while node:
stack.append(node)
node = node.left
node = stack.pop()
result.append(node.val)
node = node.right
return ' '.join(list(map(str, result)))
def dlr(root: TreeNode):
stack = []
node = root
result = []
while node or stack:
while node:
result.append(node.val)
stack.append(node)
node = node.left
node = stack.pop()
node = node.right
return ' '.join(list(map(str, result)))
def lrd(root: TreeNode):
"""
    Walk the tree in D-R-L order (pre-order with the children mirrored), then
    reverse the collected values to obtain the L-R-D (post-order) sequence.
"""
stack = []
node = root
result = []
while node or stack:
while node:
result.append(node.val)
stack.append(node)
node = node.right
node = stack.pop()
node = node.left
return ' '.join(list(map(str, reversed(result))))
if __name__ == '__main__':
root = TreeNode(1)
root.right = TreeNode(2)
root.right.left = TreeNode(3)
print(ldr(root))
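    # Editor's illustrative extra check: build a complete tree level by level
    # with from_array (values 1..7 are an arbitrary example) and run all three
    # traversals defined above.
    root2 = TreeNode.from_array([1, 2, 3, 4, 5, 6, 7])
    print(dlr(root2))   # pre-order:  1 2 4 5 3 6 7
    print(ldr(root2))   # in-order:   4 2 5 1 6 3 7
    print(lrd(root2))   # post-order: 4 5 2 6 7 3 1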
| 3.765625 | 4 |
Warmup/compare_the_triplet.py | osamadel/Hacker-Rank | 0 | 12795034 | def compare(a, b):
scoreA = 0
scoreB = 0
for i in range(len(a)):
if a[i] > b[i]:
scoreA += 1
elif a[i] < b[i]:
scoreB += 1
return (scoreA, scoreB)
a = list(map(int, input().split()))
b = list(map(int, input().split()))
scoreA, scoreB = compare(a,b)
print(scoreA, scoreB) | 3.46875 | 3 |
beerbackend/beer/api.py | eternalnoob/beerbackend | 1 | 12795035 | from beerbackend.user.models import Beer, families, User
from flask_restful import Resource, Api, reqparse, fields, marshal_with
from flask.json import jsonify
import os
import json
beer_get_parse = reqparse.RequestParser()
beer_get_parse.add_argument('beer_name', dest='beer_name',
type=str, required=True,
help='The Name of the beer')
PBR = {
"sour": 1,
"malty": 1,
"family": "pale-lager",
"hoppy": 1,
"name": "PBR",
"abv": 1,
"wood": 1,
"bitter": 1,
"color": 1,
"roasty": 1,
"spice": 1,
"sweet": 1,
"fruit": 1
}
class BeerApi(Resource):
def get(self):
args = beer_get_parse.parse_args()
name = args.beer_name
beer = Beer.query.filter(Beer.beer_name == name).first()
print(name)
print(beer)
if beer:
return beer.to_data()
else:
return None
class BeersApi(Resource):
def get(self):
beers = Beer.query.all()
if beers:
return{"beers": [beer.to_data() for beer in beers]}
else:
return {"beers": []}
def put(self):
print(os.getcwd())
with open('beers.json','r') as fin:
beers = json.load(fin)
for beer in beers["beers"]:
family = None
if beer.get("family").lower() in families.values():
family = list(families.values()).index(beer.get("family").lower()) + 1
else:
family = 1 #default to 1 if not a family we know
Beer.create(beer_name=beer["name"], abv=beer["abv"], bitter=beer["bitter"],
color=beer["color"], fruit=beer["fruit"], hoppy=beer["hoppy"],
malty=beer["malty"], roasty=beer["roasty"], sweet=beer["sweet"],
spice=beer["spice"], wood=beer["wood"], family=family,
smoke=beer["smoke"], sour=beer["sour"])
| 2.96875 | 3 |
samples/data-serialization/ds-python-grpc/main.py | obecto/perper | 24 | 12795036 | <reponame>obecto/perper<filename>samples/data-serialization/ds-python-grpc/main.py<gh_stars>10-100
import grpc
import fabric_pb2_grpc
import fabric_pb2
from SimpleData import SimpleData
from pyignite.datatypes.prop_codes import *
from AffinityKey import *
from Notification import Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData
from PerperThinClient import PerperThinClient
agent_delegate = "Application"
channel = grpc.insecure_channel('localhost:40400')
stub = fabric_pb2_grpc.FabricStub(channel) # Used to call rpc methods
notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate);
print(f"Using notification filter with parameters {notification_filter}");
print("Processing notifications...")
client = PerperThinClient()
client.compact_footer = True
client.connect('localhost', 10800)
client.register_binary_type(NotificationKeyLong, affinity_key_field = "affinity")
client.register_binary_type(NotificationKeyString, affinity_key_field = "affinity")
client.register_binary_type(SimpleData)
calls = client.get_cache("calls")
cache_config = {
PROP_NAME: f"{agent_delegate}-$notifications",
PROP_CACHE_KEY_CONFIGURATION: [
{
'type_name': 'NotificationKey',
'affinity_key_field_name': 'affinity'
}
]
}
notifications_cache = client.get_cache(cache_config);
for notification in stub.Notifications(notification_filter):
item = None
key = None
print(notification)
if not (notification.stringAffinity in (None, "")):
key = NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity)
if (notification.intAffinity != 0) :
key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity)
item = notifications_cache.get(key)
assert item != None
if "cache" in vars(item):
item_cache = client.get_cache(item.cache);
final_item = item_cache.get(item.key);
print(f"Retrieved Item: {final_item}");
notifications_cache.get_and_remove(key);
| 2.1875 | 2 |
src/nexgen/beamlines/__init__.py | DominicOram/nexgen | 0 | 12795037 | """Utilities for writing NeXus files for beamlines at Diamond Light Source."""
| 0.972656 | 1 |
talks/2016-09-27-intro-tensorflow/visualize_activations.py | ramhiser/deep-learning-with-tensorflow-meetup | 12 | 12795038 | <reponame>ramhiser/deep-learning-with-tensorflow-meetup
from models.base_convnet import inference
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
import os
IMAGE_SIZE = 24
def plotNNFilter(units):
"""
Function to plot a certain layer
:param units: convnet layer
"""
filters = units.shape[3]
plt.figure(1, figsize=(20, 20))
for i in xrange(0, filters):
plt.subplot(7, 6, i+1)
plt.title('Filter ' + str(i))
plt.imshow(units[0, :, :, i], interpolation="nearest", cmap="gray")
plt.tight_layout(pad=3.0)
plt.show()
def run_model_image(checkpoint_file, image):
"""
Run an image through the trained model and vizualize its activations
:param checkpoint_file: The saved model parameters for the basic model
:param image: The supplied image (same dimensions as training).
"""
with tf.Graph().as_default():
image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1])
image = tf.image.per_image_whitening(image)
image = tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1])
image = tf.cast(image, tf.float32)
relu1, relu2, relu3 = inference(train=False, images=image, visualize=True)
saver = tf.train.Saver(tf.all_variables())
sess = tf.Session()
saver.restore(sess=sess, save_path=checkpoint_file)
units = relu1.eval(session=sess)
plotNNFilter(units)
units = relu2.eval(session=sess)
plotNNFilter(units)
units = relu3.eval(session=sess)
plotNNFilter(units)
def plot_activations(image_dir):
"""
Plot the activations for a given image
:param checkpoint_file: Where the model is saved
:param image_dir:
"""
read_image = cv2.imread(image_dir, 0)
read_image = cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA)
run_model_image(checkpoint_file='./base/base.ckpt', image=read_image)
def visualize_activations():
# Get the test image directory and then run the test images through the model
# and visualize activations.
wd = os.getcwd()
test_images = wd + '/test_data/number_samples/'
for image_dir in os.listdir(test_images):
print image_dir
plot_activations(test_images + image_dir) | 3.359375 | 3 |
geepee/pep_models.py | MattAshman/geepee | 24 | 12795039 | <filename>geepee/pep_models.py
"""Summary
# TODO: this should reuse base models!
"""
import sys
import math
import numpy as np
import scipy.linalg as npalg
from scipy import special
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import time
import pdb
from scipy.cluster.vq import kmeans2
from utils import *
from kernels import *
from lik_layers import Gauss_Layer, Probit_Layer, Gauss_Emis
from base_models import Base_SGPR, Base_SGPLVM, Base_SGPSSM
from config import *
class SGP_Layer(object):
"""Summary
Attributes:
Din (TYPE): Description
Dout (TYPE): Description
Kuu (TYPE): Description
Kuuinv (TYPE): Description
ls (TYPE): Description
M (TYPE): Description
mu (TYPE): Description
N (TYPE): Description
sf (int): Description
Splusmm (TYPE): Description
Su (TYPE): Description
Suinv (TYPE): Description
SuinvMu (TYPE): Description
t1 (TYPE): Description
t2 (TYPE): Description
zu (TYPE): Description
"""
def __init__(self, no_train, input_size, output_size, no_pseudo):
"""Summary
Args:
no_train (TYPE): Description
input_size (TYPE): Description
output_size (TYPE): Description
no_pseudo (TYPE): Description
"""
self.Din = Din = input_size
self.Dout = Dout = output_size
self.M = M = no_pseudo
self.N = N = no_train
# factor variables
self.t1 = np.zeros([N, Dout, M])
self.t2 = np.zeros([N, Dout, M, M])
# TODO
self.mu = np.zeros([Dout, M, ])
self.Su = np.zeros([Dout, M, M])
self.SuinvMu = np.zeros([Dout, M, ])
self.Suinv = np.zeros([Dout, M, M])
self.Splusmm = np.zeros([Dout, M, M])
# numpy variable for inducing points, Kuuinv, Kuu and its gradients
self.zu = np.zeros([M, Din])
self.Kuu = np.zeros([M, M])
self.Kuuinv = np.zeros([M, M])
# variables for the hyperparameters
self.ls = np.zeros([Din, ])
self.sf = 0
def compute_phi_prior(self):
"""Summary
Returns:
TYPE: Description
"""
(sign, logdet) = np.linalg.slogdet(self.Kuu)
logZ_prior = self.Dout * 0.5 * logdet
return logZ_prior
def compute_phi_posterior(self):
"""Summary
Returns:
TYPE: Description
"""
(sign, logdet) = np.linalg.slogdet(self.Su)
phi_posterior = 0.5 * np.sum(logdet)
phi_posterior += 0.5 * \
np.sum(self.mu * np.linalg.solve(self.Su, self.mu))
return phi_posterior
def compute_phi_cavity(self):
"""Summary
Returns:
TYPE: Description
"""
logZ_posterior = 0
(sign, logdet) = np.linalg.slogdet(self.Suhat)
phi_cavity = 0.5 * np.sum(logdet)
phi_cavity += 0.5 * \
np.sum(self.muhat * np.linalg.solve(self.Suhat, self.muhat))
return phi_cavity
def compute_phi(self, alpha=1.0):
"""Summary
Args:
alpha (float, optional): Description
Returns:
TYPE: Description
"""
N = self.N
scale_post = N * 1.0 / alpha - 1.0
scale_cav = - N * 1.0 / alpha
scale_prior = 1
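        # (editor note) these scales recombine the prior, posterior and cavity
        # log-partition terms into the Power-EP approximate free energy:
        #   phi = phi_prior + (N/alpha - 1) * phi_post - (N/alpha) * phi_cav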
phi_prior = self.compute_phi_prior()
phi_post = self.compute_phi_posterior()
phi_cav = self.compute_phi_cavity()
phi = scale_prior * phi_prior + scale_post * phi_post + scale_cav * phi_cav
return phi
def forward_prop_thru_cav(self, n, mx, vx=None, alpha=1.0):
"""Summary
Args:
n (TYPE): Description
mx (TYPE): Description
vx (None, optional): Description
alpha (float, optional): Description
Returns:
TYPE: Description
"""
if vx is None:
return self._forward_prop_deterministic_thru_cav(n, mx, alpha)
else:
return self._forward_prop_random_thru_cav_mm(n, mx, vx, alpha)
def _forward_prop_deterministic_thru_cav(self, n, x, alpha):
"""Summary
Args:
n (TYPE): Description
x (TYPE): Description
alpha (TYPE): Description
Returns:
TYPE: Description
"""
muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha)
Kuuinv = self.Kuuinv
Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat)
Bhat = np.einsum(
'ab,ndbc->ndac',
Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv
kff = np.exp(2 * self.sf)
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
mout = np.einsum('nm,ndm->nd', kfu, Ahat)
Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu)
vout = kff + Bkfukuf
extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat]
return mout, vout, extra_res
def _forward_prop_random_thru_cav_mm(self, n, mx, vx, alpha):
"""Summary
Args:
n (TYPE): Description
mx (TYPE): Description
vx (TYPE): Description
alpha (TYPE): Description
Returns:
TYPE: Description
"""
muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha)
Kuuinv = self.Kuuinv
Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat)
Smm = Suhat + np.einsum('nda,ndb->ndab', muhat, muhat)
Bhat = np.einsum(
'ab,ndbc->ndac',
Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv)) - Kuuinv
psi0 = np.exp(2 * self.sf)
psi1, psi2 = compute_psi_weave(
2 * self.ls, 2 * self.sf, mx, vx, self.zu)
mout = np.einsum('nm,ndm->nd', psi1, Ahat)
Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat, psi2)
vout = psi0 + Bhatpsi2 - mout**2
extra_res = [muhat, Suhat, SuinvMuhat,
Suinvhat, Smm, psi1, psi2, Ahat, Bhat]
return mout, vout, extra_res
def forward_prop_thru_post(self, mx, vx=None):
"""Summary
Args:
mx (TYPE): Description
vx (None, optional): Description
Returns:
TYPE: Description
"""
if vx is None:
return self._forward_prop_deterministic_thru_post(mx)
else:
return self._forward_prop_random_thru_post_mm(mx, vx)
def _forward_prop_deterministic_thru_post(self, x):
"""Summary
Args:
x (TYPE): Description
Returns:
TYPE: Description
"""
Kuuinv = self.Kuuinv
A = np.einsum('ab,db->da', Kuuinv, self.mu)
B = np.einsum(
'ab,dbc->dac',
Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv
kff = np.exp(2 * self.sf)
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
mout = np.einsum('nm,dm->nd', kfu, A)
Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu)
vout = kff + Bpsi2
return mout, vout
# TODO
def _forward_prop_random_thru_post_mm(self, mx, vx):
"""Summary
Args:
mx (TYPE): Description
vx (TYPE): Description
Returns:
TYPE: Description
"""
Kuuinv = self.Kuuinv
A = np.einsum('ab,db->da', Kuuinv, self.mu)
Smm = self.Su + np.einsum('da,db->dab', self.mu, self.mu)
B = np.einsum(
'ab,dbc->dac',
Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv
psi0 = np.exp(2.0 * self.sf)
psi1, psi2 = compute_psi_weave(
2 * self.ls, 2 * self.sf, mx, vx, self.zu)
mout = np.einsum('nm,dm->nd', psi1, A)
Bpsi2 = np.einsum('dab,nab->nd', B, psi2)
vout = psi0 + Bpsi2 - mout**2
return mout, vout
def backprop_grads_lvm(self, m, v, dm, dv, extra_args, mx, vx, alpha=1.0):
"""Summary
Args:
m (TYPE): Description
v (TYPE): Description
dm (TYPE): Description
dv (TYPE): Description
extra_args (TYPE): Description
mx (TYPE): Description
vx (TYPE): Description
alpha (float, optional): Description
Returns:
TYPE: Description
"""
N = self.N
M = self.M
ls = np.exp(self.ls)
sf2 = np.exp(2 * self.sf)
zu = self.zu
Kuuinv = self.Kuuinv
a = extra_args
muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat = \
a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]
# compute grads wrt Ahat and Bhat
dm_all = dm - 2 * dv * m
dAhat = np.einsum('nd,nm->ndm', dm_all, psi1)
dBhat = np.einsum('nd,nab->ndab', dv, psi2)
# compute grads wrt psi1 and psi2
dpsi1 = np.einsum('nd,ndm->nm', dm_all, Ahat)
dpsi2 = np.einsum('nd,ndab->nab', dv, Bhat)
dsf2, dls, dzu, dmx, dvx = compute_psi_derivatives(
dpsi1, psi1, dpsi2, psi2, ls, sf2, mx, vx, zu)
dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv)
dmcav = 2 * np.einsum('ndab,ndb->nda', dvcav, muhat) \
+ np.einsum('ab,ndb->nda', Kuuinv, dAhat)
grad_hyper = {}
grad_input = {'mx': dmx, 'vx': dvx, 'mcav': dmcav, 'vcav': dvcav}
return grad_hyper, grad_input
def backprop_grads_reg(self, m, v, dm, dv, extra_args, x, alpha=1.0):
"""Summary
Args:
m (TYPE): Description
v (TYPE): Description
dm (TYPE): Description
dv (TYPE): Description
extra_args (TYPE): Description
x (TYPE): Description
alpha (float, optional): Description
Returns:
TYPE: Description
"""
a = extra_args
muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat = \
a[0], a[1], a[2], a[3], a[4], a[5], a[6]
Kuuinv = self.Kuuinv
# compute grads wrt Ahat and Bhat
dAhat = np.einsum('nd,nm->ndm', dm, kfu)
dBhat = np.einsum('nd,na,nb->ndab', dv, kfu, kfu)
dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv)
dmcav = np.einsum('ab,ndb->nda', Kuuinv, dAhat)
grad_hyper = {}
grad_cav = {'mcav': dmcav, 'vcav': dvcav}
return grad_hyper, grad_cav
def update_factor(self, n, alpha, grad_cav, extra_args, decay=0):
"""Summary
Args:
n (TYPE): Description
alpha (TYPE): Description
grad_cav (TYPE): Description
extra_args (TYPE): Description
decay (int, optional): Description
Returns:
TYPE: Description
"""
muhat, Suhat, SuinvMuhat, Suinvhat = \
extra_args[0], extra_args[1], extra_args[2], extra_args[3]
dmcav, dvcav = grad_cav['mcav'], grad_cav['vcav']
# perform Power-EP update
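        # (editor note) the lines below are the standard Gaussian moment-matching
        # identities, expressed through derivatives of log Z w.r.t. the cavity:
        #   mu_new = mu_cav + S_cav * dmcav
        #   S_new  = S_cav  - S_cav * (dmcav dmcav^T - 2 dvcav) * S_cav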
munew = muhat + np.einsum('ndab,ndb->nda', Suhat, dmcav)
inner = np.einsum('nda,ndb->ndab', dmcav, dmcav) - 2 * dvcav
Sunew = Suhat - np.einsum(
'ndab,ndbc->ndac',
Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat))
Suinvnew = np.linalg.inv(Sunew)
SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew, munew)
t2_frac = Suinvnew - Suinvhat
t1_frac = SuinvMunew - SuinvMuhat
t1_old = self.t1[n, :, :]
t2_old = self.t2[n, :, :, :]
t1_new = (1.0 - alpha) * t1_old + t1_frac
t2_new = (1.0 - alpha) * t2_old + t2_frac
if t1_new.shape[0] == 1:
# TODO: do damping here?
self.t1[n, :, :] = t1_new
self.t2[n, :, :, :] = t2_new
# TODO: update posterior
self.Su = Sunew[0, :, :, :]
self.mu = munew[0, :, :]
self.Suinv = Suinvnew[0, :, :, :]
self.SuinvMu = SuinvMunew[0, :, :]
else:
# parallel update
self.t1[n, :, :] = decay * t1_old + (1 - decay) * t1_new
self.t2[n, :, :, :] = decay * t2_old + (1 - decay) * t2_new
self.update_posterior()
# axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :])))
# axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :])))
# axs[0].set_title('n = %d' % n[0])
# plt.show()
def sample(self, x):
"""Summary
Args:
x (TYPE): Description
Returns:
TYPE: Description
"""
Su = self.Su
mu = self.mu
Lu = np.linalg.cholesky(Su)
epsilon = np.random.randn(self.Dout, self.M)
u_sample = mu + np.einsum('dab,db->da', Lu, epsilon)
kff = compute_kernel(2 * self.ls, 2 * self.sf, x, x)
kff += np.diag(JITTER * np.ones(x.shape[0]))
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
qfu = np.dot(kfu, self.Kuuinv)
mf = np.einsum('nm,dm->nd', qfu, u_sample)
vf = kff - np.dot(qfu, kfu.T)
Lf = np.linalg.cholesky(vf)
epsilon = np.random.randn(x.shape[0], self.Dout)
f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon)
return f_sample
def compute_kuu(self):
"""Summary
Returns:
TYPE: Description
"""
# update kuu and kuuinv
ls = self.ls
sf = self.sf
Dout = self.Dout
M = self.M
zu = self.zu
self.Kuu = compute_kernel(2 * ls, 2 * sf, zu, zu)
self.Kuu += np.diag(JITTER * np.ones((M, )))
self.Kuuinv = np.linalg.inv(self.Kuu)
def compute_cavity(self, n, alpha=1.0):
"""Summary
Args:
n (TYPE): Description
alpha (float, optional): Description
Returns:
TYPE: Description
"""
# compute the leave one out moments
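        # (editor note) in natural parameters the cavity removes a fraction
        # alpha of this data point's factor from the posterior:
        #   Lambda_cav = Lambda_post - alpha * t2_n,  eta_cav = eta_post - alpha * t1_n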
t1n = self.t1[n, :, :]
t2n = self.t2[n, :, :, :]
Suinvhat = self.Suinv - alpha * t2n
SuinvMuhat = self.SuinvMu - alpha * t1n
Suhat = np.linalg.inv(Suinvhat)
muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat)
return muhat, Suhat, SuinvMuhat, Suinvhat
def update_posterior(self):
"""Summary
Returns:
TYPE: Description
"""
# compute the posterior approximation
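        # (editor note) the posterior natural parameters are the prior precision
        # Kuu^-1 plus the sum of all local factor contributions:
        #   Lambda_post = Kuu^-1 + sum_n t2_n,  eta_post = sum_n t1_n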
self.Suinv = self.Kuuinv + np.sum(self.t2, axis=0)
self.SuinvMu = np.sum(self.t1, axis=0)
self.Su = np.linalg.inv(self.Suinv)
self.mu = np.einsum('dab,db->da', self.Su, self.SuinvMu)
def init_hypers(self, x_train=None, key_suffix=''):
"""Summary
Args:
x_train (None, optional): Description
key_suffix (str, optional): Description
Returns:
TYPE: Description
"""
# dict to hold hypers, inducing points and parameters of q(U)
N = self.N
M = self.M
Din = self.Din
Dout = self.Dout
if x_train is None:
ls = np.log(np.ones((Din, )) + 0.1 * np.random.rand(Din, ))
sf = np.log(np.array([1]))
zu = np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1, Din))
else:
if N < 10000:
centroids, label = kmeans2(x_train, M, minit='points')
else:
randind = np.random.permutation(N)
centroids = x_train[randind[0:M], :]
zu = centroids
if N < 10000:
X1 = np.copy(x_train)
else:
randind = np.random.permutation(N)
X1 = X[randind[:5000], :]
x_dist = cdist(X1, X1, 'euclidean')
triu_ind = np.triu_indices(N)
ls = np.zeros((Din, ))
d2imed = np.median(x_dist[triu_ind])
for i in range(Din):
ls[i] = np.log(d2imed + 1e-16)
sf = np.log(np.array([0.5]))
params = dict()
params['sf' + key_suffix] = sf
params['ls' + key_suffix] = ls
params['zu' + key_suffix] = zu
return params
def get_hypers(self, key_suffix=''):
"""Summary
Args:
key_suffix (str, optional): Description
Returns:
TYPE: Description
"""
params = {}
M = self.M
Din = self.Din
Dout = self.Dout
params['ls' + key_suffix] = self.ls
params['sf' + key_suffix] = self.sf
params_zu_i = self.zu
params['zu' + key_suffix] = self.zu
return params
def update_hypers(self, params, key_suffix=''):
"""Summary
Args:
params (TYPE): Description
key_suffix (str, optional): Description
Returns:
TYPE: Description
"""
self.ls = params['ls' + key_suffix]
self.sf = params['sf' + key_suffix]
self.zu = params['zu' + key_suffix]
# update Kuu given new hypers
self.compute_kuu()
# compute mu and Su for each layer
self.update_posterior()
class SGPR(Base_SGPR):
"""Summary
Attributes:
Din (TYPE): Description
Dout (TYPE): Description
lik_layer (TYPE): Description
M (TYPE): Description
N (TYPE): Description
sgp_layer (TYPE): Description
updated (bool): Description
x_train (TYPE): Description
"""
def __init__(self, x_train, y_train, no_pseudo, lik='Gaussian'):
"""Summary
Args:
x_train (TYPE): Description
y_train (TYPE): Description
no_pseudo (TYPE): Description
lik (str, optional): Description
Raises:
NotImplementedError: Description
"""
super(SGPR, self).__init__(x_train, y_train, no_pseudo, lik)
self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M)
def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0.5):
"""Summary
Args:
alpha (float, optional): Description
no_epochs (int, optional): Description
parallel (bool, optional): Description
decay (float, optional): Description
Returns:
TYPE: Description
"""
try:
for e in range(no_epochs):
if e % 50 == 0:
print 'epoch %d/%d' % (e, no_epochs)
if not parallel:
for n in range(self.N):
yn = self.y_train[n, :].reshape([1, self.Dout])
xn = self.x_train[n, :].reshape([1, self.Din])
(mn, vn, extra_res) = \
self.sgp_layer.forward_prop_thru_cav(
[n], xn, alpha=alpha)
logZn, dmn, dvn = \
self.lik_layer.compute_log_Z(mn, vn, yn, alpha)
grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg(
mn, vn, dmn, dvn, extra_res, xn, alpha=alpha)
self.sgp_layer.update_factor(
[n], alpha, grad_cav, extra_res)
else:
# parallel update for entire dataset
# TODO: minibatch parallel
idxs = np.arange(self.N)
y = self.y_train[idxs, :]
x = self.x_train[idxs, :]
(m, v, extra_res) = \
self.sgp_layer.forward_prop_thru_cav(
idxs, x, alpha=alpha)
logZ, dm, dv = \
self.lik_layer.compute_log_Z(m, v, y, alpha)
grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg(
m, v, dm, dv, extra_res, x, alpha=alpha)
self.sgp_layer.update_factor(
idxs, alpha, grad_cav, extra_res, decay=decay)
except KeyboardInterrupt:
print 'Caught KeyboardInterrupt ...'
class SGPLVM(Base_SGPLVM):
"""Summary
Attributes:
Din (TYPE): Description
Dout (TYPE): Description
lik_layer (TYPE): Description
M (TYPE): Description
N (TYPE): Description
sgp_layer (TYPE): Description
t01 (TYPE): Description
t02 (TYPE): Description
tx1 (TYPE): Description
tx2 (TYPE): Description
updated (bool): Description
"""
def __init__(self, y_train, hidden_size, no_pseudo,
lik='Gaussian', prior_mean=0, prior_var=1):
"""Summary
Args:
y_train (TYPE): Description
hidden_size (TYPE): Description
no_pseudo (TYPE): Description
lik (str, optional): Description
prior_mean (int, optional): Description
prior_var (int, optional): Description
Raises:
NotImplementedError: Description
"""
super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var)
self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M)
# natural params for latent variables
self.tx1 = np.zeros((self.N, self.Din))
self.tx2 = np.zeros((self.N, self.Din))
self.t01 = prior_mean / prior_var
self.t02 = 1.0 / prior_var
        # TODO: alternative method for non real-valued data
post_m = PCA_reduce(y_train, self.Din)
post_m_mean = np.mean(post_m, axis=0)
post_m_std = np.std(post_m, axis=0)
post_m = (post_m - post_m_mean) / post_m_std
post_v = 0.1 * np.ones_like(post_m)
post_2 = 1.0 / post_v
post_1 = post_2 * post_m
self.tx1 = post_1 - self.t01
self.tx2 = post_2 - self.t02
def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0):
"""Summary
Args:
alpha (float, optional): Description
no_epochs (int, optional): Description
parallel (bool, optional): Description
decay (int, optional): Description
Returns:
TYPE: Description
"""
try:
for e in range(no_epochs):
if e % 50 == 0:
print 'epoch %d/%d' % (e, no_epochs)
if not parallel:
for n in range(self.N):
yn = self.y_train[n, :].reshape([1, self.Dout])
cav_m_n, cav_v_n, _, _ = self.compute_cavity_x([
n], alpha)
(mn, vn, extra_res) = \
self.sgp_layer.forward_prop_thru_cav(
[n], cav_m_n, cav_v_n, alpha=alpha)
logZn, dmn, dvn = \
self.lik_layer.compute_log_Z(mn, vn, yn, alpha)
grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm(
mn, vn, dmn, dvn, extra_res, cav_m_n, cav_v_n, alpha=alpha)
self.sgp_layer.update_factor(
[n], alpha, grad_cav, extra_res, decay=decay)
self.update_factor_x(
[n], alpha, grad_cav, cav_m_n, cav_v_n, decay=decay)
else:
# parallel update for entire dataset
# TODO: minibatch parallel
idxs = np.arange(self.N)
y = self.y_train[idxs, :]
cav_m, cav_v, _, _ = self.compute_cavity_x(idxs, alpha)
(m, v, extra_res) = \
self.sgp_layer.forward_prop_thru_cav(
idxs, cav_m, cav_v, alpha=alpha)
logZ, dm, dv = \
self.lik_layer.compute_log_Z(m, v, y, alpha)
grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm(
m, v, dm, dv, extra_res, cav_m, cav_v, alpha=alpha)
self.sgp_layer.update_factor(
idxs, alpha, grad_cav, extra_res, decay=decay)
self.update_factor_x(
idxs, alpha, grad_cav, cav_m, cav_v, decay=decay)
except KeyboardInterrupt:
print 'Caught KeyboardInterrupt ...'
def compute_cavity_x(self, n, alpha):
"""Summary
Args:
n (TYPE): Description
alpha (TYPE): Description
Returns:
TYPE: Description
"""
# prior factor
cav_x1 = self.t01 + (1 - alpha) * self.tx1[n, :]
cav_x2 = self.t02 + (1 - alpha) * self.tx2[n, :]
cav_v = 1.0 / cav_x2
cav_m = cav_v * cav_x1
return cav_m, cav_v, cav_x1, cav_x2
def update_factor_x(self, n, alpha, grad_cav, cav_m, cav_v, decay=0.0):
"""Summary
Args:
n (TYPE): Description
alpha (TYPE): Description
grad_cav (TYPE): Description
cav_m (TYPE): Description
cav_v (TYPE): Description
decay (float, optional): Description
Returns:
TYPE: Description
"""
dmx = grad_cav['mx']
dvx = grad_cav['vx']
new_m = cav_m + cav_v * dmx
new_v = cav_v - cav_v**2 * (dmx**2 - 2 * dvx)
new_p2 = 1.0 / new_v
new_p1 = new_p2 * new_m
frac_t2 = new_p2 - 1.0 / cav_v
frac_t1 = new_p1 - cav_m / cav_v
# neg_idxs = np.where(frac_t2 < 0)
# frac_t2[neg_idxs] = 0
cur_t1 = self.tx1[n, :]
cur_t2 = self.tx2[n, :]
tx1_new = (1 - alpha) * cur_t1 + frac_t1
tx2_new = (1 - alpha) * cur_t2 + frac_t2
tx1_new = decay * cur_t1 + (1 - decay) * tx1_new
tx2_new = decay * cur_t2 + (1 - decay) * tx2_new
self.tx1[n, :] = tx1_new
self.tx2[n, :] = tx2_new
def get_posterior_x(self):
"""Summary
Returns:
TYPE: Description
"""
post_1 = self.t01 + self.tx1
post_2 = self.t02 + self.tx2
vx = 1.0 / post_2
mx = post_1 / post_2
return mx, vx
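    # Editor's note (illustrative, not part of the original library): the latent
    # posterior is stored in natural-parameter form and only converted to moments
    # here. With scalar values for illustration -- prior t01 = 0.0, t02 = 1.0 and
    # data factors tx1 = 0.5, tx2 = 1.5 -- the conversion is:
    #   post_2 = t02 + tx2 = 2.5  ->  vx = 1.0 / post_2 = 0.4
    #   post_1 = t01 + tx1 = 0.5  ->  mx = post_1 / post_2 = 0.2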
# def predict_f(self, inputs):
# """Summary
# Args:
# inputs (TYPE): Description
# Returns:
# TYPE: Description
# """
# if not self.updated:
# self.sgp_layer.update_posterior()
# self.updated = True
# mf, vf = self.sgp_layer.forward_prop_thru_post(inputs)
# return mf, vf
# def sample_f(self, inputs, no_samples=1):
# """Summary
# Args:
# inputs (TYPE): Description
# no_samples (int, optional): Description
# Returns:
# TYPE: Description
# """
# if not self.updated:
# self.sgp_layer.update_posterior()
# self.updated = True
# K = no_samples
# fs = np.zeros((inputs.shape[0], self.Dout, K))
# # TODO: remove for loop here
# for k in range(K):
# fs[:, :, k] = self.sgp_layer.sample(inputs)
# return fs
# def predict_y(self, inputs):
# """Summary
# Args:
# inputs (TYPE): Description
# Returns:
# TYPE: Description
# """
# if not self.updated:
# self.sgp_layer.update_posterior()
# self.updated = True
# mf, vf = self.sgp_layer.forward_prop_thru_post(inputs)
# my, vy = self.lik_layer.output_probabilistic(mf, vf)
# return my, vy
# def update_hypers(self, params):
# """Summary
# Args:
# params (TYPE): Description
# Returns:
# TYPE: Description
# """
# self.sgp_layer.update_hypers(params)
# self.lik_layer.update_hypers(params)
# def init_hypers(self):
# """Summary
# Returns:
# TYPE: Description
# """
# sgp_params = self.sgp_layer.init_hypers()
# lik_params = self.lik_layer.init_hypers()
# init_params = dict(sgp_params)
# init_params.update(lik_params)
# return init_params
# def get_hypers(self):
# """Summary
# Returns:
# TYPE: Description
# """
# sgp_params = self.sgp_layer.get_hypers()
# lik_params = self.lik_layer.get_hypers()
# params = dict(sgp_params)
# params.update(lik_params)
# return params
class SGPSSM(Base_SGPSSM):
"""Summary
Attributes:
Din (TYPE): Description
Dout (TYPE): Description
emi_layer (TYPE): Description
lik (TYPE): Description
M (TYPE): Description
N (TYPE): Description
sgp_layer (TYPE): Description
sn (int): Description
updated (bool): Description
x_next_1 (TYPE): Description
x_next_2 (TYPE): Description
x_prev_1 (TYPE): Description
x_prev_2 (TYPE): Description
x_prior_1 (TYPE): Description
x_prior_2 (TYPE): Description
x_up_1 (TYPE): Description
x_up_2 (TYPE): Description
"""
def __init__(self, y_train, hidden_size, no_pseudo,
lik='Gaussian', prior_mean=0, prior_var=1,
x_control=None, gp_emi=False, control_to_emi=True):
"""Summary
Args:
y_train (TYPE): Description
hidden_size (TYPE): Description
no_pseudo (TYPE): Description
lik (str, optional): Description
prior_mean (int, optional): Description
prior_var (int, optional): Description
Raises:
NotImplementedError: Description
"""
super(SGPSSM, self).__init__(
y_train, hidden_size, no_pseudo,
lik, prior_mean, prior_var,
x_control, gp_emi, control_to_emi)
self.dyn_layer = SGP_Layer(
self.N - 1, self.Din + self.Dcon_dyn, self.Din, self.M)
if gp_emi:
self.emi_layer = SGP_Layer(
self.N, self.Din + self.Dcon_emi, self.Dout, self.M)
# natural params for latent variables
N = self.N
Din = self.Din
self.x_prev_1 = np.zeros((N, Din))
self.x_prev_2 = np.zeros((N, Din))
self.x_next_1 = np.zeros((N, Din))
self.x_next_2 = np.zeros((N, Din))
self.x_up_1 = np.zeros((N, Din))
self.x_up_2 = np.zeros((N, Din))
self.x_prior_1 = prior_mean / prior_var
self.x_prior_2 = 1.0 / prior_var
self.UP, self.PREV, self.NEXT = 'UP', 'PREV', 'NEXT'
def inf_parallel(self, epoch, alpha, decay):
"""Summary
Args:
epoch (TYPE): Description
alpha (TYPE): Description
decay (TYPE): Description
Returns:
TYPE: Description
"""
# merge info from output
cav_up_m, cav_up_v, _, _ = self.compute_cavity_x(self.UP, alpha)
if not self.gp_emi:
            # only do this once at the beginning for a Gaussian emission likelihood
if isinstance(self.emi_layer, Gauss_Layer) and epoch == 0:
up_1, up_2 = self.emi_layer.compute_factor(
cav_up_m, cav_up_v, alpha)
self.x_up_1 = up_1
self.x_up_2 = up_2
else:
up_1, up_2 = self.emi_layer.compute_factor(
cav_up_m, cav_up_v, alpha)
self.x_up_1 = up_1
self.x_up_2 = up_2
# deal with the dynamics factors here
cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \
self.compute_cavity_x(self.PREV, alpha)
cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \
self.compute_cavity_x(self.NEXT, alpha)
idxs = np.arange(self.N - 1)
(mprop, vprop, extra_res) = \
self.dyn_layer.forward_prop_thru_cav(
idxs, cav_tm1_m, cav_tm1_v, alpha=alpha)
logZ, dmprop, dvprop, dmt, dvt = \
self.compute_transition_tilted(
mprop, vprop, cav_t_m, cav_t_v, alpha)
grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm(
mprop, vprop, dmprop, dvprop,
extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha)
self.dyn_layer.update_factor(
idxs, alpha, grad_cav, extra_res, decay=decay)
self.update_factor_x(
self.NEXT,
grad_cav['mx'], grad_cav['vx'],
cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2,
decay=decay, alpha=alpha)
self.update_factor_x(
self.PREV,
dmt, dvt,
cav_t_m, cav_t_v, cav_t_1, cav_t_2,
decay=decay, alpha=alpha)
def inf_sequential(self, epoch, alpha, decay):
"""Summary
Args:
epoch (TYPE): Description
alpha (TYPE): Description
decay (TYPE): Description
Returns:
TYPE: Description
"""
# merge info from output
cav_up_m, cav_up_v, _, _ = self.compute_cavity_x(self.UP, alpha)
if not self.gp_emi:
            # only do this once at the beginning for a Gaussian emission likelihood
if isinstance(self.emi_layer, Gauss_Layer) and epoch == 0:
up_1, up_2 = self.emi_layer.compute_factor(
cav_up_m, cav_up_v, alpha)
self.x_up_1 = up_1
self.x_up_2 = up_2
else:
up_1, up_2 = self.emi_layer.compute_factor(
cav_up_m, cav_up_v, alpha)
self.x_up_1 = up_1
self.x_up_2 = up_2
for n in range(0, self.N - 1):
# deal with the dynamics factors here
cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \
self.compute_cavity_x_sequential(self.PREV, [n + 1], alpha)
cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \
self.compute_cavity_x_sequential(self.NEXT, [n], alpha)
(mprop, vprop, extra_res) = \
self.dyn_layer.forward_prop_thru_cav(
[n], cav_tm1_m, cav_tm1_v, alpha=alpha)
logZ, dmprop, dvprop, dmt, dvt = \
self.compute_transition_tilted(
mprop, vprop, cav_t_m, cav_t_v, alpha)
grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm(
mprop, vprop, dmprop, dvprop,
extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha)
self.dyn_layer.update_factor(
[n], alpha, grad_cav, extra_res, decay=decay)
self.update_factor_x_sequential(
self.NEXT,
grad_cav['mx'], grad_cav['vx'],
cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, [n],
decay=decay, alpha=alpha)
self.update_factor_x_sequential(
self.PREV,
dmt, dvt,
cav_t_m, cav_t_v, cav_t_1, cav_t_2, [n + 1],
decay=decay, alpha=alpha)
def inference(self, alpha=1.0, no_epochs=10, parallel=True, decay=0):
"""Summary
Args:
alpha (float, optional): Description
no_epochs (int, optional): Description
parallel (bool, optional): Description
decay (int, optional): Description
Returns:
TYPE: Description
"""
try:
for e in range(no_epochs):
if e % 50 == 0:
print 'epoch %d/%d' % (e, no_epochs)
if parallel:
self.inf_parallel(e, alpha, decay)
else:
self.inf_sequential(e, alpha, decay)
except KeyboardInterrupt:
print 'Caught KeyboardInterrupt ...'
def compute_cavity_x(self, mode, alpha):
"""Summary
Args:
mode (TYPE): Description
alpha (TYPE): Description
Returns:
TYPE: Description
Raises:
NotImplementedError: Description
"""
if mode == self.UP:
cav_up_1 = self.x_prev_1 + self.x_next_1 + \
(1 - alpha) * self.x_up_1
cav_up_2 = self.x_prev_2 + self.x_next_2 + \
(1 - alpha) * self.x_up_2
cav_up_1[0, :] += self.x_prior_1
cav_up_2[0, :] += self.x_prior_2
return cav_up_1 / (cav_up_2 + 1e-16), 1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2
elif mode == self.PREV:
idxs = np.arange(1, self.N)
cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :]
cav_prev_2 = self.x_up_2[idxs, :] + self.x_next_2[idxs, :]
cav_prev_1 += (1 - alpha) * self.x_prev_1[idxs, :]
cav_prev_2 += (1 - alpha) * self.x_prev_2[idxs, :]
return cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2
elif mode == self.NEXT:
idxs = np.arange(0, self.N - 1)
cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :]
cav_next_2 = self.x_up_2[idxs, :] + self.x_prev_2[idxs, :]
cav_next_1 += (1 - alpha) * self.x_next_1[idxs, :]
cav_next_2 += (1 - alpha) * self.x_next_2[idxs, :]
return cav_next_1 / cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2
else:
raise NotImplementedError('unknown mode')
def compute_cavity_x_sequential(self, mode, idxs, alpha):
"""Summary
Args:
mode (TYPE): Description
idxs (TYPE): Description
alpha (TYPE): Description
Returns:
TYPE: Description
Raises:
NotImplementedError: Description
"""
if mode == self.UP:
cav_up_1 = self.x_prev_1 + self.x_next_1 + \
(1 - alpha) * self.x_up_1
cav_up_2 = self.x_prev_2 + self.x_next_2 + \
(1 - alpha) * self.x_up_2
cav_up_1[0, :] += self.x_prior_1
cav_up_2[0, :] += self.x_prior_2
return cav_up_1 / (cav_up_2 + 1e-16), 1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2
elif mode == self.PREV:
cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :]
cav_prev_2 = self.x_up_2[idxs, :] + self.x_next_2[idxs, :]
cav_prev_1 += (1 - alpha) * self.x_prev_1[idxs, :]
cav_prev_2 += (1 - alpha) * self.x_prev_2[idxs, :]
return cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2
elif mode == self.NEXT:
cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :]
cav_next_2 = self.x_up_2[idxs, :] + self.x_prev_2[idxs, :]
cav_next_1 += (1 - alpha) * self.x_next_1[idxs, :]
cav_next_2 += (1 - alpha) * self.x_next_2[idxs, :]
return cav_next_1 / cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2
else:
raise NotImplementedError('unknown mode')
def compute_transition_tilted(self, m_prop, v_prop, m_t, v_t, alpha):
"""Summary
Args:
m_prop (TYPE): Description
v_prop (TYPE): Description
m_t (TYPE): Description
v_t (TYPE): Description
alpha (TYPE): Description
Returns:
TYPE: Description
"""
sn2 = np.exp(2 * self.sn)
v_sum = v_t + v_prop + sn2 / alpha
m_diff = m_t - m_prop
exp_term = -0.5 * m_diff**2 / v_sum
const_term = -0.5 * np.log(2 * np.pi * v_sum)
alpha_term = 0.5 * (1 - alpha) * np.log(2 *
np.pi * sn2) - 0.5 * np.log(alpha)
logZ = exp_term + const_term + alpha_term
dvt = -0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2
dvprop = -0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2
dmt = m_diff / v_sum
dmprop = m_diff / v_sum
return logZ, dmprop, dvprop, dmt, dvt
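    # Editor's note (restating the code above, not new functionality): logZ is the
    # log-normaliser of a Gaussian with combined variance
    #   v_sum = v_t + v_prop + sn^2 / alpha,
    # i.e. logZ = -0.5 * m_diff**2 / v_sum - 0.5 * log(2*pi*v_sum) + alpha_term,
    # and the returned dm*/dv* values are its derivatives with respect to the two
    # means and variances (with signs arranged for the downstream factor update).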
def update_factor_x(
self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav,
decay=0.0, alpha=1.0):
"""Summary
Args:
mode (TYPE): Description
dmcav (TYPE): Description
dvcav (TYPE): Description
mcav (TYPE): Description
vcav (TYPE): Description
n1cav (TYPE): Description
n2cav (TYPE): Description
decay (float, optional): Description
alpha (float, optional): Description
Returns:
TYPE: Description
Raises:
NotImplementedError: Description
"""
new_m = mcav + vcav * dmcav
new_v = vcav - vcav**2 * (dmcav**2 - 2 * dvcav)
new_n2 = 1.0 / new_v
new_n1 = new_n2 * new_m
frac_n2 = new_n2 - n2cav
frac_n1 = new_n1 - n1cav
if mode == self.NEXT:
idxs = np.arange(0, self.N - 1)
cur_n1 = self.x_next_1[idxs, :]
cur_n2 = self.x_next_2[idxs, :]
n1_new = (1 - alpha) * cur_n1 + frac_n1
n2_new = (1 - alpha) * cur_n2 + frac_n2
self.x_next_1[idxs, :] = decay * cur_n1 + (1 - decay) * n1_new
self.x_next_2[idxs, :] = decay * cur_n2 + (1 - decay) * n2_new
elif mode == self.PREV:
idxs = np.arange(1, self.N)
cur_n1 = self.x_prev_1[idxs, :]
cur_n2 = self.x_prev_2[idxs, :]
n1_new = (1 - alpha) * cur_n1 + frac_n1
n2_new = (1 - alpha) * cur_n2 + frac_n2
self.x_prev_1[idxs, :] = decay * cur_n1 + (1 - decay) * n1_new
self.x_prev_2[idxs, :] = decay * cur_n2 + (1 - decay) * n2_new
else:
raise NotImplementedError('unknown mode')
def update_factor_x_sequential(
self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav,
idxs, decay=0.0, alpha=1.0):
"""Summary
Args:
mode (TYPE): Description
dmcav (TYPE): Description
dvcav (TYPE): Description
mcav (TYPE): Description
vcav (TYPE): Description
n1cav (TYPE): Description
n2cav (TYPE): Description
idxs (TYPE): Description
decay (float, optional): Description
alpha (float, optional): Description
Returns:
TYPE: Description
Raises:
NotImplementedError: Description
"""
new_m = mcav + vcav * dmcav
new_v = vcav - vcav**2 * (dmcav**2 - 2 * dvcav)
new_n2 = 1.0 / new_v
new_n1 = new_n2 * new_m
frac_n2 = new_n2 - n2cav
frac_n1 = new_n1 - n1cav
if mode == self.NEXT:
cur_n1 = self.x_next_1[idxs, :]
cur_n2 = self.x_next_2[idxs, :]
n1_new = (1 - alpha) * cur_n1 + frac_n1
n2_new = (1 - alpha) * cur_n2 + frac_n2
self.x_next_1[idxs, :] = decay * cur_n1 + (1 - decay) * n1_new
self.x_next_2[idxs, :] = decay * cur_n2 + (1 - decay) * n2_new
elif mode == self.PREV:
cur_n1 = self.x_prev_1[idxs, :]
cur_n2 = self.x_prev_2[idxs, :]
n1_new = (1 - alpha) * cur_n1 + frac_n1
n2_new = (1 - alpha) * cur_n2 + frac_n2
self.x_prev_1[idxs, :] = decay * cur_n1 + (1 - decay) * n1_new
self.x_prev_2[idxs, :] = decay * cur_n2 + (1 - decay) * n2_new
else:
raise NotImplementedError('unknown mode')
def get_posterior_x(self):
"""Summary
Returns:
TYPE: Description
"""
post_1 = self.x_next_1 + self.x_prev_1 + self.x_up_1
post_2 = self.x_next_2 + self.x_prev_2 + self.x_up_2
post_1[0, :] += self.x_prior_1
post_2[0, :] += self.x_prior_2
vx = 1.0 / post_2
mx = post_1 / post_2
return mx, vx
def get_posterior_y(self):
"""Summary
Returns:
TYPE: Description
"""
mx, vx = self.get_posterior_x()
my, vy, vyn = self.emi_layer.output_probabilistic(mx, vx)
return my, vy, vyn
# def predict_f(self, inputs):
# """Summary
# Args:
# inputs (TYPE): Description
# Returns:
# TYPE: Description
# """
# if not self.updated:
# self.sgp_layer.update_posterior()
# self.updated = True
# mf, vf = self.sgp_layer.forward_prop_thru_post(inputs)
# return mf, vf
# def sample_f(self, inputs, no_samples=1):
# """Summary
# Args:
# inputs (TYPE): Description
# no_samples (int, optional): Description
# Returns:
# TYPE: Description
# """
# if not self.updated:
# self.sgp_layer.update_posterior()
# self.updated = True
# K = no_samples
# fs = np.zeros((inputs.shape[0], self.Dout, K))
# # TODO: remove for loop here
# for k in range(K):
# fs[:, :, k] = self.sgp_layer.sample(inputs)
# return fs
# def predict_y(self, inputs):
# """Summary
# Args:
# inputs (TYPE): Description
# Returns:
# TYPE: Description
# """
# if not self.updated:
# self.sgp_layer.update_posterior()
# self.updated = True
# mf, vf = self.sgp_layer.forward_prop_thru_post(inputs)
# my, vy = self.emi_layer.output_probabilistic(mf, vf)
# return my, vy
# def update_hypers(self, params):
# """Summary
# Args:
# params (TYPE): Description
# Returns:
# TYPE: Description
# """
# self.sgp_layer.update_hypers(params)
# self.emi_layer.update_hypers(params)
# self.sn = params['sn']
# def init_hypers(self):
# """Summary
# Returns:
# TYPE: Description
# """
# sgp_params = self.sgp_layer.init_hypers()
# lik_params = self.emi_layer.init_hypers()
# ssm_params = {'sn': np.log(0.001)}
# init_params = dict(sgp_params)
# init_params.update(lik_params)
# init_params.update(ssm_params)
# return init_params
# def get_hypers(self):
# """Summary
# Returns:
# TYPE: Description
# """
# sgp_params = self.sgp_layer.get_hypers()
# emi_params = self.emi_layer.get_hypers()
# ssm_params = {'sn': self.sn}
# params = dict(sgp_params)
# params.update(emi_params)
# params.update(ssm_params)
# return params
class SGP_Layer_rank_one(object):
"""Summary
Attributes:
Din (TYPE): Description
Dout (TYPE): Description
Kuu (TYPE): Description
Kuuinv (TYPE): Description
ls (TYPE): Description
M (TYPE): Description
mu (TYPE): Description
N (TYPE): Description
sf (int): Description
Splusmm (TYPE): Description
Su (TYPE): Description
Suinv (TYPE): Description
SuinvMu (TYPE): Description
t1 (TYPE): Description
t2 (TYPE): Description
zu (TYPE): Description
"""
def __init__(self, no_train, input_size, output_size, no_pseudo):
"""Summary
Args:
no_train (TYPE): Description
input_size (TYPE): Description
output_size (TYPE): Description
no_pseudo (TYPE): Description
"""
self.Din = Din = input_size
self.Dout = Dout = output_size
self.M = M = no_pseudo
self.N = N = no_train
# factor variables
self.variances = np.zeros([N, Dout])
self.variances.fill(1e20)
self.means = np.zeros([N, Dout])
# pep variables
self.gamma = np.zeros([Dout, M])
self.beta = np.zeros([Dout, M, M])
# numpy variable for inducing points, Kuuinv, Kuu and its gradients
self.zu = np.zeros([M, Din])
self.Kuu = np.zeros([M, M])
self.Kuuinv = np.zeros([M, M])
# variables for the hyperparameters
self.ls = np.zeros([Din, ])
self.sf = 0
def forward_prop_thru_post(self, x):
"""Summary
Args:
x (TYPE): Description
Returns:
TYPE: Description
"""
Kuuinv = self.Kuuinv
A = np.einsum('ab,db->da', Kuuinv, self.mu)
B = np.einsum(
'ab,dbc->dac',
Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv
kff = np.exp(2 * self.sf)
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
mout = np.einsum('nm,dm->nd', kfu, A)
Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu)
vout = kff + Bpsi2
return mout, vout
def sample(self, x):
"""Summary
Args:
x (TYPE): Description
Returns:
TYPE: Description
"""
Su = self.Su
mu = self.mu
Lu = np.linalg.cholesky(Su)
epsilon = np.random.randn(self.Dout, self.M)
u_sample = mu + np.einsum('dab,db->da', Lu, epsilon)
kff = compute_kernel(2 * self.ls, 2 * self.sf, x, x)
kff += np.diag(JITTER * np.ones(x.shape[0]))
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
qfu = np.dot(kfu, self.Kuuinv)
mf = np.einsum('nm,dm->nd', qfu, u_sample)
vf = kff - np.dot(qfu, kfu.T)
Lf = np.linalg.cholesky(vf)
epsilon = np.random.randn(x.shape[0], self.Dout)
f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon)
return f_sample
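    # Editor's note: sample() draws a function sample in two stages -- first
    # u ~ N(mu, Su) at the inducing inputs via a Cholesky reparameterisation, then
    # f given u from the conditional GP with mean Kfu Kuu^-1 u and covariance
    # Kff - Kfu Kuu^-1 Kuf; the JITTER term keeps the Cholesky of the conditional
    # covariance numerically stable.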
def compute_kuu(self):
"""Summary
Returns:
TYPE: Description
"""
# update kuu and kuuinv
ls = self.ls
sf = self.sf
Dout = self.Dout
M = self.M
zu = self.zu
self.Kuu = compute_kernel(2 * ls, 2 * sf, zu, zu)
self.Kuu += np.diag(JITTER * np.ones((M, )))
self.Kuuinv = np.linalg.inv(self.Kuu)
def update_posterior(self, x_train=None, new_hypers=False):
"""Summary
Returns:
TYPE: Description
"""
# compute the posterior approximation
if new_hypers and x_train is not None:
Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu)
KuuinvKuf = np.dot(self.Kuuinv, Kfu.T)
self.Kfu = Kfu
self.KuuinvKuf = KuuinvKuf
self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train)
KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances)
T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf)
T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances)
Vinv = self.Kuuinv + T2u
self.Suinv = Vinv
self.Su = np.linalg.inv(Vinv)
self.mu = np.einsum('dab,db->da', self.Su, T1u)
self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu)
self.beta = self.Kuuinv - np.einsum('ab,dbc->dac',
self.Kuuinv,
np.einsum('dab,bc->dac', self.Su, self.Kuuinv))
def init_hypers(self, x_train=None, key_suffix=''):
"""Summary
Args:
x_train (None, optional): Description
key_suffix (str, optional): Description
Returns:
TYPE: Description
"""
# dict to hold hypers, inducing points and parameters of q(U)
N = self.N
M = self.M
Din = self.Din
Dout = self.Dout
if x_train is None:
ls = np.log(np.ones((Din, )) + 0.1 * np.random.rand(Din, ))
sf = np.log(np.array([1]))
zu = np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1, Din))
else:
if N < 10000:
centroids, label = kmeans2(x_train, M, minit='points')
else:
randind = np.random.permutation(N)
centroids = x_train[randind[0:M], :]
zu = centroids
if N < 10000:
X1 = np.copy(x_train)
else:
randind = np.random.permutation(N)
                X1 = x_train[randind[:5000], :]
x_dist = cdist(X1, X1, 'euclidean')
            triu_ind = np.triu_indices(X1.shape[0])
ls = np.zeros((Din, ))
d2imed = np.median(x_dist[triu_ind])
for i in range(Din):
ls[i] = np.log(d2imed + 1e-16)
sf = np.log(np.array([0.5]))
params = dict()
params['sf' + key_suffix] = sf
params['ls' + key_suffix] = ls
params['zu' + key_suffix] = zu
return params
def get_hypers(self, key_suffix=''):
"""Summary
Args:
key_suffix (str, optional): Description
Returns:
TYPE: Description
"""
params = {}
M = self.M
Din = self.Din
Dout = self.Dout
params['ls' + key_suffix] = self.ls
params['sf' + key_suffix] = self.sf
params_zu_i = self.zu
params['zu' + key_suffix] = self.zu
return params
def update_hypers(self, params, x_train, key_suffix=''):
"""Summary
Args:
params (TYPE): Description
key_suffix (str, optional): Description
Returns:
TYPE: Description
"""
self.ls = params['ls' + key_suffix]
self.sf = params['sf' + key_suffix]
self.zu = params['zu' + key_suffix]
# update Kuu given new hypers
self.compute_kuu()
# compute mu and Su for each layer
self.update_posterior(x_train, new_hypers=True)
def compute_cavity(self, idxs, alpha):
# deletion
p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :]
k_i = self.Kfu[idxs, :]
k_ii = self.Kff_diag[idxs][:, np.newaxis]
gamma = self.gamma
beta = self.beta
h_si = p_i - np.einsum('dab,nb->nda', beta, k_i)
variance_i = self.variances[idxs, :]
mean_i = self.means[idxs, :]
dlogZd_dmi2 = 1.0 / (variance_i/alpha -
np.sum(k_i[:, np.newaxis, :] * h_si, axis=2))
dlogZd_dmi = -dlogZd_dmi2 * (mean_i -
np.sum(k_i[:, np.newaxis, :] * gamma, axis=2))
hd1 = h_si * dlogZd_dmi[:, :, np.newaxis]
hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis]
gamma_si = gamma + hd1
beta_si = beta - hd2h
# projection
h = p_i - np.einsum('ndab,nb->nda', beta_si, k_i)
m_si_i = np.einsum('na,nda->nd', k_i, gamma_si)
v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i)
return m_si_i, v_si_ii, [h, beta_si, gamma_si]
def update_factor(self, idxs, mcav, vcav, dm, dm2, dv, alpha, prop_info):
h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2]
k_i = self.Kfu[idxs, :]
variance_i = self.variances[idxs, :]
mean_i = self.means[idxs, :]
var_i_new = -1.0 / dm2 - np.sum(k_i[:, np.newaxis, :] * h, axis=2)
mean_i_new = mcav - dm / dm2
var_new_parallel = 1 / (1 / var_i_new + 1 / variance_i * (1 - alpha))
mean_div_var_i_new = (mean_i_new / var_i_new +
mean_i / variance_i * (1 - alpha))
mean_new_parallel = mean_div_var_i_new * var_new_parallel
# if alpha == 1:
# rho = 0.5
# n1_new = 1.0 / var_new_parallel
# n2_new = mean_new_parallel / var_new_parallel
# n1_ori = 1.0 / variance_i
# n2_ori = mean_i / variance_i
# n1_damped = rho * n1_new + (1.0 - rho) * n1_ori
# n2_damped = rho * n2_new + (1.0 - rho) * n2_ori
# var_new_parallel = 1.0 / n1_damped
# mean_new_parallel = var_new_parallel * n2_damped
self.variances[idxs, :] = var_new_parallel
self.means[idxs, :] = mean_new_parallel
def backprop_grads_reg(self, idxs, m, v, dm, dm2, dv, x, alpha, prop_info):
N = self.N
Nb = idxs.shape[0]
sf2 = np.exp(2 * self.sf)
Dout = self.Dout
Kuu = self.Kuu
Kuuinv = self.Kuuinv
Su = self.Su
mu = self.mu
Suinv = self.Suinv
p_i = self.KuuinvKuf[:, idxs].T
h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2]
kfu = self.Kfu[idxs, :]
variance_i = self.variances[idxs, :]
mean_i = self.means[idxs, :]
# compute cavity covariance
betacavKuu = np.einsum('ndab,bc->ndac', beta_si, Kuu)
mcav = np.einsum('ab,ndb->nda', Kuu, gamma_si)
Sucav = Kuu - np.einsum('ab,ndbc->ndac', Kuu, betacavKuu)
signSu, logdetSu = np.linalg.slogdet(Su)
signKuu, logdetKuu = np.linalg.slogdet(Kuu)
Suinvm = np.einsum('dab,db->da', Suinv, mu)
term1 = 0.5 * (np.sum(logdetSu) - Dout * logdetKuu + np.sum(mu * Suinvm))
variance_i = self.variances[idxs, :]
mean_i = self.means[idxs, :]
tn = 1.0 / variance_i
gn = mean_i
wnScav = np.einsum('na,ndab->ndb', p_i, Sucav)
wnScavwn = np.einsum('ndb,nb->nd', wnScav, p_i)
wnScavSinvm = np.sum(wnScav * Suinvm, axis=2)
wnS = np.einsum('na,dab->ndb', p_i, Su)
wnSwn = np.sum(wnS * p_i[:, np.newaxis, :], axis=2)
mwn = np.sum(mu * p_i[:, np.newaxis, :], axis=2)
oneminuswnSwn = 1 - alpha * tn * wnSwn
term2a = 0.5 * alpha * tn**2 * gn**2 * wnScavwn
term2b = - gn * tn * wnScavSinvm
term2c = 0.5 * tn * mwn**2 / oneminuswnSwn
term2d = -0.5 / alpha * np.log(oneminuswnSwn)
term2 = N / Nb * np.sum(term2a + term2b + term2c + term2d)
sgp_contrib = - term1 - term2
KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv, mcav)
dmiKuuinvMcav = dm[:, :, np.newaxis] * KuuinvMcav
dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i)
VcavKuuinvKufi = np.einsum('ndab,nb->nda', Sucav, p_i)
KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi)
p_idlogZ_dvi = p_i[:, np.newaxis, :] * dv[:, :, np.newaxis]
temp1 = - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi)
temp2 = np.transpose(temp1, [0, 1])
temp3 = np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi)
dKuu_via_vi = temp1 + temp2 + temp3
dKuu_via_logZ = dKuu_via_mi + dKuu_via_vi
dKfu_via_mi = dmiKuuinvMcav
dKfu_via_vi = 2 * dv[:, :, np.newaxis] * (-p_i[:, np.newaxis, :] + KuuinvVcavKuuinvKufi)
dKfu_via_logZ = np.sum(dKfu_via_mi + dKfu_via_vi, axis=1)
dsf2, dls, dzu = compute_kfu_derivatives(
dKfu_via_logZ, kfu, np.exp(self.ls), sf2, x, self.zu)
dls = dls * np.exp(self.ls)
dsf2 += np.sum(dv)
dsf = 2 * sf2 * dsf2
# compute the gradients
Vmm = Su + np.einsum('da,db->dab', mu, mu)
S = self.Dout * Kuuinv - np.sum(np.einsum('ab,dbc->dac',
Kuuinv, np.einsum('dab,bc->dac', Vmm, Kuuinv)), axis=0)
S = 0.5 * S + dKuu_via_logZ
dhyp = d_trace_MKzz_dhypers(
2*self.ls, 2*self.sf, self.zu, S,
Kuu - np.diag(JITTER * np.ones(self.M)))
grads = {}
grads['sf'] = 2*dhyp[0] + dsf
grads['ls'] = 2*dhyp[1] + dls
grads['zu'] = dhyp[2] + dzu
return sgp_contrib, grads
class SGPR_rank_one(Base_SGPR):
"""Summary
Attributes:
Din (TYPE): Description
Dout (TYPE): Description
lik_layer (TYPE): Description
M (TYPE): Description
N (TYPE): Description
sgp_layer (TYPE): Description
updated (bool): Description
x_train (TYPE): Description
"""
def __init__(self, x_train, y_train, no_pseudo, lik='Gaussian'):
"""Summary
Args:
x_train (TYPE): Description
y_train (TYPE): Description
no_pseudo (TYPE): Description
lik (str, optional): Description
Raises:
NotImplementedError: Description
"""
super(SGPR_rank_one, self).__init__(x_train, y_train, no_pseudo, lik)
self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M)
def objective_function(self, params, mb_size, alpha=1.0, prop_mode=PROP_MM):
"""Summary
Args:
params (TYPE): Description
mb_size (TYPE): Description
alpha (float, optional): Description
prop_mode (TYPE, optional): Description
Returns:
TYPE: Description
"""
N = self.N
if mb_size >= N:
idxs = np.arange(N)
xb = self.x_train
yb = self.y_train
else:
idxs = np.random.choice(N, mb_size, replace=False)
xb = self.x_train[idxs, :]
yb = self.y_train[idxs, :]
# update model with new hypers
self.update_hypers(params)
# run power-EP and compute grads
no_ep_sweeps = 10 # TODO: put this in config
parallel = True # TODO: put this in config
energy, grad_all = self.run_pep(idxs, no_ep_sweeps, alpha, parallel,
compute_energy=True)
return energy, grad_all
def run_pep(self, train_idxs, no_sweeps, alpha, parallel, compute_energy=False, display_steps=10):
if parallel:
return self.run_pep_parallel(
train_idxs, no_sweeps, alpha, compute_energy, display_steps)
else:
# TODO
return self.run_pep_sequential(
train_idxs, no_sweeps, alpha, compute_energy, display_steps)
def run_pep_parallel(self, idxs, no_sweeps, alpha, compute_energy, display_steps):
batch_size = idxs.shape[0]
scale_logZ = - self.N * 1.0 / batch_size / alpha
# scale_logZ = 0
xb = self.x_train[idxs, :]
yb = self.y_train[idxs, :]
energy = {}
grad_all = {}
for k in range(no_sweeps):
# if k % display_steps == 0:
# print 'PEP, epoch: %d / %d' % (k, no_sweeps)
find_log_lik = compute_energy and (k == no_sweeps-1)
m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(idxs, alpha)
logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z(
m_cav, v_cav, yb, alpha, compute_dm2=True)
# dm *= 0
# dm2 *= 0
# dm2 += 1e-16
# dv *= 0
self.sgp_layer.update_factor(
idxs, m_cav, v_cav, dm, dm2, dv, alpha, prop_info)
self.sgp_layer.update_posterior(None, new_hypers=False)
if find_log_lik:
N = self.N
lik_contrib = scale_logZ * np.sum(logZ)
dm_s = scale_logZ * dm
dv_s = scale_logZ * dv
dm2_s = scale_logZ * dm2
sgp_contrib, sgp_grad = self.sgp_layer.backprop_grads_reg(
idxs, m_cav, v_cav, dm_s, dm2_s, dv_s, xb, alpha, prop_info)
lik_grad = self.lik_layer.backprop_grads(
m_cav, v_cav, dm, dv, alpha, scale_logZ)
energy = sgp_contrib + lik_contrib
grad_all = {}
for key in sgp_grad.keys():
grad_all[key] = sgp_grad[key]
for key in lik_grad.keys():
grad_all[key] = lik_grad[key]
energy /= N
for key in grad_all.keys():
grad_all[key] /= N
return energy, grad_all
def run_pep_sequential(self, idxs, no_sweeps, alpha, compute_energy,
display_steps):
batch_size = idxs.shape[0]
scale_logZ = - self.N * 1.0 / batch_size / alpha
xb = self.x_train[idxs, :]
yb = self.y_train[idxs, :]
for k in range(no_sweeps):
if k % display_steps == 0:
print 'PEP, epoch: %d / %d' % (k, no_sweeps)
find_log_lik = compute_energy and (k == no_sweeps-1)
for i in range(batch_size):
m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(
[idxs[i]], alpha)
logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z(
m_cav, v_cav, yb[i], alpha, compute_dm2=True)
self.sgp_layer.update_factor(
[idxs[i]], m_cav, v_cav, dm, dm2, dv, alpha, prop_info)
self.sgp_layer.update_posterior(None, new_hypers=False)
def update_hypers(self, params):
"""Summary
Args:
params (TYPE): Description
"""
self.sgp_layer.update_hypers(params, self.x_train)
self.lik_layer.update_hypers(params)
| 2.125 | 2 |
DrawBot.roboFontExt/lib/settings.py | michielkauwatjoe/drawBotRoboFontExtension | 6 | 12795040 | from vanilla import *
from mojo.extensions import getExtensionDefault, setExtensionDefault
class DrawBotSettingsController(object):
def __init__(self):
self.w = Window((250, 45), "DrawBot Settings")
self.w.openPythonFilesInDrawBot = CheckBox((10, 10, -10, 22),
"Open .py files directly in DrawBot.",
value=getExtensionDefault("com.drawBot.openPyFileDirectly", False),
callback=self.openPythonFilesInDrawBotCallback)
self.w.open()
def openPythonFilesInDrawBotCallback(self, sender):
setExtensionDefault("com.drawBot.openPyFileDirectly", sender.get())
DrawBotSettingsController()
| 2.15625 | 2 |
abupy/FactorBuyBu/ABuFactorBuyDM.py | luqin/firefly | 1 | 12795041 | # -*- encoding:utf-8 -*-
"""
    Example buy-timing factor: a dynamic adaptive dual moving average (double MA) strategy
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import numpy as np
from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin
from ..IndicatorBu.ABuNDMa import calc_ma_from_prices
from ..CoreBu.ABuPdHelper import pd_resample
from ..TLineBu.ABuTL import AbuTLine
__author__ = '阿布'
__weixin__ = 'abu_quant'
# noinspection PyAttributeOutsideInit
class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin):
"""示例买入动态自适应双均线策略"""
def _init_self(self, **kwargs):
"""
            Optional kwargs: fast: fast moving-average period; unset by default, in which case an adaptive dynamic fast line is used
            Optional kwargs: slow: slow moving-average period; unset by default, in which case an adaptive dynamic slow line is used
            Optional kwargs: resample_max: maximum resample period for the dynamic slow line, default 100, i.e. the dynamic slow line is at most 100
            Optional kwargs: resample_min: minimum resample period for the dynamic slow line, default 10, i.e. the dynamic slow line is at least 10
            Optional kwargs: change_threshold: selection threshold for the dynamic slow line, default 0.12
"""
        # fast moving-average period, defaults to a 5-day moving average
        self.ma_fast = kwargs.pop('fast', -1)
        self.dynamic_fast = False
        if self.ma_fast == -1:
            self.ma_fast = 5
            self.dynamic_fast = True
        # slow moving-average period, defaults to a 60-day moving average
        self.ma_slow = kwargs.pop('slow', -1)
        self.dynamic_slow = False
        if self.ma_slow == -1:
            self.ma_slow = 60
            self.dynamic_slow = True
        # maximum resample period for the dynamic slow line, default 100
        self.resample_max = kwargs.pop('resample_max', 100)
        # minimum resample period for the dynamic slow line, default 10
        self.resample_min = kwargs.pop('resample_min', 10)
        # selection threshold for the dynamic slow line, default 0.12
        self.change_threshold = kwargs.pop('change_threshold', 0.12)
        if self.ma_fast >= self.ma_slow:
            # the slow period must be greater than the fast period
            raise ValueError('ma_fast >= self.ma_slow !')
        # the xd window needs to be one day longer than ma_slow so that both today's and yesterday's MA values are available to detect golden/death crosses
        kwargs['xd'] = self.ma_slow + 1
        # once xd is set, the base-class initialisation for xd can be reused directly
        super(AbuDoubleMaBuy, self)._init_self(**kwargs)
        # the name shown in the generated orders_pd output
self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow)
def _dynamic_calc_fast(self, today):
"""
            Decide the fast-line value dynamically from how choppy the benchmark has been over the last month, using these rules:
            if the benchmark's last month of movement can be described by:
            a 1st-order fit: fast=slow * 0.05 eg: slow=60->fast=60*0.05=3
            a 2nd-order fit: fast=slow * 0.15 eg: slow=60->fast=60*0.15=9
            a 3rd-order fit: fast=slow * 0.3 eg: slow=60->fast=60*0.3=18
            a 4th-or-higher-order fit: fast=slow * 0.5 eg: slow=60->fast=60*0.5=30
"""
        # the strategy holds self.benchmark, an AbuBenchmark instance; benchmark.kl_pd is the corresponding market benchmark series
        benchmark_df = self.benchmark.kl_pd
        # take the benchmark row for today
        benchmark_today = benchmark_df[benchmark_df.date == today.date]
        if benchmark_today.empty:
            # default to 0.15 of the slow line
            return math.ceil(self.ma_slow * 0.15)
        # prepare the start/end keys for slicing the benchmark's last month of movement
        end_key = int(benchmark_today.iloc[0].key)
        start_key = end_key - 20
        if start_key < 0:
            # default to 0.15 of the slow line
            return math.ceil(self.ma_slow * 0.15)
        # slice out the 20 trading days leading up to today
        benchmark_month = benchmark_df[start_key:end_key + 1]
        # build an AbuTLine object from the benchmark's closing prices over the last month
        benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line')
        # compute the lowest polynomial order that adequately represents this month's curve
        least = benchmark_month_line.show_least_valid_poly(show=False)
        if least == 1:
            # a 1st-order fit suffices: fast=slow * 0.05 eg: slow=60->fast=60*0.05=3
            return math.ceil(self.ma_slow * 0.05)
        elif least == 2:
            # a 2nd-order fit suffices: fast=slow * 0.15 eg: slow=60->fast=60*0.15=9
            return math.ceil(self.ma_slow * 0.15)
        elif least == 3:
            # a 3rd-order fit suffices: fast=slow * 0.3 eg: slow=60->fast=60*0.3=18
            return math.ceil(self.ma_slow * 0.3)
        else:
            # a 4th-or-higher-order fit is needed: fast=slow * 0.5 eg: slow=60->fast=60*0.5=30
return math.ceil(self.ma_slow * 0.5)
def _dynamic_calc_slow(self, today):
"""
            Decide the slow-line value dynamically using the following rule:
            take a recent slice of the price series and resample it at a candidate period,
            apply pct_change to the resampled result and take its absolute value,
            then average the absolute pct_change series, i.e. the mean change magnitude within the resample period;
            the candidate period iterates over 10, 15, 20, 30, ... and the first period whose
            mean change magnitude is > 0.12 is taken as the value of slow
"""
last_kl = self.past_today_kl(today, self.resample_max)
if last_kl.empty:
            # return the default slow value of 60
return 60
for slow in np.arange(self.resample_min, self.resample_max, 5):
rule = '{}D'.format(slow)
change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean()
"""
eg: pd_resample(last_kl.close, rule, how='mean')
2014-07-23 249.0728
2014-09-03 258.3640
2014-10-15 240.8663
2014-11-26 220.1552
2015-01-07 206.0070
2015-02-18 198.0932
2015-04-01 217.9791
2015-05-13 251.3640
2015-06-24 266.4511
2015-08-05 244.3334
2015-09-16 236.2250
2015-10-28 222.0441
2015-12-09 222.0574
2016-01-20 177.2303
2016-03-02 226.8766
2016-04-13 230.6000
2016-05-25 216.7596
2016-07-06 222.6420
abs(pd_resample(last_kl.close, rule, how='mean').pct_change())
2014-09-03 0.037
2014-10-15 0.068
2014-11-26 0.086
2015-01-07 0.064
2015-02-18 0.038
2015-04-01 0.100
2015-05-13 0.153
2015-06-24 0.060
2015-08-05 0.083
2015-09-16 0.033
2015-10-28 0.060
2015-12-09 0.000
2016-01-20 0.202
2016-03-02 0.280
2016-04-13 0.016
2016-05-25 0.060
2016-07-06 0.027
abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean():
0.080
"""
if change > self.change_threshold:
"""
                return the first slow whose change exceeds change_threshold;
                change_threshold defaults to 0.12 -- period-breakout strategies generally need > 0.08, and 0.12 leaves arbitrage headroom for the fast line
"""
return slow
        # if no value in np.arange(min, max, 5) qualifies, return max
return self.resample_max
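    # Editor's sketch (standalone illustration, not part of abupy): the same window
    # search can be reproduced with plain pandas, assuming `close` is a Series with
    # a DatetimeIndex:
    #   for slow in range(10, 100, 5):
    #       change = close.resample('{}D'.format(slow)).mean().pct_change().abs().mean()
    #       if change > 0.12:
    #           break
    # i.e. slow is the first resample period whose average absolute period-over-period
    # change exceeds change_threshold.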
def fit_month(self, today):
        # fit_month is called once per month during the backtest
        if self.dynamic_slow:
            # ma_slow must be computed first, because the dynamic fast line depends on slow
            self.ma_slow = self._dynamic_calc_slow(today)
        if self.dynamic_fast:
            # compute the fast line dynamically
            self.ma_fast = self._dynamic_calc_fast(today)
        # after the dynamic recalculation, update the name shown in the generated orders_pd output
self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow)
# import logging
# logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow))
def fit_day(self, today):
"""双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号"""
# 计算快线
fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1)
# 计算慢线
slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1)
if len(fast_line) >= 2 and len(slow_line) >= 2:
# 今天的快线值
fast_today = fast_line[-1]
# 昨天的快线值
fast_yesterday = fast_line[-2]
# 今天的慢线值
slow_today = slow_line[-1]
# 昨天的慢线值
slow_yesterday = slow_line[-2]
if slow_yesterday >= fast_yesterday and fast_today > slow_today:
# 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入
return self.buy_tomorrow()
"""可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days = self.xd降低了频率"""
# def buy_tomorrow(self):
# return self.make_buy_order(self.today_ind)
| 2.25 | 2 |
req_compile/metadata/patch.py | sputt/qer | 6 | 12795042 | """Patching modules and objects"""
import contextlib
import sys
def begin_patch(module, member, new_value):
if isinstance(module, str):
if module not in sys.modules:
return None
module = sys.modules[module]
if not hasattr(module, member):
old_member = None
else:
old_member = getattr(module, member)
setattr(module, member, new_value)
return module, member, old_member
def end_patch(token):
if token is None:
return
module, member, old_member = token
if old_member is None:
delattr(module, member)
else:
setattr(module, member, old_member)
@contextlib.contextmanager
def patch(*args):
"""Manager a patch in a contextmanager"""
tokens = []
for idx in range(0, len(args), 3):
module, member, new_value = args[idx : idx + 3]
tokens.append(begin_patch(module, member, new_value))
try:
yield
finally:
for token in tokens[::-1]:
end_patch(token)
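# Editor's example (not part of req_compile): a minimal usage sketch of the
# patch() contextmanager defined above, temporarily replacing os.getcwd and
# restoring the original attribute on exit.
if __name__ == "__main__":
    import os

    with patch(os, "getcwd", lambda: "/tmp/fake-cwd"):
        print(os.getcwd())  # -> /tmp/fake-cwd while the patch is active
    print(os.getcwd())  # the original os.getcwd is restored here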
| 3.15625 | 3 |
algoritmos/sort_recursive.py | corahama/python | 1 | 12795043 | def sort_array(stack):
if len(stack) == 0:
return stack
top = stack.pop()
sort_array(stack)
insert_element_in_ordered_stack(stack, top)
return stack
def insert_element_in_ordered_stack(stack, value):
if len(stack) == 0 or stack[-1] <= value:
stack.append(value)
return
top = stack.pop()
insert_element_in_ordered_stack(stack, value)
stack.append(top)
if __name__ == '__main__':
print(sort_array([-5, 2, -2, 4, 3, 1]))
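    # Editor's note: sort_array works by recursively popping every element and then
    # re-inserting each one at its ordered position, so the call above prints
    # [-5, -2, 1, 2, 3, 4]. Edge cases behave as expected:
    print(sort_array([]))         # -> []
    print(sort_array([1, 2, 3]))  # already sorted input is returned unchanged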
| 4.15625 | 4 |
samples/parallel-processing-california-housing-data/ml_service/pipelines/build_pipeline.py | h2floh/MLOpsManufacturing-1 | 20 | 12795044 | <filename>samples/parallel-processing-california-housing-data/ml_service/pipelines/build_pipeline.py
"""Build pipeline."""
from datetime import datetime
from logging import INFO, Formatter, StreamHandler, getLogger
from azureml.core import Environment, Workspace
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.runconfig import RunConfiguration
from azureml.pipeline.core import (Pipeline, PipelineData, PipelineEndpoint)
from azureml.pipeline.core._restclients.aeva.models.error_response import \
ErrorResponseException
from azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep,
PythonScriptStep)
from ml_service.util.env_variables import Env
from ml_service.util.pipeline_utils import get_compute
def main():
"""Build pipeline."""
# Environment variables
env = Env()
# Azure ML workspace
aml_workspace = Workspace.get(
name=env.workspace_name,
subscription_id=env.subscription_id,
resource_group=env.resource_group,
)
logger.info(f"Azure ML workspace: {aml_workspace}")
# Azure ML compute cluster
aml_compute = get_compute(aml_workspace, env.compute_name)
logger.info(f"Aazure ML compute cluster: {aml_compute}")
# Azure ML environment
environment = Environment(name=env.aml_env_name)
conda_dep = CondaDependencies(conda_dependencies_file_path="./local_development/dev_dependencies.yml")
environment.python.conda_dependencies = conda_dep
run_config = RunConfiguration()
run_config.environment = environment
# Pipeline Data
preparation_pipelinedata = PipelineData("preparation_pipelinedata", is_directory=True).as_dataset()
extraction_pipelinedata = PipelineData("extraction_pipelinedata", is_directory=True)
training_pipelinedata = PipelineData("training_pipelinedata", is_directory=True)
# List of pipeline steps
step_list = list()
preparation_step = PythonScriptStep(
name="preparation-step",
compute_target=aml_compute,
source_directory=env.sources_directory_train,
script_name=env.preparation_step_script_path,
outputs=[preparation_pipelinedata],
arguments=[
"--input_path", env.input_dir,
"--output_path", preparation_pipelinedata,
"--datastore_name", env.blob_datastore_name
],
runconfig=run_config
)
step_list.append(preparation_step)
parallel_run_config = ParallelRunConfig(
source_directory=env.sources_directory_train,
entry_script=env.extraction_step_script_path,
mini_batch_size=env.mini_batch_size,
error_threshold=env.error_threshold,
output_action="append_row",
environment=environment,
compute_target=aml_compute,
node_count=env.node_count,
run_invocation_timeout=env.run_invocation_timeout,
process_count_per_node=env.process_count_per_node,
append_row_file_name="extraction_output.txt")
extraction_step = ParallelRunStep(
name="extraction-step",
inputs=[preparation_pipelinedata],
output=extraction_pipelinedata,
arguments=[
"--output_dir", extraction_pipelinedata
],
parallel_run_config=parallel_run_config
)
step_list.append(extraction_step)
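    # Editor's note: the ParallelRunStep above fans the prepared data out across
    # node_count * process_count_per_node workers in mini_batch_size chunks; with
    # output_action="append_row" each worker's results are concatenated into
    # extraction_output.txt, which the training step below reads from
    # extraction_pipelinedata.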
training_step = PythonScriptStep(
name="traning-step",
compute_target=aml_compute,
source_directory=env.sources_directory_train,
script_name=env.training_step_script_path,
inputs=[extraction_pipelinedata],
outputs=[training_pipelinedata],
arguments=[
"--input_dir", extraction_pipelinedata,
"--output_dir", training_pipelinedata
],
runconfig=run_config
)
step_list.append(training_step)
# Build pipeline
pipeline = Pipeline(workspace=aml_workspace, steps=step_list)
pipeline.validate()
logger.info(f"Built pipeline {pipeline}")
# Publish pipeline
published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat())
try:
pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name)
pipeline_endpoint.add_default(published_pipeline)
except ErrorResponseException:
pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name)
if __name__ == "__main__":
logger = getLogger(__name__)
logger.setLevel(INFO)
logger.propagate = False
sh = StreamHandler()
sh.setFormatter(Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(sh)
main()
| 2.234375 | 2 |
aoc2021/test_day2.py | jonsth131/aoc | 0 | 12795045 | <reponame>jonsth131/aoc
from day2 import part1, part2
test_input = ["forward 5", "down 5", "forward 8", "up 3", "down 8", "forward 2"]
def test_part1():
assert part1(test_input) == 150
def test_part2():
assert part2(test_input) == 900
| 2.921875 | 3 |
python/interactivepython1/src/unittest/python/rpsls_tests.py | edmathew/playground | 0 | 12795046 | <gh_stars>0
import unittest
import sys
from rpsls import rpsls
from cStringIO import StringIO
class RPSLSTest(unittest.TestCase):
def setUp(self):
sys.stdout = StringIO()
def tearDown(self):
sys.stdout.close()
sys.stdout = sys.__stdout__
def test_can_call_rpsls_module(self):
rpsls.name_to_number("Test")
def test_rock_is_zero(self):
self.assertEqual(rpsls.name_to_number("rock"), 0)
def test_Spock_is_one(self):
self.assertEqual(rpsls.name_to_number("Spock"), 1)
def test_spock_is_invalid(self):
self.assertEqual(rpsls.name_to_number("spock"), None)
def test_paper_is_two(self):
self.assertEqual(rpsls.name_to_number("paper"), 2)
def test_lizard_is_three(self):
self.assertEqual(rpsls.name_to_number("lizard"), 3)
def test_scissors_is_four(self):
self.assertEqual(rpsls.name_to_number("scissors"), 4)
def test_number_to_name_restores_name_to_number(self):
self.assertEqual("rock", rpsls.number_to_name(rpsls.name_to_number("rock")))
self.assertEqual("Spock", rpsls.number_to_name(rpsls.name_to_number("Spock")))
self.assertEqual("paper", rpsls.number_to_name(rpsls.name_to_number("paper")))
self.assertEqual("lizard", rpsls.number_to_name(rpsls.name_to_number("lizard")))
self.assertEqual("scissors", rpsls.number_to_name(rpsls.name_to_number("scissors")))
def test_invalid_number_returns_None(self):
self.assertEqual(rpsls.number_to_name(-1), None)
def test_first_execution_print_player_chooses(self):
rpsls.rpsls_counter = 0
rpsls.rpsls("Spock")
output = sys.stdout.getvalue()
self.assertEquals(output[:14], "Player chooses")
def test_rpsls_blank_line_if_consecutive_games(self):
rpsls.rpsls_counter = 0
#First Execution
rpsls.rpsls("Test")
sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = StringIO()
#Second execution
rpsls.rpsls("Spock")
first_line = sys.stdout.getvalue()
self.assertEquals(first_line[-2:], "\n\n")
def test_victory_rules(self):
"""
Paper covers Rock
Rock crushes Lizard
Lizard poisons Spock
Spock smashes Scissors
Scissors decapitates Lizard
Lizard eats Paper
Paper disproves Spock
Spock vaporizes Rock
(and as it always has) Rock crushes scissors
"""
rulesList = [
["paper", "scissors", "Computer wins!\n"],
["scissors", "paper", "Player wins!\n"],
["rock", "lizard", "Player wins!\n"],
["Spock", "lizard", "Computer wins!\n"],
["Spock", "scissors", "Player wins!\n"],
["lizard", "scissors", "Computer wins!\n"],
["paper", "Spock", "Player wins!\n"],
["rock", "Spock", "Computer wins!\n"],
["rock", "rock", "Player and computer tie!\n"],
["scissors", "rock", "Computer wins!\n"]]
for r in rulesList:
self.assertWinningRule(r[0], r[1], r[2])
def assertWinningRule(self, player_choice, cpu_choice, winner):
self.tearDown()
self.setUp()
player_number = rpsls.name_to_number(player_choice)
cpu_number = rpsls.name_to_number(cpu_choice)
rpsls.rpsls_compute_winner(player_number, cpu_number)
response = sys.stdout.getvalue()
self.assertEqual(response, winner) | 2.796875 | 3 |
examen/p2/p2.py | Ale-Torrico/computacion_para_ingenieria | 0 | 12795047 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 9 07:33:20 2022
@author: AMD
"""
# Given the list [10,20,30,10,5, 1, 3, 5, 4], split it into two lists:
# one list must contain only the even numbers and the other only the odd numbers
lista = [10,20,30,10,5, 1, 3, 5, 4]
Pares=[]
Impares=[]
for num in lista:
if num % 2 == 0:
Pares.append(num)
else:
Impares.append(num)
print(lista)
print(Pares)
print(Impares)
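# Editor's note (not part of the original exercise): the same split can be written
# more compactly with list comprehensions, which keep each filter on a single line.
pares_comp = [num for num in lista if num % 2 == 0]
impares_comp = [num for num in lista if num % 2 != 0]
print(pares_comp)
print(impares_comp)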
| 3.625 | 4 |
penut/__init__.py | penut85420/Penut | 0 | 12795048 | <filename>penut/__init__.py<gh_stars>0
from .utils import TimeCost, walk_dir, td2s, timedelta2string | 1.164063 | 1 |
setup.py | Jrsnow8921/SnowNasaPython | 0 | 12795049 | <gh_stars>0
from distutils.core import setup
setup(
name = 'SnowNasaPython',
packages = ['SnowNasaPython'],
version = '1.7',
  description = 'A python lib to interact with the NASA API',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/Jrsnow8921/SnowNasaPython.git',
download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz',
keywords = ['Nasa', 'API'],
classifiers = [],
py_modules = ['snownasapython'],
)
| 1.242188 | 1 |
lib/mclib/setup.py | yusht000/clickhouse-learning | 0 | 12795050 | <gh_stars>0
__author__ = 'lucky'
from setuptools import setup, find_packages
setup(
name='mclib',
version='1.0.0',
keywords=['mclib'],
description=' mock data for ck ',
author='shengtao.yu',
author_email='<EMAIL>',
packages=find_packages(include=['mclib', 'mclib.*']),
install_requires=[
'faker',
'clickhouse-driver',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
python_requires=">=3.6",
)
| 1.476563 | 1 |